query_id: stringlengths, 32 to 32
query: stringlengths, 9 to 4.01k
positive_passages: listlengths, 1 to 1
negative_passages: listlengths, 88 to 101
babaf3c56f104e69df4c50b6984ebdb6
signal handler used to shut down system.
[ { "docid": "ceb1e55cfb234fbe289bc7eda933f52d", "score": "0.7846179", "text": "def signal_handler(dummy_signum, dummy_frame):\n log.info(\"main process is shut down\")\n global need_shutdown\n need_shutdown = True\n if child_pid:\n os.kill(child_pid, signal.SIGINT)", "title": "" } ]
[ { "docid": "810555f96212df6b2b32cbbf4e43d0fe", "score": "0.8261516", "text": "def handler(self, signum, _):\n logging.warning(\"Signal Handler called with signal %s\", signum)\n logging.warning(\"Shutting down...\\n\")\n\n self.close_all()\n\n raise SystemExit(0)", "title": "" }, { "docid": "f7107d440bc02c7f4f904a59e4cc1998", "score": "0.80267245", "text": "def signal_handler(signum, frame):\n\tprint(\"\\n(Terminated with signal %d)\\n\" % (signum))\n\t# do handle shutdown stuff here\n\tsys.exit(0)", "title": "" }, { "docid": "135731b21198d5971341d131a9e22339", "score": "0.795713", "text": "def signal_handler(dummy_signum, dummy_frame):\n log.info(\"subprocess {pid} is shutting down\".format(pid=os.getpid()))\n global need_shutdown\n need_shutdown = True", "title": "" }, { "docid": "5d0f89cde13a2fbd1e110d0328e866dd", "score": "0.78103685", "text": "def signal_handler(*args):\n print \"Exit py signal {signal}\".format(signal=args[0])\n remove(pid_path)", "title": "" }, { "docid": "d837f43ce1a4f6abc2add6e4b7db6310", "score": "0.780656", "text": "def shutdown_handler(signum, frame):\n print('Caught signal {}'.format(signum))\n raise ServiceExit", "title": "" }, { "docid": "20df01676afbc4468dee67c615cd9484", "score": "0.77555346", "text": "def signal_handler(self, signum, frame):\n if signum == signal.SIGHUP:\n pass\n if signum in (signal.SIGINT, signal.SIGTERM):\n self.stop()", "title": "" }, { "docid": "0fa0859f95467774dcb062939da4a17a", "score": "0.7733787", "text": "def exit_signal_handler(signum, frame):\n log.info(\"Signal '%s' received. Shutdown...\"\n % (signal.Signals(signum).name))\n sys.exit()", "title": "" }, { "docid": "cc3f97028f9e60b8454f54b8b2d49366", "score": "0.7689309", "text": "def __signal_handler(self, signal, frame):\n # SIGTERM\n if signal is 15:\n self.run = False\n sys.exit(0)", "title": "" }, { "docid": "d96c644ea6de75dd961c3e1138a71972", "score": "0.7683797", "text": "def signal_handler( signal, frame ):\n print \"\\n[+] Quitting\"\n sys.exit( 0 )", "title": "" }, { "docid": "06acc41990ab6d652bb0d5d6907e2c05", "score": "0.7675129", "text": "def signal_handler(self, signum, _):\n if not signum in (signal.SIGTERM, signal.SIGUSR2):\n self.logger.warn(\"Unknown signal %d received\" % signum)\n return\n if self.stopping:\n self.logger.info(\"In signal_handler, ignoring\")\n return\n self.stopping = True\n self.logger.info(\"In signal_handler, stopping\")\n restarting = signum == signal.SIGUSR2\n if self.cleanup(restarting) is None:\n os._exit(0)", "title": "" }, { "docid": "ef7e1ff56f8b298d1aa6de5d4fb2dee6", "score": "0.7657259", "text": "def shutdown(self, *args):\n logger.info('Sensor Server: Caught signal: %d. Shutting down...', args[0])\n self.cleanup()\n sys.exit(0)", "title": "" }, { "docid": "f25521d524e54a5ca70ca6c836d9d9a8", "score": "0.7641597", "text": "def _signal_handler(signum: int, _: Any) -> None:\n _terminate_processes()\n sys.exit(signum)", "title": "" }, { "docid": "4e4b77d2015cb96a4ff9fdf761ef9319", "score": "0.7611257", "text": "def on_signal(sig_handler: Signal, signal: int):\n logger.info(\"Caught signal {signal}, shutting down\".format(signal=signal))\n sig_handler.stop()\n queue.stop()\n a.stop()\n s.close()\n loop.stop()", "title": "" }, { "docid": "f65a2ca22652ad14845de5353affccd2", "score": "0.7584789", "text": "def signal_int_handler(signal, frame):\n stop_daemon()", "title": "" }, { "docid": "e1b4a8ab5e1936dee058bf56f1df5eeb", "score": "0.75510687", "text": "def signal_handler(signal, frame):\n print(\"[!] 
Terminating script.\")\n sys.exit(0)", "title": "" }, { "docid": "2129f9da5918bbfa9e62f370a8f39c06", "score": "0.7546446", "text": "def sig_handle(sig, frame, *rest):\n raise KeyboardInterrupt('Shut it all down!')", "title": "" }, { "docid": "350ab0935421c5da0b6b4f67263f4afc", "score": "0.7440826", "text": "def signal_handler(_, __):\n print(\"\\nEnd of program.\")\n sys.exit(0)", "title": "" }, { "docid": "99becba18292d84728957ebc40ae2faa", "score": "0.74364674", "text": "def _signal_handler(self, signum, frame):\n self.terminate.set()", "title": "" }, { "docid": "3e2f7ff20e5c152b3dac698b0c0b34b8", "score": "0.7416336", "text": "def sigterm_handler(signal_number, stack_frame):\n print_datetime(\"End\")\n sys.exit(0)", "title": "" }, { "docid": "e048ee1a399eb84765506029e434ec1f", "score": "0.7414418", "text": "def signal_handler(signum, frame):\n global IS_CLOSING\n logging.info('exiting...')\n IS_CLOSING = True", "title": "" }, { "docid": "e6b2fa10ee439bb8f18b7a3cbc9aebfa", "score": "0.7393493", "text": "def signal_handler(signal, frame):\n s.close()\n sys.exit(0)", "title": "" }, { "docid": "db8a736c2e6a2b7f67801905ec688aa9", "score": "0.73903704", "text": "def shutdown_handler(self, event):\n _LOGGER.debug(\"Running homeassistant_stop event handler: %s\", event)\n self.hcidump.kill()\n self.hcidump.communicate()\n self.hcitool.kill()\n self.hcitool.communicate()\n self.tempf.close()", "title": "" }, { "docid": "ed0337116889605b298872b2309ccad9", "score": "0.73839384", "text": "def _signal_handler(*args):\n print(\"\\nEnding...\")", "title": "" }, { "docid": "e782288ad915d88b5453f3ac5cc1b81a", "score": "0.7375529", "text": "def signal_handler():\n\n sys.stdout.write(\"You pressed Ctrl+C!\")\n kill_chromedrivers()\n sys.exit(0)", "title": "" }, { "docid": "efbb8be2a086b2cc9b52e87672436708", "score": "0.73458403", "text": "def signal_handler(sig, frame):\n\n\tprint('You pressed Ctrl+C!')\n\tsys.exit(0)", "title": "" }, { "docid": "0851d94ccda4fb930dc063ac2c885ba3", "score": "0.73374814", "text": "def signal_shutdown(self) -> None:\n logging.info(\"SIGNALING SHUTDOWN\")\n self.loop.call_soon_threadsafe(self.lifecycle.event_signal_shutdown.set)", "title": "" }, { "docid": "533ac52e29c589a85f321e63cccd29bd", "score": "0.732291", "text": "def signal_handler(sig, frame):\n logging.debug('Exiting')\n sys.exit(0)", "title": "" }, { "docid": "8e5f45584b50490a20e2ea038b0b87f1", "score": "0.7303321", "text": "def shutdown(self, signum, frame):\n self.logger.info(\"Shutting down with signal %s\", Signals(signum).name)\n exit(1)", "title": "" }, { "docid": "979a4d4781d6b97d7c52b5e5bb2680ab", "score": "0.72847563", "text": "def signal_handler(signal, frame):\n print \"\\n\"\n Utils.print_info(\"Ctrl+C pressed. Killing Kalliope\")\n sys.exit(0)", "title": "" }, { "docid": "23dd6f0784e39419972f96711c8540cb", "score": "0.7259255", "text": "def interrupt_handler(self, signal, frame):\n self.close()", "title": "" }, { "docid": "3f6e0f8f9775b45c66aaa93448bc9969", "score": "0.72448546", "text": "def signalhandler(signal, frame):\n logger.info(\"Received signal {}. 
Shutting down...\".format(signal))\n\n if mysql is not None:\n mysql.close()\n\n if analtux is not None():\n analtux.die(\"I'll be back!\")\n\n sys.exit(0)", "title": "" }, { "docid": "e4fbf849ae77c2dda33dcd90ba9b397f", "score": "0.7219392", "text": "def interrupt_handler(signal, frame):\n\n disconnect(USERS)\n\n logging.info('Server turning off')\n SERVER_SOCKET.close()\n\n sys.exit(0)", "title": "" }, { "docid": "052b4bad8d2a5ccd936c6bb5f7b44485", "score": "0.720535", "text": "def sig_handler(sig,frame):\n ioloop.IOLoop.instance().add_callback(shutdown)", "title": "" }, { "docid": "6f7407dc8cb72db96c78613a10f055c7", "score": "0.72008955", "text": "def sighandler(self, signum, frame):\n # Close the server\n print 'Shutting down server...'\n # Close existing client sockets\n for output in self.outputs:\n output.close()\n self.server.close()", "title": "" }, { "docid": "3248a3710cd1de8bc17c300cdcbfd0b6", "score": "0.71931773", "text": "def handlerSIGTERM(signum, frame):\n \n stopDaemon()\n sys.exit(0)", "title": "" }, { "docid": "2ca56dbf7b09d2fff6cf91b194a51d77", "score": "0.7167457", "text": "def signal_handler(_signal, _frame):\n #print('Bye ...')\n strip.stop()\n os.kill(os.getpid(), signal.SIGKILL)\n sys.exit(0)", "title": "" }, { "docid": "000c38bebae6ea6e796b69033d582287", "score": "0.7137852", "text": "def handler(signal_received, frame):\n print(\"SIGINT or CTRL-C detected. Exiting...\")\n cleanup_function()\n exit(0)", "title": "" }, { "docid": "15eb8fc493e833e96a93cd01fa03ba64", "score": "0.7136185", "text": "def signal_handler(signal, frame):\n if client and client.connected:\n if signal == SIGINT:\n info('Program interrupt received, client exiting')\n client.Destroy()\n remove(pidfile)\n else:\n client.Restart()\n elif signal == SIGINT:\n remove(pidfile)\n raise SystemExit", "title": "" }, { "docid": "3c6abf0612c553d641d7409d019f6cdc", "score": "0.7135804", "text": "def _signal_handler(_signal, frame):\n simplescn_gui.guiclient_instance.quit()\n logging.shutdown()\n sys.exit(0)", "title": "" }, { "docid": "e759f820fd797537f5127d8e31518c9f", "score": "0.7132091", "text": "def signal_handler(sig, frame):\n print('\\nYou pressed Ctrl+C!')\n print(\"Bye!!!\")\n sys.exit(0)", "title": "" }, { "docid": "31f41d083707dcd184f814249bf9f762", "score": "0.7128665", "text": "def stop(self, signal):\n pass", "title": "" }, { "docid": "dd3a79e799b08a1e7b86e9b7e899c055", "score": "0.71221805", "text": "def signal_handler(recvd_signal, stack_frame):\n logger = logging.getLogger(sys._getframe().f_code.co_name)\n logger.warning('Got termination signal. 
Exiting...')\n sys.exit(0)", "title": "" }, { "docid": "aba4d825395adb29f8492d9713747401", "score": "0.7109597", "text": "def terminate(self):\n self.send_signal(signal.SIGTERM)", "title": "" }, { "docid": "1f2a6c3e3e21b7d5bec25a5317ec63fa", "score": "0.70785636", "text": "def clean_shutdown_when_killed(signum, *args):\n logging.warning('SIGTERM detected, shutting down')\n sys.exit()", "title": "" }, { "docid": "4a7f158710d60ce31e504a4a9bd090af", "score": "0.7049816", "text": "def cleanup(self, signum, frame):\n util.log(\"Shutting down signal={} frame={}\".format(signum, frame))\n self.remove_pid_file()\n exit(0)", "title": "" }, { "docid": "7aeca5c474492f22325a61664dd3ce24", "score": "0.7042923", "text": "def sigint_handler(self, signum, frame):\n self.log.debug(f\"Received signal {signum} received, stopping launchers...\")\n self.loop.add_callback_from_signal(self.stop_cluster)", "title": "" }, { "docid": "ab8fb26d80dd9c6f50f140507a32cb13", "score": "0.7038717", "text": "def SigIntHandler(signum, _stackframe):\n LOG.debug('SigInt - Signal handler called with signal {}'.format(signum))\n LOG.info(\"Interrupted.\\n\\n\\n\")\n g_API.Quit()\n exit", "title": "" }, { "docid": "be5e33a4ad0ab1f44f85ba0bb6529ecc", "score": "0.7027194", "text": "def _signal_handler(self, signal_received, frame):\n self.rmq_connection.close_connection()\n sys.exit(0)", "title": "" }, { "docid": "4972fd0df00d09940799c62f44837e22", "score": "0.7008538", "text": "def signal_handler(signal, frame):\n\n print '\\nBye'\n sys.exit(0)", "title": "" }, { "docid": "1cc0c9c467478437c7ad43c9991077c3", "score": "0.7002635", "text": "def sig_handler(self, sig, frame):\n tornado.ioloop.IOLoop.instance().add_callback(self.shutdown)", "title": "" }, { "docid": "08e0bed47013e89419a97dac15715a21", "score": "0.70000464", "text": "def signal_handler(signum, frame):\n if signum == SIGINT or signum == SIGQUIT:\n failbot.close()", "title": "" }, { "docid": "09eb7ac6a88b754fb5f8a40f3998c3fc", "score": "0.6962669", "text": "def signal_handler(sig, frame):\n global thread_stop_requested\n global webserver_stop_requested\n global wait_and_check_auto_certificate_thread_stop_requested\n \n thread_stop_requested = True\n webserver_stop_requested = True\n wait_and_check_auto_certificate_thread_stop_requested = True\n agent_log.info(\"Agent stopped\")\n \n if verbose:\n print(\"Agent stopped\\n\")\n sys.exit(0)", "title": "" }, { "docid": "8156f8639df6363b85257138f16418ad", "score": "0.6952976", "text": "def shutdown_on_interrupt(cls):\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n cls._released = True\n cls._condVar.notify()\n cls._ctrlCHandler.setCallback(cls._shutdown_on_interrupt_callback)\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")", "title": "" }, { "docid": "8156f8639df6363b85257138f16418ad", "score": "0.6952976", "text": "def shutdown_on_interrupt(cls):\n if Application._signalPolicy == Application.HandleSignals:\n cls._condVar.acquire()\n if cls._ctrlCHandler.getCallback() == cls._hold_interrupt_callback:\n cls._released = True\n cls._condVar.notify()\n cls._ctrlCHandler.setCallback(cls._shutdown_on_interrupt_callback)\n cls._condVar.release()\n else:\n Ice.getProcessLogger().error(\"interrupt method called on Application configured to not handle interrupts.\")", "title": "" }, { "docid": "ad5056f0883b018bf7d8b9d079719817", 
"score": "0.69524753", "text": "def signal_handler(sig, frame):\n print('\\nYou pressed Ctrl+C')\n if board is not None:\n board.send_reset()\n board.shutdown()\n sys.exit(0)", "title": "" }, { "docid": "f5d995ae29760da8aa678c21dc4b3fd1", "score": "0.69406307", "text": "def ioshutdown(self, **argv):\n\n ActorNub.ioshutdown(self, **argv)\n \n try:\n os.kill(self.pid, self.sig)\n except Exception as e:\n CPL.log(\"Shell.shutdown\",\n \"os.kill(pid=%s, sig=%s) failed with %s\" % \\\n (self.pid, self.sig, e))\n\n pid, status = os.waitpid(self.pid, 0)\n CPL.log(\"Shell.shutdown\", \"waitpid returned pid=%s and status=%s\" % (pid, status))", "title": "" }, { "docid": "91219ed0c861413cb775f2889a5774a8", "score": "0.691788", "text": "def signal_handler(signal, frame):\n sys.exit(\"CTRL+C detected, stopping execution\")", "title": "" }, { "docid": "64d6c1f57bc6ef63ef7dd5aa829f8c4d", "score": "0.6909473", "text": "def sig_handler(_signo, _stack_frame):\n print(\n f\"\\n\\n{Fore.RED}[Interrupt Handler]{Style.RESET_ALL} Esecuzione programma interrotta\\n\"\n )\n sys.exit(0)", "title": "" }, { "docid": "fdbb551143f5aa3ef72d9f84ec7d12cc", "score": "0.6900718", "text": "def signal_handler(sig, frame):\n if sig == 2:\n str_sig = 'SIGINT'\n elif sig == 3:\n str_sig = 'SIGQUIT'\n elif sig == 15:\n str_sig = 'SIGTERM'\n else:\n str_sig = str(sig)\n\n logging.info('Signal received: {0} {1}'.format(str_sig, frame))\n sys.exit(0)", "title": "" }, { "docid": "fbfb15929cedd26d3ba80a074dd34e2f", "score": "0.6890645", "text": "def shutdown(self):", "title": "" }, { "docid": "09f9761cd96972ddf39661a182915ace", "score": "0.68864065", "text": "def shutdown(self, sig, frame):\n self.role = None\n for pthread in self.threads:\n pthread.join()\n\n self.loop.stop()\n self.sub_sock.close()\n self.req_sock.close()\n self.log(\"shutdown\")\n\n sys.exit(0)", "title": "" }, { "docid": "041e04b31fb2bc75607f99d69fd3ff2c", "score": "0.6884249", "text": "def signal_handler(signal, frame):\n print(\"You pressed Control-c. Exiting.\")\n sys.exit(0)", "title": "" }, { "docid": "97b23c8938144f045b8d6709abc212e7", "score": "0.6883591", "text": "def signal_handler(sig, frame):\n print(\"SIGINT or Control-C detected... 
exiting...\")\n sys.exit(0)", "title": "" }, { "docid": "a19726d3f4ca029fb2b2078b27f45402", "score": "0.6878909", "text": "def register_subprocess_sighandler():\n def signal_handler(dummy_signum, dummy_frame):\n \"\"\"\n signal handler used to set the shut down flag.\n \"\"\"\n log.info(\"subprocess {pid} is shutting down\".format(pid=os.getpid()))\n global need_shutdown\n need_shutdown = True\n\n signal.signal(signal.SIGINT, signal_handler)", "title": "" }, { "docid": "966f76cb6bb544b2a8f2f978eaae44a7", "score": "0.6828296", "text": "def shutdown(self):\n self.terminate()", "title": "" }, { "docid": "059d852c9f6519bd9d94f40e64e6cb40", "score": "0.68282217", "text": "def handler_stop_signal(signum, frame):\n\n global run\n run = False", "title": "" }, { "docid": "f057e48c7d7de6b62d1e64d7ee019ecf", "score": "0.68162626", "text": "def respond_to_SIGTERM(signal_number, frame):\n raise KeyboardInterrupt", "title": "" }, { "docid": "43fe3ddc0698f0bcc78044e34d297ba2", "score": "0.681456", "text": "def ignore_signal_exit_handler(context):\n if context.launch_state.teardown:\n # Check the return code\n if context.task_state.signals_received:\n context.task_state.returncode = 0\n\n default_exit_handler(context)", "title": "" }, { "docid": "00281d1f4e9312a6e43a9da13e984951", "score": "0.6808808", "text": "def on_terminate(signum, frame):\n\n print(\"Daemon stopped [by signal \" + signum.__str__() + \"], clean-up hosts file ...\")\n # Stop event loop on next event\n global running\n running = False\n # Clean-up hosts file\n remove_host()\n\n exit(0)", "title": "" }, { "docid": "4ba00e2755e6e30cd0b2f7d56caab889", "score": "0.67934185", "text": "def shutdown_hard(self):\n self.terminate_hard()", "title": "" }, { "docid": "a18d5cd686675c246366bc67c79dbafb", "score": "0.6789014", "text": "def termination_signal_handler(_sig_id, _frame):\n # We need to use an array so that terminal_signal_received is not a\n # local variable assignment, but a lookup in the clojure's outer scope.\n termination_signal_received[0] = True", "title": "" }, { "docid": "cc55643472acb101087cc386c14bc110", "score": "0.67880505", "text": "def stop(self):\n self.exit_signal.set()", "title": "" }, { "docid": "fd276c2e4121ccc6c9cbe28b5b5115a9", "score": "0.678234", "text": "def shutdown(self, **kwargs):", "title": "" }, { "docid": "dcc30e23432704dc637d32c23084961b", "score": "0.677311", "text": "def _CleanupSignalHandler(signal_num, cur_stack_frame):\n _Cleanup()\n if (gslib.utils.parallelism_framework_util.\n CheckMultiprocessingAvailableAndInit().is_available):\n gslib.command.TeardownMultiprocessingProcesses()", "title": "" }, { "docid": "8272de4eff7ae3541d86ad4ebe05839e", "score": "0.6733569", "text": "def shutdown(self, signum, frame):\n self.logger.warning(\"Ctrl+C inputted so shutting down server\")\n main_thread = threading.currentThread()\n for t in threading.enumerate():\n if t is main_thread:\n self.logger.error(\"Attempt to join() {}\".format(t.getName()))\n continue\n t.join()\n self.socket.close()\n sys.exit(0)", "title": "" }, { "docid": "5ab26888e54fb301ab47b49d27a66d29", "score": "0.67329425", "text": "def shutdown(self, signum, frame):\n self.logger.info('Shutting down gracefully...')\n main_thread = threading.currentThread()\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()", "title": "" }, { "docid": "124ba0463a95a69e160c901364514361", "score": "0.67282706", "text": "def stop(self):\n _logger.info(\"Initiating shutdown\")\n _logger.info(\"Hit CTRL-C again or 
send a second signal to force the shutdown.\")\n\n if self.httpd:\n self.httpd.shutdown()\n self.close_socket(self.httpd.socket)\n\n # Manually join() all threads before calling sys.exit() to allow a second signal\n # to trigger _force_quit() in case some non-daemon threads won't exit cleanly.\n # threading.Thread.join() should not mask signals (at least in python 2.5).\n me = threading.currentThread()\n _logger.info('current thread: %r', me)\n for thread in threading.enumerate():\n _logger.info('process %r (%r)', thread, thread.isDaemon())\n if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:\n while thread.isAlive():\n _logger.info('join and sleep')\n # Need a busyloop here as thread.join() masks signals\n # and would prevent the forced shutdown.\n thread.join(0.05)\n time.sleep(0.05)\n\n _logger.info('--')\n openerp.modules.registry.RegistryManager.delete_all()\n logging.shutdown()", "title": "" }, { "docid": "59542916997d28869c325973e412373d", "score": "0.67227185", "text": "def exit_handler(sig, frame):\n sys.exit(0)", "title": "" }, { "docid": "bc8b6d6dc636b55ac0e9e41e483f74fc", "score": "0.67175466", "text": "def _signalTerm(self,signal,frame):\n self.logger.warn(\"SIGNTERM signal received, quitting\")\n self._isRunning = False\n map(self.__scheduler.cancel, self.__scheduler.queue)", "title": "" }, { "docid": "d1d2ee90e66212d32ced709382640b48", "score": "0.671323", "text": "def cbk_action_signal_term(self):\n self.send_signal(signal.SIGTERM)", "title": "" }, { "docid": "7dadeeaf2eebdb01c8bdd01b2ec9db88", "score": "0.67123884", "text": "def _sigterm_handler(signum, frame):\n # First of all, on TERMINATE, print the stacktrace.\n assert signum == signal.SIGTERM\n logging.error('SIGTERM is received.')\n debug.write_frames(sys.stderr)\n\n # If we can send SIGTERM to child processes, we do not exit here,\n # with expecting the graceful shutdown.\n # Note that, although we do this in atexit handler, too, it is too late\n # (runs after all threads are terminated). So we need it here.\n # Note that, to avoid race conditions, the program must not poll or wait\n # on a non-main thread. Practically, it is almost safe, but there is\n # a small chance for un-related processes to be killed by SIGTERM\n # accidentally.\n _terminate_subprocess()\n\n # Then, terminate the script. 
Note that at the end of the interpreter,\n # functions registered by atexit.register() will run.\n sys.exit(1)", "title": "" }, { "docid": "ab97986b50fa4e217425367204c73acb", "score": "0.67028475", "text": "def shutdown():\n \n pass", "title": "" }, { "docid": "ad9387950cb2baa4b394b01a32871fd9", "score": "0.6684507", "text": "def register_mainprocess_sighandler():\n print \"register signal handler\"\n def signal_handler(dummy_signum, dummy_frame):\n \"\"\"\n signal handler used to shut down system.\n \"\"\"\n log.info(\"main process is shut down\")\n global need_shutdown\n need_shutdown = True\n if child_pid:\n os.kill(child_pid, signal.SIGINT)\n\n signal.signal(signal.SIGINT, signal_handler)", "title": "" }, { "docid": "63039de99d26784db10c29d5fd10e205", "score": "0.66767186", "text": "def on_shutdown(self):\n pass", "title": "" }, { "docid": "be2ec26d0612712835f1d3472a7645f4", "score": "0.66495985", "text": "def _cb_sighup(self):\n self.log_server(\"SIGHUP, stopping children\")\n gevent.spawn(self.notify_workers, \"stop\")", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "0cf96eac2d6f20ebb4d3f8b00401bee0", "score": "0.66449976", "text": "def shutdown(self):\n pass", "title": "" }, { "docid": "94e6598af190a9a63a54d81612926ad9", "score": "0.66332674", "text": "def initiate_shutdown(self):\r\n # Windows does not provide SIGKILL, go with SIGTERM then.\r\n sig = getattr(signal, 'SIGKILL', signal.SIGTERM)\r\n # reloader active\r\n if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':\r\n os.kill(os.getpid(), sig)\r\n # python 2.7\r\n self.server._BaseServer__shutdown_request = True\r\n # python 2.6\r\n self.server._BaseServer__serving = False", "title": "" }, { "docid": "cfced53d0cae21207130abfa844ed16a", "score": "0.6611216", "text": "def shutdown(self):\n self.log.info(f\"shutting down\")\n self.stop_event.set()", "title": "" }, { "docid": "d0d4c7c20bf49d811db8b58f77bbabcb", "score": "0.66095597", "text": "def signal_handler(signal, frame):\n print()\n endProgram(0)", "title": "" }, { "docid": "a5cbc1745562df3362bc413df5f20166", "score": "0.65959656", "text": "def destroy(self, signal=9):\n if not self._launched:\n return\n\n self._pre_shutdown()\n subprocess.run(\"kill -%d %s\" % (signal, self.pid), shell=True, check=True)\n if not self.daemon:\n self._popen.wait()\n else:\n self.wait_pid_exit()\n self._post_shutdown()\n self._launched = False", "title": "" }, { "docid": "6deab790b7cadd11f653347491015e66", "score": "0.65821344", "text": "def reset_sigint_handler():\n sig.signal(sig.SIGINT, sig.default_int_handler)", "title": "" }, { "docid": "6705a3e2974e27b08c35d2715b4cc2e9", "score": "0.6575946", "text": "def stop_signal_handler(self, signum: int, param) -> None: # 
pylint: disable=W0613\n chn: PairChannel = self.mngr.channels[SVC_CTRL]\n chn.send(cast(ICCPController, chn.protocol).stop_msg(), chn.session)", "title": "" }, { "docid": "a25faff0a53f821231daaf0b89b45f2a", "score": "0.65728974", "text": "def signal_handler(server, sig, frame):\n io_loop = ioloop.IOLoop.current()\n def stop_loop(deadline):\n if (time.time() < deadline and\n (io_loop._callbacks or io_loop._timeouts)): #pylint: disable=no-member\n io_loop.add_timeout(time.time() + 1, stop_loop, deadline)\n else:\n io_loop.stop()\n\n def shutdown():\n logging.info('Signal received, stopping web server')\n server.stop()\n # wait 2 seconds after receiving SIGINT to complete requests\n stop_loop(time.time() + 2)\n\n io_loop.add_callback_from_signal(shutdown)", "title": "" } ]
9c0ab71f295a9810ed36b32027816ef2
equivalent to java's toString()
[ { "docid": "ecd70f62174ad356df3ac3fba685cd97", "score": "0.0", "text": "def __str__(self):\n return \"%s clients connected to %s. Overall %s connections\" % (len(self.connections), self.type.__name__, Connection_manager.all_conn_count)", "title": "" } ]
[ { "docid": "81546b32127a82d536e19a034200923f", "score": "0.79608345", "text": "def to_string(self):", "title": "" }, { "docid": "43b3a37a9629538ccb503d07e5946651", "score": "0.79494643", "text": "def to_string(self) -> str:", "title": "" }, { "docid": "2b1110d4fe19e8c324324b90388e8239", "score": "0.7838506", "text": "def to_string(cls):", "title": "" }, { "docid": "161115709611109ea799a0abb7b6e7b0", "score": "0.7820034", "text": "def toString(self):\n return self.__str__()", "title": "" }, { "docid": "e73025db20c4b3cba8cc285e990b98b9", "score": "0.7694883", "text": "def toString(self, *args):\n pass", "title": "" }, { "docid": "51e9bbaf75ebbc2dbffc31e3901c6183", "score": "0.75897765", "text": "def __str__(self):\n return ''.join(map(str, self))", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "9d76dd1fa785ed6ad862e20fa14f0781", "score": "0.7542934", "text": "def __repr__(self):\n return str(self)", "title": "" }, { "docid": "f5c4598eb929d4cabec9545a80973ee1", "score": "0.7493158", "text": "def ToString(self):\n return self.__repr__()", "title": "" }, { "docid": "f5c4598eb929d4cabec9545a80973ee1", "score": "0.7493158", "text": "def ToString(self):\n return self.__repr__()", "title": "" }, { "docid": "6cf2e1516e0d96f58dbeacefba6f285c", "score": "0.7475585", "text": "def toString(self) -> unicode:\n ...", "title": "" }, { "docid": "31b540a5620a27c4c475b53455ae28ff", "score": "0.7468453", "text": "def to_string(self) -> str:\n return jsii.invoke(self, \"toString\", [])", "title": "" }, { "docid": "c1cd4f716a4bf5e723c274bbaf6ab603", "score": "0.7446048", "text": "def to_string(self):\n return self.__str__()", "title": "" }, { "docid": "c1cd4f716a4bf5e723c274bbaf6ab603", "score": "0.7446048", "text": "def to_string(self):\n return self.__str__()", "title": "" }, { "docid": "8700733182d8bab33ca0cb7fbcceb1fe", "score": "0.7434429", "text": "def __str__(self) -> str:\n return ''.join(map(str, self))", "title": "" }, { "docid": "9f7a1fe277348ae6c9e9fb85eff6d81c", "score": "0.74243224", "text": "def to_string(self):\n return str(self)", "title": "" }, { "docid": "f02d3a4b4fa0c6eaecb8f83fadb7b4c9", "score": "0.73975337", "text": "def str(self):\r\n return str.__str__(self)", "title": "" }, { "docid": 
"b2f3a7956de03b9ff86735d586cab29d", "score": "0.738695", "text": "def ToString(self):\n return self.name + \", \" + str(self.zero_zero) + \", \" + str(self.zero_one) + \", \" + \\\n str(self.one_zero) + \", \" + str(self.one_one) + \", \" + str(self.total_entropy) + \", \" + \\\n str(self.zero_entropy) + \", \" + str(self.one_entropy) + \", \" + str(self.gain)", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return 
self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", 
"score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", "title": "" }, { "docid": "bff2052ac2ba06621cc51030f2133214", "score": "0.73594743", "text": "def __repr__(self):\n return self.to_str()", 
"title": "" } ]
f2b09bd130b23dbfb9aea3c73b0071d6
acadadmin can delete the outdated exam timetable.
[ { "docid": "8582c4a57a7c60d7c63e189a44e553d7", "score": "0.71408355", "text": "def delete_exam_timetable(request):\r\n if request.method == \"POST\":\r\n data = request.POST['delete']\r\n t = Exam_timetable.objects.get(exam_time_table=data)\r\n t.delete()\r\n return HttpResponse(\"TimeTable Deleted\")", "title": "" } ]
[ { "docid": "094c5803b3b85df1dcc34f4e9d81a60e", "score": "0.6372826", "text": "def delete_timetable(request):\r\n if request.method == \"POST\":\r\n data = request.POST['delete']\r\n t = Timetable.objects.get(time_table=data)\r\n t.delete()\r\n return HttpResponse(\"TimeTable Deleted\")", "title": "" }, { "docid": "6c553b5ff8fa4cf5dc1cb61465d1d3da", "score": "0.6183167", "text": "def clear_expired(cls):\n rethinkdb_handler = cls.__establish_rethinkdb()\n reference_time = time.mktime(timezone.now().timetuple())\n\n delete_request = rethinkdb.table(SESSION_RETHINK_TABLE).filter(rethinkdb.row[\"expire\"] < reference_time).delete()\n delete_result = delete_request.run(rethinkdb_handler)\n\n ##print delete_result", "title": "" }, { "docid": "5be45334b5c9ae628e248b2c0a152246", "score": "0.6125597", "text": "def time_deleted(self):\n ...", "title": "" }, { "docid": "da2bda12240543429c0d28786a9469ce", "score": "0.61252767", "text": "def delete_time(request):\n if is_time_available(request):\n time_id = request.session['cur_timestep']\n timestep = TimeStep.objects.get(id=time_id)\n timestep.delete()\n if is_time_available(request):\n time_last = TimeStep.objects.latest('id')\n set_current_time(request, time_last)\n else:\n request.session['cur_timestep'] = None\n request.session['current_time'] = None\n output = True\n else:\n output = False\n return output", "title": "" }, { "docid": "fb8fafe767d5936137bf8c222aca41ac", "score": "0.61071265", "text": "def test_delete_checkplan(self):\n pass", "title": "" }, { "docid": "e2ba1a9df9991f0d949451f9174a70b8", "score": "0.6056648", "text": "def delete():", "title": "" }, { "docid": "e2ba1a9df9991f0d949451f9174a70b8", "score": "0.6056648", "text": "def delete():", "title": "" }, { "docid": "058a21c04045089496d1e9e7bffa0088", "score": "0.60159594", "text": "def test_delete_table(self):\r\n self.setUpDatabase(self.test_table_name + self.test_table_name)\r\n self.setUpDatabase(self.test_table_name)\r\n last_before_delete = self.setUpQueryCertainTable(-1, 0)\r\n self.assertEqual(self.test_database_name, last_before_delete)\r\n\r\n gui_w = AppInterface()\r\n gui_w.delete_table(self.test_database_name)\r\n last_after_delete = self.setUpQueryCertainTable(-1, 0)\r\n self.assertNotEqual(self.test_database_name, last_after_delete)\r\n\r\n self.tearDownTable(self.test_table_name + self.test_database_name)", "title": "" }, { "docid": "c44d714b484c0ea2bef476083cb07455", "score": "0.60098726", "text": "def testLegitimate(self):\n api.deleteReminders('patient', datetime.datetime.now())\n\n self.assertEqual(testDatabase.Reminder.select().where(testDatabase.Reminder.username == 'patient').count(),0)", "title": "" }, { "docid": "a1a7365926daaef9b0d737ec88c90cb3", "score": "0.5993989", "text": "def effaceTout(self):\n\t\tself.c.delete('all')", "title": "" }, { "docid": "cd56ae436b3fd9df1e55184ff544ea8a", "score": "0.59935033", "text": "def test_time_deleted_after_successful_submission(self):\n # get Time object\n time = Time.objects.get(pk=self.time_data['time-time'])\n \n # post the data to the schedule submission form\n response = self.client.post(self.url, data=self.valid_data, follow=True)\n\n # check that the time object got deleted \n self.assertFalse(Time.objects.filter(pk=time.pk).exists())", "title": "" }, { "docid": "1b71e5583c8d37ef961271ab354a3d8f", "score": "0.59676194", "text": "def cron_delete_trash(request):\n month = datetime.today() - timedelta(days=30)\n mails = Mail.objects.filter(state='T', updated_at__lte=month)\n mails.delete()\n return 
HttpResponse(\"Nuked\")", "title": "" }, { "docid": "699b9aff0f8433089bdc5d7c0c2b9fd1", "score": "0.5943596", "text": "def delete_Alarm(self,cloudwatch_conn,del_info_all):", "title": "" }, { "docid": "9ee4cec5fbc500c0f03a164a30743a0f", "score": "0.59186614", "text": "def delete_entry(cls, db, dow, time, zone):\n cursor = db.cursor()\n cursor.execute(\"delete from schedule where day=%s and starttime=%s \"\n \"and zone=%s\",\n (dow, time, zone))", "title": "" }, { "docid": "cf9adc762ba010c2ea2fb6ab07ed0436", "score": "0.59179914", "text": "def test_05_remove_date(self):\n self.crontab.remove_all(time='2 * * * *')\n self.assertEqual(len(self.crontab), 2)\n self.assertEqual(len(self.crontab.render()), 67)", "title": "" }, { "docid": "68b889b424207a149902148feba9ad45", "score": "0.5904593", "text": "def delete(self):", "title": "" }, { "docid": "68b889b424207a149902148feba9ad45", "score": "0.5904593", "text": "def delete(self):", "title": "" }, { "docid": "68b889b424207a149902148feba9ad45", "score": "0.5904593", "text": "def delete(self):", "title": "" }, { "docid": "68b889b424207a149902148feba9ad45", "score": "0.5904593", "text": "def delete(self):", "title": "" }, { "docid": "19f4652d60cb7eeed8259175eb049314", "score": "0.5903692", "text": "def test_delete_action(self):\n resp = self.api.delete(self.assessment_template)\n self.assert200(resp)\n self.assertFalse(all_models.AssessmentTemplate.query.filter(\n all_models.AssessmentTemplate == self.assessment_template.id).all())", "title": "" }, { "docid": "bdce2af3070cfa7f623faecf85b5df25", "score": "0.5892439", "text": "def test_delete_dashboard_using_delete(self):\n pass", "title": "" }, { "docid": "e574e5351c612c5737c5829786bef8c7", "score": "0.5883527", "text": "def test_del_by_time():\n interface = DataTransfer(Database(PATH))\n data_num = len(interface.get_all_msgs())\n test_save_msg(time=datetime.now() - timedelta(days=3))\n interface.del_msg_by_timedelta(timedelta(days=3))\n assert len(interface.get_all_msgs()) == data_num", "title": "" }, { "docid": "f30465b3b90fdc294588af21d005d701", "score": "0.5847235", "text": "def check_delete(self):\r\n return True", "title": "" }, { "docid": "ac8afd1276937b7383c892323693a97d", "score": "0.582822", "text": "def del_afa(self):\n self.afa_table.entry_widget.delete_afa_entry()", "title": "" }, { "docid": "0fd1ad8726e1abc2e8e0c7f02f461f25", "score": "0.5826654", "text": "def _remove_expired(self, table_name):\n lasttimestamp = int(time.time()) - self.timetoexpire\n dbc = sqlite3.connect(self.filename)\n try:\n sql = self.statements['remove_expired'] % (table_name, )\n parameters = (lasttimestamp, )\n dbc.execute(sql, parameters)\n dbc.commit()\n finally:\n dbc.close()", "title": "" }, { "docid": "92f462c338cdc611a8a1f14bd60150a2", "score": "0.58228624", "text": "def delete_table(self, table_name, timeout):\n _abstract()", "title": "" }, { "docid": "9e21dc0bbdab6fb90a1944564d74156e", "score": "0.58228254", "text": "def delete():\n pass", "title": "" }, { "docid": "9e21dc0bbdab6fb90a1944564d74156e", "score": "0.58228254", "text": "def delete():\n pass", "title": "" }, { "docid": "9d2ff412c9596376fb8f11c9ab2185d1", "score": "0.5803048", "text": "def delete(table: str, iD: int) -> bool:\n pass", "title": "" }, { "docid": "c01b5b2d0c39beb610983223e4d37b35", "score": "0.5798485", "text": "def test_delete_experiment(self):\n pass", "title": "" }, { "docid": "7543f663e6629b693ab62983bde09dfc", "score": "0.5790025", "text": "def testNotDelete(self):\n api.deleteReminders('patient2', datetime.datetime.now())\n\n 
self.assertEqual(testDatabase.Reminder.select().where(testDatabase.Reminder.username == 'patient2').count(),1)", "title": "" }, { "docid": "0f37ded8a9b5e3152abda88b3b8fa1db", "score": "0.5783209", "text": "async def delete(self, dt: float):\n self.cursor.execute(\n self.query(\"DELETE FROM reminders WHERE remind_date <= %(date)s\"),\n {\"date\": dt + 60})\n self.connection.commit()", "title": "" }, { "docid": "77db36490c006fe1aa97ac84bbd0b3a1", "score": "0.57718885", "text": "def test_sla_delete_exclusion_period(self):\n pass", "title": "" }, { "docid": "d9191dc3d97e2cd30a4bd4fbc9b7d302", "score": "0.5763397", "text": "def pre_delete(self):", "title": "" }, { "docid": "92f53c8a4bbf3526216d538e8440a1d8", "score": "0.5739159", "text": "def test_delete_question(self):\n pass", "title": "" }, { "docid": "1ff5faba2f24fc26542c06724836ea45", "score": "0.5733912", "text": "def delete_data(self):\n Task.delete().where(Task.title == 'tester').execute()", "title": "" }, { "docid": "d10aa2f5032b6d136775272ecd80cc37", "score": "0.5725279", "text": "def test_on_delete_constraints(self):\n u = util.create_vimma_user('a', 'a@example.com', 'pass')\n\n tz = TimeZone.objects.create(name='Europe/Helsinki')\n tz.full_clean()\n s = Schedule.objects.create(name='s', timezone=tz,\n matrix=json.dumps(7 * [48 * [True]]))\n s.full_clean()\n prv = Provider.objects.create(name='My Prov', type=Provider.TYPE_DUMMY)\n prv.full_clean()\n prj = Project.objects.create(name='Prj', email='a@b.com')\n prj.full_clean()\n vm = VM.objects.create(provider=prv, project=prj, schedule=s)\n vm.full_clean()\n\n a = Audit.objects.create(level=Audit.INFO, text='hi',\n user=u, vm=vm)\n a.full_clean()\n a_id = a.id\n del a\n\n u.delete()\n self.assertEqual(Audit.objects.get(id=a_id).user, None)\n vm.delete()\n self.assertEqual(Audit.objects.get(id=a_id).vm, None)", "title": "" }, { "docid": "5116d55903c079b79c0cc7a52d4f213a", "score": "0.5719011", "text": "def cron_clear_finger_print_attendance(self):\n return self._cr.execute(\"DELETE FROM hr_attendance_finger\")", "title": "" }, { "docid": "c785d33f32bb5fe3ff8c505e2eb60654", "score": "0.57178885", "text": "def delete_tribe_assertions(apps, schema_editor):\n TribeAssertion = apps.get_model(\"promrep\", \"TribeAssertion\")\n TribeAssertion.objects.all().delete()", "title": "" }, { "docid": "9f271f1de37d6fd071146f1845d40e11", "score": "0.57023877", "text": "def test_delete_runtime(self):\n a_rt = Runtime.objects.get(uuid=self.valid_uuid)\n a_rt.delete()\n assert(Runtime.objects.all(), [])", "title": "" }, { "docid": "65e749004d89a18fbf0f2829fc72e798", "score": "0.56984633", "text": "def test_case_5(self):\n\n accounts_maint.AccountsMaintenance.navigate_to()\n am = accounts_maint.AccountsMaintenance()\n \n if not am.delete(self.account_ids):\n tc_fail(\"Failed to delete accounts\")\n \n mws.recover()", "title": "" }, { "docid": "d585c3cd85dc5d6b09c0c6b831b193ea", "score": "0.569638", "text": "def test_delete_table_1():\n table_name = \"table_1\"\n to_be_deleted = DB._db[table_name]\n result_1 = DB.delete_table(\n table_name=table_name\n )\n\n assert(\n result_1 == to_be_deleted and\n table_name not in DB._db[\"__schemas__\"] and\n table_name not in DB._db\n )", "title": "" }, { "docid": "a16face1074b7c5046e013253059358d", "score": "0.56933856", "text": "def delete_old():\r\n # Conncect to DB\r\n connection = connect()\r\n try:\r\n logger.info(\"[O] Suppression des donnees datant de plus d'1 semaine\")\r\n # Initializations\r\n cursor = connection.cursor(buffered=True)\r\n request = 'DELETE 
FROM historique WHERE TIMEDIFF(CURRENT_TIMESTAMP, current) > \"07:00:00\"'\r\n # Execute request\r\n cursor.execute(request)\r\n connection.commit()\r\n logger.info(\"[O] Succes de la requete, {} ligne(s) effacee\".format(cursor.rowcount))\r\n # Handle error\r\n except Exception as error :\r\n logger.error(\"[X] Echec de la requete : {}\".format(error))\r\n finally:\r\n # Closing database connection.\r\n if(connection.is_connected()):\r\n connection.close()\r\n print(\"Connectionclosed\")\r\n\r\n return cursor.rowcount", "title": "" }, { "docid": "c770e47fe57583b9dbbf8bac77ba9c89", "score": "0.56905067", "text": "def test_delete_notificant(self):\n pass", "title": "" }, { "docid": "2f179039897cdcb153be2707165a2e9d", "score": "0.5685597", "text": "def delete_log_in_db():\n lib_cm.message_write_to_console(ac, u\"delete_log_in_db\")\n\n #n_days_back = int( db.ac_config_1[4] )\n date_log_back = (datetime.datetime.now()\n + datetime.timedelta(days=- int(db.ac_config_1[4])))\n c_date_log_back = date_log_back.strftime(\"%Y-%m-%d %H:%M\")\n\n ACTION = (\"DELETE FROM USER_LOGS WHERE USER_LOG_TIME <= '\"\n + c_date_log_back + \"' ROWS 4000\")\n\n db.dbase_connect(ac)\n if db.db_con is None:\n err_message = u\"No connect to db for delete_log_in_db:\"\n lib_cm.error_write_to_file(ac, err_message)\n return None\n\n try:\n db_cur = db.db_con.cursor()\n db_cur.execute(ACTION)\n db.db_con.commit()\n db.db_con.close()\n log_message = (u\"Loeschen der Action- \"\n \"und Errorlogs in DB-Tabelle die aelter sind als: \"\n + c_date_log_back)\n db.write_log_to_db(ac, log_message, \"e\")\n except Exception, e:\n lib_cm.message_write_to_console(ac, log_message +\n u\"Error 2 delete_log_in_db: %s</p>\" % str(e))\n err_message = log_message + u\"Error 2 delete_log_in_db: %s\" % str(e)\n lib_cm.error_write_to_file(ac, err_message)\n db.db_con.rollback()\n db.db_con.close()\n return None\n return \"ok\"", "title": "" }, { "docid": "cccf4ecdec9518e16b213a3b5906faaa", "score": "0.56803375", "text": "def delete_data():\n conn = sqlite3.connect('reminders.db')\n cur = conn.cursor()\n cur.execute('DELETE FROM reminder')\n conn.commit()\n conn.close()", "title": "" }, { "docid": "0f09f008b105d9bbed5e6a96a0bfd7aa", "score": "0.5677477", "text": "def delete(self,sqlobject,table):\n return True", "title": "" }, { "docid": "3b218cc2037cf0ca33c4161a0df126f1", "score": "0.5667701", "text": "def test_tasks_delete(self):\n pass", "title": "" }, { "docid": "5515a3a53d372135c0e81a0b571e3be2", "score": "0.5665403", "text": "def test_would_delete_schedule(self,\n app_setup_to_add_job\n ):\n schedule = Models.Schedule(\n schedule_type=Constants.SCHEDULE_HOURLY,\n start_time=self.start_time_beautifully,\n **self.active_schedule_skeleton\n )\n db.session.add(schedule)\n db.session.commit()\n\n no_active_schedules = Models.Schedule.query.filter(\n Models.Schedule.is_active\n ).count()\n assert no_active_schedules == 1\n\n schedule.is_deleted = True\n db.session.commit()\n assert schedule.is_deleted\n assert schedule._is_active\n assert not schedule.is_active\n\n no_active_schedules = Models.Schedule.query.filter(\n Models.Schedule.is_active\n ).count()\n assert no_active_schedules == 0\n\n no_schedules = Models.Schedule.query.count()\n assert no_schedules == 1", "title": "" }, { "docid": "1887357f59f6033e1f8f29d16ccbe001", "score": "0.56652975", "text": "def test_cli_table_delete(python_path):\n\n _ = _run_command(\n f\"{python_path} -m cli table delete --mode=staging {DATASET_ID} {TABLE_ID}\",\n output=\"out\",\n )\n\n table = 
bd.Table(DATASET_ID, TABLE_ID)\n\n assert not _table_exists(table, mode=\"staging\")", "title": "" }, { "docid": "5ba2e00bf3ce1710378d1419144b919f", "score": "0.566376", "text": "def delete_ticket_hours(self, tid):\n execute_non_query(self.env, \"\"\"\n DELETE FROM ticket_time WHERE ticket=%s\"\"\", tid)", "title": "" }, { "docid": "346fdf04268cd6d3348c630b40d5296e", "score": "0.56621265", "text": "def test_unassign_dashboard_from_customer_using_delete(self):\n pass", "title": "" }, { "docid": "7881e074b47bc8d917f0ce7d23440ea6", "score": "0.5662039", "text": "def delete_appointment(self):\n self.delete()", "title": "" }, { "docid": "5e6f3caf6be6d77755777af1a6fe3245", "score": "0.5651613", "text": "def test_delete_charging_log_using_delete(self):\n pass", "title": "" }, { "docid": "9868f0ef0f94aaa615afe6cc796a03de", "score": "0.5648196", "text": "def delete(self):\r\n pass", "title": "" }, { "docid": "e60e2a004554a5d4d3aad7b1c98806b7", "score": "0.5646205", "text": "def test_delete6(self):\n pass", "title": "" }, { "docid": "9ece40030a64832c7f582a6fa08bf334", "score": "0.5645428", "text": "def delete_task(conn, id):\n sql = 'DELETE FROM temp_attendance WHERE ID=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n conn.commit()", "title": "" }, { "docid": "a4930ef08af6088853a1c6b956394cdc", "score": "0.5643186", "text": "def case_delete_records(self, refresh_db_before):\n new_name = \"admin1\"\n\n AdminOp.add(new_name)\n\n adm_obj = AdminOp.get(name=new_name)\n self.assertTrue(len(adm_obj) is 1)\n self.assertEqual(adm_obj[0].name, new_name)\n\n AdminOp.delete(adm_obj[0])\n\n adm_obj = AdminOp.get(name=new_name)\n self.assertFalse(adm_obj)", "title": "" }, { "docid": "84881611ea8cef2730d7fb575e15d542", "score": "0.56427807", "text": "def delete(self, request):\n\n data = request.data\n exam_id = data[\"exam_id\"]\n LOGGER.info(\"exam id:%d\", exam_id)\n exam = Exam.objects.get(id=exam_id)\n try:\n #ExamCollegeMap.objects.filter(exam=exam_id).delete()\n exam.delete()\n LOGGER.info(\"exam deleted successfully\")\n return Response({\"status\": \"SUCCESS\", \"message\": \"Exam deleted successfully\"})\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": \"Failed to delete exam\"})", "title": "" }, { "docid": "7a20dac7db871d80c35a7e3a68e78695", "score": "0.5640759", "text": "def test_questionDeletion(self):\n # Create a temporary question\n login(self.layer['portal'], MANAGER_ID)\n self.layer['portal']['dept1']['tut1']['lec1'].invokeFactory(\n type_name=\"tw_latexquestion\",\n id=\"qntmp\",\n title=\"Unittest D1 T1 L1 QTmp\",\n )\n self.notifyModify(self.layer['portal']['dept1']['tut1']['lec1'])\n transaction.commit()\n\n # Allocate to user A, should get questions\n aAlloc = self.getJson('http://nohost/plone/dept1/tut1/lec1/@@quizdb-sync', user=USER_A_ID)\n self.assertTrue(getAllQuestionPath(aAlloc['questions']) in aAlloc['question_uri'])\n self.assertEqual(\n sorted([q['title'] for q in self.getJson(aAlloc['question_uri']).values()]),\n [u'Unittest D1 T1 L1 Q1', u'Unittest D1 T1 L1 Q2', u'Unittest D1 T1 L1 QTmp'],\n )\n\n # Delete QTmp\n browser = self.getBrowser('http://nohost/plone/dept1/tut1/lec1/qntmp/delete_confirmation', user=MANAGER_ID)\n browser.getControl('Delete').click()\n\n # Qntmp goes, as question data can't be got\n self.assertEqual(\n sorted([q['title'] for q in self.getJson(aAlloc['question_uri']).values()]),\n [u'Unittest D1 T1 L1 Q1', u'Unittest D1 T1 L1 Q2'],\n )\n\n # After sync, qntmp still gone\n aAlloc 
= self.getJson('http://nohost/plone/dept1/tut1/lec1/@@quizdb-sync', user=USER_A_ID)\n self.assertTrue(getAllQuestionPath(aAlloc['questions']) in aAlloc['question_uri'])\n self.assertEqual(\n sorted([q['title'] for q in self.getJson(aAlloc['question_uri']).values()]),\n [u'Unittest D1 T1 L1 Q1', u'Unittest D1 T1 L1 Q2'],\n )", "title": "" }, { "docid": "c22eb195ae0f2bed837f7f299cc62729", "score": "0.5633643", "text": "def test_delete_answer(self):\n pass", "title": "" }, { "docid": "592c188c5f5727575034f523a4c9198f", "score": "0.56314445", "text": "def test_destroy(self):\n create_table_feet_leg(self.user)\n self.data[\"description\"] = \"from partial update test case\"\n url = reverse(\"table:table_viewset-detail\", kwargs={'pk': 'steel'})\n res = self.client.delete(url)\n table = Table.objects.all()\n serializer = table_serializer.TableSerializer(table)\n expected_data = {\"message\": \"'steel' deleted successfully\", \"status\": \"success\"}\n self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(res.data, expected_data)", "title": "" }, { "docid": "506f1ca3832b0a414a59893e593d2214", "score": "0.56306815", "text": "def cleanup(self, pk):\n\n Confirmation.objects.expired().delete()", "title": "" }, { "docid": "de902a15e1aed2e63d89d78e5dfcd3ae", "score": "0.5627253", "text": "def checkDeleted(self) -> None:\n ...", "title": "" }, { "docid": "de902a15e1aed2e63d89d78e5dfcd3ae", "score": "0.5627253", "text": "def checkDeleted(self) -> None:\n ...", "title": "" }, { "docid": "7b65ba7432c903197368e4212a96278d", "score": "0.56237686", "text": "def delete_standoff(standoff_id):\n c.execute(\"DELETE FROM 'standoff' WHERE id =?\",(standoff_id,))\n conn.commit()", "title": "" }, { "docid": "fdd6317c316001ae16d0485ffb7fad6d", "score": "0.5621159", "text": "def test_delete_checker(self):\n pass", "title": "" }, { "docid": "3955edbaba36b849622f4015767413ef", "score": "0.56152266", "text": "def test_api_delete_admin(self):\n self.client.login(email=self.adminUN, password='pass')\n id = AdmissionsRate.objects.all()[0].id\n url = self.url + str(id) + \".json\"\n response = self.client.delete(url)\n self.assertEqual(response.status_code, 204)", "title": "" }, { "docid": "1e83ba44a440f4398fdbaac8bf2229d9", "score": "0.56120586", "text": "def delete(self) -> None:\n ...", "title": "" }, { "docid": "5bfcc3718782fe9f4dead00e355a73b4", "score": "0.5607251", "text": "def delete_day_old_schedules():\n from .services import remove_inactive_schedules\n print(\"Removing inactive schedules...\")\n remove_inactive_schedules()\n print(\"Done!\")", "title": "" }, { "docid": "9ae282cc8c14c895a4a74cac9f581940", "score": "0.5604484", "text": "def test_admin_delete_instructor_notification_112520(self):\n self.ps.test_updates['name'] = 't1.57.007' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = ['t1', 't1.57', 't1.57.007', '112520']\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n self.admin.wait.until(\n expect.visibility_of_element_located(\n (By.PARTIAL_LINK_TEXT, 'System Setting')\n )\n ).click()\n self.admin.wait.until(\n expect.visibility_of_element_located(\n (By.PARTIAL_LINK_TEXT, 'Notifications')\n )\n ).click()\n self.admin.sleep(5)\n\n self.admin.find(By.XPATH, \"//div[3]//input[@id='message']\").send_keys(\n 'automated test')\n\n self.admin.driver.find_elements_by_xpath(\n \"//input[@class='btn btn-default']\")[1].click()\n\n self.admin.sleep(5)\n\n notif = self.admin.driver.find_elements_by_xpath(\n 
\"//div[@class='col-xs-12']\")\n\n for index, n in enumerate(notif):\n if n.text.find('automated test') >= 0:\n self.admin.driver.find_elements_by_xpath(\n \"//a[@class='btn btn-warning']\")[index].click()\n self.admin.driver.switch_to_alert().accept()\n self.ps.test_updates['passed'] = True\n break\n\n self.admin.sleep(5)\n\n deleted = True\n\n notif = self.admin.driver.find_elements_by_xpath(\n \"//div[@class='col-xs-12']\")\n\n for n in notif:\n if n.text.find('automated test') >= 0:\n deleted = False\n break\n\n assert(deleted), 'notification not deleted'\n\n self.ps.test_updates['passed'] = True", "title": "" }, { "docid": "323da42d434e323f0822bf464db20c5d", "score": "0.5598231", "text": "def remove_already_running(ctx, force: bool):\n db_overview = ctx.obj[\"dboverview\"]\n print(db_overview.delete_last_row(force))", "title": "" }, { "docid": "7bb7ed51a8b1207ec9764390ba2f038c", "score": "0.55924964", "text": "def test_unassign_dashboard_from_public_customer_using_delete(self):\n pass", "title": "" }, { "docid": "9e73030976f85c6f96187a6a6fb184cc", "score": "0.55910367", "text": "def __del_table(self, table):\n self.client.disableTable(table)\n self.client.deleteTable(table)", "title": "" }, { "docid": "09a99c5ebb441bb1fe49d7dc6cc9e022", "score": "0.55790126", "text": "def _delete_ts(self, op):\n self.server.db.delete_ts(op['pk'])\n self._run_trigger('delete_ts', [op['pk']])\n return TSDBOp_Return(TSDBStatus.OK, op['op'])", "title": "" }, { "docid": "c26abaeaf4b5bd1f17278e751deb2af1", "score": "0.55774456", "text": "def test_delete_tpc_afterpulse1t_item(self):\n pass", "title": "" }, { "docid": "449d0f89b0c10648f6f3816a0bf1de84", "score": "0.5577333", "text": "def taken_opschonen(stdout):\n\n now = timezone.now()\n oud = now - datetime.timedelta(days=92)\n\n aantal = 0\n for obj in (Taak\n .objects\n .filter(is_afgerond=True,\n deadline__lt=oud)):\n aantal += 1\n obj.delete()\n # for\n\n if aantal > 0:\n stdout.write('[INFO] Aantal oude afgehandelde taken verwijderd: %s' % aantal)", "title": "" }, { "docid": "957533255171a5f3b9f8dadfc95ab59b", "score": "0.55746585", "text": "def delete(self):\n if not self.delete_date:\n self.delete_date = func.current_timestamp()\n return True\n return False", "title": "" }, { "docid": "5bd36f9832c2aa13c3495c0a5fc0cb6b", "score": "0.5569345", "text": "def test_delete_experiment_v1(self):\n pass", "title": "" }, { "docid": "f36dae97a3864ddf3bd5b11882f047ed", "score": "0.5565221", "text": "def test_delete(self):\n pass", "title": "" }, { "docid": "f36dae97a3864ddf3bd5b11882f047ed", "score": "0.5565221", "text": "def test_delete(self):\n pass", "title": "" }, { "docid": "f36dae97a3864ddf3bd5b11882f047ed", "score": "0.5565221", "text": "def test_delete(self):\n pass", "title": "" }, { "docid": "d64994df1dc6dfbf1a62e3bbc9ef519b", "score": "0.5564725", "text": "def aboutToDelete(self):\n \n pass", "title": "" }, { "docid": "1559d17a618ddee56cc4e8386454e9d7", "score": "0.5561951", "text": "def delete(self):\n self.update(deleted_at=timezone.now())", "title": "" }, { "docid": "be6232e14b9d00149c5c32e2074cfd7c", "score": "0.555467", "text": "def delete_from_db(self):\n pass", "title": "" }, { "docid": "6ef680c1e23841569cb7f864c651e244", "score": "0.55521876", "text": "def test_ts_delete(self):\n\n # Fake answer definition\n httpretty.register_uri(\n httpretty.DELETE,\n '%s/ts/tsuid' % ROOT_URL,\n body='Deletion of timeseries tsuid OK',\n status=204\n )\n\n IkatsApi.ts.delete('tsuid')", "title": "" }, { "docid": "7802e331993a823c94be3c49651ea321", "score": 
"0.55488485", "text": "def delete(self):\n self.set_dayu_delete(True)\n return self", "title": "" }, { "docid": "73b04e63293c537ebf46f3513a1d08b6", "score": "0.5545489", "text": "def test_failed_permanent_deletion(self):\n self._make_submissions()\n formid = self.xform.pk\n dataid = self.xform.instances.all().order_by(\"id\")[0].pk\n view = DataViewSet.as_view({\"delete\": \"destroy\"})\n\n request = self.factory.delete(\n \"/\", **self.extra, data={\"permanent_delete\": True}\n )\n response = view(request, pk=formid, dataid=dataid)\n self.assertEqual(response.status_code, 400)\n error_msg = \"Permanent submission deletion is not enabled for this server.\"\n self.assertEqual(response.data, {\"error\": error_msg})", "title": "" }, { "docid": "4b11324520205eabb46377158a156649", "score": "0.5536886", "text": "def test_on_delete_constraints(self):\n tz = TimeZone.objects.create(name='Europe/Helsinki')\n tz.full_clean()\n s = Schedule.objects.create(name='s', timezone=tz,\n matrix=json.dumps(7 * [48 * [True]]))\n s.full_clean()\n prv = Provider.objects.create(name='My Prov', type=Provider.TYPE_DUMMY)\n prv.full_clean()\n prj = Project.objects.create(name='Prj', email='a@b.com')\n prj.full_clean()\n vm = VM.objects.create(provider=prv, project=prj, schedule=s)\n vm.full_clean()\n\n pl = PowerLog.objects.create(vm=vm, powered_on=False)\n pl.full_clean()\n pl_id = pl.id\n del pl\n vm.delete()\n\n with self.assertRaises(PowerLog.DoesNotExist):\n PowerLog.objects.get(id=pl_id)", "title": "" }, { "docid": "ac05e954c4598ea4a7d7544766c5c37e", "score": "0.55357146", "text": "def delete(self):\n\n if self.oracle:\n self.time += self.oracle.time_accum() # keep SAT solving time\n\n self.oracle.delete()\n self.oracle = None", "title": "" }, { "docid": "4907309d8cd8bdddd2ebe47056f2e105", "score": "0.5529697", "text": "def test_client_risk_assessment_delete(self):\n pass", "title": "" }, { "docid": "da3f38dfa1ac0197b5146d1587e04d34", "score": "0.55287385", "text": "def test_delete_check_plan_result(self):\n pass", "title": "" }, { "docid": "0956ae1cc6cfb0f26ca1cc06f9f5fce4", "score": "0.5526524", "text": "def test_helm_sections_delete(self):\n pass", "title": "" }, { "docid": "d5578268d6c07879c42c57e0050dfa89", "score": "0.5524918", "text": "def delete(self):\n pass", "title": "" }, { "docid": "d5578268d6c07879c42c57e0050dfa89", "score": "0.5524918", "text": "def delete(self):\n pass", "title": "" }, { "docid": "d5578268d6c07879c42c57e0050dfa89", "score": "0.5524918", "text": "def delete(self):\n pass", "title": "" }, { "docid": "d5578268d6c07879c42c57e0050dfa89", "score": "0.5524918", "text": "def delete(self):\n pass", "title": "" }, { "docid": "d5578268d6c07879c42c57e0050dfa89", "score": "0.5524918", "text": "def delete(self):\n pass", "title": "" }, { "docid": "d5578268d6c07879c42c57e0050dfa89", "score": "0.5524918", "text": "def delete(self):\n pass", "title": "" }, { "docid": "3da3eae6014e696d7c93c9162643fada", "score": "0.5518105", "text": "def test_delete_event_alert_condition(self):\n pass", "title": "" } ]
fa08f5b6263b151bdf2725af3507e987
clearall(global()) deletes the local variables in the console
[ { "docid": "f4b5c8c1a565a91e5a21fe1391e73ba0", "score": "0.68569124", "text": "def clearall(local_global_var):\n\tall = [var for var in local_global_var if var[0] != \"_\"]\n\tfor var in all:\n\t\tprint(var)\n\t\tdel local_global_var[var]", "title": "" } ]
[ { "docid": "7260233c4631e8ea5925bda2da5e0a27", "score": "0.6935533", "text": "def unload_all_languages():\n lingua_franca._set_active_langs([])", "title": "" }, { "docid": "694f8068527d028e3c48d1d2161f838b", "score": "0.6595749", "text": "def clear_all():\n gl = globals().copy()\n for var in gl:\n if var[0] == '_': continue\n if 'func' in str(globals()[var]): continue\n if 'module' in str(globals()[var]): continue\n\n del globals()[var]", "title": "" }, { "docid": "3000114a604a3bcb35de822501c4835d", "score": "0.6068267", "text": "def clear_vars_packages() -> None:\n\n for name in dir():\n if not name.startswith('_'):\n del globals()[name]", "title": "" }, { "docid": "f1744fb5f8b4a2ec202e1008091d8816", "score": "0.59879494", "text": "def clear_all():\n\n frame = inspect.currentframe().f_back\n try:\n if frame.f_globals.get('variables_order'):\n # we should avoid to declare symbols twice !\n del frame.f_globals['variables_order']\n if frame.f_globals.get('parameters_order'):\n # we should avoid to declare symbols twice !\n del frame.f_globals['parameters_order']\n finally:\n del frame", "title": "" }, { "docid": "11cc7cff1d08f4bdf7f8dde2be27b0bb", "score": "0.5951799", "text": "def clear():\n # Local variables\n\n # ******** start clear() ******** #\n\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")", "title": "" }, { "docid": "9d2f5eeff22f340f06d17329db60ad5e", "score": "0.58672976", "text": "def borrar_pantalla():\n # para windows\n if os.name == 'nt':\n _ = os.system('cls')\n # para mac y linux (os.name es 'posix')\n else:\n _ = os.system('clear')", "title": "" }, { "docid": "021be25bd42d73a3f5a2fca37a9b29dd", "score": "0.58508843", "text": "def clear_all(self):\n self._scope.WriteString('vbc app.Measure.ClearAll', 1)", "title": "" }, { "docid": "26932448c1e334cfec3fece3a07ac59a", "score": "0.5847412", "text": "def localization():\n \n\n print(\"[+] Locale: LANG={} CE={}\".format(getlocale()[0], getlocale()[1]))\n print(\"[+] Time: {}\".format(asctime()))\n print(\"[+] Timezone: Normal:{} DST:{}\".format(tzname[0], tzname[1]))", "title": "" }, { "docid": "422372a8f1dd26a8fdfc97761e16820f", "score": "0.580411", "text": "def limpiar_pantalla ():\n if platform.system() == \"Linux\":\n os.system(\"clear\") #NUNCA ENTRA EN LA EXCEPCION\n else:\n os.system(\"cls\")", "title": "" }, { "docid": "1266a894f85cb3c8403ee7fd00652087", "score": "0.5800706", "text": "def ClearConsole():\n\n if \"win\" in sys.platform.lower():\n # for windows\n os.system(\"cls\")\n elif \"linux\" in sys.platform.lower():\n # for linux\n os.system(\"clear\")", "title": "" }, { "docid": "e949f8385e1c30690ff6f1478135467d", "score": "0.57858586", "text": "def clear_console():\n clear = lambda: os.system('cls')\n clear()", "title": "" }, { "docid": "cebaa9417374176f94d7b982032c881e", "score": "0.57735837", "text": "def clear():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")", "title": "" }, { "docid": "cebaa9417374176f94d7b982032c881e", "score": "0.57735837", "text": "def clear():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")", "title": "" }, { "docid": "cb4a99e41aa0f9a926a8e86e1295b2c5", "score": "0.57630855", "text": "def clear():\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "810ecbb9c5628143a91bc614cc0db24f", "score": "0.5729844", "text": "def clear():\n if os.name == \"posix\":\n os.system(\"reset\")\n elif os.name == (\"nt\"):\n os.system(\"cls\")", "title": "" }, { "docid": "0b5e376fb1ca5e420cf94c92af175110", "score": "0.5720399", "text": "def clear():\n 
kwargs = {}\n if platform.system() == \"Windows\":\n actual_clear = windows_cmd_builtin(\"cls\")\n else:\n actual_clear = which(\"clear\", log_calls=False)\n # TERM not always set on CI machines, but we want coverage. THIS IS \"DANGEROUS\"\n # since we're convincing `clear` it can do things it probably shouldn't ;)\n if CI_EXEC_DEMOS_COVERAGE:\n term = os.getenv(\"TERM\", False)\n if not term:\n kwargs[\"env\"] = {\"TERM\": \"xterm\"}\n\n actual_clear(**kwargs)", "title": "" }, { "docid": "878c008584cfbd7e1033de07c68994e8", "score": "0.5709897", "text": "def reset_globals(self):\n\n for global_id, value in self._global_inits.iteritems():\n self._globals[global_id] = value", "title": "" }, { "docid": "413de7d2dee7e638015f14af6a175038", "score": "0.570297", "text": "def clear() -> None:\n name = re.sub(r\"[^\\w]+\", \"\", platform.system()).lower()\n if \"windows\" in name:\n os.system(\"cls\")\n else:\n os.system(\"clear\")", "title": "" }, { "docid": "9143463789d0c0af359b6a9bd0d24aed", "score": "0.568067", "text": "def clear_default_variables(self):\n self._default_variables.clear()", "title": "" }, { "docid": "19881841da0a3307b18a1fd65d08c172", "score": "0.5671368", "text": "def reset_all():\n func()", "title": "" }, { "docid": "139c68bbcaf900facc2fd07f6981e70b", "score": "0.56596965", "text": "def clearConsole() :\n\n try :\n os.system('cls') #clears console on Windows\n\n except :\n os.system('clear') #clears console on Linux", "title": "" }, { "docid": "5f7e58613ebb1a738224c6e3f41c8a77", "score": "0.56409377", "text": "def clear():\n\n if os.name == 'nt':\n _ = os.system('cls')\n\n else:\n _ = os.system('clear')", "title": "" }, { "docid": "04ff074444fbe5bfd4790996bf60500d", "score": "0.5617814", "text": "def clear_console():\n os.system('clear' if os.name == 'posix' else 'cls')", "title": "" }, { "docid": "8ce15df631d3940917e2ed8b3406b5bd", "score": "0.5609005", "text": "def clear_console():\n os.system('cls' if os.name=='nt' else 'clear')", "title": "" }, { "docid": "ee107090ddbabf6a0b02922af47aa468", "score": "0.5583999", "text": "def all(self):\n os.system(\"clear\")\n print \"COUNTRIES AND CAPITALS\"\n for i in self.country: #list of capitals\n print \"\\n-\", i, \"-\", self.country[i] #prints a dash and the capitals\n raw_input(\"\\nPress enter to continue\")\n os.system(\"clear\")\n self.menus()", "title": "" }, { "docid": "cfb7b25cf9641a5c07c0471037de3692", "score": "0.5543426", "text": "def resetlocale(category=LC_ALL):\r\n _setlocale(category, _build_localename(getdefaultlocale()))", "title": "" }, { "docid": "38e46ea73381e73e3ac9fadea6fb40d2", "score": "0.5522336", "text": "def screen_clear():\n # for mac and linux(here, os.name is 'posix')\n if os.name == 'posix':\n _ = os.system('clear')\n else:\n # for windows platfrom\n _ = os.system('cls')", "title": "" }, { "docid": "8dcc683632395565a5c32e52e844f49d", "score": "0.54922974", "text": "def reset():\n Context.all_top_level_contexts.clear()\n global before_once_functions\n before_once_functions.clear()\n global before_once_executed\n before_once_executed = False", "title": "" }, { "docid": "c65a5762e6a0b24a33dc8a97630d491b", "score": "0.54344535", "text": "def clear():", "title": "" }, { "docid": "6c4d4da43bb7a201841a77c1e8518dfc", "score": "0.54326254", "text": "def clearAll(self) -> None:\n ...", "title": "" }, { "docid": "de70992b3bf112d4d944b79611504064", "score": "0.54170334", "text": "def __clear_console(self):\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": 
"d06d47db4db53a8d9a0c394f7759e422", "score": "0.5413276", "text": "def screen_clear(self):\n if sys.platform == \"win32\":\n os.system(\"cls\")\n elif sys.platform == \"linux\":\n os.system(\"clear\")", "title": "" }, { "docid": "cd09cbe3bd218914995a30c770c854ed", "score": "0.53707135", "text": "def clear():\n # the following write a pair of ANSI escape sequences to clear the screen!!\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()", "title": "" }, { "docid": "eb853b0db7ebf2942e8fe804b27e2fa9", "score": "0.5351689", "text": "def clear(self):\n print(self.t.home + self.t.clear)", "title": "" }, { "docid": "6e720f396d9e8e2db7e65daf1cdcc02c", "score": "0.53403527", "text": "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "title": "" }, { "docid": "1f4d7b54e4d60ccaddb617121ac3ac09", "score": "0.5331332", "text": "def clear(self):\n os.system('cls' if os.name =='nt' else 'clear')", "title": "" }, { "docid": "e0ea842bd13c18ef21db634150a5083a", "score": "0.5321604", "text": "def clear_everything(context):\n commands = [\n \"View: Revert and Close Editor\",\n \"Terminal: Kill the Active Terminal Instance\",\n \"Debug: Remove All Breakpoints\",\n \"File: Clear Recently Opened\",\n \"Clear Editor History\",\n \"Clear Command History\",\n \"View: Close All Editors\",\n \"View: Close Panel\",\n \"Notifications: Clear All Notifications\",\n ]\n for command in commands:\n quick_open.select_command(context, command)", "title": "" }, { "docid": "3924c886c6a581377faf3b9c49b90da9", "score": "0.529773", "text": "def action_clear_all(self):\n self.action_clear()\n self.py_lines.append(\"mem = [0 for i in range(8)]\")", "title": "" }, { "docid": "b05e01f6f6187579cea7ec60809d95a7", "score": "0.5289042", "text": "def clear(cls):\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "4611b24b84bfa471ae8d7af1e8d48589", "score": "0.5267777", "text": "def clean_pali(c):\n clean_all_lang_db(LANG_PALI)", "title": "" }, { "docid": "e47b85fd3452705a26d69ba4ad4f71a3", "score": "0.52597576", "text": "async def locale(self, ctx):\n pass", "title": "" }, { "docid": "bf2dbd1df799b3e588470609f105acbc", "score": "0.5238289", "text": "def clean(self):\n os.system (\"clear\")", "title": "" }, { "docid": "96156da72af9ca0027dd7baa94250fda", "score": "0.52317137", "text": "def clear_screen():\r\n if not DEBUG:\r\n if running_on_windows:\r\n os.system(\"cls\") # Clear screen\r\n else:\r\n os.system(\"clear\") # Clear screen\r", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "1eab7ca7ed69b510ac76675c54904a45", "score": "0.523082", "text": "def 
clear_calendar_terms(self):\n pass", "title": "" }, { "docid": "658c7d2e6bd705da6dd5652a7e467ee6", "score": "0.5227262", "text": "def _reset():\n global __global_logger, __loggers\n __global_logger = None\n __loggers = {}", "title": "" }, { "docid": "5d4d41065538d51d9ab1c66d42550c15", "score": "0.5221155", "text": "async def cvar_deleteall(self, ctx):\n char: Character = await Character.from_ctx(ctx)\n\n await ctx.send(f\"This will delete **ALL** of your character variables for {char.name}. \"\n \"Are you *absolutely sure* you want to continue?\\n\"\n \"Type `Yes, I am sure` to confirm.\")\n try:\n reply = await self.bot.wait_for('message', timeout=30, check=auth_and_chan(ctx))\n except asyncio.TimeoutError:\n reply = None\n if (not reply) or (not reply.content == \"Yes, I am sure\"):\n return await ctx.send(\"Unconfirmed. Aborting.\")\n\n char.cvars = {}\n\n await char.commit(ctx)\n return await ctx.send(f\"OK. I have deleted all of {char.name}'s cvars.\")", "title": "" }, { "docid": "59fee5f6af626438f7ee0881613ace1a", "score": "0.52055055", "text": "def clear_all(self):\n self._clear_all()", "title": "" }, { "docid": "e0a1fdeecc65e8b6f0ac88c36cc7566f", "score": "0.5205316", "text": "def clear_weekday_terms(self):\n pass", "title": "" }, { "docid": "695a0ac29cc24764203ce2386fef2993", "score": "0.52010745", "text": "def clear():\n subprocess.call(['clear'])\n return True", "title": "" }, { "docid": "dd1bbf19d0c7627ee597e9f37e614814", "score": "0.5200558", "text": "def clear(self, *args):\n\n self.shell.log.clear()", "title": "" }, { "docid": "8b08df58c31f71fc2059b5966a4860b7", "score": "0.5199647", "text": "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "8b08df58c31f71fc2059b5966a4860b7", "score": "0.5199647", "text": "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "8b08df58c31f71fc2059b5966a4860b7", "score": "0.5199647", "text": "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "8b08df58c31f71fc2059b5966a4860b7", "score": "0.5199647", "text": "def clear_screen():\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "96208f26549c6daeb8cf1a004751cffc", "score": "0.5196485", "text": "def clearScreen():\n\tos.system(\"clear\")", "title": "" }, { "docid": "dce6641c050c94bc324098eae38a7e1d", "score": "0.5187059", "text": "def clear_global_line_profiler():\n global_line_profiler.clear()", "title": "" }, { "docid": "e68a2412612ba1c01ef7e82f9fd444e4", "score": "0.51847744", "text": "def reset_all_counters():\n global _counter_state\n _counter_state = {}", "title": "" }, { "docid": "8ca526f2f089a52ee956b30fad5ac0f5", "score": "0.5180029", "text": "def clear_screen():\n os.system(\"clear\")\n return", "title": "" }, { "docid": "7e4ddc5d4ace8f9a29cae430cabe3e69", "score": "0.517827", "text": "def clear_screen():\n\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "c865d72884cdf47bd24f501c1b707071", "score": "0.5177745", "text": "def clear_screen():\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')", "title": "" }, { "docid": "5936b6de75d79405ef7097eccb296a71", "score": "0.5177116", "text": "def screen_cleaner():\n # for windows platforms\n if name == 'nt':\n _ = system('cls')\n\n # for mac and linux(here, os.name is 'posix') platforms\n else:\n _ = system('clear')", "title": "" }, { "docid": "c121f8b63ce0cf6aacb03643f4bb23db", "score": "0.5164841", "text": "def 
clear_all(self) -> None:\n self.clear_context()\n self.clear_tracing()", "title": "" }, { "docid": "ccc504a6c3c115ab2869a8ae18a0f280", "score": "0.5160614", "text": "def __reset():\n\n global _COMMANDS\n\n _COMMANDS = {}\n\n arguments.reset_parser()", "title": "" }, { "docid": "7d30990f61c08449f055697d26239f8c", "score": "0.51430756", "text": "def ClearAvailableFontNames():", "title": "" }, { "docid": "0ccb9dac71c7ee21ad401226b0cef5b0", "score": "0.51368755", "text": "def clear(self):\n self._desc = {}\n for key, value in merge.DEFAULT_PROJECT.items():\n if key not in self._HIDDEN:\n self._desc[key] = type(value)()", "title": "" }, { "docid": "39c8695f82f36ada0807a659aed878bc", "score": "0.5135754", "text": "def countries(self):\n os.system(\"clear\")\n print \"COUNTRIES\"\n for i in self.country: #lists the countries\n print \"-\", i #prints a star and the countries\n raw_input(\"\\nPress enter to continue\")\n os.system(\"clear\")\n self.menus()", "title": "" }, { "docid": "39d4ab0238b4316cf690839f62558347", "score": "0.5124635", "text": "def clear_all(self):\n self.clear_diagnostics()\n self.temp_sensor_problem = None\n self.unit_status = []\n self.oaf_condition = []\n self.sensor_limit = []\n self.sensor_limit_msg = \"\"\n self.timestamp_array = []", "title": "" }, { "docid": "65a8b96636fb37e3f9a36469ae24b940", "score": "0.5122941", "text": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "05fd317c74d879cd6adea54cc5c940b6", "score": "0.5122685", "text": "def _clear_all(self):\n try:\n self._widgets['TabControl'].forget(self._widgets['AdvancedTab'])\n self._widgets['AdvancedTab'].destroy()\n self._widgets['TabControl'].forget(self._widgets['ResultsTab'])\n self._widgets['ResultsTab'].destroy()\n self._widgets['SolvePerc'].config(text='SOLVED:\\n 0%')\n self._rectangles = []\n self._advanced_properties = {}\n except TclError:\n pass\n return", "title": "" }, { "docid": "ede5f1fefd358e18a31727fcb07ce141", "score": "0.512231", "text": "def efface_ecran():\n\tif sys.platform.startswith(\"win\"):\n\t\t# Si systeme windows\n\t\tos.system(\"cls\")\n\telse:\n\t\t# Si system unix ou OS x\n\t\tos.system(\"clear\")", "title": "" }, { "docid": "381254fcf9648c7bb6a7f027f03ad7ef", "score": "0.5116768", "text": "def clearAll(self, e):\n self.fr.publish(KeyEvent(\"Clear\"))", "title": "" }, { "docid": "b3db6eae7baf94c95cefcc1f19a3b3cc", "score": "0.5116681", "text": "def clearPersonalDictionary(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "52758dc120a738b2df6d3218d25abd37", "score": "0.51160693", "text": "def clearAll(self):\r\n self.txt_subcode.setText('')\r\n self.txt_subname.setText('')", "title": "" }, { "docid": "39e7ad9f45beb369cc19fc1737666960", "score": "0.5106593", "text": "def clear_screen() -> None:\n os.system('cls' if os.name == 'nt' else 'clear')", "title": "" }, { "docid": "5aa58cef07f6aecb0e4a6dddf551ba67", "score": "0.50922114", "text": "def clear_screen():\n try:\n command = 'cls' if os.name in ('nt', 'dos') else 'clear'\n os.system(command)\n except:\n pass", "title": "" }, { "docid": "2569af8eb36dab464e5e670723c0a69f", "score": "0.5087032", "text": "def clear(self):\n os.system(\"clear\")", "title": "" }, { "docid": "2a80926997d3c11749fa27557b46d0ca", "score": "0.5086686", "text": "def clear_all(self):\n self.clear_cache()\n self.clear_history()", "title": "" }, { "docid": "a4f06dff81e864c991e0ac12d91809da", "score": "0.5084384", "text": "def set_clear_on_exit(value=True):\n global _clear_on_exit\n _clear_on_exit = value", 
"title": "" }, { "docid": "2fe5bffdd6dedf7fb0c545060db58695", "score": "0.507833", "text": "def clean_chinese(c):\n clean_all_lang_db(LANG_CHINESE)", "title": "" }, { "docid": "b0bea722d1f31cf0c7e2b08d9a39a33c", "score": "0.5078317", "text": "def clear_all(self):\n self.clear()", "title": "" }, { "docid": "7441c9c11b98f8e8cf1d6d6e3be76d82", "score": "0.5077798", "text": "def set_french(self):\n\t\tlocale.setlocale(locale.LC_ALL, \"fr_FR.utf8\")\n\t\tself.fr.install()\n\t\tglobal _\n\t\t_ = self.fr.gettext", "title": "" }, { "docid": "fffbbf28e9cf29907181ce55ecd9843b", "score": "0.5073885", "text": "def clear_screen():\n print('\\n' * 100)", "title": "" }, { "docid": "ac53d7e3ec93f561868360d12f058e9a", "score": "0.5069702", "text": "async def reset_user_language(context, arguments):\n return await reset_language(context, \"user\")", "title": "" }, { "docid": "a581f7b2fd43c7a4c61890593f2b3577", "score": "0.5068868", "text": "def clear_all(self):\n reset_queries()", "title": "" }, { "docid": "5746ed900553adb3a8dde127c2b7eb76", "score": "0.5049173", "text": "def clear_week_of_month_terms(self):\n pass", "title": "" }, { "docid": "94b87f0aa8bac0bc0a602f5e232e0bd2", "score": "0.5048457", "text": "def Clear():\r\n pass", "title": "" }, { "docid": "d8f62676eedcca3f810ea865b494bf2d", "score": "0.5043984", "text": "def reset_all_memoized_functions():\n openedx_webhooks.utils.clear_memoized_values()", "title": "" }, { "docid": "fdaf29fb43b92f84eec8b2e45bea1f11", "score": "0.5041354", "text": "async def reset_guild_language(context, arguments):\n return await reset_language(context, \"guild\")", "title": "" }, { "docid": "5a3de2a12239bdccd14e0b22be4a9132", "score": "0.50389946", "text": "def clear():\n # type: () -> None\n lcd = lcd_module.I2CLcd()\n lcd.clear()", "title": "" }, { "docid": "fcb874c570ba93756ab16d1ba0ed6584", "score": "0.5036905", "text": "def resetTranslations(self):\n self.trans_x_ =0.\n self.trans_y_ =0.\n self.counter_trans_x=0.\n self.counter_trans_y=0.", "title": "" }, { "docid": "513222ee5c0c93adbc147660f024b31b", "score": "0.5031493", "text": "def clear() -> int:\n system('clear')\n return 0", "title": "" }, { "docid": "7819be91d25649e298999a40a7d577d9", "score": "0.50270253", "text": "async def _clear_all(self):\n self.clear_scoresheet()\n await self.bot.say(\"Removed everything from scoresheet...\")", "title": "" }, { "docid": "c5dc251f0fa39031e843ceee7d22da20", "score": "0.5026161", "text": "def clear_all_buttons(self):\n\t\tself._all_buttons.clear()", "title": "" } ]
74a791aaa5b84c51ccfc8a0dc4110aed
Test case for get_web_form
[ { "docid": "3e80d6e0fe4ad055ebde3fcb6ec48a54", "score": "0.9336786", "text": "def test_get_web_form(self):\n pass", "title": "" } ]
[ { "docid": "c07256acd291408bd2ba0e1918ad6f8a", "score": "0.7466407", "text": "def test_get_form(self):\n self.client.login(username='featuretest', password='pword')\n response = self.client.get(self.options.get_create_form())\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "578d82ecb3bf0d8bbfa32d9f62ed6c55", "score": "0.72960025", "text": "def test_get_start_form(self):\n pass", "title": "" }, { "docid": "19d6cf2269fe11e9c427e6901a977509", "score": "0.7151478", "text": "def test_get_form(self):\n url = reverse(\"registrations:registration-details\")\n r = self.client.get(url)\n self.assertTemplateUsed(r, \"registrations/registration_details.html\")\n self.assertContains(r, '<form method=\"post\">')", "title": "" }, { "docid": "e072d1d20664ea968bba63c885304352", "score": "0.7095786", "text": "def test_submit_form(self):\n pass", "title": "" }, { "docid": "a394fe47df9025ff24bd2958e3585983", "score": "0.6732274", "text": "def test_properties():\n browser = mechanicalsoup.StatefulBrowser()\n browser.open_fake_page('<form></form>', url=\"http://example.com\")\n assert browser.page == browser.get_current_page()\n assert browser.page is not None\n assert browser.url == browser.get_url()\n assert browser.url is not None\n browser.select_form()\n assert browser.form == browser.get_current_form()\n assert browser.form is not None", "title": "" }, { "docid": "eff0fea3d4e8aa8d6e6ee3ded5aef9dc", "score": "0.6723744", "text": "def test_route_form(self, client):\n\n resp = client.get(type(self).SIGNUP_URL)\n html = BeautifulSoup(resp.data, 'html.parser')\n\n assert html.find('form') is not None", "title": "" }, { "docid": "61d4e78d250af3e53d6f0a45c5759cce", "score": "0.67207336", "text": "def test_get_rendered_start_form(self):\n pass", "title": "" }, { "docid": "44463f0791047e00950163e41150728e", "score": "0.6720138", "text": "def form_get(self, form):\r\n pass", "title": "" }, { "docid": "0f0d5a14c384c4efd8a9f1f2bc2ad6f4", "score": "0.6655922", "text": "def test_get_form():\n with pytest.raises(HubspotNotFound):\n FORMS.get(\"not_a_form_id\")\n\n form = FORMS.get(BASE_FORM[\"guid\"])\n assert form\n assert isinstance(form, dict)", "title": "" }, { "docid": "d8f9b1ef22475b8da4d97cf18499af69", "score": "0.6413376", "text": "def test_get_start_form_variables(self):\n pass", "title": "" }, { "docid": "12e4b11023e0376d21c4374b8c7a162e", "score": "0.64070386", "text": "def get_form(self, page):\n form = page.find('form')\n return form", "title": "" }, { "docid": "34d6d838c30d6eacd1a638864bccbf98", "score": "0.63931423", "text": "def test_submit_form_by_key(self):\n pass", "title": "" }, { "docid": "4960496ae9a5e8a258262be3951803b4", "score": "0.6389518", "text": "def test_get_deployed_start_form(self):\n pass", "title": "" }, { "docid": "bada08723231695c688353915686d575", "score": "0.63810384", "text": "def test_html(self):\n self.assertContains(self.resp, '<form')\n self.assertContains(self.resp, 'type=\"submit\"')", "title": "" }, { "docid": "a5eb9a6fab1c19b21741f9de19cd49e8", "score": "0.632159", "text": "def test_paper_form(self):\n form = PaperForm \n # other tests for the form should be here, add them when you add new functionality", "title": "" }, { "docid": "5df774ab2ebb553d3d5792090cd8a8c1", "score": "0.6238651", "text": "def test_route_inputs(self, client, form):\n\n resp = client.get(type(self).SIGNUP_URL)\n html = BeautifulSoup(resp.data, 'html.parser')\n html_form = html.find('form')\n\n # Is there a \"Name\" input with a proper label of \"Name\"?\n ele = 
html_form.find('input', {'id': \"name\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert ele.parent.find(\n 'label').decode_contents() == form.name.label.text\n\n # Is there a \"Username\" input with a proper label of \"Username\"?\n ele = html_form.find('input', {'id': \"username\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert ele.parent.find(\n 'label').decode_contents() == form.username.label.text\n\n # Is there a \"Email\" input with a proper label of \"Email\"?\n ele = html_form.find('input', {'id': \"email\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert ele.parent.find(\n 'label').decode_contents() == form.email.label.text\n\n # Is there a \"Password\" input with a proper label of \"Password\"?\n ele = html_form.find('input', {'id': \"password\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert ele.parent.find(\n 'label').decode_contents() == form.password.label.text\n\n # Is there a \"Month\" select-tag with a proper label of \"Month\" and does\n # it have children (option tags)?\n ele = html_form.find('select', {'id': \"month\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert len(tuple(ele.children))\n assert ele.parent.find(\n 'label').decode_contents() == form.month.label.text\n\n # Is there a \"Day\" select-tag with a proper label of \"Day\" and does it\n # have children (option tags)?\n ele = html_form.find('select', {'id': \"day\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert len(tuple(ele.children))\n assert ele.parent.find(\n 'label').decode_contents() == form.day.label.text\n\n # Is there a \"year\" select-tag with a proper label of \"year\" and does\n # it have children (option tags)?\n ele = html_form.find('select', {'id': \"year\"})\n assert ele is not None\n assert ele.parent.find('label') is not None\n assert len(tuple(ele.children))\n assert ele.parent.find(\n 'label').decode_contents() == form.year.label.text", "title": "" }, { "docid": "99f04f4dc1ef3a79b032521bb26ff3da", "score": "0.6232527", "text": "def test_get_market_search_form(self):\n response = self.client.get(reverse('market-zip-search'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.request['PATH_INFO'], '/market-zip-search/')\n self.assertContains(response, 'zip code')\n # Django bug? 
Sometimes request.templates is empty but response contents\n # are still the same.\n if len(response.templates):\n self.assertTemplateUsed(response,\n 'include/frm/frm_market_search.html')\n self.assertTemplateUsed(response,\n 'include/scripts/js_market_search_form.html')\n else:\n self.assertContains(response, '<form id=\"id_frm_market_search\"')\n self.assertContains(response,\n '(\"#id_frm_market_search\").submit(function(e)')", "title": "" }, { "docid": "83d56cc27556cfef5b83ea558e30eec0", "score": "0.6174638", "text": "def test_non_js_post_workflow(self):\n\n view_name = 'thumber_tests:example_form'\n path = reverse(view_name)\n http_referer = 'http://example.com{0}'.format(path)\n\n # Get the form view, and 'follow' so that the session cookie gets set on the client\n response = self.client.get(path, follow=True)\n self.assertIn(settings.SESSION_COOKIE_NAME, response.cookies)\n\n # Post the thumber form, and get the same page but with teh success message\n data = {'satisfied': 'True', 'comment': 'test comment', 'thumber_token': 'sync'}\n response = self.client.post(path, data, HTTP_REFERER=http_referer)\n self.assertContains(response, 'Thank you for your feedback', status_code=200)\n\n # Check that a Feedback model was created with the correct details\n self.assertEquals(Feedback.objects.count(), 1)\n feedback = Feedback.objects.all()[0]\n self.assertEquals(feedback.view_name, view_name)\n self.assertEquals(feedback.url, http_referer)\n self.assertEquals(feedback.satisfied, True)\n self.assertEquals(feedback.comment, 'test comment')", "title": "" }, { "docid": "2098d5602dfe916d8b0998444461168f", "score": "0.6169079", "text": "def test_login_form_get(self):\n\n with self.client as c:\n resp = c.get(\"/login\")\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('<h2>Sign In</h2>', html)\n self.assertIn('<button class=\"btn waves-effect waves-light\">Log In</button>', html)", "title": "" }, { "docid": "05b4c441843f3f2fdcf2698bd59c4c18", "score": "0.6135576", "text": "def test_html(self):\n self.assertContains(self.response,'<form')\n self.assertContains(self.response,'<input',6)\n self.assertContains(self.response,'type=\"text\"',3)\n self.assertContains(self.response,'type=\"email\"')\n self.assertContains(self.response,'type=\"submit\"')", "title": "" }, { "docid": "a44bee5907bd26c3938e12b510dc461c", "score": "0.609064", "text": "def form_page():\n prompts = story.prompts\n return render_template(\"form.html\", prompts=prompts)", "title": "" }, { "docid": "f680554e710bb1ff3590170232490ef5", "score": "0.6078432", "text": "def main_form():\n template = env.get_template(main_page)\n html = template.render()\n return html", "title": "" }, { "docid": "291590d821ff68926ff4141c3728058d", "score": "0.6069954", "text": "def test_form_inputs(self):\n self.assertContains(self.response, '<input', 2) \n self.assertContains(self.response, 'type=\"email\"', 1)", "title": "" }, { "docid": "ace6f9fa355a66f924dfd7fa3b55c320", "score": "0.60602474", "text": "def DoGetValidator(self):", "title": "" }, { "docid": "ace6f9fa355a66f924dfd7fa3b55c320", "score": "0.60602474", "text": "def DoGetValidator(self):", "title": "" }, { "docid": "ace6f9fa355a66f924dfd7fa3b55c320", "score": "0.60602474", "text": "def DoGetValidator(self):", "title": "" }, { "docid": "ace6f9fa355a66f924dfd7fa3b55c320", "score": "0.60602474", "text": "def DoGetValidator(self):", "title": "" }, { "docid": "ace6f9fa355a66f924dfd7fa3b55c320", "score": "0.60602474", "text": "def 
DoGetValidator(self):", "title": "" }, { "docid": "ace6f9fa355a66f924dfd7fa3b55c320", "score": "0.60602474", "text": "def DoGetValidator(self):", "title": "" }, { "docid": "b29b7efaeae67e002a99e9f60eacba18", "score": "0.605976", "text": "def test_search_form_live_data(client):\n with app.test_request_context():\n form_data = {'search_term': 'r.c. bigelow', 'choice': 'business_name'}\n rv = client.post('/', data=form_data, follow_redirects=False)\n assert rv.status_code == 302\n expected = url_for('basic_search_results', query='r.c. bigelow', index_field='business_name', page=1)\n assert urlparse(rv.location).path == expected", "title": "" }, { "docid": "12ca68558eb2e703748751d620ac6c7d", "score": "0.60554576", "text": "def test_homepage(self):\n response = self.client.get(reverse(\"home\"))\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"form\", response.context)", "title": "" }, { "docid": "58dc93385d3e7ed610ccd1bc06ecbe64", "score": "0.603827", "text": "def test_form_fields(self):\n response = self.client.get(\n reverse('edit-preparation', kwargs={'id': '1'}))\n\n fields = {\n 'name': u'Live',\n 'description': u'The food goes straight from sea \\\nto you with live food, sitting in saltwater tanks!',\n 'additional_info': u'Live octopus requires a locking container'\n }\n\n form = response.context['preparation_form']\n\n for field in fields:\n self.assertEqual(fields[field], form[field].value())", "title": "" }, { "docid": "f5e7a5eca4698ea95735f57c11e44a1c", "score": "0.60349965", "text": "def test_training_program_add_form_has_expected_html(self):\n\n response = self.client.get(reverse('employee_portal:add_training'))\n\n self.assertIn('<form action=\"/employees/\"'.encode(), response.content)\n self.assertIn('<input type=\"submit\" value=\"Employees\"'.encode(), response.content)\n self.assertIn('<input type=\"submit\" value=\"Training Programs\"'.encode(), response.content)\n self.assertIn('<input type=\"submit\" value=\"Departments\"'.encode(), response.content)\n self.assertIn('<input type=\"text\" name=\"name\" maxlength=\"40\" required id=\"id_name\"'.encode(), response.content)\n self.assertIn('<input type=\"text\" name=\"description\" maxlength=\"200\" required id=\"id_description\"'.encode(), response.content)\n self.assertIn('<input type=\"text\" name=\"start_date\" required id=\"id_start_date\"'.encode(), response.content)\n self.assertIn('<input type=\"text\" name=\"end_date\" required id=\"id_end_date\"'.encode(), response.content)\n self.assertIn('<input type=\"number\" name=\"max_attendees\" required id=\"id_max_attendees\"'.encode(), response.content)\n self.assertIn('<input type=\"submit\" value=\"Add Program\"'.encode(), response.content)", "title": "" }, { "docid": "9974c110282602e34d09dc0b5d8edf5e", "score": "0.6033135", "text": "def test_form_load(self):\n with self.login(username=self.user1.username):\n self.assertGoodView(\"fiction_outlines:outline_create\")\n self.assertInContext(\"form\")", "title": "" }, { "docid": "714d87f246767ee1be52c2b4ec5e8f58", "score": "0.60270745", "text": "def test_form_and_scrape(self):\n form_data={\n 'year':2000,\n 'rate':8,\n 'genres':'Action',\n 'votes':30000\n }\n\n request = self.client.post(FORM_URL,form_data)\n self.assertTrue(getattr(MovieIDs.objects.first(), 'movId'))", "title": "" }, { "docid": "fcefe7c33f8dbadee2ac9a9c980970b1", "score": "0.60240865", "text": "def test_html(self):\n self.data = self._cria_reserva()\n self.resp = self.client.get(r('reserva_new'), {'veiculo': self.data['veiculo']})\n tags = 
(('<form',1),\n ('<input', 3),\n ('<select', 2),\n ('type=\"submit\"',1))\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "title": "" }, { "docid": "918aea9ca5bf0688a9dc8676547c93e0", "score": "0.6011474", "text": "def test_form_has_fields(self):\n form = self.response.context['form']\n self.assertSequenceEqual(['name','cpf','email','phone'],list(form.fields))", "title": "" }, { "docid": "a3539dda430eca594c21bf86e2b041fa", "score": "0.59877574", "text": "def test_advanced_search_form_submit(client):\n form_data = {'search_term': '342sdfjkl;ajs',\n 'choice': 'bus_name',\n 'start_date': None,\n 'end_date': None,\n 'active': False,\n 'business_type': None}\n rv = client.post('/advanced_search', data= form_data, follow_redirects=True)\n assert rv.status_code == 200", "title": "" }, { "docid": "cc047f9c8d6d19e0e1f1c066fd94f88b", "score": "0.5977729", "text": "def page22(self):\n result = request2201.GET('/sustain/common/forms_js.jsp')\n\n return result", "title": "" }, { "docid": "222bf76c8a46ac8abd64946f2137d436", "score": "0.5971319", "text": "def test_form_fields(self):\n response = self.client.post(\n reverse('login'),\n {'username': 'temporary', 'password': 'temporary'})\n response = self.client.get(reverse('new-vendor'))\n\n fields = {'name': 'input', 'description': 'textarea', 'hours': 'input',\n 'story': 'select', 'status': 'select',\n 'location_description': 'textarea',\n 'contact_name': 'input', 'website': 'input',\n 'email': 'input', 'phone': 'input'}\n form = response.context['vendor_form']\n\n for field in fields:\n # for the Edit tests, you should be able to access\n # form[field].value\n self.assertIn(fields[field], str(form[field]))", "title": "" }, { "docid": "6990acff89b7ec8e38395bf23873f473", "score": "0.59583545", "text": "def _select_form(self, url_path) -> Tuple[BeautifulSoup, str]:\n resp = self.browser.open(f'{self.baseurl}{url_path}').read()\n soup = BeautifulSoup(resp, features='html.parser')\n html = soup.find('form', id='aspnetForm').prettify().encode('utf8')\n\n self.browser.select_form('aspnetForm')\n self.browser.form.set_all_readonly(False)\n\n return soup, html", "title": "" }, { "docid": "ad5cf4ad897e7ae52a21c289e1671983", "score": "0.5953148", "text": "def test_get_start_form_by_key(self):\n pass", "title": "" }, { "docid": "8ec2a9673ffe744580e492af7df94667", "score": "0.59478325", "text": "def test_get_form_class(self):\n self.view.tokens = []\n form_class = self.view.get_form_class()\n\n self.assertEqual(form_class.__name__, 'NamedEntityRecognizerFormFormSet')\n self.assertEqual(form_class().form().__class__, NamedEntityRecognizerForm)", "title": "" }, { "docid": "dcd40eaffb8654995496d42e49d6d2af", "score": "0.59371537", "text": "def getForm(self, pageNo):\n return self._readPageForm(pageNo)", "title": "" }, { "docid": "2e1e3c4883ed78dcc4aa2b94cc5b032e", "score": "0.59291595", "text": "def process_form(self):", "title": "" }, { "docid": "2e1e3c4883ed78dcc4aa2b94cc5b032e", "score": "0.59291595", "text": "def process_form(self):", "title": "" }, { "docid": "945d431f32845bdd007738aae3ad5a81", "score": "0.59276396", "text": "def formRoute():\n formWebPage = render_template(\"form.html\")\n return formWebPage", "title": "" }, { "docid": "acbeff2369484ca76a5e795ee2a35ab1", "score": "0.59105706", "text": "def test_person__PersonEditForm__6(person_with_field_data, browser):\n browser.login('visitor')\n browser.open(browser.PERSON_EDIT_URL)\n assert 'petra@example.com' in browser.contents\n assert 'my value' in 
browser.contents\n assert ['form.buttons.apply',\n 'form.buttons.cancel',\n 'form.buttons.export'] == browser.all_control_names", "title": "" }, { "docid": "44d3347c5a4b82a8a5f1614c4474cb6c", "score": "0.5906509", "text": "def test_valid_suggestion_form(self):", "title": "" }, { "docid": "32dc09488465391f13fe74257135caf2", "score": "0.58719766", "text": "def test_quality_control_get(self):\n response = self._common_view_tests(views.QualityControlView.as_view())\n self.assertContains(response, 'Quality Control')\n self.assertIn(b'<form id=\"result_form\"', response.content)", "title": "" }, { "docid": "ee61bdd2e446fea9e3fdc64730192acc", "score": "0.58657336", "text": "def test_form_fields(self):\n response = self.client.get(\n reverse('edit-product', kwargs={'id': '1'}))\n\n fields = {\n \"name\": \"Ezri Dax\",\n \"variety\": \"Freshwater Eel\",\n \"alt_name\": \"Jadzia\",\n \"description\": \"That's not actually an eel, it's a symbiote.\",\n \"origin\": \"Trill\",\n \"season\": \"Season 7\",\n \"available\": True,\n \"market_price\": \"$32.64 per season\",\n \"link\": \"http://www.amazon.com/\\\nStar-Trek-Deep-Space-Nine/dp/B00008KA57/\",\n \"image\": 2,\n \"story\": 2\n }\n form = response.context['product_form']\n\n for field in fields:\n self.assertEqual(fields[field], form[field].value())", "title": "" }, { "docid": "9d775491ad2c05a1792c23f76f3efbc8", "score": "0.5843518", "text": "def processForm():", "title": "" }, { "docid": "f2f4c9faa9f313c9e70329e6244b025e", "score": "0.5842209", "text": "def test_search_form_submit(client):\n form_data = {'search_term': '342sdfjkl;ajs', 'choice': 'bus_name'}\n rv = client.post('/', data= form_data, follow_redirects=True)\n assert rv.status_code == 200", "title": "" }, { "docid": "4c7215ab22d596ff742cda24da3ba608", "score": "0.5812456", "text": "def test_invalid_suggestion_form(self):", "title": "" }, { "docid": "32528b7c057f3926185d12446104d895", "score": "0.58046204", "text": "def test_fetch_remote_data_action_form(self):\n pass", "title": "" }, { "docid": "bc5d69bc8c1c99be419f084db379669a", "score": "0.57878536", "text": "def test_select_form_tag_object():\n forms = \"\"\"<form id=\"a\"></form><form id=\"b\"></form><p></p>\"\"\"\n soup = BeautifulSoup(forms, \"lxml\")\n with mechanicalsoup.StatefulBrowser() as browser:\n browser.open_fake_page(forms)\n form = browser.select_form(soup.find(\"form\", {\"id\": \"b\"}))\n assert form.form['id'] == \"b\"\n with pytest.raises(mechanicalsoup.LinkNotFoundError):\n browser.select_form(soup.find(\"p\"))", "title": "" }, { "docid": "0e4c51190077975e7aaf926c21b1487c", "score": "0.57766485", "text": "def test_has_form(self):\n form = self.response.context['form']\n self.assertIsInstance(form,SubscriptionForm)", "title": "" }, { "docid": "1fc249a2d5cadf3e118ddcd81ddd1855", "score": "0.5776213", "text": "def test_updateForm(self, *_):\n # logout to invalidate any logged-in sessions that may exist\n self.open(base_url +'/logout')\n # open login page\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", test_user.email)\n self.type(\"#password\", test_user.password)\n # click enter button\n self.click('input[type=\"submit\"]')\n \n # test if the page that loads is the home page and that it has \n # a form that has the id \"update_form\" and has the fields name, quantity,\n # price, date\n self.assert_element(\"#update_form\")\n # test if there is a name field\n self.assert_element(\"#update_form label[for='name']\")\n self.assert_text('Name',\"#update_form label[for='name']\")\n\n 
# test if there is a quantity field\n self.assert_element(\"#update_form label[for='quantity']\")\n self.assert_text('Quantity',\"#update_form label[for='quantity']\")\n\n # test if there is a price field\n self.assert_element(\"#update_form label[for='price']\")\n self.assert_text('Price',\"#update_form label[for='price']\")\n\n # test if there is a date field\n self.assert_element(\"#update_form label[for='date']\")\n self.assert_text('Date (yyyymmdd)',\"#update_form label[for='date']\")\n self.open(base_url +'/logout')", "title": "" }, { "docid": "9299915b7424cf9f7d16c20764d72f0f", "score": "0.57682973", "text": "def test_submit_form(self):\n params = {\n \"short_from\":\"NTE\",\n \"short_to\":\"EDI\",\n }\n response = self.get_any_flights_page(**params)\n self.assertTrue(response.ok)", "title": "" }, { "docid": "c946fb7daf58f6f528b5c8181e43938a", "score": "0.57650405", "text": "def test_edit_page_form_with_no_data(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post(reverse('edit', kwargs={'pk': 1}),\n {'name': 'Somebody',\n 'last_name': 'Unknown',\n 'date': '2015-01-01'},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertTrue('This field is required' in response.content)", "title": "" }, { "docid": "edd8fafeda2b99be71cf16a303185091", "score": "0.57609195", "text": "def test_signup_form(self):\n\n self.selenium.get(f\"{self.live_server_url}/signup/\")\n\n firstname = self.selenium.find_element_by_id(\"id_first_name\")\n lastname = self.selenium.find_element_by_id(\"id_last_name\")\n email = self.selenium.find_element_by_id(\"id_email\")\n password1 = self.selenium.find_element_by_id(\"id_password1\")\n password2 = self.selenium.find_element_by_id(\"id_password2\")\n submit = self.selenium.find_element_by_id(\"submit_button\")\n\n firstname.send_keys(self.firstname)\n lastname.send_keys(self.lastname)\n email.send_keys(self.email)\n password1.send_keys(self.password)\n password2.send_keys(self.password)\n submit.send_keys(Keys.RETURN)\n\n self.selenium.get(f\"{self.live_server_url}/account/\")\n\n assert self.firstname in self.selenium.page_source\n self.assertInHTML(self.email, self.selenium.page_source)", "title": "" }, { "docid": "16fd7503a106058fb7dc14c8d8ba795a", "score": "0.5755959", "text": "def test_index_post_form_is_not_valid(self):\n self.mock_form.is_valid.return_value = False\n response = self.client.post(reverse(\"research:index\"))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"Problème dans le formulaire\")", "title": "" }, { "docid": "cc11b1031a32079d77671bb1c30fbccb", "score": "0.57544595", "text": "def form_example():\n # handle the POST request\n if request.method == 'POST':\n link = request.form.get('link')\n if 'watch?v=' not in link:\n return '''\n <form method=\"POST\">\n <div><label>No Stream found! 
Link: <input type=\"text\" name=\"link\"></label></div>\n <input type=\"submit\" value=\"Submit\">\n </form>'''\n link = link.split(\"watch?v=\")[1]\n return redirect(url_for('test', link=link))\n # handle the GET request\n return '''\n <form method=\"POST\">\n <div><label>Link: <input type=\"text\" name=\"link\"></label></div>\n <input type=\"submit\" value=\"Submit\">\n </form>'''", "title": "" }, { "docid": "0d02e08d130cb25d3e559b2549df2f8a", "score": "0.5740236", "text": "def test_login_view(app):\n response = app.get('/login')\n expected = \"\"\"<form action=\"\" method=\"post\">\n<span>thatonelegend login: <input autofocus=\"\" name=\"username\"/></span><br/>\n<span>password: <input name=\"password\" type=\"password\"/></span><br/>\"\"\"\n assert expected in str(response.html.find(\"form\"))", "title": "" }, { "docid": "6d35956cdceec323ba0344ca1e12fb96", "score": "0.5735517", "text": "def test_submit_form_by_key_and_tenant_id(self):\n pass", "title": "" }, { "docid": "3fd17ba200191902ee64bb7148461fac", "score": "0.5732213", "text": "def test_form_has_fields(self):\n form = self.resp.context['form']\n self.assertSequenceEqual(['name', 'email', 'telefone', 'message'], list(form.fields))", "title": "" }, { "docid": "2bff05268159baf2034b7060f5c04041", "score": "0.5726577", "text": "def test_form_list(self):\n response = self.lg.handle_request(\n method=\"GET\", path=\"/forms\", headers={\"authorization\": \"auth\"}, body=\"\"\n )\n self.assertEqual(response[\"statusCode\"], 200, response)\n body = json.loads(response[\"body\"])\n self.assertEqual(\n set((\"_id\", \"name\", \"cff_permissions\", \"date_created\", \"date_modified\")),\n set(body[\"res\"][0].keys()),\n )", "title": "" }, { "docid": "632925bb2f531225c1bac38efde78e4f", "score": "0.5725306", "text": "def test_form_action_allowed(browser, header, meta, method):\n policy = \"form-action 'self'\"\n params = \"method={0}\".format(method)\n server = Server(config['server_address'], config['server_port'])\n server.update_log_pointer()\n url = generate_test_url(policy, header=header, meta=meta, allow=True,\n fixture_url='form-action', params=params)\n form_page = FormActionPage(browser).open(url)\n echo_page = form_page.submit_form()\n assert echo_page.on_page(wait_for_page_to_load=True)\n assert server.is_request_received(method, '/echo', ignore_query=True)", "title": "" }, { "docid": "ae479665b438bea59b76cb996a7d6533", "score": "0.5721637", "text": "def test_has_contact_form(self):\n form = self.resp.context['form']\n self.assertIsInstance(form, ContactForm)", "title": "" }, { "docid": "d6c48a186d8223cae35ebc71b8f28188", "score": "0.5714063", "text": "def test_new_user_signup_form(self):\n\n with app.test_client() as client:\n resp = client.get('/signup')\n html = resp.get_data(as_text=True)\n\n self.assertEqual(resp.status_code, 200)\n self.assertIn('<button class=\"btn btn-primary btn-lg btn-block\">Sign me up!</button>', html)", "title": "" }, { "docid": "31e921f05100abeb6ce86cb591195959", "score": "0.57119167", "text": "def test_get_rendered_start_form_by_key(self):\n pass", "title": "" }, { "docid": "4a821ee7e345adca2d186ea98231ef68", "score": "0.5697153", "text": "def test_form_fields(self):\r\n for required in (True, False):\r\n form = Form.objects.create(title=\"Test\", status=STATUS_PUBLISHED)\r\n if USE_SITES:\r\n form.sites.add(self._site)\r\n form.save()\r\n for (field, _) in NAMES:\r\n form.fields.create(label=field, field_type=field,\r\n required=required, visible=True)\r\n response = 
self.client.get(form.get_absolute_url())\r\n self.assertEqual(response.status_code, 200)\r\n fields = form.fields.visible()\r\n data = dict([(f.slug, \"test\") for f in fields])\r\n response = self.client.post(form.get_absolute_url(), data=data)\r\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "3407b7a2c45f2bcc6b8c025030ec4352", "score": "0.56685424", "text": "def test_form_validation_success(self, client):\n template_registry.register(self.type, \"foo/bar\", \"foo bar\", default=False)\n template_registry.register(self.type, \"foo/bar2\", \"foo bar\", default=True)\n template_registry.register(self.type, \"foo/bar3\", \"foo bar\", default=False)\n p = Node.root()\n data = self.valid_data(slug=\"s\", title=\"t\", template=\"foo/bar3\", language=\"en\")\n\n form = self.type.form(parent=p, data=data, files=self.valid_files())\n\n assert form.is_valid()\n assert form.cleaned_data['template'] == \"foo/bar3\"", "title": "" }, { "docid": "4ddd4a57306c60dd984c0a9a11e0270d", "score": "0.56636614", "text": "def test_standard_behaviour_determines_form_content_POST(self):\n self.ensure_determines_form_content_POST(RequestMixin())", "title": "" }, { "docid": "322be36ea93c9531e8b34634c82926f9", "score": "0.56552565", "text": "def webflow_form_view(request):\n try:\n data = request.data[\"data\"]\n model_kwargs = {\n \"name\": data[\"Name\"],\n \"email\": data[\"Email\"],\n \"phone\": data[\"Phone Number\"],\n \"referral\": data[\"Referral\"],\n }\n except KeyError:\n raise ValidationError(\"Invalid request format.\")\n\n WebflowContact.objects.create(**model_kwargs)\n return Response({\"message\": \"We got the form. :)\"}, status=201)", "title": "" }, { "docid": "5f9395246cba4d0cd1511ec8ca29f906", "score": "0.5650341", "text": "def gymformfiller(buttontime):\r\n\ttoday = datetime.today()\r\n\tnow = today.strftime('%Y-%m-%d')\r\n\ttry:\r\n\t\tweb = webdriver.Chrome()\r\n\t\tweb.get(\r\n\t\t\t\"https://splashworld.legendonlineservices.co.uk/enterprise/ticketing/browse?StartDate=\" + now + \"&ActivityId=2 \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"&LocationId=3 \"\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"&ResourceId=8\")\r\n\t\ttime.sleep(6)\r\n\t\t# clicks the correct time thats is selected by user\r\n\t\tweb.find_element_by_xpath(\"//*[contains(text(),'\" + str(buttontime) + \"')]\").click()\r\n\t\ttime.sleep(3)\r\n\t\t# drop down menu fill out\r\n\t\tdropdownmenu = web.find_element_by_xpath(\r\n\t\t\t'/html/body/div[1]/div[2]/div/booking-tickets-base/div[3]/form/div/div['\r\n\t\t\t'1]/div/div[1]/ticket-selection/div/div/div[1]')\r\n\t\tdropdownmenu.click()\r\n\t\ttime.sleep(5)\r\n\t\t# memeber number left out for Github sorry it wont work from here :(\r\n\t\tmembernum = '------'\r\n\t\t# adds member number to form\r\n\t\tmembernumadd = web.find_element_by_xpath(\r\n\t\t\t'/html/body/div[1]/div[2]/div/booking-tickets-base/div[3]/form/div/div['\r\n\t\t\t'1]/div/div[1]/ticket-selection/div/div/div[2]/div/div/div['\r\n\t\t\t'1]/div/input-text-button-group/div/div/input')\r\n\t\tmembernumadd.send_keys(membernum)\r\n\t\t# clicks submit member button\r\n\t\taddbut = web.find_element_by_xpath('//*[@id=\"collapsible-ticket-panel-0\"]/div/div/div['\r\n\t\t\t\t\t\t\t\t\t\t '1]/div/input-text-button-group/div/div/span/button-primary/button')\r\n\t\taddbut.click()\r\n\r\n\t\ttime.sleep(3)\r\n\t\t# opens rules and agrees to them\r\n\t\trulesopen = web.find_element_by_xpath('//*[@id=\"basket-summary-base\"]/div['\r\n\t\t\t\t\t\t\t\t\t\t\t 
'1]/booking-summary-base/booking-details/div[1]/div[2]/button[1]')\r\n\t\trulesopen.click()\r\n\r\n\t\ttime.sleep(3)\r\n\t\trulesaccept = web.find_element_by_xpath(\r\n\t\t\t'//*[@id=\"basket-summary-base\"]/div[1]/booking-summary-base/booking-details/div[2]/div/div/div['\r\n\t\t\t'3]/button-primary/button')\r\n\t\trulesaccept.click()\r\n\r\n\t\ttime.sleep(3)\r\n\t\t# add to basket button clicked\r\n\t\tbasketadd = web.find_element_by_xpath('//*[@id=\"basket-summary-base\"]/div[2]/submit-action/button')\r\n\t\tbasketadd.click()\r\n\r\n\t\ttime.sleep(8)\r\n\t\t# another click\r\n\t\tweb.find_element_by_xpath(\r\n\t\t\t'//*[@id=\"universal-basket-process-request\"]/div[2]/div['\r\n\t\t\t'2]/universal-basket-options/universal-basket-continue-options/button[1]').click()\r\n\r\n\t\t# adds email to submission box\r\n\t\temail = \"sullivanlouis0@gmail.com\"\r\n\t\ttime.sleep(6)\r\n\t\tweb.find_element_by_xpath('//*[@id=\"universal-basket-email-address\"]').send_keys(email)\r\n\t\ttime.sleep(3)\r\n\t\t# adds confirmation email to submission box\r\n\t\tweb.find_element_by_xpath('//*[@id=\"universal-basket-confirm-email-address\"]').send_keys(email)\r\n\t\ttime.sleep(3)\r\n\t\t# clicks accept\r\n\t\tweb.find_element_by_xpath('//*[@id=\"universal-basket-process-request\"]/div[2]/div[1]/div['\r\n\t\t\t\t\t\t\t\t '4]/universal-basket-payment-details/div/div/div['\r\n\t\t\t\t\t\t\t\t '5]/universal-basket-terms-conditions/ul/li/div[1]/label/span[2]/i').click()\r\n\t\ttime.sleep(4)\r\n\t\t# clicks submit\r\n\t\tweb.find_element_by_xpath('//*[@id=\"universal-basket-process-request\"]/div[2]/div['\r\n\t\t\t\t\t\t\t\t '2]/universal-basket-options/universal-basket-continue-options/button[1]').click()\r\n\t\tprint(\"Done! :)\")\r\n\r\n\texcept Exception as e:\r\n\t\tprint(\"Oops! 
Looks like something went wrong :(\")", "title": "" }, { "docid": "62bcc2cfc0bcfb637b74cd782266ba4c", "score": "0.5645508", "text": "def test_get_rendered_start_form_by_key_and_tenant_id(self):\n pass", "title": "" }, { "docid": "79d6520e84c12251551730ab2b8ccf9d", "score": "0.5645233", "text": "def test_home_buy_form(self, *_):\n self.login()\n\n # open home page\n self.open(base_url)\n\n # Click the buy form button\n self.click('form[action*=\"/buy\"] input[type=\"submit\"]')\n\n # If the form does not go anywhere and POST correctly we will see a 404 error page\n self.assert_text_not_visible(\"404 error\")", "title": "" }, { "docid": "8590d80cdf8fadfbf8a6c19980e75aa8", "score": "0.56400996", "text": "def test_cufflinks_import_form_view(self):\n \n test_response = self.client.get('/data/cufflinks_import/')\n self.assertEqual(test_response.status_code, 200)\n self.assertTemplateUsed(test_response, 'base.html')\n self.assertTemplateUsed(test_response, 'import_form.html')", "title": "" }, { "docid": "43047e895928451242cbd005bf7fe34d", "score": "0.5630485", "text": "def _get_mock_web_page():\n type_default_for_test = 0\n return WebPage(type=type_default_for_test, content=\"web page\")", "title": "" }, { "docid": "e8fa7f51c67922e448fac8cbd73f86a1", "score": "0.56270504", "text": "def test_valid_form(self):\n User.objects.create_user(username='test',\n email='test@email.com',\n password='test')\n self.client.login(username='test@email.com', password='test')\n form_data = {\n 'username': 'test',\n 'email': 'test@email.com',\n 'first_name': 'test_name',\n 'last_name': 'test_last_name',\n 'teams': 1,\n }\n response = self.client.post('/contact_form/', form_data)\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "fb9a83f73c9b5afe6328c0b0c0c8f89f", "score": "0.5606187", "text": "def parse_form(self, req, name, arg):\r\n return None", "title": "" }, { "docid": "59fc0f449d2b204f3cf3960367e5a0d8", "score": "0.5597182", "text": "def test_has_form(self):\n form = self.resp.context['form']\n self.assertIsInstance(form, SubscriptionForm)", "title": "" }, { "docid": "f220dd261d3be5a6f14e9e19bc379ea4", "score": "0.5595108", "text": "def test_get_form_not_valid(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n list(response.context['locations']), list(Location.objects.all()))\n self.assertEqual(response.context['daterange'], '10-14-2011 - 01-14-2012')", "title": "" }, { "docid": "46b350fb6d07eab33df5b606753abaae", "score": "0.5581561", "text": "def test_get_start_form_by_key_and_tenant_id(self):\n pass", "title": "" }, { "docid": "8e2f19b1bf9225d878b8d7b1cf936530", "score": "0.5578959", "text": "def test_harvester_form_is_valid(self):\n user = User.objects.get(id=1)\n url = \"http://www.isgoing.to/api\"\n data = {\n 'name': \"foo_bar1\",\n 'owner': user,\n 'url': url,\n }\n form = HarvesterForm(data=data)\n self.assertTrue(form.is_valid())", "title": "" }, { "docid": "05b8f6e834ad8174922fc9684e4f523e", "score": "0.55712223", "text": "def select_form(self, response, **kwargs):\n self.browser._update_state(response)\n forms = self.browser.get_forms()\n form = self.pick_form(forms, **kwargs)\n\n if not forms:\n raise Exception(\"Can't pick a form !!\")\n\n if \"set\" in kwargs:\n for key, val in list(kwargs[\"set\"].items()):\n if key.startswith(\"_\"):\n continue\n if \"click\" in kwargs and kwargs[\"click\"] == key:\n continue\n\n try:\n form[key].value = val\n except (ValueError):\n pass\n except Exception as 
err:\n raise\n # cntrl = form.find_control(key)\n # if isinstance(cntrl, ListControl):\n # form[key] = [val]\n # else:\n # raise\n\n if form.action in kwargs[\"tester\"].my_endpoints():\n _res = {}\n for name, cnt in form.fields.items():\n _res[name] = cnt.value\n return _res\n\n try:\n requests_args = kwargs[\"requests_args\"]\n except KeyError:\n requests_args = {}\n\n self.browser.submit_form(form, **requests_args)\n return self.browser.state.response", "title": "" }, { "docid": "61776f7c2a69b474dc4ad99ac3b3c4e9", "score": "0.55656254", "text": "def test_home_buy_form_exists(self, *_):\n self.login()\n\n # open home page\n self.open(base_url)\n\n # make sure each field exists under a form with the action to buy\n self.assert_element_present('form[action*=\"/buy\"] #buy-name')\n self.assert_element_present('form[action*=\"/buy\"] #buy-quantity')", "title": "" }, { "docid": "27f7895dc9dd240b0f714c158931e8ea", "score": "0.5559481", "text": "async def test_form(hass: HomeAssistant, laundrify_setup_entry) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": SOURCE_USER}\n )\n assert result[\"type\"] == FlowResultType.FORM\n assert result[\"errors\"] is None\n\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n user_input=VALID_USER_INPUT,\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.CREATE_ENTRY\n assert result[\"title\"] == DOMAIN\n assert result[\"data\"] == {\n CONF_ACCESS_TOKEN: VALID_ACCESS_TOKEN,\n }\n assert len(laundrify_setup_entry.mock_calls) == 1", "title": "" }, { "docid": "11b9d1640aecf3d246b6a5c5ff81f756", "score": "0.5556469", "text": "def test_empty_form(self, empty_form):\n expected_result, current_result = empty_form\n assert expected_result in current_result", "title": "" }, { "docid": "7f755e60df18d17af557f325870386a9", "score": "0.55535734", "text": "def madlib_form():\n\n inputs = story.prompts\n\n return render_template('form.html', prompts=inputs)", "title": "" }, { "docid": "89ae1f1db8f93187f2ec3e3b0d6a6bcd", "score": "0.5553398", "text": "def test_home_sell_form_exists(self, *_):\n self.login()\n\n # open home page\n self.open(base_url)\n\n # make sure each field exists under a form with the action to sell\n self.assert_element_present('form[action*=\"/sell\"] #name')\n self.assert_element_present('form[action*=\"/sell\"] #quantity')\n self.assert_element_present('form[action*=\"/sell\"] #price')\n self.assert_element_present('form[action*=\"/sell\"] #date')", "title": "" }, { "docid": "051ee3ea0903aaa4710f50475db328c7", "score": "0.55471665", "text": "def test_login_page_form_with_no_data(self):\n data = {}\n response = self.client.post(reverse('login'), data)\n self.assertTrue('This field is required' in response.content)", "title": "" }, { "docid": "666d22ad68c830bf5eb57c756f5c92c3", "score": "0.5537706", "text": "def test_get_deployed_start_form_by_key(self):\n pass", "title": "" }, { "docid": "860f3a27c1153e5d7d87ea102f1ea683", "score": "0.5537515", "text": "def generate_form():\n default = get_default_text().decode('utf8')\n form_html = load_template('form').substitute(default_text=default)\n header_ok()\n run_main_template(form_html)", "title": "" }, { "docid": "994ffd50bd13df8d1aad8fd19f02eb94", "score": "0.5534182", "text": "def test_person__PersonEditForm__7(\n person_with_field_data, browser2, browser, username):\n delete_field(browser2, 'Field-1')\n browser.login(username)\n browser.open(browser.PERSON_EDIT_URL)\n with 
pytest.raises(LookupError):\n browser.getControl('foobar')", "title": "" }, { "docid": "846b5b9addacae6812733ebc6f8ef320", "score": "0.5531953", "text": "def test_home_sell_form(self, *_):\n self.login()\n\n # open home page\n self.open(base_url)\n\n # Click the sell form button\n self.click('form[action*=\"/sell\"] input[type=\"submit\"]')\n\n # If the form does not go anywhere and POST correctly we will see a 404 error page\n self.assert_text_not_visible(\"404 error\")", "title": "" }, { "docid": "4ba54249adf71f152064bc3322b17f6e", "score": "0.55270076", "text": "def get_form():\n return CommentFormWithCaptcha", "title": "" }, { "docid": "7d8fd948420a3b05984c49ea69c70da8", "score": "0.5523368", "text": "def test_bad_manually(self):\n self.twill.browser.go(self.twill.url(\"/foo/\"))\n self.twill.browser.showforms()\n self.twill.browser.submit(1)", "title": "" }, { "docid": "8940d2ccf1df48555fc2dc8b3f44ad92", "score": "0.5522241", "text": "def test_has_form_fields(self):\n\n tags = (\n ('<form', 1),\n ('<input', 4),\n ('<textarea', 1),\n ('type=\"text', 3),\n ('type=\"email', 1),\n ('type=\"submit', 1),\n )\n\n for text, count in tags:\n with self.subTest():\n self.assertContains(self.resp, text, count)", "title": "" } ]
a9252f98f9ee6d2450a98f1c83844315
Get real path of image. self.train_image_path + '/' + path
[ { "docid": "3d821415e9a5a3c32013794d773a2f47", "score": "0.9221826", "text": "def _real_image_path(self, path):\r\n return osp.join(self.train_image_path, path)", "title": "" } ]
[ { "docid": "8e98291675304f4138e28ce227c4e5c7", "score": "0.8561364", "text": "def train_image_path(self) -> str:\n return join(self.directory_path, 'train-images')", "title": "" }, { "docid": "7c7b4b0da3f358ad243b4f0b76751007", "score": "0.7292678", "text": "def imagePath(self):\n if self.use_dic:\n if self.imlist:\n paths = []\n for img in self.allimgs:\n paths.append(join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(img)+'.jpg'))\n return paths\n else:\n path = join(self.home, 'data'+str(self.data), self.activity, self.imsize, str(self.img)+'.jpg')\n else:\n path = self.img\n return path", "title": "" }, { "docid": "b76cd808e72c3fb8d8e35fd68d769312", "score": "0.72723365", "text": "def predict_image_path(self) -> str:\n return join(self.directory_path, 'predict-images')", "title": "" }, { "docid": "cc841b713af4c0ce4eca05abea467981", "score": "0.7262094", "text": "def real_image_path(self, image):\n return osp.join(self._croped_image_path, image)", "title": "" }, { "docid": "a32c2e3323b46c25d3a25411d8cf0a74", "score": "0.7221066", "text": "def imagePath(self):\n return self.path", "title": "" }, { "docid": "b2403ec2a20b76ad09e8c9078f04bc2d", "score": "0.7165861", "text": "def get_raw_path(self):\n\n return self.raw_img_path", "title": "" }, { "docid": "af102bc6dbc94f400c9f6d2985c1e504", "score": "0.7111311", "text": "def get_pathname(self):\n return self.image_data.path", "title": "" }, { "docid": "0e8829c4c08a681d801d7dc4b15a8bbb", "score": "0.7011626", "text": "def path_of_image():\n top.folder_name = filedialog.askdirectory(title=\"select directory\",\n initialdir=\"C:/Users/Ayush sagore/JUPITER NOTEBOOK ML/CNN Model/\"\n \"test_dataset/\")\n path_name.insert(0, top.folder_name)", "title": "" }, { "docid": "03ca95ecc27157a1d6edb1bdf34b60b7", "score": "0.6882875", "text": "def get_processed_path(self):\n\n return self.processed_img_path", "title": "" }, { "docid": "a0a236fdbeb82c1e476b13fa069a7436", "score": "0.6777518", "text": "def images_path(self):\n return os.path.join(self.extracted_path, \"images\")", "title": "" }, { "docid": "2840e0e071b4382cc1a993718dec8c68", "score": "0.6768887", "text": "def get_image_dir(self):\n return self.img_dir", "title": "" }, { "docid": "ecfe58e19e6cb6eddec2d02620b49f4d", "score": "0.6746203", "text": "def image_path_from_index(self, index):\n # set the prefix\n if self._image_set == 'test':\n prefix = 'testing/image_2'\n else:\n prefix = 'training/image_2'\n\n image_path = os.path.join(self._data_path, prefix,\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "610371fdf355a2ca4c5096cf97ab0fa9", "score": "0.6691406", "text": "def get_image_path(source_path):\n\n split = source_path.split('\\\\')\n # get filename\n filename = split[-1].lstrip()\n # get folder name\n folder = split[-3]\n # get full data path\n current_path = folder + '/IMG/' + filename\n return current_path", "title": "" }, { "docid": "ec535ad95313a0b01dd96dd893b487c4", "score": "0.66477543", "text": "def _directory_path(self):\n if not os.path.isdir(self.new_img_dir) : os.mkdir(self.new_img_dir)\n if not os.path.isdir(os.path.join(self.new_img_dir, \"train\")) : os.mkdir(os.path.join(self.new_img_dir, \"train\"))\n if not os.path.isdir(os.path.join(self.new_img_dir, \"test\")) : os.mkdir(os.path.join(self.new_img_dir, \"test\"))", "title": "" }, { "docid": "0a73ae4fa8b11ff3a7b31614fb005acb", "score": "0.6617539", "text": "def 
_image_path_from_index(self, index):\n # Example image path for index=119993:\n # images/train2014/COCO_train2014_000000119993.jpg\n file_name = (str(index).zfill(12) + '.jpg')\n image_path = os.path.join(self._root_dir, self._data_name, file_name)\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "1f6972965a8da5c415f2e0b8ebb3273b", "score": "0.65784323", "text": "def get_data_path():\n up_folder = os.path.abspath(os.path.join(ROOT_DIR, '..'))\n img_folder = os.path.join(up_folder, 'data_set', 'XX-ImageLabel', 'train_data_416')\n img_file = os.path.join(DATA_DIR, \"t_img_tags_train.txt\") # 数据类别\n return img_folder, img_file", "title": "" }, { "docid": "8af26eff93f4dfb61abd4a3a12d453df", "score": "0.6554828", "text": "def propose_image_path():\n image_name = \"image_{}.png\".format(''.join(random.choice('abcdefghijklmnopqrstuvwxyz') for _ in range(8)))\n image_path = os.path.join(_IMAGE_BASE_PATH, image_name)\n image_url = '/images/' + image_name\n return image_path, image_url", "title": "" }, { "docid": "612ef353974c792268bc82f6d257f8dc", "score": "0.65527064", "text": "def image_path_from_index(self, index):\n assert self.image_set_index is not None, \"Dataset not initialized\"\n name = self.image_set_index[index]\n image_file = os.path.join(self.image_dir, 'images', name)\n assert os.path.isfile(image_file), 'Path does not exist: {}'.format(image_file)\n return image_file", "title": "" }, { "docid": "2134c2ca03782a7135ea9e357e869fb3", "score": "0.6551155", "text": "def get_image_path(self) -> Optional[str]:\n if not self.image or not self.image.file_path:\n return None\n return self.image.file_path", "title": "" }, { "docid": "1ac041ebfb486a1e141c940e0d26086f", "score": "0.6549644", "text": "def real_image_path(self, index):\r\n\r\n index = index.replace(\"\\\\\", \"/\")\r\n\r\n if not os.path.exists(index):\r\n image_file = os.path.join(self.prefix_path, index)\r\n else:\r\n image_file = index\r\n if not image_file.endswith('.jpg'):\r\n image_file = image_file + '.jpg'\r\n assert os.path.exists(\r\n image_file), 'Path does not exist: {}'.format(image_file)\r\n return image_file", "title": "" }, { "docid": "cbe1d35326a825ff32e421be61dfb224", "score": "0.6526983", "text": "def _get_image_absolute_path(image_scraper_model: ImageScraperModel) -> str:\n return image_scraper_model.image.path", "title": "" }, { "docid": "36d535c7f9e3d8a88640da03c94c475b", "score": "0.6508069", "text": "def root_path(self):\n return os.path.dirname(self.image.path)", "title": "" }, { "docid": "b76dc1df158d1b642c62fd2b694462f7", "score": "0.6501401", "text": "def imagefile(self):\n return os.path.join(self.__folder, self.__name + '.jpg')", "title": "" }, { "docid": "37cc3b391668e912eb706ed10e79a057", "score": "0.644133", "text": "def normalize_path(img_file):\n\n img_file = img_file.split('/')[-1]\n img_file = 'data/IMG/'+img_file.split('\\\\')[-1]\n return img_file", "title": "" }, { "docid": "ca27d251e73d4b58183630518adb3f9f", "score": "0.64333844", "text": "def get_image_path(img_name):\n return os.path.join(\n rospkg.RosPack().get_path('march_rqt_input_device'),\n 'resource',\n 'img{0}'.format(img_name))", "title": "" }, { "docid": "afafa6d499a65773f81ca7e384d373bb", "score": "0.6401894", "text": "def path_image(image):\n return bpy.path.abspath(image.filepath, library=image.library).replace(\"\\\\\", \"/\")\n # .replace(\"\\\\\",\"/\") to get only forward slashes as it's what POV prefers,\n # even on windows", "title": "" }, { 
"docid": "ec65bbec24e1abdaa6c705a52c8b41f2", "score": "0.63913625", "text": "def setNewImagePath(self):\n\t\tnextNum = NumTracker(\"tmp\").getCurrentFileNumber();\n\t\t\n\t\tbasePath=InterfaceProvider.getPathConstructor().createFromDefaultProperty() + File.separator;\n\t\tsubDir=\"%d_%s\"%(nextNum, self.pathPostfix); \n\t\tnewImagePath = os.path.join(basePath, subDir);\n\n\t\tif not os.path.exists(newImagePath):\n\t\t\t#print \"Path does not exist. Create new one.\"\n\t\t\tos.makedirs(newImagePath);\n\t\t\tself.imageNumber=0;#To reset the image number\n\t\t\t\n\t\tif not os.path.isdir(newImagePath):\n\t\t\tprint \"Invalid path\";\n\t\t\treturn;\n\t\t\t\t\n\t\tself.filePath = newImagePath;\n\t\t#print \"Image file path set to \" + self.filePath;\n\t\treturn self.filePath;", "title": "" }, { "docid": "a5e3a38216977ecc7e5805aacd5c8827", "score": "0.6375474", "text": "def set_image_path(self, image_path):\n i = image_path.rfind('/')\n self.image_path = image_path[i+1:]", "title": "" }, { "docid": "3f07af74e760cd120d41b1eb0a23a83f", "score": "0.63619345", "text": "def collect_train_paths(self):\n\n image_paths = []\n annotation_paths = []\n\n n_images = 10000\n for i in range(1, n_images + 1):\n added = False\n for extension in ['jpg', 'png']:\n image_path = os.path.join(self.folder,\n f'ImagesPart{(i - 1) // 5000 + 1}',\n f'tr_img_{i:05}.{extension}')\n if os.path.exists(image_path):\n image_paths.append(image_path)\n added = True\n break\n if added:\n annotation_paths.append(\n os.path.join(self.folder, 'train_gt_t13', f'tr_img_{i:05}.txt')\n )\n else:\n print(f'Could not find: {image_path[:-3]}*')\n\n return image_paths, annotation_paths", "title": "" }, { "docid": "0b0eab54f96049dbb1506c8a7b939776", "score": "0.63279766", "text": "def build_image_path(self, src):\r\n o = urlparse(src)\r\n # we have a full url\r\n if o.hostname:\r\n return o.geturl()\r\n # we have a relative url\r\n return urljoin(self.target_url, src)", "title": "" }, { "docid": "35da2dd9af8287da31a0edd9eddfa02c", "score": "0.63213974", "text": "def get_img_path(self, inp=None, aux=None):\n img_path = \"\".join([\n self._report_generator.config.img_dir,\n os.sep,\n self.get_tag(inp, aux),\n \".png\"])\n return img_path", "title": "" }, { "docid": "bb01a825254861ee30ed6bb9df68ddcd", "score": "0.6282285", "text": "def collect_train_paths(self):\n\n image_paths = []\n annotation_paths = []\n n_images = 7200\n for i in tqdm(range(1, n_images + 1)):\n added = False\n for extension in ['jpg', 'png']:\n image_path = os.path.join(self.folder,\n f'ch8_training_images_{(i - 1) // 1000 + 1}',\n f'img_{i}.{extension}')\n if os.path.exists(image_path):\n image_paths.append(image_path)\n added = True\n break\n if added:\n annotation_paths.append(\n os.path.join(self.folder, 'ch8_training_localization_transcription_gt_v2',\n f'gt_img_{i}.txt')\n )\n else:\n logging.warning(f'Could not find: {image_path[:-3]}*')\n return image_paths, annotation_paths", "title": "" }, { "docid": "31b273ef7d6aad5fd9fee5a8b20acadd", "score": "0.62715214", "text": "def uploaded_image_path(filename):\n return '/'.join((app.config['UPLOAD_FOLDER'], filename))", "title": "" }, { "docid": "c2ea02910f14c23e0e7156c38e7061d7", "score": "0.6268017", "text": "def set_image_out_path(self, image_path):\n i = image_path.rfind('/')\n self.image_out_path = image_path[:i+1] + \"out/\" + image_path[i+1:]", "title": "" }, { "docid": "c4e64cf9d5ccd7ac77fed675b3ce9cb1", "score": "0.6252097", "text": "def get_path_image(path_data, label, filename):\n\n return 
path_data.joinpath(f'label_{label}', filename)", "title": "" }, { "docid": "86d149c70ec53705afdd8415e2276c19", "score": "0.6251187", "text": "def getImagePath():\n currentPath = os.path.dirname(__file__)\n resourcesPath = os.path.join(currentPath, \"Resources\")\n imagesPath = os.path.join(resourcesPath, \"Images\")\n return imagesPath", "title": "" }, { "docid": "a8bbb2382b9986b0570fc0e7880c5846", "score": "0.6237819", "text": "def image_path_from_index(self, index):\n url = index.image.url\n path_lst = url.split('/')\n\n image_path = os.path.join(self._data_path, 'JPEGImages', path_lst[-2], path_lst[-1])\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "60e67a1bc0a0a97f2e9e05c775bd535c", "score": "0.62296015", "text": "def img_dir(self):\n try:\n return dirname(self.img_files[0])\n except:\n return \"Not available\"", "title": "" }, { "docid": "f36ce8df6b08d3a7cfb9e57a61828f39", "score": "0.61954516", "text": "def getImagePath(self)->str:\n\n returnStr = '../../../../assets/image/{}.png'.format(randint(1,15))\n return returnStr", "title": "" }, { "docid": "096dcc180c77a6fa8b47da5b2d367e13", "score": "0.61933595", "text": "def _getImagePath(self, link):\n return settings.WIKI_UPLOAD_URL + urlquote(link)", "title": "" }, { "docid": "495a40a49dd8d6bde2223f5dbe17666e", "score": "0.6190036", "text": "def doctor_image_upload_path(instance, file_path):\n file_name, file_ext = get_file_name_ext(file_path)\n random_file_name = generate_random_text(file_name)\n final_file_name = f\"profile_photos/{random_file_name}\"\n return final_file_name", "title": "" }, { "docid": "b6c7f5c51fc2c37829d2695420567778", "score": "0.6179128", "text": "def train_features_path(self) -> str:\n return join(self.feature_path, 'train_features')", "title": "" }, { "docid": "9ba5c17f3231d124b0c08d98d2db57c0", "score": "0.61775565", "text": "def source_path(self, workspace):\n if self.file_name_method.value == FN_FROM_IMAGE:\n path_feature = \"%s_%s\" % (\n C_PATH_NAME,\n self.file_image_name.value,\n )\n assert workspace.measurements.has_feature(\"Image\", path_feature), (\n \"Image %s does not have a path!\" % self.file_image_name.value\n )\n return workspace.measurements.get_current_image_measurement(path_feature)\n\n # ... 
otherwise, chase the cpimage hierarchy looking for an image with a path\n cur_image = workspace.image_set.get_image(self.image_name.value)\n while cur_image.path_name is None:\n cur_image = cur_image.parent_image\n assert (\n cur_image is not None\n ), \"Could not determine source path for image %s' % (self.image_name.value)\"\n return cur_image.path_name", "title": "" }, { "docid": "b373867788dfbc1941660c59f13b6f83", "score": "0.61774313", "text": "def ml_train_samples_path(self) -> str:\n return join(self.machine_learning_path, 'train_machinelearning')", "title": "" }, { "docid": "c30279a2939e6c55cff4128258b898a4", "score": "0.6176953", "text": "def _submodel_images_path(self, i):\n template = self.config['submodel_images_relpath_template']\n return os.path.join(self.data_path, template % i)", "title": "" }, { "docid": "4c44b0c3049bc2a4ae1bd43645f0dece", "score": "0.61664134", "text": "def make_image_path(raw_img, input_base, base_path):\n path = os.path.dirname(raw_img)\n relpath = os.path.relpath(path, input_base)\n if relpath == '.':\n dest_folder = base_path\n else:\n dest_folder = os.path.join(base_path, relpath)\n return os.path.normpath(dest_folder)\n # return dest_folder", "title": "" }, { "docid": "e8b6885393ec097db8df1850260ed97b", "score": "0.6157156", "text": "def image_path_from_index(self, index):\n assert self.annotation_set is not None, \"Dataset not initialized\"\n name = self.annotation_set[index] # e.g. 'set00_V010_img00577.xml'\n set_name, video_name, xml_name = name.split('_')\n img_name = os.path.splitext(xml_name)[0] + self.extension\n img_path = os.path.join(self.data_path, set_name, video_name, img_name)\n assert os.path.exists(img_path), 'Path does not exist: {}'.format(img_path)\n\n return img_path", "title": "" }, { "docid": "1ddad599e0b6801d8c6d4eee7e389dd3", "score": "0.6142533", "text": "def image_path_at(self, i):\n image_path = os.path.join(self._image_path, self._image_index[i])\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "2a50f8789a016bdc8452591582252e1d", "score": "0.6140405", "text": "def get_image_path(self) -> Optional[str]:\n try:\n return self.localised_faces.all()[0].get_image_path()\n except IndexError:\n logging.exception(\"Failed to find an image for %s\", self)\n return None", "title": "" }, { "docid": "8a1988dde0eb6c7a838485297cc5a1ac", "score": "0.6138136", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'Images', index + self._image_ext)\n assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "f7d50c9ed612fa790e2936036b6ae430", "score": "0.6118029", "text": "def get_image_path(self):\n\t\treturn call_sdk_function('PrlVmDev_GetImagePath', self.handle)", "title": "" }, { "docid": "0f36ea14331f4c815730bf335bd751c5", "score": "0.605578", "text": "def imagefile(self):\n return self.__inputfilename", "title": "" }, { "docid": "01ff50dc31e2eb8e6ed4f45bd852ab3b", "score": "0.6052587", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, index)\n assert os.path.exists(image_path), 'path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "59a5cf8d8cfb126058bf62b4ece36590", "score": "0.60455924", "text": "def get_replay_folder(self):\n\n images_with_labels_folder = self.base_folder_path + \"/data/replay_images_and_labels/\"\n return images_with_labels_folder", 
"title": "" }, { "docid": "53f52818e31a1a8d275ce2629efeb446", "score": "0.6044128", "text": "def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")", "title": "" }, { "docid": "ce44732f67c5419576216eb89018ff42", "score": "0.60409415", "text": "def recipe_image_file_path(instance, filename):\n\n ext = filename.split('.')[-1]\n filename = f'{uuid.uuid4()}.{ext}'\n\n return os.path.join('uploads/recipe/', filename)", "title": "" }, { "docid": "ad4077c08f075883086a4378beee873b", "score": "0.60396683", "text": "def get_filepath(image):\r\n extension = (len(image.split('/')[-1:][0]))\r\n return image[:-extension]", "title": "" }, { "docid": "08af94b77c423e10cc2f2a11fe5776be", "score": "0.6027028", "text": "def name_to_path(img, origin):\n\n orig_file_parts = img.split('_')[1:]\n\n category = orig_file_parts[-3]\n filename = orig_file_parts[-2]+'_'+orig_file_parts[-1]\n\n orig_file = os.path.sep.join([origin, category])\n orig_file = os.path.sep.join([orig_file, filename])\n\n return orig_file", "title": "" }, { "docid": "d442a25b75fdec0c8b2fd6150a403394", "score": "0.6023852", "text": "def get_image_path(image_lists, label_name, index, image_dir, category):\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n \n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n \n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path", "title": "" }, { "docid": "dc3a315c3e1a5a8ad2177d9e1bd1350d", "score": "0.6022275", "text": "def imgOutputPath(self, newpath):\n newimg = self.imagePath().replace(self.home, newpath)\n return newimg", "title": "" }, { "docid": "9209e71069ff76562ee8e21e2ba02d4b", "score": "0.60168666", "text": "def image_2_path_at(self, i):\n image_path = os.path.join(self._data_path,'query',\n self.roidb[i].donor_file)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "07582d572d258eb44846ff08347f6642", "score": "0.6013142", "text": "def get_out_file_path(self):\n dir_path = self._get_output_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.FOCUSED_IMAGE_NAME)", "title": "" }, { "docid": "0200838d52d4219faa3d8da920717039", "score": "0.5993045", "text": "def full_path(self):\n return os.path.join(settings.MEDIA_ROOT, self.path)", "title": "" }, { "docid": "f3cfea1bc4105f5e8a0e3af851eebaad", "score": "0.59799343", "text": "def _getFullPath(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self)._getFullPath(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "title": "" }, { "docid": "74dbec71e48e93bea0b28620765bfb06", "score": "0.5978689", "text": "def student_image_file_path(instance, filename):\n ext = filename.split('.')[-1] # [-1] returns the last item from a list\n filename = f'{uuid.uuid4()}.{ext}'\n\n return os.path.join('uploads/students/', filename)", "title": "" }, { "docid": 
"3a07f06c2627581e4652a9f27d14d6d7", "score": "0.59741765", "text": "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "title": "" }, { "docid": "b553a40225e7eb4df5a3f1fc7407cc10", "score": "0.5971996", "text": "def test_image_path(self):\n self.assertEqual(\n self.mineral.image_path,\n 'minerals/images/some_filename.jpg')", "title": "" }, { "docid": "34287da6d500a97d5814f796db1f5a13", "score": "0.5971283", "text": "def image_path(dataset,\n experiment,\n plate,\n address,\n site,\n channel,\n base_path):\n return os.path.join(base_path, dataset, experiment, \"Plate{}\".format(plate),\n \"{}_s{}_w{}.png\".format(address, site, channel))", "title": "" }, { "docid": "b0f96641d70fdbe4b24b23f0810255e2", "score": "0.59669054", "text": "def path(self) -> str:\n return self.src + \"/\"", "title": "" }, { "docid": "990b72c6e9876ff875b9eff558bb3a41", "score": "0.5960352", "text": "def get_full_path(_path: str):\n if _path is None:\n return None\n\n with open(r\"bot\\data\\image_config.json\") as f:\n try:\n image_config = json.load(f)\n\n except json.decoder.JSONDecodeError as e:\n print(e)\n return None\n\n base_url = image_config.get(\"base_url\")\n poster_size = image_config.get(\"poster_sizes\")[-2]\n return f\"{base_url}{poster_size}{_path}\"", "title": "" }, { "docid": "ebc46d671d460ba9c9ffa869deef028a", "score": "0.59509236", "text": "def get_image_dir():\n directory = os.path.abspath(os.path.dirname(__file__))\n directory = os.path.join(directory, 'images')\n return directory", "title": "" }, { "docid": "5c0ae3b9cc9a2939b865338fa6682bd2", "score": "0.5934962", "text": "def get_image_fullpath(kapture_dir_path: str, image_filename: Optional[str] = None) -> str:\n return get_record_fullpath(kapture_dir_path, image_filename)", "title": "" }, { "docid": "c72462340186a9fa7ed6f061242fe964", "score": "0.5925715", "text": "def image_path_from_index(self, index):\n for ext in self._image_ext:\n image_path = os.path.join(self._data_path, 'Images',\n index + ext)\n if os.path.exists(image_path):\n break\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n\treturn image_path", "title": "" }, { "docid": "ddfcfdef0a151fe92dab5c01717dac08", "score": "0.5924495", "text": "def clinic_image_file_path(instance, file_path):\n file_name, file_ext = get_file_name_ext(file_path)\n random_file_name = generate_random_text(file_name)\n final_file_name = f\"clinic/{random_file_name}\"\n return final_file_name", "title": "" }, { "docid": "f9cfc918ed6b669716493aa9f55f0625", "score": "0.58993256", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self.cfg.file_path, 'JPEGImages',\n index + self.cfg.image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "d21e1f09aa371108bbc15a301319cefa", "score": "0.58802605", "text": "def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)", "title": "" }, { "docid": "ca70820e5d6e184173796512cd872e7b", "score": "0.5868258", "text": "def recipe_image_file_path(instance, filename):\n # Return the extention of the file name\n ext = filename.split('.')[-1]\n # Create a new name using the uuid\n filename = f'{uuid.uuid4()}.{ext}'\n\n # A relable method that allowed us to join 2 strings into a vaild path\n return os.path.join('uploads/recipe/', filename)", "title": "" }, { "docid": "6e93580422ea1287ceb6f33ce8b133a3", "score": "0.58652526", "text": "def get_image_filename(self, filename):\n path 
= 'images/{folder}/{filename}'.format(\n folder=self.folder,\n filename=filename\n )\n return path", "title": "" }, { "docid": "b90fa7583f75d7aa748fc2f205aee7ce", "score": "0.5858098", "text": "def imagePath(image):\n return os.path.join(\":/images\", image)", "title": "" }, { "docid": "b77209dc9aa7c1191fb346c3f3ca0578", "score": "0.58491546", "text": "def _image_location(image_info):\n return os.path.join(tempfile.gettempdir(), image_info['id'])", "title": "" }, { "docid": "f9557fce3d4ba04fc09c0bad47bcbba3", "score": "0.58460355", "text": "def get_image_url():", "title": "" }, { "docid": "c1c38026e5b0123795d1f948d23c68b3", "score": "0.5841986", "text": "def update_filepath(image):\n if image == \"none\":\n image = \"\"\n else:\n image = '/static/uploads/' + image\n return image", "title": "" }, { "docid": "87a0cda1d0b6d4b1971c331d53f144b9", "score": "0.584081", "text": "def path(self):\n return self.get_upload_set().path(self.filename)", "title": "" }, { "docid": "99ab1eb9d63b7e2e5d85c1372a3660ef", "score": "0.58388925", "text": "def image_upload_path(instance, filename):\n return \"adverts/{}/{}\".format(instance.uuid, filename)", "title": "" }, { "docid": "15c02a2c2bcfb04730947fee39f410a3", "score": "0.58215016", "text": "def create_train(train_img_path):\n\n f = open(\"train.txt\", \"w+\")\n for subdirs, dirs, files in os.walk(train_img_path):\n for filename in files:\n if filename.endswith(\".jpg\"):\n train_image_path = os.path.join(train_img_path, filename)\n print(train_image_path)\n f.write(train_image_path + \"\\n\")\n f.close()", "title": "" }, { "docid": "8b2fde3226269c269692ddefed5b4402", "score": "0.5816581", "text": "def _getImage(self, img):\n\n # lazily fill in some attributes\n if not 'local_file_path' in img:\n img['local_file_path'] = os.path.join(self.image_root, img['filename'])\n if not 'feat' in img: # also fill in the features\n # NOTE: imgid is an integer, and it indexes into features\n fn = os.path.basename(img['filename'])\n return img", "title": "" }, { "docid": "b5589755a0523372b5fe7fddb543deca", "score": "0.581282", "text": "def get_file_path(self):\n if self.path[0] in self._simulation_data.mfpath.model_relative_path:\n return os.path.join(\n self._simulation_data.mfpath.get_model_path(self.path[0]),\n self._filename,\n )\n else:\n return os.path.join(\n self._simulation_data.mfpath.get_sim_path(), self._filename\n )", "title": "" }, { "docid": "41e9d960f68273ad27538c2e75079d3b", "score": "0.5809328", "text": "def get_image_paths(self):\n return self.image_paths", "title": "" }, { "docid": "41e9d960f68273ad27538c2e75079d3b", "score": "0.5809328", "text": "def get_image_paths(self):\n return self.image_paths", "title": "" }, { "docid": "d1d7d770095ace06d009071e1c0261f8", "score": "0.5807375", "text": "def image_path_from_index(self, index):\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "9892575d09bfbf4c325cf716dc7ba580", "score": "0.57926506", "text": "def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path", "title": "" }, { "docid": "d73b67b1893ee3d6ab0d2cb9d5166a6b", "score": "0.57897186", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self.image_index[i])", "title": "" }, { "docid": "6acc22ac80ca34378ed1ebf4f284d249", "score": "0.57884985", "text": "def image_path_from_index(self, index):\n 
image_path = os.path.join(self._data_path,'query',\n index)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "title": "" }, { "docid": "c7531fe822448b79b28330481ffb7021", "score": "0.57874537", "text": "def image(self) -> str:\n return pulumi.get(self, \"image\")", "title": "" }, { "docid": "21bf7983d7793a3ef188b6e2b15471f5", "score": "0.578322", "text": "def roompost_image_file_path(instance, filename):\n ext = filename.split('.')[-1]\n filename = f'{uuid.uuid4()}.{ext}'\n\n return os.path.join('uploads/post/', filename)", "title": "" }, { "docid": "b3d668a4b7e119668972b9c8e17e2a5e", "score": "0.5781855", "text": "def parse_path(mode, image_type, is_label):\n suffix = \"labels\" if is_label else (\"images\" if image_type == ImageType.DIGIT else \"\")\n if image_type == ImageType.DIGIT:\n return f\"data/digitdata/{mode.path_infix}{suffix}\"\n if mode == Mode.TRAINING:\n return f\"data/facedata/facedatatrain{suffix}\"\n return f\"data/facedata/facedata{mode.path_infix}{suffix}\"", "title": "" }, { "docid": "a639faf5f667116fcd732eb542a0b852", "score": "0.57616484", "text": "def get_path(filename: str = None, folder: str = None) -> str:\n return IMAGE_SET.path(filename, folder)", "title": "" }, { "docid": "a8787d953b425636a880a56d72b823df", "score": "0.5752142", "text": "def _get_file_path(self, epoch, logs):\n # pylint: disable=protected-access\n if not self.model._in_multi_worker_mode(\n ) or multi_worker_util.should_save_checkpoint():\n try:\n # `filepath` may contain placeholders such as `{epoch:02d}` and\n # `{mape:.2f}`. A mismatch between logged metrics and the path's\n # placeholders can cause formatting to fail.\n return self.filepath.format(epoch=epoch + 1, **logs)\n except KeyError as e:\n raise KeyError('Failed to format this callback filepath: \"{}\". '\n 'Reason: {}'.format(self.filepath, e))\n else:\n # If this is multi-worker training, and this worker should not\n # save checkpoint, we use a temp filepath to store a dummy checkpoint, so\n # it writes to a file that will be removed at the end of `_save_model()`\n # call. This is because the SyncOnReadVariable needs to be synced across\n # all the workers in order to be read, and all workers need to initiate\n # that.\n self._temp_file_dir = tempfile.mkdtemp()\n extension = os.path.splitext(self.filepath)[1]\n return os.path.join(self._temp_file_dir, 'temp' + extension)", "title": "" }, { "docid": "caeea62373d5d837e61ea4d01f8cb885", "score": "0.5742764", "text": "def machine_learning_path(self) -> str:\n return join(self.directory_path, 'machinelearning')", "title": "" }, { "docid": "d2f3266fe141e1a4e98f5350b33040ab", "score": "0.5738519", "text": "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "title": "" } ]
b6db145505be221d5f1a34fffcaf86d3
Test sending simple input and echoing it back
[ { "docid": "e8838f529ec16f7ec00237b56f9a6b00", "score": "0.6970885", "text": "def __test_basic_input(conn):\n code= \"\"\"\ndef echo():\n api.send('hello')\n data= api.recv()\n api.send('echoing response...')\n api.send(data)\n \n \"\"\"\n token, errmsg= conn.send_store_request(\"echo\", code)\n if errmsg is not None:\n logging.error(f\"error message from server: {errmsg}\")\n else:\n logging.debug(f\"got token: {token}\")\n call= f\"echo()\"\n function_id, errmsg= conn.send_execute_request(call, token)\n if errmsg is not None:\n logging.error(f\"error message from server: {errmsg}\")\n else:\n logging.debug(f\"got function_id: {function_id}\")\n logging.debug(\"sending input\")\n logging.debug(f\"sending open request and attempting to recv output\")\n conn.send_open_request(function_id)\n conn.send_input(function_id, 'hello')\n while True:\n try:\n data, err= conn.recv_output()\n print(data)\n except Exception as e:\n print(e)\n break", "title": "" } ]
[ { "docid": "ca6bb92a4a52407a7fd63abd2c560c4d", "score": "0.6779401", "text": "def test(test_input):\n print(str(test_input))\n return 1", "title": "" }, { "docid": "4a28dd6b41fb3d80f2890367ac6f5628", "score": "0.67543083", "text": "def request(inquiry='What is you question?'):\n print(inquiry)\n return input(\" :\")", "title": "" }, { "docid": "9b85394964a84587748bf135c66bcd6c", "score": "0.6722108", "text": "def plainMessage():\n print (\"Message: \")\n return input()", "title": "" }, { "docid": "bd76b2aecce4778c6be337444fbb77c4", "score": "0.6709708", "text": "def test_confirm(self):\n with mock.patch(\"__builtin__.raw_input\", return_value=\"yes\"):\n utils.confirm(\"test string\", False)", "title": "" }, { "docid": "aa6c2828d4372dfbe15f41a9378d1fd6", "score": "0.6689178", "text": "def basic_input_request(message, expected_responses):\n while True:\n response = input(message).upper()\n if validate_choice(response, expected_responses):\n break\n return response", "title": "" }, { "docid": "6ccf44b6b72295b2349fa5dcd5446a3a", "score": "0.66609335", "text": "def test_echo_command_002(self):\n self.proto.lineReceived(b'echo test test\\n')\n self.assertEquals(self.tr.value(), b'test test\\n' + PROMPT)", "title": "" }, { "docid": "6a83f482207b7388d2dfdc4fa133b4ab", "score": "0.655926", "text": "def test_prompt_ty(self, input):\n self.assertEqual(mailroom.mailroom_prompt(), 'TY')", "title": "" }, { "docid": "eeef99a75cba2d615ffab3f4dff063df", "score": "0.6511053", "text": "def test_receive_c_echo(self):\n pass", "title": "" }, { "docid": "31adcd480d19d3e8fcca7e49c5fef5f8", "score": "0.64764977", "text": "def test_send_thanks(self, input):\n self.assertEqual(mailroom.send_thanks(), \"Doe Doe\")", "title": "" }, { "docid": "0819fc5619bf8f91161fadc6fc4fd555", "score": "0.6461077", "text": "def input(self):\n from prompt_toolkit import prompt\n\n text = prompt('Give me some input: ')\n print('You said: %s' % text)", "title": "" }, { "docid": "70f0538fc4ce8863b4e520c6c8972161", "score": "0.6459937", "text": "def test_prompt_re(self, input):\n self.assertEqual(mailroom.mailroom_prompt(), 'RE')", "title": "" }, { "docid": "c45bb8bebeba8edd44c620099097d62a", "score": "0.64321876", "text": "def fake_input(prompt):\n assert emitter.paused\n return \"\"", "title": "" }, { "docid": "54bd3391de4b5d07eb501edc9d9dc43e", "score": "0.64219874", "text": "def ask(message):\n py3 = version_info[0] > 2\n if py3:\n response = input(message)\n else:\n response = raw_input(message)\n return response", "title": "" }, { "docid": "1ad56ef80d3fd6e720cf469850191e4f", "score": "0.6407902", "text": "def test_getting_input(self):\n print \"Getting input from the user\"\n with mock_raw_input(\"berries\"):\n self.assertEqual(self.menu.get_user_input(), \"berries\")\n\n print \"Found Item\"", "title": "" }, { "docid": "dedcdc30116dea4948e6966984c93ef2", "score": "0.6406278", "text": "def test_echo(cam):\n cmd = [('cli', 'custom'), ('cmd', 'enableall'), ('value', 'true'),\n ('integer', 1234), ('float', 0.00234)]\n\n cam.send(cmd)\n response = cam.receive()[0]\n\n sent = tuples_as_dict(cam.prefix + cmd)\n\n assert sent == response", "title": "" }, { "docid": "ef7aa6789baeeac3432ee84009401a3b", "score": "0.6392581", "text": "def export_echo(self,input):\n\n return S_OK(input)", "title": "" }, { "docid": "e55bb8d04da695a650fe72d44af6d881", "score": "0.63884896", "text": "def test_prompt_q(self, input):\n self.assertEqual(mailroom.mailroom_prompt(), 'Q')", "title": "" }, { "docid": "9d2384fec2eb77427ba0bc5f594b5e40", "score": 
"0.63690245", "text": "def test_prompt_else(self, input):\n self.assertEqual(mailroom.mailroom_prompt(), 'You must type \"TY\" for\\\nthank-you emails,\"RE\" for a report, or \"Q\" to quit!')", "title": "" }, { "docid": "91e684d01fcd554c91505c93dc2f993c", "score": "0.634451", "text": "def test1(self):\n self.spawn(\"./water\").stdin(\"1\").stdout(bottles(12), \"12\\n\")", "title": "" }, { "docid": "bfb81a1624f6a99b16206c1c5bb9b6e6", "score": "0.6322677", "text": "def test_echo_command_005(self):\n self.proto.lineReceived(b'echo test > test5; cat test5')\n self.assertEquals(self.tr.value(), b'test\\n' + PROMPT)", "title": "" }, { "docid": "9e0814c1a9af82823b495c049bb43448", "score": "0.6320585", "text": "def test_input_and_output(self):\n # Test that the CLI input works first.\n test_input = \"This is some test input\"\n self.test_stdinout.to_send_data.append(test_input + '\\n')\n self.assertEqual(self.from_cli_q.get(timeout=0.5), test_input)\n\n # Now check the output. Pass a message to be written out.\n test_output = 'This is some test output'\n self.to_cli_q.put(test_output)\n # Wait for the output to be written.\n time.sleep(0.1)\n self.assertEqual(self.test_stdinout.received_data.pop(0),\n test_output + '\\n')\n\n # Check the CLI is still running happily.\n self.assertTrue(self.test_cli.running)\n\n # Finally, check there is no unread data in the files.\n self.assertEqual(self.test_stdinout.received_data, [])\n self.assertEqual(self.test_stdinout.to_send_data, [])", "title": "" }, { "docid": "512d6256fa0e4439374ba0476ebcd225", "score": "0.6317425", "text": "def test(self, cmd, input_text=None, timeout=None, echo=True, loud=False, loose=False, ignore_lines=None):\n return self._handle_cmd(cmd, input_text=input_text, timeout=timeout, echo=echo, loose=loose, loud=loud, want_output=True, ignore_lines=ignore_lines)", "title": "" }, { "docid": "b90dec838c94d4965421a6cd1235b2c1", "score": "0.6290481", "text": "def test_EnteringChars(self):\n test_str = b'abc'\n input_stream = BytesToByteList(test_str)\n\n # Send the characters in.\n for byte in input_stream:\n self.console.HandleChar(byte)\n\n # Check the input position.\n exp_pos = len(test_str)\n CheckInputBufferPosition(self, exp_pos)\n\n # Verify that the input buffer is correct.\n expected_buffer = test_str\n CheckInputBuffer(self, expected_buffer)\n\n # Check console output\n exp_console_out = test_str\n CheckConsoleOutput(self, exp_console_out)", "title": "" }, { "docid": "29f26658b8d7d00ec80657be7da729e7", "score": "0.62743866", "text": "def _test_process_input(self):", "title": "" }, { "docid": "02d322737d396332db9f4b4419ab1a73", "score": "0.6248013", "text": "def test_echo_command_009(self):\n self.proto.lineReceived(b'echo test | grep test')\n self.assertEquals(self.tr.value(), b'test\\n' + PROMPT)", "title": "" }, { "docid": "4bbaf15b25179f94b53d5f84ac895b14", "score": "0.623098", "text": "def test_output_case2(self):\n user_input = [\"100\", \"10\", \"5\"]\n expected = \"This is a program to model the population of fish over time, with a growth rate of 0.05.\\nYear - Number of Fish\\n1 - 55\\n2 - 43\\n3 - 36\\n4 - 31\\n5 - 28\\nThe fish population changed by 72 fish in 5.0 years.\"\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(), expected)", "title": "" }, { "docid": "052b3163cb48be665f9d818e02ac21fd", "score": "0.6227807", "text": "def test_confirm_asks_again_for_invalid_input(self):\n 
returns = [\"invalid input\", \"yes\"]\n side_effect = lambda x: returns.pop(0)\n\n with mock.patch(\"__builtin__.raw_input\", side_effect=side_effect):\n utils.confirm(\"test string\", False)", "title": "" }, { "docid": "d6b742854233c7aed5cdd67421aa8e5b", "score": "0.6225542", "text": "def talk():\n try:\n while True:\n text = input()\n post(text)\n except KeyboardInterrupt:\n pass", "title": "" }, { "docid": "cf0aa40d3c6ed62b6dca380d19a2ed26", "score": "0.6224488", "text": "def test_echo_command_010(self):\n self.proto.lineReceived(b'echo test | grep test2')\n self.assertEquals(self.tr.value(), PROMPT)", "title": "" }, { "docid": "571f9dccb64d267d6531ed23b94fb6a3", "score": "0.6198619", "text": "def test_echo_command_007(self):\n self.proto.lineReceived(b'echo test >> test7; cat test7')\n self.assertEquals(self.tr.value(), b'test\\n' + PROMPT)", "title": "" }, { "docid": "16c72b634a60874e7dcc1514ab106551", "score": "0.6173226", "text": "def get_input(message):\n\n answer = input(message + ': ')\n\n return answer", "title": "" }, { "docid": "5ecb72a9e96ec4c587073331c5c6fdd0", "score": "0.61547065", "text": "def test_do_prompt_true(self):\n resps = ['', 'y']\n self.input_mock.side_effect = lambda x: resps.pop(0)\n\n var = Bool('foo')\n self.assertEqual(var.do_prompt(), True)\n self.assertEqual(self.input_mock.call_count, 2)", "title": "" }, { "docid": "fc794430a4ea5368cb6d16cb2c469a03", "score": "0.61483806", "text": "def pyinput(self, message: str = '', verb_level: int = 0):\n total_Message = \"Please input {}: \".format(message)\n out = input(total_Message)\n if verb_level <= self.verbosity:\n full_msg = self._make_full_msg(total_Message, verb_level)\n self._write(self.Yellow, full_msg, False)\n return out", "title": "" }, { "docid": "788528e076422784e52f6d00c6ea2068", "score": "0.6145046", "text": "def test_handle_text_input(self):\n controller = Controller()\n event = sdl2.SDL_Event()\n event.type = sdl2.SDL_TEXTINPUT\n\n event.text.text = b'1'\n controller.handle_text_input(event)\n self.assertEqual(controller.text, '1')\n\n event.text.text = b'0'\n controller.handle_text_input(event)\n self.assertEqual(controller.text, '10')\n\n event.text.text = b'a'\n controller.handle_text_input(event)\n self.assertEqual(controller.text, '')", "title": "" }, { "docid": "9fda4f5502fa8db41353b0e5513e3139", "score": "0.6139527", "text": "def test_echo_command_008(self):\n self.proto.lineReceived(b'echo test > test8; echo test >> test8; cat test8')\n self.assertEquals(self.tr.value(), b'test\\ntest\\n' + PROMPT)", "title": "" }, { "docid": "869b43e7c0ae4dd89a061ef5d1b12214", "score": "0.61182046", "text": "def test_input1():\n return '12345678'", "title": "" }, { "docid": "4c9080e77629f92f30610d983e5a4ed8", "score": "0.6117694", "text": "def test_option_1(controller):\n with mock.patch.object(builtins, 'input', side_effect= [1, \"\", 6]):\n check = controller.run()\n assert check == False", "title": "" }, { "docid": "2a3a7b4802cc143fd5199a3e59265f0c", "score": "0.6111642", "text": "def test_echo_command_011(self):\n self.proto.lineReceived(b'echo test > test011; cat test011 | grep test')\n self.assertEquals(self.tr.value(), b'test\\n' + PROMPT)", "title": "" }, { "docid": "36a3cf68b7432d4dfa780053a2f98b2f", "score": "0.61115235", "text": "def test_getting_input_main_menu(self):\n print \"Getting input from the user\"\n with mock_raw_input(\"1\"):\n self.assertEqual(self.menu.main_menu(), \"1\")\n\n print \"Main Menu Entered\"", "title": "" }, { "docid": "916d13ded821cd07052cdafa2738fbb3", "score": 
"0.61111337", "text": "def test_stdin_input(self, api_client, text):\n runner = CliRunner()\n\n api_client.analyze.return_value = self.DEFAULT_API_RESPONSE\n\n result = runner.invoke(subcommand.analyze, input=text)\n assert result.exit_code == 0\n assert result.output == self.DEFAULT_OUTPUT\n assert api_client.analyze.call_args[0][0].read() == text", "title": "" }, { "docid": "33343bbcc2bf82ffa079f54e3faa28e0", "score": "0.6108689", "text": "def test_stdin_input(self, api_client, ip_address, expected_response):\n runner = CliRunner()\n\n api_client.interesting.return_value = expected_response\n\n result = runner.invoke(subcommand.interesting, input=ip_address)\n assert result.exit_code == 0\n assert result.output == \"\"\n api_client.interesting.assert_called_with(ip_address=ip_address)", "title": "" }, { "docid": "df2268bff00dd142e781162e62f56eea", "score": "0.6089178", "text": "def test_output_case1(self):\n user_input = [\"10\", \"50\", \"10\"]\n expected = \"This is a program to model the population of fish over time, with a growth rate of 0.05.\\nYear - Number of Fish\\n1 - 10\\n2 - 11\\n3 - 11\\n4 - 12\\n5 - 12\\n6 - 13\\n7 - 13\\n8 - 14\\n9 - 14\\n10 - 15\\nThe fish population changed by 5 fish in 10.0 years.\"\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(), expected)", "title": "" }, { "docid": "763eac353a1f944f3a14a7b4810e6c90", "score": "0.60712403", "text": "def test_stdin_input(self, api_client, text, expected_output):\n runner = CliRunner()\n\n api_client.filter.return_value = expected_output\n\n result = runner.invoke(subcommand.filter, input=text)\n assert result.exit_code == 0\n assert result.output == \"\".join(expected_output)\n assert api_client.filter.call_args[0][0].read() == text\n assert api_client.filter.call_args[1] == {\"noise_only\": False}", "title": "" }, { "docid": "4773accd1b73da2928ab35b514be9732", "score": "0.6060128", "text": "def test_simple(self):\n self.protocol.dataReceived(b\"moshez\\r\\n\")\n self.assertEqual(\n self.transport.value(),\n b\"Login: moshez\\nNo such user\\n\")", "title": "" }, { "docid": "dacb97598a15e57ca08b8e83e6b73d79", "score": "0.60453534", "text": "def main():\n input_a_string()\n input_an_integer()\n input_a_float()", "title": "" }, { "docid": "ff20687c39693b38957313a02a597dd7", "score": "0.6037515", "text": "def test_pos(self):\n\t\tuser_input = str(randint(0,100)) # random number between 0 and 100\n\t\twith patch(\"builtins.input\", return_value=user_input) as input_call:\n\t\t\twith patch(\"sys.stdout\", new=StringIO()) as output:\n\t\t\t\timport attempt\n\t\t\t\tself.assertEqual(output.getvalue().strip(), \"Glad to see you're feeling non-negative!\")", "title": "" }, { "docid": "5a5df330d22a3296c51a832d517379db", "score": "0.60282546", "text": "def unit_test():\r\n while True:\r\n wrd = raw_input(\"Enter word: \")\r\n if wrd == 'q' or wrd == \"quit\":\r\n return\r\n print_wrd_info(wrd)\r\n print ''", "title": "" }, { "docid": "e251ece5d58a3d989e4cb33baf61db95", "score": "0.6028071", "text": "def test_choice(self):\n with mock.patch(\"__builtin__.raw_input\", return_value=\"1\"):\n utils.prompt_choice([\"first test option\", \"second test option\", \"third test option\"])", "title": "" }, { "docid": "ddf662a52028204600fe9dc253ca0ee8", "score": "0.6025513", "text": "def gets_message_to_send():\n\n to_message = raw_input(\"Say something sessy!\")\n return to_message", "title": "" }, { 
"docid": "6414191b1bcdf632e9129a09f6743340", "score": "0.6018365", "text": "def get_input(message):\n val = input(message).strip()\n return val", "title": "" }, { "docid": "adaf5d4c01c1a38c1f95ab2a163afed6", "score": "0.60096306", "text": "def test_input():\n job = SubprocessJob()\n result = job.run([\"sed\", \"-e\", \"\"], input=b\"Yo!\")\n assert result[\"result\"] == \"Yo!\"\n assert result[\"log\"] == []", "title": "" }, { "docid": "5483205b8ba334ad2a29cadfb6245cc8", "score": "0.60077614", "text": "def test_prompt_for_new_game(self,mock_stdout):\n\n self.view.prompt_play_again()\n self.assertEqual(\"Would you care to play again? Yes or No (Y / N) \\n\"\n \"\\n\", mock_stdout.getvalue())", "title": "" }, { "docid": "fceee4a6196d3fa6ea5d9c40e5dab5bb", "score": "0.6002772", "text": "def input_text(): # required by Whand\r\n if sys.version_info[0]==2:\r\n text=raw_input() # Python 2.7\r\n else:\r\n text=input() # Python 3\r\n return text", "title": "" }, { "docid": "082304460495e62585f536265fcdf399", "score": "0.6000705", "text": "def test_explicit_stdin_input(self, api_client, text, expected_output):\n runner = CliRunner()\n\n api_client.filter.return_value = expected_output\n\n result = runner.invoke(subcommand.filter, [\"-i\", \"-\"], input=text)\n assert result.exit_code == 0\n assert result.output == \"\".join(expected_output)\n assert api_client.filter.call_args[0][0].read() == text\n assert api_client.filter.call_args[1] == {\"noise_only\": False}", "title": "" }, { "docid": "6b08c9db17e1112d5acc54d78c3508ae", "score": "0.59996825", "text": "def input_string(question=\"\"):\n if sys.version_info[0] == 2:\n return raw_input(question)\n else:\n return input(question)", "title": "" }, { "docid": "0f0a10dbdf479ea9febd7c5a6e40f716", "score": "0.59911925", "text": "def test_output_for_all_right_inputs(self):\r\n sys_orig = sys.stdout\r\n my_file = StringIO()\r\n\r\n sys.stdout = my_file \r\n calculator.run()\r\n out_put = my_file.getvalue() #actually capturing the output so that we can test it\r\n\r\n self.assertEqual(\"\"\"Welcome friend ! lets go !\\n Please enter 2 numbers seperated by a comma :\\nPlease choose and operator : + ,- ,/ ,* :\\nThe result is : 16\\n Please enter 2 numbers seperated by a comma :\\n\"\"\",out_put)\r\n \r\n sys.stdout = sys_orig", "title": "" }, { "docid": "e68503736acd9c98b7bf895d916a80da", "score": "0.5986987", "text": "def test_input_output(self, input_str, expected_output):\n assert utils.sanitize_body(input_str) == expected_output", "title": "" }, { "docid": "3f294356462c03c07a128a0eac2de973", "score": "0.5973941", "text": "def test_teach_ask_again(self):\n qa_inst = Interface()\n Interface.ask(qa_inst, \"Who am I?\")\n Interface.teach(qa_inst, \"Kat\")\n result = Interface.ask(qa_inst, \"Who am I?\")\n self.assertEquals(result, \"Kat\")\n result = Interface.teach(qa_inst, \"Julie\")\n self.assertEquals(result, \"I don't know about that. 
I was taught differently\")", "title": "" }, { "docid": "009aad22742663552b2e93851825c5b9", "score": "0.5967645", "text": "def test_simpleW(self):\n self.protocol.dataReceived(b\"/w moshez\\r\\n\")\n self.assertEqual(\n self.transport.value(),\n b\"Login: moshez\\nNo such user\\n\")", "title": "" }, { "docid": "c2fecb08db2e93377bbe33e69cda7d64", "score": "0.5963913", "text": "def test_confirm_unicode(self):\n with mock.patch(\"__builtin__.raw_input\", return_value=\"yes\"):\n utils.confirm(u\"test string with unicode: \\u2007\", False)", "title": "" }, { "docid": "f82fa49e29a3c0d85819ecb7273ca7af", "score": "0.59619385", "text": "def test2(self):\n self.spawn(\"./water\").stdin(\"2\").stdout(bottles(24), \"24\\n\")", "title": "" }, { "docid": "0b2bd990da3fcc08900a82efc14e48a3", "score": "0.5956858", "text": "def test_press_of_input_button_by_value(self):", "title": "" }, { "docid": "bc9ad4154aea7a69bdb3719afaea4e18", "score": "0.59423864", "text": "def test_zero(self):\n\t\tuser_input = str(0) # random number between 0 and 100\n\t\twith patch(\"builtins.input\", return_value=user_input) as input_call:\n\t\t\twith patch(\"sys.stdout\", new=StringIO()) as output:\n\t\t\t\timport attempt\n\t\t\t\tself.assertEqual(output.getvalue().strip(), \"Glad to see you're feeling non-negative!\")", "title": "" }, { "docid": "ae3bea6ea413a1779482d32dfe19d093", "score": "0.5941939", "text": "def better_input(query: str) -> str:\n try:\n response = input(query).strip()\n return response\n except KeyboardInterrupt:\n print(\"\\nGood Bye~ \")\n exit(1)", "title": "" }, { "docid": "fa0b12a02561dc8c0d53127646c2162a", "score": "0.5930553", "text": "def test_input1() -> None:\n\n run(*TEST1)", "title": "" }, { "docid": "3d99bca87c5f8c17642b00ad932e1bc9", "score": "0.5930546", "text": "def inputer(question):\n print(question)\n ans = raw_input(\"\")\n return ans", "title": "" }, { "docid": "728e414626d76ee7c42acecde413a9ac", "score": "0.59289527", "text": "def test_no_input(self):\n # Mock user input\n with mock.patch('builtins.input', return_value=''):\n self.assertRaisesRegex(ValueError, main())", "title": "" }, { "docid": "2b08da04c048e22c97ad6bb8d318cfef", "score": "0.5926872", "text": "def test1(self):\n self.spawn(\"./water\").stdin(\"1\").stdout(\"^.*12.*$\", 12)", "title": "" }, { "docid": "8e0a6d0eae968d96a1eb6d11c6f50949", "score": "0.59263945", "text": "def retrieve():\r\n r = input()\r\n return r", "title": "" }, { "docid": "820780d9891a5c4bc5d26a4ec648a560", "score": "0.59237295", "text": "def textinput(question, require_answer=False):\n while True:\n sys.stdout.write(question)\n if sys.version_info >= (3, 0):\n answer = input().lower()\n else:\n answer = raw_input().lower()\n\n if not (require_answer and answer.strip() == ''):\n return answer", "title": "" }, { "docid": "49d56ef21a4d29922a695462619bd111", "score": "0.5922919", "text": "def input_assert(message: str, choices: List[str]) -> str:\n output = input(message).lower()\n if output not in choices:\n print('Wrong input!')\n return input_assert(message, choices)\n else:\n return output", "title": "" }, { "docid": "279a296c1e8b2584d047449463f405e6", "score": "0.5920174", "text": "def test_do_prompt_empty_default(self):\n resps = ['']\n self.input_mock.side_effect = lambda x: resps.pop(0)\n\n var = Var('foo', default='')\n self.assertEqual(var.do_prompt(), '')\n self.assertEqual(self.input_mock.call_count, 1)", "title": "" }, { "docid": "03482430e1a98b558692b010e5f2c12c", "score": "0.59156495", "text": "def input_message(message):\n temp = 
input(message)\n if temp == '':\n temp = None\n return temp", "title": "" }, { "docid": "8361976e4414e6df8106d240589f731e", "score": "0.5912935", "text": "def test001(self):\n self.spawn(\"python greedy.py\").stdin(\"0.01\").stdout(coins(1), \"1\\n\").exit(0)", "title": "" }, { "docid": "28cb06e39423727781ffe4fded8a959e", "score": "0.5908624", "text": "def f_ping(cenni, input):\n cenni.say('pong')", "title": "" }, { "docid": "e429beb0d66dd10f5fbbfb7f73e45670", "score": "0.59070414", "text": "def ui_input() -> str:\n string = input('Your string: ')\n return string", "title": "" }, { "docid": "604aaf9fb3d9fb9de3b5cb40d915f25c", "score": "0.58965975", "text": "def test_stdin_input(self, api_client):\n runner = CliRunner()\n\n query = \"<query>\"\n api_client.query.return_value = []\n expected = json.dumps([[]], indent=4, sort_keys=True)\n\n result = runner.invoke(subcommand.query, [\"-f\", \"json\"], input=query)\n assert result.exit_code == 0\n assert result.output.strip(\"\\n\") == expected\n api_client.query.assert_called_with(query=query)", "title": "" }, { "docid": "673f9ffd4c0fec4ab0f7041b438c4bf6", "score": "0.5885981", "text": "def test_stdin_input(self, api_client, ip_address, expected_response):\n runner = CliRunner()\n\n api_client.ip.return_value = expected_response\n\n result = runner.invoke(subcommand.ip, [\"-f\", \"json\"], input=ip_address)\n assert result.exit_code == 0\n assert result.output.strip(\"\\n\") == json.dumps(\n [expected_response], indent=4, sort_keys=True\n )\n api_client.ip.assert_called_with(ip_address=ip_address)", "title": "" }, { "docid": "09e40c0bef75a17b94aade16421ec2b3", "score": "0.5884543", "text": "def test_do_prompt_false(self):\n resps = ['', 'n']\n self.input_mock.side_effect = lambda x: resps.pop(0)\n\n var = Bool('foo')\n self.assertEqual(var.do_prompt(), False)\n self.assertEqual(self.input_mock.call_count, 2)", "title": "" }, { "docid": "2ff51780493d95ff9f5fa915deaf3187", "score": "0.58695", "text": "def test_echo_command_012(self):\n self.proto.lineReceived(b'echo test > test012; grep test test012')\n self.assertEquals(self.tr.value(), b'test\\n' + PROMPT)", "title": "" }, { "docid": "d6c1f6708fcd5d57dfc973d6a3f80e34", "score": "0.5868891", "text": "def test015(self):\n self.spawn(\"python greedy.py\").stdin(\"0.15\").stdout(coins(2), \"2\\n\").exit(0)", "title": "" }, { "docid": "bb72f659e32d04b00e77cf9a7ad7cacb", "score": "0.58629644", "text": "def test_do_prompt_with_default(self):\n resps = ['']\n self.input_mock.side_effect = lambda x: resps.pop(0)\n\n var = Var('foo', default='baz')\n self.assertEqual(var.do_prompt(), 'baz')\n self.assertEqual(self.input_mock.call_count, 1)", "title": "" }, { "docid": "d74ee715fc7ce8877b936551553c9e06", "score": "0.5861715", "text": "def test_running(controller):\n with mock.patch.object(builtins, 'input', side_effect= [6]):\n check = controller.run()\n assert check == False", "title": "" }, { "docid": "44861d69aad2daa621521e0a546cf170", "score": "0.5859341", "text": "def echo():\n\n# To do that, we're going to turn our echo function into a generator.\n# Generators are a really powerful feature of python, that you should\n# look up if you have time. The unique feature about generators is\n# that they don't return all at once, they yield one thing, and can\n# then be run again, yielding another, until they're done. 
This\n# function returns one character from the text you give it, sleeps for\n# 5/100ths of a second, then prints another.\n\tfor char in request.body.read():\n\t\tsleep(0.05)\n\t\tyield char", "title": "" }, { "docid": "10149358dea84d5e416bc25fbf2c03e8", "score": "0.58292305", "text": "def prompt_echo(\n context: Context,\n output: Response,\n success: Pattern = None,\n failure: Pattern = None,\n repeat: Pattern = None,\n force_newline: bool = False\n):\n success = re.compile(success, re.IGNORECASE) if success else None\n failure = re.compile(failure, re.IGNORECASE) if failure else None\n repeat = re.compile(repeat, re.IGNORECASE) if repeat else None\n\n user_input = click.prompt(output.full_text)\n\n res = context.client.send_message_await(user_input)\n\n if repeat:\n while repeat.search(res.full_text):\n user_input = click.prompt(res.full_text)\n res = context.client.send_message_await(user_input)\n\n if success:\n if not success.search(res.full_text):\n return disconnect(context, error=res.full_text)\n if failure:\n if failure.search(res.full_text):\n return disconnect(context, error=res.full_text)\n\n if force_newline or len(res.full_text.split('\\n')) >= 2:\n click.echo()\n\n return res", "title": "" }, { "docid": "d3987416acb61e28f4055776695b1e1e", "score": "0.582668", "text": "def test_parse_raw_input_text(self):\n response = utility.ExecutorResponse()\n response._stdout = 'non-json string'\n response._parse_raw_input()\n\n self.assertEqual(response._json, '\"non-json string\"')\n self.assertEqual(response._parsed_output, 'non-json string')", "title": "" }, { "docid": "d3987416acb61e28f4055776695b1e1e", "score": "0.582668", "text": "def test_parse_raw_input_text(self):\n response = utility.ExecutorResponse()\n response._stdout = 'non-json string'\n response._parse_raw_input()\n\n self.assertEqual(response._json, '\"non-json string\"')\n self.assertEqual(response._parsed_output, 'non-json string')", "title": "" }, { "docid": "4413629a68a52fa1cf70edff3ef506bf", "score": "0.58205974", "text": "def _get_user_input():\n print(\"> \", end=\"\")\n sys.stdout.flush()\n return sys.stdin.readline()", "title": "" }, { "docid": "4413629a68a52fa1cf70edff3ef506bf", "score": "0.58205974", "text": "def _get_user_input():\n print(\"> \", end=\"\")\n sys.stdout.flush()\n return sys.stdin.readline()", "title": "" }, { "docid": "4413629a68a52fa1cf70edff3ef506bf", "score": "0.58205974", "text": "def _get_user_input():\n print(\"> \", end=\"\")\n sys.stdout.flush()\n return sys.stdin.readline()", "title": "" }, { "docid": "ebaa14bfb3371bce9d40378d09ad5da9", "score": "0.58144253", "text": "def test_open_door_hal(self):\n qa_inst = Interface()\n result = Interface.ask(qa_inst, \"Open the door hal\")\n self.assertEqual(result, \"I'm afraid I can't do that \" + getpass.getuser())", "title": "" }, { "docid": "17defe653d57fc4f7f8555689f337896", "score": "0.5810013", "text": "def test230(self):\n self.spawn(\"python greedy.py\").stdin(\"23\").stdout(coins(92), \"92\\n\").exit(0)", "title": "" }, { "docid": "090b3838958786a967103ba864f9e7dd", "score": "0.5794553", "text": "def test_stdin_input(self, api_client, ip_addresses, mock_response, expected):\n runner = CliRunner()\n\n api_client.quick.return_value = mock_response\n\n result = runner.invoke(\n subcommand.quick, [\"-f\", \"json\"], input=\"\\n\".join(ip_addresses)\n )\n assert result.exit_code == 0\n assert result.output.strip(\"\\n\") == expected\n api_client.quick.assert_called_with(ip_addresses=ip_addresses)", "title": "" }, { "docid": 
"ca6046c0f8169a09892ff9eabbb98323", "score": "0.5792038", "text": "def test_do_prompt_with_default(self):\n resps = ['', ]\n self.input_mock.side_effect = lambda x: resps.pop(0)\n\n var = Bool('foo', default=False)\n self.assertEqual(var.do_prompt(), False)\n self.assertEqual(self.input_mock.call_count, 1)", "title": "" }, { "docid": "e8e2542ae981daf9cb144b34711f6da7", "score": "0.57900566", "text": "def test_ssh_echo(self, address):\n return self.test_ssh(\n address,\n 'echo',\n ['CANARY_PAYLOAD']\n )", "title": "" }, { "docid": "d23f240bc4dbbc9bd24e0775acbadc01", "score": "0.5789995", "text": "def test_say(self):\n self.console.msgPrefix = \"B3:\"\n self.console.say(\"something\")\n verify(self.output_mock).write('say B3: something')", "title": "" }, { "docid": "f1ba201d13f13a8fe99519e8d2ed28e0", "score": "0.5789776", "text": "def test_context_ask():\n assert Context[int].ask()(1) == 1\n assert Context[str].ask()('a') == 'a'", "title": "" }, { "docid": "5d6490fc40a00f9b9bc6080b5a0720f5", "score": "0.5787468", "text": "def testsimple():\n click.echo(\"Test\")", "title": "" }, { "docid": "6f586cbfb6bb1c8ee45804404f13ac6f", "score": "0.5778514", "text": "def test_explicit_stdin_input(self, api_client, text):\n runner = CliRunner()\n\n api_client.analyze.return_value = self.DEFAULT_API_RESPONSE\n\n result = runner.invoke(subcommand.analyze, [\"-i\", \"-\"], input=text)\n assert result.exit_code == 0\n assert result.output == self.DEFAULT_OUTPUT\n assert api_client.analyze.call_args[0][0].read() == text", "title": "" }, { "docid": "ee70c8b3a16ed418ee9b16955ef2f432", "score": "0.5775258", "text": "def test3(self):\r\n self.spawn(\"java Divisible2\").stdin(\"32\").stdout(\"2^5\\n\", \"2^5\\n\")", "title": "" } ]
aca5a5ef0b7fa6f75b18141d8962491e
Train for one epoch.
[ { "docid": "7072e85b9c4c058931cd666cb5f6add8", "score": "0.0", "text": "def train_cn_consistency(net, train_loader, optimizer, scheduler):\n print('running train_cn_consistency')\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n s_losses = AverageMeter()\n c_losses = AverageMeter()\n net.train()\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n data_time.update(time.time() - end)\n\n input, target = input.cuda(), target.cuda()\n\n # compute output\n r = np.random.rand(1)\n if r < args.cn_prob:\n logits_clean = net(input, aug=False)\n # Cross-entropy is only computed on clean images\n loss = F.cross_entropy(logits_clean, target)\n\n logits_aug1 = net(input, aug=True)\n logits_aug2 = net(input, aug=True)\n\n p_clean, p_aug1, p_aug2 = F.softmax(\n logits_clean, dim=1), F.softmax(\n logits_aug1, dim=1), F.softmax(\n logits_aug2, dim=1)\n\n # Clamp mixture distribution to avoid exploding KL divergence\n p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()\n consist_loss = (F.kl_div(p_mixture, p_clean, reduction='batchmean') +\n F.kl_div(p_mixture, p_aug1, reduction='batchmean') +\n F.kl_div(p_mixture, p_aug2, reduction='batchmean')) / 3.\n\n s_losses.update(loss.item(), input.size(0))\n c_losses.update(consist_loss.item(), input.size(0))\n loss += args.consist_wt * consist_loss\n losses.update(loss.item(), input.size(0))\n else:\n output = net(input, aug=False)\n loss = F.cross_entropy(output, target)\n s_losses.update(loss.item(), input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n scheduler.step()\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Supervised Loss {s_losses.val:.4f} ({s_losses.avg:.4f})\\t'\n 'Consistency Loss {c_losses.val:.4f} ({c_losses.avg:.4f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'.format(\n s_losses=s_losses, c_losses=c_losses, loss=losses))\n\n return losses.avg", "title": "" } ]
[ { "docid": "3ab8b4deed91d5005b9e0436d2f04d99", "score": "0.90381783", "text": "def train_one_epoch(self):\n pass", "title": "" }, { "docid": "a77c909a79c94abcf34eb1cb53ea77d6", "score": "0.84027237", "text": "def train(self):\n\n for epoch in range(self.current_epoch, self.config.max_epoch):\n self.current_epoch = epoch\n self.train_one_epoch()", "title": "" }, { "docid": "6b1dd213c1b3ff55078bddedad2a162b", "score": "0.8283078", "text": "def train_one_epoch(self) -> None:\n self.handler.fire_event(Events.EPOCH_START)\n\n self.train_loss = 0.0\n # Set the model to \"train\" mode.\n self.model.train()\n\n self.last_log = time.time()\n\n logger.info(\"Training\")\n self.batches_this_epoch = 0\n\n batch_groups_tqdm = Tqdm.tqdm(self.training_batches, total=self.num_training_batches)\n\n for self.batch_group in batch_groups_tqdm:\n description = self.train_one_batch_group(self.batch_group)\n batch_groups_tqdm.set_description(description, refresh=False)\n\n self.handler.fire_event(Events.VALIDATE)\n self.handler.fire_event(Events.EPOCH_END)", "title": "" }, { "docid": "deeace54315f96188302f72f7205fdb9", "score": "0.8134534", "text": "def _train_epoch(self, epoch):\n raise NotImplementedError", "title": "" }, { "docid": "deeace54315f96188302f72f7205fdb9", "score": "0.8134534", "text": "def _train_epoch(self, epoch):\n raise NotImplementedError", "title": "" }, { "docid": "deeace54315f96188302f72f7205fdb9", "score": "0.8134534", "text": "def _train_epoch(self, epoch):\n raise NotImplementedError", "title": "" }, { "docid": "4930182b6b46c0c18f0a4cee0c1d673e", "score": "0.7933903", "text": "def epoch_start(self):\n self.train_loss, self.num_batch = torch.zeros((), device=TORCH_DEVICE), 0\n\n self.valid_loss = np.inf\n self.is_best = False\n\n self.train()", "title": "" }, { "docid": "073b29727c2620e6516303727154c496", "score": "0.77863115", "text": "def train(self):\n self.epoch = 0\n self.step = 0\n self.val()\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()\n\n avg_epoch_training_time = np.array(self.epoch_training_times).mean()\n print(\"average training time per epoch:\", sec_to_hm_str(avg_epoch_training_time))", "title": "" }, { "docid": "471bc508ad7e0e632a1704a82e9ad9ef", "score": "0.7684001", "text": "def train_epoch(self, dl_train: DataLoader, **kw) -> EpochResult:\n self.model.train(True) # set train mode\n return self._foreach_batch(dl_train, self.train_batch, **kw)", "title": "" }, { "docid": "b0a6a0e4312b902a7d296a8eafdd44ca", "score": "0.7657663", "text": "def train_one_epoch():\n time_start = datetime.now()\n\n MODEL.train()\n \n loss = 0.\n loss_10_batches = 0.\n batch_idx = 0\n m = ceil(len(TRAIN_DATASET) / BATCH_SIZE)\n beta = 1 / m if model_type == \"bayesian\" else 0\n\n log_string(\"---- EPOCH %03d TRAINING ----\" % EPOCH_CNT)\n log_string(str(time_start))\n\n log_string(\"learning rate: \" + str(optimizer.param_groups[0][\"lr\"]))\n\n while TRAIN_DATASET.has_next_batch():\n # (BATCH_SIZE, TRAIN_DATASET.num_channels, TRAIN_DATASET.window, TRAIN_DATASET.num_features), (BATCH_SIZE)\n sample_batch, label_batch = TRAIN_DATASET.next_batch()\n bsize = sample_batch.shape[0]\n sample_batch = torch.tensor(sample_batch).float().to(DEVICE)\n label_batch = torch.tensor(label_batch).float().to(DEVICE)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # compute prediction\n pred_batch = MODEL(sample_batch)\n\n # compute loss\n loss_batch = 
MODEL.loss(pred_batch, label_batch, beta)\n\n # backpropagation\n loss_batch.backward()\n\n # optimization step\n optimizer.step()\n\n loss_10_batches += loss_batch / bsize\n loss += loss_batch / bsize\n if (batch_idx + 1) % 10 == 0:\n loss_10_batches /= 10\n log_string(\" ---- batch: %03d ----\" % (batch_idx + 1))\n log_string(\"mean loss: %.2f\" % loss_10_batches.item())\n loss_10_batches = 0.\n batch_idx += 1\n\n loss /= float(batch_idx)\n loss = loss.item()\n log_string(\"train mean loss: %.2f\" % loss)\n\n TRAIN_DATASET.reset()\n\n delta = datetime.now() - time_start\n log_string(\"epoch train time: \" + str(delta))\n return loss", "title": "" }, { "docid": "4e5f74cdc4e7a99a4694c43346c1797d", "score": "0.7634456", "text": "def train_one_epoch(self):\n # Initialize tqdm\n tqdm_batch = tqdm(self.data_loader.train_loader, total=self.data_loader.train_iterations,\n desc=\"Epoch-{}-\".format(self.current_epoch))\n\n # Set the model to be in training mode (for batchnorm)\n self.model.train()\n\n for x, y in tqdm_batch:\n ## TODO\n x, y = x.float(), y.float()\n if self.cuda:\n x, y = x.pin_memory().cuda(non_blocking=self.config.non_blocking), y.cuda(non_blocking=self.config.non_blocking)\n\n __import__('pdb').set_trace() \n # model\n pred = self.model(x)\n # loss\n cur_loss = self.loss(pred, y)\n if np.isnan(float(cur_loss.item())):\n raise ValueError('Loss is nan during training...')\n\n # optimizer\n self.optimizer.zero_grad()\n cur_loss.backward()\n self.optimizer.step()\n\n epoch_loss.update(cur_loss.item())\n _, pred_max = torch.max(pred, 1)\n metrics.add_batch(pred_max.data.cpu().numpy(), y.data.cpu().numpy())\n\n self.current_iteration += 1\n # exit(0)\n\n epoch_acc, _, epoch_iou_class, epoch_mean_iou, _ = metrics.evaluate()\n self.summary_writer.add_scalar(\"epoch-training/loss\", epoch_loss.val, self.current_iteration)\n self.summary_writer.add_scalar(\"epoch_training/mean_iou\", epoch_mean_iou, self.current_iteration)\n tqdm_batch.close()\n\n print(\"Training Results at epoch-\" + str(self.current_epoch) + \" | \" + \"loss: \" + str(\n epoch_loss.val) + \" - acc-: \" + str(\n epoch_acc) + \"- mean_iou: \" + str(epoch_mean_iou) + \"\\n iou per class: \\n\" + str(\n epoch_iou_class))", "title": "" }, { "docid": "b72a1d009002896d292a1537903e7c94", "score": "0.76067483", "text": "def _train_epoch(self):\n for train_steps_per_epoch, batch in enumerate(self.data_loader[\"train\"], 1):\n # train one step\n self._train_step(batch)\n\n # check interval\n if self.config[\"rank\"] == 0:\n self._check_log_interval()\n self._check_eval_interval()\n self._check_save_interval()\n\n # check whether training is finished\n if self.finish_train:\n return\n\n # update\n self.epochs += 1\n self.train_steps_per_epoch = train_steps_per_epoch\n logging.info(\n f\"(Steps: {self.steps}) Finished {self.epochs} epoch training \"\n f\"({self.train_steps_per_epoch} steps per epoch).\"\n )\n\n # needed for shuffle in distributed training\n if self.config[\"distributed\"]:\n self.sampler[\"train\"].set_epoch(self.epochs)", "title": "" }, { "docid": "f2af63fb5244168b5708e570509e507e", "score": "0.7539746", "text": "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "title": "" }, { "docid": "f735ca4953b9f9e613a2d22ebb13ba23", "score": "0.7527993", "text": "def run_training():", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", 
"score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.74928707", "text": "def train(self):\n pass", "title": "" }, { "docid": "271fd194c5ed02885c3b571828815d3a", "score": "0.7490837", "text": "def training():", "title": "" }, { "docid": "7154b79ec02a0fdf92c894f9055aa1e6", "score": "0.7483169", "text": "def train_one_epoch(self):\n num_batches = self.train_len // self.config.batch_size\n tqdm_batch = tqdm(self.train_loader, total=num_batches,\n desc=\"[Epoch {}]\".format(self.current_epoch), disable=self.disable_progressbar)\n\n # num_batches = self.overfit_debug_len // self.config.batch_size\n # tqdm_batch = tqdm(self.overfit_debug_loader, total=num_batches,\n # desc=\"[Epoch {}]\".format(self.current_epoch))\n\n val_every = None if self.config['validations_per_epoch'] == 0 else max(num_batches // self.config['validations_per_epoch'], 1)\n self.model.train()\n\n epoch_loss = AverageMeter()\n epoch_acc = AverageMeter()\n\n for batch_i, data_list in enumerate(tqdm_batch):\n program_args, rvAssignments, _, rvOrders, rvOrders_lengths = \\\n data_list[:-4], data_list[-4], data_list[-3], data_list[-2], data_list[-1]\n\n for i in range(len(program_args)):\n program_args[i] = program_args[i].to(self.device)\n\n # N x all_num_rvs tesnor containing (indexes of) final values of all rvs for each data point\n rvAssignments = rvAssignments.to(self.device)\n\n # rvOrders is a list of shape N x T where each row contains the (padded) render order for each data point\n # Each entry in columnt t is the index of the rv that was rendered at time t for this datapoint\n rvOrders = rvOrders.to(self.device)\n rvOrders_lengths = rvOrders_lengths.to(self.device)\n\n # shape N x (T - 1)\n labels = self._createLabels(rvOrders, rvAssignments, rvOrders_lengths)\n\n # reset optimiser gradients\n self.optim.zero_grad()\n\n # outputs are list of list of tensors of shape num_batches x (T-1) x c_t\n output, alphas = self.model(rvOrders.long(), rvOrders_lengths.long(), rvAssignments.long(), program_args)\n\n loss, avg_acc, num_total = self._compute_loss(output, labels, rvOrders_lengths)\n\n if self.use_attention:\n assert len(alphas) > 0\n frob_loss = self._compute_frobenius_norm(alphas)\n loss = loss + frob_loss\n\n loss.backward()\n self.optim.step()\n\n epoch_loss.update(loss.item(), n=num_total)\n epoch_acc.update(avg_acc, n=num_total)\n tqdm_batch.set_postfix({\"Loss\": epoch_loss.avg, \"Avg acc\": epoch_acc.avg})\n\n self.summary_writer.add_scalars(\"epoch/loss\", {'loss': epoch_loss.val}, self.current_iteration)\n self.summary_writer.add_scalars(\"epoch/accuracy\", {'accuracy': epoch_acc.val}, 
self.current_iteration)\n\n self.current_iteration += 1\n\n if val_every and (batch_i + 1) % val_every == 0:\n self.validate()\n self.model.train() # put back in training mode\n\n tqdm_batch.close()", "title": "" }, { "docid": "19034b9287168814d5a974eff05d7b82", "score": "0.74696976", "text": "def TrainEpoch(ss):\n ss.SetParams(\"Network\", False)\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "title": "" }, { "docid": "ff31395642d44e9f253f56a3d6c6d7ec", "score": "0.7424826", "text": "def train_epoch(self, X, Y):\n self._train_epoch(X, Y)", "title": "" }, { "docid": "ff31395642d44e9f253f56a3d6c6d7ec", "score": "0.7424826", "text": "def train_epoch(self, X, Y):\n self._train_epoch(X, Y)", "title": "" }, { "docid": "1bc8372c848c0a22895eb1d99fd45a7f", "score": "0.7422653", "text": "def train_epoch(self):\n # Copy policy state accumulated during data collection to the trainer.\n self._loop.update_weights_and_state(state=self._collect_model.state)\n # Perform one gradient step per training epoch to ensure we stay on policy.\n self._loop.run(n_steps=self._n_train_steps_per_epoch)", "title": "" }, { "docid": "f785a506e942e2fc986e81fecc7e6de3", "score": "0.741695", "text": "def train_epoch(self, train_loader):\n raise NotImplementedError('users must define train_epoch to use this base class')", "title": "" }, { "docid": "0cd41322f54e8a7333ea7fd6b14df651", "score": "0.74155045", "text": "def train(self, epoch):\n \n train_loss = 0\n train_acc = 0\n for i, (x_array, t_array) in enumerate(self.train_iter):\n device = torch.device(\"cuda\" if cuda.is_available() else \"cpu\")\n self.optimizer.zero_grad()\n\n x = x_array.to(device)\n t = t_array.to(device)\n y = self.model(x)\n if self.opt.BC:\n t = t.to(device, dtype=torch.float32)\n y = y.to(device, dtype=torch.float32)\n loss = utils.kl_divergence(y, t)\n t_indices = torch.argmax(t, dim=1)\n acc = accuracy(y.data, t_indices)\n else:\n \"\"\" F.cross_entropy already combines log_softmax and NLLLoss \"\"\"\n t = t.to(device, dtype=torch.int64)\n loss = F.cross_entropy(y, t)\n acc = accuracy(y.data, t)\n\n \n loss.backward()\n self.optimizer.step()\n\n train_loss += float(loss.item()) * len(t.data)\n train_acc += float(acc.item()) * len(t.data)\n\n elapsed_time = time.time() - self.start_time\n progress = (self.n_batches * (epoch - 1) + i + 1) * 1.0 / (self.n_batches * self.opt.nEpochs)\n eta = elapsed_time / progress - elapsed_time\n\n line = '* Epoch: {}/{} ({}/{}) | Train: LR {} | Time: {} (ETA: {})'.format(\n epoch, self.opt.nEpochs, i + 1, self.n_batches,\n self.scheduler.get_last_lr(), utils.to_hms(elapsed_time), utils.to_hms(eta))\n sys.stderr.write('\\r\\033[K' + line)\n sys.stderr.flush()\n\n train_loss /= len(self.train_iter.dataset)\n train_top1 = 100 * (1 - train_acc / len(self.train_iter.dataset))\n\n return train_loss, train_top1", "title": "" }, { "docid": "a60f6e34f02f94c74b34f9c75005f4e0", "score": "0.74043757", "text": "def train_epoch(self):\n\n self.netg.train()#进入训练状态\n epoch_iter = 0\n self.D_count = 0\n\n for data in tqdm(self.dataloader['train'], leave=False, total=len(self.dataloader['train'])):# data和label 一个epoach\n self.total_steps += self.opt.batchsize # 取多少个样本\n epoch_iter += self.opt.batchsize # 取多少个样本\n #print(\"data.size()=\", len(data))\n #print(\"self.total_steps=\", self.total_steps)\n self.set_input(data) # data已经是batchsize了\n self.optimize()\n if self.D_count == self.CRITIC_ITERS3 - 1:\n self.D_count 
= 0\n else:\n self.D_count += 1\n #print(\"self.D_count=\",self.D_count)\n #print(\"data=\",data) dataloader 里面把data全部变成了-1 到1 之间的数字所以能跟tanh\n if self.total_steps % self.opt.print_freq == 0:\n errors, errors_2 = self.get_errors()\n if self.opt.display:\n counter_ratio = float(epoch_iter) / len(self.dataloader['train'].dataset)\n self.visualizer.plot_current_errors(self.epoch, counter_ratio, errors)\n self.visualizer.plot_current_errors2(self.epoch, counter_ratio, errors_2)\n\n if self.total_steps % self.opt.save_image_freq == 0:\n reals, fakes, fixed = self.get_current_images()\n self.visualizer.save_current_images(self.epoch, reals, fakes, fixed, self.fixed_input)\n if self.opt.display:\n self.visualizer.display_current_images(reals, fakes, fixed)\n\n\n ###if self.opt.dataset == \"cifar10\":\n ###if self.opt.dataset == \"cifar10\" and self.epoch >= 8:\n ###if self.opt.dataset == \"mnist\":##注释掉相当与每个采样 测试一下\n if 0:\n res = self.test()\n if res['AUC'] > self.best_auc:\n self.best_auc = res['AUC']\n self.save_weights(self.epoch)\n self.visualizer.print_current_performance(res, self.best_auc) # 当前的auc打印出来\n\n if res['AUC_PR'] > self.best_auc_pr:\n self.best_auc_pr = res['AUC_PR']\n # self.save_weights(self.epoch)\n self.visualizer.print_current_performance(res, self.best_auc_pr) # 当前的auc打印出来\n\n if res['f1_score'] > self.best_f1_score:\n self.best_f1_score = res['f1_score']\n # self.save_weights(self.epoch)\n self.visualizer.print_current_performance(res, self.best_f1_score) # 当前的auc打印出来\n\n\n print(\">> Training model %s. Epoch %d/%d\" % (self.name(), self.epoch+1, self.opt.niter))\n\n\n # self.visualizer.print_current_errors(self.epoch, errors)", "title": "" }, { "docid": "b0cd43399be0d6567eb69d392e761a76", "score": "0.74008983", "text": "def train_it(self,X_train, y_train,nb_epochs=10,batch_size=32,verbose=True):\t\n self.history = self.model.fit(\n X_train, y_train,\n epochs=nb_epochs,\n batch_size=nb_epochs,\n validation_split=0.2,\n shuffle=False,\n verbose=verbose\n )", "title": "" }, { "docid": "facd642ff9ba0f4182aecbf04c4a6dce", "score": "0.7395686", "text": "def train_epoch(self):\n assert_debug(self.loss_module_ is not None)\n assert_debug(self.prediction_module_ is not None)\n\n self.prediction_module_.train()\n self.loss_module_.train()\n self.train_ = True\n if self.config.num_workers == 0:\n dataloader = DataLoader(\n self.train_dataset,\n pin_memory=self.config.pin_memory,\n batch_size=self.config.batch_size,\n num_workers=self.config.num_workers,\n collate_fn=collate_fun,\n shuffle=self.config.shuffle)\n\n else:\n dataloader = DataLoader(\n self.train_dataset,\n pin_memory=self.config.pin_memory,\n batch_size=self.config.batch_size,\n num_workers=self.config.num_workers,\n prefetch_factor=self.config.prefetch_factor,\n collate_fn=collate_fun,\n shuffle=self.config.shuffle)\n\n loss_meter = AverageMeter()\n progress_bar = self.progress_bar(dataloader, desc=f\"Training epoch n°{self.num_epochs}\")\n for batch_idx, batch in progress_bar:\n # Reinitialize the optimizer\n self._optimizer.zero_grad()\n # send the data to the GPU\n batch = self.send_to_device(batch)\n\n # Prediction step\n prediction_dict = self.prediction_module_(batch)\n # Loss step\n loss, log_dict = self.loss_module_(prediction_dict)\n if loss is not None:\n if torch.any(torch.isnan(loss)):\n raise RuntimeError(\"[ERROR]Loss is NaN.\")\n # Optimizer step\n try:\n loss.backward()\n self._optimizer.step()\n except RuntimeError as e:\n print(\"[ERROR]NaN During back progation... 
Good luck.\")\n raise e\n\n if batch_idx % self.config.average_meter_frequency == 0:\n loss_meter.update(loss.detach().cpu())\n\n self.log_dict(log_dict)\n self.train_iter += 1\n # Save module to checkpoint\n self.num_epochs += 1\n self.train_ = False\n if loss_meter.count > 0:\n print(f\"Train average loss : {loss_meter.average}\")\n self._average_train_loss = loss_meter.average", "title": "" }, { "docid": "3c3a330bfc61822522f5e9aae57429a9", "score": "0.7389642", "text": "def train(self, epochs):\n self.engine.run(self.train_loader, max_epochs=epochs)", "title": "" }, { "docid": "e4348c17bffde6876308bd6921354207", "score": "0.7376508", "text": "def train(self) -> None:\n self.training = True\n self.model.train()", "title": "" }, { "docid": "d870ca12612e6db9db130f01e967f9e9", "score": "0.7373133", "text": "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n run_step = 0\n loss_sum = 0.0\n epoch_training_time = time.time()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n duration = time.time() - before_op_time\n\n run_step += 1\n loss_sum += losses[\"loss\"].cpu().data\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = batch_idx % self.opt.log_frequency == 0 and self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, loss_sum/run_step)\n if (\"depth_gt\", 0) in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n if (\"pose_gt\", 0) in inputs:\n self.compute_pose_losses(inputs, outputs, losses)\n self.log(\"train\", inputs, outputs, losses)\n\n self.step += 1\n epoch_training_time = time.time() - epoch_training_time\n self.epoch_training_times.append(epoch_training_time)\n self.val()", "title": "" }, { "docid": "fb341f86e5143ff4828afd7c34190792", "score": "0.7351378", "text": "def train(self, num_timesteps):\n pass", "title": "" }, { "docid": "c71ae6f6f30ee8f38d2e104d04b2c856", "score": "0.7350667", "text": "def on_epoch_begin(self, training_config: BaseTrainerConfig, **kwargs):", "title": "" }, { "docid": "300057a09de0c7122ed03a2496f7d3a9", "score": "0.7324073", "text": "def train(self):\n print(\"traing started, this may take a while\")\n self.estimator.fit(self.X_train, self.y_train)", "title": "" }, { "docid": "3bd767b574461e41ade5cb4245f75280", "score": "0.73199844", "text": "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n ml_loss = AverageMeter()\n perplexity = AverageMeter()\n epoch_time = Timer()\n\n current_epoch = global_stats['epoch']\n pbar = tqdm(data_loader)\n #\n # pbar.set_description(\"%s\" % 'Epoch = %d [perplexity = x.xx, ml_loss = x.xx]' %\n # current_epoch)\n if sys.stderr.isatty():\n pbar.set_description(\"%s\" % 'Epoch = %d [perplexity = x.xx, ml_loss = x.xx]' %\n current_epoch)\n # Run one epoch\n for idx, ex in enumerate(pbar):\n # try:\n bsz = ex['batch_size']\n if args.optimizer in ['sgd', 'adam'] and current_epoch <= args.warmup_epochs:\n cur_lrate = global_stats['warmup_factor'] * (model.updates + 1)\n for param_group in model.optimizer.param_groups:\n param_group['lr'] = cur_lrate\n\n net_loss = model.update(ex)\n ml_loss.update(net_loss['ml_loss'], bsz)\n perplexity.update(net_loss['perplexity'], bsz)\n log_info = 'Epoch = %d [perplexity = %.2f, ml_loss = %.2f]' % \\\n 
(current_epoch, perplexity.avg, ml_loss.avg)\n if sys.stderr.isatty():\n pbar.set_description(\"%s\" % log_info)\n # except RuntimeError:\n # logger.info(\"idx error%s\" % str(ex[\"ids\"]))\n # break\n logger.info('train: Epoch %d | perplexity = %.2f | ml_loss = %.2f | '\n 'Time for epoch = %.2f (s)' %\n (current_epoch, perplexity.avg, ml_loss.avg, epoch_time.time()))\n\n # Checkpoint\n if args.checkpoint:\n # model.checkpoint(args.model_file + '.checkpoint', current_epoch + 1)\n if args.local_rank == 0:\n model.checkpoint(args.model_save_path + '.checkpoint', current_epoch + 1)", "title": "" }, { "docid": "9e080653cf9dc2deb1b55b058116be55", "score": "0.73191535", "text": "def train(self, epoch):\n batch_time = Average_meter()\n data_time = Average_meter()\n losses = Average_meter()\n top1 = Average_meter()\n top5 = Average_meter()\n\n # switch to train mode\n self.model.train()\n begin = time.time()\n\n for i, (image, target) in enumerate(self.train_loader):\n batch_size= image.size(0)\n # measure data loading time\n data_time.update(time.time() - begin)\n\n image = image.cuda()\n input_var = Variable(image)\n target = target.cuda()\n target_var = Variable(target)\n \n output = self.model(input_var)\n\n if self.is_soft_regu or self.is_SRIP:\n loss = self.criterion(output, target_var, self.model, self.soft_lambda)\n else:\n loss = self.criterion(output, target_var)\n\n # measure accuracy and record loss\n prec1, prec5 = self.training_aux.accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.data.item(), batch_size)\n top1.update(prec1.item(), batch_size)\n top5.update(prec5.item(), batch_size)\n\n # compute gradient and do SGD step\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n # measure elapsed time\n batch_time.update(time.time() - begin)\n\n if i % self.print_freq == 0:\n #progress_bar(i, len(self.train_loader), 'Loss: {loss.avg:.4f} | Prec@1 {top1.avg:.3f} | Prec@5 {top5.avg:.3f}'.format(loss=losses, top1=top1, top5=top5))\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.avg:.3f}\\t'\n 'Data {data_time.avg:.3f}\\t'\n 'Loss {loss.avg:.4f}\\t'\n 'Prec@1 {top1.avg:.3f}\\t'\n 'Prec@5 {top5.avg:.3f}'.format(\n epoch, i, len(self.train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n\n begin = time.time()\n if (self.iter_svb_flag) and epoch != (self.nEpoch -1) and i != (self.train_loader.__len__() -1):\n self.fcConvWeightReguViaSVB()\n\n self.training_aux.write_err_to_file(epoch = epoch, top1 = top1, top5 = top5, trn_loss = losses, mode = 'train')\n \n return", "title": "" }, { "docid": "8fdc239b39d2a9388929bdf41961e2de", "score": "0.7306058", "text": "def train(model, dataset, model_file, epoch):\n try:\n trainer = Trainer(model, dataset, model_file)\n trainer.train(epoch)\n except Exception as e:\n logging.error(e, exc_info=True)", "title": "" }, { "docid": "43045daa453fcff8b824e55ef4b301cd", "score": "0.73004484", "text": "def train(self):\n # load the most recent checkpoint\n if self.resume:\n self.load_checkpoint(best=False)\n\n print(\n \"\\n[*] Train on {} samples, validate on {} samples\".format(\n self.num_train, self.num_valid\n )\n )\n\n for epoch in range(self.start_epoch, self.epochs):\n\n print(\n \"\\nEpoch: {}/{} - LR: {:.6f}\".format(\n epoch + 1, self.epochs, self.optimizer.param_groups[0][\"lr\"]\n )\n )\n\n # train for 1 epoch\n train_loss, train_acc = self.train_one_epoch(epoch)\n\n # evaluate on validation set\n valid_loss, valid_acc = self.validate(epoch)\n\n # # reduce lr if 
validation loss plateaus\n self.scheduler.step(-valid_acc)\n\n is_best = valid_acc > self.best_valid_acc\n msg1 = \"train loss: {:.3f} - train acc: {:.3f} \"\n msg2 = \"- val loss: {:.3f} - val acc: {:.3f} - val err: {:.3f}\"\n if is_best:\n self.counter = 0\n msg2 += \" [*]\"\n msg = msg1 + msg2\n print(\n msg.format(\n train_loss, train_acc, valid_loss, valid_acc, 100 - valid_acc\n )\n )\n\n # check for improvement\n if not is_best:\n self.counter += 1\n if self.counter > self.train_patience:\n print(\"[!] No improvement in a while, stopping training.\")\n return\n self.best_valid_acc = max(valid_acc, self.best_valid_acc)\n self.save_checkpoint(\n {\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optim_state\": self.optimizer.state_dict(),\n \"best_valid_acc\": self.best_valid_acc,\n },\n is_best,\n )", "title": "" }, { "docid": "b0614596a9f0d6e87137124909f42963", "score": "0.72905236", "text": "def train():\n\tpipeline_manager.train()", "title": "" }, { "docid": "1993e338fb35ea5b349fa316808a02e1", "score": "0.72818536", "text": "def _train_epoch(self, epoch):\n\n total_loss = 0\n self.model.train()\n\n self.logger.info('Train Epoch: {}'.format(epoch))\n\n for batch_id, data in enumerate(self.train_loader):\n start_time = time()\n\n image = data['image']\n target = data['target']\n\n self.model.zero_grad()\n\n prediction = self.model(image, target.shape[2:4])\n\n loss = self.model.loss(prediction, target)\n\n loss.backward()\n self.optimizer.step()\n\n step = epoch * len(self.train_loader) + batch_id\n\n self.tb_writer.add_scalar('train/loss', loss.item(), step)\n\n total_loss += loss.item()\n\n end_time = time()\n it_time = end_time - start_time\n\n if batch_id % self.log_period == 0:\n accuracy = evaluate_accuracy(prediction, target)\n self.tb_writer.add_scalar('train/accuracy', accuracy, step)\n self.logger.info(\n ' > [{}/{} ({:.0f}%), {:.2f}s] Loss: {:.6f} Accuracy: {:.2f}'.format(\n batch_id * self.train_loader.batch_size + image.size(0),\n len(self.train_loader.dataset),\n 100.0 * batch_id / len(self.train_loader),\n it_time * (len(self.train_loader) - batch_id),\n loss.item(),\n accuracy))\n\n self.logger.info(' > Total loss: {:.6f}'.format(\n total_loss / len(self.train_loader)\n ))\n\n return total_loss / len(self.train_loader)", "title": "" }, { "docid": "d2225db2ea4bf09c88f166c0856fd0b3", "score": "0.72777426", "text": "def train(data_dir, model_name, epochs):\n train_model(data_dir=data_dir, model_name=model_name, epochs=epochs)", "title": "" }, { "docid": "1bda39ff39926510df4c8eb4468d4fed", "score": "0.72702116", "text": "def train(self):\r\n pass", "title": "" }, { "docid": "1bda39ff39926510df4c8eb4468d4fed", "score": "0.72702116", "text": "def train(self):\r\n pass", "title": "" }, { "docid": "239cbe6c3e3c6b0a5c3bac72a1a32415", "score": "0.7269673", "text": "def _do_training(self):\n imgs, actions, rewards, terminals = \\\n self.data_set.random_batch(\n self.network.batch_size)\n return self.network.train(imgs, actions, rewards, terminals)", "title": "" }, { "docid": "0784d9a91afc52af97137ea1c5bec5ea", "score": "0.7252049", "text": "def train(self):\n self.model.train()", "title": "" }, { "docid": "c82be65ef3d32072225b1b1e89739ebb", "score": "0.725159", "text": "def _train_epoch_begin(self, i_epoch):\n self.t_epoch_begin = time.time()\n \n ## update learning rate\n self.lr = self.params.lr*self.params.lr_step_decay_rate**((i_epoch-1)//self.params.lr_step_size)", "title": "" }, { "docid": "892dc777304d74b5c4a8cc24034cc369", "score": "0.7233485", 
"text": "def run(self):\n self._train()", "title": "" }, { "docid": "a5bb97f64b989f1d598302de5ba35724", "score": "0.7216295", "text": "def train_epoch(i):\n model.zero_grad()\n\n score = model(train_data[\"protein_feature\"],\n train_data[\"phenotype_feature\"],\n train_data[\"protein_sim\"],\n train_data[\"phenotype_sim\"],\n train_data[\"relation\"])\n trn_loss = criterion(train_data[\"train_annotation\"], score)\n # print log info every 25 iterations\n if i % 25 == 0:\n tst_auc, tst_aupr = tst_metric(train_data[\"test_label\"], score, train_data[\"test_idx\"])\n else:\n tst_auc, tst_aupr = 0, 0\n trn_loss.backward()\n optimizer.step()\n scheduler.step()\n return trn_loss, tst_auc, tst_aupr", "title": "" }, { "docid": "e89e5624941446c3af949fc64ceaac64", "score": "0.7211606", "text": "def train(self, num_epochs: int = 10):\n # progress_bar = tqdm(range(num_epochs), desc=f\"Training {num_epochs} new epochs\", ncols=max(num_epochs, 100))\n for i in range(num_epochs):\n self.train_epoch()\n self.evaluate_epoch()\n self.save_checkpoint()\n if i > 0 and i % self.test_frequency == 0:\n self.test()\n if self._scheduler:\n self._scheduler.step()\n\n lr = self._scheduler.get_last_lr()\n if lr != self.config.optimizer_learning_rate:\n print(f\"Last learning rate : {lr}\")\n self.config.optimizer_learning_rate = lr", "title": "" }, { "docid": "6250f1fe7b3929070e9ab14b12cea152", "score": "0.71989316", "text": "def run_epoch(self):\n self.model_lr_scheduler.step()\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses['totLoss'].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n if self.step % 500 == 0:\n self.record_img(inputs, outputs)\n\n if self.step % 100 == 0:\n self.log_time(batch_idx, duration, losses[\"totLoss\"])\n\n if self.step % 2 == 0:\n self.log(\"train\", inputs, outputs, losses, writeImage=False)\n\n self.step += 1", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.7193282", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "27cba51149897cda8fc90d8c42df25b1", "score": "0.7193282", "text": "def train(self):\n raise NotImplementedError", "title": "" }, { "docid": "b24441a03c65d5dca43e2f3c23451d18", "score": "0.71904856", "text": "def _train_one_epoch(self):\n epoch_error = []\n for img in self.training_set.input:\n # Normalize the image \n \n #import pdb ; pdb.set_trace()\n #norm_img = (img - img.argmin())/(img.argmax() - img.argmin())\n \n self._feed_forward(img)\n self._compute_deltas(img)\n self._update_weights()\n \n epoch_error.append(self._compute_error(img))\n \n return np.mean(epoch_error)", "title": "" }, { "docid": "64242ae21b03b149a0a556bd7760f8bf", "score": "0.7186483", "text": "def train(self, num_epochs):\n\n print('training with {} epochs'.format(num_epochs), flush=True)\n\n model_accuracies_csv = os.path.join(self.models_dir, '{}.csv'.format(self.model_file_base_name))\n f = open(model_accuracies_csv, 'w')\n if self.using_counterexample_set:\n f.write('epoch, training_loss, training_acc, test_acc, counterex_acc, is_new_best, test_class_1, test_class_2, test_class_3, test_class_4, test_class_5, counterex_class_3,\\n')\n else:\n f.write('epoch, training_loss, training_acc, test_acc, is_new_best, test_class_1, test_class_2, test_class_3, test_class_4, test_class_5,\\n')\n f.flush()\n\n best_acc = 0.0\n # test_loader_iter = 
itertools.cycle(self.test_loader)\n test_loader_arr = []\n\n for images, labels in self.test_loader:\n test_loader_arr.append((images, labels))\n print('finished creating test_loader_arr', flush=True)\n\n training_loader_arr = []\n\n for images, labels in self.train_loader:\n training_loader_arr.append((images, labels))\n print('finished creating training_loader_arr', flush=True)\n\n for epoch in range(num_epochs):\n if self.print:\n print('------------------------', flush=True)\n start_epoch_time = time.time()\n total_gpu_transfer_time = 0\n training_time = time.time()\n train_acc = 0.0\n train_loss = 0.0\n time_in_training_loop = 0.0\n time_loading = 0.0\n\n # for i in range(len(self.train_loader)):\n # for images, labels in self.train_loader:\n # images, labels = next(training_loader_iter)\n\n rand_arr = random.sample(range(0, len(self.train_loader)), len(self.train_loader))\n print('created rand_arr', flush=True)\n\n for i in rand_arr:\n images, labels = training_loader_arr[i]\n\n start_training_time = time.time()\n time_loading += time.time() - start_training_time\n\n self.model.train()\n # Move images and labels to gpu if available\n if self.cuda_avail:\n start_transfer_time = time.time()\n images = Variable(images.cuda(0))\n labels = Variable(labels.cuda(0))\n total_gpu_transfer_time += time.time() - start_transfer_time\n # print(labels.device)\n\n # Clear all accumulated gradients\n self.optimizer.zero_grad()\n\n # Predict classes using images from the test set\n outputs = self.model(images)\n\n # Compute the loss based on the predictions and actual labels\n loss = self.loss_fn(outputs, labels)\n\n # Backpropagate the loss\n loss.backward()\n\n # Adjust parameters according to the computed gradients\n self.optimizer.step()\n\n train_loss += loss.item() * images.size(0)\n _, prediction = torch.max(outputs.data, 1)\n\n train_acc += torch.sum(prediction == labels.data).float()\n time_in_training_loop += time.time() - start_training_time\n \n training_time = time.time() - training_time\n\n if self.print:\n print('time_loading training: {}'.format(time_loading), flush=True)\n print('total_gpu_transfer_time (training): {}'.format(total_gpu_transfer_time), flush=True)\n print('time in training loop: {}'.format(time_in_training_loop), flush=True)\n print('total training time: {}'.format(training_time), flush=True)\n\n # Call the learning rate adjustment function\n self.adjust_learning_rate(epoch)\n\n # Compute the average acc and loss over all training images\n train_acc = train_acc / self.training_set_size\n train_loss = train_loss / self.training_set_size\n\n # Evaluate on the test set\n # print('test acc', flush=True)\n test_acc, test_class_accs = self.evaluate_accuracy(test_loader_arr, len(self.test_loader))\n # test_acc, test_class_accs = self.evaluate_accuracy(self.test_loader, self.test_set_size)\n \n if self.using_counterexample_set:\n # print('counterex acc', flush=True)\n counterexample_acc, counterexample_class_accs = self.evaluate_accuracy(self.counterexample_loader, self.counterexample_set_size)\n\n # Print the metrics\n # print(\"Epoch {}, Train Accuracy: {}, Train Loss: {}, Test Accuracy: {},\"\n # \"Counterexample Accuracy: {}\".format(\n # epoch, train_acc, train_loss, test_acc, counterexample_acc))\n\n # self.train_accuracies.append(train_acc)\n # self.test_accuracies.append(test_acc)\n # self.counterexample_accuracies.append(counterexample_acc)\n\n if self.using_counterexample_set:\n f.write('{:3d}, {:2.5f}, {:1.5f}, {:1.5f}, {:1.5f}, {:1d}, {:1.5f}, {:1.5f}, {:1.5f}, 
{:1.5f}, {:1.5f}, {:1.5f},\\n'.format(\n epoch, train_loss, train_acc, test_acc, counterexample_acc, test_acc > best_acc,\n test_class_accs[0], test_class_accs[1], test_class_accs[2], test_class_accs[3], test_class_accs[4],\n counterexample_class_accs[2]))\n else:\n f.write('{:3d}, {:2.5f}, {:1.5f}, {:1.5f}, {:1d}, {:1.5f}, {:1.5f}, {:1.5f}, {:1.5f}, {:1.5f},\\n'.format(\n epoch, train_loss, train_acc, test_acc, test_acc > best_acc,\n test_class_accs[0], test_class_accs[1], test_class_accs[2], test_class_accs[3], test_class_accs[4]))\n f.flush()\n\n # Save the model if the test acc is greater than our current best\n if test_acc > best_acc:\n self.save_checkpoint(epoch)\n best_acc = test_acc\n print(\"New best acc is {}, epoch {}\".format(best_acc, epoch), flush=True)\n\n if self.print:\n print('epoch time: {}'.format(time.time() - start_epoch_time), flush=True)\n\n # Move last epoch to current model and delete all other models\n model_file_name = os.path.join(self.models_dir, '{}.pth.tar'.format(self.model_file_base_name))\n\n print('copying {} to {}'.format(self.model_file_names[-1], model_file_name), flush=True)\n shutil.copy(self.model_file_names[-1], model_file_name)\n\n for model_file_name in self.model_file_names:\n os.remove(model_file_name)\n\n # print('train_accuracies: {}'.format(self.train_accuracies))\n # print('test_accuracies: {}'.format(self.test_accuracies))\n # print('counterexample_accuracies: {}'.format(self.counterexample_accuracies))\n\n f.close()", "title": "" }, { "docid": "b34ac7c1cdc87122cdd5aaf140fb272f", "score": "0.71808696", "text": "def _train_epoch(self, ld_tr):\n\n ld_tr = [ld_tr] if not isinstance(ld_tr, list) or len(ld_tr)==0 else ld_tr\n for data in zip(*ld_tr):\n x = [d[0] for d in data]\n x = x[0] if len(x)==1 else x\n y = [d[1] for d in data]\n y = y[0] if len(y)==1 else y\n\n #st = time.time()\n with tf.GradientTape() as tape:\n loss_dict = self.loss_fn_train(x, y,\n model=lambda x: self.model(x, training=True),\n model_iw=self.model_iw, tape=tape)\n [setattr(self, k, v) for k, v in loss_dict.items()]\n \n if self.params.weight_decay > 0.0:\n for v in self.model.trainable_weights:\n if 'bn' in v.name:\n print(v)\n print('not to decay')\n sys.exit()\n self.loss_wd = self.params.weight_decay * reg_l2([v for v in self.model.trainable_weights if 'kernel' in v.name])\n self.loss += self.loss_wd\n\n #print(time.time() - st)\n grad = tape.gradient(self.loss, self.model.trainable_weights)\n self.opt.apply_gradients(zip(grad, self.model.trainable_weights))", "title": "" }, { "docid": "146a10cceb1dbf48f0f517f2d1269086", "score": "0.7172825", "text": "def train_one_epoch(self, epoch):\n \n import pandas as pd\n import numpy as np\n\n self.model.train()\n batch_time = AverageMeter()\n losses = AverageMeter()\n accs = AverageMeter()\n\n tic = time.time()\n with tqdm(total=self.num_train) as pbar:\n for i, (x, y) in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n \n x, y = x.to(self.device), y.to(self.device)\n x1, y1 = self.SWAT()#self.gmdataset()#self.batadal()#self.alldatacsv()#self.gmdataset()\n x1 = x1.astype(np.float32)\n y1 = y1.astype(np.float32)\n\n x,y = torch.from_numpy(x1).float(),torch.from_numpy(y1).long()\n #print(\"Here\", y)\n plot = False\n if (epoch % self.plot_freq == 0) and (i == 0):\n plot = True\n\n # initialize location vector and hidden state\n self.batch_size = x.shape[0]\n h_t, l_t = self.reset()\n\n # save images\n imgs = []\n imgs.append(x[0:9])\n \n # extract the glimpses\n locs = []\n log_pi = []\n baselines = []\n for t 
in range(self.num_glimpses - 1):\n # forward pass through model\n h_t, l_t, b_t, p = self.model(x, l_t, h_t)\n\n # store\n locs.append(l_t[0:9])\n baselines.append(b_t)\n log_pi.append(p)\n\n # last iteration\n h_t, l_t, b_t, log_probas, p = self.model(x, l_t, h_t, last=True)\n log_pi.append(p)\n baselines.append(b_t)\n locs.append(l_t[0:9])\n\n # convert list to tensors and reshape\n baselines = torch.stack(baselines).transpose(1, 0)\n log_pi = torch.stack(log_pi).transpose(1, 0)\n\n # calculate reward\n predicted = torch.max(log_probas, 1)[1]\n R = (predicted.detach() == y).float()\n R = R.unsqueeze(1).repeat(1, self.num_glimpses)\n\n # compute losses for differentiable modules\n loss_action = F.nll_loss(log_probas, y)\n loss_baseline = F.mse_loss(baselines, R)\n\n # compute reinforce loss\n # summed over timesteps and averaged across batch\n adjusted_reward = R - baselines.detach()\n loss_reinforce = torch.sum(-log_pi * adjusted_reward, dim=1)\n loss_reinforce = torch.mean(loss_reinforce, dim=0)\n\n # sum up into a hybrid loss\n loss = loss_action + loss_baseline + loss_reinforce * 0.01\n\n # compute accuracy\n correct = (predicted == y).float()\n acc = 100 * (correct.sum() / len(y))\n #print(\"Predicted: \", predicted, \"\\nTrue\", y)\n\n # store\n losses.update(loss.item(), x.size()[0])\n accs.update(acc.item(), x.size()[0])\n\n # compute gradients and update SGD\n loss.backward()\n self.optimizer.step()\n\n # measure elapsed time\n toc = time.time()\n batch_time.update(toc - tic)\n\n pbar.set_description(\n (\n \"{:.1f}s - loss: {:.3f} - acc: {:.3f}\".format(\n (toc - tic), loss.item(), acc.item()\n )\n )\n )\n pbar.update(self.batch_size)\n\n # dump the glimpses and locs\n if plot:\n imgs = [g.cpu().data.numpy().squeeze() for g in imgs]\n locs = [l.cpu().data.numpy() for l in locs]\n pickle.dump(\n imgs, open(self.plot_dir + \"g_{}.p\".format(epoch + 1), \"wb\")\n )\n pickle.dump(\n locs, open(self.plot_dir + \"l_{}.p\".format(epoch + 1), \"wb\")\n )\n\n # log to tensorboard\n if self.use_tensorboard:\n iteration = epoch * len(self.train_loader) + i\n log_value(\"train_loss\", losses.avg, iteration)\n log_value(\"train_acc\", accs.avg, iteration)\n\n return losses.avg, accs.avg", "title": "" }, { "docid": "88a6bec12f8c2830b6e7bf35220176bd", "score": "0.71516067", "text": "def train(self, train_params):\n self.cfg.SOLVER.IMS_PER_BATCH = train_params['images_per_batch']\n self.cfg.SOLVER.BASE_LR = train_params['learning_rate']\n self.cfg.SOLVER.MAX_ITER = train_params['maximum_iterations']\n self.cfg.SOLVER.CHECKPOINT_PERIOD = train_params['checkpoint_save_interval']\n self.cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = train_params['ROI_batch_per_image']\n self.cfg.TEST.EVAL_PERIOD = train_params['evaluation_interval']\n self.cfg.DATALOADER.NUM_WORKERS = train_params['num_workers']\n #print(DatasetCatalog.list())\n trainer = CocoTrainer(self.cfg)\n trainer.resume_or_load(resume=True)\n trainer.train()\n self.load_model(\"model_final.pth\")\n return", "title": "" }, { "docid": "d8320005c703551bd9e1622f69e07a55", "score": "0.7140852", "text": "def _train_epoch(self, epoch):\r\n self.model.train()\r\n self.train_metrics.reset()\r\n targets = []\r\n outputs = []\r\n for batch_idx, (data, target) in enumerate(self.data_loader):\r\n data, target = data.to(self.device), target.to(self.device)\r\n\r\n self.optimizer.zero_grad()\r\n output = self.model(data)\r\n loss = self.criterion(output, target)\r\n loss.backward()\r\n self.optimizer.step()\r\n targets.append(target.detach().cpu())\r\n 
outputs.append(output.detach().cpu())\r\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\r\n self.train_metrics.update('loss', loss.item())\r\n\r\n if batch_idx % self.log_step == 0:\r\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\r\n epoch,\r\n self._progress(batch_idx),\r\n loss.item()))\r\n if batch_idx == self.len_epoch:\r\n break\r\n targets = torch.cat(targets)\r\n outputs = torch.cat(outputs)\r\n for met in self.metric_ftns:\r\n self.train_metrics.update(met.__name__, met(outputs, targets))\r\n log = self.train_metrics.result()\r\n\r\n if self.do_validation:\r\n val_log = self._valid_epoch(epoch)\r\n log.update(**{'val_'+k : v for k, v in val_log.items()})\r\n\r\n if self.lr_scheduler is not None:\r\n self.lr_scheduler.step()\r\n return log", "title": "" }, { "docid": "92651aea4e27b7ee1ff76e493d334431", "score": "0.7120993", "text": "def train(self):\n self.model.compile(loss='categorical_crossentropy',\n optimizer=self.config.train.optimizer.type, #'adam',\n metrics=self.config.train.metrics )#['accuracy']\n self.model.fit(self.dataset['train'],\n epochs=self.epoches,\n steps_per_epoch=self.steps_per_epoch,\n validation_steps=self.validation_steps,\n validation_data=self.dataset['val'],\n verbose=1)", "title": "" }, { "docid": "bd5fa6212aa9ad27e8d890b5ef906bf8", "score": "0.71109515", "text": "def train_epoch(self, t, trn_loader):\n self._model_train(t)\n for images, targets in trn_loader:\n # Forward current model\n outputs = self.model(images.to(self.device))\n loss = self.criterion(t, outputs, targets.to(self.device))\n # Backward\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self._train_parameters(), self.clipgrad)\n self.optimizer.step()", "title": "" }, { "docid": "bd5fa6212aa9ad27e8d890b5ef906bf8", "score": "0.71109515", "text": "def train_epoch(self, t, trn_loader):\n self._model_train(t)\n for images, targets in trn_loader:\n # Forward current model\n outputs = self.model(images.to(self.device))\n loss = self.criterion(t, outputs, targets.to(self.device))\n # Backward\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self._train_parameters(), self.clipgrad)\n self.optimizer.step()", "title": "" }, { "docid": "100317f65759854bbce899632570e4a5", "score": "0.7103971", "text": "def set_train(self):\n pass", "title": "" }, { "docid": "9054ac4c8606d48ba53ac6bf7f80e239", "score": "0.70867634", "text": "def train(self):\r\n # load the most recent checkpoint\r\n if self.resume:\r\n self.load_checkpoint(best=False)\r\n\r\n print(\"\\n[*] Train on {} samples, validate on {} samples\".format(\r\n self.num_train, self.num_valid)\r\n )\r\n\r\n for epoch in range(self.start_epoch, self.epochs):\r\n\r\n for scheduler in self.schedulers:\r\n scheduler.step(epoch)\r\n \r\n print(\r\n '\\nEpoch: {}/{} - LR: {:.6f}'.format(\r\n epoch+1, self.epochs, self.optimizers[0].param_groups[0]['lr'],)\r\n )\r\n\r\n # train for 1 epoch\r\n train_losses, train_accs = self.train_one_epoch(epoch)\r\n\r\n # evaluate on validation set\r\n valid_losses, valid_accs = self.validate(epoch)\r\n\r\n for i in range(self.model_num):\r\n is_best = valid_accs[i].avg> self.best_valid_accs[i]\r\n msg1 = \"model_{:d}: train loss: {:.3f} - train acc: {:.3f} \"\r\n msg2 = \"- val loss: {:.3f} - val acc: {:.3f}\"\r\n if is_best:\r\n #self.counter = 0\r\n msg2 += \" [*]\"\r\n msg = msg1 + msg2\r\n print(msg.format(i+1, train_losses[i].avg, train_accs[i].avg, valid_losses[i].avg, valid_accs[i].avg))\r\n\r\n # check for 
improvement\r\n #if not is_best:\r\n #self.counter += 1\r\n #if self.counter > self.train_patience:\r\n #print(\"[!] No improvement in a while, stopping training.\")\r\n #return\r\n self.best_valid_accs[i] = max(valid_accs[i].avg, self.best_valid_accs[i])\r\n self.save_checkpoint(i,\r\n {'epoch': epoch + 1,\r\n 'model_state': self.models[i].state_dict(),\r\n 'optim_state': self.optimizers[i].state_dict(),\r\n 'best_valid_acc': self.best_valid_accs[i],\r\n }, is_best\r\n )", "title": "" }, { "docid": "156d25a2f6b2e0bad1e0e1660e5afbf0", "score": "0.70845807", "text": "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0\n for batch_id, batch_data in enumerate(train_loader):\n assert isinstance(batch_data, torch.LongTensor)\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, total_loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "title": "" }, { "docid": "b7ed8a2a75f0b5384f4b45697392712a", "score": "0.708386", "text": "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.criterion(output, target)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n self.writer.add_image('input', make_grid(data.cpu(), nrow=8, normalize=True))\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n ##Change the argument in step depending on the lr_scheduler used\n v_log = list(val_log.items())\n self.lr_scheduler.step(v_log[0][1])\n return log", "title": "" }, { "docid": "1266d37035eedc0f352844b45159c9ab", "score": "0.7072173", "text": "def train(self):\n self._trainable.value = True", "title": "" }, { "docid": "be6e70543b199756dd629416eb16761d", "score": "0.70662093", "text": "def train_start(self):\n self.img_enc.train()\n self.txt_enc.train()", "title": "" }, { "docid": "e617ab9ef373ecfac1cab7b066696d33", "score": "0.7061279", "text": "def setup_trainning(self):\r\n #move to cuda if requested\r\n if self.cuda:\r\n assert torch.cuda.is_available() ,'Cuda not available'\r\n self.model.cuda()\r\n #reset if fit again\r\n if self.do_early_stop:\r\n self.recorder = Model_recorder(self.metric)\r\n #setup epoch_dataframe\r\n self.epoch_dfs = pd.DataFrame()", "title": "" }, { "docid": "a51a60a399aeb66101509dcee689318a", "score": "0.70573395", "text": "def train(self, verbose=False):\r\n self.read_data()\r\n for i in range(self.MAX_EPOCHS):\r\n changed_count = self.tp.train_for_an_epoch(self.TRAINING_DATA)\r\n if changed_count == 0:\r\n print(\"Converged in \", i, \" epochs.\")\r\n print(\"TRAINING IS DONE\")\r\n return\r\n if verbose:\r\n print(f\"\\nchanged_count= {changed_count}\")\r\n print(\"Weights:\")\r\n print(*self.tp.W, sep='\\n')\r\n print(f\"Training 
did not converge in {self.MAX_EPOCHS} epochs.\")", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.70552325", "text": "def train(self):", "title": "" }, { "docid": "5c7834e9211fe63b939c1219a23a1e0c", "score": "0.7048247", "text": "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = utils.AverageMeter()\n epoch_time = utils.Timer()\n\n # Run one epoch\n for idx, ex in enumerate(data_loader):\n train_loss.update(*model.update(ex))\n\n if idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n\n logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n\n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "title": "" }, { "docid": "3c77bcc6ad08a3d702afd48158c9a363", "score": "0.7045513", "text": "def train(self, x_train, y_train):\n self.algorithm.fit(x_train, y_train)", "title": "" }, { "docid": "704d45e1796a515c40a1782e25573e42", "score": "0.70411414", "text": "def train_epoch(optimizer, net, training_input, training_target):\n # permutation = torch.randperm(training_input.shape[0])\n\n # epoch_training_losses = []\n # net.train()\n # logger.info(\"-----------------------------0---------------------------------\")\n # for i in range(0, batch_size):\n # for i in range(10):\n\n # optimizer.zero_grad()\n # logger.info(\"-----------------------------1---------------------------------\")\n # indices = permutation[i:i + batch_size]\n current_X_batch, current_y_batch = training_input, training_target\n # logger.info(\"-----------------------------2---------------------------------\")\n out = net(A_wave,ele_A_wave, current_X_batch)\n loss = loss_criterion(out, current_y_batch)\n # logger.info(f'{i}/{training_input.shape[0]}: step loss:{loss.item()}')\n # print(f'{i}/{training_input.shape[0]}: step loss:{loss.item()}')\n # logger.info(f'2--------------------out:{out},current_y_batch:{current_y_batch}')\n loss.backward()\n # logger.info(f'3--------------------loss:{loss}')\n # logger.info(\"-----------------------------4---------------------------------\")\n optimizer.step()\n # logger.info(f'4--------------------loss:{loss}')\n # logger.info(\"-----------------------------5---------------------------------\")\n # epoch_training_losses.append(loss.clone().detach().cpu().numpy())\n # logger.info(\"-----------------------------6---------------------------------\")\n # return sum(epoch_training_losses)/len(epoch_training_losses)\n return loss", "title": "" }, { "docid": "944c9196c398272f9bbae3da1fa51e59", "score": "0.7038713", "text": "def train(self):\n x, y = self.get_data()\n if x and y:\n self.model, self.scaler = self.classifier.create_model(x, y)", "title": "" }, { "docid": "8e5b2866b8359efe288301590ba0e3bb", "score": "0.7031665", "text": "def train(self, X_train: np.ndarray, y_train: np.ndarray,\n X_test: np.ndarray, y_test: np.ndarray, verbose: bool = True) -> None:\n pass", "title": "" }, { "docid": "6b6a806fb330bee526cdaa40a95cf43a", "score": "0.7024369", "text": "def set_train(self) -> None:\n self.mode = 'train'\n for (name, net) in self.nets.items():\n net = net.train()", "title": "" }, { "docid": "c26fbb686c0de7938cec442f5d835d55", "score": "0.7020272", "text": "def train(self):\n min_val_error = math.inf\n 
min_val_error_epoch = 0\n for epoch in range(self.epochs):\n self.input = self.train_input\n self.output = self.train_output\n print(f'Epoch: {epoch}', end=' ============== ')\n self.forward_pass()\n self.backpropagation()\n val_error = self.test()\n if val_error < min_val_error:\n print(\"Validation error decreased.\", end=' ')\n min_val_error = val_error\n min_val_error_epoch = epoch\n # save to file the config with least validation error till now\n self.save_config_to_file()\n # stop training if min validation error has reached below 0.1 and\n # validation error hasn't decreased further after certain no. of epochs\n if epoch > min_val_error_epoch + self.early_stopping_patience:\n print(\n f\"Early stopping since validation error did not improve till {self.early_stopping_patience} epochs.\")\n break", "title": "" }, { "docid": "0d51274ac37b43d7c2af992b5a26a9ef", "score": "0.70186883", "text": "def Train(self):\r\n totalError = 1\r\n epoch = 0\r\n while ((totalError != 0.0) and epoch < MAXITER):\r\n totalError = 0 # reset no erro\r\n for i in range(len(self.target)):\r\n reg = np.array(self.previsores[i]) # pega uma linha da matriz de previsores\r\n output = self.ActivationFunction(self.Sum(reg)) # saida calculada (prediccao)\r\n # e = (y * (1-y) * (d-y))\r\n if (output != self.target[i]):\r\n error = self.target[i] - output # erro = (saidaEsperada - saidaCalculada) ===== d - u\r\n totalError += abs(error)\r\n self.UpdateWeigth(error, reg)\r\n\r\n epoch += 1\r\n # os.system('clear')\r\n print('--')\r\n self.ShowWeights()\r\n print(\"Treinado com \" + str(epoch) + \" epocas!\")", "title": "" }, { "docid": "647f1ea309f6475eca5b699c98e9fe7c", "score": "0.70162517", "text": "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = util.AverageMeter()\n epoch_time = util.Timer()\n # Run one epoch\n for idx, ex in enumerate(data_loader):\n train_loss.update(*model.update(ex)) # run on one batch\n\n if idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n logger.info('train: Epoch %d done. 
Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n\n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "title": "" }, { "docid": "8ee5e46599724de1f5f7014a949eac64", "score": "0.70102066", "text": "def train_one_epoch(trainloader, optimizer, criterion, model, epoch, device):\n running_loss = 0.0\n for i, data in enumerate(trainloader, 0):\n # get the inputs; data is a list of [inputs, labels]\n inputs, labels = data[0].to(device), data[1].to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n # print statistics\n print(\n \"epoch: %d, iter: %5d | loss: %.3f\"\n % (epoch + 1, i + 1, running_loss / len(trainloader))\n )\n running_loss = 0.0", "title": "" }, { "docid": "d60be20e4b746c3f03437bcb58dfb4eb", "score": "0.7002585", "text": "def train_model(self):\n pass", "title": "" }, { "docid": "8b6861c895dbc91f7c67a2ce036809ca", "score": "0.700224", "text": "def train(self,trainset):\n batchsize = trainset.metadata['minibatch_size']\n if self.epoch == 0:\n input_size = trainset.metadata['input_size']\n n_classes = len(trainset.metadata['targets'])\n print \"initialize ...\"\n self.initialize(input_size, n_classes, batchsize)\n print \"done\"\n\n for it in range(self.epoch,self.n_epochs):\n for input, target in trainset:\n score = self.train_batch(input.reshape(batchsize,1,72,72), target)\n self.n_updates += 1\n self.epoch = self.n_epochs", "title": "" }, { "docid": "f58d6ab461bbb950d1c2c0be915a8d97", "score": "0.69979024", "text": "def train_step(self, *args, **kwargs):\n raise NotImplementedError('Implement to run training')", "title": "" }, { "docid": "06845aba635569ece05431f96e7e5e83", "score": "0.6987769", "text": "def train(self, train_set_x, train_set_y):\n self.__log_params()\n self.__mlp.fit(train_set_x, train_set_y)\n self._logger.info('Iterations during training ann: %d', self.__mlp.n_iter_)", "title": "" }, { "docid": "a5518c16ac835e69a5982730bec8b3bb", "score": "0.698542", "text": "def train(self, save_point=None):\n for s in tqdm(range(self.num_steps)):\n for instance in range(self.nr_inst):\n self.sess.run(self.train_step, feed_dict={self.step: s})\n if (save_point != None) and (s % save_point == 0):\n self.save_weights(self.log_path + self.trial+str(s))\n print(\"Training done\")", "title": "" }, { "docid": "0daaa0b8366164bd34e692e188ec172d", "score": "0.6983097", "text": "def train(self):\n self.model.train()\n\n # Train the model for n epochs before aggregating\n for e in range(self.epochs):\n\n for i, (images, labels) in enumerate(self.train_loader):\n # Setting gradients to zero before starting backpropagation\n images, labels = images.to(self.device), labels.to(self.device)\n self.optimizer.zero_grad()\n output = self.model(images)\n loss = self.criterion(output, labels)\n loss.backward()\n self.optimizer.step()\n\n if self.verbose:\n print(\n f\"[Client {self.id}]\\t\"\n f\"Train Epoch: {e}\\t\"\n f\"Loss: {loss.item():.4f}\"\n )\n\n if self.is_private:\n epsilon, best_alpha = self.optimizer.privacy_engine.get_privacy_spent(self.delta)\n print(f\"\\t(ε = {epsilon:.2f}, δ = {self.delta}) for α = {best_alpha}\")\n\n if self.verbose:\n print(f\"Client {self.id} - done\")", "title": "" }, { "docid": "cbfeae88779dd4d691ec331f846cac21", "score": "0.6982552", "text": "def train(self, 
current_hyper_params):\n self.model.train()\n n_iter = 0\n total_loss = 0\n for batch_idx, (data, _) in enumerate(tqdm(self.train_loader)):\n _, _, total_loss, n_iter = self.process_batch(data, total_loss,\n n_iter, train=True)\n self.stats.train_loss_history.append(total_loss / n_iter)", "title": "" }, { "docid": "20c058898ac0c4b5a81087be489ee994", "score": "0.69798744", "text": "def epoch(self, epoch=None):\n if epoch is None:\n self.epoch_num += 1\n else:\n self.epoch_num = epoch\n\n self.learning_rate = self.get_lr()", "title": "" }, { "docid": "ab05715649d0c256b1900c39d55f895a", "score": "0.6979848", "text": "def epoch_start(self):\n self.step_start_time = time.time()\n self.epoch_start_time = time.time()\n self.step = 0\n self.epoch += 1\n self.G_loss = []\n self.D_loss = []", "title": "" }, { "docid": "7b1b11fa82ee0a0d69d5565cfbe2e8f9", "score": "0.69736904", "text": "def _train_epoch(self, epoch):\n self.train_iter, self.train_num_batches = self.data_loader.get_iterator_and_num_batches('train')\n self.train_iter = lazy_groups_of(self.train_iter, self.n_gpu_use)\n\n self.model.train()\n\n total_loss = 0\n for batch_idx, data in enumerate(self.train_iter):\n self.optimizer.zero_grad()\n\n output = self._run_model(data)\n loss = output['loss']\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.train_num_batches + batch_idx)\n self.writer.add_scalar('loss', loss.item())\n total_loss += loss.item()\n\n if batch_idx % (self.train_num_batches // 5) == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n metrics = self.model.get_metrics(True)\n metrics = {'train_' + k: v for k, v in metrics.items()}\n metrics.update({\n 'train_loss': total_loss / self.train_num_batches,\n })\n\n val_log = self._valid_epoch(epoch)\n metrics.update(val_log)\n\n # test_log = self._test_epoch()\n # metrics.update(test_log)\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return metrics", "title": "" }, { "docid": "49830a699e0de9a5d173ef6c9289a0f7", "score": "0.697152", "text": "def train(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "a51fff0b0f36e1153fa146fc84941ffd", "score": "0.6961422", "text": "def set_train(self):\r\n self.network.train()", "title": "" }, { "docid": "86f2b7d44e634c2299c476bcf75c6a4f", "score": "0.6957973", "text": "def train(self):\n if self.is_master:\n logger.info(\"Starting training\")\n self.last_log = time.time()\n self.student.train()\n self.teacher.eval()\n\n for _ in range(self.params.n_epoch):\n if self.is_master:\n logger.info(f\"--- Starting epoch {self.epoch}/{self.params.n_epoch-1}\")\n if self.multi_gpu:\n torch.distributed.barrier()\n\n iter_bar = tqdm(self.dataloader, desc=\"-Iter\", disable=self.params.local_rank not in [-1, 0])\n for batch in iter_bar:\n if self.params.n_gpu > 0:\n batch = tuple(t.to(f\"cuda:{self.params.local_rank}\") for t in batch)\n\n if self.mlm:\n token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)\n else:\n token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)\n self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)\n\n iter_bar.update()\n iter_bar.set_postfix(\n {\"Last_loss\": f\"{self.last_loss:.2f}\", \"Avg_cum_loss\": f\"{self.total_loss_epoch/self.n_iter:.2f}\"}\n )\n iter_bar.close()\n\n if self.is_master:\n logger.info(f\"--- Ending epoch {self.epoch}/{self.params.n_epoch-1}\")\n self.end_epoch()\n\n if self.is_master:\n logger.info(\"Save very last 
checkpoint as `pytorch_model.bin`.\")\n self.save_checkpoint(checkpoint_name=\"pytorch_model.bin\")\n logger.info(\"Training is finished\")", "title": "" }, { "docid": "fa7c726598edd501ff14a428c0d9a3a5", "score": "0.69519264", "text": "def train_step(x1_batch, x2_batch, y_batch, epoch, batch):\n if random()>0.5:\n feed_dict = {\n siameseModel.input_x1: x1_batch,\n siameseModel.input_x2: x2_batch,\n siameseModel.input_y: y_batch,\n siameseModel.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n else:\n feed_dict = {\n siameseModel.input_x1: x2_batch,\n siameseModel.input_x2: x1_batch,\n siameseModel.input_y: y_batch,\n siameseModel.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n _, step, loss, accuracy, dist, sim = sess.run([tr_op_set, global_step, siameseModel.loss, siameseModel.accuracy, siameseModel.distance, siameseModel.temp_sim], feed_dict)\n time_str = datetime.datetime.now().isoformat()\n if batch*(epoch+1) % FLAGS.log_every == 0:\n print(\"TRAIN {}: epoch/step {}/{}, loss {:g}, f1 {:g}\".format(time_str, epoch, batch, loss, accuracy))\n #train_summary_writer.add_summary(summaries, step)\n # print(y_batch, dist, sim)\n return loss, accuracy", "title": "" }, { "docid": "61eed29ca8f6271cd3f3a59c1c580c66", "score": "0.69505745", "text": "def train(self, training_set, training_labels, epochs):\n # Training\n self.model.fit(training_set, training_labels, n_epoch=epochs, validation_set=0.2,\n show_metric=True, run_id=\"dense_model\")", "title": "" } ]
d5d17d46c3da5c9a52da6df460e4060b
Generates a sample of size iterations of large numbers in the range 1 < n < p-1
[ { "docid": "06e0f0ada5afc42233cd49a94afc253c", "score": "0.77541995", "text": "def getSamplesLargeNumber(p, iterations):\r\n aValues = []\r\n while len(aValues) != iterations:\r\n sample = rd.randrange(2, p - 1)\r\n if sample not in aValues:\r\n aValues.append(sample)\r\n return aValues", "title": "" } ]
[ { "docid": "054dc7fa11549fb6cf88daddc630bcc0", "score": "0.7613719", "text": "def gen_samples(n) :\n\tsamples = [s for s in gen_sample(n)]\n\treturn samples", "title": "" }, { "docid": "a2f5b7a4bb65b9f77fd742c49250ddaa", "score": "0.75296617", "text": "def sampler(n):\r\n np.random.seed(0)\r\n return np.reshape(np.random.uniform(0,10,n),(n,1))", "title": "" }, { "docid": "272b4d1a846c8bd2769dd4a394f4a78c", "score": "0.7444329", "text": "def sample(self, Np=1):\n pass", "title": "" }, { "docid": "2e3d21764936cf3775b5d90ab3158ad6", "score": "0.73775864", "text": "def sample( n, p ):\n assert type(p) is list\n cnt = 0\n ix = list()\n for prob in p:\n if cnt == 0:\n ix = np.hstack( (ix, np.zeros( n * prob)))\n else:\n ix = np.hstack( (ix, np.ones( n * prob) * cnt) )\n cnt += 1\n\n np.random.shuffle( ix )\n ix = list(ix)\n \n print len(ix)\n print n\n\n while n - len(ix) > 0:\n ix.append(0)\n return ix", "title": "" }, { "docid": "1b6ad29bd7d0b8c0ab9182ab608c66d1", "score": "0.7213443", "text": "def bruteForceSampleN(l):\n f = int(sys.argv[2]) - 1\n for i in [random.randint(0, (f ** l) - 1) for x in range(500000)]:\n print(seqFromNumN(i, f, l))", "title": "" }, { "docid": "6815419a7f4e2d88d9c3d3bb9cc27d4d", "score": "0.7069024", "text": "def testBOFnSample(n=1):\n\n return np.random.uniform(low=-1, high=2, size=(n,1)).squeeze()", "title": "" }, { "docid": "9a8871b71216a596cef7b4b2c58dac71", "score": "0.7055669", "text": "def simulate_sample(self, n) -> list:\n pass", "title": "" }, { "docid": "4f701c080f891c6daa837c175130698d", "score": "0.7036545", "text": "def Sample(self, n):\n return [self.Random() for i in range(n)]", "title": "" }, { "docid": "43563f709ddef2bb511bddcee7109a3b", "score": "0.70106876", "text": "def notch_sampler(pm1d, N, x, **callkwargs):\n v_init = deepcopy(pm1d.v)\n for _ in tqdm(range(N)):\n for p in pm1d.v:\n pm1d.v[p] = pint_safe_uniform(pm1d.v._l[p], pm1d.v._u[p])\n\n fr = pm1d.v['fr']\n phi = np.cos(pm1d.v['phi'])\n Ql = 1/(1/(10**pm1d.v['Qi']) + 1/((10**pm1d.v['Qc'])*np.cos(phi)))\n span = 20 * (fr/Ql) # the estimated fwhm\n\n samples = pm1d(np.linspace(fr - span/2, fr + span/2, 801), **callkwargs)\n yield samples, deepcopy(pm1d.v)", "title": "" }, { "docid": "b26b699d259382be7a6732c644f70023", "score": "0.68488276", "text": "def sample(self, n):\n batch = []\n segment = self.tree.total() / n\n\n for i in range(n):\n a = segment * i\n b = segment * (i + 1)\n\n s = random.uniform(a, b)\n (idx, p, data) = self.tree.get(s)\n batch.append((idx, data))\n\n return batch", "title": "" }, { "docid": "cb2321ca0a53274daa9ee29b2afc3f6c", "score": "0.6839405", "text": "def sample(self, n):\n x = []\n while len(x) < n:\n proposal = np.matrix(np.random.uniform(0, 1, [n, self.d]))\n us = np.matrix(np.random.uniform(0,1,n)).T\n to_retain = np.array(us < self.eval(proposal)/2.0).reshape(n,)\n if x == []:\n x = proposal[to_retain,:]\n else:\n x = np.append(x, proposal[to_retain,:], 0)\n return np.matrix(x[0:n,:])", "title": "" }, { "docid": "3da0bcc1f40d4612de5a2243b21c2e30", "score": "0.6804754", "text": "def sample_n(self, n=10, random=False):\n if random is True:\n seed = random_int()\n elif random is False:\n seed = 0\n else:\n RaiseIt.value_error(random, [\"True\", \"False\"])\n\n rows_count = self.count()\n if n < rows_count:\n # n/rows_count can return a number that represent less the total number we expect. 
multiply by 1.1 bo\n fraction = (n / rows_count) * 1.1\n else:\n fraction = 1.0\n\n return self.sample(False, fraction, seed=seed).limit(n)", "title": "" }, { "docid": "ed86e7bc1eea2f6c4a26dbbac07a9980", "score": "0.6801164", "text": "def generate_samples(n, m, p, q, pi):\n z = np.random.choice([0, 1], size=n, replace=True, p=[pi, 1 - pi])\n x = np.zeros(shape=[n, m], dtype='int')\n\n p_q = [p, q]\n for i, z_i in enumerate(z):\n p_i = p_q[z_i]\n x[i, :] = np.random.choice([0, 1], size=m, replace=True, p=[1 - p_i, p_i])\n\n return (x, z)", "title": "" }, { "docid": "28357f7e5fe08dbc65ce3513ecb404eb", "score": "0.679389", "text": "def sample(self, n):\n x = []\n while len(x) < n:\n proposal = np.matrix(np.random.uniform(0, 1, [n, 1]))\n us = np.matrix(np.random.uniform(0,1,n)).T\n to_retain = np.array(us < self.eval(proposal)/2.0).reshape(n,)\n if x == []:\n x = proposal[to_retain,:]\n else:\n x = np.append(x, proposal[to_retain,:], 0)\n return np.matrix(x[0:n])", "title": "" }, { "docid": "28357f7e5fe08dbc65ce3513ecb404eb", "score": "0.679389", "text": "def sample(self, n):\n x = []\n while len(x) < n:\n proposal = np.matrix(np.random.uniform(0, 1, [n, 1]))\n us = np.matrix(np.random.uniform(0,1,n)).T\n to_retain = np.array(us < self.eval(proposal)/2.0).reshape(n,)\n if x == []:\n x = proposal[to_retain,:]\n else:\n x = np.append(x, proposal[to_retain,:], 0)\n return np.matrix(x[0:n])", "title": "" }, { "docid": "a0328027fd13b80c2a35281362d54c66", "score": "0.67819244", "text": "def generer_population(n):\n return [generate_scorpion() for _ in range(n)]", "title": "" }, { "docid": "20e01a72893d0b4b5d7b1e72a6877677", "score": "0.67693436", "text": "def generate_distribution(n):\n assert n >= 1\n distribution = [Fraction(random.random()).limit_denominator(10)\n for _ in range(n)]\n sum_ = sum(distribution)\n distribution = [p/sum_ for p in distribution]\n return distribution", "title": "" }, { "docid": "9c6ea00bc0d18a813c9a3b6552134b36", "score": "0.6735364", "text": "def get_random_n_points(n):\n sel = random.sample(xrange(60000), n)\n return sel", "title": "" }, { "docid": "08653bb17c230d2da9a7ef362f5c637a", "score": "0.6727722", "text": "def gen(n):\n\n for j in xrange(n):\n yield j", "title": "" }, { "docid": "81348f950a9eaca013220a6e65014edc", "score": "0.6720412", "text": "def generate_N(self):\n while True:\n rp = random.randint(1,len(self.prime))\n rq = random.randint(1,len(self.prime))\n if rp != rq:\n break\n \n self.p = self.prime[rp]\n self.q = self.prime[rq]\n self.N = int(self.p * self.q)\n print('p: {}, q: {}, N: {}'.format(self.p,self.q,self.N))", "title": "" }, { "docid": "de2b627bf1d99f05cd9f430395051413", "score": "0.67169845", "text": "def sample_multi(p_array: NDArray, n: int, random_seed: int, out_array: NDArray = None) -> NDArray:\n np.random.seed(random_seed)\n\n nbf_cumsum(p_array)\n\n if out_array is None:\n out_array = np.zeros(n, dtype=np.int64)\n else:\n assert len(out_array) == n\n\n for i in range(n):\n r = np.random.uniform(MIN_RANDOM_VALUE, 1.0, 1)[0]\n out_array[i] = logarithmic_search(r, p_array)\n return out_array", "title": "" }, { "docid": "642d15188b203828cde1f347f111a992", "score": "0.6685638", "text": "def sample_pagerank(corpus, damping_factor, n):\n #randint includes last one and first one\n #sys.setrecursionlimit(SAMPLES + 100)\n whichPlace = randint(0,len(corpus)-1)\n startPage = list(corpus.keys())[whichPlace]\n passDict = dict()\n for key in corpus.keys():\n passDict[key] =0\n passDict[startPage] = 1/n\n increaseFactor = 1/n\n #print(startPage)\n 
return completeSample(corpus, damping_factor, n-1, startPage, passDict,increaseFactor)\n\n\n #raise NotImplementedError", "title": "" }, { "docid": "af2ea20c014572e0172cb9e88d509ee9", "score": "0.66728723", "text": "def data_gen_simple_interactions(b, n):\n batch = np.empty(shape=(b, n), dtype=np.float)\n while True:\n for i in range(b):\n batch[i] = [np.random.uniform(-1, 1)] * n # + np.random.randn(batch.shape[-1]) * 1e-2\n\n yield batch", "title": "" }, { "docid": "b22bf0310a519e54eb02d74e34221535", "score": "0.66634816", "text": "def genetic_sample(n):\n\n alpha1 = np.random.uniform(low=120.0, high=200.0, size=(n,1))\n alpha2 = np.random.uniform(low=15.0, high=16.0, size=(n,1))\n gamma = np.random.uniform(low=2.1, high=2.9, size=(n,1))\n beta = np.random.uniform(low=0.85, high=1.15, size=(n,1))\n eta = np.random.uniform(low=1.3, high=2.7, size=(n,1))\n K = np.random.uniform(low=np.log10(2.3e-5), high=np.log10(3.7e-5), size=(n,1)) # Log prior\n\n return np.hstack([alpha1, alpha2, gamma, beta, eta, K]).squeeze()", "title": "" }, { "docid": "4db0b53bc1791b54bac1d636e90daedc", "score": "0.6648485", "text": "def rosenbrockSample(n=1, dim=2):\n\n return np.random.uniform(low=-5, high=5, size=(n,dim)).squeeze()", "title": "" }, { "docid": "25a509a15f9e258f52c1bf24dcbb9296", "score": "0.66481316", "text": "def generate_weight_block(N_out,N_in,p):\n return 1.*(np.random.rand(N_out,N_in)<p)", "title": "" }, { "docid": "77763439e9565888bf0b1c2c8148152e", "score": "0.6637659", "text": "def gen_sample_probability(val, k, n, numTrials):\n ct = 0\n for i in range(numTrials):\n if sim_n_rolls(val, k, n):\n ct+=1\n return ct/float(numTrials)", "title": "" }, { "docid": "4edfc866bdc9a4edba9cb51cc9fd5be2", "score": "0.6632115", "text": "def random_sample(p):\n\n length, _ = p.shape\n s = torch.zeros(length, dtype=torch.int64, device=device)\n for i in range(length):\n s[i] = torch.multinomial(p[i], 1)\n return s", "title": "" }, { "docid": "0cf6e340ed875bc3967959245995681c", "score": "0.66264594", "text": "def sample_batch(memory, n):\r\n batch = rnd.sample(memory, n) # List containing tuples\r\n return batch", "title": "" }, { "docid": "f408c159cf70000861dd19b4487f2848", "score": "0.66167367", "text": "def sample_size(n):\n if n < 30:\n return int(n)\n sqrt = n ** 0.5\n if sqrt < 30:\n return 30\n if sqrt > 500:\n return 500\n return int(sqrt)", "title": "" }, { "docid": "1802dcc7482e100bb751572c56b9d8da", "score": "0.66118777", "text": "def sample(self, n):\n # Create an array to store the samples, use list for speed and convert to array later\n memory_b = []\n # Initialize the position of the samples and the weights later used for fitting\n b_idx, b_ISWeights = np.empty((n,), dtype=np.int32), np.empty((n), dtype=np.float32)\n \n # Divide the total priority into n ranges to sample from\n segment = self.SumTree.total_priority/n\n \n # Every time a batch is sampled, PER_b is increased, till it reaches 1\n self.PER_b = np.min([1.,self.PER_b+self.PER_b_increment])\n # The adjusted value is returned to our agent to allow tracking it\n self.agent.PER_b = self.PER_b\n \n # The max weight is calculated\n p_min = np.max((np.min(self.SumTree.tree[-self.SumTree.capacity:]),self.PER_e**self.PER_a))/self.SumTree.total_priority\n # np.max is necessary when the SumTree is not completely filled\n max_weight = (p_min*n)**(-self.PER_b)\n \n # For every segment, a sample is drawn\n for i in range(n):\n # Random value in our range\n value = np.random.uniform(segment*i, segment*(i+1))\n # A sample is drawn\n index, 
priority, data = self.SumTree.get(value)\n \n # The probability to be sampled is proportional to the priority\n sampling_probability = priority/self.SumTree.total_priority\n \n # The weight for fitting is calculated and stored\n b_ISWeights[i] = np.power(n*sampling_probability,-self.PER_b)/max_weight\n # The index is stored\n b_idx[i]= index\n # The step is stored\n memory_b.append([data])\n \n return b_idx, np.asarray(memory_b), b_ISWeights", "title": "" }, { "docid": "915c8fe0ed47eb0b656b35c861320a1c", "score": "0.66041726", "text": "def main(n):\n distribution = generate_distribution(n)\n print(distribution)", "title": "" }, { "docid": "ce9a00711adc78942c6aa356695b7e76", "score": "0.65943164", "text": "def _sample_level(n: float):\n return np.random.uniform(low=0.1, high=n)", "title": "" }, { "docid": "ce9a00711adc78942c6aa356695b7e76", "score": "0.65943164", "text": "def _sample_level(n: float):\n return np.random.uniform(low=0.1, high=n)", "title": "" }, { "docid": "0e997fbd44e2255044720c30ce51e645", "score": "0.65801805", "text": "def generate_random_numbers01(N, dim, max_v = 10000):\n random_ints = np.random.randint(max_v, size=(N, dim))\n init_lb = 0\n return (random_ints - init_lb)/(max_v - 1 - init_lb)", "title": "" }, { "docid": "a93d2f4fe8f78b8a7468a71b7e828097", "score": "0.6563636", "text": "def geometricP( nt ):\n return np.random.poisson(nt)", "title": "" }, { "docid": "27d3d52ec0fd4912294491d63ddf5356", "score": "0.6547912", "text": "def pubMutGenerator(n,size_par,mean_depth,purity):\n prob_par=size_par*1.0/(size_par+mean_depth)\n mean_af = 0.5*purity\n depth_pub = []\n maf_pub = []\n for k in range(0,n):\n correct = 0\n while correct == 0:\n site_depth = np.random.negative_binomial(size_par,prob_par)\n if site_depth >= 15:\n correct =1\n var_reads = np.random.binomial(site_depth,mean_af)\n site_maf = var_reads*1.0/site_depth\n depth_pub += [site_depth]\n maf_pub += [site_maf]\n return depth_pub,maf_pub", "title": "" }, { "docid": "25c847757b2c06996d594f7d21154d64", "score": "0.6547209", "text": "def GenerateSampleGaps(self, n):\n cdf_zb = thinkstats2.Cdf(self.pmf_zb)\n sample = cdf_zb.Sample(n)\n return sample", "title": "" }, { "docid": "c5ee3261fe0d3812deef6cebb080aba0", "score": "0.6537104", "text": "def sim_bernoulli(p, n = 25):\n temp = np.zeros(shape = (len(p), n))\n for i in range(len(p)): \n temp[i,:] = nr.binomial(1, p[i], n)\n return(temp)", "title": "" }, { "docid": "ef466294977834450e16e31d0810b516", "score": "0.65323746", "text": "def gens12(size, p1, tau1, tau2, T, sigma, generator=None):\n size = (size,) if np.isscalar(size) else tuple(size)\n if generator is None:\n generator = np.random.default_rng()\n \n samp = generator.standard_exponential(size=size)\n choice = generator.binomial(n=1, p=p1, size=size).astype(bool)\n samp[choice] *= tau1\n samp[~choice] *= tau2\n \n if sigma != 0:\n normsamp = generator.standard_normal(size=size)\n normsamp *= sigma\n samp += normsamp\n \n if T != 0:\n unifsamp = generator.uniform(0, T, size=size)\n samp += unifsamp\n \n return samp", "title": "" }, { "docid": "dc551667fb64400723a8ddedbe98ab5f", "score": "0.6511141", "text": "def Sample(iterable, n, rng):\n reservoir = []\n for i, item in enumerate(iterable):\n if i < n:\n reservoir.append(item)\n continue\n r = rng.randint(0, i)\n if r < n:\n yield False, reservoir[r]\n reservoir[r] = item\n else:\n yield False, item\n for item in reservoir:\n yield True, item\n return", "title": "" }, { "docid": "4ad3187453f08dcb44f7b0e00a8429c0", "score": "0.6505986", "text": "def 
sample(self, n):\n batch = []\n segment = self.tree.total() / n\n\n for i in range(n): # tree is divided into segments of equal size\n a = segment * i # take one sample out of every segment\n b = segment * (i + 1)\n\n s = random.uniform(a, b)\n (idx, p, data) = self.tree.get(s) # get with O(log n)\n batch.append((idx, data))\n\n return batch", "title": "" }, { "docid": "0f4c403d2a206d18cef951e4aadf2345", "score": "0.64860386", "text": "def RandomSample(distribution, n):\n # assert n <= len(distribution), \"Cannot select %i individuals from a population of less than %i individuals\"%(n,n)\n if n > len(distribution): n = len(distribution)\n dist = distribution.copy()\n sample = []\n for i in xrange(n):\n try: dist = norm(dist)\n except ValueError: break \n item = RandomSelect(dist)\n sample.append(item)\n dist[item] = 0.0\n return array(sample)", "title": "" }, { "docid": "96aef084d9756ea9be5b1992fddcffaa", "score": "0.6476546", "text": "def _generate_varying_sequences(self, random_factory, n, min_size, max_size, none_prob):\n base_size = 10000\n base = random_factory(base_size + max_size)\n data = []\n for i in range(n):\n off = self.rnd.randint(base_size)\n if min_size == max_size:\n size = min_size\n else:\n size = self.rnd.randint(min_size, max_size + 1)\n data.append(base[off:off + size])\n self.sprinkle_nones(data, none_prob)\n assert len(data) == n\n return data", "title": "" }, { "docid": "cfe47de90fb6d28f6bd5eda74825d539", "score": "0.64728665", "text": "def Sample(self, n):\n size = n,\n return numpy.random.beta(self.alpha, self.beta, size)", "title": "" }, { "docid": "cfe47de90fb6d28f6bd5eda74825d539", "score": "0.64728665", "text": "def Sample(self, n):\n size = n,\n return numpy.random.beta(self.alpha, self.beta, size)", "title": "" }, { "docid": "0924e003229ba94880e3f94e2803bd0c", "score": "0.6472682", "text": "def generate_rand_ints(n):\r\n rand_ints = list()\r\n for x in range(n):\r\n rand_ints.append(random.randint(0, 1_000_000))\r\n\r\n return rand_ints", "title": "" }, { "docid": "492905d76b48af548b82a084ba85ae4a", "score": "0.6464729", "text": "def makeTestCase(n):\n return random.sample(range(random_minimum, random_maximum), n)", "title": "" }, { "docid": "33470b0163c1f636988689360002dbd7", "score": "0.64588755", "text": "def random_counts(n):", "title": "" }, { "docid": "7f406e4fda0e17a0fc2b73b967eaf1c1", "score": "0.6457134", "text": "def _SampledQuickFitness(population, payoff, N, samples):\n n = population\n L = len(n)\n p = []\n for i in xrange(L):\n s = 0.0\n for k in xrange(samples):\n t = tuple([i]+[RandomSelect(n) for v in xrange(N-1)])\n s += payoff[t]\n p.append(s/samples)\n return p", "title": "" }, { "docid": "5a65a71170da192f4b7ef8ca0c075762", "score": "0.64556944", "text": "def _gen_rpt(self):\n return [self._rng.randint(d) for d in self._map_size]", "title": "" }, { "docid": "b7d8b75151b16663190e791979ada7e6", "score": "0.6449973", "text": "def batch_random_generator(N, g_prob = 0.15, n=None, N_min=8, N_max=20, strategy='rand', show_progress=True):\n \n \n args = [(g_prob, n, N_min, N_max, strategy)]*N\n return run_imap_multiprocessing(auxilary, args, show_prog = show_progress)", "title": "" }, { "docid": "b9c25a2accbf9ac4dc7d8597a75f10aa", "score": "0.64470196", "text": "def batch_generate_samples(self, n=1000):\n\n cube_no = np.random.choice(self.box_nos, size=n, p=self.volume_fracs)\n\n samples = np.random.rand(n, self.n_dim)\n samples *= 2*self.box_dims[cube_no,:]\n samples += self.points[cube_no,:] - self.box_dims[cube_no,:]\n\n in_n = 
self.in_n_boxes(samples)\n mask = (np.random.rand(n) < (1/in_n))\n\n self.samples = samples[mask, :]\n self.no_of_samples = self.samples.shape[0]\n self.sample_no = 0", "title": "" }, { "docid": "8d582d2f084e9cc8f42b44b141c9fda8", "score": "0.64432526", "text": "def GenerateSamplePassengers(self, lam, n):\n zs = self.GenerateSampleGaps(n)\n xs, ys = SplitGaps(zs)\n\n res = []\n for x, y in zip(xs, ys):\n k1 = numpy.random.poisson(lam * x)\n k2 = numpy.random.poisson(lam * y)\n res.append((k1, y, k2))\n\n return res", "title": "" }, { "docid": "60f49b0461a62ec491e0c0ab2d27e831", "score": "0.6441084", "text": "def gen_sample(p, n):\n def add_x2_y2(a):\n \"\"\"Accepts an (N,2) array, adds 2 more columns\n which are first col squared, second col squared.\n \"\"\"\n return logistic.vstack([a.T, a[:,0]**2, a[:,1]**2]).T\n pos = gaussian(mean_pos, cov_pos, p)\n pos = add_x2_y2(pos)\n neg = gaussian(mean_neg, cov_neg, n)\n neg = add_x2_y2(neg)\n return logistic.sample_positive(c, pos, neg)", "title": "" }, { "docid": "4ad0c5db2d5a0fe56c5f7a7f50433cbe", "score": "0.64403325", "text": "def gen_rnd_smpl(low: int, high: int, size: int = 1000, d_type=np.int16) -> np.array:\n return np.random.randint(low, high, size, d_type)[:10]", "title": "" }, { "docid": "38e90b52f9a72bafa6c2d1f567605f71", "score": "0.64360803", "text": "def rand_uni(p=10000.0):\n \n return (lcg()%(p))/p", "title": "" }, { "docid": "8033c66f450c94149c298f98a989e6da", "score": "0.64342856", "text": "def sample(generator, limit) :\n numbers = []\n for i in range(limit) :\n numbers.append(generator.random())\n \n # generate distributions\n x = []\n y = {}\n y['observed'] = []\n y['expected'] = []\n \n # sample\n for i in range(100) :\n i = i/100\n x.append(i)\n e = empirical(i, numbers)\n u = uniform(i)\n y['observed'].append(e)\n y['expected'].append(u)\n \n return (x, y)", "title": "" }, { "docid": "030e60370f8939b5a1caa71c04009a65", "score": "0.64326644", "text": "def random(self, n=None, normalize=False):", "title": "" }, { "docid": "bd0f7a1b1f9b5627482891a280d46342", "score": "0.6422288", "text": "def generate_range(n):\n i = 0\n while i < n:\n yield i\n i += 1", "title": "" }, { "docid": "d7bbbd8acf6ffb72ce8fd7189292431c", "score": "0.6407323", "text": "def generate(self):\n\t\tret = np.zeros(self.n)\n\t\tix = np.random.choice(np.arange(self.n), self.k, replace=False)\n\t\tret[ix] = 1 \n\t\treturn ret", "title": "" }, { "docid": "b1859c7028d4c04836f6ddd3a5e980b6", "score": "0.6390528", "text": "def create_samples_uniform(n, parameters):\r\n lower_bound = parameters[0]\r\n upper_bound= parameters[1] - parameters[0]\r\n vector = uniform.rvs(size = n, loc = lower_bound, scale = upper_bound)\r\n return vector", "title": "" }, { "docid": "3cc914ea0aeb87b3af102f3a22c07909", "score": "0.6389754", "text": "def gen_sample(self):\n start, stop = self.config['start'], self.config['stop']\n return start + (stop - start) * np.random.random_sample()", "title": "" }, { "docid": "3cc914ea0aeb87b3af102f3a22c07909", "score": "0.6389754", "text": "def gen_sample(self):\n start, stop = self.config['start'], self.config['stop']\n return start + (stop - start) * np.random.random_sample()", "title": "" }, { "docid": "6740bab61e1510d16cd73bb899a0b004", "score": "0.6389651", "text": "def generate_sample(n_samples, seed=None):\n random_state = np.random.RandomState(seed)\n hypothesis_idx = np.random.choice(3, p=PRIOR_PROBS)\n print(hypothesis_idx)\n dist = HYPOTHESIS_SPACE[hypothesis_idx]\n return dist.rvs(n_samples, random_state=random_state)", "title": 
"" }, { "docid": "838d6ed7e07d4b7244fd189855618375", "score": "0.638681", "text": "def PRS_generator(n = 100, prefer = \"R\"):\n \n choice = [\"R\", \"S\", \"P\"]\n choice.remove(prefer)\n Sq = np.zeros((n))\n np.random.seed(10) # this seed is for re-run the program\n random_array = np.random.uniform(0, 1, n)\n Sq[np.logical_and(random_array >= 0., random_array < 0.5)] = 1\n Sq[np.logical_and(random_array >= 0.5, random_array < 0.75)] = 0\n Sq[np.logical_and(random_array >= 0.75, random_array < 1.)] = -1\n Sq = Sq.astype(int)\n Sequence = []\n \n for i in range(n):\n if Sq[i] == 1:\n Sequence.append(prefer)\n if Sq[i] == 0:\n Sequence.append(choice[0])\n if Sq[i] == -1:\n Sequence.append(choice[1])\n \n return Sequence", "title": "" }, { "docid": "fe2949eed133c3d0a0bdf051e8e9031d", "score": "0.63847566", "text": "def generate(self, k, size):\n states = np.arange(0,k)\n sample = np.empty((k,int(size/k))) #should have state equivalent rows and repetation equivalent columns\n for i in states:\n sample[i] = np.full((int(size/k)), i)\n i += 1\n sample = sample.flatten()\n self.sample_ = sample.astype(int)\n np.random.shuffle(self.sample_)", "title": "" }, { "docid": "982d046fa4e6e43a5f4e86128cc7d9c5", "score": "0.63801473", "text": "def generate_random_points(n, p):\n x = np.frombuffer(os.urandom(4*n*p), dtype = np.uint32)\n x = x.reshape(n,p)\n x = [row.tolist() for row in x]\n return x", "title": "" }, { "docid": "6d55cbae14857bf4f0841f0cfc70a282", "score": "0.6375608", "text": "def pptest(n):\n \n if n<=1: \n return 0\n\n # se qualquer um dos primos eh um fator, ja era\n bases = [random.randrange(2,50000) for x in xrange(90)]\n\n \n for b in bases:\n if n%b==0: \n return 0\n \n tests,s = 0L,0\n m = n-1\n\n # transformando (n-1) em (2**s) * m\n\n while not m&1: #enquanto m eh impar\n m >>= 1\n s += 1\n\n for b in bases:\n tests += 1\n isprob = algP(m,s,b,n)\n if not isprob: \n break\n \n if isprob: \n return (1-(1./(4**tests)))\n \n return 0", "title": "" }, { "docid": "b605cac4a3499b3b63886eb925631060", "score": "0.6375045", "text": "def _SampledQuickFitness2(population, payoff, samples):\n n = population\n L = len(n)\n p = []\n for i in xrange(L):\n s = 0.0\n for k in xrange(samples):\n s += payoff[i, RandomSelect(n)]\n p.append(s/samples)\n return p", "title": "" }, { "docid": "67c3803282c9792401d3bd4bb416b62c", "score": "0.6368683", "text": "def sample(self, N):\n\t\treturn np.vstack([self.sample_single() for n in range(N)])", "title": "" }, { "docid": "eeb04520d97dbcf16dd81ac20952295d", "score": "0.636016", "text": "def sample_inputs(n, num):\n return random_inputs(n, num) if num < 2**n else all_inputs(n)", "title": "" }, { "docid": "690bf20738c736347e5fb25bbbef88b9", "score": "0.63451904", "text": "def mlp1def(n, m):\n\n return np.random.uniform(-1, 1, m * (n + 1)).reshape([m, n + 1])", "title": "" }, { "docid": "5f4a5427578fc78c884e525c578172aa", "score": "0.63434374", "text": "def bernoulli_trials(n,p):\n n_success = 0\n for i in range(n):\n a = np.random.random()\n if a<p:\n n_success += 1\n return n_success", "title": "" }, { "docid": "82f3820abc45b3a247156f23dbc35331", "score": "0.6343206", "text": "def get_p_value(self, n=1e4):\n k = 0.0\n seq1_score = self.get_nw_score()\n seq1 = self.seq1\n for i in range(int(n)):\n self.seq1 = ''.join(random.sample(self.seq1, len(self.seq1)))\n self.run_needleman_wunsch()\n k_score = self.get_nw_score()\n if k_score >= seq1_score:\n k += 1\n return (k+1)/(n+1)", "title": "" }, { "docid": "9820e54041ff5bb59eb1653f2336aaa9", "score": "0.633962", 
"text": "def take(self, n=128):\n return list(np.random.choice(self.list(), n))", "title": "" }, { "docid": "e21bf65e7c7f38eecbb1671ee11a5e24", "score": "0.6339347", "text": "def sample_process(n_qubits, all_same = True):\n total_circuit, c_length = sample_ladder(n_qubits, all_same = all_same)\n return compute_mps(n_qubits, total_circuit).zero_overlap()", "title": "" }, { "docid": "faa7e51ed76a970ee081e09ba31c66d7", "score": "0.6335393", "text": "def genNdigitNPower( limit=30 ):\n for p in range(limit):\n i= 0\n while len(digits(i**p)) <= p:\n if len(digits(i**p)) == p:\n yield i, p, i**p\n i += 1", "title": "" }, { "docid": "d653f95c355f96569980a471d56366a1", "score": "0.63294274", "text": "def rand_bern(p=.5, size=1):\n return rand.binomial(n=1, p=p, size=size)", "title": "" }, { "docid": "076414973309cef72bac061fa990a204", "score": "0.6326559", "text": "def sample_index(p):\n return np.random.multinomial(1,p).argmax()", "title": "" }, { "docid": "4481c0335b6826678cb59cb8fa825298", "score": "0.6326455", "text": "def _sample(p):\n return [i for i, entry in enumerate(multinomial(1, p)) if entry != 0][0]", "title": "" }, { "docid": "b29ae3e1063bc15ff68f52ad0a1cac7c", "score": "0.63261163", "text": "def sample(self, n):\n phi = self.normalize(inplace=False)\n p = phi.values.ravel()\n\n # TODO: Fix this to make it work natively in torch.\n p = compat_fns.to_numpy(p)\n\n indexes = np.random.choice(range(len(p)), size=n, p=p)\n samples = []\n index_to_state = {}\n for index in indexes:\n if index in index_to_state:\n samples.append(index_to_state[index])\n else:\n assignment = self.assignment([index])[0]\n samples.append(assignment)\n index_to_state[index] = assignment\n\n return pd.DataFrame([{k: v for k, v in s} for s in samples])", "title": "" }, { "docid": "b5143f8331f505f3d54fc2c97efea550", "score": "0.63256425", "text": "def sample_n(self, size):\n for i in range(size):\n yield self.alias_generation()", "title": "" }, { "docid": "2a1a4010cffee2f3d76ab7100890efd0", "score": "0.6318134", "text": "def sample(self, n):\n z = []\n if len(self.cont_dim):\n d = self.cont_dim[0]\n z_cont = self._reparam_gauss(torch.zeros(n, d), torch.ones(n, d))\n z.append(z_cont)\n for cat_dim in self.cat_dims:\n z_cat = torch.zeros(n, cat_dim)\n indices = Categorical(torch.ones(n, cat_dim) / cat_dim).sample()\n z_cat[torch.arange(n, out=torch.LongTensor()), indices] = 1\n # z_cat = F.gumbel_softmax(torch.ones(n, cat_dim), tau=self.temp) \n z.append(z_cat)\n z = torch.cat(z, dim=1).to(self.device)\n return self.decoder(z)", "title": "" }, { "docid": "65a821a4f171b8ec1d38bdb21a18ee48", "score": "0.6312726", "text": "def random_sample(max_size):\n max_size = int(max_size)\n for i in range(1, max_size + 1):\n print(i)\n g = Grille_Generator(i, i)\n g.generate()\n with open(\"input/{}x{}\".format(i, i), 'w') as f:\n inputs = [min(get_possible(val)) for val in g.tuiles]\n g.print_sol(inputs, f=f)", "title": "" }, { "docid": "d106134e2a2913f23211699b6a3c6593", "score": "0.63094676", "text": "def pyGen():\n import itertools\n for m in itertools.count(2):\n for n in range(1, m):\n yield (m**2 - n**2, 2 * m * n, m**2 + n**2)", "title": "" }, { "docid": "cbdd9bac6546635b04b439ab329c7567", "score": "0.63081115", "text": "def binomial(n,p):\n return sum(bernoulli_trial(p) for _ in range(n))", "title": "" }, { "docid": "19c8e554633f67166835088174694ee9", "score": "0.6301231", "text": "def generate_data(N, p1=0.5):\n\n d = np.random.rand(N)\n return np.where(d<=p1, 1, 0)", "title": "" }, { "docid": 
"b1b8bfa688ad00ae2d76645060c021be", "score": "0.630114", "text": "def sample_z(m, n):\n return np.random.uniform(-1., 1., size=[m, n])", "title": "" }, { "docid": "f99100f030e3f88805f1d92c2e5c4930", "score": "0.6296934", "text": "def gen_poly(n):\r\n # 1.17 * sqrt(12289 / 8192)\r\n sigma = 1.43300980528773\r\n assert(n < 4096)\r\n f0 = [samplerz(0, sigma, sigma - 0.001) for _ in range(4096)]\r\n f = [0] * n\r\n k = 4096 // n\r\n for i in range(n):\r\n # We use the fact that adding k Gaussian samples of std. dev. sigma\r\n # gives a Gaussian sample of std. dev. sqrt(k) * sigma.\r\n f[i] = sum(f0[i * k + j] for j in range(k))\r\n return f", "title": "" }, { "docid": "d00acd121b5b3bd965066d5ce844dbc0", "score": "0.6293539", "text": "def GenerateSampleWaitTimes(self, n):\n cdf_y = thinkstats2.Cdf(self.pmf_y)\n sample = cdf_y.Sample(n)\n return sample", "title": "" }, { "docid": "9495571e3897ad4d45581793d1fd6425", "score": "0.62911016", "text": "def runif(n: int, min: float = 0.0, max: float = 1.0) -> List[float]:\n return numpy.random.uniform(low=min, high=max, size=n)", "title": "" }, { "docid": "e45b9cfc7286b63ab39d059733ac3162", "score": "0.62905115", "text": "def sample(seq_list, n):\n sample = SeqList()\n for i in range(n):\n sample += seq_list.random_seq()\n return sample", "title": "" }, { "docid": "c8b0dbf2d366a05510b9d69813b0f1dd", "score": "0.62892807", "text": "def sample(h, seed_ix, n):\n x = np.zeros((vocab_size, 1))\n x[seed_ix] = 1\n ixes = []\n print('sample')\n for t in range(n):\n # print(Wxh)\n # print(x)\n # print(np.dot(Wxh, x))\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n # print(y)\n # np.exp : 밑(base)이 자연상수 e 인 지수함수로 변환해준다.\n # print(np.sum(np.exp(y))) => 64.XXXXXXXXXX ~ 65.XXXXXXXXXX\n p = np.exp(y) / np.sum(np.exp(y))\n # print(p)\n # print(range(vocab_size))\n # print(p.ravel())\n # 샘플링. 
임의성을 부여하기 위해 argmax대신 array p에서 주어진 확률에 의해 하나의 문자를 선택\n # ravel : 행렬의 1행부터 순차적으로 원소 값을 불러와서 1차원 array를 만드는 함수\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n # ix = np.random.choice(65, 1)\n # print(ix)\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes", "title": "" }, { "docid": "eb510b9099a208c52acadcd1359535d3", "score": "0.6288319", "text": "def generate_hermite_betas(n=10):\n return [1.77245385] + list(np.arange(0.5, n * 0.5, 0.5))", "title": "" }, { "docid": "4d1f7c85243689f12d0bc31405996799", "score": "0.6287837", "text": "def sample(h, seed_ix, n, alpha):\n\n # Start Your code\n x = np.zeros((vocab_size, 1)) # init input vector with zero of size vocab_size\n x[seed_ix] = 1\n ixes = []\n for t in range(n):\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\n y = np.dot(Why, h) + by\n p = np.exp(alpha * y) / np.sum(np.exp(alpha * y))\n\n ix = np.random.choice(range(vocab_size), p=p.ravel())\n x = np.zeros((vocab_size, 1))\n x[ix] = 1\n ixes.append(ix)\n return ixes\n # End your code", "title": "" }, { "docid": "57c8dfe7cf058c8f2bebbcc602dfe90e", "score": "0.628396", "text": "def bernoulli_trials(n, p):\r\n # Initialize number of successes: n_success\r\n n_success = 0\r\n\r\n # Perform trials\r\n for i in range(n):\r\n # Choose random number between zero and one: random_number\r\n random_number = random.random()\r\n print 'random U generated: ',random_number\r\n if random_number < p:\r\n n_success=n_success+1\r\n print 'X value from bernoulli trial: 1'\r\n else:\r\n print 'X value from bernoulli trial: 0'\r\n\r\n print 'number of successes(1) among all the samples:'\t\t\t\r\n return n_success", "title": "" }, { "docid": "40bf78673a11e0b8802d78d8002f3e92", "score": "0.62778616", "text": "def sample_uniform(self):", "title": "" }, { "docid": "3e23a72158911dc0c7f5cdc69a6bf719", "score": "0.6273487", "text": "def powerlaw_sequence(n,expo=2.0):\n return numpy.random.zipf(expo,n)", "title": "" }, { "docid": "6c029dab9fbcd3bc37abb6485740595d", "score": "0.6268523", "text": "def randomize_probs(p):\n return np.power(np.random.rand(len(p)), 1.0 / p)", "title": "" } ]
f550739e33a551c18750f07fe5309ebb
Takes a start and end date and returns a dataframe of daily hitting statistics for every player that played on each day in the date range. start_date and end_date should be in YYYY-MM-DD format. Data is pulled from the PyBaseball API using its batting_stats_range() function. Each iteration through the date range prints the current date, or a message if no games were played that day.
[ { "docid": "445ff75c7e3a9efeefbda75e5b7faae7", "score": "0.7701687", "text": "def get_batting_data(start_date, end_date):\n start = datetime.strptime(start_date, \"%Y-%m-%d\")\n end = datetime.strptime(end_date, \"%Y-%m-%d\")\n dates_generator = [start + timedelta(days=x) for x in range(0, ((end-start).days) + 1)]\n dates = [date.strftime(\"%Y-%m-%d\") for date in dates_generator]\n batting_data_df = pd.DataFrame()\n batting_data = pd.DataFrame()\n for date in dates:\n try:\n batting_data = batting_stats_range(date) \n batting_data_df = batting_data_df.append(batting_data) \n print(date)\n except:\n print('Game not played')\n\n return batting_data_df", "title": "" } ]
[ { "docid": "44515bf5de9aac9a1fca871ee3471a1a", "score": "0.7267774", "text": "def get_pitching_data(start_date, end_date):\n start = datetime.strptime(start_date, \"%Y-%m-%d\")\n end = datetime.strptime(end_date, \"%Y-%m-%d\")\n dates_generator = [start + timedelta(days=x) for x in range(0, ((end-start).days) + 1)]\n dates = [date.strftime(\"%Y-%m-%d\") for date in dates_generator]\n pitching_data_df = pd.DataFrame()\n pitching_data = pd.DataFrame()\n for date in dates:\n try:\n pitching_data = pitching_stats_range(date) \n pitching_data_df = pitching_data_df.append(pitching_data) \n print(date)\n except:\n print('Game not played')\n\n return pitching_data_df", "title": "" }, { "docid": "d1cb5570678bc3a0766090238baa58bb", "score": "0.6812031", "text": "def statcast(start_dt: str = None, end_dt: str = None, team: str = None, verbose: bool = True) -> pd.DataFrame:\n\n start_dt_date, end_dt_date = sanitize_input(start_dt, end_dt)\n\n # 5 days or less -> a quick one-shot request.\n # Greater than 5 days -> break it into multiple smaller queries\n small_query_threshold = 5\n\n # How many days worth of data are needed?\n days_in_query = (end_dt_date - start_dt_date).days\n if days_in_query <= small_query_threshold:\n return small_request(start_dt_date, end_dt_date, team=team)\n else:\n return large_request(start_dt_date, end_dt_date, step=small_query_threshold, verbose=verbose, team=team)", "title": "" }, { "docid": "963b73addc8e41fb29a22ed0c64102fb", "score": "0.65623116", "text": "def statcast(start_dt=None, end_dt=None, team=None, verbose=True):\n\n\n start_dt, end_dt = sanitize_input(start_dt, end_dt)\n # 3 days or less -> a quick one-shot request. Greater than 3 days -> break it into multiple smaller queries\n small_query_threshold = 5\n # inputs are valid if either both or zero dates are supplied. Not valid of only one given.\n\n\n if start_dt and end_dt:\n # how many days worth of data are needed?\n date_format = \"%Y-%m-%d\"\n d1 = datetime.datetime.strptime(start_dt, date_format)\n d2 = datetime.datetime.strptime(end_dt, date_format)\n days_in_query = (d2 - d1).days\n if days_in_query <= small_query_threshold:\n data = small_request(start_dt,end_dt)\n else:\n data = large_request(start_dt,end_dt,d1,d2,step=small_query_threshold,verbose=verbose)\n\n data = postprocessing(data, team)\n return data", "title": "" }, { "docid": "4c91d59dd06c60a07553ad847500bf3a", "score": "0.60293406", "text": "def stats_for_game_day(schedule_odds_df, batting_df, pitching_df, look_back):\n \n hitting_day_list = []\n pitching_day_list = []\n for index, row in schedule_odds_df.iterrows():\n hitting_day = batting_df.loc[index - timedelta(look_back): index - timedelta(1)].groupby('Tm').sum()\n pitching_day = pitching_df.loc[index - timedelta(look_back): index - timedelta(1)].groupby('Tm').sum()\n hitting_day_list.append(hitting_day)\n pitching_day_list.append(pitching_day)\n return hitting_day_list, pitching_day_list", "title": "" }, { "docid": "3db99688450b01b695ec9c0065964b37", "score": "0.6018358", "text": "def aggregate(start_date = '2016-06-30', end_date = pd.to_datetime('today').strftime('%Y-%m-%d')):\n conn = sqlite3.connect(DATABASE)\n c = conn.cursor()\n c.execute('SELECT json FROM collectobot WHERE date(date) BETWEEN date(?) 
AND date(?)', (start_date, end_date))\n date_list = c.fetchall()\n data = []\n for game_data in date_list:\n date = json.loads(game_data[0])\n data.extend(date['games'])\n conn.close()\n return data", "title": "" }, { "docid": "156157c1108fc60eb30f580a292bd11c", "score": "0.60103965", "text": "def fetch_matches(\n start_date: str, end_date: str, fetch_data: bool = False\n) -> pd.DataFrame:\n return data_import.fetch_match_data(start_date, end_date, fetch_data=fetch_data)", "title": "" }, { "docid": "da38e270ddd7052bc3b84a5d0e9c5807", "score": "0.59571797", "text": "def calculate_stats(df, games_id_sublist, game_counter, per = 1, state = \"END\"):\n Start_time_counter = 0\n for game_id in games_id_sublist:\n Period = 0 \n while Period < 1:\n game_link = f\"https://statsapi.web.nhl.com/api/v1/game/{game_id}/feed/live\"\n data = API_reader(game_link)\n if data[\"liveData\"][\"linescore\"][\"currentPeriod\"] == per and data[\"liveData\"][\"linescore\"][\"currentPeriodTimeRemaining\"] == state: \n team_record = _team_records(game_id)\n some_columns = [\"Home_team\", \"Away_team\", \"Home_wins\", \"Home_losses\", \"Home_OT\", \"Away_wins\", \"Away_losses\", \"Away_OT\"]\n home_team_categories = list(data['liveData']['boxscore']['teams']['home']['teamStats']['teamSkaterStats'].keys())\n away_team_categories = list(data['liveData']['boxscore']['teams']['away']['teamStats']['teamSkaterStats'].keys())\n home_team_categories = [f\"Home_{i}\" for i in home_team_categories]\n away_team_categories = [f\"Away_{i}\" for i in away_team_categories]\n home_team = [data[\"gameData\"][\"teams\"][\"home\"][\"triCode\"]]\n away_team = [data[\"gameData\"][\"teams\"][\"away\"][\"triCode\"]]\n df.columns = some_columns + home_team_categories + away_team_categories\n away_team_stats = data['liveData']['boxscore']['teams']['away']['teamStats']['teamSkaterStats']\n home_team_stats = data['liveData']['boxscore']['teams']['home']['teamStats']['teamSkaterStats']\n home_team_values = list(home_team_stats.values())\n away_team_values = list(away_team_stats.values())\n df.loc[str(game_id)] = home_team + away_team + team_record + home_team_values + away_team_values\n Period = 1\n game_counter += 1\n #df.to_csv(f\"C:\\\\Users\\\\David\\\\OneDrive\\\\Documents\\\\OneDrive\\\\NHL API First period Prediction\\\\IntermediateDatasets\\\\{todays_date}_raw.csv\", index = True)\n print(\"Game \", str(game_counter) ,\"/\", str(len(games_id_list)), f\"ID: {game_id} ({away_team[0]}@{home_team[0]}) completed at: \", str(datetime.today().hour), \":\", str(datetime.today().minute))\n prediction = _feature_engineering(df, game_id)[0]\n prediction_probs = _feature_engineering(df, game_id)[1]\n if int(prediction) == 0:\n print(f\"The team that is predicted to win is: {home_team[0]} with probability {round(prediction_probs[0][0]*100,2)}%\")\n else:\n print(f\"The team that is predicted to win is: {away_team[0]} with probability {round(prediction_probs[0][1]*100,2)}%\")\n Start_time_counter += 1\n else:\n Period = 0\n print(\"Check Point: \", datetime.today().hour, \":\", datetime.today().minute)\n #If the game isn't at the first period yet, wait 4 minutes and try again.\n time.sleep(60*4)\n return((df, game_counter))", "title": "" }, { "docid": "5aa27f17ea8c3798e9288364e64d31dd", "score": "0.59438324", "text": "def get_all_data(service, profile_id):\n \n fname = \"game_data_21_06_2014.csv\"\n fh = open(fname,'w')\n noheader = False\n datestart = 0 \n dates = ['2012-01-01','2012-02-01',\n '2012-03-01',\n '2012-04-01',\n '2012-05-01',\n 
'2012-06-01',\n '2012-07-01',\n '2012-08-01',\n '2012-09-01',\n '2012-10-01',\n '2012-11-01',\n '2012-12-01',\n '2013-01-01',\n '2013-02-01',\n '2013-03-01',\n '2013-04-01',\n '2013-05-01',\n '2013-06-01',\n '2013-07-01',\n '2013-08-01',\n '2013-09-01',\n '2013-10-01',\n '2013-11-01',\n '2013-12-01',\n '2014-01-01',\n '2014-02-01',\n '2014-03-01',\n '2014-04-01',\n '2014-05-01',\n '2014-06-01',\n '2014-07-01',\n '2014-08-01'\n '2014-09-01']\n for i in xrange(datestart,len(dates)-1):\n startdate = dates[i]\n enddate = dates[i+1] \n print 'Retrieving events between {} and {}:'.format(startdate,enddate)\n old_count = -1\n count = 0\n start_index = 1\n max_results = 10000\n while old_count != count:\n \n out = service.data().ga().get(\n ids='ga:' + profile_id,\n start_date=startdate,\n end_date=enddate,\n metrics='ga:eventValue',\n dimensions='ga:eventAction,ga:eventLabel,ga:date,ga:hour',\n #sort='-ga:eventAction',\n #filters='ga:city==Leiden',\n start_index='%i' % start_index,\n max_results='%i' % max_results).execute();\n old_count = count\n fh.flush();\n n = print_results(out,fh,noheader)\n \n noheader = True\n count += n\n start_index += n\n print \"written %s lines to file total, starting at index %s next\"%(count,start_index)", "title": "" }, { "docid": "03a55d98f29dc77a8cd6eeef999a0f0a", "score": "0.584737", "text": "def query_index_member_daily(self, index, start_date, end_date):\n df_io, err_msg = self._get_index_comp(index, start_date, end_date)\n if err_msg != '0,':\n print(err_msg)\n \n def str2int(s):\n if isinstance(s, basestring):\n return int(s) if s else 99999999\n elif isinstance(s, (int, np.integer, float, np.float)):\n return s\n else:\n raise NotImplementedError(\"type s = {}\".format(type(s)))\n\n df_io.loc[:, 'in_date'] = df_io.loc[:, 'in_date'].apply(str2int)\n df_io.loc[:, 'out_date'] = df_io.loc[:, 'out_date'].apply(str2int)\n \n # df_io.set_index('symbol', inplace=True)\n dates = self.query_trade_dates(start_date=start_date, end_date=end_date)\n\n dic = dict()\n gp = df_io.groupby(by='symbol')\n for sec, df in gp:\n mask = np.zeros_like(dates, dtype=np.integer)\n for idx, row in df.iterrows():\n bool_index = np.logical_and(dates > row['in_date'], dates < row['out_date'])\n mask[bool_index] = 1\n dic[sec] = mask\n \n res = pd.DataFrame(index=dates, data=dic)\n res.index.name = 'trade_date'\n \n return res", "title": "" }, { "docid": "ef90bd18ecfa2969fff2bf25762e6dfe", "score": "0.58315897", "text": "def start_to_end_dates(start='2010-01-01',end='2010-12-31'):\n # Calculate the total for a given country\n\n results = session.query(Measurement.date, \\\n func.avg(Measurement.tobs).label('TAVG'), \\\n func.max(Measurement.tobs).label('TMAX'), \\\n func.min(Measurement.tobs).label('TMIN')) \\\n .filter(and_(Measurement.date >= start, \\\n Measurement.date <= end)).group_by(Measurement.date).all()\n\n stats = []\n for result in results:\n row = {}\n row[\"date\"] = result[0]\n row[\"TAVG\"] = result[1]\n row[\"TMAX\"] = result[2]\n row[\"TMIN\"] = result[3]\n stats.append(row)\n\n\n return jsonify(stats)", "title": "" }, { "docid": "e9e359b77f7a7852dc8e8804fb164d33", "score": "0.58215517", "text": "def team_log(team, start_date, end_date, year):\n #helpful dictionaries\n num_to_str = {12: 'december', 1: 'january', 2: 'february', 3: 'march', 4: 'april', 5: 'may',\n 6: 'june', 7: 'july', 8: 'august', 9: 'september', 10: 'october', 11: 'november'}\n next_ = {'december': 'january', 'january': 'february', 'february': 'march', 'march': 'april',\n 'april': 'may', 'may': 'june', 
'june': 'july', 'july': 'august', 'august': 'september',\n 'september' : 'october', 'october': 'november', 'november': 'december'}\n month_days = {'december': 31, 'january': 31, 'february': 28, 'march': 31}\n months = []\n game_logs = {}\n #finding the months over which to grab data\n start_month = num_to_str[int(start_date[:2])]\n end_month = num_to_str[int(end_date[:2])]\n current = start_month\n months.append(current)\n while current != end_month:\n current = next_[current]\n months.append(current)\n #starting dat (first month)\n start_day = int(start_date[3:5])\n #ending day (first month)\n if len(months) == 1:\n end_day = int(end_date[3:5])\n else:\n end_day = 31\n\n #iterating through different months\n stop = False\n while not stop:\n month = months[0]\n URL = 'https://www.basketball-reference.com/leagues/NBA_' + year + '_games-' + month + '.html'\n page = requests.get(URL)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #content\n results = soup.find(id = 'all_schedule')\n #try block for if month does not exist in this season\n try:\n body = results.find('tbody')\n t_data = body.find_all('tr')\n\n #Grabbing all games for given month over specified days\n for row in t_data:\n #ensure row has data\n if row.get('class') != 'thead':\n teams = row.find_all('a')\n #ensure game has been played\n if len(teams) < 4:\n break\n game = teams[3].get('href')\n #only games after specificed start date\n if start_day <= int(game[17:19]) <= end_day:\n away = teams[1].get('href')[7:10]\n home = teams[2].get('href')[7:10]\n if team == home or team == away:\n if team == home:\n opponent = away\n else:\n opponent = home\n visitor = (team == away)\n print(game)\n #retrieving basic data from game\n b_data, b_tot, b_o_tot = get_box_score(team, opponent, game, 'basic')\n #retrieving advanced data from game\n a_data, a_tot, a_o_tot = get_box_score(team, opponent, game, 'advanced')\n game_logs[game] = [visitor, b_data, b_tot, a_data, a_tot, b_o_tot, a_o_tot, team, opponent]\n except:\n pass\n #starting new month\n start_day = 1\n months.pop(0)\n if len(months) == 1:\n end_day = int(end_date[3:5])\n elif len(months) == 0:\n stop = True\n\n return game_logs", "title": "" }, { "docid": "e8eddd63dc6bea205f4339a7829c9b33", "score": "0.5809198", "text": "def SpinRate(names, start_date, end_date, dictionary):\n\n # import warnings filter and ignore warnings\n from warnings import simplefilter\n simplefilter(action='ignore', category=Warning)\n\n # Load packages for analysis\n import pandas as pd\n import pybaseball as pb\n import numpy as np\n\n player_dict = dictionary\n\n df_final = pd.DataFrame(columns=['Name', 'Total wOBA', 'FF Spin', 'CU/KC Spin', 'FT/SI Spin', 'FT/SI Use',\n 'FT/SI wOBA'])\n if isinstance(names, list) and isinstance(start_date, list):\n for (name, sdt, edt) in zip(names, start_date, end_date):\n player_ID = player_dict[name]\n df_data = pb.statcast_pitcher(start_dt=sdt, end_dt=edt, player_id=player_ID)\n\n total_pitches = len(df_data)\n total_woba = np.mean(df_data.woba_value)\n\n FF_data = df_data[(df_data.pitch_type == 'FF')]\n CU_data = df_data[(df_data.pitch_type == 'KC') | (df_data.pitch_type == 'CU')]\n FT_data = df_data[(df_data.pitch_type == 'FT') | (df_data.pitch_type == 'SI')]\n\n FF_spin = np.mean(FF_data.release_spin_rate)\n CU_spin = np.mean(CU_data.release_spin_rate)\n FT_spin = np.mean(FT_data.release_spin_rate)\n\n FT_use = len(FT_data)/total_pitches\n\n FT_woba = np.mean(FT_data.woba_value)\n\n temp = [name, total_woba, FF_spin, CU_spin, FT_spin, FT_use, FT_woba]\n\n 
df_temp = pd.DataFrame([temp], columns=['Name', 'Total wOBA', 'FF Spin', 'CU/KC Spin', 'FT/SI Spin',\n 'FT/SI Use', 'FT/SI wOBA'])\n\n df_final = pd.concat([df_final, df_temp], axis=0)\n\n df_final = df_final.fillna(0.0)\n\n if isinstance(names, list):\n for name in names:\n player_ID = player_dict[name]\n if name == 'Will Smith':\n player_ID = 519293\n df_data = pb.statcast_pitcher(start_dt=start_date, end_dt=end_date, player_id=player_ID)\n\n total_pitches = len(df_data)\n total_woba = np.mean(df_data.woba_value)\n\n FF_data = df_data[(df_data.pitch_type == 'FF')]\n CU_data = df_data[(df_data.pitch_type == 'KC') | (df_data.pitch_type == 'CU')]\n FT_data = df_data[(df_data.pitch_type == 'FT') | (df_data.pitch_type == 'SI')]\n\n FF_spin = np.mean(FF_data.release_spin_rate)\n CU_spin = np.mean(CU_data.release_spin_rate)\n FT_spin = np.mean(FT_data.release_spin_rate)\n\n FT_use = len(FT_data)/total_pitches\n\n FT_woba = np.mean(FT_data.woba_value)\n\n temp = [name, total_woba, FF_spin, CU_spin, FT_spin, FT_use, FT_woba]\n\n df_temp = pd.DataFrame([temp], columns=['Name', 'Total wOBA', 'FF Spin', 'CU/KC Spin', 'FT/SI Spin',\n 'FT/SI Use', 'FT/SI wOBA'])\n\n df_final = pd.concat([df_final, df_temp], axis=0)\n\n df_final = df_final.fillna(0.0)\n\n return df_final", "title": "" }, { "docid": "ead939460142b43d7a4cec4ace26bf34", "score": "0.5796204", "text": "def main():\n args = get_args()\n start_game_year = args.start_game_year\n start_game_month = args.start_game_month\n start_game_day = args.start_game_day\n end_game_year = args.end_game_year\n end_game_month = args.end_game_month\n end_game_day = args.end_game_day\n\n start_date = dt.date(start_game_year, start_game_month, start_game_day)\n end_date = dt.date(end_game_year, end_game_month, end_game_day)\n\n assert start_date <= end_date, 'Start Date is after End Date. Please enter a valid date range'\n\n day_delta = dt.timedelta(days=1)\n\n print('Getting and Downloading Box Scores Between {} and {}'.format(start_date, end_date))\n while start_date <= end_date:\n \"\"\"Get all box scores within the date range\"\"\"\n game_year = start_date.year\n game_month = start_date.month\n game_day = start_date.day\n\n player_box_scores_df = get_player_box_scores_day(\n game_year=game_year, game_month=game_month, game_day=game_day)\n\n start_date += day_delta", "title": "" }, { "docid": "f23eae824342fb6cb6b8e13b3138554f", "score": "0.5778884", "text": "def report_data(self, date_start, date_end):\n\n with db.session_scope() as session:\n calls = [LeadsAndCalls.report_calls(date_start, date_end, dept, session) for dept in\n range(len(config.DEPARTMENTS))]\n leads = [LeadsAndCalls.report_leads(date_start, date_end, dept, session) for dept in\n range(len(config.DEPARTMENTS))]\n\n return {'calls': calls, 'leads': leads}", "title": "" }, { "docid": "79e5402284fcd1cb102dbf3782e2b13e", "score": "0.56691414", "text": "def get_user_app_stats(start_date=datetime.datetime.combine(yesterday, datetime.datetime.min.time()),\n end_date=datetime.datetime.combine(yesterday, datetime.datetime.max.time())):\n # From str to datetime, defaults to zero time.\n if type(start_date) == str:\n start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')\n end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d')\n # Due to issue with method filtering only by creation_time need to grab\n # all 14 days before begin date to insure getting all records with a possible\n # finish_time within the time window specified. 
(14 days, 24 hours, 60 mins, 60 secs)\n begin = (int(start_date.strftime('%s')) - (14 * 24 * 60 * 60)) * 1000\n end = int(end_date.strftime('%s')) * 1000\n statuses = ['queued', 'terminated', 'running', 'created', 'estimated']\n job_array = []\n # print(\"BEGIN: \" + str(begin))\n # print(\"END: \" + str(end))\n # For params get finished jobs from execution engine\n params = {'start_time': begin, 'end_time': end, 'ascending': 0}\n stats = ee2.check_jobs_date_range_for_all(params=params)\n for job in stats['jobs']:\n if job['status'] in statuses:\n continue\n else:\n # For finished job run calculate run time and convert values from milliseconds to seconds\n run_time = (job['finished'] - job['running'])\n finished = datetime.datetime.fromtimestamp(job['finished'] / 1000)\n run_start = datetime.datetime.fromtimestamp(job['running'] / 1000)\n is_error = \"False\"\n if 'error' in job:\n is_error = \"True\"\n # For values present construct job stats dictionary and append to job array\n job_stats = {'user': job['user'],\n 'finish_date': finished.strftime('%Y-%m-%d %H:%M:%S'),\n 'start_date': run_start.strftime('%Y-%m-%d %H:%M:%S'),\n 'run_time': run_time//1000,\n 'app_name': job['job_input']['app_id'].replace('.', '/'),\n 'func_name': job['job_input']['method'].replace('.', '/'),\n \"git_commit_hash\": job['job_input']['service_ver'],\n 'is_error': is_error\n }\n job_array.append(job_stats)\n return job_array", "title": "" }, { "docid": "0c414ff30fd3458a4b0d72eebd0d1e76", "score": "0.56688684", "text": "def get_temp_stats_by_date(self, start_date, end_date):\n session = self.session\n measurement_table = self.measurement_table\n stats = session.query(\n func.min(measurement_table.tobs),\n func.max(measurement_table.tobs),\n func.avg(measurement_table.tobs)\n ).filter(\n measurement_table.date >= start_date\n ).filter(\n measurement_table.date <= end_date).first()\n return {\n \"min\" : stats[0],\n \"max\" : stats[1],\n \"avg\" : stats[2]\n }", "title": "" }, { "docid": "e6e8c93e032e1bd7c37da7e1d7d66aef", "score": "0.56225026", "text": "def fixturesRequest(startDate, endDate):\n\n url = \"https://api-football-beta.p.rapidapi.com/fixtures\"\n headers = {\n 'x-rapidapi-key': apiKey,\n 'x-rapidapi-host': \"api-football-beta.p.rapidapi.com\"\n }\n querystring = {\"league\": \"39\", \"season\": \"2020\", \"from\": startDate, \"to\": endDate}\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\n return response.json()", "title": "" }, { "docid": "20c60bb43aa1ef7fce405135a0347fea", "score": "0.5595457", "text": "def run(self, from_date, to_date):\n\n uri = self._get_weather_endpoint(from_date, to_date)\n weather = self._get_weather_data(uri)\n return self._weather_to_dataframe(weather)", "title": "" }, { "docid": "2cf9e4f03ece526b4b8979b55357f056", "score": "0.55944943", "text": "def query_dates(start_date, end_date):\n results = session.query(func.avg(Measurement.tobs), func.max(Measurement.tobs), func.min(Measurement.tobs)).\\\n filter(Measurement.date >= start_date, Measurement.date <= end_date).all()\n\n data_list = []\n for result in results:\n row = {}\n row[\"Start Date\"] = start_date\n row[\"End Date\"] = end_date\n row[\"Average Temperature\"] = float(result[0])\n row[\"Highest Temperature\"] = float(result[1])\n row[\"Lowest Temperature\"] = float(result[2])\n data_list.append(row)\n return jsonify(data_list)", "title": "" }, { "docid": "f3d585169520c33b7a130bffd473bfb3", "score": "0.5571727", "text": "def date_range(): \n #query for the min temp, the avg temp, and 
the max temp for a given date range\n start = \"2017-07-01\"\n end = \"2017-07-31\"\n \n DateRange = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all()\n\n #return the date_range data\n return jsonify(DateRange)", "title": "" }, { "docid": "f3409cd0d59104bc817328df714c2aa1", "score": "0.5571658", "text": "def gen_matchday_squad(players, database):\n \n attributes = ['attacking', 'defending', 'passing']\n \n matchday_squad = pd.DataFrame(columns=players, index=attributes)\n for player in players: \n matchday_squad[player]['attacking'] = database[player].Attacking\n matchday_squad[player]['defending'] = database[player].Defending\n matchday_squad[player]['passing'] = database[player].Passing\n\n\n return matchday_squad", "title": "" }, { "docid": "d7f396ba37e25234e7175baa7ee16373", "score": "0.5571097", "text": "def _stats(tracking_data):\n\n starting_date = tracking_data[0]\n ending_date = tracking_data[1]\n wordgoal = tracking_data[2]\n wordcount_list = tracking_data[3:]\n\n target_time_period = (ending_date - starting_date).days + 1\n current_day = (datetime.date.today() - starting_date).days + 1\n days_remaining = target_time_period - current_day + 1\n\n total_words_written = wordcount_list[-1] if wordcount_list else 0\n target_average_word_count = float(wordgoal) / target_time_period if target_time_period else 0\n average_words_per_day = float(total_words_written) / current_day if current_day else 0\n\n words_written_today = 0\n if current_day == 1:\n words_written_today = wordcount_list[0]\n else:\n words_written_today = wordcount_list[-1] - wordcount_list[-2]\n words_remaining = wordgoal - total_words_written\n\n current_goal_day = current_day + int(math.ceil(words_remaining / average_words_per_day if average_words_per_day else 0))\n if current_goal_day != current_day:\n finish_date = datetime.date.today() + datetime.timedelta(current_goal_day - current_day)\n else:\n finish_date = \"Not started yet.\"\n needed_words_per_day = float(words_remaining) / days_remaining if days_remaining else 0\n\n # validation\n if days_remaining < 0:\n days_remaining = \"Time passed!\"\n if words_remaining < 0:\n words_remaining = \"Goal reached!\"\n if current_day > target_time_period:\n current_day = \"Time passed!\"\n if needed_words_per_day < 0:\n finish_date = \"Goal reached!\"\n needed_words_per_day = 0\n\n return {\"starting_date\": starting_date,\n \"ending_date\": ending_date,\n \"average_words_per_day\": int(average_words_per_day),\n \"words_written_today\": words_written_today,\n \"target_word_count\": wordgoal,\n \"target_average_word_count\": int(math.ceil(target_average_word_count)),\n \"total_words_written\": total_words_written,\n \"words_remaining\": words_remaining,\n \"current_day\": current_day,\n \"days_remaining\": days_remaining,\n \"finish_day\": current_goal_day,\n \"finish_date\": finish_date,\n \"target_time_period\": target_time_period,\n \"needed_words_per_day\": int(math.ceil(needed_words_per_day))}", "title": "" }, { "docid": "dbf7b22333b7bed327e830f34ae9a2a9", "score": "0.5544358", "text": "def get_data_table(ticker='GS', start_date='2014-01-01', end_date='2018-12-31'):\n # ticker = 'GS' # Goldman Sachs Group Inc\n # ticker = 'GDDY' # GoDaddy\n # ticker = 'GM' # General Motors\n # ticker = 'GRUB' # GrubHub\n # start_date = '2014-01-01'\n # end_date = '2018-12-31'\n s_window = 14\n l_window = 50\n\n if 
platform.system() == 'Windows':\n home_dir = os.path.join('C:', os.path.sep, 'Users', 'jimmy_000') # MS Windows home directory\n else: # Assumes Linux\n home_dir = os.path.join(os.path.sep + 'home', 'jgoddard') # Linux home directory\n input_dir = os.path.join(home_dir, 'src', 'git', 'CS677', 'datasets')\n output_file = os.path.join(input_dir, ticker + '.csv')\n\n if not os.path.isfile(output_file):\n df = get_stock(ticker, start_date, end_date, s_window, l_window)\n df.to_csv(output_file, index=False)\n else:\n df = pd.read_csv(output_file)\n return df", "title": "" }, { "docid": "dbf7b22333b7bed327e830f34ae9a2a9", "score": "0.5544358", "text": "def get_data_table(ticker='GS', start_date='2014-01-01', end_date='2018-12-31'):\n # ticker = 'GS' # Goldman Sachs Group Inc\n # ticker = 'GDDY' # GoDaddy\n # ticker = 'GM' # General Motors\n # ticker = 'GRUB' # GrubHub\n # start_date = '2014-01-01'\n # end_date = '2018-12-31'\n s_window = 14\n l_window = 50\n\n if platform.system() == 'Windows':\n home_dir = os.path.join('C:', os.path.sep, 'Users', 'jimmy_000') # MS Windows home directory\n else: # Assumes Linux\n home_dir = os.path.join(os.path.sep + 'home', 'jgoddard') # Linux home directory\n input_dir = os.path.join(home_dir, 'src', 'git', 'CS677', 'datasets')\n output_file = os.path.join(input_dir, ticker + '.csv')\n\n if not os.path.isfile(output_file):\n df = get_stock(ticker, start_date, end_date, s_window, l_window)\n df.to_csv(output_file, index=False)\n else:\n df = pd.read_csv(output_file)\n return df", "title": "" }, { "docid": "3f59872a314230127b7d29c5f67fecdf", "score": "0.55307215", "text": "def loop_date(self, batched, end_date):\n\n sorted_data = self.sort_by(batched, \"created_at\")\n start_date = sorted_data[0][\"created_at\"]\n end_date = self.get_end_date(sorted_data, end_date)\n return start_date, end_date", "title": "" }, { "docid": "4a429b000fe643a57f6739a1600ec2bb", "score": "0.5525255", "text": "def make_schedule_with_odds(odds_df):\n odds_df = odds_df[odds_df['Final'] != 'NL']\n new_df = pd.DataFrame()\n t = odds_df.iterrows()\n date = []\n home = []\n visitor = []\n home_pitcher = []\n visitor_pitcher = []\n home_open_odds = []\n visitor_open_odds = []\n home_close_odds = []\n visitor_close_odds = []\n home_win_loss = []\n visitor_win_loss = []\n for (i, row1), (j, row2) in zip(t, t):\n date.append(i)\n home.append(row2['Team'])\n visitor.append(row1['Team'])\n home_pitcher.append(row2['Pitcher'])\n visitor_pitcher.append(row1['Pitcher'])\n home_open_odds.append(row2['Open'])\n visitor_open_odds.append(row1['Open'])\n home_close_odds.append(row2['Close'])\n visitor_close_odds.append(row1['Close'])\n if int(row2['Final']) > int(row1['Final']):\n home_win_loss.append(1)\n visitor_win_loss.append(0)\n else:\n home_win_loss.append(0)\n visitor_win_loss.append(1)\n schedule_odds_df = pd.DataFrame(list(zip(home, visitor, home_pitcher, visitor_pitcher, home_open_odds, visitor_open_odds, home_close_odds, \n visitor_close_odds, home_win_loss, visitor_win_loss)), columns=['home','visitor', 'home_pitcher', 'visitor_pitcher', \n 'home_open_odds', 'visitor_open_odds', 'home_close_odds', 'visitor_close_odds', 'home_win_loss', 'visitor_win_loss'], \n index = date)\n return schedule_odds_df", "title": "" }, { "docid": "8486623d865a10af07445d53b635fed0", "score": "0.5516449", "text": "def df_for_feature_selection(odds_df, batting_df, pitching_df, look_back):\n \n # Use helper function to get schedule with odds\n odds_df_with_lookback = odds_df[odds_df.index[0] + timedelta(look_back): 
batting_df.index[-1]]\n schedule_odds_df = make_schedule_with_odds(odds_df_with_lookback)\n \n # Use helper function to get all stats for lookback period by team\n hitting_day, pitching_day = stats_for_game_day(schedule_odds_df, batting_df, pitching_df, look_back)\n\n # Create one dataframe that houses all the odds, stats, winners and losers for each game played \n total_df = pd.DataFrame()\n for i in range(len(schedule_odds_df)):\n hitting_day_df = pd.DataFrame(hitting_day[i])\n pitching_day_df = pd.DataFrame(pitching_day[i])\n hitting_games_home = pd.DataFrame(hitting_day_df.loc[schedule_odds_df['home'][i]]).T.reset_index().drop(columns = ['index', 'VH'])\n hitting_games_home['Date'] = schedule_odds_df.index[i]\n hitting_games_home = hitting_games_home.set_index('Date')\n hitting_games_home = hitting_games_home.add_prefix('Home_Hitting')\n hitting_games_visitor = pd.DataFrame(hitting_day_df.loc[schedule_odds_df['visitor'][i]]).T.reset_index().drop(columns = ['index', 'VH'])\n hitting_games_visitor['Date'] = schedule_odds_df.index[i]\n hitting_games_visitor = hitting_games_visitor.set_index('Date')\n hitting_games_visitor = hitting_games_visitor.add_prefix('Visitor_Hitting')\n pitching_games_home = pd.DataFrame(pitching_day_df.loc[schedule_odds_df['home'][i]]).T.reset_index().drop(columns = ['index', 'VH'])\n pitching_games_home['Date'] = schedule_odds_df.index[i]\n pitching_games_home = pitching_games_home.set_index('Date')\n pitching_games_home = pitching_games_home.add_prefix('Home_Pitching')\n pitching_games_visitor = pd.DataFrame(pitching_day_df.loc[schedule_odds_df['visitor'][i]]).T.reset_index().drop(columns = ['index', 'VH'])\n pitching_games_visitor['Date'] = schedule_odds_df.index[i]\n pitching_games_visitor = pitching_games_visitor.set_index('Date')\n pitching_games_visitor = pitching_games_visitor.add_prefix('Visitor_Pitching')\n total_line = pd.concat([hitting_games_home,hitting_games_visitor, pitching_games_home, pitching_games_visitor], axis = 1)\n total_df = total_df.append(total_line)\n stats_odds_df = pd.concat([schedule_odds_df,total_df], axis = 1 )\n return stats_odds_df", "title": "" }, { "docid": "f413200f577b3f218c234f7b1ea42c88", "score": "0.55032617", "text": "def start_to_end_date(start, end):\n temps = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date >= start).\\\n filter(and_(Measurement.date) <= end)\n\n \"\"\" Save the query results as a Pandas DataFrame and set the index to the date column \"\"\"\n temps_df = pd.DataFrame(temps[:], columns = [\"date\", \"tobs\"]).dropna()\n \n message_dict = {\"Temperature stats for date range: \": start + \" to \" + end}\n temps_dict = { \"Minimum Temperature\": str(round(temps_df[\"tobs\"].min(), 2)),\n \"Average Temperature\": str(round(temps_df[\"tobs\"].mean(), 2)),\n \"Maximum Temperature\": str(round(temps_df[\"tobs\"].max(), 2))\n }\n\n return jsonify(message_dict, temps_dict)", "title": "" }, { "docid": "860d6fbf5808729873831de7b09779d8", "score": "0.5503052", "text": "def stats(start=None, end=None):\n session = Session(engine)\n # Select statement to simplify query:\n # calculate TMIN, TAVG, TMAX for dates to filter by later\n sel = [func.min(Measurement.tobs), func.avg(\n Measurement.tobs), func.max(Measurement.tobs)]\n\n # if no end date is provided, only filter by start date\n if not end:\n \n results = session.query(*sel).\\\n filter(Measurement.date >= start).all()\n\n # if end and start date provided, filter by both\n else:\n results = session.query(*sel).\\\n 
filter(Measurement.date >= start).\\\n filter(Measurement.date <= end).all()\n\n session.close()\n\n\n temp_dict={}\n for min, avg, max in results:\n temp_dict['min']=min\n temp_dict['avg']=avg\n temp_dict['max']=max\n\n return jsonify(query_temps=temp_dict)", "title": "" }, { "docid": "11e86808239e113987db8c1961d6e217", "score": "0.55020666", "text": "def get_statistics():\n\n # Filter by city (Chicago, New York, Washington)\n city = get_city()\n print('Loading data...')\n df = pd.read_csv(city, parse_dates=['Start Time', 'End Time'])\n df.dropna(inplace=True)\n\n # change all column names to lowercase letters and replace spaces with underscores\n new_labels = []\n for col in df.columns:\n new_labels.append(col.replace(' ', '_').lower())\n df.columns = new_labels\n\n # increases the column width so that the long strings in the 'journey'\n # column can be displayed fully\n pd.set_option('max_colwidth', 100)\n\n # creates a 'journey' column that concatenates 'start_station' with\n # 'end_station' for the use popular_trip() function\n df['journey'] = df['start_station'].str.cat(df['end_station'], sep=' to ')\n\n # Filter by time period (month, day, none)\n time_period = get_time_period()\n if time_period == 'none':\n df_filtered = df\n elif time_period in ['month', 'day']:\n if time_period == 'month':\n filter_lower, filter_upper = get_month()\n elif time_period == 'day':\n filter_lower, filter_upper = get_day()\n print('Filtering data...')\n df_filtered = df[(df['start_time'] >= filter_lower) & (df['start_time'] < filter_upper)]\n print('\\nCalculating the first statistic...')\n\n if time_period == 'none':\n start_time = time.time()\n\n # What is the most popular month for start time?\n most_popular_month(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n\n if time_period in ['none', 'month']:\n start_time = time.time()\n\n # What is the most popular day of week (Monday, Tuesday, etc.) 
for start time?\n most_popular_day(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What is the most popular hour of day for start time?\n most_popular_hour(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What is the total trip duration and average trip duration?\n total_trip_duration(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What is the most popular start station and most popular end station?\n most_popular_stations(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What is the most popular trip?\n most_popular_trip(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What are the counts of each user type?\n get_users(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n\n if city in ['chicago.csv', 'new_york_city.csv']:\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What are the counts of gender?\n get_gender(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n print('\\nCalculating the next statistic...')\n start_time = time.time()\n\n # What are the earliest, most recent, and most popular birth years?\n get_birth_years(df_filtered)\n print('This took {} around seconds to calculate.'.format(time.time() - start_time))\n\n # Display five lines of data at a time if user specifies that they would like to\n display_data(df_filtered)\n\n # Restart if user wants to..\n restart = input('\\nWould you like to restart? Type \"yes\" or \"no\".\\n').lower()\n while restart not in ['yes', 'no']:\n print('Invalid input. Please type \"yes\" or \"no\".')\n restart = input('\\nWould you like to restart? 
Type \"yes\" or \"no\".\\n').lower()\n if restart == 'yes':\n get_statistics()\n else:\n print('Good Bye!')\n return", "title": "" }, { "docid": "2658f5fcff02244b803aabf042bf0f26", "score": "0.5482642", "text": "def initialise_statistics_dataframes(self) -> None:\n # Create the shared index for indexing with current datetime..\n index = [(self.start_date + datetime.timedelta(days=x)).date() for x in range((self.end_date - self.start_date).days)]\n \n # Initialise the data frames with the shared index.\n self.hospital_admissions = pd.DataFrame(index=index, columns=self.hospital_admission_columns)\n self.community_deaths = pd.DataFrame(index=index, columns=self.base_columns)\n self.outpatient_queue_deaths = pd.DataFrame(index=index, columns=self.base_columns)\n self.inpatient_queue_deaths = pd.DataFrame(index=index, columns=self.base_columns)\n self.outpatient_wait_times = pd.DataFrame(index=index, columns=self.wait_time_columns)\n self.inpatient_wait_times = pd.DataFrame(index=index, columns=self.wait_time_columns)\n self.echo_queues = pd.DataFrame(index=index, columns=self.echo_queues_columns)", "title": "" }, { "docid": "969a77b80b016680444cdfb6853dc8bd", "score": "0.5481902", "text": "def dates(start_date, end_date):\n \n # Design a query to retrieve the last 12 months of precipitation data and plot the results\n import datetime as dt\n \n \n if end_date is None and start_date is not None:\n # Query all measurements\n results = session.query(Measurement.date,\\\n Measurement.tobs).\\\n filter(Measurement.date >= start_date).\\\n all() \n elif start_date is not None:\n # Query all measurements\n results = session.query(Measurement.date,\\\n Measurement.tobs).\\\n filter(Measurement.date >= start_date).\\\n filter(Measurement.date <= end_date).\\\n all()\n \n all_dates = []\n for date, tobs in results:\n date_dict = {}\n date_dict[\"date\"] = date\n date_dict[\"tobs\"] = tobs\n all_dates.append(date_dict)\n\n return jsonify(all_temps)", "title": "" }, { "docid": "0f4d66c28474b3e39ea95811d0d6cbaf", "score": "0.54739916", "text": "def start_end(start=None, end=None):\n \n #Pull data from engine for dates range specified\n range_data = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).filter(Measurement.date <= end).group_by(Measurement.date).all()\n \n #Convert data into list for returning\n range_list=list(range_data)\n \n #Return JSONified data from list\n return jsonify(range_list)", "title": "" }, { "docid": "f56bf9a10e69dd048ca45f43f2f478f2", "score": "0.5464715", "text": "def status_date_range(app, start, end, verbosity):\n report_type = StatusReport.NONE\n if verbosity <= 0:\n error = f\"Invalid value for verbosity: {verbosity}. 
Value must be greater than zero.\"\n return exit_app(app, Result.Fail(error))\n elif verbosity == 1:\n report_type = StatusReport.DATE_SUMMARY_MISSING_DATA\n elif verbosity == 2:\n report_type = StatusReport.DATE_SUMMARY_ALL_DATES\n elif verbosity == 3:\n report_type = StatusReport.DATE_DETAIL_MISSING_DATA\n elif verbosity == 4:\n report_type = StatusReport.DATE_DETAIL_ALL_DATES\n else:\n report_type = StatusReport.DATE_DETAIL_MISSING_PITCHFX\n result = report_date_range_status(app.db_session, start, end, report_type)\n if result.success:\n report_viewer = result.value\n report_viewer.launch()\n return exit_app(app, result)", "title": "" }, { "docid": "3ec9a7b3f5c033061e205be6c263d6a0", "score": "0.54640174", "text": "def bh_trend(start_date, end_date, center):\n params = [\n pd.to_datetime(start_date).date(),\n pd.to_datetime(end_date).date(),\n pd.to_datetime(end_date).date(),\n ]\n\n center_sql, params = create_center_sql(center, params)\n\n query = f\"\"\"\n SELECT dx.member_id, e.enrollment_date, e.disenrollment_date\n FROM dx\n JOIN enrollment e ON dx.member_id=e.member_id\n JOIN centers on e.member_id=centers.member_id\n WHERE (instr(icd10, 'F2') > 0\n OR instr(icd10, 'F31') > 0\n OR instr(icd10, 'F32') > 0\n OR instr(icd10, 'F33') > 0\n OR instr(icd10, 'F4') > 0\n OR instr(icd10, 'F6') > 0)\n AND (disenrollment_date BETWEEN ? AND ?\n OR disenrollment_date IS NULL)\n AND (enrollment_date <= ?)\n {center_sql};\n \"\"\"\n df = sql_return_df(query, params, ['enrollment_date', 'disenrollment_date'])\n \n return percent_trend_graph(df, None,\n \"Percent with Behavioral Health Dx\",\n color_palette[0],\n start_date,\n end_date,\n center,\n use_raw_count=True)", "title": "" }, { "docid": "f0710c4a4a95022683ff4a19e082cad9", "score": "0.5463824", "text": "def get_perf_data(self, entity_scope, entity_code, from_date, to_date, asat, **kwargs) -> DataFrame:\n from_date = max(from_date, self.start_date)\n increase_base = (from_date - self.start_date).days\n\n def make_perf(i) -> Tuple[Timestamp, float, float]:\n \"\"\"\n This function is responsible for constructing performance for a particular day\n\n :param int i: The number of days into making performance\n\n :return: Tuple[Timestamp, float, float]: Data to use to construct a series of performance\n \"\"\"\n nonlocal increase_base\n increase = increase_base + i # Represents the number of days since the start date\n\n return (\n from_date + timedelta(days=i), # Date: Produce the correct date\n np.round(1000000.0 * pow(self.daily_ror, increase), 2), # Market Value: Return is consistently applied\n self.rec_flow if increase % self.rec_freq == 0 else 0 # Flows: Log a flow at the recurring frequency\n )\n\n df = pd.DataFrame.from_records(\n [\n make_perf(i) for i in range((to_date-from_date).days+1)\n ],\n columns=['date', 'mv', 'net']\n )\n\n df['key'] = 'all'\n return df", "title": "" }, { "docid": "a6914a7ccb820c9ab27012c77cd1e114", "score": "0.54429305", "text": "def format_data(session, schedule_data, team_tbl, team_stats_tbl):\n h_score = schedule_data.data['home_team_score']\n a_score = schedule_data.data['away_team_score']\n schedule_data.data['MOV'] = [h_score[i] - a_score[i] for i in range(schedule_data.num_rows())]\n schedule_data.data['playoffs'] = ['']\n schedule_data.data['game_date'] = [datetime.date(t) for t in schedule_data.data['start_time']]\n schedule_data.fill('playoffs', None)\n schedule_data.data[\"home_team_id\"] = convert.values_to_foreign_key(session, foreign_tbl=team_tbl, foreign_key=\"id\",\n 
foreign_value=\"team_name\",\n child_data=schedule_data.data.pop(\"home_team\"))\n schedule_data.data[\"away_team_id\"] = convert.values_to_foreign_key(session, foreign_tbl=team_tbl, foreign_key=\"id\",\n foreign_value=\"team_name\",\n child_data=schedule_data.data.pop(\"away_team\"))\n\n today = datetime.date(datetime.now())\n tomorrow = today + timedelta(days=1)\n tmrw_idx = 0\n for idx in range(len(schedule_data.data['start_time'])):\n if schedule_data.data['start_time'][idx].date() >= tomorrow:\n tmrw_idx = idx\n break\n if not tmrw_idx:\n raise ValueError(\"tmrw_idx was not found\")\n subquery = session.query(team_stats_tbl.id, team_stats_tbl.team_id, func.max(team_stats_tbl.scrape_time)). \\\n filter(team_stats_tbl.scrape_date <= today).group_by(team_stats_tbl.team_id).subquery()\n schedule_data.data['home_stats_id'] = convert.values_to_foreign_key(session, subquery, 'id', 'team_id',\n schedule_data.data['home_team_id'][:tmrw_idx])\n schedule_data.data['away_stats_id'] = convert.values_to_foreign_key(session, subquery, 'id', 'team_id',\n schedule_data.data['away_team_id'][:tmrw_idx])\n schedule_data.fill('home_stats_id', None)\n schedule_data.fill('away_stats_id', None)\n\n return schedule_data", "title": "" }, { "docid": "b64d676ced448698b31afc3842a5b423", "score": "0.54299825", "text": "def tweets_per_day(begin_date=False, end_date=False, extend_query={}, print_table=True, export_csv=False, should_return=False):\n export_data = [['screen name', 'average # tweets per day']]\n\n for name in screen_names_in_db():\n query = dict({'author.screen_name': name}.items() + extend_query.items())\n tweets = db.tweets.find(query).sort('created_at', pymongo.ASCENDING)\n if tweets.count():\n first_date = tweets[0]['created_at']\n diff_days = (datetime.now() - first_date).days\n total = db.tweets.find(query).count()\n avg = float(total) / float(diff_days)\n export_data.append([name, avg])\n else:\n export_data.append([name, 0])\n\n return export_data", "title": "" }, { "docid": "3c4ff27cc13efdd97d92c3526b675922", "score": "0.5423545", "text": "def start_end_date(start_date,end_date):\n # Perform a query to retrieve the Tmin, Tavg and Tmax for all dates between start and end dates\n dates=session.query(\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs)\n ).filter(Measurement.date>=start_date).filter(Measurement.date<=end_date).all()\n \n if None in dates[0]:\n return jsonify({\"error\": f\"Dates not found.\"})\n return jsonify(dates)", "title": "" }, { "docid": "391b3848455c18ba1df19d37a56b8a39", "score": "0.5401185", "text": "def daily(start, end):\n for dt in n_daily(start, end, 1):\n yield dt", "title": "" }, { "docid": "aedba9d5cb95f225acb8e31db16665cd", "score": "0.53975797", "text": "def startEndDate(start, end):\n session = Session(engine)\n \n # This is some data cleaning for better matching\n\n results = session.query(Measurements.date, func.max(Measurements.tobs),\\\n func.min(Measurements.tobs),func.avg(Measurements.tobs)).\\\n filter(Measurements.date >= start, Measurements.date <= end).group_by(Measurements.date).order_by(Measurements.date).all()\n \n tempList = []\n for date, tmax, tmin, tavg in results:\n tempDict = {}\n tempDict[\"date\"] = date\n tempDict[\"tmax\"] = tmax\n tempDict[\"tmin\"] = tmin\n tempDict[\"tavg\"] = tavg\n tempList.append(tempDict)\n\n\n return jsonify(tempList)", "title": "" }, { "docid": "3339476300daab4a58c17007a3770219", "score": "0.5390189", "text": "def NHL_games_today(todays_date, print_binary = 0):\n games_links 
= f\"https://statsapi.web.nhl.com/api/v1/schedule?startDate={todays_date}&endDate={todays_date}\"\n dates = API_reader(games_links)\n num_of_games = dates[\"totalGames\"]\n games_id_list = [dates[\"dates\"][0][\"games\"][i][\"gamePk\"] for i in range(num_of_games)]\n #Find the difference in seconds between the start times.\n if len(games_id_list) > 1:\n start_times = []\n game_start_dict = {}\n for game_id in games_id_list:\n data = API_reader(f\"https://statsapi.web.nhl.com/api/v1/game/{game_id}/feed/live\")\n start_time = data[\"gameData\"][\"datetime\"][\"dateTime\"]\n game_start_dict[str(game_id)] = start_time\n start_time = parse(start_time)\n start_times.append(start_time)\n start_times = sorted(start_times)\n delta_seconds_start_times = [(start_times[i+1]- start_times[i]).total_seconds() for i in range(len(start_times)-1)]+ [0]\n else:\n delta_seconds_start_times = [0]\n # Solution to sorting a dict found here: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value\n game_start_dict = {k : v for k, v in sorted(game_start_dict.items(), key=lambda item: item[1])}\n games_id_list =[int(j) for j in [k for k in game_start_dict.keys()]]\n if print_binary == 1:\n print(f\"Number of Games on {todays_date}:\", len(games_id_list))\n print(\"Game Ids: \", games_id_list)\n print(\"Time (seconds) between start times:\", delta_seconds_start_times)\n print(\"Number of unique start times:\", len(set(start_times)))\n return((games_id_list, delta_seconds_start_times, game_start_dict, start_times))", "title": "" }, { "docid": "1a1a11a8a965ed69130df176ebc97742", "score": "0.5389956", "text": "async def get_games(ctx, *args):\n if len(args) == 0:\n today = datetime.now()\n date = datetime(today.year, today.month, today.day).strftime(\"%Y-%m-%d\")\n else:\n value = ' '.join(args)\n try:\n today = dateparser.parse(date_string=value)\n date = datetime(today.year, today.month, today.day).strftime(\"%Y\"\n \"-%m\"\n \"-%d\")\n except AttributeError:\n await ctx.send('Improper date format. 
Please try again.')\n return\n if today.year == 2020 or (today.year == 2019 and today.month > 7):\n game_df = GAME_DF # this saves time, don't have to retrieve the game_df\n elif today.month > 7:\n game_df = get_game_df(str(today.year))\n else:\n game_df = get_game_df(str(today.year - 1))\n\n selected = game_df[game_df['GAME_DATE'] == date]\n game_ids = []\n fields = {}\n for i in range(len(selected)):\n row = selected.iloc[i, :]\n if row['GAME_ID'] not in game_ids:\n game_ids.append(row['GAME_ID'])\n first, second = game_finder(game_df, row['GAME_ID'], row)\n fields[first['MATCHUP']] = '-'.join([str(first.PTS),\n str(second.PTS)])\n info = ('**Get Games**', 'Games occuring on ' + date + ':', 0xBEC0C2)\n embed = embed_creator(info, None, None, fields)\n await ctx.send(embed=embed)", "title": "" }, { "docid": "8a4bd8b62e376175d293f26d1e84fb58", "score": "0.53791356", "text": "def get_region_with_most_games_in_dates(self, arg_dict: dict):\n export_path = arg_dict['export_path']\n if 'arguments' not in arg_dict:\n raise ValueError(\"No arguments, please provide\")\n\n start_date = arg_dict['arguments'][0]\n end_date = arg_dict['arguments'][1]\n\n filtered_game_df = self.game_df.filter(\n (self.game_df.started_at >= start_date) & (self.game_df.started_at < end_date))\n processed_df = self.get_most_games_by_region(filtered_game_df)\n Export.export_to_local(processed_df, export_path, file_name_prefix=\"region_with_most_games_dates\")", "title": "" }, { "docid": "ad5446be9ceb34b5482e2d7dfdd17137", "score": "0.53714746", "text": "def get_daily_activity(log_data, course_info):\n daily = pd.DataFrame()\n log_data['c_start'] = [course_info[c]['start_date']\n for c in log_data['course_id']]\n log_data['days'] = log_data['date'] - log_data['c_start']\n daily['total'] = log_data.groupby('enrollment_id')['time'].count()\n for i in range(30):\n d = pd.Timedelta(str(i) + ' days')\n log_day = log_data[log_data['days'] == d]\n d_name = 'n_' + str(i) + '_'\n daily[d_name + 'records'] = log_day.groupby('enrollment_id')['time'].count()\n log_server = log_day[log_day['source'] == 'server']\n daily[d_name + 's_navigate'] = log_server[log_server['event'] == 'nagivate'] \\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 's_access'] = log_server[log_server['event'] == 'access']\\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 's_problem'] = log_server[log_server['event'] == 'problem']\\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 's_discussion'] = log_server[log_server['event'] == 'discussion']\\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 's_wiki'] = log_server[log_server['event'] == 'wiki']\\\n .groupby('enrollment_id')['time'].count()\n\n log_browser = log_day[log_day['source'] == 'browser']\n daily[d_name + 'b_access'] = log_browser[log_browser['event'] == 'access']\\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 'b_problem'] = log_browser[log_browser['event'] == 'problem']\\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 'b_pageclose'] = log_browser[log_browser['event'] == 'page_close']\\\n .groupby('enrollment_id')['time'].count()\n daily[d_name + 'b_video'] = log_browser[log_browser['event'] == 'video']\\\n .groupby('enrollment_id')['time'].count()\n\n log_day_video = log_day[log_day['event'] == 'video']\\\n [['enrollment_id', 'object']]\n log_day_video.drop_duplicates(inplace = True)\n daily[d_name + 'video_object'] = log_day_video.groupby('enrollment_id').count()\n\n log_day_pro = log_day[log_day['event'] == 
'problem']\\\n [['enrollment_id', 'object']]\n log_day_pro.drop_duplicates(inplace = True)\n daily[d_name + 'pro_object'] = log_day_pro.groupby('enrollment_id').count()\n\n log_day_seq = log_day[log_day['category'] == 'sequential']\\\n [['enrollment_id', 'object']]\n log_day_seq.drop_duplicates(inplace = True)\n daily[d_name + 'seq_object'] = log_day_seq.groupby('enrollment_id').count()\n daily.fillna(0, inplace = True)\n daily.drop('total', axis = 1, inplace = True)\n return daily", "title": "" }, { "docid": "8f640eeb09917f9fe1204c3195610f4b", "score": "0.5371201", "text": "def get_daily_trend(self, search_term: str, start_d: date, end_d: date, cat=0,\n geo='', gprop='', delta=269, overlap=100, sleep=0,\n tz=0, verbose=False) -> pd.DataFrame:\n topic = self._find_topic_encode(search_term, verbose)\n search_term = topic if topic != None else search_term\n\n init_end_d = datetime(end_d.year, end_d.month, end_d.day, 23, 59, 59)\n delta = timedelta(days=delta)\n overlap = timedelta(days=overlap)\n\n itr_d = end_d - delta\n overlap_start = None\n\n df = pd.DataFrame()\n ol = pd.DataFrame()\n\n while end_d > start_d:\n tf = itr_d.strftime('%Y-%m-%d')+' '+end_d.strftime('%Y-%m-%d')\n if verbose:\n print('Fetching \\''+search_term+'\\' for period:'+tf)\n temp = self._fetch_data([search_term], timeframe=tf, cat=cat, geo=geo, gprop=gprop).astype({search_term: float})\n temp.columns.values[0] = tf\n ol_temp = temp.copy()\n ol_temp.iloc[:, :] = None\n if overlap_start is not None: # not first iteration\n if verbose:\n print('Normalize by overlapping period:'+overlap_start.strftime('%Y-%m-%d'), end_d.strftime('%Y-%m-%d'))\n # normalize using the maximum value of the overlapped period\n y1 = temp.loc[overlap_start:end_d].iloc[:, 0].values.max()\n y2 = df.loc[overlap_start:end_d].iloc[:, -1].values.max()\n coef = y2/y1\n temp = temp * coef\n ol_temp.loc[overlap_start:end_d, :] = 1\n\n df = pd.concat([df, temp], axis=1)\n ol = pd.concat([ol, ol_temp], axis=1)\n # shift the timeframe for next iteration\n overlap_start = itr_d\n end_d -= (delta-overlap)\n itr_d -= (delta-overlap)\n # in case of short query interval getting banned by server\n time.sleep(sleep)\n\n df.sort_index(inplace=True)\n ol.sort_index(inplace=True)\n\n # If the daily trend data is missing the most recent 3-days data, need to complete with hourly data\n if df.index.max() < init_end_d:\n tf = 'now 7-d'\n hourly = self._fetch_data([search_term], timeframe=tf, cat=cat, geo=geo, gprop=gprop).astype({search_term: float})\n\n # convert hourly data to daily data\n daily = hourly.groupby(hourly.index.date).sum()\n\n # check whether the first day data is complete (i.e. 
has 24 hours)\n daily['hours'] = hourly.groupby(hourly.index.date).count()\n if daily.iloc[0].loc['hours'] != 24:\n daily.drop(daily.index[0], inplace=True)\n daily.drop(columns='hours', inplace=True)\n\n daily.set_index(pd.DatetimeIndex(daily.index), inplace=True)\n daily.columns = [tf]\n\n ol_temp = daily.copy()\n ol_temp.iloc[:, :] = None\n # find the overlapping date\n intersect = df.index.intersection(daily.index)\n if verbose:\n print('Normalize by overlapping period:'+(intersect.min().strftime('%Y-%m-%d')) +\n ' '+(intersect.max().strftime('%Y-%m-%d')))\n # scaling use the overlapped today-4 to today-7 data\n coef = df.loc[intersect].iloc[:, 0].max() / daily.loc[intersect].iloc[:, 0].max()\n daily = daily*coef\n ol_temp.loc[intersect, :] = 1\n\n df = pd.concat([daily, df], axis=1)\n ol = pd.concat([ol_temp, ol], axis=1)\n\n # taking averages for overlapped period\n df = df.mean(axis=1)\n ol = ol.max(axis=1)\n # merge the two dataframe (trend data and overlap flag)\n df = pd.concat([df, ol], axis=1)\n df.columns = [search_term, 'overlap']\n # Correct the timezone difference\n df.index = df.index + timedelta(minutes=tz*60)\n df = df[start_d:init_end_d]\n # re-normalized to the overall maximum value to have max =100\n #df[search_term] = 100*df[search_term]/df[search_term].max()\n\n df.index.name = 'date'\n return df.rename(columns={search_term: 'trend'})", "title": "" }, { "docid": "40ab2e16b71880e5e1c2a5866a36845b", "score": "0.5346033", "text": "def update_datatable(start_date, end_date, relayoutData, option_slctd) -> pd.DataFrame:\n if option_slctd == \"AAPL\":\n df_table = datafetch.df_tweets_aapl_full\n if option_slctd == \"GOOG\":\n df_table = datafetch.df_tweets_goog_full\n if option_slctd == \"AMZN\":\n df_table = datafetch.df_tweets_amzn_full\n\n button_id = get_button()\n\n if button_id == \"slct_ticker\":\n return df_table[start_date:end_date].to_dict(\"rows\")\n elif button_id == \"date-picker-range\":\n return df_table[start_date:end_date].to_dict(\"rows\")\n else:\n if relayoutData:\n if \"xaxis.range[0]\" in relayoutData:\n start_date = relayoutData[\"xaxis.range[0]\"].split(\".\")[0]\n end_date = relayoutData[\"xaxis.range[1]\"].split(\".\")[0]\n return df_table[start_date:end_date].to_dict(\"rows\")\n return df_table[start_date:end_date].to_dict(\"rows\")\n else:\n return df_table[start_date:end_date].to_dict(\"rows\")", "title": "" }, { "docid": "d1d29c240e0b03d0332e6ea2f95a9227", "score": "0.5341534", "text": "def demographic_df(start_date, end_date, center, cols, date_cols=None):\n params = [pd.to_datetime(start_date).date(),\n pd.to_datetime(end_date).date()]\n\n center_sql, params = create_center_sql(center, params)\n\n query = f\"\"\"\n SELECT {', '.join(col for col in cols)}\n FROM demographics d\n JOIN enrollment e ON d.member_id = e.member_id\n JOIN centers on e.member_id=centers.member_id\n WHERE (disenrollment_date >= ?\n OR disenrollment_date IS NULL)\n AND e.enrollment_date <= ?\n {center_sql};\n \"\"\"\n\n return sql_return_df(query, params, date_cols)", "title": "" }, { "docid": "ed34c3b07879d3f509ecb0d1e3bde6ae", "score": "0.53391874", "text": "def get(self, **kwargs):\n organization = kwargs['organization']\n team = kwargs['team']\n\n start_date = request.args.get('start_date', default=(datetime.utcnow().timestamp()-86300)*1000)\n end_date = request.args.get('end_date', default=(datetime.utcnow().timestamp() * 1000))\n\n start_date = datetime.fromtimestamp(int(start_date)/1000)\n end_date = datetime.fromtimestamp(int(end_date)/1000)\n\n if (start_date 
- end_date).days > 0:\n return response_message(EINVAL, 'start date {} is larger than end date {}'.format(start_date, end_date)), 401\n\n delta = end_date - start_date\n days = delta.days\n if delta % timedelta(days=1):\n days = days + 1\n\n stats = []\n start = start_date\n end = start + timedelta(days=1) \n\n query = {'organization': organization, 'team': team}\n query2 = {'status': 'waiting', 'organization': organization, 'team': team}\n\n for d in range(days):\n if d == (days - 1):\n end = end_date\n query['run_date__gte'] = start\n query2['schedule_date__gte'] = start\n query['run_date__lte'] = end\n query2['schedule_date__lte'] = end\n\n query['status'] = 'successful'\n tasks = Task.objects(**query)\n succeeded = tasks.count()\n\n query['status'] = 'failed'\n tasks = Task.objects(**query)\n failed = tasks.count()\n\n query['status'] = 'running'\n tasks = Task.objects(**query)\n running = tasks.count()\n\n tasks = Task.objects(**query2)\n waiting = tasks.count()\n \n stats.append({\n 'succeeded': succeeded,\n 'failed': failed,\n 'running': running,\n 'waiting': waiting,\n })\n\n start = start + timedelta(days=1)\n end = start + timedelta(days=1)\n return stats", "title": "" }, { "docid": "2e57984112bde873c5f47a5275a05525", "score": "0.5330313", "text": "def play_catchup(startstr, stopstr):\n # for d in pd.date_range(start='2018-01-01', end='2018-08-09', freq='7D'):\n for d in pd.date_range(start=startstr, end=stopstr, freq='1D'):\n start = d\n stop = d + datetime.timedelta(days=7)\n run(start, stop, '/misc/yoda/www/plots/user/sams/status/rtsdrawers')", "title": "" }, { "docid": "a0b5177d2955439dc1a57c33eccdefc1", "score": "0.5328639", "text": "def start(start):\n \"\"\"Calculates TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.\"\"\"\n\n \"\"\"Begin Error Checking\"\"\"\n\n # Ensure the date passed is prior to the last date for which data is available\n latest_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n # If not, return an error message\n if start > latest_date:\n return(f\"<b>Error:</b> No data available in the specified timeframe. 
Please enter a date less than {latest_date}.\")\n\n # If the start date passed is prior to the first date with available data,\n # reassign start date to the first date with data available (for clarification when printing results)\n start_adj_note = \"\"\n first_date = session.query(Measurement.date).order_by(Measurement.date).first()[0]\n if start < first_date:\n start = first_date\n start_adj_note = \" (date adjusted due to data availability)\"\n\n \"\"\"End Error Checking\"\"\"\n\n # Query temperature data\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n\n # Convert list of tuples into normal list\n summary = list(np.ravel(results))\n \n # Return formatted results\n return(f\"<h3>Temperature results for date range starting {start}{start_adj_note}:</h3>\"\n f\"Minimum: {'{:.2f}'.format(summary[0])} F<br/>\"\n f\"Average: {'{:.2f}'.format(summary[1])} F<br/>\"\n f\"Maximum: {'{:.2f}'.format(summary[2])} F\"\n )", "title": "" }, { "docid": "c87f13be26816fff941f0891f8f89457", "score": "0.53226984", "text": "async def stats(self, ctx):\r\n if len(self.history) <= 0:\r\n return await ctx.send(\"No score given yet, can't stat the void yet\")\r\n\r\n df = self.history\r\n\r\n first_of_month = datetime.utcnow().date().replace(day=1)\r\n first_of_month = datetime.combine(first_of_month, datetime.min.time())\r\n\r\n first_of_year = datetime.utcnow().date().replace(month=1, day=1)\r\n first_of_year = datetime.combine(first_of_year, datetime.min.time())\r\n\r\n this_week = df[\"date\"] >= datetime.utcnow() - timedelta(weeks=1)\r\n this_month = df[\"date\"] >= first_of_month\r\n this_year = df[\"date\"] >= first_of_year\r\n\r\n avg_week = df[this_week][\"score\"].sum() / len(df[this_week])\r\n avg_month = df[this_month][\"score\"].sum() / len(df[this_month])\r\n avg_year = df[this_year][\"score\"].sum() / len(df[this_year])\r\n\r\n await ctx.send(\r\n \"Average this week: {:.2f} month: {:.2f} year: {:.2f}\\n\"\r\n \"This week: max: {}, min: {}\\n\"\r\n \"This month: max: {}, min: {}\\n\"\r\n \"All time: max: {}, min: {}\".format(\r\n avg_week,\r\n avg_month,\r\n avg_year,\r\n df[this_week][\"score\"].max(),\r\n df[this_week][\"score\"].min(),\r\n df[this_month][\"score\"].max(),\r\n df[this_month][\"score\"].min(),\r\n df[\"score\"].max(),\r\n df[\"score\"].min(),\r\n )\r\n )\r\n\r\n print(\"Giving score tracking stats\")", "title": "" }, { "docid": "19e526ec3bde64ab5f9f679f58aa2615", "score": "0.5297144", "text": "def get_burnup_stats():\n\n # Getting burnup_data\n burnup_data = load_burnup_data()\n\n # Checking that the burnup data dict isn't empty\n if len(burnup_data) < 1:\n raise ValueError(\"Burnup Data has length %d. 
Expected length of at least 1.\" % len(burnup_data))\n\n # Collecting lists of stats\n scope_list = []\n done_list = []\n dates_list = []\n\n # Sorting date keys from the burnup data json object\n sorted_date_keys = sorted(list(burnup_data.keys()))\n for date in sorted_date_keys:\n # Getting data at the date/key location and putting data\n # into the appropriate stat lists\n data = burnup_data[date]\n scope_list.append(data[\"cards_total\"])\n done_list.append(data[\"cards_done\"])\n dates_list.append(datetime.datetime.strptime(date, \"%Y-%m-%d\"))\n\n # Populating/creating the stats dataframe\n dataframe = pd.DataFrame({\n \"scope\": scope_list,\n \"done\": done_list,\n \"dates\": dates_list\n })\n\n return dataframe", "title": "" }, { "docid": "92741e55e724322ab172930f9a5774e0", "score": "0.5294783", "text": "def getSeasons(self, FirstDay, FirstSeason, LastDay,LastSeason, league):\n self.clear()\n csv = open(\"teamproject/BundesligaData.csv\", \"w\")\n csv.write(\n \"HomeTeam\" +\n \",\" +\n \"AwayTeam\" +\n \",\" +\n \"HomeGoals\" +\n \",\" +\n \"AwayGoals\" +\n \",\" +\n \"Date\" +\n \",\" +\n \"win\"+\n \"\\n\")\n\n if league == \"1. Bundesliga\" or league == \"2. Bundesliga\" or league == \"3. Bundesliga\" or league == \"1. Handball Bundesliga\":\n for i in range(FirstSeason, (LastSeason + 1)):\n counter = 0\n startday_counter = 0\n Game = {}\n Date = {}\n\n GameDay = {}\n HomeTeam = {}\n AwayTeam = {}\n GoalsHome = {}\n GoalsAway = {}\n\n win_team = {}\n\n if FirstSeason == LastSeason:\n start_season_day = FirstDay\n end_season_day = LastDay\n elif i == FirstSeason and FirstDay != 1:\n start_season_day = FirstDay\n end_season_day = 34\n elif i == LastSeason and LastDay != 34:\n start_season_day = 1\n end_season_day = LastDay\n else:\n start_season_day = 1\n end_season_day = 34\n\n if league == \"1. Bundesliga\":\n game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/bl1/{i}').text)\n elif league == \"2. Bundesliga\":\n game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/bl2/{i}').text)\n elif league == \"3. Bundesliga\":\n game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/bl3/{i}').text)\n elif league == \"1. 
Handball Bundesliga\":\n game_data = json.loads(requests.get(f'http://www.openligadb.de/api/getmatchdata/hbl/{i}').text)\n\n\n for game in game_data:\n startday_counter += 1\n if (startday_counter / 9) + 1 > start_season_day and (startday_counter / 9) <= end_season_day:\n\n Date[counter] = game['MatchDateTime']\n Team1 = game['Team1']\n HomeTeam[counter] = Team1['TeamName']\n\n Team2 = game['Team2']\n AwayTeam[counter] = Team2['TeamName']\n\n Matchresults = game['MatchResults']\n\n Result_half = Matchresults[0]\n TeamA_half = Result_half['PointsTeam1']\n TeamB_half = Result_half['PointsTeam2']\n\n if not len(Matchresults) == 1:\n Result = Matchresults[1]\n TeamA = Result['PointsTeam1']\n TeamB = Result['PointsTeam2']\n else:\n TeamA = -1\n TeamB = -1\n\n if TeamA_half + TeamB_half > TeamA + TeamB:\n GoalsHome[counter] = TeamA_half\n GoalsAway[counter] = TeamB_half\n if TeamA_half > TeamB_half:\n win_team[counter] = \"h\"\n elif TeamA_half < TeamB_half:\n win_team[counter] = \"a\"\n elif TeamA_half == TeamB_half:\n win_team[counter] = \"d\"\n\n else:\n GoalsHome[counter] = TeamA\n GoalsAway[counter] = TeamB\n if TeamA_half > TeamB_half:\n win_team[counter] = \"h\"\n elif TeamA_half < TeamB_half:\n win_team[counter] = \"a\"\n elif TeamA_half == TeamB_half:\n win_team[counter] = \"d\"\n\n match = HomeTeam[counter] + \",\" + AwayTeam[counter] + \",\" + str(GoalsHome[counter]) + \",\" + str(GoalsAway[counter]) + \",\" + Date[counter] + \",\" + win_team[counter] + \"\\n\"\n csv.write(match)\n counter += 1\n else:\n print('Wrong string for crawling a certain league')", "title": "" }, { "docid": "66bd4b78fefb01aca81faed8658816c6", "score": "0.5281948", "text": "def get_satisfaction_report(analytics, start_date, end_date):\n ## View ID for Analytics - https://ga-dev-tools.appspot.com/query-explorer/\n VIEW_ID = '157729614'\n start = start_date.strftime('%Y-%m-%d')\n end = end_date.strftime('%Y-%m-%d')\n # Use the Analytics Service Object to query the Analytics Reporting API V4.\n return analytics.reports().batchGet(\n body={\n 'reportRequests': [\n ## This report gets total views and views for first project page\n {\n 'viewId': VIEW_ID,\n 'pageSize': 10000,\n 'dateRanges': [{'startDate': start, 'endDate': end}],\n 'dimensions': [{'name': 'ga:eventCategory'},\n {'name': 'ga:eventAction'},\n {'name': 'ga:eventLabel'},],\n 'metrics': [{'expression': 'ga:totalEvents'}]\n }]\n }\n ).execute()", "title": "" }, { "docid": "45ff6c1aadfe3763b382bc7b4fef421b", "score": "0.52740854", "text": "def counter_df(engine, store, date_start, date_end, start_hour, start_min, end_hour, end_min, cam):\n\n # Filter dates\n start_date = datetime.strptime(date_start + ' ' + start_hour.zfill(2) + ':' + start_min.zfill(2) + ':00', '%d/%m/%Y %H:%M:%S')\n end_date = datetime.strptime(date_end + ' ' + end_hour.zfill(2) + ':' + end_min.zfill(2) + ':00', '%d/%m/%Y %H:%M:%S')\n\n connection = engine.connect()\n query = '''\n SELECT * from counts \n WHERE \"Store_name\"= :group \n AND \"Start_date\" >= :A\n AND \"End_date\" <= :B \n AND \"Camera\"= :C\n '''\n\n counter_conn = connection.execute(text(query), group=store, A=start_date, B=end_date, C=int(cam[-1]), O='person').fetchall()\n columns= ['Store_name', 'Start_date', 'End_date', 'Camera', 'Count', 'inout', 'name_video']\n counter = pd.DataFrame(counter_conn, columns=columns)\n return counter", "title": "" }, { "docid": "ed4c2849f70cddfafed0152c9d1c2b23", "score": "0.5273277", "text": "def main(start_date, end_date, verbose):\n levels = [\n logging.WARNING,\n logging.DEBUG,\n 
]\n log_format = \"%(asctime)s - %(name)s: %(message)s\"\n logger = logging.getLogger(__name__)\n verbose = min(len(levels) - 1, verbose)\n logging.basicConfig(level=levels[verbose], format=log_format)\n start_date = datetime.datetime.strptime(start_date, date_format)\n end_date = datetime.datetime.strptime(end_date, date_format)\n daterange = pd.date_range(start_date, end_date)\n n = len(daterange)\n for day in tqdm(daterange):\n url = \"http://audio.radio24.ilsole24ore.com/radio24_audio/{}/{}-lazanzara.mp3\".format(\n day.strftime(year_format), day.strftime(date_format)\n )\n response = requests.get(url, stream=True)\n logger.debug(\"requested url %s\", url)\n logger.debug(\"got %s\", response)\n if response.ok:\n output_path = os.path.basename(url)\n with open(os.path.basename(url), \"wb\") as output_file:\n for chunk in response.iter_content(4096):\n output_file.write(chunk)\n logger.debug(\"written %s\", output_path)", "title": "" }, { "docid": "664c4e6d94aea1d544be47b35dc157fc", "score": "0.5271459", "text": "def get_df(tickers,from_date, to_date):\r\n date = to_date\r\n holder = []\r\n # While the earliest date returned is later than the earliest date requested, keep on querying the API\r\n # and adding the results to a list. \r\n for ticker in tickers:\r\n data_raw = get_data(ticker,date)\r\n data_df = pd.DataFrame(data_raw['Data'])\r\n dts = pd.to_datetime(data_df['time'], unit='s') \r\n cl = data_df[\"close\"]\r\n df = pd.DataFrame({\"time\": dts.values, ticker: cl.values})\r\n df = pd.DataFrame(cl.values, index=dts.values, columns=[ticker])\r\n #df.set_index('time', inplace=True)\r\n holder.append(df) \r\n df_all = pd.concat(holder,axis=1) \r\n return df_all", "title": "" }, { "docid": "0a0e86fae6774cb5aaf59b004a61df06", "score": "0.52647465", "text": "def GetDataFeedQuery(table_id, start_date, end_date):\n return gdata.analytics.client.DataFeedQuery({\n 'ids': table_id,\n 'start-date': start_date,\n 'end-date': end_date,\n 'dimensions': 'ga:pagePath',\n 'metrics': 'ga:pageviews',\n 'sort': '-ga:pageviews',\n 'max-results': '50000'})", "title": "" }, { "docid": "94b950870fc487ddd2a6b09b8e9fd009", "score": "0.5260198", "text": "def source_daily_score():\n Session = sessionmaker(bind=engine)\n session = Session()\n \n max_scored_dt = session.query(func.max(Scores.dt)).scalar() or \"2019-01-01\"\n max_scored_dt = to_datetime(max_scored_dt) \n max_scored_dt += Timedelta(\"1 day\")\n max_scored_dt = max_scored_dt.isoformat()\n \n today = to_datetime(date.today()).isoformat()\n\n query = session.query(News.source.label(\"source\"), \n News.etl_dttm.label(\"dt\"),\n Sentiments.sent_comparative.label(\"score\"))\\\n .join(Sentiments, \n News.id == Sentiments.id)\\\n .filter(News.etl_dttm >= max_scored_dt)\\\n .filter(News.etl_dttm < today)\\\n .all()\n data = DataFrame(query)\n if data.shape[0]:\n data[\"dt\"] = data[\"dt\"].apply(lambda x: x[:10])\n groupers = [\"dt\", \"source\"]\n data = data.groupby(groupers, as_index=False).mean()\n for item in data.to_dict('records'):\n session.add(Scores(**item))\n session.commit()", "title": "" }, { "docid": "1b3396005ccfa574c0aa445f8f7e2cfe", "score": "0.52569246", "text": "def generate_summary(\n self, start_date: date = date(2020, 1, 1), end_date: date = dt.now().date()\n ) -> dict:\n select = filter_time(self.mat[\"date\"], start_date, end_date)\n df = self.mat[select]\n\n professions = self.get_professions(df)\n dgvs_keys = self.get_dgvs_keys(df)\n\n _summary = {\n \"key_dgvs\": {\n key: self.get_dgvs_key_summary(self.mat, key) for key in 
dgvs_keys\n },\n \"intervention_types\": {\n profession: self.get_profession_summary(self.mat, profession)\n for profession in professions\n },\n }\n\n return _summary", "title": "" }, { "docid": "994ace5322a02d0bf17a049258effedb", "score": "0.5252491", "text": "def build_result_dataframe(gh, pred, df):\n # generate a sequence of timestamp\n start_time = df.timestamp.values.max() + np.timedelta64(15, 'm')\n timestamps = pd.date_range(start_time, periods=len(pred), freq='15T')\n\n # calulate 'day' colum of the dataframe\n dtdelta = (timestamps.date - df.timestamp.max().date())\n dtdelta = list(map(lambda x: x.days, dtdelta))\n days = dtdelta + df.day.max()\n\n # calulate time of day\n tod = list(map(lambda x: x.strftime('%H:%M'), timestamps.time))\n\n # construct the result dictionary\n res = {'geohash6': [gh] * len(pred),\n 'day': days,\n 'timestamp': tod,\n 'demand': pred\n }\n\n return pd.DataFrame(res)", "title": "" }, { "docid": "efafc9d98bd0f382b5259e1f5ecbd8de", "score": "0.5249097", "text": "def update_graph_a(start_date, end_date, option_slctd) -> pd.DataFrame:\n \n #if datafetch.check_update():\n # datafetch.fetch()\n\n df_tweets_aapl_sentiments_minute = datafetch.df_tweets_aapl_sentiments_minute[\n start_date:end_date\n ]\n df_ticker_aapl_minute = datafetch.df_ticker_aapl_minute[start_date:end_date]\n\n df_tweets_goog_sentiments_minute = datafetch.df_tweets_goog_sentiments_minute[\n start_date:end_date\n ]\n df_ticker_goog_minute = datafetch.df_ticker_goog_minute[start_date:end_date]\n\n df_tweets_amzn_sentiments_minute = datafetch.df_tweets_amzn_sentiments_minute[\n start_date:end_date\n ]\n df_ticker_amzn_minute = datafetch.df_ticker_amzn_minute[start_date:end_date]\n\n if option_slctd == \"AAPL\":\n df_tweets_sentiments_minute = df_tweets_aapl_sentiments_minute\n df_ticker_minute = df_ticker_aapl_minute\n\n if option_slctd == \"GOOG\":\n df_tweets_sentiments_minute = df_tweets_goog_sentiments_minute\n df_ticker_minute = df_ticker_goog_minute\n\n if option_slctd == \"AMZN\":\n df_tweets_sentiments_minute = df_tweets_amzn_sentiments_minute\n df_ticker_minute = df_ticker_amzn_minute\n\n fig = make_subplots(\n rows=4, cols=1, shared_xaxes=True, y_title=\"Sentiments Count\", x_title=\"Date\"\n )\n\n fig.add_trace(\n go.Bar(\n x=df_tweets_sentiments_minute.index,\n y=df_tweets_sentiments_minute[\"positive_count\"],\n name=\"positive sentiment\",\n marker_color=\"#6ef77b\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Bar(\n x=df_tweets_sentiments_minute.index,\n y=df_tweets_sentiments_minute[\"negative_count\"],\n name=\"negative sentiment\",\n marker_color=\"#fa5f5f\",\n ),\n row=2,\n col=1,\n )\n fig.add_trace(\n go.Bar(\n x=df_tweets_sentiments_minute.index,\n y=df_tweets_sentiments_minute[\"neutral_count\"],\n name=\"neutral sentiment\",\n marker_color=\"#9be9db\",\n ),\n row=3,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=df_ticker_minute.index,\n y=df_ticker_minute[\"price\"],\n name=\"ticker average price per minute\",\n marker_color=\"#ff9d02\",\n ),\n row=4,\n col=1,\n )\n\n fig.update_layout(template=\"plotly_white\", title_text=\"Sentiments count per minute\",height=500)\n fig.update_yaxes(title_text=\"AVG Price\", row=4, col=1)\n fig.update_traces(dict(marker_line_width=0))\n\n return fig", "title": "" }, { "docid": "26cddf1f5c85a4f7aeac7c5911782ee5", "score": "0.52446246", "text": "def scrape_seahawks(start_year, end_year):\n\n df = pd.DataFrame()\n for year in range(start_year, end_year + 1):\n\n url = 
f\"https://www.pro-football-reference.com/teams/sea/{year}.htm\"\n sel = \"table#games\"\n\n response = requests.get(url)\n html = response.content\n soup = BeautifulSoup(html, \"html.parser\")\n time.sleep(random.random() * 10 + 5)\n table = soup.select_one(sel)\n table_rows = table.select(\"tr\")\n data_rows = []\n for table_row in table_rows:\n th_elements = table_row.select(\"th\")\n th_data = [th.text for th in th_elements]\n td_elements = table_row.select(\"td\")\n td_data = [td.text for td in td_elements]\n data_rows.append(th_data + td_data)\n new_df = pd.DataFrame(data_rows[2:], columns=data_rows[1])\n new_df[\"year\"] = year\n df = df.append(new_df)\n\n return df", "title": "" }, { "docid": "48944a2f33efdb525c4c6fd6b6b0bb90", "score": "0.5239138", "text": "def GetEventsInRange(self, start_date, end_date):\n self.c.execute(\"select * from events where start_time >= ? and start_time <= ? order by start_time\",\n (ToTimestamp(start_date), ToTimestamp(end_date)))\n return self.CreateEventsFromResults()", "title": "" }, { "docid": "52687c4b1b22f6d465dca62b5b865f72", "score": "0.52328193", "text": "def get_perf_data(self, entity_scope, entity_code, from_date: Timestamp, to_date: Timestamp, asat: Timestamp,\n **kwargs) -> DataFrame:\n\n entity_id = self._create_id_from_scope_code(entity_scope, entity_code)\n\n if entity_id not in self.entities:\n raise KeyError(f\"No perf data for entity with scope {entity_scope} and code {entity_code}\")\n\n keyword_arguments = self.entities[entity_id]\n keyword_arguments[\"end_date\"] = to_date\n\n return self._produce_perf_data(**keyword_arguments)", "title": "" }, { "docid": "1c3aeb9d2961e2c6b19badfb41c75a29", "score": "0.52324986", "text": "def get_stats(uid, start_date, end_date):\n\n db = get_db()\n\n rows = db.get_stats(start_date=start_date, end_date=end_date, uid=uid)\n if len(rows) == 0:\n return\n plot = {}\n\n col_names = rows[0]._asdict().keys()\n\n plot[\"stat_fields\"] = []\n for name in col_names:\n split_name = name.rsplit(\"__\")\n if len(split_name) > 1:\n if split_name[0] not in plot:\n plot[split_name[0]] = {}\n plot[\"stat_fields\"].append(split_name[0])\n plot[split_name[0]][split_name[1]] = []\n else:\n plot[name] = []\n\n ignored_fields = [\"uid\", \"id\", \"data_period\", \"temperature_in\", \"wind_dir\"]\n\n # Create lists with each data type and make timestamp pretty\n for row in rows:\n for name in col_names:\n if name == \"timestamp\":\n plot[name].append(\n datetime.fromtimestamp(getattr(row, name)).strftime(\"%Y-%m-%d\")\n )\n elif name in ignored_fields:\n continue\n else:\n split_name = name.rsplit(\"__\")\n if len(split_name) > 1:\n if getattr(row, name) is None:\n plot[split_name[0]][split_name[1]].append(0)\n else:\n plot[split_name[0]][split_name[1]].append(\n round(getattr(row, name), 3)\n )\n else:\n if getattr(row, name) is None:\n plot[name].append(0)\n else:\n plot[name].append(round(getattr(row, name), 3))\n\n if sum(plot[\"rain\"]) == 0:\n del plot[\"rain\"]\n\n return plot", "title": "" }, { "docid": "89be5d1b3856dc810ee2b9891efa49e6", "score": "0.52318335", "text": "def scrape_huskies(start_year, end_year):\n df = pd.DataFrame()\n for year in range(start_year, end_year + 1):\n\n url = f\"https://www.sports-reference.com/cfb/schools/washington/{year}-schedule.html\"\n sel = \"table#schedule\"\n\n response = requests.get(url)\n html = response.content\n soup = BeautifulSoup(html, \"html.parser\")\n time.sleep(random.random() * 10 + 5)\n table = soup.select_one(sel)\n table_rows = table.select(\"tr\")\n 
data_rows = []\n for table_row in table_rows:\n th_elements = table_row.select(\"th\")\n th_data = [th.text for th in th_elements]\n td_elements = table_row.select(\"td\")\n td_data = [td.text for td in td_elements]\n data_rows.append(th_data + td_data)\n new_df = pd.DataFrame(data_rows[1:], columns=data_rows[0])\n df = df.append(new_df)\n\n return df", "title": "" }, { "docid": "b008fbc587bef8aa1e64df881a01f49d", "score": "0.5225511", "text": "def gather_data():\r\n players_config = {\r\n \"Opp1\": OppAwareAI,\r\n \"Opp2\": OppAwareAI,\r\n \"Opp3\": OppAwareAI,\r\n \"NeuralNet\": NeuralNetPlayer\r\n }\r\n result = {key: 0 for key in players_config}\r\n result[\"Tie\"] = 0\r\n result[\"Total\"] = 0\r\n\r\n # Ran for 15 mins each - 1v1, 1v2, 1v3\r\n # read the data into dataframe\r\n DATABASE.read_csv()\r\n\r\n # play the game\r\n end_time = datetime.datetime.now() + datetime.timedelta(minutes=15)\r\n while True:\r\n if datetime.datetime.now() >= end_time:\r\n break\r\n\r\n players = [constructor(name) for name, constructor in players_config.items()]\r\n\r\n game = Game.Game(players)\r\n game.play(False, slow=False)\r\n result[\"Total\"] += 1\r\n\r\n if len(game.winners) > 1:\r\n result[\"Tie\"] += 1\r\n else:\r\n result[game.winners[0].name] += 1\r\n\r\n # write the resulting data to the csv\r\n DATABASE.write_csv()\r\n\r\n print(\"\\nRESULT\")\r\n print(result)\r\n\r\n print(\"\\nRESULT PERCENTAGE\")\r\n percentage = {key: \"{:.2f}%\".format(val / result[\"Total\"] * 100) for key, val in result.items()}\r\n print(percentage)", "title": "" }, { "docid": "6e957f01bef0a58d90beb5734fbca1e1", "score": "0.5219237", "text": "def start_end(start,end):\n \"\"\"Calculates the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.\"\"\"\n\n \"\"\"Begin Error Checking\"\"\"\n\n # Ensure the start date passed is prior to the last date for which data is available\n latest_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n # If not, return an error message\n if start > latest_date:\n return(f\"<b>Error:</b> No data available in the specified timeframe. Please enter a start date less than {latest_date}.\")\n\n # Ensure the end date passed is more recent than the first date for which data is available\n first_date = session.query(Measurement.date).order_by(Measurement.date).first()[0]\n # If not, return an error message\n if end < first_date:\n return(f\"<b>Error:</b> No data available in the specified timeframe. 
Please enter an end date greater than {first_date}.\")\n\n # Ensure the start date passed is prior to the end date passed\n # If not, return an error message\n if start > end:\n return(f\"<b>Error:</b> Please ensure the start date entered is prior to the end date.\")\n\n # If the start date passed is prior to the first date with available data,\n # reassign start date to the first date with data available (for clarification when printing results)\n start_adj_note = \"\"\n if start < first_date:\n start = first_date\n start_adj_note = \" (start date adjusted due to data availability)\"\n\n # If the end date passed is after the last date with available data,\n # reassign end date to the last date with data available (for clarification when printing results)\n end_adj_note = \"\"\n if end > latest_date:\n end = latest_date\n end_adj_note = \" (end date adjusted due to data availability)\"\n\n \"\"\"End Error Checking\"\"\"\n\n # Query temperature data\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).\\\n filter(Measurement.date <= end).all()\n \n # Convert list of tuples into normal list\n summary = list(np.ravel(results))\n\n # Return formatted results\n return(f\"<h3>Temperature results for date range {start}{start_adj_note} to {end}{end_adj_note}:</h3>\"\n f\"Minimum: {'{:.2f}'.format(summary[0])} F<br/>\"\n f\"Average: {'{:.2f}'.format(summary[1])} F<br/>\"\n f\"Maximum: {'{:.2f}'.format(summary[2])} F\"\n )", "title": "" }, { "docid": "4e7e67ce6835e9823689cb76f3531822", "score": "0.5206909", "text": "def _set_date_range(self, start_date, end_date):\n first_available = self.pg.index[0].tz_convert(self.tz)\n last_available = self.pg.index[-1].tz_convert(self.tz)\n\n timestep = (\n pd.DataFrame(\n index=pd.date_range(start_date, end_date, freq=\"H\", tz=self.tz)\n )\n .resample(self.freq, label=\"left\")\n .size()\n .rename(\"Number of Hours\")\n )\n\n if self.freq == \"H\":\n if first_available > pd.Timestamp(start_date, tz=self.tz):\n self.from_index = first_available\n else:\n self.from_index = pd.Timestamp(start_date, tz=self.tz)\n if last_available < pd.Timestamp(end_date, tz=self.tz):\n self.to_index = last_available\n else:\n self.to_index = pd.Timestamp(end_date, tz=self.tz)\n\n elif self.freq == \"D\":\n if timestep[0] == timestep[1]:\n first_full = pd.Timestamp(timestep.index.values[0], tz=self.tz)\n else:\n first_full = pd.Timestamp(timestep.index.values[1], tz=self.tz)\n if timestep[-1] == timestep[-2]:\n last_full = pd.Timestamp(timestep.index.values[-1], tz=self.tz)\n else:\n last_full = pd.Timestamp(timestep.index.values[-2], tz=self.tz)\n\n if first_available > first_full:\n self.from_index = first_available.ceil(\"D\")\n else:\n self.from_index = first_full\n if last_available < pd.Timestamp(end_date, tz=self.tz):\n self.to_index = last_available.floor(\"D\") - pd.Timedelta(\"1 days\")\n else:\n self.to_index = last_full\n\n elif self.freq == \"W\":\n if timestep[0] == timestep[1]:\n first_full = pd.Timestamp(timestep.index.values[0], tz=self.tz)\n else:\n first_full = pd.Timestamp(timestep.index.values[1], tz=self.tz)\n if timestep[-1] == timestep[-2]:\n last_full = pd.Timestamp(timestep.index.values[-1], tz=self.tz)\n else:\n last_full = pd.Timestamp(timestep.index.values[-2], tz=self.tz)\n\n if first_available > first_full:\n self.from_index = min(timestep[first_available:].index)\n else:\n self.from_index = first_full\n if last_available < last_full:\n self.to_index = 
max(timestep[:last_available].index)\n else:\n self.to_index = last_full\n\n self.timestep = timestep[self.from_index : self.to_index]", "title": "" }, { "docid": "e906ef5bb42206b77694a3f6ecc9def8", "score": "0.5204897", "text": "def get_json_str(start_date, end_date, table=\"day\"):\n\n db = get_db()\n\n uid = get_latest_uid()\n\n rows = db.get_records(table, start_date=start_date, end_date=end_date, uid=uid)\n\n plot = {\"hotsname\": hostname}\n\n plot[\"start_date\"] = datetime.fromtimestamp(start_date).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n plot[\"end_date\"] = datetime.fromtimestamp(end_date).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n col_names = rows[0]._asdict().keys()\n\n for name in col_names:\n plot[name] = []\n\n # Rain doesn't have the same timestamp as the rest of the data\n plot[\"rain_time\"] = []\n\n # Create lists with each data type and make timestamp pretty\n for row in rows:\n for name in col_names:\n if name == \"timestamp\":\n plot[name].append(\n datetime.fromtimestamp(getattr(row, name)).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n )\n elif name == \"uid\" or name == \"id\" or name == \"rain\":\n continue\n else:\n if getattr(row, name) is None:\n plot[name].append(0)\n else:\n plot[name].append(round(getattr(row, name), 3))\n\n # TODO - set start date to the beginning of that day\n # That way the bins are accurate to the day\n\n # Get rain data for the time period\n rain_data = db.get_rain(int(start_date), int(end_date), uid=uid)\n rain_total = 0\n\n # Bin data into the appropriate size for histograms\n idx = get_start_bin(int(end_date - 1), table)\n bins = range(int(start_date), int(end_date), rain_mod[table])\n\n # Loop through each rain bin\n for rain_bin in bins:\n plot[\"rain_time\"].append(get_rain_label(idx, table))\n rain = 0\n # Loop through each rain sample\n for rain_hour in rain_data:\n # Check if the sample falls into our bin\n if rain_hour[1] >= rain_bin and rain_hour[1] < (rain_bin + rain_mod[table]):\n rain += rain_hour[3]\n\n plot[\"rain\"].append(rain)\n\n # Wrap around depending on the number of bins (since we don't always start at 0)\n idx = (idx + 1) % len(bins)\n\n return jsonify(plot)", "title": "" }, { "docid": "b73670620286d1ea2af5ae1d5ef27669", "score": "0.5194873", "text": "def create_query_params(view_id, start_date, end_date):\n start_date_str = start_date.strftime('%Y-%m-%d')\n end_date_str = end_date.strftime('%Y-%m-%d')\n print(\"|| \", start_date_str, \" - \", end_date_str)\n query_params = {'viewId': view_id,\n 'dateRanges': [{'startDate': start_date_str, 'endDate': end_date_str}],\n 'metrics': [{'expression': 'ga:users'}],\n 'pageSize': 10\n }\n return query_params", "title": "" }, { "docid": "64f74a34b3d773ac8057eed0734f0ee8", "score": "0.51940405", "text": "def hist_opens(self, start_time, end_time, coin_dict):\n data = pd.DataFrame()\n for exchange in coin_dict.keys():\n for coin in coin_dict[exchange]:\n column = self.assure_column('price', start_time, end_time, exchange, coin)\n data[column.name] = column\n\n self.save()\n return data", "title": "" }, { "docid": "da3bb650038490c458be4a58260c6a15", "score": "0.51911026", "text": "def filter_clicks_data(self, start_date, end_date):\n df_clicks = self.data_loader.load_clicks_data()\n if start_date:\n df_clicks = df_clicks[df_clicks[\"created_at\"] > start_date].copy()\n if end_date:\n df_clicks = df_clicks[df_clicks[\"created_at\"] <= end_date].copy()\n return df_clicks", "title": "" }, { "docid": "90f9554cea9da52380bc8d7b895deb7c", "score": "0.51901764", "text": "def get_data(symbol, 
start=None, end=None, normal=True):\n nyc = timezone(\"America/New_York\")\n url, key, secret = credentialing()\n api = tradeapi.REST(key, secret, url)\n if symbol.lower() == \"random\":\n assets = api.list_assets()\n symbols = [\n asset.symbol\n for asset in assets\n if asset.tradable\n and asset.exchange == \"NYSE\"\n and asset.status == \"active\"\n and asset.shortable\n ]\n symbol = np.random.choice(symbols)\n\n # if normal:\n # start, end = business_day()\n if normal:\n end = start + timedelta(days=40)\n df = api.polygon.historic_agg_v2(\n symbol,\n 1,\n \"minute\",\n _from=pd.Timestamp(start).isoformat(),\n to=pd.Timestamp(end).isoformat(),\n ).df\n # df = df[np.logical_and(df.index >= start, df.index <= end)]\n return df, symbol", "title": "" }, { "docid": "6fa16d391e3c4f0ca24de4de149dcbdf", "score": "0.5189546", "text": "def start_end(start, end):\n\n select = [\n func.min(Measurement.tobs),\n func.avg(Measurement.tobs),\n func.max(Measurement.tobs)\n ]\n\n results = session.query(\n *select\n ).filter(\n Measurement.date >= start\n ).filter(\n Measurement.date <= end\n ).all()\n\n daterange_data = []\n for min, avg, max in results:\n range_dict = {}\n range_dict['Min Temperature'] = min\n range_dict['Average Temperature'] = round(avg, 2)\n range_dict['Max Temperature'] = max\n daterange_data.append(range_dict)\n\n return jsonify(daterange_data)", "title": "" }, { "docid": "da6193e64a9c490a1c5f70b0ef0f9b04", "score": "0.51785266", "text": "def large_request(start_dt_date: date, end_dt_date: date, step: int, verbose: bool,\n team: Optional[str] = None) -> pd.DataFrame:\n \n # Count failed requests. If > X, break\n error_counter = 0\n\n # A flag for passing over the success message of requests are failing\n no_success_msg_flag = False\n \n dataframe_list = []\n\n d1 = start_dt_date\n d2 = end_dt_date\n\n print(\"This is a large query, it may take a moment to complete\")\n \n # Number of days per mini-query\n # (test this later to see how large I can make this without losing data)\n\n # While intermediate query end_dt <= global query end_dt, keep looping\n d = d1 + timedelta(days=step)\n while d <= d2: \n # Dates before 3/15 and after 11/15 will always be offseason.\n # If these dates are detected, check if the next season is within the user's query.\n # If yes, fast-forward to the next season to avoid empty requests\n # If no, break the loop. all useful data has been pulled.\n if (d.month == 3 and d.day < 15) or d.month <= 2:\n print('Skipping offseason dates')\n d1 = d1.replace(month=3, day=15, year=d1.year)\n d = d1 + timedelta(days=step+1)\n elif (d1.month == 11 and d1.day > 14) or d1.month > 11:\n if d2.year > d.year:\n print('Skipping offseason dates')\n d1 = d1.replace(month=3, day=15, year=d1.year+1)\n d = d1 + timedelta(days=step+1)\n else:\n break\n\n data = small_request(d1, d, team=team)\n \n # Append to list of dataframes if not empty or failed\n # (failed requests have one row saying \"Error: Query Timeout\")\n if data.shape[0] > 1:\n dataframe_list.append(data)\n else:\n # If it failed, retry up to three times\n success = 0\n while success == 0:\n data = small_request(d1, d, team=team)\n if data.shape[0] > 1:\n dataframe_list.append(data)\n success = 1\n else:\n error_counter += 1\n if error_counter > 2:\n # This request is probably too large. Cut a day off of this request\n # and make that its own separate request. 
For each, append to\n # dataframe list if successful, skip and print error message if failed.\n tmp_end = d - timedelta(days=1)\n smaller_data_1 = small_request(d1, tmp_end, team=team)\n smaller_data_2 = small_request(d, d, team=team)\n if smaller_data_1.shape[0] > 1:\n dataframe_list.append(smaller_data_1)\n print(f\"Completed sub-query from {d1} to {tmp_end}\")\n else:\n print(f\"Query unsuccessful for data from {d} to {tmp_end}. Skipping these dates.\")\n if smaller_data_2.shape[0] > 1:\n dataframe_list.append(smaller_data_2)\n print(f\"Completed sub-query from {d} to {d}\")\n else:\n print(f\"Query unsuccessful for data from {d} to {d}. Skipping these dates.\")\n\n # Flag for passing over the success message since this request failed\n no_success_msg_flag = True\n \n # Reset counter\n error_counter = 0\n break\n\n\n if verbose:\n if no_success_msg_flag is False:\n print(f\"Completed sub-query from {d1} to {d}\")\n else:\n no_success_msg_flag = False # if failed, reset this flag so message will send again next iteration\n \n # Increment dates\n d1 = d + timedelta(days=1)\n d = d + timedelta(days=step+1)\n\n # If start date > end date after being incremented,\n # the loop captured each date's data\n if d1 > d2:\n pass\n else:\n # If start date <= end date, then there are a few leftover dates to grab data for.\n # start_dt from the earlier loop will work,\n # but instead of d we now want the original end_dt\n data = small_request(d1, end_dt_date, team=team)\n dataframe_list.append(data)\n if verbose:\n print(f\"Completed sub-query from {d1} to {end_dt_date}\")\n\n # Concatenate all dataframes into final result set\n final_data = pd.concat(dataframe_list, axis=0)\n return final_data", "title": "" }, { "docid": "ec10ef11c30cee885b6a3ff44d5af160", "score": "0.5159372", "text": "def stock_df():\r\n\r\n while 1:\r\n start_date = input('Enter start date in \"YYYY-MM-DD\" format (or \"default\" for last 5 years): ').lower()\r\n end_date = input('Enter date in \"YYYY-MM-DD\" format (or \"default\" for present day): ').lower()\r\n\r\n # ensure proper 'YYYY-MM-DD' format\r\n pattern = \"(([0-9]){4}-([0-9]){2}-([0-9]){2})|(default)\"\r\n if re.match(pattern, start_date) and re.match(pattern, end_date):\r\n break\r\n else:\r\n print(f'If not seeking the \"default\" dates, ensure both \"{start_date}\" and \"{end_date}\" match the \"YYYY-MM-DD\" format')\r\n continue\r\n\r\n # timeframe\r\n if start_date == 'default':\r\n start_date = str(dt.datetime.today() - dt.timedelta(weeks=260))[:10]\r\n if end_date == 'default':\r\n end_date = dt.datetime.today().strftime('%Y-%m-%d')\r\n data = yf.download(stocks, start=start_date, end=end_date)['Adj Close']\r\n print(data)\r\n return data", "title": "" }, { "docid": "ba14382df1973f923e560246f18390ad", "score": "0.51589185", "text": "def results_by_date(full_results):\n data = {}\n\n date_to_1810 = {'male': {}, 'female': {}}\n date_1810_to_1819 = {'male': {}, 'female': {}}\n date_1820_to_1829 = {'male': {}, 'female': {}}\n date_1830_to_1839 = {'male': {}, 'female': {}}\n date_1840_to_1849 = {'male': {}, 'female': {}}\n date_1850_to_1859 = {'male': {}, 'female': {}}\n date_1860_to_1869 = {'male': {}, 'female': {}}\n date_1870_to_1879 = {'male': {}, 'female': {}}\n date_1880_to_1889 = {'male': {}, 'female': {}}\n date_1890_to_1899 = {'male': {}, 'female': {}}\n date_1900_on = {'male': {}, 'female': {}}\n\n for k in list(full_results.keys()):\n print(\"date analysis:\", k.title, k.author)\n if k.date < 1810:\n date_to_1810['male'] = merge(full_results[k]['male'], 
date_to_1810['male'])\n date_to_1810['female'] = merge(full_results[k]['female'], date_to_1810['female'])\n elif k.date < 1820:\n date_1810_to_1819['male'] = merge(full_results[k]['male'], date_1810_to_1819['male'])\n date_1810_to_1819['female'] = merge(full_results[k]['female'], date_1810_to_1819['female'])\n elif k.date < 1830:\n date_1820_to_1829['male'] = merge(full_results[k]['male'], date_1820_to_1829['male'])\n date_1820_to_1829['female'] = merge(full_results[k]['female'], date_1820_to_1829['female'])\n elif k.date < 1840:\n date_1830_to_1839['male'] = merge(full_results[k]['male'], date_1830_to_1839['male'])\n date_1830_to_1839['female'] = merge(full_results[k]['female'], date_1830_to_1839['female'])\n elif k.date < 1850:\n date_1840_to_1849['male'] = merge(full_results[k]['male'], date_1840_to_1849['male'])\n date_1840_to_1849['female'] = merge(full_results[k]['female'], date_1840_to_1849['female'])\n elif k.date < 1860:\n date_1850_to_1859['male'] = merge(full_results[k]['male'], date_1850_to_1859['male'])\n date_1850_to_1859['female'] = merge(full_results[k]['female'], date_1850_to_1859['female'])\n elif k.date < 1870:\n date_1860_to_1869['male'] = merge(full_results[k]['male'], date_1860_to_1869['male'])\n date_1860_to_1869['female'] = merge(full_results[k]['female'], date_1860_to_1869['female'])\n elif k.date < 1880:\n date_1870_to_1879['male'] = merge(full_results[k]['male'], date_1870_to_1879['male'])\n date_1870_to_1879['female'] = merge(full_results[k]['female'], date_1870_to_1879['female'])\n elif k.date < 1890:\n date_1880_to_1889['male'] = merge(full_results[k]['male'], date_1880_to_1889['male'])\n date_1880_to_1889['female'] = merge(full_results[k]['female'], date_1880_to_1889['female'])\n elif k.date < 1900:\n date_1890_to_1899['male'] = merge(full_results[k]['male'], date_1890_to_1899['male'])\n date_1890_to_1899['female'] = merge(full_results[k]['female'], date_1890_to_1899['female'])\n else:\n date_1900_on['male'] = merge(full_results[k]['male'], date_1900_on['male'])\n date_1900_on['female'] = merge(full_results[k]['female'], date_1900_on['female'])\n\n data['date_to_1810'] = date_to_1810\n data['date_1810_to_1819'] = date_1810_to_1819\n data['date_1820_to_1829'] = date_1820_to_1829\n data['date_1830_to_1839'] = date_1830_to_1839\n data['date_1840_to_1849'] = date_1840_to_1849\n data['date_1850_to_1859'] = date_1850_to_1859\n data['date_1860_to_1869'] = date_1860_to_1869\n data['date_1870_to_1879'] = date_1870_to_1879\n data['date_1880_to_1889'] = date_1880_to_1889\n data['date_1890_to_1899'] = date_1890_to_1899\n data['date_1900_on'] = date_1900_on\n\n return data", "title": "" }, { "docid": "6eb473d818b6333f9c0e246e4b80a914", "score": "0.5157703", "text": "def create_season_table(season):\n g = season.groupby('i_home') \n home = pd.DataFrame({'home_goals': g.home_goals.sum(),\n 'home_goals_against': g.away_goals.sum(),\n 'home_wins': g.home_win.sum(),\n 'home_draws': g.home_draw.sum(),\n 'home_losses': g.home_loss.sum()\n })\n g = season.groupby('i_away') \n away = pd.DataFrame({'away_goals': g.away_goals.sum(),\n 'away_goals_against': g.home_goals.sum(),\n 'away_wins': g.away_win.sum(),\n 'away_draws': g.away_draw.sum(),\n 'away_losses': g.away_loss.sum()\n })\n df = home.join(away)\n df['wins'] = df.home_wins + df.away_wins\n df['draws'] = df.home_draws + df.away_draws\n df['losses'] = df.home_losses + df.away_losses\n df['points'] = df.wins * 3 + df.draws\n df['gf'] = df.home_goals + df.away_goals\n df['ga'] = df.home_goals_against + df.away_goals_against\n 
df['gd'] = df.gf - df.ga\n df = pd.merge(teams, df, left_on='i', right_index=True)\n df = df.sort_index(by='points', ascending=False)\n df = df.reset_index()\n df['position'] = df.index + 1\n df['champion'] = (df.position == 1).astype(int)\n df['qualified_for_CL'] = (df.position < 5).astype(int)\n df['relegated'] = (df.position > 17).astype(int)\n return df", "title": "" }, { "docid": "be5cdb146062184050a70393bf47413e", "score": "0.51366746", "text": "def start_date(): \n #query for the min temp, the avg temp, and the max temp for a given start date\n start = \"2017-07-01\"\n \n start_date = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).group_by(Measurement.date).all()\n\n #return the start_date data\n return jsonify(start_date)", "title": "" }, { "docid": "d78f211b1ca8740a42d57a1bc55b9cd8", "score": "0.5127818", "text": "def all_states(date_start, date_end):\n\n # Query all passengers\n weather_sel = weatherData[weatherData['date_field_str'] >= date_start]\n weather_sel = weather_sel[weather_sel['date_field_str'] <= date_end]\n\n # close the session to end the communication with the database\n # Convert list of tuples into normal list\n# all_names = list(np.ravel(results))\n all_data = []\n # for weather, index in weather_sel:\n for index, weather in weather_sel.iterrows():\n weather_dict = {}\n weather_dict[\"date\"] = weather['date_field']\n weather_dict[\"tempMax\"] = weather['tempMax']\n weather_dict[\"tempMin\"] = weather['tempMin']\n weather_dict[\"tempAvg\"] = weather['tempAvg']\n weather_dict[\"precipitation\"] = weather['precipitation']\n all_data.append(weather_dict)\n\n return jsonify(all_data)", "title": "" }, { "docid": "038ca17fa09942a608222fd413be918e", "score": "0.5117226", "text": "def start_end(start=None, end=None):\n start_date = dt.datetime.strptime(start, '%Y-%m-%d')\n end_date = dt.datetime.strptime(end, \"%Y-%m-%d\")\n \n session = Session(engine)\n \n #query for the whole trip duration and alculations for min, max, avg for the whole duration. 
\n dates = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).group_by(Measurement.date).all()\n \n session.close()\n # Create a list to hold results\n dates_list = []\n for date in dates:\n l = {}\n l[\"Date\"] = date[0]\n l[\"MIN Temp\"] = date[1]\n l[\"Avg Temp\"] = date[2]\n l[\"Max Temp\"] = date[3]\n dates_list.append(l)\n\n #Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.\n \n return jsonify(dates_list)", "title": "" }, { "docid": "6fe52c02c83a9cb3a6763691c1e6c0cc", "score": "0.51098365", "text": "def startdate(start):\n session = Session(engine)\n \n # This is some data cleaning for better matching\n\n results = session.query(Measurements.date, func.max(Measurements.tobs),\\\n func.min(Measurements.tobs),func.avg(Measurements.tobs)).\\\n filter(Measurements.date >= start).group_by(Measurements.date).order_by(Measurements.date).all()\n \n tempList = []\n for date, tmax, tmin, tavg in results:\n tempDict = {}\n tempDict[\"date\"] = date\n tempDict[\"tmax\"] = tmax\n tempDict[\"tmin\"] = tmin\n tempDict[\"tavg\"] = tavg\n tempList.append(tempDict)\n\n\n return jsonify(tempList)", "title": "" }, { "docid": "45259d1f9aa9d4a1f2d45d9c8552a928", "score": "0.5102573", "text": "def getData_DateRange(self, roadName, startDate, endDate, startTime, endTime):\n sum_dict = {}\n dateList = getDateRange(startDate, endDate)\n for date in dateList:\n if (self.initData(roadName, date)):\n sum_dict[date] = self.getData_TimeRange(roadName, date, startTime, endTime)\n return sum_dict", "title": "" }, { "docid": "f2fafc5790deeee8d85e01fceadfe7b6", "score": "0.5101695", "text": "def get_perf_data(self, entity_scope, entity_code, from_date, to_date, asat, **kwargs):\n\n def check(r):\n \"\"\"\n Checks a row from a Pandas DataFrame which is represented as a Pandas Series to\n see if it is relevant to the bi-temporal window requested\n\n :param pd.Series r: The row of the DataFrame to check\n\n :return: bool: True if relevant to the requested bi-temporal window, False if not\n \"\"\"\n return from_date <= r['date'] <= to_date and r['asat'] <= asat\n\n # Apply a check to each row in the DataFrame\n df = self.data[self.data.apply(check,axis=1)]\n\n # Remove the asAt column and drop records for the same day in the effectiveAt column keeping the last record\n df = df.drop('asat',axis=1).drop_duplicates('date',keep='last').sort_values('date')\n\n # Takes all columns starting with \"mv\" or \"net\" and turns it into just two columns called\n # \"mv\" and \"net\" along with a \"key\" column which contains the suffix of all the columns which start\n # with \"mv\" or \"net\" and is identified by splitting the column name on a \".\".\n # The date is also copied down for each row\n df = pd.wide_to_long(df, ['mv', 'net'], i='date', j='key', sep='.', suffix=r'\\w+').reset_index()\n return df", "title": "" }, { "docid": "67ba75c3e0f680d556c3f9354dce30de", "score": "0.50991035", "text": "def sample(self, query: str, start_date: dt.datetime, end_date: dt.datetime, limit: int = 20, **kwargs) -> List[Dict]:\n results = self._cached_query(query, start_date, end_date, limit=limit, sort='desc')\n data = []\n if 'hits' in results:\n for obj in results['hits']['hits']:\n tweet = obj['_source']\n data.append(self._tweet_to_row(tweet))\n return data", "title": "" }, { "docid": 
"ae98be742a310070ba687c549bf16d52", "score": "0.5098772", "text": "def get_data_dict(uid, start_date, end_date, table=\"day\"):\n db = get_db()\n\n if table == \"day\":\n real_table = \"minute\"\n else:\n real_table = \"hour\"\n rows = db.get_records(real_table, start_date=start_date, end_date=end_date, uid=uid)\n if len(rows) == 0:\n return None\n\n plot = {}\n col_names = rows[0]._asdict().keys()\n\n for name in col_names:\n plot[name] = []\n\n # Rain doesn't have the same timestamp as the rest of the data\n plot[\"rain_time\"] = []\n\n # Create lists with each data type and make timestamp pretty\n for row in rows:\n for name in col_names:\n if name == \"timestamp\":\n plot[name].append(\n datetime.fromtimestamp(getattr(row, name)).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n )\n )\n elif name == \"uid\" or name == \"id\" or name == \"rain\":\n continue\n else:\n if getattr(row, name) is None:\n plot[name].append(0)\n else:\n plot[name].append(round(getattr(row, name), 3))\n\n # Bin data into the appropriate size for histograms\n idx = get_start_bin(int(end_date - 1), table)\n bins = range(int(start_date), int(end_date), rain_mod[table])\n\n # Loop through each rain bin\n for rain_bin in bins:\n plot[\"rain_time\"].append(get_rain_label(idx, table))\n rain = 0\n # Loop through each rain sample\n for row in rows:\n if row.rain == 0:\n continue\n # Check if the sample falls into our bin\n if row.timestamp >= rain_bin and row.timestamp < (rain_bin + rain_mod[table]):\n rain += row.rain\n\n plot[\"rain\"].append(rain)\n\n # Wrap around depending on the number of bins (since we don't always start at 0)\n idx = (idx + 1) % len(bins)\n\n return plot", "title": "" }, { "docid": "98e911c3ee2c5ebe1c18e834c93eb728", "score": "0.5095537", "text": "def tempDate(start=None, end=None):\n \n # do the [sel] like in the Notebook version of this work...\n sel = []\n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n \n #query if the \"end\" part is not there\n if not end:\n #calc temp data after start\n results = session.query(*sel).filter(Measurement.date >= start).all()\n #unravel to get into list\n tempdata = list(np.ravel(results))\n return jsonify(tempdata)\n \n #calc temp data if start and end exists\n results = session.query(*sel).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n #unravel to get into list\n tempdata = list(np.ravel(results))\n return jsonify(tempdata)", "title": "" }, { "docid": "4f16b82ad332cbfc194bad08e7701ec7", "score": "0.5093952", "text": "def filter_df_all(engine, store, date, start_hour, start_min, end_hour, end_min):\n # Filter dates\n start_date = datetime.strptime(date + ' ' + start_hour.zfill(2) + ':' + start_min.zfill(2) + ':00', '%d/%m/%Y %H:%M:%S')\n end_date = datetime.strptime(date + ' ' + end_hour.zfill(2) + ':' + end_min.zfill(2) + ':00', '%d/%m/%Y %H:%M:%S')\n\n connection = engine.connect()\n\n query = '''\n SELECT * from tracker \n WHERE \"Store_name\"= :group \n AND \"current_datetime\" BETWEEN :A AND :B \n AND \"X_center_perspective\" <> 0\n AND \"Y_center_perspective\" <> 0\n AND \"Object\"= :O\n '''\n\n tracker_conn = connection.execute(text(query), group='san diego', A=start_date, B=end_date, O='person').fetchall()\n columns= ['Store_name','Start_date','End_date','current_datetime','Camera','Object','Id','X_center_original','Y_center_original',\n 'X_center_perspective','Y_center_perspective','X_min','Y_min','X_max','Y_max','Frame']\n tracker = pd.DataFrame(tracker_conn, columns=columns)\n\n 
return tracker", "title": "" }, { "docid": "f9f85013d76d5506643c7be2850aa0e1", "score": "0.50821364", "text": "def update_match_stats(date='today'):\n # set date_string dynamically\n if date == 'today':\n date_string = datetime.datetime.today().strftime('%Y-%m-%d')\n else:\n date_string = date\n\n print('Starting request for match stats...')\n\n schedule_url_front = 'https://api.sportradar.us/soccer-xt3/eu/en/schedules/'\n schedule_url_back = '/schedule.json?api_key=qq5z5t88838bu8kcwe4qvbjn'\n league_ids = ['ENG','ESP','ITA','FRA','DEU']\n\n match_url_front = 'https://api.sportradar.us/soccer-xt3/eu/en/matches/'\n match_url_back = '/timeline.json?api_key=qq5z5t88838bu8kcwe4qvbjn'\n\n # get match ids\n schedule_url = schedule_url_front + date_string + schedule_url_back\n schedule_response = requests.get(schedule_url)\n schedule_data = schedule_response.json()\n events = schedule_data['sport_events']\n match_ids = []\n for event in events:\n country_code = event['tournament']['category']['country_code']\n if country_code in league_ids:\n match_ids.append(event['id'])\n\n # get match stats for each player with match ids\n for id in match_ids:\n time.sleep(1)\n match_url = match_url_front + str(id) + match_url_back\n match_response = requests.get(match_url)\n match_data = match_response.json()\n if 'statistics' in match_data:\n teams = match_data['statistics']['teams']\n print('Stats for ' + match_data['sport_event']['competitors'][0]['name'] + \" vs \" + match_data['sport_event']['competitors'][1]['name'] + ':')\n for team in teams:\n players = team['players']\n for player in players:\n # get id as int\n id_string = player['id']\n id_match = re.search(r'(\\d+)$', id_string)\n id = int(id_string[id_match.start():id_match.end()])\n\n # find player record\n p = Player.objects.get(pk=id)\n print(p.name)\n\n # if there's already a stat from this game, delete it\n MatchStat.objects.filter(match_date=date_string, player_id=id).delete()\n\n # if they're a goalie\n shots_saved, shots_faced, penalties_saved, penalties_faced = 0, 0, 0, 0\n if 'shots_faced_saved' in player:\n shots_saved = player['shots_faced_saved']\n shots_faced = player['shots_faced_total']\n penalties_saved = player['penalties_saved']\n penalties_faced = player['penalties_faced']\n\n # check if they have full stats\n if 'interceptions' in player:\n stat = MatchStat(player=p, substituted_in=player['substituted_in'], substituted_out=player['substituted_out'],\n goals=player['goals_scored'], assists=player['assists'], yellow_cards=player['yellow_cards'],\n yellow_red_cards=player['yellow_red_cards'], red_cards=player['red_cards'],\n interceptions=player['interceptions'], chances_created=player['chances_created'],\n successful_crosses=player['crosses_successful'], crosses=player['crosses_total'],\n successful_tackles=player['duels_tackle_successful'], tackles=player['duels_tackle_total'],\n goals_conceded=player['goals_conceded'], shots_saved=shots_saved, shots_faced=shots_faced,\n penalties_faced=penalties_faced, penalties_saved=penalties_saved,\n fouls_committed=player['fouls_committed'], shots_on_goal=player['shots_on_goal'],\n shots_off_goal=player['shots_off_goal'], shots_blocked=player['shots_blocked'],\n minutes_played=player['minutes_played'], penalty_goals=player['goals_by_penalty'],\n match_date=date_string)\n stat.save()\n else:\n stat = MatchStat(player=p, substituted_in=player['substituted_in'], substituted_out=player['substituted_out'],\n goals=player['goals_scored'], assists=player['assists'], 
yellow_cards=player['yellow_cards'],\n yellow_red_cards=player['yellow_red_cards'], red_cards=player['red_cards'],\n match_date=date_string)\n stat.save()\n print('----------------------------------------------')\n\n print('Today\\'s match stats successfully added to DB!')", "title": "" }, { "docid": "b2dda30011d444d23cc65f43b33d8cdb", "score": "0.50738955", "text": "def range_of_dates(self, start_date, end_date):\n\n start_date = datetime.strptime(start_date, \"%m/%d/%Y\")\n end_date = datetime.strptime(end_date, \"%m/%d/%Y\")\n\n entries_found = []\n\n for data in self.entries:\n data_date = datetime.strptime(data['Date'], \"%m/%d/%Y\")\n\n if start_date <= data_date <= end_date:\n entries_found.append(Entry.from_dict(data))\n\n return entries_found", "title": "" }, { "docid": "6b9ebb0ad24f54f64a93bf33f1035414", "score": "0.5069932", "text": "def fill_testing_dates(self):\r\n self.testing_dates[1] = {'cv_start': '1972-01-01', \r\n 'cv_end': '1975-12-01', \r\n 'pred_start': '1976-01-01',\r\n 'pred_end': '1981-07-01'}\r\n self.testing_dates[2] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1981-07-01', \r\n 'pred_start': '1981-08-01',\r\n 'pred_end': '1983-07-01'}\r\n self.testing_dates[3] = {'cv_start': '1976-01-01', \r\n 'cv_end': '1983-07-01', \r\n 'pred_start': '1983-08-01',\r\n 'pred_end': '1992-12-01'}\r\n self.testing_dates[4] = {'cv_start': '1983-08-01', \r\n 'cv_end': '1992-12-01', \r\n 'pred_start': '1993-01-01',\r\n 'pred_end': '2003-07-01'}\r\n self.testing_dates[5] = {'cv_start': '1993-01-01', \r\n 'cv_end': '2003-07-01', \r\n 'pred_start': '2003-08-01',\r\n 'pred_end': '2010-09-01'}\r\n self.testing_dates[6] = {'cv_start': '2003-08-01', \r\n 'cv_end': '2010-09-01', \r\n 'pred_start': '2010-10-01',\r\n 'pred_end': '2021-07-01'}", "title": "" }, { "docid": "7227e96aebec15291feab6983e16c2e7", "score": "0.5069106", "text": "def do_report(from_date:date, to_date:date, events:list):\n deltas = {d:Counter() for d in daterange(from_date, to_date + timedelta(1)) }\n prior = Counter()\n all_flags = set()\n for d, tick, flag in events:\n all_flags.add(flag)\n if d < from_date:\n prior[flag] += tick\n elif d <= to_date:\n deltas[d][flag] += tick\n\n # Convert deltas to absolute values and ensure all keys are present.\n report = []\n empty = dict.fromkeys(all_flags, 0)\n for d in daterange(from_date, to_date + timedelta(1)):\n prior = prior + deltas[d]\n current = dict(empty)\n current.update(prior)\n report.append(current)\n return report", "title": "" }, { "docid": "52b128ac8071bb21db12c619a6170ee9", "score": "0.50578356", "text": "def tally_stats(self, date_1=None, date_2=None):\n\n log.debug(f'({RUNTIME_ID}) tally_stats({date_1}, {date_2})')\n\n apts = [] # Dictionary of appointment stats for each _Appointment subclass.\n\n # Executes when both date arguments are provided.\n if date_1 and date_2:\n\n # If dates are type integer, convert to type string.\n if type(date_1) is int:\n date_1 = str(date_1)\n if type(date_2) is int:\n date_2 = str(date_2)\n\n # Convert dates to datetime objects.\n if type(date_1) is str:\n date_1 = datetime.strptime(date_1, DATE_FORMAT).date()\n if type(date_2) is str:\n date_2 = datetime.strptime(date_2, DATE_FORMAT).date()\n\n for patient in self.patients:\n for appointment in patient.appointments:\n if date_1 < appointment.date < date_2: # Isolate dates within argument date ranges.\n apts.append(appointment.to_stats_dict())\n\n # Executes when date arguments are not provided.\n else:\n for patient in self.patients:\n for appointment in 
patient.appointments:\n apts.append(appointment.to_stats_dict())\n\n periodic = {'PeriodicExam': 0}\n limited = {'LimitedExam': 0}\n comprehensive = {'ComprehensiveExam': 0}\n surgery = {'Surgery': 0}\n\n for apt in apts:\n\n if apt['_type'] == 'PeriodicExam':\n periodic['PeriodicExam'] += 1\n for k, v in apt.items():\n if v is not None: # Eliminate procedure keys that did not happen during an appointment.\n if k in periodic:\n periodic[k] += 1\n elif k != '_type':\n periodic[k] = 1\n\n elif apt['_type'] == 'LimitedExam':\n limited['LimitedExam'] += 1\n for k, v in apt.items():\n if v is not None: # Eliminate procedure keys that did not happen during an appointment.\n if k in limited:\n limited[k] += 1\n elif k != '_type':\n limited[k] = 1\n\n elif apt['_type'] == 'ComprehensiveExam':\n comprehensive['ComprehensiveExam'] += 1\n for k, v in apt.items():\n if v is not None: # Eliminate procedure keys that did not happen during an appointment.\n if k in limited:\n comprehensive[k] += 1\n elif k != '_type':\n comprehensive[k] = 1\n\n elif apt['_type'] == 'Surgery':\n surgery['Surgery'] += 1\n for k, v in apt.items():\n if v is not None: # Eliminate procedure keys that did not happen during an appointment.\n if k in limited:\n surgery[k] += 1\n elif k != '_type':\n surgery[k] = 1\n\n stats = [periodic, limited, comprehensive, surgery]\n\n log.debug(f'({RUNTIME_ID}) tally_stats() Return: {stats}')\n\n return stats", "title": "" }, { "docid": "9d8de4671524231da8261d5bc9c37b12", "score": "0.5055484", "text": "def get_data_from_id(start_date, end_date, stock_id):\n from_date = start_date\n to_date = end_date\n data_table = []\n date_set = set() # Prevend duplicates\n\n while True:\n data_form = {\n \"searchMarketStatisticsView.symbol\": stock_id,\n \"strFromDate\": from_date,\n \"strToDate\" : to_date\n }\n\n r = requests.post(PRICE_URL, data=data_form)\n soup = bs4.BeautifulSoup(r.text, \"lxml\")\n soup = soup.find(\"div\", {\"id\": \"tab-1\"})\n soup = soup.find(\"ul\", {\"class\": \"list_tktt lichsugia\"})\n for li in soup:\n try:\n if not li.has_attr('class'):\n data = {}\n data[\"date\"] = DataLoader.get_date(li, \"row-time noline\", 0)\n if data[\"date\"] in date_set:\n continue\n date_set.add(data[\"date\"])\n # From API, only `change_value` has sign (\"+/-\")\n # So have to add sign manually\n data[\"change_value\"], sign = DataLoader.get_change_value(li, \"row2\", 0)\n data[\"change_percent\"]= sign * DataLoader.get_change_value(li, \"row2\", 1)[0]\n data[\"open_price\"] = DataLoader.get_trade_value(li, \"row1\", 0)\n data[\"high_price\"] = DataLoader.get_trade_value(li, \"row1\", 1)\n data[\"low_price\"] = DataLoader.get_trade_value(li, \"row1\", 2)\n data[\"close_price\"] = DataLoader.get_trade_value(li, \"row1\", 3)\n data[\"adjust_price\"] = DataLoader.get_trade_value(li, \"row1\", 5)\n data[\"match_volume\"] = DataLoader.get_trade_value(li, \"row3\", 0)\n data[\"reconcile_volume\"] = DataLoader.get_trade_value(li, \"row3\", 1)\n if data[\"close_price\"] != data[\"adjust_price\"]:\n data['open_price'] = data[\"adjust_price\"] * data[\"open_price\"] / data[\"close_price\"]\n data['high_price'] = data[\"adjust_price\"] * data[\"high_price\"] / data[\"close_price\"]\n data['low_price'] = data[\"adjust_price\"] * data[\"low_price\"] / data[\"close_price\"]\n data['close_price'] = data[\"adjust_price\"]\n data_table.append(data)\n except Exception as e:\n # Ignore NavigableString object\n pass\n\n # if there is not data on `from_date`\n # then break when total number of data can be query < 
max query (30 record/page)\n if data[\"date\"] == from_date or len(soup) < MAX_DATE_CAN_QUERY:\n break\n else:\n # continue to query\n to_date = data[\"date\"]\n\n return data_table", "title": "" }, { "docid": "5e316e6e4850ed36cc3b9d4cc46f0164", "score": "0.50536954", "text": "def get_historic_rates(client, product, start_date, end_date, \n granularity=1, beautify=True):\n startDate = dt.datetime.strptime(start_date, \"%Y-%m-%d\")\n startDateTimestamp = startDate.timestamp()\n endDate = dt.datetime.strptime(end_date, \"%Y-%m-%d\")\n endDateTimestamp = endDate.timestamp()\n \n # List of time divisions for retrieving data.\n timeRange = range(int(startDateTimestamp), int(endDateTimestamp), \n 200 * granularity)\n timeRange = list(timeRange) + [endDateTimestamp]\n \n # New DataFrame.\n columns = ['time', 'low', 'high', 'open', 'close', 'volume']\n data = pd.DataFrame(columns=columns)\n \n # Populating dataframe.\n for i in tqdm(range(len(timeRange) - 1)):\n try:\n data = append_data(client, data, product, columns, \n timeRange[i], timeRange[i+1],\n granularity)\n except ValueError:\n sleep(3)\n data = append_data(client, data, product, columns, \n timeRange[i], timeRange[i+1], \n granularity)\n except SSLError:\n sleep(15)\n data = append_data(client, data, product, columns, \n timeRange[i], timeRange[i+1], \n granularity)\n except:\n print(\"An error ocurred. The complete set of data couldn't be downloaded.\")\n if beautify:\n return beautify_data(data.drop_duplicates())\n else:\n return data.drop_duplicates()\n \n if beautify:\n return beautify_data(data.drop_duplicates())\n else:\n return data.drop_duplicates()", "title": "" }, { "docid": "e102dd4c5499cbe24b270257ef1fe28e", "score": "0.5045764", "text": "def start(start=None):\n \n #Pull data from engine for dates after start date specified\n start_data = session.query(Measurement.date, func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start).group_by(Measurement.date).all()\n \n #Convert data into list for returning\n start_list=list(start_data)\n \n #Return JSONified data from list\n return jsonify(start_list)", "title": "" }, { "docid": "acc23c54a68eff60f7d7119017af02be", "score": "0.5043921", "text": "def query_trade_dates(self, start_date, end_date):\n filter_argument = self._dic2url({'start_date': start_date,\n 'end_date': end_date})\n \n # df_raw, err_msg = self.query(\"jz.secTradeCal\", fields=\"trade_date\",\n # filter=filter_argument, orderby=\"\")\n # self._raise_error_if_msg(err_msg)\n df_raw = self._trade_dates_df[self._trade_dates_df['trade_date'] >= str(start_date)]\n df_raw = df_raw[df_raw['trade_date'] <= str(end_date)]\n \n if df_raw.empty:\n return np.array([], dtype=int)\n \n trade_dates_arr = df_raw['trade_date'].values.astype(np.integer)\n return trade_dates_arr", "title": "" } ]
7cea56bbaf7cfb2dd4488789a8f9c00a
Upon calling create_game, the Connect Four game should initialize the board
[ { "docid": "8915df425fd4c67e4d6225ffb77148ae", "score": "0.0", "text": "def create_game(self, request: dict) -> dict:\n\n ConnectFourGame.sessions += 1\n game = {\"board\": [], \"player_to_play\": 1, \"session_id\": ConnectFourGame.sessions}\n\n self.games[ConnectFourGame.sessions] = Game(game[\"board\"], game[\"player_to_play\"], game[\"session_id\"])\n\n return {\"session_id\": self.games[ConnectFourGame.sessions].get_session_ID()}", "title": "" } ]
[ { "docid": "f3fa404fc0556a1d3aece2288fcf90e1", "score": "0.81248873", "text": "def initialize_game(self):\n self._board = self.create_a_new_board()\n self.create_board(self._board)", "title": "" }, { "docid": "8ddd7f1f51f24d01a0d2d043cc935d57", "score": "0.76318944", "text": "def start_game(board):\n board.set_board([])\n config = board.generate_legal_configuration()\n board.set_board(config)", "title": "" }, { "docid": "e977fd9b8833fe0a3b37e84e8aa4e53e", "score": "0.7324038", "text": "def _setup_board(self):\n self.__board[0][0] = Chariot(0, 0, player=\"red\")\n self.__board[0][1] = Horse(0, 1, player=\"red\")\n self.__board[0][2] = Elephant(0, 2, player=\"red\")\n self.__board[0][3] = Advisor(0, 3, player=\"red\")\n self.__board[0][4] = General(0, 4, player=\"red\")\n self.__board[0][5] = Advisor(0, 5, player=\"red\")\n self.__board[0][6] = Elephant(0, 6, player=\"red\")\n self.__board[0][7] = Horse(0, 7, player=\"red\")\n self.__board[0][8] = Chariot(0, 8, player=\"red\")\n self.__board[2][1] = Cannon(2, 1, player=\"red\")\n self.__board[2][7] = Cannon(2, 7, player=\"red\")\n self.__board[3][0] = Soldier(3, 0, player=\"red\")\n self.__board[3][2] = Soldier(3, 2, player=\"red\")\n self.__board[3][4] = Soldier(3, 4, player=\"red\")\n self.__board[3][6] = Soldier(3, 6, player=\"red\")\n self.__board[3][8] = Soldier(3, 8, player=\"red\")\n\n self.__board[9][0] = Chariot(9, 0, player=\"black\")\n self.__board[9][1] = Horse(9, 1, player=\"black\")\n self.__board[9][2] = Elephant(9, 2, player=\"black\")\n self.__board[9][3] = Advisor(9, 3, player=\"black\")\n self.__board[9][4] = General(9, 4, player=\"black\")\n self.__board[9][5] = Advisor(9, 5, player=\"black\")\n self.__board[9][6] = Elephant(9, 6, player=\"black\")\n self.__board[9][7] = Horse(9, 7, player=\"black\")\n self.__board[9][8] = Chariot(9, 8, player=\"black\")\n self.__board[7][1] = Cannon(7, 1, player=\"black\")\n self.__board[7][7] = Cannon(7, 7, player=\"black\")\n self.__board[6][0] = Soldier(6, 0, player=\"black\")\n self.__board[6][2] = Soldier(6, 2, player=\"black\")\n self.__board[6][4] = Soldier(6, 4, player=\"black\")\n self.__board[6][6] = Soldier(6, 6, player=\"black\")\n self.__board[6][8] = Soldier(6, 8, player=\"black\")", "title": "" }, { "docid": "fd3fb613f84d4d353a9f2403856c8f28", "score": "0.7163653", "text": "def setup_board(self):\n self._board[0][0] = Chariot(\"red\", \"R\")\n self._board[0][1] = Horse(\"red\", \"H\")\n self._board[0][2] = Elephant(\"red\", \"E\")\n self._board[0][3] = Advisor(\"red\", \"A\")\n self._board[0][4] = General(\"red\", \"G\")\n self._board[0][5] = Advisor(\"red\", \"A\")\n self._board[0][6] = Elephant(\"red\", \"E\")\n self._board[0][7] = Horse(\"red\", \"H\")\n self._board[0][8] = Chariot(\"red\", \"R\")\n self._board[2][1] = Cannon(\"red\", \"C\")\n self._board[2][7] = Cannon(\"red\", \"C\")\n self._board[3][0] = Soldier(\"red\", \"S\")\n self._board[3][2] = Soldier(\"red\", \"S\")\n self._board[3][4] = Soldier(\"red\", \"S\")\n self._board[3][6] = Soldier(\"red\", \"S\")\n self._board[3][8] = Soldier(\"red\", \"S\")\n self._board[9][0] = Chariot(\"black\", \"R\")\n self._board[9][1] = Horse(\"black\", \"H\")\n self._board[9][2] = Elephant(\"black\", \"E\")\n self._board[9][3] = Advisor(\"black\", \"A\")\n self._board[9][4] = General(\"black\", \"G\")\n self._board[9][5] = Advisor(\"black\", \"A\")\n self._board[9][6] = Elephant(\"black\", \"E\")\n self._board[9][7] = Horse(\"black\", \"H\")\n self._board[9][8] = Chariot(\"black\", \"R\")\n self._board[7][1] = 
Cannon(\"black\", \"C\")\n self._board[7][7] = Cannon(\"black\", \"C\")\n self._board[6][0] = Soldier(\"black\", \"S\")\n self._board[6][2] = Soldier(\"black\", \"S\")\n self._board[6][4] = Soldier(\"black\", \"S\")\n self._board[6][6] = Soldier(\"black\", \"S\")\n self._board[6][8] = Soldier(\"black\", \"S\")", "title": "" }, { "docid": "c9c6abbe60d69566e152556716fe68e6", "score": "0.71454376", "text": "def initialise_board(self):\n for piece in self.white_pieces + self.black_pieces:\n self.board[piece.row][piece.column] = piece", "title": "" }, { "docid": "b72e9f61d9ca81dbb2d0393eb114e16b", "score": "0.7132936", "text": "def __init__(self):\n\n self._board = [[\"\"] * 9 for i in range(10)]\n self._turn = \"red\"\n self._game_state = \"UNFINISHED\"\n self._red_pieces = []\n self._black_pieces = []\n self.setup_board()", "title": "" }, { "docid": "20a664ea0d21b1dccf9565a746abbe58", "score": "0.71131825", "text": "def __init__(self):\r\n self._board = Board()\r\n self._game_state = \"UNFINISHED\"\r\n self._red_in_check = False\r\n self._black_in_check = False\r\n self._whose_turn = \"red\" # Red player starts the game\r", "title": "" }, { "docid": "53103de766a0fd503491d48aae31d33c", "score": "0.70105135", "text": "def __init__(self):\n\t\tself.GameBoard = TicTacToeBoard()", "title": "" }, { "docid": "f0583ea04e91a55f6a4fec25605bcd48", "score": "0.6938829", "text": "def __init__(self, game):\r\n self.board = game.get_board()", "title": "" }, { "docid": "a8c85729e89415de675439551a658f48", "score": "0.69251037", "text": "def __init__(self):\n self.board = [[' ']*3 for _ in range(3)]\n self.turn, self.winner = \"O\", None", "title": "" }, { "docid": "1a98a649c85e3aa08427bd016edf0278", "score": "0.69055575", "text": "def __init__(self):\n\n self.board = self.load_board()", "title": "" }, { "docid": "5e1b78a76e35fa22d6c0ec0e12702f7e", "score": "0.68785167", "text": "def __init__(self):\n self._board = GameBoard()\n self._game_state = \"UNFINISHED\"\n self._current_player = \"red\"", "title": "" }, { "docid": "1173faf085a2a5a35a7a6fdc5212a8e8", "score": "0.68663615", "text": "def initialize_game(self):\n for row in range(0, self.height):\n for col in range(0, self.width):\n self.solved_puzzle.append(Cell(row, col, self.get_box(row, col), self.puzzle, self.get_value(row, col)))", "title": "" }, { "docid": "ba0ba76362c1c545171f3adae8e5fd85", "score": "0.6837861", "text": "def create_board():\n global nums, cells, players_cells, current_round\n nums = {\n '1': '1', '2': '2', '3': '3',\n '4': '4', '5': '5', '6': '6',\n '7': '7', '8': '8', '9': '9',\n }\n cells = {\n '1': '', '2': '', '3': '',\n '4': '', '5': '', '6': '',\n '7': '', '8': '', '9': '',\n }\n players_cells = {'X': set(), 'O': set()}\n current_round += 1", "title": "" }, { "docid": "a952ac7479fa9a53666feb99479d6b4b", "score": "0.6832721", "text": "def initialize(board):\n row = col = board.get_length()\n initrows = (row // 2) - 1\n for r in range(row - 1, row - (initrows + 1), -1):\n for c in range(0 if r % 2 == 1 else 1, col, 2):\n board.place(r, c, Piece('white'))\n for r in range(0, initrows):\n for c in range(0 if r % 2 == 1 else 1, col, 2):\n board.place(r, c, Piece())", "title": "" }, { "docid": "43370c12facf5dc34c1e202bbf32ce80", "score": "0.6787076", "text": "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]", "title": "" }, { "docid": "a397d0b1ed7129b02734f2b5091a7cf9", "score": "0.67763054", "text": "def __init__(player_name):\n\n \t\t# Generate the boards\n \t\tplayer_board = PlayerBoard(player_name='Joe')\n 
\t\tAi_board = AiBoard()", "title": "" }, { "docid": "70dfbf333e6bb52b35f2bd0a057e68a6", "score": "0.67732716", "text": "def create_board(self):\n self.board_challenge = []\n for i in range(self.height):\n self.board_challenge.append([(0,0,0)]*self.width)\n\n #Creating a list of subtuples of 5 elements\n self.initial_positions_board = list(self.map_commands_raw[x:x + 5] for x in range(0, len(self.map_commands_raw), 5))\n\n for i in range(self.height):\n for j in range(self.width):\n for k in range(0,len(self.initial_positions_board)): #Iterate through all initial positions\n if self.initial_positions_board[k][1]==i and self.initial_positions_board[k][0]==j:\n self.board_challenge[i][j]=self.initial_positions_board[k][2:] #Filling in start positions in the board", "title": "" }, { "docid": "829aaa62bebc38f55acb68f18c278aab", "score": "0.67696434", "text": "def test_board_init():\n\n test_board = [\"x\", \"x\", \"o\", \"x\", \"\", \"o\", \"\", \"\", \"o\"]\n instance = Board(test_board)\n assert len(instance.board) == 9", "title": "" }, { "docid": "bda981a926d92b5543ef5eb8839552c8", "score": "0.67512286", "text": "def init():\n global state #utilisable dans tout le code\n global board \n global state_to_board\n \n board = [\n O, O, X, O, O, X, O, O,\n O, O, X, O, O, X, O, O,\n X, X, X, X, X, X, X, X,\n O, O, X, O, O, X, O, O,\n O, O, X, O, O, X, O, O,\n X, X, X, X, X, X, X, X,\n O, O, X, O, O, X, O, O,\n O, O, X, O, O, X, O, O,\n ]\n\n state_to_board = [[(0, 1, 8, 9), (3, 4, 11, 12), (6, 7, 14, 15)],\n [(24, 25, 32, 33), (27, 28, 35, 36), (30, 31, 38, 39)],\n [(48, 49, 56, 57), (51, 52, 59, 60), (54, 55, 62, 63)]]\n\n state = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]", "title": "" }, { "docid": "cd43e94ad486efce175ef77df1fe292d", "score": "0.673914", "text": "def __init__(self):\n self._board = [[\"R\", \"H\", \"E\", \"A\", \"G\", \"A\", \"E\", \"H\", \"R\"],\n [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"],\n [\"-\", \"C\", \"-\", \"-\", \"-\", \"-\", \"-\", \"C\", \"-\"],\n [\"S\", \"-\", \"S\", \"-\", \"S\", \"-\", \"S\", \"-\", \"S\"],\n [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"],\n [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"],\n [\"S\", \"-\", \"S\", \"-\", \"S\", \"-\", \"S\", \"-\", \"S\"],\n [\"-\", \"C\", \"-\", \"-\", \"-\", \"-\", \"-\", \"C\", \"-\"],\n [\"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\", \"-\"],\n [\"R\", \"H\", \"E\", \"A\", \"G\", \"A\", \"E\", \"H\", \"R\"]]\n\n self._min_x_coord = 0 # tracking the limits of the game board\n self._max_x_coord = 8\n self._min_y_coord = 0\n self._max_y_coord = 9\n\n # set the board\n self._pieces = []\n self._black_general = None\n self._red_general = None\n self.set_board()", "title": "" }, { "docid": "8f38fc2f67e6cfc61827ff7d361f83fe", "score": "0.6708982", "text": "def __init__(self, white, white_depth, black, black_depth):\n self.players = {\"white\": (white, white_depth),\n \"black\": (black, black_depth)\n }\n self.turn = 0\n self.board = Board()", "title": "" }, { "docid": "48817e904279bcb0ae10d490b7fa9881", "score": "0.67052674", "text": "def initBoard(self, gameType=\"normal\"):\n if gameType == \"normal\":\n self.initNormal()\n else: \n raise NotImplementedError", "title": "" }, { "docid": "aac0c36f23383a5d5dcf8461788c2ba3", "score": "0.669879", "text": "def _init_game(self):\n pass", "title": "" }, { "docid": "b01ef579864fd561b7454827eaf19f54", "score": "0.6698143", "text": "def init_game(size=4):\n if size < 2:\n print(\"Invalid grid size!\")\n return None\n\n 
game_grid = create_grid(size)\n g2048_add_new_tile(game_grid)\n g2048_add_new_tile(game_grid)\n return game_grid", "title": "" }, { "docid": "ff6fe9786b500c30a8ce09c73d9a2a65", "score": "0.669371", "text": "def createboard(self):\n pygame.init()\n #display_game = pygame.display.set_mode((1920,1080))\n displaysize=[1920,1080]\n function(displaysize[0], displaysize[1])\n function1(displaysize[0], displaysize[1])\n\n return board(displaysize[0], displaysize[1], 1,0)", "title": "" }, { "docid": "225fed253f232042cc7db02e20f73cd4", "score": "0.66787905", "text": "def initialize_board():\r\n #width=int(input\"Enter playable board width, between 2 and 15: \")\r\n #height=int(input\"Enter playable board height, between 2 and 15: \")\r\n #w, h = width+2, height+2\r\n w, h = 5, 5\r\n board = [[0 for x in range(w)] for y in range(h)]\r\n for x in range(w):\r\n board[x][0] = 1\r\n board[x][h-1] = 1\r\n board[0][x] = 1\r\n board[w-1][x] = 1\r\n spots=(w-1)*(h-1)\r\n t.setworldcoordinates(-30, -30, (w*100)+30, (h*100)+30)\r\n return board, spots", "title": "" }, { "docid": "ee0f7c0814f363803e9d022d0cac6aa0", "score": "0.66711986", "text": "def init_game():\n board = create_board_state()\n print_state(board)\n turnCount = random.randint(0, 1) # randomly picks who goes first\n gameOver = False\n winner = None\n while (not board_full(board)):\n\n if turnCount % 2 == 0:\n\n col = user_input(board)\n\n gameOver = execute_move(col, board, PLAYER_PIECE)\n\n if gameOver:\n winner = PLAYER_PIECE\n break\n\n print_state(board)\n turnCount += 1\n else:\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n print(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\")\n\n # Increase depth to raise AI strength (or decrease it to lower strength)\n # 1 is quite easy to beat, as it cannot predict future user moves\n # (it only looks 1 move (its own) into the future)\n # 2 is tough, but beatable\n # 3 and above are extremely difficult to win against\n minimaxValue = minimax(board, 5, AI_PIECE)\n\n print(\"AI drops piece into column:\", minimaxValue[1] + 1)\n# print(\"MiniMax value: \", minimaxValue[0]) # useful for diagnosing AI\n\n gameOver = execute_move(minimaxValue[1], board, AI_PIECE)\n\n if gameOver:\n winner = AI_PIECE\n break\n\n print_state(board)\n turnCount += 1\n\n end_game(board, winner)\n\n return", "title": "" }, { "docid": "033ba867ecdc5ae138aeacdaf8ea1400", "score": "0.6659415", "text": "def init_board(self, game_id: str, player_ids: List[str], starting_city: str = None) -> str:\n self.game_id = game_id\n return super().init_board(game_id, player_ids, starting_city)", "title": "" }, { "docid": "8949e9ddc75ccf9a9f1430d9abf2d4bd", "score": "0.66421914", "text": "def __init__(self):\n self._board = Board()\n self._move = self._board.get_move_instance()\n self._turn = 'blue'\n self._game_state = 'UNFINISHED'", "title": "" }, { "docid": "ed1c13fd3515c22cae5738456ce54207", "score": "0.66195863", "text": "def initGame(self, dimension):\n self.field = [[self.fieldState['Empty'] for i in range(dimension)] for j in range(dimension)]\n self.dimension = dimension\n self.initApple(dimension)\n self.initSnake()", "title": "" }, { "docid": "2e2d7dab4809f2921e9ac8119bb17998", "score": "0.6599154", "text": "def start_new_game(self ):\n game = SudokuBoard()\n game.start()", "title": "" }, { "docid": "7d0fc6eeb4cfd5e178fe05388081a9a7", "score": "0.65974927", "text": "def __init__(self):\n self.board_dict = dict()\n for i in range(self.BOARD_WIDTH):\n for j in 
range(self.BOARD_WIDTH):\n self.board_dict[i, j] = 0, None\n\n self.players_locations = dict()", "title": "" }, { "docid": "75331503b39d6f138794e248759fa0a7", "score": "0.6583816", "text": "def setup_game(self):\n self._ai_battle_ships()", "title": "" }, { "docid": "f694a8f5fd3422ada9005ab16d79a74e", "score": "0.6582518", "text": "def setup_game(self, player, opponent):\n\n self.displayer.clear_screen()\n\n ship_index = 0\n\n while not player.ready(len(self.SHIP_INFO)):\n # code to print the current board (starts empty)\n\n board = self.displayer.construct_player_board(\n player, opponent, True)\n self.displayer.print_board(board)\n\n ship_name, ship_length = self.SHIP_INFO[ship_index]\n ship_to_add = Ship(ship_name, ship_length)\n\n try:\n player.add_ship(ship_to_add)\n except Exception as e:\n ship_to_add = player.ships[ship_index]\n\n origin, orientation = self.displayer.prompt_for_ship_placement(\n ship_to_add)\n\n try:\n player.place_ship(ship_to_add, origin,\n orientation, self.BOARD_SIZE)\n except ValueError as ve:\n self.displayer.clear_screen()\n print(ve)\n print()\n continue\n\n self.displayer.clear_screen()\n ship_index += 1\n\n self.displayer.prompt_for_switch(opponent.name)", "title": "" }, { "docid": "1dc8f72b069a73de83a4fa2c047051d7", "score": "0.65804857", "text": "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "title": "" }, { "docid": "92e0172043bb04cc23d72f1f89376db3", "score": "0.6572888", "text": "def init_game(self):\r\n self.init_mode()\r\n self.init_deck()\r\n if self.mode == 'PvP':\r\n self.init_with_draw()\r\n self.init_players()\r\n self.init_board()\r\n self.first_player_to_play()\r\n self.start_game()", "title": "" }, { "docid": "77868be84db360bd8978a37eaf4c7c3b", "score": "0.65490204", "text": "def __init__(self, altStartingBoard = []): \r\n \r\n self.running = True\r\n self.boardState = [] \r\n self.colorsAI = [' b', ' B']\r\n self.colorsPlyr = [' r', ' R'] \r\n self.turnCount = 1\r\n if len(altStartingBoard) == 0: \r\n self.initBoardState()\r\n else: self.forceBoardState(altStartingBoard)", "title": "" }, { "docid": "63fc023793880ef9ccedcddb407bda4b", "score": "0.65296817", "text": "def __init__(self):\n \n self.possible_guesses = list(string.ascii_lowercase)\n self.wrong_guesses = []\n self.game_on = True\n self.board = Gameboard()", "title": "" }, { "docid": "3eca7fcb7d2051336761722bbc9169b9", "score": "0.65295583", "text": "def load_board(cls, board):\n line_x = 0#first line\n for cases in board:\n cell_y = 0#first column\n for case in cases:\n if case == 0:\n cls.WINDOW.blit(cls.WALL, (cell_y, line_x))\n cell_y += 40#40 because cell is 40 px => so go to next cell\n\n elif case == 1:#if corridor\n cls.WINDOW.blit(cls.FLOOR, (cell_y, line_x))\n cell_y += 40\n\n elif case == 2:#if ether\n cls.WINDOW.blit(cls.ETHER, (cell_y, line_x))\n cell_y += 40\n\n elif case == 3:#if needle\n cls.WINDOW.blit(cls.NEEDLE, (cell_y, line_x))\n cell_y += 40\n\n elif case == 4:#if tube\n cls.WINDOW.blit(cls.TUBE, (cell_y, line_x))\n cell_y += 40\n\n elif case == 9:#wayout\n cls.WINDOW.blit(cls.WAYOUT, (cell_y, line_x))\n cell_y += 40\n\n elif case == 5:#mcgyver\n cls.WINDOW.blit(cls.MCGYVER, (cell_y, line_x))\n pygame.display.flip()\n cell_y += 40\n\n else:\n cell_y += 40\n line_x += 40#line_x is the line, so go to next line\n return(board)", "title": "" }, { "docid": "9bfd9aaa4e8e3f09ae23ef41e9f0675b", "score": "0.6516727", "text": "def get_init():\n board = 0\n for x in 
range(BOARD_SIZE):\n for y in range(BOARD_SIZE):\n is_white = (x + y) % 2 != 0\n is_top = y < 3\n is_bottom = y >= BOARD_SIZE - 3\n index = coord_to_index(x, y)\n\n if is_white and (is_bottom or is_top):\n piece = WHITE_PAWN if is_bottom else BLACK_PAWN\n board = set_piece(board, index, piece)\n return board", "title": "" }, { "docid": "d9a0069294693fca498e88d1532b85d7", "score": "0.6488363", "text": "def newGame(self):\n\n self.board = Board()\n self.screen.erase()\n self.singles = 0\n self.doubles = 0\n self.triples = 0\n self.tetrises = 0\n self.level = self.startLevel\n self.blocksPlaced = 0\n self.score = self.LEVEL_SCORES[self.startLevel]\n self.isComplete = False\n self.isPaused = False\n self.didLose = False\n self.isSuspended = False\n self.extraTime = False\n self.hasExtraTime = False", "title": "" }, { "docid": "6fa940a9e51efbf0c7c26e8f4034be2d", "score": "0.6484905", "text": "def initialise_game(self):\n\t\tself.current_turn = 0\n\t\tself.save()\n\t\tfrom engine.modules import initialisation_tasks_list\n\t\tfor Task in initialisation_tasks_list:\n\t\t\t# print \"* [%s] **%s** : %s\" % (Task.RESOLUTION_ORDER, Task.__name__, Task.__doc__.strip())\n\t\t\tt = Task()\n\t\t\tt.run(self)\n\n\t\tself.current_turn = 1\n\t\tself.save()", "title": "" }, { "docid": "f5f12db54c8d4830e3608d408d9ca678", "score": "0.6483987", "text": "def __init__(self, game_state=\"UNFINISHED\", board=None, black_check=None, red_check=None, turn='red'):\n\n # initialize board as a list of lists\n if board is None:\n board = [[], [], [], [], [], [], [], [], [], []]\n\n # initialize attributes\n self._game_state = game_state\n self._black_check = black_check\n self._red_check = red_check\n self._board = board\n self._turn = turn\n\n # initialize pieces on the board\n self.start_state()", "title": "" }, { "docid": "b2719d5f0da491579a9322d77bd4261f", "score": "0.6480331", "text": "def __init__(self):\n self._board = Board()\n self._p1 = Player((4, 0), \"P1\")\n self._p2 = Player((4, 8), \"P2\")\n self.fair_player = Player((0, 0), \"\")\n self._game_state = GameState()", "title": "" }, { "docid": "4782c120a292f909a4ad854f38b10792", "score": "0.64699185", "text": "def next_boards_connectfour(board) :\n new_boards=[]\n if is_game_over_connectfour(board):\n return new_boards\n else:\n for i in range(7):\n if not board.is_column_full(i):\n new_boards.append(board.add_piece(i))\n return new_boards", "title": "" }, { "docid": "b673b82a1ef52f9d200a5cffc09e8fd2", "score": "0.6468926", "text": "def __init__(self, seed=randint(0,10000)):\n self.n_cols = 5 \n self.n_rows = 6\n self.board = self.generate_starting_board(seed) \n\n self.merchant_deck = config.NEW_MERCHANT_DECK\n shuffle(self.merchant_deck)\n\n self.djinn_deck = [4]*4 + [5] + [6]*10 + [8]*5 + [10]*2\n shuffle(self.djinn_deck)\n\n self.n_players = 2\n self.player = [None] # dummy to shift indexes to match Player 1 / Player 2 \n for _ in range(self.n_players):\n self.player.append({ 'coins' : 50, \n 'camels' : 8,\n 'fakirs' : 0, \n 'viziers' : 0,\n 'elders' : 0,\n 'djinns' : [],\n 'merchant_sets' : []\n })\n\n self.surrounding_blue_tiles = self._precompute_blue_tiles()\n self.cur_player = 1", "title": "" }, { "docid": "f4408c40e45aee5fe82cf5d727a5ac5c", "score": "0.6461678", "text": "def __init__(self):\n self.board_list = [None]*9\n self.board_map = [BOARD_NODE(0, connection_list = []), \\\n BOARD_NODE(1, connection_list = [BOARD_EDGE(0, 1, 3)]), \\\n BOARD_NODE(2, connection_list = [BOARD_EDGE(1, 1, 3)]), \\\n BOARD_NODE(3, connection_list = [BOARD_EDGE(0, 2, 
0)]), \\\n BOARD_NODE(4, connection_list = [BOARD_EDGE(1, 2, 0), BOARD_EDGE(3, 1, 3)]), \\\n BOARD_NODE(5, connection_list = [BOARD_EDGE(2, 2, 0), BOARD_EDGE(4, 1, 3)]), \\\n BOARD_NODE(6, connection_list = [BOARD_EDGE(3, 2, 0)]), \\\n BOARD_NODE(7, connection_list = [BOARD_EDGE(4, 2, 0), BOARD_EDGE(6, 1, 3)]), \\\n BOARD_NODE(8, connection_list = [BOARD_EDGE(5, 2, 0), BOARD_EDGE(7, 1, 3)])]", "title": "" }, { "docid": "a61f5da14463be07653ae070444b5725", "score": "0.6457682", "text": "def new_game():\n pygame.init() # initialize all imported pygame modules\n\n window_size = [ NUM_COLS * WIDTH + 200, NUM_ROWS * HEIGHT + 20] # width, height\n screen = pygame.display.set_mode(window_size)\n\n pygame.display.set_caption(\"Infinite Runner\") # caption sets title of Window \n\n board = Board()\n\n moveCount = 0\n\n clock = pygame.time.Clock()\n #Menu function\n result = menu(screen)\n if result == 0: #THIS MEANS USER SELECTED START GAME\n main_loop(screen, board, moveCount, clock, False, False)\n elif result == 1: #USER SELECTED LOAD GAME\n print \"you can't load games -- yet!\"\n elif result == 2: #USER SELETED OPTIONS\n print \"you have no options -- yet!\"", "title": "" }, { "docid": "06999634a7061c3ecfa9331b032b1012", "score": "0.64574224", "text": "def update_board(self, display: pygame.Surface)-> None:\n\n # Adding the board background to the game\n board_background = pygame.image.load('./Assets/BOARD2.png').convert()\n display.blit(board_background, (0,0))\n\n \"\"\"\n Grid coordinates of the Board in BOARD2.png:\n \n topLeft = (138, 75)\n bottomLeft = (138, 525)\n \n topRight = (663, 75)\n bottomRight = (663, 525)\n \"\"\"\n\n # The size of each slot of the board\n SLOT_SIZE = 75\n HOLE_SIZE = 25\n\n NUMBEROFCOLUMNS = self.board.get_grid_size()[1]\n NUMBEROFROWS = self.board.get_grid_size()[0]\n\n # The matrix representation of the grid\n grid = self.board.get_grid()\n\n # If there is a winner, switch to the end screen\n if self.board.get_winner() != '-':\n\n self._game.set_winner(self.board.get_winner())\n self.reset_board()\n self._game.gamestate = STATE.End\n\n # Creates the slots and holes on the board,\n # then updates the board from the matrix board representation.\n # This strategy for creating the board was inspired by a tutorial on freeCodeCamp.org.\n # Video URL: https://www.youtube.com/watch?v=XpYz-q1lxu8\n for column in range(NUMBEROFCOLUMNS):\n\n for row in range(NUMBEROFROWS):\n\n if grid[row][column] == 'X':\n pygame.draw.circle(display,\n DARKGREY,\n (138 + (SLOT_SIZE//2) + column*(SLOT_SIZE), 75 + (SLOT_SIZE//2) + row*(SLOT_SIZE)),\n HOLE_SIZE)\n\n elif grid[row][column] == 'O':\n pygame.draw.circle(display,\n NAVY,\n (138 + (SLOT_SIZE//2) + column*(SLOT_SIZE), 75 + (SLOT_SIZE//2) + row*(SLOT_SIZE)),\n HOLE_SIZE)\n\n else:\n pygame.draw.circle(display,\n LIGHTBLUE,\n (138 + (SLOT_SIZE//2) + column*(SLOT_SIZE), 75 + (SLOT_SIZE//2) + row*(SLOT_SIZE)),\n HOLE_SIZE)\n\n #Displays who's turn it is in the game\n font = pygame.font.Font(\"./Assets/joystix_monospace.ttf\", 20)\n\n if self.turn == 0:\n text = font.render(\"Player 1's Turn. Pick Where to Drop Disc.\", True, WHITE, BLACK)\n elif self.turn == 1:\n text = font.render(\"Player 2's Turn. 
Pick Where to Drop Disc.\", True, WHITE, BLACK)\n\n goal = font.render(\"Connect 2^2 Discs in a Row.\", True, WHITE, BLACK)\n goalBox = goal.get_rect(center=(400, 35))\n textBox = text.get_rect(center=(400,560))\n\n display.blit(text, textBox)\n display.blit(goal, goalBox)\n self.backbtn.draw(display)\n\n pygame.display.flip()", "title": "" }, { "docid": "d16174ec6325cace25a939cef7336de2", "score": "0.6446453", "text": "def __init__(self, size, board):\r\n\t self.BoardSize = size #the size of the board\r\n\t self.CurrentGameBoard= board #the current state of the game board\r", "title": "" }, { "docid": "f016f0953ab2027c5664927147fd0c8f", "score": "0.6437924", "text": "def init_board(self):\n\t\tboard = {}\n\t\tfor i in range(9):\n\t\t\tboard[i] = 0\n\t\treturn board", "title": "" }, { "docid": "c5f94343250a8c8ae951f42d6e77bbe4", "score": "0.64335", "text": "def __init__(self):\n self.board = [\" \", \" \", \" \", \n \" \", \" \", \" \", \n \" \", \" \", \" \"]", "title": "" }, { "docid": "02617e032a738c9b51263712e198a74a", "score": "0.6432178", "text": "def __init__(self, rows=6, cols=7):\n self.rows = rows\n self.cols = cols\n self.board = zeros((rows, cols), dtype=int)\n self.slots_filled = 0\n self.p1_win_squares = set()\n self.p2_win_squares = set()", "title": "" }, { "docid": "2127017d1bcc1d25e0060f5e59a8c0a4", "score": "0.6432137", "text": "def new_board():\r\n # initiating the number of rows and columns\r\n rows = 8\r\n cols = 8\r\n # initiating an empty board for the time being\r\n board = []\r\n empty = 0\r\n # defining the stones\r\n black = 1\r\n white = 2\r\n # design the board based on the number of rows and columns\r\n for i in range(rows):\r\n row = []\r\n for j in range(cols):\r\n row.append(empty) # insert zeros for every element initiating that the board is empty\r\n board.append(row)\r\n\r\n # defining the starting positions\r\n board[3][3] = white\r\n board[4][3] = black\r\n board[3][4] = black\r\n board[4][4] = white\r\n\r\n return board", "title": "" }, { "docid": "ad80ef8429d9f2d1a73199527629188f", "score": "0.64308524", "text": "def initial_board():\n initial_board = [[1,'000000000']]\n return initial_board", "title": "" }, { "docid": "0d8ecc22d00f15ba2b48ba0cdb74ccb5", "score": "0.64308196", "text": "def set_board(self):\n # creating index valuables\n row = 0\n\n # iterate through each row\n while row < self._max_y_coord + 1:\n column = 0\n\n # iterate through each item in the row\n while column < self._max_x_coord + 1:\n position = [row, column]\n\n # if the board position contains a piece, create it\n if self.contains_a_piece(position):\n piece = self.create_piece(position)\n\n if row < self._max_y_coord / 2:\n piece.set_player(\"black\") # set the player of the piece\n piece.set_position(position)\n self._pieces.append(piece)\n\n # keep track of the general\n if piece.get_type() == \"black general\":\n self._black_general = piece\n\n elif row > self._max_y_coord / 2:\n piece.set_player(\"red\") # set the player of the piece\n piece.set_position(position)\n self._pieces.append(piece)\n\n # keep track of the general\n if piece.get_type() == \"red general\":\n self._red_general = piece\n\n else:\n pass\n\n column += 1\n\n row += 1", "title": "" }, { "docid": "7f7c526110a13a5dde8117c3239a87da", "score": "0.6429289", "text": "def init_game(self):\n self.is_accepting = True\n self.is_ongoing = True\n self.players = []\n self.townies = []\n self.mafia = []\n self.roles = {}\n self.day_num = 0\n self.day_phase = \"Night\"\n self.vote_table = {}\n self.user_to_player = 
{}", "title": "" }, { "docid": "b2349b4d50882a931040acaa9f5b0d40", "score": "0.6428827", "text": "def constructBoard():\n for i in range(0,Solution.rows):\n for j in range(0,Solution.columns):\n isWall=False\n if(i==0 or j==0 or i==Solution.rows-1 or j==Solution.columns-1):\n isWall=True\n #Node Assignment\n Solution.board[i][j]=Node(i,j,Solution.board[i][j],isWall)\n #Configuring Source Node\n if(Solution.board[i][j].value=='S'):\n Solution.source=Solution.board[i][j]\n Solution.source.dice['up']=1\n Solution.source.dice['below']=6\n Solution.source.dice['right']=3\n Solution.source.dice['left']=4\n Solution.source.dice['front']=2\n Solution.source.dice['back']=5\n #Configuring Goal Node\n if(Solution.board[i][j].value=='G'):\n Solution.goal=Solution.board[i][j]", "title": "" }, { "docid": "a66138e159aa3cb281fabe4d2df756d6", "score": "0.6424746", "text": "def setup(self):\n\n self.clear()\n for x in range(Board.SIZE):\n self.piece_array[x][1] = Piece.Pawn(Colour.black)\n self.piece_array[x][6] = Piece.Pawn(Colour.white)\n\n self.piece_array[0][0] = Piece.Rook(Colour.black)\n self.piece_array[7][0] = Piece.Rook(Colour.black)\n self.piece_array[1][0] = Piece.Knight(Colour.black)\n self.piece_array[6][0] = Piece.Knight(Colour.black)\n self.piece_array[2][0] = Piece.Bishop(Colour.black)\n self.piece_array[5][0] = Piece.Bishop(Colour.black)\n self.piece_array[3][0] = Piece.Queen(Colour.black)\n self.piece_array[4][0] = Piece.King(Colour.black)\n\n self.piece_array[0][7] = Piece.Rook(Colour.white)\n self.piece_array[7][7] = Piece.Rook(Colour.white)\n self.piece_array[1][7] = Piece.Knight(Colour.white)\n self.piece_array[6][7] = Piece.Knight(Colour.white)\n self.piece_array[2][7] = Piece.Bishop(Colour.white)\n self.piece_array[5][7] = Piece.Bishop(Colour.white)\n self.piece_array[3][7] = Piece.Queen(Colour.white)\n self.piece_array[4][7] = Piece.King(Colour.white)", "title": "" }, { "docid": "46d24df642b51a7f9849cfd6d61a5955", "score": "0.64237493", "text": "def __init__(self):\n self.board = [[empty for col in range(rows)] for row in range(rows)]\n self.unplayedCoordinates = list(itertools.product(range(rows),range(rows)))\n self.unplayedPieces = range(2**attributes)\n self.nextPiece = 0\n self.unplayedPieces.remove(0)\n self.pair = ()", "title": "" }, { "docid": "c9a59d758b0520dfdcface5914085b60", "score": "0.64171064", "text": "def init(self,init_state):\n self.board = init_state", "title": "" }, { "docid": "2d8914fa1d55fedcd8f0817b2271a82e", "score": "0.64140195", "text": "def init_board(self):\r\n if (not(self.with_draw)) and (len(self.deck.tiles) != 0):\r\n \r\n self.board.append_on_left_side(self.deck.tiles[0])", "title": "" }, { "docid": "fdb7b2032e626570edf58d56b89ccb12", "score": "0.64105207", "text": "def generate_board(self):\n for i in range(1, 11):\n self.board[i] = [0, 0, 0, 0]", "title": "" }, { "docid": "54d836ceee3c0b04855076252cb23917", "score": "0.64102817", "text": "def create_board(self):\n for row in range(ROWS):\n self.board.append([])\n for col in range(COLS):\n if col % 2 == ((row + 1) % 2):\n if row < 3:\n self.board[row].append(Piece(row, col, WHITE))\n elif row > 4:\n self.board[row].append(Piece(row, col, RED))\n else:\n self.board[row].append(0)\n else:\n self.board[row].append(0)", "title": "" }, { "docid": "3127c26032287ec22535f5b3e1f4dc94", "score": "0.640644", "text": "def __init__(self):\n self._board = [0]", "title": "" }, { "docid": "1dc9f54152cbc6f7906e6be8d97e354f", "score": "0.640624", "text": "def __init__(self):\n self.board = [['O' for col in 
range(10)] for row in range(10)]\n self.battleships = []", "title": "" }, { "docid": "698a1664fe6365c7c46c2a16ee79f5cf", "score": "0.63962793", "text": "def test_init_board(self):\n self.assertTrue(self.board.is_empty())", "title": "" }, { "docid": "9964cec56847070685d8ea762503e28d", "score": "0.63883924", "text": "def __init__(self):\n self._players = [Player(1), Player(2)]\n self._current_player = self._players[0]\n self._player1_position = (4, 0)\n self._player2_position = (4, 8)\n self._board = [[['vedge', 'hedge', 'no_pawn' ], [ 'vedge', \"no_fence\", 'no_pawn' ], [ 'vedge', \"no_fence\", 'no_pawn' ],\n ['vedge', \"no_fence\", 'no_pawn'], ['vedge', \"no_fence\", 'no_pawn'], ['vedge', \"no_fence\", 'no_pawn'],\n ['vedge', \"no_fence\", 'no_pawn'], ['vedge', \"no_fence\", 'no_pawn'], ['vedge', \"no_fence\", 'no_pawn'] ],\n [[\"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[\"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[\"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[\"no_fence\",\"hedge\", Pawn(self._players[0])], [ 'no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn' ],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", Pawn(self._players[1 ]) ] ],\n [[ \"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[ \"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[ \"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[ \"no_fence\",\"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], 
['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']],\n [[ \"no_fence\", \"hedge\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'],\n ['no_fence', \"no_fence\", 'no_pawn'], ['no_fence', \"no_fence\", 'no_pawn'], [ \"no_fence\", \"no_fence\", 'no_pawn']] ]", "title": "" }, { "docid": "b9c7f7e46c607011a85d85db3f8b436e", "score": "0.6379419", "text": "def __init__(self):\n self.board = [[float('nan') for i in range(7)] for j in range(7)]\n for k in range(3):\n for i in range(k, 7-k, 3-k):\n for j in range(k, 7-k, 3-k):\n if not (i == j == 3):\n self.board[i][j] = '*'\n\n self.goals_count = {'x': [{}, 0], 'o': [{}, 0]}\n self.turns_left = {'x': 9, 'o': 9}", "title": "" }, { "docid": "0ed4fba35d96a15251ad6124563c75fb", "score": "0.6378751", "text": "def __init__(self, board=None, prev_player=2):\n if board is None:\n self.board = np.zeros((6, 7))\n else:\n self.board = board\n self.prev_player = prev_player", "title": "" }, { "docid": "ff3133c872c8636a178305012459c35a", "score": "0.6374657", "text": "def resetBoard(self):\n self.__init__()", "title": "" }, { "docid": "3ded780720da747907861682ebea7e61", "score": "0.6358401", "text": "def __init__(self):\n self._board_list = [[\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", ]\n ]\n self._current_state = \"UNFINISHED\"", "title": "" }, { "docid": "8dde9489d139af2f79998610e3a68515", "score": "0.6354896", "text": "def initializeState():\n # TODO: 1\n board = np.arange(0, 9)\n np.random.shuffle(board)\n\n board = board.reshape((3, 3))\n return EightPuzzleState(board.tolist())\n\n # Testing isGoal method\n # return EightPuzzleState([[1, 2, 3], [4, 5, 6], [7, 0, 8]])", "title": "" }, { "docid": "ef2ec555393bc2e16f7ac15269940c98", "score": "0.63523865", "text": "def main():\n print(\"\"\"\n Two players take turns dropping tiles into one of seven columns, trying\n to make Four-in-a-Row 
horizontally, vertically, or diagonally\n \"\"\")\n\n # Set up a new game\n game_board = get_new_board()\n player = PLAYER_X\n\n while True:\n display_board(game_board)\n player_move = get_player_move(player, game_board)\n game_board[player_move] = player\n\n # Check if win or tie\n if is_winner(player, game_board):\n display_board(game_board) # Display the board one last time\n print(f\"Player {player} has won!\")\n sys.exit()\n elif is_full(game_board):\n display_board(game_board) # Display the board one last time\n print(\"There is a tie!\")\n sys.exit()\n\n # Switch turns\n if player == PLAYER_X:\n player = PLAYER_O\n elif player == PLAYER_O:\n player = PLAYER_X", "title": "" }, { "docid": "c199aa2f15e071f657d8225a62ba2a1c", "score": "0.6352089", "text": "def __init__(self):\n # Lists are simple to call and easily mutable.\n self.board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n # We initialize current turn to player 1 and change it after every turn\n self.current_turn = 1", "title": "" }, { "docid": "2816e39010f6b0d60e5707a6bc1b4a3e", "score": "0.6351328", "text": "def __init__(self):\r\n self.board = [[\"| ______________ |\"],\r\n [\"| | 1\", \"~\", \"~\", \"~\", \"~\", \"~\", \" | |\"],\r\n [\"| | 2\", \"~\", \"~\", \"~\", \"~\", \"~\", \" | |\"],\r\n [\"| | 3\", \"~\", \"~\", \"~\", \"~\", \"~\", \" | |\"],\r\n [\"| | 4\", \"~\", \"~\", \"~\", \"~\", \"~\", \" | |\"],\r\n [\"| | 5\", \"~\", \"~\", \"~\", \"~\", \"~\", \" | |\"],\r\n [\"| | \", \"1\", \"2\", \"3\", \"4\", \"5\", \" | |\"],\r\n [\"| |______________| |\"]\r\n ]", "title": "" }, { "docid": "37f82d40a0864f138fbd63fab933ceaf", "score": "0.63473946", "text": "def __init__(self):\n self.board = Board()\n self.ships = {\"Carrier\": 5, \"Battleship\": 4, \"Cruiser\": 3, \"Submarine\": 3, \"Destroyer\": 2}\n self.opposing_player = None", "title": "" }, { "docid": "95324ec58f10c429d8de84c07746abd5", "score": "0.6341057", "text": "def __init__(self):\n\n self._game_state = \"UNFINISHED\"\n self._board = [\n [\" \", \"A \", \" \", \"B \", \" \", \"C \", \" \",\n \"D\", \" \",\n \" E\", \" \", \" F\", \" \", \" G\", \" \", \" H\",\n \" \", \" I\"],\n [\"1 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"| \", \" \",\n \"|\", \" \\ \",\n \"|\", \" / \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"2 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"| \", \" \",\n \"|\", \" / \",\n \"|\", \" \\ \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"3 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"|\", \" \",\n \"|\", \" \",\n \"|\", \" \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"4 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \",\n \" \", \" \",\n \" \", \" \", \" \", \" \", \" \", \" \", \" \",\n \" \", \" \"],\n [\"5 \", \"|\", \" \", \" \", \" \", \" \", \" \",\n \" \", \" \",\n \" \", \" \", \" \", \" \", \" \", \" \", \" \",\n \" \", \"|\"],\n [\" \", \" \", \" \", \" \", \" \", \" \", \" \",\n \" \", \" \",\n 
\" \", \" \", \" \", \" \", \" \", \" \", \" \",\n \" \", \" \"],\n [\"6 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"|\", \" \",\n \"|\", \" \",\n \"|\", \" \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"7 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"|\", \" \",\n \"|\", \" \",\n \"|\", \" \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"8 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"| \", \" \",\n \"|\", \" \\ \",\n \"|\", \" / \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"9 \", \" \", \" - \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"],\n [\" \", \"| \", \" \", \"| \", \" \", \"| \", \" \",\n \"|\", \" / \",\n \"|\", \" \\ \", \"|\", \" \", \" |\", \" \", \" |\",\n \" \", \" |\",\n \" \"],\n [\"10 \", \" \", \"- \", \" \", \" - \", \" \", \" - \",\n \" \", \" - \",\n \" \", \" - \", \" \", \" - \", \" \", \" - \", \" \",\n \" - \", \" \"]]", "title": "" }, { "docid": "4196d14dda848fd5c9a22e0555507a9e", "score": "0.6334763", "text": "def __init__(self):\n self._board = []", "title": "" }, { "docid": "a740095088b08575aee83f5be9030dac", "score": "0.6329333", "text": "def __init__(self):\n self._state = \"UNFINISHED\"\n self._last_turn = \"red\"\n self._board = [\n [Chariots(\"RCh1\", \"a1\", \"red\", \"Chariot\"), Elephants(\"REl1\", \"b1\", \"red\", \"Elephant\"),\n Horses(\"RHo1\", \"c1\", \"red\", \"Horse\"), Guards(\"RGu1\", \"d1\", \"red\", \"Guard\"), \"\",\n Guards(\"RGu2\", \"f1\", \"red\", \"Guard\"), Elephants(\"REl2\", \"g1\", \"red\", \"Elephant\"),\n Horses(\"RHo2\", \"h1\", \"red\", \"Horse\"), Chariots(\"RCh2\", \"i1\", \"red\", \"Chariot\")],\n [\"\", \"\", \"\", \"\", Generals(\"RGen\", \"e2\", \"red\", \"General\"), \"\", \"\", \"\", \"\"],\n [\"\", Cannons(\"RCa1\", \"b3\", \"red\", \"Cannon\"), \"\", \"\", \"\", \"\", \"\",\n Cannons(\"RCa2\", \"h3\", \"red\", \"Cannon\"), \"\"],\n [Soldiers(\"RSo1\", \"a4\", \"red\", \"Soldier\"), \"\",\n Soldiers(\"RSo2\", \"c4\", \"red\", \"Soldier\"), \"\",\n Soldiers(\"RSo3\", \"e4\", \"red\", \"Soldier\"), \"\",\n Soldiers(\"RSo4\", \"g4\", \"red\", \"Soldier\"), \"\",\n Soldiers(\"RSo5\", \"i4\", \"red\", \"Soldier\")],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n [Soldiers(\"BSo1\", \"a7\", \"blue\", \"Soldier\"), \"\",\n Soldiers(\"BSo2\", \"c7\", \"blue\", \"Soldier\"), \"\",\n Soldiers(\"BSo3\", \"e7\", \"blue\", \"Soldier\"), \"\",\n Soldiers(\"BSo4\", \"g7\", \"blue\", \"Soldier\"), \"\",\n Soldiers(\"BSo5\", \"i7\", \"blue\", \"Soldier\")],\n [\"\", Cannons(\"BCa1\", \"b8\", \"blue\", \"Cannon\"), \"\", \"\", \"\", \"\", \"\",\n Cannons(\"BCa2\", \"h8\", \"blue\", \"Cannon\"), \"\"],\n [\"\", \"\", \"\", \"\", Generals(\"BGen\", \"e9\", \"blue\", \"General\"), \"\", \"\", \"\", \"\"],\n [Chariots(\"BCh1\", \"a10\", \"blue\", \"Chariot\"),\n Elephants(\"BEl1\", \"b10\", \"blue\", \"Elephant\"),\n Horses(\"BHo1\", \"c10\", \"blue\", \"Horse\"), Guards(\"BGu1\", \"d10\", 
\"blue\", \"Guard\"), \"\",\n Guards(\"BGu2\", \"f10\", \"blue\", \"Guard\"), Elephants(\"BEl2\", \"g10\", \"blue\", \"Elephant\"),\n Horses(\"BHo2\", \"h10\", \"blue\", \"Horse\"), Chariots(\"BCh2\", \"i10\", \"blue\", \"Chariot\")]]", "title": "" }, { "docid": "10afad611aa8c32d3e00d41cd793b463", "score": "0.6328358", "text": "def test_new_board():\n test_board = Board()\n assert test_board.ROWS == 8\n assert test_board.COLUMNS == 8\n assert test_board.board == [[0]*8]*8", "title": "" }, { "docid": "9299a52deccdbd1e206b07256a9c2e87", "score": "0.6311875", "text": "def init_game():\n global options\n global first_player\n with open(\"game-options.json\") as input:\n options = json.load(input)\n logic.initialise_game(options)\n pygame.init()\n pygame.display.set_caption(options[\"name\"])\n graphics.initialise_game(options)\n graphics.draw_initial_board()\n player1_score, player2_score = logic.get_scores()\n graphics.draw_scores(player1_score,\n player2_score)\n first_player = logic.get_turn()", "title": "" }, { "docid": "45aed212365d380585124fe12c3abf5b", "score": "0.6307164", "text": "def initBoardState(self):\r\n for y in range(8):\r\n newRow = []\r\n for x in range(8):\r\n if (x < 3) and (((y % 2 == 0) and (x == 1)) or ((y % 2 == 1) and (x % 2 == 0))):\r\n newRow.append(self.colorsPlyr[0])\r\n elif (x >= 5) and (((y % 2 == 0) and (x % 2 == 1)) or ((y % 2 == 1) and (x % 2 == 0))):\r\n newRow.append(self.colorsAI[0])\r\n else:\r\n newRow.append(' -')\r\n self.boardState.append(newRow)", "title": "" }, { "docid": "75a1ab62f2d94286840b83aa47143910", "score": "0.6301336", "text": "def hostGame(self):\n print(\"Welcome to Connect Four! \\n\")\n print(self)\n player = input('Would you like to be \\'X\\' or \\'O\\'?')\n player = player.upper()\n if player == 'X':\n comp = 'O'\n elif player == 'O':\n comp = 'X'\n a = 1\n b = 1\n while a == b:\n users_col = -1\n while not self.allowsMove(users_col):\n users_col = int(input(\"Choose a column(\" + player +\"): \"))\n self.addMove(users_col, player)\n print(self)\n\n if self.allowsMove(users_col) == False:\n for i in range(self.width):\n if self.allowsMove(i) == True:\n users_col = i\n\n if self.winsFor(player):\n print(\"Player \" + player + \" gets the Win!\")\n return\n\n if self.isFull():\n print('tie')\n return\n\n comp_col = -1\n while not self.allowsMove(comp_col):\n comp_col = self.aiMove(comp)\n\n self.addMove(comp_col, comp)\n print(\"Computer turn\")\n print(self)\n\n if self.allowsMove(comp_col) == False:\n for i in range(self.width):\n if self.allowsMove(i) == True:\n comp_col = i\n\n if self.winsFor(comp):\n print('The Machine Wins!')\n return\n if self.isFull():\n print('tie')\n return", "title": "" }, { "docid": "c8c3274bde14f89f8c7e6f144c14d585", "score": "0.6288628", "text": "def create_a_new_board(self):\n return randomize_board()", "title": "" }, { "docid": "2fe248807e20861123a7d98579b635be", "score": "0.6284523", "text": "def initialize_game_variables(self, game_mode):\n self.game_board = Board(BOARD_SIZE[0], BOARD_SIZE[1])\n (self.board_rows, self.board_cols) = self.game_board.get_dimensions()\n self.game_logic = GameLogic(self.game_board)\n first_coin_type = random.randint(1,2)\n second_coin_type = 2 if first_coin_type == 1 else 1 \n \n if game_mode == \"single\":\n self.p1 = HumanPlayer(first_coin_type)\n if (self.trainedComputer == None):\n self.p2 = ComputerPlayer(second_coin_type, \"qlearner\")\n self.trainedComputer = self.p2\n else:\n self.trainedComputer.set_coin_type(second_coin_type)\n self.p2 = self.trainedComputer\n 
elif game_mode == \"two_player\":\n self.p1 = HumanPlayer(first_coin_type)\n self.p2 = HumanPlayer(second_coin_type)\n else:\n self.trainedComputer = None\n self.win_list = [0,0]\n self.p1 = ComputerPlayer(first_coin_type, \"qlearner\")\n self.p2 = ComputerPlayer(second_coin_type, \"qlearner\")", "title": "" }, { "docid": "8a7c433fa4701dc1d189714b3ce2551a", "score": "0.62818354", "text": "async def connect_four(self, ctx, *args):\n mention_msg = \"<@!{}>\".format(ctx.message.author.id)\n channel = ctx.message.channel\n parsed_args = helpers.parse_options(args)\n\n # Check if game is in progress\n if self.gameinprogress:\n await channel.send(\"A game is already in progress.\")\n return\n\n # Set flags for game in progress\n self.gameinprogress = True\n for item in parsed_args:\n if item.name == \"--chaos\":\n self.isChaos = True\n if item.name == \"--teams\":\n try:\n self.max_team_size = max(1, int(item.values[0]))\n except:\n self.max_team_size = 100\n\n # Initiate the message\n message = None\n\n # Initiate the players and turn order\n self.teams = [set(), set()]\n self.turn = 0\n\n # Initiate the title\n title = self.generate_title()\n tpost = await channel.send(title)\n\n # Initiate the board\n board = numpy.zeros((6, 7))\n bpost = await self.draw_board(ctx, message, board)\n\n #await ctx.message.delete()\n\n while True:\n try:\n reaction, user = await self.bot.wait_for(\"reaction_add\", timeout=30)\n\n # Process input\n if str(reaction.emoji) in reactions:\n if reactdict[str(reaction.emoji)] == 7 and self.isPlayer(user):\n # Surrender if white flag clicked and the user is part of the game\n if (self.isChaos is True):\n mention_msg = \"<@!{}> surrendered. No, I don't know who wins.\".format(color, user.id)\n else:\n if (user in self.teams[0]):\n color = \"Red\"\n else:\n color = \"Yellow\"\n mention_msg = \"{} team surrendered (<@!{}>).\".format(color, user.id)\n await channel.send(mention_msg)\n self.reset()\n return\n elif reactdict[str(reaction.emoji)] < 7:\n if len(self.teams[self.turn]) < self.max_team_size and user not in self.teams[1-self.turn]:\n self.teams[self.turn].add(user)\n if self.isValidPlayer(user):\n # process turn if valid user\n c = reactdict[str(reaction.emoji)] # figure out what column to drop in\n r = find_first_nonzero_in_col(board, c) # figure out what row to drop on, if possible\n if r is not None:\n board[r][c] = self.turn + 1 # 1 for p1, 2 for p2\n self.turn = 1 - self.turn # valid move; pass the turn\n if self.isChaos is True:\n self.lastPlayer = user # in chaos mode, store the last player to move.\n # update title and board messages\n title = self.generate_title()\n await tpost.edit(content=title)\n await self.draw_board(ctx, bpost, board)\n # check for victory\n v = check_for_victory(board, r, c)\n if v != 0:\n color = \"Red\"\n if int(v) == 2:\n color = \"Yellow\"\n mention_msg = \"{} team won the game! 
(\".format(color)\n for player in self.teams[int(v)-1]:\n mention_msg += \"<@!{}>\".format(player.id)\n mention_msg += \")\"\n await channel.send(mention_msg)\n self.reset()\n return\n\n #await bpost.remove_reaction(reaction, user)\n\n except asyncio.TimeoutError:\n print(\"Timeout\")\n await channel.send(\"Time's up, nerds.\")\n self.reset()\n return", "title": "" }, { "docid": "bd076daf09ff2e9ace6cb852f4b16f11", "score": "0.6273671", "text": "def __init__(self, player_color_1, player_color_2):\n\n preset_board = [[[\"R\"], [\"R\"], [\"G\"], [\"G\"], [\"R\"], [\"R\"]],\n [[\"G\"], [\"G\"], [\"R\"], [\"R\"], [\"G\"], [\"G\"]],\n [[\"R\"], [\"R\"], [\"G\"], [\"G\"], [\"R\"], [\"R\"]],\n [[\"G\"], [\"G\"], [\"R\"], [\"R\"], [\"G\"], [\"G\"]],\n [[\"R\"], [\"R\"], [\"G\"], [\"G\"], [\"R\"], [\"R\"]],\n [[\"G\"], [\"G\"], [\"R\"], [\"R\"], [\"G\"], [\"G\"]]]\n\n s, r = player_color_1 # s = player, r = color\n p, g = player_color_2 # p = player, g = color\n\n self._player_1 = s\n self._player_2 = p\n self._color_1 = r.upper()\n self._color_2 = g.upper()\n self._player_color_1 = player_color_1\n self._player_color_2 = player_color_2\n self._preset_board = preset_board\n self._current_state = \"UNFINISHED\"\n self._whose_turn = self._player_1\n self._reserve_player_1 = [] # contains reserved pieces for player 1\n self._reserve_player_2 = [] # contains reserved pieces for player 2\n self._capture_player_1 = [] # contains player 2's pieces that has been captured\n self._capture_player_2 = [] # contains player 1's pieces that has been captured\n self._game_over = False", "title": "" }, { "docid": "a567d96f34d7e05b6c068e733ad896c4", "score": "0.6270784", "text": "def testYetAgainAnotherBoard(self):\r\n self.__bf.readFromFile(\"testboard6\")\r\n b = self.__bf.createBoard()", "title": "" }, { "docid": "ada9bdc7d65773c755caf9a2ce240ac2", "score": "0.62647605", "text": "def __init__(self, board):\n self.board = board\n (num_rows, num_columns) = self.board.get_dimensions()\n self.board_rows = num_rows\n self.board_cols = num_columns\n self.winner_value = 0", "title": "" }, { "docid": "e2bdeaab46a4a47166c7a41920cf6a1f", "score": "0.62541443", "text": "def __init__(self):\n pygame.init()\n pygame.display.set_caption(\"2048 in Python!\")\n icon = pygame.image.load('images\\\\2048_white.png')\n pygame.display.set_icon(icon)\n self.TILE_SIZE = 100\n self.WIDTH = ((self.TILE_SIZE+20) * (4))-10\n self.HEIGHT = ((self.TILE_SIZE+20) * (5))-10\n self.SCREEN_SIZE = (self.WIDTH, self.HEIGHT)\n self.screen = pygame.display.set_mode(self.SCREEN_SIZE)\n self.running = True\n self.board = Board()", "title": "" }, { "docid": "f9a77b901f3ae2c75c93c88063d186f1", "score": "0.62533385", "text": "def begin(self):\n\n # TODO: debugging stuff\n# you = self.players[0]\n# self.players.append(User(2, \"!\", \"test1\", conn=you.conn))\n# self.players.append(User(3, \"#\", \"test3\", conn=you.conn))\n\n random.shuffle(self.players)\n for player in self.players:\n player.tile = self.tiles[0]\n player.game = self\n\n #TODO: debugging----------------\n self.tiles[15].owner = self.players[1]\n\n self.tiles[5].owner = self.players[1]\n self.tiles[25].owner = self.players[1]\n self.tiles[35].owner = self.players[1]\n #self.tiles[-1].owner = self.players[0]\n #self.tiles[-3].owner = self.players[0]\n #self.tiles[-1].num_houses = 2\n #self.tiles[-3].num_houses = 2\n #self.tiles[-11].owner = self.players[1]\n #self.tiles[-13].owner = self.players[1]\n #self.tiles[-14].owner = self.players[1]\n #self.tiles[-11].num_houses = 4\n 
#self.tiles[-13].num_houses = 4\n #self.tiles[-14].num_houses = 4\n #self.tiles[12].owner = self.players[1]\n\n #-------------------------------\n\n self.started = True\n self.current_player = self.players[0]\n self.tiles[0].occupants = self.players.copy()\n\n self.add_message(\"The game has begun!\")\n order = \", \".join([\"{} ({})\".format(p.name, p) for p in self.players])\n self.add_message(\"The order of play is {}\".format(order))\n self.SHOW_BOARD()\n self.current_player.send(\"It is your turn! [R]oll the dice by typing 'R'.\")", "title": "" }, { "docid": "67329e7a216abde9dc8eb7663fc2cd6a", "score": "0.62472814", "text": "def _init_game(self):\n command = \"START/\" + str(3 - self._data.starting) # 2 -> 1 and 1 -> 2\n self._send_message(command, self._connection2)", "title": "" }, { "docid": "fc32e57d45f818aa0b24bb2ca0ab9430", "score": "0.62449515", "text": "def newGame(player1,player2):\n game = {\n 'player1' : player1,\n 'player2' : player2,\n 'who' : 1,\n 'board' : [[0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,2,1,0,0,0],\n [0,0,0,1,2,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0]]}\n\n # TODO: Initialize dictionary for a new game\n return game", "title": "" }, { "docid": "9fb8f23e489d5ca183e5a29be776d19f", "score": "0.62428415", "text": "def __init__(self, white, black):\n if not all([isinstance(p, Player) for p in (white, black)]):\n raise ValueError(\"Game must be set up with valid Players\")\n self.players = {chess.WHITE: white,\n chess.BLACK: black}\n self.board = chess.Board()\n \n # Reset both players here in case they already had internal state.\n white.reset()\n black.reset()\n \n # Result is semi-independent of the board state because players can\n # lose by submitting an invalid move. \n self.result = None\n \n # PGN exporter - use this to export the game for later analysis.\n self.exportgame = chess.pgn.Game({\"White\": \n self.players[chess.WHITE].name,\n \"Black\":\n self.players[chess.BLACK].name})", "title": "" }, { "docid": "3a5fdf351f16f4f6057988f7cdba4c02", "score": "0.62423986", "text": "def _create_board(self):\n for i in range(10):\n column = []\n for j in range(9):\n column.append(None)\n self.__board.append(column)", "title": "" }, { "docid": "d7e9ba480e9d6c4ce2c9708c3ce383da", "score": "0.62321615", "text": "def new_board():\n\n\t# This function creates a new board (a list) with all nine elements \n\t# set to EMPTY and returns it\n\tboard = []\n\tfor square in range(NUM_SQUARES):\n\t\tboard.append(EMPTY)\n\treturn board", "title": "" }, { "docid": "794dfd2100069935517166538c05674e", "score": "0.6220346", "text": "def initialize_game():\n print(\"\")\n print(\"Welcome to the memory game!\")\n print(\"==============================\")\n user = input(\"Enter your name: \")\n size = get_int_input(\"The board is an A x A square. Enter a value for A: \")\n\n print(\"\")\n print(\"You chose a\", size, \"x\", size, \"board. 
Let's play!\")\n print(\"==============================\")\n\n memory = MemoryGame(user, size)\n words = memory.words_for_game\n board = memory.board\n\n # Reads in all the words in the given memo file\n\n memory.get_words_from_file(word_file)\n\n # Creates a new list with the right amount of words from the wordlist\n\n memory.create_list_for_game()\n\n # Makes a dictionary for the game\n\n memory.dictionary_for_game(words)\n\n # Creates a memory board\n\n memory.create_board()\n\n # Prints the memory board\n\n display_board(board)\n\n # Plays game\n\n play_game(memory, board, user)\n\n return", "title": "" }, { "docid": "8c4af82c0d96227263a4ab5738018501", "score": "0.6220281", "text": "def start_game(self):\n current_player = Tic.COMPUTER\n winner = None\n print(\"Move by entering the appropriate cell number:\\n\"\n \"+-+-+-+\\n\"\n \"|0|1|2|\\n\"\n \"+-+-+-+\\n\"\n \"|3|4|5|\\n\"\n \"+-+-+-+\\n\"\n \"|6|7|8|\\n\"\n \"+-+-+-+\\n\")\n while self.get_moves():\n\n # making a move\n if current_player == Tic.COMPUTER:\n move = self.minimax(5, True)[1]\n else:\n while True:\n try:\n move = int(input(\"Your move:\\n\"))\n except Exception:\n print(\"Illegal move, try again.\")\n continue\n if move in self.get_moves():\n break\n else:\n print(\"Illegal move, try again.\")\n self.make_move(move, current_player)\n self.print_board()\n\n # check for a winning position\n check = self.check_for_winner()\n if check == 0:\n winner = 0\n break\n elif check == 1:\n winner = 1\n break\n\n # change player\n if current_player == 0:\n current_player = 1\n else:\n current_player = 0\n\n if winner == 0:\n print(\"You lost!\")\n elif winner == 1:\n print(\"You won!\")\n else:\n print(\"Draw!\")", "title": "" }, { "docid": "f6f50591ea0268f8eeee281ee413104a", "score": "0.62165767", "text": "def __init__(self, player1, player2):\r\n self.player1 = player1\r\n self.player2 = player2\r\n self.__current_player = player1\r\n\r\n # Pawn initialization\r\n pawns1_list = [0, 1, 2, 3, 4, 5, 6, 7]\r\n p1_pawns = {}\r\n for name in pawns1_list:\r\n p1_pawns[name] = Pawn(player1)\r\n p2_pawns = {}\r\n for name in pawns1_list:\r\n p2_pawns[name] = Pawn(player2)\r\n\r\n # Pawn Placement\r\n self.board = [(), (), (), (), (), (), (), ()] * 8\r\n for i in range(8, 16):\r\n self.board[i] = p1_pawns[i - 8]\r\n for i in range(48, 56):\r\n self.board[i] = p2_pawns[i - 48]\r\n\r\n # Rook Initialization/Placement\r\n self.board[0] = Rook(player1)\r\n self.board[7] = Rook(player1)\r\n self.board[56] = Rook(player2)\r\n self.board[63] = Rook(player2)\r\n\r\n # Knight Initialization/Placement\r\n self.board[1] = Knight(player1)\r\n self.board[6] = Knight(player1)\r\n self.board[57] = Knight(player2)\r\n self.board[62] = Knight(player2)\r\n\r\n # Bishop Initialization/Placement\r\n self.board[2] = Bishop(player1)\r\n self.board[5] = Bishop(player1)\r\n self.board[58] = Bishop(player2)\r\n self.board[61] = Bishop(player2)\r\n\r\n # Queen Initialization/Placement\r\n self.board[4] = Queen(player1)\r\n self.board[59] = Queen(player2)\r\n\r\n # King Initialization/Placement\r\n self.board[3] = King(player1)\r\n self.board[60] = King(player2)", "title": "" } ]
d949997beb31ca924df3cdb23230190b
Function to initialize the Window and its properties.
[ { "docid": "52d1d927d2154ba449870dc44c957799", "score": "0.0", "text": "def initialize_tkinter_window(self):\n self.width, self.height = 500, 600 # Screen size of the initial window\n self.ws = self.winfo_screenwidth() # width of the screen\n self.hs = self.winfo_screenheight() # height of the screen\n\n # calculate x and y coordinates for the Tk root window\n self.x = (self.ws / 2) - (self.width / 2)\n self.y = (self.hs / 2) - (self.height / 2)\n self.geometry('%dx%d+%d+%d' % (self.width, self.height, self.x, self.y))\n\n self.title(\"Sam Edelman Search Console Data Extract\")\n self.canvas = tk.Canvas(self, bg='#D6EAF8')\n self.canvas.place(relwidth=1, relheight=1)\n \n self.get_files_button = tk.Button(self, text='Get All Files', command=self.get_files)\n self.get_files_button.place(relx=0.18, rely=0.95, relwidth=0.15, relheight=0.05, anchor='w')\n\n # self.get_details_button = tk.Button(self, text='Get Details', command=self.get_details)\n # self.get_details_button.place(relx=0.43, rely=0.95, relwidth=0.15, relheight=0.05, anchor='w')\n\n self.clear_output_button = tk.Button(self, text='Clear', command=self.clear_output)\n self.clear_output_button.place(relx=0.43, rely=0.95, relwidth=0.15, relheight=0.05, anchor='w')\n\n self.exit_button = tk.Button(self, text='Exit', command=self.exit_application)\n self.exit_button.place(relx=0.68, rely=0.95, relwidth=0.15, relheight=0.05, anchor='w')\n\n self.progress_bar = ttk.Progressbar(self, orient='horizontal', length=286, mode='determinate')\n self.progress_bar.place(relx=0.5, rely=0.44, relwidth=0.75, relheight=0.05, anchor='n')", "title": "" } ]
[ { "docid": "a4521b50f655b91ad23a1e5814152959", "score": "0.81713", "text": "def Initialize(self, window):", "title": "" }, { "docid": "14c1338b2aa60718417be6d0a2e2a7ed", "score": "0.7876121", "text": "def window_init(self):\n self.window = tk.Tk()\n self.window.title(\"ArgZ Pic2Pdf v{}\".format(self.state.versionList[-1][0]))\n self.window.geometry(\n \"{}x{}\".format(C4U.grid_offsets[\"xscreen\"], C4U.grid_offsets[\"yscreen\"])\n )\n self.window.resizable(0, 0)", "title": "" }, { "docid": "6616f6e84de495b7916c682764d4e78b", "score": "0.76528263", "text": "def setup_window(self):\n self.set_canvas()\n self.set_initial_values()\n self.set_bindings()\n self.plot_change()", "title": "" }, { "docid": "ab65d509e3633b59796f15ef3f68a8a7", "score": "0.7501757", "text": "def initializeWindow(self):\n\t\tself.main_window = Win(topmost=True, resizable=False, title=\"Symbols Floater\",\n\t\t\t\t\t\t\t initial_position=\"center\", focusable=False, suppress_X=True)\n\t\t# self.main_window.resize(300, 150)\n\t\tmain_box = self.main_window.addBox(parent=self.main_window, orientation=\"vertical\")\n\t\tbutton_row = self.main_window.addBox(parent=main_box, orientation=\"horizontal\")\n\t\tself.main_window.addButton(lambda *args: self.main_window.hide(), label=\"X\", parent=button_row)\n\t\tself.main_window.addButton(self.openAddPageDialog, label=\"Add Page\", parent=button_row)\n\t\tself.main_window.addButton(self.openAddSymbolDialog, label=\"Add Symbols\", parent=button_row)\n\t\tself.main_window_nb = self.main_window.addNotebook(parent=main_box)\n\t\tself.main_window_nb.connect(\"page_reordered\", self.signal_handlers.tabReorder)", "title": "" }, { "docid": "bd4105e620470b0ffc2a608fda556d1c", "score": "0.7484979", "text": "def initialise_window(self):\n raise(NotImplementedError())", "title": "" }, { "docid": "8296914ce966671b236d6480a0848249", "score": "0.7383443", "text": "def init_window(self):\n self.view.show_window(self.to_display)", "title": "" }, { "docid": "2086aab68656b99f05679f399aa03de5", "score": "0.728844", "text": "def main():\r\n init_window()", "title": "" }, { "docid": "0acede21897eb3a4a45fdacec113c6e8", "score": "0.7257876", "text": "def init_window(self):\n if self.window:\n return\n wid = None\n for i in os.environ:\n if \"XSCREENSAVER\" in i:\n logger.debug(\"%s: %s\" % (i, os.environ[i]))\n if self.args.window_id:\n try:\n wid = int(self.args.window_id)\n except ValueError:\n wid = int(self.args.window_id, 16)\n elif \"XSCREENSAVER_WINDOW\" in os.environ:\n try:\n wid = int(os.environ[\"XSCREENSAVER_WINDOW\"])\n except ValueError:\n wid = int(os.environ[\"XSCREENSAVER_WINDOW\"], 16)\n\n if wid and not self.args.window:\n self.window = GsThemeWindow(wid)\n\n else:\n self.window = gtk.Window()\n self.window.fullscreen()\n self.window.connect('key_press_event', self.on_keypressed)\n self.window.connect('destroy', gtk.main_quit)\n self.image = ResizableImage(True, True, gdk.INTERP_BILINEAR)\n self.window.add(self.image)\n self.window.show_all()\n\n self.screen_size = self.window.get_size()", "title": "" }, { "docid": "95b0462678110753752c489d3e454275", "score": "0.7149342", "text": "def setupWindow(self, **kwargs):\n additional_kwargs = {'screen': 1, \n 'monitor': 'testMonitor'}\n additional_kwargs.update(kwargs)\n self.win = visual.Window(self.window_dims, allowGUI=False, \n fullscr=self.fullscreen, units='norm',\n allowStencil=True, color=[-1,-1,-1], \n **additional_kwargs) \n \n self.win.flip()\n self.win.flip()", "title": "" }, { "docid": "c4b9a8133072ed6604d1d2ea723c9679", 
"score": "0.7140696", "text": "def __init__(self, window):", "title": "" }, { "docid": "c4b9a8133072ed6604d1d2ea723c9679", "score": "0.7140696", "text": "def __init__(self, window):", "title": "" }, { "docid": "c4b9a8133072ed6604d1d2ea723c9679", "score": "0.7140696", "text": "def __init__(self, window):", "title": "" }, { "docid": "c4b9a8133072ed6604d1d2ea723c9679", "score": "0.7138954", "text": "def __init__(self, window):", "title": "" }, { "docid": "d2a13eb7d7c49e8c40da3a856f620938", "score": "0.71370983", "text": "def __init__(self):\n\n\t\tgtk.Window.__init__(self)\n\n\t\tself.buildGui()", "title": "" }, { "docid": "4c0a8831a5c6ef4b2e26cd5226c1a792", "score": "0.70644367", "text": "def _init_window_settings(self):\n self.resize(self.settings.value(\"MainWindow/size\", defaultValue=self.size()))\n self.move(self.settings.value(\"MainWindow/pos\", defaultValue=self.pos()))\n if self.settings.value(\"MainWindow/isMaximized\") == 'true':\n self.showMaximized()", "title": "" }, { "docid": "8616dd3523608f6a3d763523285cc61b", "score": "0.7050802", "text": "def __init__(self):\n super(MainWindow, self).__init__()\n\n # This function initializes the GUI.\n self.initUI()", "title": "" }, { "docid": "d04c8bbb0b55ff023ca46f0f4d822e3d", "score": "0.69478065", "text": "def initialize(self):\n pg.init()\n self._display = pg.display.set_mode((self.win_w, self.win_h))", "title": "" }, { "docid": "0182b62c277de96776ad94f990b9d98d", "score": "0.6876598", "text": "def __init__(self):\n\n super(DefaultsWindow, self).__init__()\n\n self.setWindowTitle(\"default\")\n self.resize(900, 600)\n self.knob_defaults_list = helper.load_knob_defaults()\n self.current_knobdefault_changed = False\n self.build_ui()", "title": "" }, { "docid": "844f3ca72756cc84c17b5e367bfb99ac", "score": "0.6870893", "text": "def __init__(self, window):\n self.window = window\n self.reset()", "title": "" }, { "docid": "76bb0fcc613ff4512ec5c648a325c45a", "score": "0.67823553", "text": "def setup_window():\n # intialize the game window\n screen = pygame.display.set_mode((display_width,display_height))\n screen.set_caption('Mario Level 1-1 Parody')\n return screen", "title": "" }, { "docid": "d64415f0885b0b9d607af79466beba13", "score": "0.6777881", "text": "def __init__(self):\n super(Window, self).__init__()\n self.image = None\n self.image_tk = None\n self.background = None\n self.title(\"labesoft Library\")\n self.minsize(width=MIN_WIDTH, height=MIN_HEIGHT)\n self.geometry(f\"{WIDTH}x{HEIGHT}\")", "title": "" }, { "docid": "e9cddb9b6032d12d809fb5d9759babb0", "score": "0.6775751", "text": "def __init__(self):\n\t\tself.win_content = 0\n\t\tself.win_status = 0\n\t\tself.win_command = 0\n\t\tself.viewer = 0\n\n\t\tself.size = [0,0]\n\t\tself.size_changed = True", "title": "" }, { "docid": "28846e4a0647e8ddc6b731dfee154f35", "score": "0.6767465", "text": "def __init__(self):\n self.window = None\n self.main_widget = None\n self.m_layout = None\n self.start_button = None\n self.settings_button = None\n self.exit_button = None\n self.back_button = None\n self.v_layout = None\n self.s_layout = None\n self.main_menu = None\n self.language_switch = None\n self.settings_menu = None\n self.trans = None", "title": "" }, { "docid": "a4a72624f3c310fed09be160a057273a", "score": "0.6718633", "text": "def _initialize_view(self) -> None:\n self._logger.debug(\"running\")\n self.menu_bar.set_debug_action(self._settings.value(\"logging/level\"))\n self.menu_bar.set_cam_bool_checked(eval(self._settings.value(\"cam_scanner/active\")))\n 
self.main_window.add_menu_bar(self.menu_bar)\n self.main_window.add_control_bar_widget(self.button_box)\n self.main_window.add_control_bar_widget(self.flag_box)\n self.main_window.add_control_bar_widget(self.note_box)\n self.main_window.add_control_bar_widget(self.layout_box)\n self.main_window.add_spacer_item(1)\n self.main_window.add_control_bar_widget(self.info_box)\n self.main_window.add_control_bar_widget(self.d_info_box)\n self.main_window.add_mdi_area(self.mdi_area)\n self.main_window.show()\n self._logger.debug(\"done\")", "title": "" }, { "docid": "5e807d4ba19604f4e7ff794fa60f9af7", "score": "0.67032194", "text": "def create_ui_window(self):\n self.window = tkinter.Tk()\n self.window.title(\"Quizzler App\")\n self.window.config(padx=20, pady=20, bg=THEME_COLOR)", "title": "" }, { "docid": "9302a45cd4bccfbe3df4a01ebbedc979", "score": "0.6668207", "text": "def __init__(self, window):#{{{\n\t\tself.window = window\n\t\tself.window.keypad(1)\n\t\tself.panel = panel.new_panel( self.window )\n\t\tself.panel.bottom()\n\t\tself.panel.hide()\n\n\t\tself.y0, self.x0 = window.getbegyx()\n\t\tself.height, self.width = window.getmaxyx()\n\t\tself.visible = False#}}}", "title": "" }, { "docid": "7a314ea5a0aab4ceee3fdfcb64ba18ba", "score": "0.66369003", "text": "def __init__(self, **traits):\n\n # Base class constructor.\n super(MainWindow, self).__init__(**traits)\n\n # Create the window's menu, tool and status bars.\n self._create_action_bars()\n \n return", "title": "" }, { "docid": "2e703ed9dbf0eabf58b56d2a5eb651c2", "score": "0.6632522", "text": "def __init__(self, parent, title):\n\t\tsuper(MainWindow, self).__init__(parent, title=title, size=(650, 400))\n\t\tself.gatherConfigInfo('weatherrecorderwx.conf')\n\t\tself.widgetids = {}\n\t\tself.InitUI()\n\t\tself.Centre()\n\t\tself.Show()", "title": "" }, { "docid": "54bfae114f839eec1db91143a4a99d7a", "score": "0.65882236", "text": "def SetWindow(self, window):", "title": "" }, { "docid": "54bfae114f839eec1db91143a4a99d7a", "score": "0.6587569", "text": "def SetWindow(self, window):", "title": "" }, { "docid": "9529a8d45e7a757598cbe2863510da1d", "score": "0.6580233", "text": "def _create_window(self):\n \n # TODO: When window resizes, resize all widgets. 
Prevent window from becoming too small\n \n # Main tkinter window\n self.root = tkinter.Tk()\n \n # Window Setup\n self.root.title(\"Image Filters\")\n self.root.geometry(\"400x400\")\n \n self.logger.debug(\"Successfully created a new window.\")", "title": "" }, { "docid": "b228cd656b9be9b1123a79c1829b01c9", "score": "0.65610206", "text": "def setup_window(options): \n #http://www.pyglet.org/doc/api/pyglet.window-module.html\n display = pyglet.window.get_platform().get_default_display()\n screens = display.get_screens()\n main_screen = display.get_default_screen()\n logger.debug('Found %d screens' %(len(screens)))\n logger.debug('Main screen id is %d, specs are %s' %(main_screen.id, main_screen))\n if len(screens) > 1:\n #If we have multiple monitors us the secondary one as fullscreen display\n for screen in screens:\n logger.debug('Screen id is %d: %s' %(screen.id, screen))\n for screen in screens:\n if screen.id != main_screen.id:\n window = pyglet.window.Window(fullscreen=True, screen=screen)\n break\n else:\n window = pyglet.window.Window()\n\n #This code has to come AFTER the window is initialized\n #http://www.python-forum.org/pythonforum/viewtopic.php?f=2&t=11160\n # using color tuple (r,g,b,a), a is alpha value\n # rgba values are from 0.0 to 1.0\n r = g = b = float(neutral_gray)/(neutral_gray+max_range)\n bg = (r,g,b,1.0)\n pyglet.gl.glClearColor(*bg)\n\n return window", "title": "" }, { "docid": "bee2eb801ba2398c7061061628fc4ee5", "score": "0.65577954", "text": "def __init__(self):\r\n\r\n # Create a new window and set its properties.\r\n gtk.Window.__init__(self)\r\n self.set_title(_(u\"RTK Matrices & Lists\"))\r\n self.set_resizable(True)\r\n self.set_deletable(False)\r\n self.set_skip_pager_hint(True)\r\n self.set_skip_taskbar_hint(True)\r\n\r\n _n_screens = gtk.gdk.screen_get_default().get_n_monitors()\r\n _width = gtk.gdk.screen_width() / _n_screens\r\n _height = gtk.gdk.screen_height()\r\n\r\n self.set_default_size((_width / 3) - 10, (2 * _height / 7))\r\n self.set_border_width(5)\r\n self.set_position(gtk.WIN_POS_NONE)\r\n self.move((2 * _width / 3), 0)\r\n\r\n self.connect('delete_event', destroy)\r\n\r\n self.show_all()", "title": "" }, { "docid": "e4584deeaba39a3331def291a06285e2", "score": "0.6547795", "text": "def glInit(self):\n self.window = \"\"", "title": "" }, { "docid": "c693fbcbfcc3554c2d8e856f6d7ecc36", "score": "0.6533723", "text": "def setup_window(self):\n self.setWindowIcon(QIcon(\":/icons/grant.ico\"))\n\n self.main_screen = MainScreen(self, self.data_context)\n self.setCentralWidget(self.main_screen)\n\n self.project_manager.project_changed.connect(self.project_changed_handler)\n self.project_manager.project_saved.connect(self.setup_window_title)\n\n def model_changed():\n self.project_manager.needs_saving = True\n self.setup_window_title()\n\n self.data_context.data_model.dataChanged.connect(model_changed)\n self.data_context.data_model.layoutChanged.connect(model_changed)\n self.data_context.data_model.rowsRemoved.connect(model_changed)", "title": "" }, { "docid": "0fd86a73ef62b785ca0da4ca6921c168", "score": "0.64973843", "text": "def __init__(self, parent=None):\n super(GuiWindow, self).__init__(parent)", "title": "" }, { "docid": "9ee91c938182bac23a6de45ecf2bab09", "score": "0.6484493", "text": "def initialize(self):\n self.window.set_title('Moon Lander')\n\n self.over = 0\n self.lander = None\n self.alien = None\n self.alien_drop_object = None\n self.asteroid_drop_object = None\n self.explosion = None\n self.shield_message = None", "title": "" }, { 
"docid": "87d50b17a9bc82c191643a0575cfa282", "score": "0.6478071", "text": "def windowSetup():\n \n scene.autoscale = false # Don't auto rescale\n scene.background = (1.00,0.76,0.80)\n scene.foreground = color.black\n scene.height = 1200 # height of graphic window in pixels\n scene.width = 1200 # width of graphic window in pixels\n scene.x = 100 # x offset of upper left corner in pixels\n scene.y = 100 # y offset of upper left corner in pixels\n scene.title = 'Twitter Sentiment Trends'", "title": "" }, { "docid": "6e82638cdf150eb0d868f21296484853", "score": "0.6455222", "text": "def create_window(self):\n\n window = pyglet.window.Window(\n width=800,\n height=600,\n resizable=True)\n\n return window", "title": "" }, { "docid": "4ada5ce7291d4dcdef61fd4cfa199f91", "score": "0.6452414", "text": "def init():\n # Set the logical resolution of the window.\n Driftwood.window.resize(240, 240)\n\n # Load the area.\n Driftwood.area.focus(\"grid.json\")\n\n # Run a UI test.\n Driftwood.script[\"test02.py\"].test()", "title": "" }, { "docid": "47aca9a4578ba4edb314a75e1173c86b", "score": "0.6452174", "text": "def __set_window(self):\r\n\r\n global currentDay, currentMonth, currentYear\r\n currentDay = datetime.now().day\r\n currentMonth = datetime.now().month\r\n currentYear = datetime.now().year\r\n\r\n\r\n\r\n # define the window title, icon & geometry\r\n self.setWindowIcon(QIcon('../src/data/icon/potager2.png'))\r\n self.setWindowTitle(self.title)\r\n # geometry = QDesktopWidget().availableGeometry() # adapt to the screen's geometry\r\n # self.setFixedSize(geometry.width(), geometry.height())\r\n self.setFixedSize(1900, 1000)\r\n\r\n self.__b_width = 300 # width of the buttons\r\n self.__b_height = 50 # height of the buttons\r\n self.__b_shift = 50 # shift of the buttons\r\n self.__mb_height = 30 # height of the menu bar\r\n\r\n self.maxw = self.width() - self.__b_width # Max width\r\n self.maxh = self.height() - self.__mb_height - self.__b_height # Max height\r\n self.right_b_width = self.__b_width # Button width\r\n\r\n # Widgets init\r\n self.the_potager()\r\n self.side_vegetables()\r\n self.menu_bar()\r\n self.my_calendar()\r\n self.buttons()\r\n self.resize_and_move()\r\n\r\n self.show()", "title": "" }, { "docid": "4d2e18edb46e8235b8e6c94c889496a0", "score": "0.64512515", "text": "def setUp(self):\n self.caption = \"mirra OSC example\" # window name\n self.size = 640, 480 #window size\n self.pos = 100,100 # window top left location\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\n self.frameRate = 15 # set refresh framerate", "title": "" }, { "docid": "b06b57cdf888241c2b53287fc498bf9f", "score": "0.64467084", "text": "def setUp(self):\n self.caption = \"mirra example BouncingBall class extends Circle\" # window name\n self.size = 640, 480 #window size\n self.pos = 100,100 # window top left location\n self.fullScreen = 0 # if fullScreen is on it will overwrite your pos and size to match the display's resolution\n self.frameRate = 15 # set refresh framerate", "title": "" }, { "docid": "9b5380fcb8a47a00c1d9dcb462e958ae", "score": "0.6407645", "text": "def OnInit(self):\n self.window = gui.MainWindowController(self, None, wx.ID_ANY, \"\")\n self.window.Show()\n return True", "title": "" }, { "docid": "7fc69131e894deeda00040deb8595389", "score": "0.6367861", "text": "def __init__(self, parentWindow=None):", "title": "" }, { "docid": "9916c8951ffc110af2a1e83aea28e0a3", "score": "0.6363622", "text": "def AssignWindow(self, window):", 
"title": "" }, { "docid": "6ccfd955467a87a0d1c5dcc2417c8e39", "score": "0.6329415", "text": "def setup(**geometry):\n\n global _width, _height, _startx, _starty\n\n width = geometry.get('width',_width)\n if width >= 0 or width == None:\n _width = width\n else:\n raise ValueError, \"width can not be less than 0\"\n\n height = geometry.get('height',_height)\n if height >= 0 or height == None:\n _height = height\n else:\n raise ValueError, \"height can not be less than 0\"\n\n startx = geometry.get('startx', _startx)\n if startx >= 0 or startx == None:\n _startx = startx\n else:\n raise ValueError, \"startx can not be less than 0\"\n\n starty = geometry.get('starty', _starty)\n if starty >= 0 or starty == None:\n _starty = starty\n else:\n raise ValueError, \"startx can not be less than 0\"\n\n\n if _root and _width and _height:\n if 0 < _width <= 1:\n _width = _root.winfo_screenwidth() * +width\n if 0 < _height <= 1:\n _height = _root.winfo_screenheight() * _height\n\n # center window on screen\n if _startx is None:\n _startx = (_root.winfo_screenwidth() - _width) / 2\n\n if _starty is None:\n _starty = (_root.winfo_screenheight() - _height) / 2\n\n _root.geometry(\"%dx%d+%d+%d\" % (_width, _height, _startx, _starty))", "title": "" }, { "docid": "49a8613593222ca490403d1b5fcca7ea", "score": "0.63289297", "text": "def windowSetup():\n \n scene.autoscale = False\n scene.background = (0.8, 1.0, 1.0)\n scene.foreground = color.black\n scene.height = 1000 #height of graphic window in pixels\n scene.width = 1000 #width of graphic window in pixels\n scene.up = (1, 0, 0) #project the window around a vertical line along the x-axis\n scene.forward = (0, 0, 1) #set camera pointing in the +z direction\n scene.center = (35, -100, -40) #adjust center to focus on U.S. 
map of state extrusions\n scene.lights = [distant_light(direction = (1, 1, 1), color = color.gray(0.1)),\n distant_light(direction=(-1, -1, -1), color = color.gray(0.7))]\n scene.title = 'Twitter Trends'", "title": "" }, { "docid": "b844931824d6ceb0d4706c8bc04bde96", "score": "0.630968", "text": "def initUI(self):\n\n self.resize(450, 750)\n self.setObjectName(\"Main Window\")\n\n # The central widget really doesn't seem to be all that important.\n # But it has to be set if the top-level widget is a QMainWindow,\n # apparently.\n central_widget = QtGui.QWidget(self)\n self.setCentralWidget(central_widget)\n central_widget.setObjectName(\"central widget\")\n\n # The main window has three sections\n # 1) menu bar\n # 2) model configuration tab\n # 3) model output tab\n self._setup_menubar()\n\n tabs = QtGui.QTabWidget()\n\n model_configuration_frame = self._set_model_configuration_frame()\n tabs.addTab(model_configuration_frame, \"Model Configuration\")\n\n model_output_frame = self._setup_model_output_frame()\n tabs.addTab(model_output_frame, \"Model Output\")\n\n layout = QtGui.QVBoxLayout()\n layout.addWidget(tabs)\n central_widget.setLayout(layout)", "title": "" }, { "docid": "c539b70dc02eb8a059cd41159a3dff94", "score": "0.6292387", "text": "def createWindow(self):\n window.createWindow(self._tools)", "title": "" }, { "docid": "67c2b7e12820e8d8edfae0fe6a5fb5a4", "score": "0.6288895", "text": "def __init__(self, windowId=0):", "title": "" }, { "docid": "498e9b3744ba9a1a7d936345feaa81ed", "score": "0.627466", "text": "def init_gui(self):\n pass", "title": "" }, { "docid": "743357586f6115d21d86071d538afddf", "score": "0.62655365", "text": "def init_main_window():\n main_window = curses.newwin(BOARD_HEIGHT + 2, 2 * BOARD_WIDTH + 2, 1, 5)\n main_window.nodelay(True)\n main_window.keypad(1)\n return main_window", "title": "" }, { "docid": "4d9f1eef151f6f072f928462d1d1d5a0", "score": "0.6265464", "text": "def OnInit(self):\n frame = MyFrame()\n frame.Show(True)\n self.SetTopWindow(frame)\n return True", "title": "" }, { "docid": "81c669d665bae63ad1ba37dc66443a7c", "score": "0.62325925", "text": "def _local_init(self):\n self.width_key = 'interface.citation-width'\n self.height_key = 'interface.citation-height'\n \n self.glade = Glade()\n self.set_window(self.glade.toplevel, None, \n self.get_menu_title())\n \n self.share_btn = self.glade.get_object('select_source')\n self.add_del_btn = self.glade.get_object('add_del_source')", "title": "" }, { "docid": "f7907088e4b1dec8f562f5acab00b5fb", "score": "0.6231921", "text": "def __build_window(self):\n from gramps.gui.glade import Glade\n glade_xml = Glade()\n self._window = glade_xml.toplevel\n #self._window.set_transient_for(parent)\n \n # remember active widgets for future use\n self._swin = glade_xml.get_object('swin')\n self._drawing_area = glade_xml.get_object('drawingarea')\n self._first_button = glade_xml.get_object('first')\n self._prev_button = glade_xml.get_object('prev')\n self._next_button = glade_xml.get_object('next')\n self._last_button = glade_xml.get_object('last')\n self._pages_entry = glade_xml.get_object('entry')\n self._pages_label = glade_xml.get_object('label')\n self._zoom_fit_width_button = glade_xml.get_object('zoom_fit_width')\n self._zoom_fit_width_button.set_stock_id('gramps-zoom-fit-width')\n self._zoom_best_fit_button = glade_xml.get_object('zoom_best_fit')\n self._zoom_best_fit_button.set_stock_id('gramps-zoom-best-fit')\n self._zoom_in_button = glade_xml.get_object('zoom_in')\n 
self._zoom_in_button.set_stock_id('gramps-zoom-in')\n self._zoom_out_button = glade_xml.get_object('zoom_out')\n self._zoom_out_button.set_stock_id('gramps-zoom-out')\n\n # connect the signals\n glade_xml.connect_signals(self)\n self._drawing_area.connect(\"draw\", self.on_drawingarea_draw_event)", "title": "" }, { "docid": "57ac7d62969214b24c4924e01c64c253", "score": "0.62315565", "text": "def __init__(self, win=None):", "title": "" }, { "docid": "57ac7d62969214b24c4924e01c64c253", "score": "0.6229587", "text": "def __init__(self, win=None):", "title": "" }, { "docid": "57ac7d62969214b24c4924e01c64c253", "score": "0.62291276", "text": "def __init__(self, win=None):", "title": "" }, { "docid": "57ac7d62969214b24c4924e01c64c253", "score": "0.62284434", "text": "def __init__(self, win=None):", "title": "" }, { "docid": "eed20ec32411df8107b009b6402b7501", "score": "0.6222341", "text": "def OnInit(self):\n logging.info(\"Starting...\")\n self.start_directory = os.getcwd()\n\n # Create a default active project\n logging.info(\"Create default project\")\n self.projects = {}\n self.projects[\"default\"] = project.Project(self)\n self.activeproject = self.projects[\"default\"]\n self.update_title_text()\n\n if self.gui:\n logging.info(\"Show main frame\")\n # Create and show main frame\n self.frame = tcui.ViewMain(None, self, wx.ID_ANY, \"TileCutter\")\n self.SetTopWindow(self.frame)\n\n logging.debug(\"Bind Quit Event\")\n # Bind quit event\n self.frame.Bind(wx.EVT_CLOSE, self.OnQuit)\n\n logging.debug(\"Init window sizes\")\n # Window inits itself to its minimum size\n if config.window_maximised:\n self.frame.Maximize()\n\n # If a larger size is specified in config, set to this instead\n if config.window_size[0] > self.frame.GetBestSize().GetWidth() and config.window_size[1] > self.frame.GetBestSize().GetHeight():\n self.frame.SetSize(config.window_size)\n else:\n # Otherwise just use the minimum size\n self.frame.Fit()\n\n logging.debug(\"Init window position\")\n # If a window position is saved, place the window there\n if config.window_position != [-1, -1]:\n self.frame.SetPosition(config.window_position)\n else:\n # Otherwise center window on the screen\n self.frame.CentreOnScreen(wx.BOTH)\n else:\n logging.debug(\"Command line mode, not creating GUI\")\n\n logging.debug(\"Completed!\")\n return True", "title": "" }, { "docid": "ce06910c47cfb3682a0bfd3e440e55c9", "score": "0.6199375", "text": "def __init__(self, width, height, title, monitor, share, window):\n self.__window = weakref.ref(window)\n # for singular `glfw` initiation\n ContextMaster.checkinit_context()\n # have a meta node grouping shared contexts\n self.__meta_context = MetaContext.checkbuild_meta(self, share)\n if share:\n share = share.context.glfw_window\n # store `glfw`'s window object for `glfw.make_context_current()`\n self.__glfw_context = GLFWContext(width, height, title, monitor, share)\n self.__renderer_context = OGLSubContext(self)\n\n self.__init_local_setting()", "title": "" }, { "docid": "5a77e6b25356cdf4e4d6e608b37dc2a8", "score": "0.619925", "text": "def initialize_components(self):\n self.geometry(\"650x400\")\n self.maxsize(self.width, self.height)\n self.minsize(self.width, self.height)\n self.title(\"Sign Up-Chat Room\")\n self.config(bg=\"grey\")", "title": "" }, { "docid": "41e97884d5180e1384cdcd68cc6c97bc", "score": "0.6198376", "text": "def __init__(self):\n super().__init__()\n self.sub = i3.Subscription(self.windowHandler, 'window', 'focus', daemon=True)\n\n #TODO add a call to i3 to get currently 
focused window", "title": "" }, { "docid": "a1becce3ab2dc5ad1c5ffda634004d26", "score": "0.6188054", "text": "def __initUI(self):\n self.__createMainMenu()\n self.__createGroupContextMenu()\n self.__createToolbar()\n self.__createLayout()\n self.setGeometry(200, 0, 1000, 800)\n self.setWindowTitle('EasyLeague')\n self.show()", "title": "" }, { "docid": "c99e9876185d15e483f18e73f72cf4a8", "score": "0.61846966", "text": "def __init__(self, window=None, doNow=True):", "title": "" }, { "docid": "7513a9a5399d1e4d61ccff2f3011362e", "score": "0.6184247", "text": "def __init__(self):\n self.database = Database()\n self.bg_color = '#dbe6f0'\n self.config_window()\n self.set_frames()", "title": "" }, { "docid": "057a6094e4036c6ab6c317a9503608dc", "score": "0.6159391", "text": "def __init__(self):\n\t\t# Initialise panda window\n\t\tShowBase.__init__(self)\n\t\tself.setBackgroundColor(.1, .1, .1)\n\n\t\t# Setup window\n\t\twp = WindowProperties()\n\t\twp.setOrigin(0, 0)\n\t\twp.setSize(self.PANDA_WINDOW_WIDTH, self.PANDA_WINDOW_HEIGHT)\n\n\t\t# Get drawing area and set its size\n\t\tpanda_drawing_area = builder.get_object(\"pandaDrawingArea\")\n\t\tpanda_drawing_area.set_size_request(self.PANDA_WINDOW_WIDTH, self.PANDA_WINDOW_HEIGHT)\n\n\t\t# Panda should not open own top level window but use the window of the drawing area in GTK\n\t\thandle = NativeWindowHandle.makeInt(panda_drawing_area.get_property('window').get_xid())\n\t\twp.setParentWindow(handle)\n\n\t\t# Open panda window\n\t\tself.openDefaultWindow(props=wp)\n\n\t\tdef gtk_iteration(task):\n\t\t\t\"\"\"\n\t\t\tHandles the gtk events and lets as many GUI iterations run as needed.\n\t\t\t\"\"\"\n\t\t\twhile Gtk.events_pending():\n\t\t\t\tGtk.main_iteration_do(False)\n\t\t\treturn task.cont\n\n\t\t# Create task to update GUI\n\t\tself.taskMgr.add(gtk_iteration, \"gtk\")\n\n\t\t# Activate antialiasing (MAuto for automatic selection of AA form)\n\t\tself.render.setAntialias(AntialiasAttrib.MAuto)\n\n\t\t# Deactivate default mouse control of the camera as they are not very helpful\n\t\tself.disableMouse()\n\n\t\t# Set camera to default position and orientation\n\t\tself.camera.setPos(0, -4, 2)\n\t\tself.camera.lookAt(0, 0, 1)\n\t\tself.camLens.setFov(90)\n\n\t\t# Load the camera control events to control camera by keyboard\n\t\tself.cam_control = CameraControl(self, handler)\n\t\t# Store it as a class variable of the Handler so the controller can be called by it\n\t\tHandler.cam_control = self.cam_control\n\n\t\t# Load scene\n\t\tself.scene = self.loader.loadModel(\"models/rooms/room_neu.egg\")\n\t\tself.scene.reparentTo(self.render) # Panda3D makes use of a scene graph, where \"render\" is the parent of the\n\t\t# tree containing all objects to be rendered\n\n\t\t# Add lights\n\t\t# TODO: Make the lighting somewhat nicer (maybe with some ambient lights?)\n\t\tfor i in range(0, 3):\n\t\t\tdlight = DirectionalLight(\"light\")\n\t\t\tdlnp = self.render.attachNewNode(dlight)\n\t\t\tdlnp.setHpr((120 * i) + 1, -30, 0)\n\t\t\tself.render.setLight(dlnp)\n\n\t\t# Create a bullet world (physics engine)\n\t\tself.world = BulletWorld()\n\t\t# self.world.setGravity(LVector3f(0, 0, -9.81))\n\t\tself.world.setGravity(LVector3f(0, 0, 0)) # No gravity for now (makes forces easier to calculate)\n\n\t\tdef update_bullet(task):\n\t\t\t\"\"\"\n\t\t\tInvokes the physics engine to update and simulate the next step.\n\t\t\t\"\"\"\n\t\t\tdt = globalClock.getDt() # get elapsed time\n\t\t\tself.world.doPhysics(dt) # actually update\n\t\t\treturn task.cont\n\n\t\t# Create 
task to update physics\n\t\tself.taskMgr.add(update_bullet, 'update_bullet')\n\n\t\t# Set up the ground for the physics engine\n\t\tground_shape = BulletPlaneShape(LVector3f(0, 0, 1), 0) # create a collision shape\n\t\tground_node_bullet = BulletRigidBodyNode('Ground') # create rigid body\n\t\tground_node_bullet.addShape(ground_shape) # add shape to it\n\n\t\tground_node_panda = self.render.attachNewNode(ground_node_bullet) # attach to panda scene graph\n\t\tground_node_panda.setPos(0, 0, 0) # set position\n\n\t\tself.world.attachRigidBody(ground_node_bullet) # attach to physics world\n\n\t\t# Create and activate a debug node for bullet and attach it to the panda scene graph\n\t\tdebug_node_bullet = BulletDebugNode('Debug')\n\t\tdebug_node_bullet.showWireframe(True)\n\t\tdebug_node_bullet.showConstraints(True)\n\t\tdebug_node_bullet.showBoundingBoxes(True)\n\t\tdebug_node_bullet.showNormals(True)\n\t\tdebug_node_panda = self.render.attachNewNode(debug_node_bullet)\n\t\t# debug_node_panda.show()\n\t\tself.world.setDebugNode(debug_node_panda.node())\n\t\t# Store it as a class variable of the Handler so the debug mode can be switched by it\n\t\tHandler.bullet_debug_node = debug_node_panda\n\n\t\t# Load the class to manage the drones\n\t\tself.drone_manager = DroneManager(self)\n\t\t# Store it as a class variable of the Handler changes can be invoked\n\t\tHandler.drone_manager = self.drone_manager", "title": "" }, { "docid": "bf55f9f177e0135632e2590759eb6edc", "score": "0.61498386", "text": "def initialize_screen(self):\n\n\n self.master.title(\"DAME\")\n self.create_visualization_selector()\n self.create_quit_button()\n self.create_canvas()\n self.create_open_file_button()\n self.create_x_column_selector()\n self.create_y_column_selector()\n self.create_column_selector_label()\n self.create_show_visualization_button()\n self.create_linear_regression_button()\n self.create_same_columns_warning_label()\n self.create_plot_color_selector()", "title": "" }, { "docid": "04436b8b69e012437fc44c53ada56bba", "score": "0.61465305", "text": "def __init__(self, window_width, window_height):\n self.window_width = window_width\n self.window_height = window_height\n self.current_frame = 0\n self.wins = (0, 0)\n self.reset()", "title": "" }, { "docid": "cdf51cca44dfdb1171930c5a6771eecb", "score": "0.6143801", "text": "def display(self):\n self.window_init()\n self.layer1Frame()", "title": "" }, { "docid": "298c37d78c182607c05af5643539ce51", "score": "0.6143486", "text": "def __init__(self, pos=(0, 0), size=(0, 0), mode=None):\n BaseWindowHandler.__init__(self, pos=pos, size=size, mode=mode)\n # Tests\n if DEBUG_WM:\n print \"#\" * 72\n print \"WWindowHandler.__init__\"\n print \"Desktop environment:\", desktop_environment\n for item in dir(win32con):\n if 'maxim' in item.lower() or 'minim' in item.lower() or 'full' in item.lower():\n print item, getattr(win32con, item)\n self.base_handler = display\n self.base_handler_id = display.get_wm_info()['window']", "title": "" }, { "docid": "8540caeb971cb949fdaf693b699a95ba", "score": "0.6128782", "text": "def newBrowserInit(self, mw):\n QMainWindow.__init__(self, None, Qt.Window)", "title": "" }, { "docid": "227d58784442c488d94e2cd22122bff0", "score": "0.61237466", "text": "def server_config(self):\n\n global config_window\n # initializing window with current parameters\n config_window = ConfigWindow(self.config)", "title": "" }, { "docid": "b65271b0cb157e951c151d962bfbb012", "score": "0.61203223", "text": "def initialize_window(stdscr):\n # Ensures a clean visual 
space.\n stdscr.clear()\n curs_set(False)\n\n # Set the background of the app to the secondary color.\n stdscr.bkgd(' ', color_pair(1))\n stdscr.refresh()", "title": "" }, { "docid": "8489942c51bda31376183adfc21906db", "score": "0.61197877", "text": "def __init__(self,name=\"Unnamed\",size=None,text_font=\"monospace\",text_size=50,text_color=WHITE,background_color=BLACK,fullscreen=False,build=True):\n Window.made+=1\n self.number=Window.made\n self.name=name\n self.text_font=text_font\n self.text_size=text_size\n self.text_color=text_color\n self.background_color=background_color\n self.fullscreen=fullscreen\n self.set()\n self.log(\"Window has been created.\")\n if build:\n self.build(size)", "title": "" }, { "docid": "44266847beaa70d2d4393749553c29e0", "score": "0.6105558", "text": "def setupWindows(self):\n cv2.namedWindow(\"Image\")\n cv2.namedWindow(\"Map\")\n cv2.moveWindow(\"Image\", 30, 50)\n cv2.moveWindow(\"Map\", 700, 50)", "title": "" }, { "docid": "12dd425201db2c304436bb1f5a27f170", "score": "0.6102763", "text": "def init_window():\n window = tk.Tk()\n window.title(\"人脸识别系统\")\n label_1 = tk.Label(text=\"南京邮电大学人脸识别系统\", font=('Arial', 20), width=50, height=10)\n label_1.pack()\n width = 1080\n height = 900\n screenwidth = window.winfo_screenwidth()\n screenheight = window.winfo_screenheight()\n alighstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)\n window.geometry(alighstr)\n canva = tk.Canvas(window, width=600, height=335)\n return window, canva", "title": "" }, { "docid": "52e343e47cc745294c0ec7052bd3e9d8", "score": "0.61023706", "text": "def initialize(self):\n # set first page of notebook tabs - meanwhile for some historic reason\n self.notebook.set_current_page([\"Servers\", \"Display\", \"Filters\", \"Actions\",\\\n \"Notification\", \"Colors\", \"Defaults\"].index(self.first_page))\n\n # store fullscreen state to avoid innecessary popwin flickering\n self.saved_fullscreen_state = str(self.conf.fullscreen)\n\n # toggle regexp options\n self.ToggleREHostOptions()\n self.ToggleREServiceOptions()\n self.ToggleREStatusInformationOptions()\n\n # care about Centreon criticality filter\n self.ToggleRECriticalityFilter()\n self.ToggleRECriticalityOptions()\n\n # toggle debug options\n self.ToggleDebugOptions()\n\n # toggle sounds options\n self.ToggleSoundOptions()\n self.ToggleCustomSoundOptions()\n\n # toggle icon in systray popup offset\n self.ToggleSystrayPopupOffset()\n\n # toggle fullscreen display selection combobox\n self.ToggleFullscreenDisplay()\n\n # toggle notification action options\n self.ToggleNotificationActions()\n self.ToggleNotificationActionWarning()\n self.ToggleNotificationActionCritical()\n self.ToggleNotificationActionDown()\n self.ToggleNotificationActionOk()\n self.ToggleSystemKeyring()", "title": "" }, { "docid": "62cba23807e843cd1679520d8a43942e", "score": "0.61004114", "text": "def runWindow(self):\n\n\t\tself.focus_set()\n\t\tself.grab_set()\n\t\tself.transient(self.master)\n\t\tself.resizable(width=False,height=False)", "title": "" }, { "docid": "2c9bde3bb634812e791b23bdc5805421", "score": "0.6097556", "text": "def glCreateWindow(self, width, height):\n self.window = Bitmap(width, height)", "title": "" }, { "docid": "68b7839a4702788cc56b1a702bd56b14", "score": "0.6088962", "text": "def __init__(self):\n w = MainWindow()\n login = LoginWindow()\n login.authenticate(w)", "title": "" }, { "docid": "cc0c6d00525aebac2c001767f2cac5f0", "score": "0.6088848", "text": "def setUp(self):\n self.window = Window()\n 
self.turtle = Turtle(self.window)\n self.turtle.speed = 4\n self.turtle.color = \"red\"", "title": "" }, { "docid": "641487d509981ad2c03e6ad184c26314", "score": "0.6068623", "text": "def __init__(self, windowed: bool) -> None:\n self._display = Display(windowed)\n self._images = SpriteImages.load_images(self._display)\n self._sounds = Sounds.load_sounds()", "title": "" }, { "docid": "570ff297834fa077c41d98b05bbc5f67", "score": "0.6067836", "text": "def setup(self): # run firts\n self.create_widgets() # create widget first\n self.setup_layout() # setup widget inside layouts, and initialize values", "title": "" }, { "docid": "fa4de812bfeefe308239fa921ed8ea09", "score": "0.6064214", "text": "def __init__(self, MainWindow):\n self.parent = MainWindow", "title": "" }, { "docid": "a96488fa75c600e317c1cb92fedc4fc3", "score": "0.60493606", "text": "def init_ui(self):\r\n self.setWindowTitle(self.title)\r\n self.setGeometry(self.left_, self.top_, self.width_, self.height_)\r\n\r\n \"\"\" Setting window to frameless \"\"\"\r\n self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\r\n\r\n # self.setWindowFlags(QtCore.Qt.CustomizeWindowHint | # Allow for customizing buttons\r\n # QtCore.Qt.WindowCloseButtonHint | # Add close button\r\n # QtCore.Qt.WindowMaximizeButtonHint | # Add maximize button\r\n # QtCore.Qt.WindowMinimizeButtonHint) # Add minimize button\r\n\r\n \"\"\" Maximize window and brings it to the front on secondary display, or to main one if not available \"\"\"\r\n desktop = QtWidgets.QApplication.desktop()\r\n screen = desktop.screenGeometry(1)\r\n if not screen.width(): # If secondary display is not found, width() == 0\r\n screen = desktop.screenGeometry(0)\r\n self.left_ = screen.left()\r\n self.top_ = screen.top()\r\n self.width_ = screen.width()\r\n self.height_ = screen.height()\r\n self.light_radius = 0.13 * self.width_ # ~250px on 1920x1080 display\r\n self.top_label_radius = 0.117 * self.width_ # ~225px on 1920x1080 display\r\n self.setGeometry(self.left_, self.top_, self.width_, self.height_)\r\n\r\n # self.open_maximized()\r\n # self.close()\r\n\r\n \"\"\" Setting background color \"\"\"\r\n background_color = QtCore.Qt.black\r\n self.set_background_color(background_color)\r\n\r\n \"\"\" Setting up light grid \"\"\"\r\n self.lights = []\r\n self.labels_on_top = []\r\n self.setup_grid()\r\n\r\n \"\"\" Ensuring visibility for all lights \"\"\"\r\n for light in self.lights:\r\n light.setVisible(True)", "title": "" }, { "docid": "6cda9a8e86a71922d57c90a8dc9a16eb", "score": "0.60475546", "text": "def initializeGUI(self):\n self.app = QtGui.QApplication(sys.argv)\n self.imageDisplay = ImageDisplay(self.args[\"fStart\"],\n self.args[\"fEnd\"],\n self.args[\"fInterval\"],\n self.args[\"fSpeedFactor\"],\n self.imageHandler)", "title": "" }, { "docid": "a5ca7d9cc8cb9f2e3fcebd4815cad331", "score": "0.60471576", "text": "def initGui(self) -> None:", "title": "" }, { "docid": "c5c5ae2eb1cc9b9ce140f2db3339fa53", "score": "0.60385096", "text": "def new(self):\n self._writeWindowAttributeSettings()\n p = mp.Process(target=main, args=())\n p.start()", "title": "" }, { "docid": "74246254216e2de3a3dc0aa9b73e0775", "score": "0.602996", "text": "def XPLMCreateWindow(self, inLeft, inTop, inRight, inBottom, inIsVisible,\n inDrawCallback, inKeyCallback, inMouseCallback, inRefcon):\n pass", "title": "" }, { "docid": "4037d589e34af8a1d169a9b7e8076ffb", "score": "0.60286355", "text": "def __init__(self, *args, **kwargs):\n Frame.__init__(self, *args, **kwargs)\n\n self.pack()\n root.title(\"ChatBot 
GUI\")\n self.configure(background='grey')\n self.make_widgets()", "title": "" }, { "docid": "675edeaf68002b577128b80fa6317e5b", "score": "0.6008984", "text": "def window_settings(\n self,\n title='Firefly',\n annotation=None,\n showfps=False,\n **extra):\n\n self.__window_settings = {\n 'title':title, \n ## used in the web app to check if the settings have been read in\n ## this should not be modified\n 'loaded':True, \n 'annotation':annotation ,\n 'showfps':showfps\n }", "title": "" }, { "docid": "f8999a2e2ede3ef6e8e7809b8247183d", "score": "0.60070515", "text": "def config_window(self):\n self.window = tk.Tk()\n self.window.title('ChangeYourFood')\n self.window.geometry('1080x720')\n self.window.minsize(480, 360)\n self.window.config(bg=self.bg_color)\n menu_bar = tk.Menu(self.window)\n file_menu = tk.Menu(menu_bar, tearoff=0)\n file_menu.add_command(label=\"Quitter\", command=self.window.quit)\n file_menu.add_command(label=\"Importer des produits\",\n command=self.database.insert_products_from_api)\n menu_bar.add_cascade(label=\"Fichier\", menu=file_menu)\n self.window.config(menu=menu_bar)", "title": "" }, { "docid": "086b0b82660b690d17f8c259dcee3c9d", "score": "0.6002623", "text": "def __init__(self):\n pygame.init()\n flags = RESIZABLE\n App.screen = pygame.display.set_mode((640, 240), flags)\n App.t = Text('Pygame App', pos=(20, 20))\n\n App.running = True", "title": "" }, { "docid": "1a4bb5b99982d8e2c4302f9fc8c078ce", "score": "0.59964746", "text": "def __init__(self, name, w, h):\r\n self.exit = 0\r\n\r\n # One currently loaded map at any one time\r\n self.map = None\r\n\r\n self.init_ents()\r\n\r\n self.fov_map = True\r\n\r\n # Initialise default windows with None.\r\n self.win_man = graphics.WindowManager(w,h,name)\r\n self.game_win = None\r\n self.inv_win = None\r\n self.msg_win = None\r\n\r\n # Initialise colors.\r\n self.change_color_scheme((0,0,0), (255,255,255), (0,0,0), (255,255,255), (125, 125, 125))\r\n self.create_windows()\r\n\r\n # Initialise keyboard interface.\r\n self.keyboard = interface.KeyboardListener()\r\n\r\n # Initialise menus.\r\n self.menu_stack = [ ]", "title": "" }, { "docid": "53ea6be7b25b69d90c2a0587ec5dc94e", "score": "0.5965906", "text": "def setup(self):\n\n window_size = self.get_size()\n self.game_instance = GameInstance(self)\n\n # Set the background color\n # arcade.set_background_color(arcade.color.AMAZON)", "title": "" }, { "docid": "e8cdf946fe970e48343f8b7557725e20", "score": "0.59520257", "text": "def _init_widgets(self):\n window = QMainWindow()\n window.setWindowFlags(Qt.Widget)\n\n status_bar = QFrame()\n status_lyt = QHBoxLayout()\n status_lyt.setContentsMargins(0, 0, 0, 0)\n\n self._status_lbl = QLabel()\n self._status_lbl.setText('Address: ')\n\n status_lyt.addWidget(self._status_lbl)\n status_lyt.addStretch(0)\n\n option_btn = QPushButton()\n option_btn.setText('Options')\n option_mnu = QMenu(self)\n smart_hl_act = QAction('Smart &highlighting', self)\n smart_hl_act.setCheckable(True)\n smart_hl_act.setChecked(self.smart_highlighting_enabled)\n smart_hl_act.toggled.connect(self.set_smart_highlighting_enabled)\n option_mnu.addAction(smart_hl_act)\n option_btn.setMenu(option_mnu)\n status_lyt.addWidget(option_btn)\n\n status_bar.setLayout(status_lyt)\n\n self.inner_widget = HexGraphicsView(parent=self)\n lyt = QVBoxLayout()\n lyt.addWidget(self.inner_widget)\n lyt.addWidget(status_bar)\n self.setLayout(lyt)\n\n self._widgets_initialized = True", "title": "" }, { "docid": "a980c67a03d339b13db9a90826c43421", "score": "0.59460735", 
"text": "def setup(self):\n self.create_widgets()\n self.setup_layout()", "title": "" }, { "docid": "3c563e0776ab8d25da871115f7722081", "score": "0.5936763", "text": "def __init__(self):\n\n self.location2 = coin.SoLocation2Event()\n self.mousebutton = coin.SoMouseButtonEvent()\n self.windowsize = coin.SbVec2s(-1, -1)", "title": "" }, { "docid": "59e0209da8fa8b1f303531816e6128db", "score": "0.59367514", "text": "def configure_window(self):\n\n\t\tself.title(\"TicTacStayOverThere\")\n\t\tself.configure(bg=BACKGROUND_SCREEN)\n\t\ticon_path = Path(os.getcwd(), \"res\", \"icon.png\")\n\t\ticon = tk.PhotoImage(file=icon_path)\n\t\tself.iconphoto(False, icon)\n\t\tself.bind_keys()", "title": "" } ]
94860492140c86be3dbeae61b5dcf0cb
Event handler which pokes the language after traversing and authentication is done, but before rendering. Normally language negotiation happens in LanguageTool.setLanguageBindings(), but that happens before we have found request["PUBLISHED"] and before we know whether we are an editor or not.
[ { "docid": "616a2ce3e989268bff0c812166c9a9c9", "score": "0.67999476", "text": "def admin_language_negotiator(event):\n\n request = event.request\n\n lang = get_editor_language(request)\n\n if lang:\n # Kill it with fire\n request[\"LANGUAGE\"] = lang\n tool = request[\"LANGUAGE_TOOL\"]\n tool.LANGUAGE = lang\n tool.LANGUAGE_LIST = [lang]\n tool.DEFAULT_LANGUAGE = lang", "title": "" } ]
[ { "docid": "a6377d501e4c8d04e631c1f22eea8aea", "score": "0.5977694", "text": "def lang(update, context):\r\n\r\n query = update.callback_query\r\n query.answer() # according to telegram api, all queries must be answered\r\n\r\n if query.data == \"en\":\r\n query.edit_message_text(text=\"language set to English\")\r\n context.user_data[\"lang\"] = \"en\"\r\n elif query.data == \"pe\":\r\n query.edit_message_text(text=\"زبان فارسی انتخاب شد\")\r\n context.user_data[\"lang\"] = \"pe\"\r\n\r\n start(update, context) # this way the user sees the start message after choosing language\r", "title": "" }, { "docid": "4c833b54844fa166497af2be0b5b0045", "score": "0.5956164", "text": "def set_language(request):\n next = request.POST.get('next', request.GET.get('next'))\n if ((next or not request.is_ajax()) and\n not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure())):\n next = request.META.get('HTTP_REFERER')\n next = next and unquote(next) # HTTP_REFERER may be encoded.\n if not is_safe_url(url=next, allowed_hosts={request.get_host()}, require_https=request.is_secure()):\n next = '/'\n response = HttpResponseRedirect(next) if next else HttpResponse(status=204)\n if request.method == 'POST':\n lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)\n if lang_code and check_for_language(lang_code):\n if next:\n next_trans = translate_url(next, lang_code)\n if next_trans != next:\n response = HttpResponseRedirect(next_trans)\n if hasattr(request, 'session'):\n request.session[LANGUAGE_SESSION_KEY] = lang_code\n response.set_cookie(\n settings.LANGUAGE_COOKIE_NAME, lang_code,\n max_age=settings.LANGUAGE_COOKIE_AGE,\n path=settings.LANGUAGE_COOKIE_PATH,\n domain=settings.LANGUAGE_COOKIE_DOMAIN,\n )\n return response", "title": "" }, { "docid": "1138cd219ee64b4bf0f138fd3b1a2e95", "score": "0.5917116", "text": "def __call__(self, request):\n # Code to be executed for each request before the view (and later middleware) are called.\n if settings.LANGUAGE_COOKIE_NAME in request.COOKIES:\n language = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n # translation.activate(language)\n # request.LANGUAGE_CODE = translation.get_language()\n else:\n language = 'en'\n\n translation.activate(language)\n request.LANGUAGE_CODE = translation.get_language()\n\n response = self.get_response(request)\n # Code to be executed for each request/response after the view is called.\n \"\"\"\n Create cookie if not there already.\n\n Also deactivates language.\n (See http://stackoverflow.com/a/13031239/388835 )\n \"\"\"\n if settings.LANGUAGE_COOKIE_NAME not in request.COOKIES:\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, 'en-gb')\n translation.deactivate()\n return response", "title": "" }, { "docid": "95f0c19b11231e1e70bc82b36428dc38", "score": "0.5670059", "text": "def langChanged(self, text):\n self.conf.setLanguage(text)\n self.app.removeTranslator(self.translator.getGeneric())\n self.app.removeTranslator(self.translator.getDFF())\n self.translator.loadLanguage()\n self.app.installTranslator(self.translator.getGeneric())\n self.app.installTranslator(self.translator.getDFF())", "title": "" }, { "docid": "b26d6a98d9386ada9f029c254745acb6", "score": "0.56235003", "text": "def set_language(request):\r\n next = request.REQUEST.get('next', None)\r\n if not next:\r\n next = request.META.get('HTTP_REFERER', None)\r\n if not next:\r\n next = '/'\r\n response = http.HttpResponseRedirect(next)\r\n if request.method == 'POST':\r\n lang_code = request.POST.get('language', None)\r\n if 
lang_code and check_for_language(lang_code):\r\n if hasattr(request, 'session'):\r\n request.session['django_language'] = lang_code\r\n else:\r\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)\r\n return response", "title": "" }, { "docid": "91b212d69785fdd3c24e75940659c8e1", "score": "0.5620841", "text": "def test_setlang_performs_redirect_for_ajax_if_explicitly_requested(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code, 'next': '/'}\n response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertRedirects(response, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "34bffe89ffb1bb3b1d921ff9ee3e3c73", "score": "0.56127983", "text": "def OnSelection(self, e):\n # Set active translation to the one specified\n logging.info(\"User selected language: %s\" % gt.longname_to_name(self.language_picker.GetValue()))\n\n gt.set_active_translation(gt.longname_to_name(self.language_picker.GetValue()))\n # Call own translate function\n self.translate()\n self.app.frame.translate()", "title": "" }, { "docid": "a5fd6a9988c004f0f685fd550fbee4a7", "score": "0.5590465", "text": "def set_language(request):\n next = request.REQUEST.get('next')\n if not is_safe_url(url=next, host=request.get_host()):\n next = request.META.get('HTTP_REFERER')\n next = strip_language_code(next)\n if not is_safe_url(url=next, host=request.get_host()):\n next = '/'\n response = http.HttpResponseRedirect(next)\n if request.method == 'POST':\n lang_code = request.POST.get('language', None)\n if lang_code and check_for_language(lang_code):\n if hasattr(request, 'session'):\n request.session['django_language'] = lang_code\n else:\n response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)\n return response", "title": "" }, { "docid": "bba2b5c756247c571af98f4f22f7a567", "score": "0.5589956", "text": "def test_setlang_unsafe_next_for_ajax(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}\n response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.url, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "a3ea8cab862447fe6cb08aafc1f1e4ee", "score": "0.5523617", "text": "def change_lang(self, event=None):\n lang = self.lang.get()\n friendly_traceback.set_lang(lang)\n demo_lang.install(lang)\n self.set_ui_lang()", "title": "" }, { "docid": "dcd205688bf3902df810c8e5f1fae0c1", "score": "0.55192673", "text": "def userLanguage(self, userLanguage):\n pass", "title": "" }, { "docid": "6b56439070576dab40fad96ec4eab461", "score": "0.5469554", "text": "def UpdateLangChoice(self):\n if len(self.__annparam.get_langlist()) > 0:\n lang = self.__annparam.get_lang()\n if lang is None or len(lang) == 0:\n lang = LANG_NONE\n choice = self.FindWindow(\"lang_choice\")\n choice.SetSelection(choice.GetItems().index(lang))\n choice.Refresh()", "title": "" }, { "docid": "1bdc0d053da5b21d086fe75414bd9234", "score": "0.54316133", "text": "def get_editor_language(request):\n\n context = find_context(request)\n\n # Filter out CSS and other non-sense\n # IFolderish check includes site root\n if not (IContentish.providedBy(context) or IFolderish.providedBy(context)):\n # Early terminate\n return None\n\n # Check if we are the editor\n if not getSecurityManager().checkPermission(permissions.ModifyPortalContent, 
context):\n # Anon visitor, normal language ->\n return None\n\n return FORCED_LANGUAGE", "title": "" }, { "docid": "fe90d19feaf22eeb014762fbc1bdb829", "score": "0.54222184", "text": "def test_setlang_doesnt_perform_a_redirect_to_referer_for_ajax(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code}\n headers = {'HTTP_REFERER': '/', 'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}\n response = self.client.post('/i18n/setlang/', post_data, **headers)\n self.assertEqual(response.status_code, 204)\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "d8f88239fa06680b63e10fc32939402d", "score": "0.54125404", "text": "def test_setlang_doesnt_perform_a_default_redirect_for_ajax(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code}\n response = self.client.post('/i18n/setlang/', post_data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n self.assertEqual(response.status_code, 204)\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "21a5a1b30488a260725bcc599b5bfa61", "score": "0.53964055", "text": "def _switch_language(self, lang):\n data = {\"learning_language\": lang}\n url = \"https://www.duolingo.com/switch_language\"\n request = self._make_req(url, data)\n\n try:\n parse = request.json()['tracking_properties']\n if parse['learning_language'] == lang:\n self.user_data = Struct(**self._get_data())\n except:\n raise Exception('Failed to switch language')", "title": "" }, { "docid": "7f034792c174cf5395e43e450e0d0df4", "score": "0.5366342", "text": "def test_setlang_unsafe_next(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code, 'next': '//unsafe/redirection/'}\n response = self.client.post('/i18n/setlang/', data=post_data)\n self.assertEqual(response.url, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "e54fa7a88727551a591275c4492caf60", "score": "0.53114825", "text": "def set_language(request, lang_code):\n request.method = 'POST'\n request.POST = {'language': lang_code}\n django.views.i18n.set_language(request)\n return django.http.HttpResponse(lang_code, content_type=\"text/plain\")", "title": "" }, { "docid": "466d737cce0a9e0bb5902729035fde7c", "score": "0.5296142", "text": "def show_language(q):\n if q.startswith('Q'):\n return render_template(\"language.html\", q=q)\n else:\n q = q.split('-')[-1]\n return redirect(url_for('app.show_language', q=q), code=302)", "title": "" }, { "docid": "2e0e40bd8c45d8ee112d622e1d98ffa4", "score": "0.5279068", "text": "def test_setlang(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code, 'next': '/'}\n response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i_should_not_be_used/')\n self.assertRedirects(response, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)\n # The language is set in a cookie.\n language_cookie = self.client.cookies[settings.LANGUAGE_COOKIE_NAME]\n self.assertEqual(language_cookie.value, lang_code)\n self.assertEqual(language_cookie['domain'], '')\n self.assertEqual(language_cookie['path'], '/')\n self.assertEqual(language_cookie['max-age'], '')", "title": "" }, { "docid": "2481cf95f0c9bb69de3e85c19f663128", "score": "0.52630293", "text": "async def setlanguage(self, ctx):\n languages = {\n \"gb\": \"English\",\n \"es\": \"Español\",\n \"fr\": \"Français\",\n \"de\": \"Deutsch\",\n \"ru\": 
\"русский\",\n \"ua\": \"Українська\",\n \"nl\": \"Nederlands\",\n \"pt\": \"Português\"\n }\n supported = \"**Already supported:**\"\n for language in self.bot.cah_packs:\n supported += f\"\\n:flag_{language}: {languages.get(language, 'Unknown')}\"\n soon = \"||**Coming Soon:**\"\n for language, name in languages.items():\n if language not in self.bot.cah_packs:\n soon += f\"\\n:flag_{language}: {name}\"\n language = self.languages.read_key(ctx.guild.id) if ctx.guild else None\n title = f\"{self.bot.emotes['choice']} All available languages:\"\n if language is not None:\n title += f\" (You currently have your language set to :flag_{language}:)\"\n if ctx.channel.permissions_for(ctx.author).manage_guild:\n menu = input.Menu(\n self.bot,\n callbacks=False\n ) # Create our reaction menu\n for language in self.bot.cah_packs:\n menu.add(flag.flag(language))\n msg = await ctx.send(\n supported + \"\\n\\n\" + soon + \"||\\n\\n*Select a flag below (or say it in chat) to set it as the default \"\n \"language for this server*\",\n title=title\n )\n try:\n emote = flag.dflagize(\n await menu(\n msg,\n ctx.author\n )\n )[1:-1].lower()\n self.languages.save_key(ctx.guild.id, emote)\n await ctx.send(\n \"We've successfully changed your language\",\n title=f\":flag_{emote}: Language changed\"\n )\n except asyncio.TimeoutError:\n pass\n finally:\n with contextlib.suppress(discord.NotFound):\n await msg.delete()\n else:\n await ctx.send(\n supported + \"\\n\\n\" + soon + \"||\",\n title=title\n )", "title": "" }, { "docid": "31c6a153df630d2604df06d04f6bd05e", "score": "0.5179484", "text": "def switch_language(self, lang, check=True):\n langs = {\"en\": \"English\",\n \"ru\": u\"Русский\"}\n\n self.app.page_index.footer.scroll_to()\n self.app.page_index.footer.combobox_lang.value = langs[lang]\n\n if check:\n expects = {\"en\": \"Login\",\n \"ru\": u\"Войти\"}\n\n check_that(\n lambda: self.app.page_index.button_login.is_present and\n self.app.page_index.button_login.value,\n returns(equal_to(expects[lang]), timeout=config.PAGE_TIMEOUT),\n \"text on login button is changed to language: \" + langs[lang])", "title": "" }, { "docid": "6d2167566e35039a6fd2e10300aca8b3", "score": "0.51657957", "text": "def test_setlang_redirect_to_referer(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code}\n response = self.client.post('/i18n/setlang/', post_data, HTTP_REFERER='/i18n/')\n self.assertRedirects(response, '/i18n/', fetch_redirect_response=False)\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "af10021c0b054584d01ed793749e6799", "score": "0.5155347", "text": "def set_language(self):\n self.parent.set_language()", "title": "" }, { "docid": "9d175e46b77543b0fd5b5dee0dd59ff4", "score": "0.51507175", "text": "def check(self):\n response = self.model.detectLanguage(self.mainView.inputField.toPlainText())\n\n if response == \"error\":\n self.mainView.outputField.setHtml(\"No connection could be established to the language service, \"\n \"are you sure the IP address and/or port is correct?\")\n else:\n reliable = \"yes\" if response[\"reliable\"] else \"no\"\n self.mainView.outputField.setHtml(\"Reliable: <b>%s</b><br/>\"\n \"Language: <b>%s</b><br/>\"\n \"Probability: <b>%i%%</b>\"\n % (reliable, response[\"language\"], response[\"prob\"]))", "title": "" }, { "docid": "cff86cff2835c6b46bc30e9d07e69932", "score": "0.5133239", "text": "def add_language(request):\n if request.authenticated_userid is None:\n raise 
exc.HTTPNotFound()\n\n name = request.matchdict[\"language\"]\n public_group_id = request.matchdict[\"public_group_id\"]\n\n language = models.Language.get_by_name(name)\n\n if not language:\n language = models.Language(name=name)\n request.db.add(language)\n # We need to flush the db session here so that language.id will be generated.\n request.db.flush()\n url = request.route_url('translation_read', public_language_id=language.pubid, public_group_id=public_group_id)\n return exc.HTTPSeeOther(url)", "title": "" }, { "docid": "a4f32fa292d9a42d01e7a799051bc24a", "score": "0.5094254", "text": "def Language():", "title": "" }, { "docid": "b5696185174add3e961b74ee0e7f1cd3", "score": "0.5070864", "text": "def test_language(self):\n # Defaults to the first language in the language menu for the repository\n # if no other hint is available.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/japan')\n assert doc.xpath_one('/html').get('lang') == 'ja'\n\n # Follows \"lang\" URL parameter when avaiable. Once you specify \"lang\"\n # URL parameter, it remembers the choice in a cookie.\n # \"lang\" URL parameter precedes the cookie.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/haiti')\n assert doc.xpath_one('/html').get('lang') == 'en'\n doc = self.go('/haiti?lang=ja')\n assert doc.xpath_one('/html').get('lang') == 'ja'\n doc = self.go('/haiti')\n assert doc.xpath_one('/html').get('lang') == 'ja'\n doc = self.go('/haiti?lang=ko')\n assert doc.xpath_one('/html').get('lang') == 'ko'\n\n # Follows \"Accept-Language\" HTTP header when available.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/haiti', accept_language='ko')\n assert doc.xpath_one('/html').get('lang') == 'ko'\n\n # Uses one with higher quality value (defaults to 1) in the header.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/haiti', accept_language='ko,ja;q=0.9')\n assert doc.xpath_one('/html').get('lang') == 'ko'\n\n # Falls back to lower quality languages when the language is not\n # supported.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/haiti', accept_language='xx,ja;q=0.9,ko;q=0.8')\n assert doc.xpath_one('/html').get('lang') == 'ja'\n\n # \"lang\" URL parameter precedes \"Accept-Language\" HTTP header.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/haiti?lang=ja', accept_language='ko')\n assert doc.xpath_one('/html').get('lang') == 'ja'\n\n # The cookie precedes \"Accept-Language\" HTTP header.\n self.s = scrape.Session(verbose=1)\n doc = self.go('/haiti?lang=ja')\n assert doc.xpath_one('/html').get('lang') == 'ja'\n doc = self.go('/haiti', accept_language='ko')\n assert doc.xpath_one('/html').get('lang') == 'ja'", "title": "" }, { "docid": "2b738b009e5f7be5a99e89a875da4013", "score": "0.5070012", "text": "def ChangeLanguage(self, userUuid: Any, language: str) -> bool:\n ...", "title": "" }, { "docid": "16504a6075fd604d1f04a791160fbee8", "score": "0.505985", "text": "def translate(request, appname, rosetta_i18n_lang_code, filter='all', page=1):\n\n def fix_nls(in_,out_):\n \"\"\"Fixes submitted translations by filtering carriage returns and pairing\n newlines at the begging and end of the translated string with the original\n \"\"\"\n if 0 == len(in_) or 0 == len(out_):\n return out_\n\n if \"\\r\" in out_ and \"\\r\" not in in_:\n out_=out_.replace(\"\\r\",'')\n\n if \"\\n\" == in_[0] and \"\\n\" != out_[0]:\n out_ = \"\\n\" + out_\n elif \"\\n\" != in_[0] and \"\\n\" == out_[0]:\n out_ = out_.lstrip()\n\n if \"\\n\" == in_[-1] and \"\\n\" != out_[-1]:\n out_ += \"\\n\"\n elif \"\\n\" != 
in_[-1] and \"\\n\" == out_[-1]:\n out_ = out_.rstrip()\n\n return out_\n\n po = pofile_by_appname(appname, rosetta_i18n_lang_code, request.user)\n rosetta_i18n_fn = po['path']\n rosetta_i18n_write = po['writable']\n rosetta_last_save_error = False\n\n if '_next' in request.POST:\n rx = re.compile(r'm_([0-9a-f]+)')\n rx_plural = re.compile(r'm_([0-9a-f]+)_([0-9]+)')\n file_change = False\n\n for key, value in request.POST.items():\n md5hash = None\n plural_id = None\n\n if rx_plural.match(key):\n md5hash = str(rx_plural.match(key).groups()[0])\n # polib parses .po files into unicode strings, but\n # doesn't bother to convert plural indexes to int,\n # so we need unicode here.\n plural_id = unicode(rx_plural.match(key).groups()[1])\n\n elif rx.match(key):\n md5hash = str(rx.match(key).groups()[0])\n\n if md5hash is not None:\n entry = po['entries'].get(md5hash)\n # If someone did a makemessage, some entries might\n # have been removed, so we need to check.\n if entry:\n entry_change = False\n old_msgstr = entry.msgstr\n\n if plural_id is not None:\n plural_string = fix_nls(entry.msgstr_plural[plural_id], value)\n entry_change = entry_changed or entry.msgstr_plural[plural_id] != plural_string\n entry.msgstr_plural[plural_id] = plural_string\n else:\n msgstr = fix_nls(entry.msgid, value)\n entry_change = entry_changed or entry.msgstr != msgstr\n entry.msgstr = msgstr\n\n old_fuzzy = 'fuzzy' in entry.flags\n new_fuzzy = bool(request.POST.get('f_%s' % md5hash, False))\n if new_fuzzy != old_fuzzy:\n entry_change = True\n if new_fuzzy:\n entry.flags.append('fuzzy')\n else:\n entry.flags.remove('fuzzy')\n\n if entry_change:\n file_change = True\n entry_changed.send(sender=entry,\n user=request.user,\n old_msgstr = old_msgstr,\n old_fuzzy = old_fuzzy,\n pofile = po['pofile'],\n language_code = rosetta_i18n_lang_code,\n )\n else:\n rosetta_last_save_error = True\n\n if file_change:\n try:\n po['pofile'].metadata['Last-Translator'] = unicodedata.normalize('NFKD', u\"%s %s <%s>\" %(request.user.first_name,request.user.last_name,request.user.email)).encode('ascii', 'ignore')\n po['pofile'].metadata['X-Translated-Using'] = u\"django-rosetta %s\" % rosetta.get_version(False)\n po['pofile'].metadata['PO-Revision-Date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M%z')\n except UnicodeDecodeError:\n pass\n\n po['last_modified'] = time.time()\n po = poutil.upd_pofile(po)\n\n post_save.send(sender=None,language_code=rosetta_i18n_lang_code,request=request)\n\n # Try auto-reloading via the WSGI daemon mode reload mechanism\n if rosetta_settings.WSGI_AUTO_RELOAD and \\\n request.environ.has_key('mod_wsgi.process_group') and \\\n request.environ.get('mod_wsgi.process_group',None) and \\\n request.environ.has_key('SCRIPT_FILENAME') and \\\n int(request.environ.get('mod_wsgi.script_reloading', '0')):\n try:\n os.utime(request.environ.get('SCRIPT_FILENAME'), None)\n except OSError:\n pass\n # Try auto-reloading via uwsgi daemon reload mechanism\n if rosetta_settings.UWSGI_AUTO_RELOAD:\n try:\n import uwsgi # pretty easy right?\n uwsgi.reload()\n except: # we may not be running under uwsgi :P\n pass\n\n return shortcuts.redirect(request.path + '?' 
+ request.META['QUERY_STRING'])\n\n if filter == 'untranslated':\n entries_source = po['pofile'].untranslated_entries()\n elif filter == 'translated':\n entries_source = po['pofile'].translated_entries()\n elif filter == 'fuzzy':\n entries_source = po['pofile'].fuzzy_entries()\n else:\n entries_source = (e for e in po['pofile'] if not e.obsolete)\n\n query = request.GET.get('q', '').strip()\n if query:\n rx = re.compile(re.escape(query), re.IGNORECASE)\n entries_source = (e for e in entries_source if rx.search(\"\\n\".join((smart_unicode(e.msgstr), smart_unicode(e.msgid), smart_unicode(e.comment), u\"\\t\".join([o[0] for o in e.occurrences])))))\n\n paginator = Paginator(list(entries_source), rosetta_settings.MESSAGES_PER_PAGE)\n\n if int(page) <= paginator.num_pages and int(page) > 0:\n page = int(page)\n else:\n page = 1\n\n rosetta_messages = paginator.page(page).object_list\n for message in rosetta_messages:\n message.md5hash = hashlib.md5(message.msgid.encode('utf8')).hexdigest()\n\n if rosetta_settings.MAIN_LANGUAGE and rosetta_settings.MAIN_LANGUAGE != rosetta_i18n_lang_code:\n main_language = dict(rosetta_settings.LANGUAGES).get(rosetta_settings.MAIN_LANGUAGE)\n\n fl = (\"/%s/\" % rosetta_settings.MAIN_LANGUAGE).join(rosetta_i18n_fn.split(\"/%s/\" % rosetta_i18n_lang_code))\n po = polib.pofile( fl,\n klass=poutil.SmartPOFile,\n wrapwidth=rosetta_settings.POFILE_WRAP_WIDTH\n )\n\n main_messages = []\n for message in messages:\n message.main_lang = po.find(message.msgid).msgstr\n\n return shortcuts.render_to_response('rosetta/pofile.html', {\n 'rosetta_i18n_fn' : rosetta_i18n_fn,\n 'rosetta_i18n_write' : rosetta_i18n_write,\n 'rosetta_i18n_pofile' : po['pofile'],\n 'rosetta_i18n_lang_bidi': get_language_info(rosetta_i18n_lang_code)['bidi'],\n 'rosetta_messages' : rosetta_messages,\n 'rosetta_i18n_lang_name': dict(rosetta_settings.LANGUAGES)[rosetta_i18n_lang_code],\n 'rosetta_i18n_lang_code': rosetta_i18n_lang_code,\n 'rosetta_i18n_app' : appname,\n 'rosetta_i18n_filter' : filter,\n 'rosetta_last_save_error' : rosetta_last_save_error,\n\n 'ENABLE_TRANSLATION_SUGGESTIONS' : rosetta_settings.ENABLE_TRANSLATION_SUGGESTIONS and \\\n rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE != rosetta_i18n_lang_code,\n 'BING_APPID' : rosetta_settings.BING_APPID,\n 'GOOGLE_API_KEY' : rosetta_settings.GOOGLE_API_KEY,\n\n 'MESSAGES_SOURCE_LANGUAGE_NAME' : rosetta_settings.MESSAGES_SOURCE_LANGUAGE_NAME,\n 'MESSAGES_SOURCE_LANGUAGE_CODE' : rosetta_settings.MESSAGES_SOURCE_LANGUAGE_CODE,\n\n 'query' : query,\n 'paginator' : paginator,\n 'needs_pagination' : paginator.num_pages > 1,\n 'page_range' : poutil.pagination_range(1, paginator.num_pages, page),\n 'page' : page,\n }, context_instance=template.RequestContext(request))", "title": "" }, { "docid": "1e778ae8207677afe211d441b9c46a01", "score": "0.50132793", "text": "def test_setlang_default_redirect(self):\n lang_code = self._get_inactive_language_code()\n post_data = {'language': lang_code}\n response = self.client.post('/i18n/setlang/', post_data)\n self.assertRedirects(response, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "3f6603bfa4aee0a981d4c84e285f7650", "score": "0.50070053", "text": "def on_ok(self, keypress=None):\n allright = self.values_to_tmp(save=True)\n\n # everything allright? 
then switch form!\n if allright:\n # get object and save files\n lang = self.parentApp.tmpDefault.language\n self.parentApp.S.defaults[lang] = self.parentApp.tmpDefault\n self.parentApp.S.save_settings_to_file()\n self.parentApp.L.save_client_list_to_file()\n\n # switch back\n self.parentApp.setNextForm('Defaults')\n self.parentApp.switchFormNow()\n else:\n npyscreen.notify_confirm(\n 'Language name not possible. It already exists, ' +\n 'is empty or something else. Choose another one!',\n form_color='WARNING'\n )", "title": "" }, { "docid": "3f6603bfa4aee0a981d4c84e285f7650", "score": "0.50070053", "text": "def on_ok(self, keypress=None):\n allright = self.values_to_tmp(save=True)\n\n # everything allright? then switch form!\n if allright:\n # get object and save files\n lang = self.parentApp.tmpDefault.language\n self.parentApp.S.defaults[lang] = self.parentApp.tmpDefault\n self.parentApp.S.save_settings_to_file()\n self.parentApp.L.save_client_list_to_file()\n\n # switch back\n self.parentApp.setNextForm('Defaults')\n self.parentApp.switchFormNow()\n else:\n npyscreen.notify_confirm(\n 'Language name not possible. It already exists, ' +\n 'is empty or something else. Choose another one!',\n form_color='WARNING'\n )", "title": "" }, { "docid": "3f6603bfa4aee0a981d4c84e285f7650", "score": "0.50070053", "text": "def on_ok(self, keypress=None):\n allright = self.values_to_tmp(save=True)\n\n # everything allright? then switch form!\n if allright:\n # get object and save files\n lang = self.parentApp.tmpDefault.language\n self.parentApp.S.defaults[lang] = self.parentApp.tmpDefault\n self.parentApp.S.save_settings_to_file()\n self.parentApp.L.save_client_list_to_file()\n\n # switch back\n self.parentApp.setNextForm('Defaults')\n self.parentApp.switchFormNow()\n else:\n npyscreen.notify_confirm(\n 'Language name not possible. It already exists, ' +\n 'is empty or something else. 
Choose another one!',\n form_color='WARNING'\n )", "title": "" }, { "docid": "11e1265b16504277bf735ee0e9d1bccd", "score": "0.50053555", "text": "def _do_on_login(self, lti_request: LTI) -> HttpResponse:\n raise NotImplementedError()", "title": "" }, { "docid": "feda1cfee6bff441418a3392b6070f91", "score": "0.49924704", "text": "def changeEvent(self, event):\n if event.type() == QEvent.LanguageChange:\n self.translation()\n self.retranslateUi(self)\n else:\n QDialog.changeEvent(self, event)", "title": "" }, { "docid": "422f00543f3d3c3676fbc832e7f52f2b", "score": "0.4991459", "text": "def test_language_fallback(self):\n from cms.views import details\n p1 = create_page(\"page\", \"nav_playground.html\", \"en\", published=True)\n request = self.get_request('/de/', 'de')\n response = details(request, p1.get_path())\n self.assertEqual(response.status_code, 302)\n self.assertEqual(response['Location'], '/en/')\n lang_settings = copy.deepcopy(settings.CMS_LANGUAGES)\n lang_settings[1][0]['fallbacks'] = []\n lang_settings[1][1]['fallbacks'] = []\n with SettingsOverride(CMS_LANGUAGES=lang_settings):\n response = self.client.get(\"/de/\")\n self.assertEquals(response.status_code, 404)\n lang_settings = copy.deepcopy(settings.CMS_LANGUAGES)\n lang_settings[1][0]['redirect_on_fallback'] = False\n lang_settings[1][1]['redirect_on_fallback'] = False\n with SettingsOverride(CMS_LANGUAGES=lang_settings):\n response = self.client.get(\"/de/\")\n self.assertEquals(response.status_code, 200)", "title": "" }, { "docid": "0639a763527201530b16b564e1dd17cc", "score": "0.49773467", "text": "def activate(language):\r\n _active[currentThread()] = translation(language)", "title": "" }, { "docid": "5095ec3b51a3533f5b2f218cf4ef5a61", "score": "0.49678242", "text": "async def godbolt_languages(self, ctx: commands.Context) -> None:\n async with aiohttp.ClientSession() as session:\n async with session.get(self._endpoint(\"/api/languages\"),\n headers={\"accept\": \"application/json\"}) as resp:\n\n resp_data: List[Dict[str, str]] = await resp.json()\n page_data: List[str] = [ str(d[\"id\"]) + \" - \" + str(d[\"name\"]) for d in resp_data ]\n \n p: Pages = Pages(ctx, entries=page_data, per_page=20)\n p.embed.title = \"Available Languages\"\n p.embed.description = \"Listed id - language. 
Use the ID for other commands.\"\n\n await p.paginate()", "title": "" }, { "docid": "5f25391155e187d32e01ad896717fcba", "score": "0.49338928", "text": "def language(self, language):\n # TODO: assert language is correct\n self.b.language(language)", "title": "" }, { "docid": "e069f064661a0342843ad2f75081a415", "score": "0.4916723", "text": "def list_languages(request, do_session_warn=False):\n storage = get_storage(request)\n languages = []\n\n if 'filter' in request.GET:\n if request.GET.get('filter') in ('project', 'third-party', 'django', 'all'):\n filter_ = request.GET.get('filter')\n storage.set('rosetta_i18n_catalog_filter', filter_)\n return HttpResponseRedirect(reverse('rosetta-pick-file'))\n\n rosetta_i18n_catalog_filter = storage.get('rosetta_i18n_catalog_filter', 'project')\n\n third_party_apps = rosetta_i18n_catalog_filter in ('all', 'third-party')\n django_apps = rosetta_i18n_catalog_filter in ('all', 'django')\n project_apps = rosetta_i18n_catalog_filter in ('all', 'project')\n\n has_pos = False\n for language in settings.LANGUAGES:\n if not can_translate_language(request.user, language[0]):\n continue\n\n pos = find_pos(language[0], project_apps=project_apps, django_apps=django_apps, third_party_apps=third_party_apps)\n has_pos = has_pos or len(pos)\n languages.append(\n (\n language[0],\n _(language[1]),\n sorted([(get_app_name(l), os.path.realpath(l), pofile(l)) for l in pos], key=lambda app: app[0]),\n )\n )\n try:\n ADMIN_MEDIA_PREFIX = settings.ADMIN_MEDIA_PREFIX\n except AttributeError:\n ADMIN_MEDIA_PREFIX = settings.STATIC_URL + 'admin/'\n do_session_warn = do_session_warn and 'SessionRosettaStorage' in rosetta_settings.STORAGE_CLASS and 'signed_cookies' in settings.SESSION_ENGINE\n\n return render(request, 'rosetta/languages.html', dict(\n version=rosetta.get_version(True),\n rosetta_settings=rosetta_settings,\n ADMIN_MEDIA_PREFIX=ADMIN_MEDIA_PREFIX,\n do_session_warn=do_session_warn,\n languages=languages,\n has_pos=has_pos,\n rosetta_i18n_catalog_filter=rosetta_i18n_catalog_filter\n ))", "title": "" }, { "docid": "4a8d3587381186616bec657b4a968e4c", "score": "0.4892841", "text": "def show_text_to_languages():\n if request.method == 'GET':\n text = request.args.get('text')\n casing = request.args.get('casing')\n elif request.method == 'POST':\n text = request.form.get('text')\n casing = request.form.get('casing')\n else:\n assert False\n\n # Sanitize casing\n if casing not in ['none', 'lowercase', 'uppercase',\n 'lowercase-first-sentence-letters',\n 'uppercase-first-word-letters']:\n casing = 'lowercase_first_sentence_letters'\n casing = casing.replace('-', '_')\n\n if not text:\n return render_template('text_to_languages.html',\n casing=casing)\n\n # Casing processing\n cased_text = text.strip()\n if casing == 'none':\n pass\n elif casing == 'lowercase':\n cased_text = cased_text.lower()\n elif casing == 'uppercase':\n cased_text = cased_text.upper()\n elif casing == 'lowercase_first_sentence_letters':\n cased_text = lowercase_first_sentence_letters(cased_text)\n elif casing == 'uppercase_first_word_letters':\n cased_text = cased_text.title()\n else:\n assert False\n list_of_words = text_to_words(cased_text)\n\n # Make the list only consists of unique words\n list_of_words = list(set(list_of_words))\n\n # Only match languages with more than 100 lexemes\n language_codes = get_wikidata_language_codes_cached(100)\n\n # Build list of monolingual strings\n words = ''\n for word in list_of_words:\n for language_code in language_codes:\n if words != '':\n words += ' '\n 
words += u('\"{word}\"@{language_code}').format(\n word=word, language_code=language_code)\n\n return render_template('text_to_languages.html', text=text, words=words,\n casing=casing)", "title": "" }, { "docid": "9a5b49026bc4c81e886b9a68c0f07294", "score": "0.4887013", "text": "def _change_language(lang, *args, **kwargs):\n if lang is None:\n return no_update\n return [v[int(lang)] for (_, f), v in sorted(LANGUAGES.items()) if f]", "title": "" }, { "docid": "470df491fd221a0050627d8a2fca4c05", "score": "0.4886761", "text": "def requested_lang():\n lang = request.args.get(\"lang\")\n if lang: return lang\n\n return request.accept_languages.best_match(ALL_LANGUAGES.keys(), 'en')", "title": "" }, { "docid": "a2890b418a76ffe9545571ec77da2331", "score": "0.48727247", "text": "def check_language(self,data:str)->bool:\r\n if not self.lang_checker :\r\n self.lang_checker = spacy.load('en')\r\n self.lang_checker.add_pipe(LanguageDetector(), name='language_detector', last=True)\r\n self.lang_checker.max_length = 999999999999999999999999999999\r\n \r\n doc = self.lang_checker(data[0]) \r\n doc1 = self.lang_checker(data[-1])\r\n return doc._.language['language'] == doc1._.language['language'] == 'en'", "title": "" }, { "docid": "193973ca52658462838c32979d0abd82", "score": "0.48104978", "text": "def test_start_english(self):\n doc = self.go('/haiti?lang=en')\n assert 'I\\'m looking for someone' in doc.text", "title": "" }, { "docid": "f3a806d09305fe839f54b0237f504bf4", "score": "0.47928205", "text": "def lang(self, lang):\n self.graph.set((self.asNode(), DC.language, Literal(lang)))", "title": "" }, { "docid": "35817d96805b2e6085b74abf7637a2c3", "score": "0.47877172", "text": "def complete():\n if current_user.is_authenticated:\n login_complete = gettext(\"Login complete\")\n user_logged_in = gettext(\"The login process has been completed \"\n \"and %(user)s has been logged in.\", user=session['user_info']['name'])\n close_window = gettext(\"This window should close automatically in a second.\")\n cannot_send_data = gettext(\"Could not send login data back to the application. \"\n \"Please close this window manually and reload the application\")\n if request.args.get('next'):\n redirecting = gettext('You are being redirected to the application. '\n 'If it does not happen in a couple of seconds, '\n 'click <a href=\"%(next)s\">here</a>. ', next=request.args.get('next'))\n resp = make_response(f\"\"\"\n <html>\n <body style=\"display: flex; justify-content: center;\">\n <div style=\"max-width: 400px;\">\n <h3 style=\"border-bottom: 1px solid darkgreen; text-align: center; margin-bottom: 40px\">\n {login_complete}\n </h3>\n <div style=\"padding-top: 10px; padding-bottom: 10px;\">\n {user_logged_in}\n <br><br>\n {redirecting}\n </div>\n </div>\n </body>\n </html>\n \"\"\", 302)\n resp.headers['Location'] = request.args.get('next')\n return resp\n else:\n return make_response(f\"\"\"\n <html>\n <head>\n {broadcast_channel_polyfill}\n </head>\n <body style=\"display: flex; justify-content: center;\">\n <div style=\"max-width: 400px;\">\n <h3 style=\"border-bottom: 1px solid darkgreen; text-align: center; margin-bottom: 40px\">\n {login_complete}\n </h3>\n <div style=\"padding-top: 10px; padding-bottom: 10px;\">\n {user_logged_in}\n <br><br>\n {close_window}\n </div>\n <script>\n setTimeout(() => {{\n const bc = new BroadcastChannel('popup-login-channel');\n bc.postMessage({{\n type: \"login\",\n status: \"${{state.authState.loggedIn ? 
'success' : 'error'}}\",\n message: \"\"\n }})\n setTimeout(() => {{\n alert('{cannot_send_data}')\n }}, 5000)\n }}, 1000)\n </script>\n </div>\n </body>\n </html>\n \"\"\")\n else:\n auth_failed = gettext(\"Authentication failed\")\n failed_expl = gettext(\"The authentication process failed. \"\n \"Please, close the application, reopen it and try again.\")\n support = gettext(\"If it does not help, please call the technical support.\")\n return make_response(f\"\"\"\n <html>\n <body style=\"display: flex; justify-content: center;\">\n <div style=\"max-width: 400px;\">\n <h3 style=\"border-bottom: 1px solid darkgreen; text-align: center; margin-bottom: 40px\">\n {auth_failed}\n </h3>\n <div style=\"padding-top: 10px; padding-bottom: 10px;\">\n {failed_expl}\n <br><br>\n {support}\n </div>\n </div>\n </body>\n </html>\n \"\"\")", "title": "" }, { "docid": "7b1324617547dd87960ccf9ba3b5bf5f", "score": "0.47555587", "text": "def get_view_lang(view, settings_storage):\n syntax = Tools.get_view_syntax(view)\n for lang, syntaxes in settings_storage.valid_lang_syntaxes.items():\n if syntax in syntaxes:\n return lang\n log.debug(\"ECC does nothing for language syntax: '%s'\", syntax)\n return None", "title": "" }, { "docid": "a24f3a0f136e5a73724c7f981e61a58d", "score": "0.47499374", "text": "def localization(request):\n if request.locale == settings.WIKI_DEFAULT_LANGUAGE:\n return HttpResponseRedirect(reverse('dashboards.contributors'))\n data = {'overview_rows': partial(overview_rows, request.locale)}\n return _kb_main(request, L10N_READOUTS, 'localization.html',\n extra_data=data)", "title": "" }, { "docid": "413d480b1bab28dfd215d086abd28322", "score": "0.47493586", "text": "def load_languages_list(self):\n\n from os.path import join\n languages_obj = languages_parser()\n content = self.utOpenFile(join(EEAGLOSSARY_PATH, 'config', 'languages.xml'))\n languages_handler, error = languages_obj.parseContent(content)\n\n for lang in languages_handler.languages:\n self.set_languages_list(lang.lang, lang.charset, lang.english_name)\n if int(lang.unicode):\n self.set_unicode_langs(lang.english_name)\n if int(lang.searchable):\n self.set_searchable_langs(lang.english_name)\n self._p_changed = 1", "title": "" }, { "docid": "2d9c2eef65c1ec86437776c3b3d88509", "score": "0.4746911", "text": "def on_text_certificazione_textEdited(self, p0):\r\n\t\tself.setChanged()", "title": "" }, { "docid": "0e4d2e91a7b4982a3134fe9954e8b9e4", "score": "0.47417298", "text": "def bot_handler(event, _):\n logger.info(\n \"%s:%s Request received:%s\",\n __name__,\n str(time.time()),\n str(event))\n\n try:\n default_language = event[\"lang\"]\n logger.info(\"Language is %s\", event[\"lang\"])\n except KeyError:\n default_language = \"fa\"\n logger.info(\"Language is not defined!\")\n\n try:\n token = event['token']\n except KeyError:\n logger.error(\"Token is not defined!\")\n return None\n\n try:\n tmsg = TelegramMessage(event, default_language)\n logger.info(\"TMSG object: {}\".format(tmsg))\n except Exception as exc:\n logger.error(\n 'Error in Telegram Message parsing {} {}'.format(event, str(exc)))\n return None\n\n preferred_lang = dynamodb.get_user_lang(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id)\n if (preferred_lang is None or\n preferred_lang not in CONFIG['SUPPORTED_LANGUAGES']):\n preferred_lang = default_language\n current_language = CONFIG['SUPPORTED_LANGUAGES'].index(preferred_lang)\n logger.info('User language is {}'.format(preferred_lang))\n\n change_lang(preferred_lang)\n tmsg.lang = 
preferred_lang\n\n if tmsg.body == globalvars.lang.text('MENU_BACK_HOME'):\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n return\n\n if tmsg.command == CONFIG['TELEGRAM_START_COMMAND'] and len(tmsg.command_arg) > 0:\n tmsg.command = \"\"\n tmsg.body = base64.urlsafe_b64decode(tmsg.command_arg)\n\n # Check for commands (starts with /)\n if tmsg.command == CONFIG[\"TELEGRAM_START_COMMAND\"]:\n dynamodb.create_chat_status(CONFIG['DYNAMO_TABLE'], tmsg.chat_id, STATUSES['START'])\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text(\"MSG_INITIAL_SCREEN\").format(CONFIG['VERSION']))\n keyboard = make_language_keyboard()\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_SELECT_LANGUAGE'),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['SET_LANGUAGE'])\n return None\n elif tmsg.command == CONFIG['TELEGRAM_ADMIN_COMMAND']:\n chat_status = int(dynamodb.get_chat_status(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id))\n if not admin_menu(token, tmsg, chat_status):\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME'),\n globalvars.HOME_KEYBOARD\n )\n return None\n\n # non-command texts\n elif tmsg.command == '': # This is a message not started with /\n chat_status = int(dynamodb.get_chat_status(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id))\n\n if chat_status >= STATUSES['ADMIN_SECTION_HOME']:\n if not admin_menu(token, tmsg, chat_status):\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME'),\n globalvars.HOME_KEYBOARD\n )\n return None\n\n elif chat_status == STATUSES['SET_LANGUAGE']:\n if (tmsg.body is None or\n tmsg.body not in globalvars.lang.text('SUPPORTED_LANGUAGES')):\n message = globalvars.lang.text('MSG_LANGUAGE_CHANGE_ERROR')\n else:\n new_lang = CONFIG['SUPPORTED_LANGUAGES'][globalvars.lang.text(\n 'SUPPORTED_LANGUAGES').index(tmsg.body)]\n dynamodb.save_user_lang(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id,\n language=new_lang)\n change_lang(new_lang)\n message = globalvars.lang.text('MSG_LANGUAGE_CHANGED').format(tmsg.body)\n telegram.send_message(\n token,\n tmsg.chat_id,\n message)\n\n try:\n user_exist = api.get_user(tmsg.user_uid)\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n\n if not user_exist:\n choices, a, b = get_choice(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id)\n if choices:\n keyboard = telegram.make_keyboard(choices, 2, '')\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n \"{}\\n{} + {}:\".format(globalvars.lang.text(\"MSG_ASK_CAPTCHA\"), a, b),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['FIRST_CAPTCHA'])\n else:\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n return None\n\n elif chat_status == STATUSES['FIRST_CAPTCHA']:\n check = check_captcha(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id,\n sum=int(tmsg.body))\n if check:\n tos = get_tos_link()\n pp = get_pp_link()\n if tos is not None:\n telegram.send_message(\n token,\n tmsg.chat_id,\n tos\n )\n if pp is not None:\n telegram.send_message(\n token,\n tmsg.chat_id,\n pp\n )\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text(\"MSG_OPT_IN\"),\n globalvars.OPT_IN_KEYBOARD)\n 
save_chat_status(tmsg.chat_id, STATUSES['OPT_IN'])\n else:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_WRONG_CAPTCHA'))\n choices, a, b = get_choice(\n table=CONFIG[\"DYNAMO_TABLE\"],\n chat_id=tmsg.chat_id)\n if choices:\n keyboard = telegram.make_keyboard(choices, 2, '')\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n \"{}\\n{} + {}:\".format(globalvars.lang.text(\"MSG_ASK_CAPTCHA\"), a, b),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['FIRST_CAPTCHA'])\n return None\n\n elif chat_status == STATUSES['OPT_IN']:\n if tmsg.body == globalvars.lang.text('MENU_PRIVACY_POLICY_CONFIRM'):\n try:\n api.create_user(user_id=tmsg.user_uid)\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n else:\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_PRIVACY_POLICY_DECLINE'),\n globalvars.OPT_IN_DECLINED_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['OPT_IN_DECLINED'])\n return None\n\n elif chat_status == STATUSES['OPT_IN_DECLINED']:\n if tmsg.body == globalvars.lang.text('MENU_BACK_PRIVACY_POLICY'):\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text(\"MSG_OPT_IN\"),\n globalvars.OPT_IN_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['OPT_IN'])\n elif tmsg.body == globalvars.lang.text('MENU_HOME_CHANGE_LANGUAGE'):\n keyboard = make_language_keyboard()\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_SELECT_LANGUAGE'),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['SET_LANGUAGE'])\n return None\n\n elif chat_status == STATUSES['HOME']:\n if tmsg.body == globalvars.lang.text('MENU_HOME_EXISTING_KEY'):\n try:\n user_exist = api.get_user(tmsg.user_uid)\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n\n if not user_exist:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_NO_ACCOUNT'),\n parse='MARKDOWN')\n telegram.send_message(\n token,\n tmsg.chat_id,\n '/start')\n return None\n elif not user_exist['outline_key']:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_NO_EXISTING_KEY'))\n else:\n awsurl = (CONFIG['OUTLINE_AWS_URL'].format(urllib.parse.quote(user_exist['outline_key'])))\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_EXISTING_KEY_A').format(awsurl),\n parse='MARKDOWN')\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_EXISTING_KEY_B'),\n parse='MARKDOWN')\n telegram.send_message(\n token,\n tmsg.chat_id,\n user_exist['outline_key'])\n\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n return None\n elif tmsg.body == globalvars.lang.text('MENU_CHECK_STATUS'):\n blocked = False\n banned = False\n try:\n user_info = api.get_outline_user(tmsg.user_uid)\n vpnuser = api.get_user(tmsg.user_uid)\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n banned = vpnuser['banned']\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ACCOUNT_INFO_BANNED') \\\n if banned else globalvars.lang.text('MSG_ACCOUNT_INFO_OK')\n )\n if not 
banned:\n if user_info is not None:\n try:\n serverinfo = api.get_outline_server_info(user_info['server'])\n\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n\n if serverinfo is not None:\n blocked = serverinfo['is_blocked']\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_SERVER_INFO_BLOCKED') \\\n if blocked else globalvars.lang.text('MSG_SERVER_INFO_OK')\n )\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n return None\n elif tmsg.body == globalvars.lang.text('MENU_HOME_NEW_KEY'):\n try:\n user_exist = api.get_user(tmsg.user_uid)\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n\n if not user_exist:\n logger.info(\"New user: {}\".format(tmsg.user_uid))\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_NO_ACCOUNT'),\n parse='MARKDOWN')\n telegram.send_message(\n token,\n tmsg.chat_id,\n '/start')\n return None\n elif not user_exist['outline_key']:\n create_new_key(tmsg, token)\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n return None\n\n issues_dict = api.get_issues(tmsg.lang)\n issues = list(issues_dict.values())\n keyboard = telegram.make_keyboard(issues, 2, '')\n telegram.send_keyboard(\n token, tmsg.chat_id,\n globalvars.lang.text(\"MSG_ASK_ISSUE\"),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['ASK_ISSUE'])\n return None\n\n elif tmsg.body == globalvars.lang.text('MENU_HOME_FAQ'):\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_FAQ_URL'))\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n return None\n\n elif tmsg.body == globalvars.lang.text('MENU_HOME_INSTRUCTION'):\n photo_name = \"\"\n with open(photo_name, 'rb') as photofile:\n telegram.send_photo(\n token,\n tmsg.chat_id,\n photofile.read(),\n \"instructions\")\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n return None\n\n elif tmsg.body == globalvars.lang.text('MENU_HOME_CHANGE_LANGUAGE'):\n keyboard = make_language_keyboard()\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_SELECT_LANGUAGE'),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['SET_LANGUAGE'])\n return None\n\n elif tmsg.body == globalvars.lang.text('MENU_HOME_PRIVACY_POLICY'):\n telegram.send_message(\n token,\n tmsg.chat_id,\n get_pp_link())\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n return None\n\n elif tmsg.body == globalvars.lang.text('MENU_HOME_SUPPORT'):\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text(\"MSG_SUPPORT_BOT\"))\n telegram.send_message(\n token,\n tmsg.chat_id,\n CONFIG[\"SUPPORT_BOT\"])\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n return None\n\n elif tmsg.body == globalvars.lang.text('MENU_HOME_DELETE_ACCOUNT'):\n keyboard = telegram.make_keyboard(\n globalvars.lang.text('MENU_DELETE_REASONS'),\n 2,\n globalvars.lang.text('MENU_BACK_HOME'))\n telegram.send_keyboard(\n token, tmsg.chat_id,\n globalvars.lang.text(\"MSG_ASK_DELETE_REASONS\"),\n keyboard)\n 
save_chat_status(tmsg.chat_id, STATUSES['DELETE_ACCOUNT_REASON'])\n return None\n\n elif chat_status == STATUSES['ASK_ISSUE']:\n issues_dict = api.get_issues(tmsg.lang)\n issue_ids = [key for (key, value) in issues_dict.items() if value == tmsg.body]\n if not issue_ids:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text(\"MSG_UNSUPPORTED_COMMAND\"))\n else:\n create_new_key(tmsg, token, issue_ids[0])\n\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n return None\n\n elif chat_status == STATUSES['DELETE_ACCOUNT_REASON']:\n if tmsg.body in globalvars.lang.text('MENU_DELETE_REASONS'):\n reason_id = globalvars.lang.text('MENU_DELETE_REASONS').index(tmsg.body)\n logger.debug('user {} wants to delete her account because {}'.format(\n tmsg.user_uid,\n tmsg.body\n ))\n try:\n deleted = api.delete_user(user_id=tmsg.user_uid)\n except Exception:\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'),\n globalvars.HOME_KEYBOARD)\n return None\n if deleted:\n telegram.send_keyboard(\n token, tmsg.chat_id,\n globalvars.lang.text(\"MSG_DELETED_ACCOUNT\"),\n globalvars.BACK_TO_HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['DELETE_ACCOUNT_CONFIRM'])\n return None\n\n else: # unsupported message from user\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text(\"MSG_UNSUPPORTED_COMMAND\"))\n\n try:\n user_exist = api.get_user(tmsg.user_uid)\n except Exception:\n telegram.send_message(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_ERROR'))\n return None\n if not user_exist: # start from First step\n keyboard = make_language_keyboard()\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_SELECT_LANGUAGE'),\n keyboard)\n save_chat_status(tmsg.chat_id, STATUSES['SET_LANGUAGE'])\n return None\n else:\n telegram.send_keyboard(\n token,\n tmsg.chat_id,\n globalvars.lang.text('MSG_HOME_ELSE'),\n globalvars.HOME_KEYBOARD)\n save_chat_status(tmsg.chat_id, STATUSES['HOME'])\n return None", "title": "" }, { "docid": "ba523af4344f36c2181d3ca0836a6c5c", "score": "0.4740332", "text": "def home(self, request):\n session = ChatBotRequest.objects.get(transId=self.session_id)\n menu_text = base_menu[self.lang][\"main menu\"]\n session.level = 5\n session.save()\n\n return self.message_proceed(menu_text)", "title": "" }, { "docid": "3452b77d8807d43a894e282ce20bd23a", "score": "0.4731877", "text": "async def language(self, ctx: commands.Context, value: str=None):\n if value is None:\n lang = await self.config.guild(ctx.guild).language()\n await ctx.send(f'The language is currently set to {lang}.')\n else:\n langs = self._get_languages()\n if value in langs:\n await self.config.guild(ctx.guild).language.set(value)\n await ctx.send(f'You have now changed the language to '\n f'{value}.')\n else:\n await ctx.send(f'Language not recognised. 
Please choose '\n f'from: {humanize_list(langs)}')", "title": "" }, { "docid": "5cc0ef5ab05865fb5f455a6b1246994e", "score": "0.47292176", "text": "def index_html(self, instance, REQUEST=None, RESPONSE=None, disposition='inline'):\n try:\n self.resetLanguage()\n if REQUEST is None and hasattr(instance, 'REQUEST'):\n REQUEST = instance.REQUEST\n if REQUEST is not None and not 'lang' in REQUEST.keys():\n url = REQUEST['ACTUAL_URL']\n url += urlparse(url).query and '&' or '?'\n url += 'lang='+self._getCurrentLanguage(instance)\n return REQUEST.response.redirect(url)\n return super(MultilanguageBlobFieldMixin, self).index_html(instance, REQUEST=REQUEST, RESPONSE=RESPONSE, disposition=disposition)\n finally:\n self.resetLanguage()", "title": "" }, { "docid": "faaba448b8b53e85d243709771910646", "score": "0.47069618", "text": "def analyze_page():\n form = WordsForm()\n if current_user.is_authenticated:\n latest_messages = NLP.query.options(db.joinedload(NLP.chat)).filter_by(userID=current_user.id).order_by(NLP.chatID.desc()).limit(5)\n earliest_messages = NLP.query.options(db.joinedload(NLP.chat)).filter_by(userID=current_user.id).order_by(NLP.chatID).limit(5)\n #list of messages selected from DB\n return render_template(\"analyze.html\", latest_messages=latest_messages, earliest_messages=earliest_messages, form=form)\n else:\n all_latest_messages = NLP.query.options(db.joinedload(NLP.chat),db.joinedload(NLP.user)).order_by(NLP.chatID.desc()).limit(5)\n all_earliest_messages = NLP.query.options(db.joinedload(NLP.chat),db.joinedload(NLP.user)).order_by(NLP.chatID).limit(5)\n return render_template(\"analyze.html\", latest_messages=all_latest_messages , earliest_messages=all_earliest_messages, form=form)", "title": "" }, { "docid": "a5dd6447d088edb9924a171854734406", "score": "0.46977878", "text": "def inject_lang_from_header():\n lang: Optional[str] = request.headers.get(\"lang\")\n if lang:\n old_accepts: LanguageAccept = request.accept_languages\n if \"lang\" in g:\n # check LanguageAccept option in g context\n accepted_languages = g.get(\"lang\")\n if accepted_languages and isinstance(accepted_languages, LanguageAccept):\n old_accepts = accepted_languages\n # inject language from custom header as first choice into request\n values = (lang, 10), *old_accepts\n g.lang = LanguageAccept(values)\n # Force refresh to make sure that the change is applied\n flask_babel_refresh()", "title": "" }, { "docid": "69a291e9b26dfa63e0b3babc330bfa2f", "score": "0.469705", "text": "def on_selection_button_clicked(self, widget):\n #we set the current language filter to the button's label\n self.current_filter_language = widget.get_label()\n print(\"%s language selected!\" % self.current_filter_language)\n #we update the filter, which updates in turn the view\n self.language_filter.refilter()", "title": "" }, { "docid": "d9623fe63ead547d617b73910b265dfd", "score": "0.4690245", "text": "def on_selection_button_clicked(self, widget):\n # we set the current language filter to the button's label\n self.current_filter_language = widget.get_label()\n print(\"%s language selected!\" % self.current_filter_language)\n # we update the filter, which updates in turn the view\n self.language_filter.refilter()", "title": "" }, { "docid": "397cd211d950be721948bf0c6473d3fc", "score": "0.4684519", "text": "def __init__(self):\n self.allowedEntryLanguages = set()", "title": "" }, { "docid": "983848374c60510968d5f265bde5f2b3", "score": "0.46830615", "text": "def set_language(lang=\"\"):\n global _cached_language, _cached_bs_page, 
_cached_json\n\n if _cached_language != lang:\n _cached_language = lang\n # Update the cached copy.\n get_web_page(lang=lang)\n get_json_list(lang=lang)", "title": "" }, { "docid": "2896976cd7078172c8b3b5c438061bf4", "score": "0.4682554", "text": "def focus_is_source(self, lang_name):\n\n if (self.active_field() != None):\n answer = (lang_name == None) or (self.curr_buffer().is_language(lang_name))\n \n return answer", "title": "" }, { "docid": "fd2968e7a2e9fa278d428e92295d3945", "score": "0.4674186", "text": "def content_language(self, content_language):\n\n self._content_language = content_language", "title": "" }, { "docid": "e16efa26c109454bf0652ee5c1dff230", "score": "0.46695387", "text": "def execute_request(self, request: Request):\n if request.encryption_state == CryptoMode.EN:\n result = self.encryption_start_handler.handle_request(request)\n if request.encryption_state == CryptoMode.DE:\n result = self.decryption_start_handler.handle_request(request)\n return", "title": "" }, { "docid": "392996d4e7c82246879be33fe52563e8", "score": "0.4668674", "text": "def list_languages(request):\n filter = request.GET.get('filter', 'project')\n\n if filter != 'all' and filter not in poutil.filters:\n raise http.Http404\n\n languages = []\n for l in available_languages(request.user):\n pos = []\n for po in poutil.pofiles[l[0]].values():\n if filter=='all' or filter==po['filter']:\n #po = poutil.upd_pofile(po)\n stats = poutil.upd_stats(po)\n stats['appname'] = po['appname']\n\n translated = stats.get('translated_entries', 1)\n untranslated = stats.get('untranslated_entries', 0)\n fuzzy = stats.get('fuzzy_entries', 0)\n stats['percent_translated'] = (100 * translated)/(translated + untranslated + fuzzy)\n\n pos.append(stats)\n if pos:\n languages.append((l[0], l[1], pos))\n\n return shortcuts.render_to_response('rosetta/languages.html', {\n 'filter' : filter,\n 'languages': languages,\n }, context_instance=template.RequestContext(request))", "title": "" }, { "docid": "7f78b9a34a2e69566fc0c0e877e47f44", "score": "0.46669605", "text": "def test_setlang_http_next(self):\n lang_code = self._get_inactive_language_code()\n non_https_next_url = 'http://testserver/redirection/'\n post_data = {'language': lang_code, 'next': non_https_next_url}\n # Insecure URL in POST data.\n response = self.client.post('/i18n/setlang/', data=post_data, secure=True)\n self.assertEqual(response.url, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)\n # Insecure URL in HTTP referer.\n response = self.client.post('/i18n/setlang/', secure=True, HTTP_REFERER=non_https_next_url)\n self.assertEqual(response.url, '/')\n self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], lang_code)", "title": "" }, { "docid": "d89edb413a4b7237f31e9a78ee27321e", "score": "0.46651182", "text": "def __init__(self, context, request):\n\n self.context = context\n self.request = request\n self.response = self.request.RESPONSE\n self.settings = self.context.portal_pathkey #IPathkeyControlPanelForm(self.context)\n self.language = self.request.get('LANGUAGE', 'fi')", "title": "" }, { "docid": "8a842fe56a4baf2e2671dd003ddb22dc", "score": "0.46615198", "text": "def _check_locale():\n if not request.environ.get('LOCALE'):\n accept_language = request.get_header('Accept-Language')\n if not accept_language:\n return\n\n accepted = []\n for language in accept_language.split(','):\n if language.split(';')[0] == language:\n accepted.append(language.strip())\n else:\n accepted.append(language.split(\";\")[0].strip())\n # fine 
tuning order of locale_q_pair according to q-value necessary?!\n\n lang = Locale.negotiate(accepted, [l[0] for l in LANGS])\n if lang:\n request.environ['LOCALE'] = str(lang)", "title": "" }, { "docid": "ffc3fb565aabb5240ac571c58bb94fad", "score": "0.4657069", "text": "def f_reply(bot, update):\n update.message.reply_text('Language ... ')", "title": "" }, { "docid": "4d8dd09482f9c64c9449e6ae11d10d3f", "score": "0.4653195", "text": "def usingRTLLang(request):\n langs = setLocaleFromRequestHeader(request)\n\n # Grab only the language (first two characters) so we know if the language\n # is read right-to-left\n #langs = [ lang[:2] for lang in langs ]\n lang = getAssumedChosenLang(langs)\n if lang in rtl_langs:\n return True\n return False", "title": "" }, { "docid": "a790d6a41c0ca1930e80ed45dc949d42", "score": "0.4651823", "text": "def language(request):\n\n return request.config.getoption(\"--language\")", "title": "" }, { "docid": "9026bcb928b8da864038bdb7f788fe07", "score": "0.46497947", "text": "def test_parse_language_cookie(self):\n g = get_language_from_request\n r = self.rf.get('/')\n r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}\n r.META = {}\n self.assertEqual('pt-br', g(r))\n\n r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}\n r.META = {}\n self.assertEqual('pt', g(r))\n\n r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}\n r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}\n self.assertEqual('es', g(r))\n\n # This test assumes there won't be a Django translation to a US\n # variation of the Spanish language, a safe assumption. When the\n # user sets it as the preferred language, the main 'es'\n # translation should be selected instead.\n r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}\n r.META = {}\n self.assertEqual(g(r), 'es')\n\n # This tests the following scenario: there isn't a main language (zh)\n # translation of Django but there is a translation to variation (zh-hans)\n # the user sets zh-hans as the preferred language, it should be selected\n # by Django without falling back nor ignoring it.\n r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-hans'}\n r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}\n self.assertEqual(g(r), 'zh-hans')", "title": "" }, { "docid": "668397280a1a79f1e4ef44ddda680e36", "score": "0.46462518", "text": "def _load_content(self):\n query = {\"action\":\"query\", \"prop\":\"revisions|langlinks|extlinks\", \n \"titles\":self._title, \"rvprop\":\"user|content|timestamp\",\n \"rvdir\":\"older\"}\n res = self.site.query(query, query_continue=True, \n prefix=(\"rv\", \"ll\", \"el\"))\n result = res[\"query\"][\"pages\"].values()[0]\n revisions = result[\"revisions\"][0]\n i = list(res[\"query\"][\"pages\"])[0]\n langlinks = res[\"query\"][\"pages\"][i].get(\"langlinks\", None)\n extlinks = res[\"query\"][\"pages\"][i].get(\"extlinks\", None)\n content = revisions[\"*\"]\n try:\n self._content = content.decode()\n except Exception:\n self._content = content\n b = self._title.split(\":\")\n self._prefix = b[0] if not b[0] == self.title else None\n self._last_editor = revisions[\"user\"]\n self._last_edited = parse(revisions[\"timestamp\"])\n code = mwparserfromhell.parse(self._content)\n self._templates = code.filter_templates(recursive=True)\n self._links = code.filter_links()\n for link in self._links: \n title = str(link.title).lower()\n if title.startswith(\"category:\"):\n cat = title.split(\":\")\n if cat[0] == title:\n continue\n self._categories.append(unicode(link.title))\n\n elif title.startswith(\"image:\") or title.startswith(\"file:\") \\\n or 
title.startswith(\"media:\"):\n self._files.append(unicode(link.title))\n if langlinks:\n for langlink in langlinks:\n self._langlinks[langlink[\"lang\"]] = langlink[\"*\"]\n\n if extlinks:\n for extlink in extlinks:\n self._extlinks.append(extlink[\"*\"])\n\n # Find out if we are allowed to edit the page or not.\n user = self.site.get_username()\n regex = \"\\{\\{\\s*(no)?bots\\s*\\|?((deny|allow)=(.*?))?\\}\\}\"\n re_compile = re.search(regex, self._content)\n if not re_compile:\n return\n if re_compile.group(1):\n self._is_excluded = True\n if user.lower() in re_compile.group(4).lower():\n if re_compile.group(3) == \"allow\":\n self._is_excluded = False\n if re_compile.group(3) == \"deny\":\n self._is_excluded = True\n return", "title": "" }, { "docid": "621816bc97f6b2edbb628bb88784e370", "score": "0.46453065", "text": "def lessons():\n\n if request.method == \"GET\":\n return render_template(\"lessons.html\")\n\n else: # if method == 'POST'\n user_id = session[\"user_id\"]\n language = db.execute(\"SELECT language FROM users WHERE id = :user_id\", user_id=user_id)[0][\"language\"]\n category = request.form.get(\"category\")\n db.execute(\"UPDATE users SET {category} = 'TRUE' WHERE id = :user_id\".format(**{\"category\" : category}), user_id=user_id)\n\n if language == 'Swahili':\n if category == 'greetings':\n return redirect(\"https://www.youtube.com/watch?v=GCDNLUqFztA\")\n elif category == 'family':\n return redirect('https://www.youtube.com/watch?v=hY6AM5Ppr1A')\n elif category == 'numbers':\n return redirect('https://www.youtube.com/watch?v=asnURbNNWtM')\n elif category == 'colors':\n return redirect('https://youtu.be/DjmLoxkeMPg?t=1774')\n elif category == 'food':\n return redirect('https://www.youtube.com/watch?v=dWfCKdn46iE')\n else: # if category == 'animals'\n return redirect('https://www.youtube.com/watch?v=N6wcz6PuW0M')\n\n else: # if language == 'Chinese'\n if category == 'greetings':\n return redirect('https://www.youtube.com/watch?v=2ZA6M9EsSlM')\n elif category == 'family':\n return redirect('https://www.youtube.com/watch?v=uc7qd9xPpDY')\n elif category == 'numbers':\n return redirect('https://www.youtube.com/watch?v=LpPs5RppA5A')\n elif category == 'colors':\n return redirect('https://www.youtube.com/watch?v=9E1QHwAFCgo')\n elif category == 'food':\n return redirect('https://www.youtube.com/watch?v=W53Cn19m3T0')\n else: # if category == 'animals'\n return redirect('https://www.youtube.com/watch?v=SILtVcCErzk')", "title": "" }, { "docid": "a8192f57a2aff97dc2c8e981a929f173", "score": "0.46399903", "text": "def show_language_index():\n return render_template(\"language_index.html\")", "title": "" }, { "docid": "6a7f249446099127367d55562108b048", "score": "0.4635487", "text": "def redirect_language(language):\n q = iso639_to_q(language)\n if q:\n return redirect(url_for('app.show_language', q=q), code=302)\n return render_template('404.html')", "title": "" }, { "docid": "63202b2979252c5ec20a07cc7cc14476", "score": "0.46257764", "text": "def set_translation_needed(self):\n if self.comments.find(translateComment) < 0:\n self.comments += translateComment\n global translate_comment_issued\n translate_comment_issued = True", "title": "" }, { "docid": "755d9b08673dc4d85f3661b4d09385b6", "score": "0.4623594", "text": "def updateLanguage(self, lang: str):\n\n # Supported Languages\n # Lang code = <ISO 639-1>_<ISO 3166-1 alpha-2>\n supLang = {\n \"ar_SA\": wx.LANGUAGE_ARABIC,\n \"cs_CZ\": wx.LANGUAGE_CZECH,\n \"en_US\": wx.LANGUAGE_ENGLISH_US,\n \"fr_FR\": wx.LANGUAGE_FRENCH,\n 
\"es_CU\": wx.LANGUAGE_SPANISH,\n \"it_IT\": wx.LANGUAGE_ITALIAN,\n \"ja_JP\": wx.LANGUAGE_JAPANESE,\n \"ko_KR\": wx.LANGUAGE_KOREAN,\n \"pt_BR\": wx.LANGUAGE_PORTUGUESE_BRAZILIAN,\n \"ru_RU\": wx.LANGUAGE_RUSSIAN,\n \"es_ES\": wx.LANGUAGE_SPANISH,\n \"sq_AL\": wx.LANGUAGE_ALBANIAN,\n }\n\n selLang = supLang.get(lang, wx.LANGUAGE_ENGLISH_US)\n\n if self.locale:\n assert sys.getrefcount(self.locale) <= 2\n del self.locale\n\n # create a locale object for this language\n self.locale = wx.Locale(selLang)\n\n if self.locale.IsOk():\n self.locale.AddCatalog(__packagename__)\n else:\n self.locale = None", "title": "" }, { "docid": "fac743cebe8d5b07266bbf21c0a0584a", "score": "0.46039438", "text": "def query_lang(self, query_lang):\n\n self._query_lang = query_lang", "title": "" }, { "docid": "edf4c1c8d752b6fa70aed6c1316ed954", "score": "0.46024454", "text": "def published(self, request, check_language=True):\n published_field = getattr(\n self, 'published_field',\n '{}translation__is_published'.format(\n self.model._meta.module_name))\n filter_kwargs = {published_field: True, }\n results = self.get_query_set().filter(**filter_kwargs)\n\n if check_language:\n language = getattr(request, 'LANGUAGE_CODE', None)\n if not language:\n self.model.objects.none()\n language_field = getattr(\n self, 'language_field',\n '{}translation__language'.format(\n self.model._meta.module_name))\n language_filter_kwargs = {language_field: language}\n results = results.filter(**language_filter_kwargs)\n\n return results.distinct()", "title": "" }, { "docid": "2d4f80d6a05dc323615c2ee5a83aa796", "score": "0.46023163", "text": "def translate():\n if (languages.get() != \"select a language\") and (not inputBox.get(1.0, END).isspace()):\n translator = Translator()\n translation = translator.translate(text=inputBox.get(1.0, END),dest=LANGUAGE_CODE[languages.get()]).text\n outputBox.delete(1.0, END)\n outputBox.insert(END, translation)\n writeFile(translation)\n else:\n outputBox.delete(1.0, END)", "title": "" }, { "docid": "184a770295666243adf1eaa77a3df7c6", "score": "0.45994517", "text": "def msg_eng_change(self,msg):\r\n engname = msg.get_from()\r\n self.refresh_viewers(engname, oname=None)", "title": "" }, { "docid": "0f49db8d1fdb28792e60ab60ff497c81", "score": "0.45963284", "text": "def on_modified(self, view):\n\n\t\twas_user_initiated = not self.textsync.isOwned(view)\n\n\t\tpersist.log.debug(\"User initiated modification:\", was_user_initiated)\n\n\t\t# this function blocks the UI thread until it returns,\n\t\t# so we acquire the lock and promptly release it before\n\t\t# returning to ensure that no edits are happening concurrently\n\t\t# if the file is owned while there is a modification then\n\t\t# there is probably no need to\n\t\t\n\t\tpersist.log.debug(\"If you see this message right before the UI freezes, then there is a deadlock error that must be addressed, please contact the maintainer of TextSync plugin.\")\n\n\t\tif was_user_initiated:\n\t\t\tself.textsync.view_lock(view).acquire()\n\t\t\tself.textsync.view_lock(view).release()\n\t\n\n\t\t# as soon as we return, there will be a modification, so\n\t\t# notify just in case anyone out there cares\n\n\t\t# self.textsync.clientManager.notifyEvent(view, 'modified')", "title": "" }, { "docid": "685f70ebfc1f3a822f0fdd48066808d8", "score": "0.45939165", "text": "def session_language(request):\n if 'lang' in request.GET:\n lang = request.GET.get('lang')\n else:\n lang = translation.get_language()\n return {'SESSION_LANGUAGE': lang, }", "title": "" }, { "docid": 
"6b767aff3e15e9050a18f4029f77031a", "score": "0.45863223", "text": "def activate_translation_for_user(user):\n from django.utils import translation\n if user.languagecode:\n if translation.check_for_language(user.languagecode):\n translation.activate(user.languagecode)", "title": "" }, { "docid": "4b6787ebbb4182c46f0252be18fa1f2a", "score": "0.45837864", "text": "def language(self, lang):\n lang = lang.lower()\n if self._lang == lang:\n return\n\n url = self._api_url\n tmp = url.replace(\"/{0}.\".format(self._lang), \"/{0}.\".format(lang))\n\n self._api_url = tmp\n self._lang = lang\n self.clear_memoized()", "title": "" }, { "docid": "ca529ba0e4881df8016f1052029addcf", "score": "0.4579534", "text": "def get_language_from_request(request):\r\n global _accepted\r\n from django.conf import settings\r\n globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')\r\n supported = dict(settings.LANGUAGES)\r\n\r\n if hasattr(request, 'session'):\r\n lang_code = request.session.get('django_language', None)\r\n if lang_code in supported and lang_code is not None and check_for_language(lang_code):\r\n return lang_code\r\n\r\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\r\n if lang_code and lang_code in supported and check_for_language(lang_code):\r\n return lang_code\r\n\r\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\r\n for accept_lang, unused in parse_accept_lang_header(accept):\r\n if accept_lang == '*':\r\n break\r\n\r\n # We have a very restricted form for our language files (no encoding\r\n # specifier, since they all must be UTF-8 and only one possible\r\n # language each time. So we avoid the overhead of gettext.find() and\r\n # work out the MO file manually.\r\n\r\n # 'normalized' is the root name of the locale in POSIX format (which is\r\n # the format used for the directories holding the MO files).\r\n normalized = locale.locale_alias.get(to_locale(accept_lang, True))\r\n if not normalized:\r\n continue\r\n # Remove the default encoding from locale_alias.\r\n normalized = normalized.split('.')[0]\r\n\r\n if normalized in _accepted:\r\n # We've seen this locale before and have an MO file for it, so no\r\n # need to check again.\r\n return _accepted[normalized]\r\n\r\n for lang, dirname in ((accept_lang, normalized),\r\n (accept_lang.split('-')[0], normalized.split('_')[0])):\r\n if lang.lower() not in supported:\r\n continue\r\n langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',\r\n 'django.mo')\r\n if os.path.exists(langfile):\r\n _accepted[normalized] = lang\r\n return lang\r\n\r\n return settings.LANGUAGE_CODE", "title": "" }, { "docid": "f50a8049ffeffc1f0ead82ff78df1c0c", "score": "0.4577472", "text": "def test_set_language_wrong_lang(self):\n u = UserFactory()\n u.set_password('123')\n u.save()\n\n auth_url = prepare_url('login')\n data = {\n 'username': u.username,\n 'password': '123'\n }\n response = self.client.post(auth_url, data=data, format='json')\n token = response.data['token']\n\n self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))\n\n url = prepare_url('users-current/set/language')\n\n data = {\n 'language': 'kk',\n }\n response = self.client.post(url, data=data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "99b0846bcd41aaa69b643c880f58a3b0", "score": "0.45765355", "text": "def __init__(self, language=None):\n\n # Cache previously loaded translations\n self._gtobjs = {}\n\n # Get our 
language\n self.lang = AppriseLocale.detect_language(language)\n\n # Our mapping to our _fn\n self.__fn_map = None\n\n if GETTEXT_LOADED is False:\n # We're done\n return\n\n # Add language\n self.add(self.lang)", "title": "" }, { "docid": "b771bb60ca0be6051f6ee4d1d7c73906", "score": "0.45762858", "text": "def update_editor(self):\n\n # Everything should really be handled elsewhere in trait notifications.\n # Just pass here.\n pass", "title": "" }, { "docid": "d1deeed8df30aa1ab95780d4e45ef777", "score": "0.4572981", "text": "def setLanguage(self, lang):\n self.language = lang", "title": "" }, { "docid": "69908c5925be2aee5d9b5dc586143ccf", "score": "0.45692748", "text": "def test_language_not_saved_to_session(self):\n self.client.get('/fr/simple/')\n self.assertNotIn(LANGUAGE_SESSION_KEY, self.client.session)", "title": "" }, { "docid": "861a13d1db3141181f0ce12683bc80a3", "score": "0.45683706", "text": "def cycle_language(self) -> None:\n languagelist = CST.get_every_languages()\n # Finding this language index\n this_lang_index = 0\n for language in languagelist:\n if language[\"LANGUAGE\"] == CST.get_text(\"LANGUAGE\"):\n this_lang_index = languagelist.index(language)\n print(\"Index found:\", this_lang_index)\n break\n\n this_lang_index = (this_lang_index + 1) % len(languagelist)\n CST.set_text_db(languagelist[this_lang_index])", "title": "" }, { "docid": "c4aa420962e2355ccad5b6a3bb549cf2", "score": "0.45648324", "text": "def language_filter_func(self, model, iter, data):\n if self.current_filter_language is None or self.current_filter_language == \"all\":\n return True\n else:\n return model[iter][4] == self.current_filter_language", "title": "" }, { "docid": "fa4e7872197ba176879e4ba28294a5d2", "score": "0.4564364", "text": "def translate(self):\n logging.info(\"Translate UI\")\n\n self.SetLabel(gt(\"Language\"))\n self.top_label.SetLabel(gt(\"Select from the options below:\"))\n self.close_button.SetLabel(gt(\"Close\"))\n # These values taken from the active translation\n self.label_name.SetLabel(gt(\"Language:\"))\n self.label_name_value.SetLabel(gt.active.longname())\n self.label_createdby.SetLabel(gt(\"Created by:\"))\n self.label_createdby_value.SetLabel(gt.active.created_by())\n self.label_createdon.SetLabel(gt(\"Created on:\"))\n self.label_createdon_value.SetLabel(gt.active.created_date())\n self.Layout()\n self.CentreOnParent()\n self.Refresh()", "title": "" }, { "docid": "5eab38a7b83c3d1509678c1fa90ab24c", "score": "0.45597252", "text": "def requesthandler(self, sl, tl, st):\n mode = 'form'\n if len(sl) < 1:\n sl = self.request.get('sl')\n mode = 'rest'\n if len(tl) < 1:\n tl = self.request.get('tl')\n mode = 'rest'\n if len(st) > 0:\n st = transcoder.clean(st)\n st = string.replace(st, '+', ' ')\n if len(st) < 1:\n st = clean(self.request.get('st'))\n mtengine = self.request.get('mtengine')\n output = self.request.get('output')\n q = clean(self.request.get('q'))\n langpair = self.request.get('langpair')\n if len(q) > 0 and len(langpair) > 0:\n st = q\n langs = string.split(langpair, '|')\n if len(langs) == 2:\n sl = langs[0]\n tl = langs[1]\n output = 'google'\n else:\n sl = ''\n tl = ''\n userip = self.request.remote_addr\n if len(sl) > 0 and len(tl) > 0 and len(st) > 0:\n m = MTWrapper()\n tt = m.getTranslation(sl, tl, st, mtengine=mtengine,userip=userip)\n tt = string.replace(tt, '\\n', '')\n if mode == 'rest' or output != 'google':\n self.response.headers['Content-Type']='text/plain'\n self.response.headers['Accept-Encoding']='utf-8'\n self.response.out.write(tt)\n else:\n 
self.response.headers['Content-Type']='text/javascript'\n self.response.headers['Accept-Encoding']='utf-8'\n response='{\"responseData\": {\"translatedText\":\"[translation]\"},\"responseDetails\": null, \"responseStatus\": 200}'\n response=string.replace(response,'[translation]', string.replace(tt, '\\\"', '\\''))\n self.response.out.write(response)\n else:\n t = '<form action=/mt method=post><table>'\n t = t + '<tr><td>Source Language Code</td><td><input type=text name=sl></td></tr>'\n t = t + '<tr><td>Target Language Code</td><td><input type=text name=tl></td></tr>'\n t = t + '<tr><td>Machine Translation Engine</td><td><input type=text name=mtengine></td></tr>'\n t = t + '<tr><td>Output Format (text|google)</td><td><input type=text name=output></td></tr>'\n t = t + '<tr><td colspan=2><textarea name=st rows=4 cols=60>Hello World</textarea></td></tr>'\n t = t + '<tr><td colspan=2><input type=submit value=Translate></td></tr>'\n t = t + '</table></form>'\n www.serve(self, t, sidebar = self.__doc__, title = '/mt (Machine Translation API)')", "title": "" }, { "docid": "c9a2626db50ac55e1e588c10e4b87949", "score": "0.45522153", "text": "def on_load_modulerunnerform(self, *args, **kwargs):\n self.parentApp.switchForm(self._form_id_map[\"on_load_modulerunnerform\"])", "title": "" }, { "docid": "09bb104b97e3c0d8d4e3b013be6003f8", "score": "0.45500705", "text": "def localization(request):\n if request.locale == settings.WIKI_DEFAULT_LANGUAGE:\n return HttpResponseRedirect(reverse('dashboards.contributors'))\n data = {'overview_rows': partial(overview_rows, request.locale)}\n return render_readouts(request, L10N_READOUTS, 'localization.html',\n extra_data=data)", "title": "" }, { "docid": "2bba502dd0abe15b8e9dcb02c0176b1d", "score": "0.45488867", "text": "def test_set_language_no_lang(self):\n u = UserFactory()\n u.set_password('123')\n u.save()\n\n auth_url = prepare_url('login')\n data = {\n 'username': u.username,\n 'password': '123'\n }\n response = self.client.post(auth_url, data=data, format='json')\n token = response.data['token']\n\n self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))\n\n url = prepare_url('users-current/set/language')\n\n data = {}\n response = self.client.post(url, data=data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" } ]
94be5e35f33d58e4a6b50c4acead38ed
Handler for receiving external platform PubSub/RPC requests from internal agents. It then calls the external PubSub/RPC router handler to forward the request to the external platform.
[ { "docid": "47f08ee382423adf29f9e4090f62bac3", "score": "0.6289094", "text": "def outbound_request_handler(self, ch, method, props, body):\n _log.debug(\"Proxy ZMQ Router {}\".format(body))\n frames = jsonapi.loads(body.decode('utf-8'))\n if len(frames) > 6:\n if frames[5] == 'pubsub':\n # Forward the message to pubsub component of the router to take action\n self.zmq_router.pubsub.handle_subsystem(frames)\n else:\n # Forward the message to external RPC handler component of the router to take action\n self.zmq_router.ext_rpc.handle_subsystem(frames)", "title": "" } ]
[ { "docid": "fa85b609fb70c1509535350a74c6e70a", "score": "0.64669406", "text": "def on_request(self, ch, method, props, message_body):\n logger.info(' [-] Publishing through Remote Rrocedure Call (RPC)...')\n logger.info(' [x] Received %r' % message_body)\n\n message = json.loads(message_body.decode('utf-8'))\n api_name_to_execute = message['api_name']\n logger.info(f\"The API to be executed is {api_name_to_execute}\")\n\n executor_config = LISTENER_CONFIG.get(api_name_to_execute)\n executor = executor_config.get(EXECUTOR_KEY)\n\n logger.info(f\"Calling executor [{executor.__class__.__name__}] for API: {api_name_to_execute}\")\n response = executor.execute(message)\n\n ch.basic_publish(exchange='',\n routing_key=props.reply_to,\n properties=pika.BasicProperties(correlation_id=props.correlation_id),\n body=str(response))\n ch.basic_ack(delivery_tag=method.delivery_tag)", "title": "" }, { "docid": "c3c8233aba2920d5f61529fe9c623fa3", "score": "0.57575226", "text": "def pubsub(event, context):\n\n publisher = pubsub_v1.PublisherClient()\n\n slEquityForum(publisher)\n\n ftlk(publisher)\n\n lbo(publisher)\n\n pass", "title": "" }, { "docid": "10dd788dce02a2c4d705b43fb0008c7c", "score": "0.551253", "text": "def publish_callback(self, peer, sender, bus, topic, headers, message):\n json_msg = jsonapi.dumps(dict(bus=bus, headers=headers, message=message))\n # Reformat the message into ZMQ VIP message frames\n frames = [sender, '', 'VIP', '', '', 'pubsub',\n zmq.Frame('publish'), zmq.Frame(str(topic)), zmq.Frame(str(json_msg))]\n\n self.zmq_router.pubsub.handle_subsystem(frames, '')", "title": "" }, { "docid": "40de24d9b76184d38c998c5c69fc20c6", "score": "0.53822845", "text": "def _route_to_agent(self, frames):\n sender, recipient, proto, auth_token, msg_id, subsystem = frames[:6]\n args = [arg for arg in frames[6:]]\n # for f in frames:\n # _log.debug(\"Frames:; {}\".format(f))\n connection = self.core.connection\n\n app_id = \"{instance}.{identity}\".format(instance=self.core.instance_name,\n identity=sender)\n # Change queue binding for the Response message\n # After sending the message (request) on behalf of ZMQ client, the response has to\n # routed back to the caller. Queue binding is modified for that purpose.\n # outbound_response_handler() gets called (based on the binding) to reformat response\n # message and send over zmq bus\n connection.channel.queue_bind(exchange=connection.exchange,\n queue=self._outbound_response_queue,\n routing_key=app_id,\n callback=None)\n\n # Set the destination routing key to destination agent\n destination_routing_key = \"{0}.{1}\".format(self.core.instance_name, recipient)\n\n # Fit VIP frames into the PIKA properties dictionary\n # VIP format - [SENDER, RECIPIENT, PROTO, USER_ID, MSG_ID, SUBSYS, ARGS...]\n dct = {\n 'user_id': self.core.instance_name + '.' 
+ self.core.identity,\n 'app_id': app_id, # Routing key of SOURCE AGENT\n 'headers': dict(sender=sender, # SENDER\n recipient=destination_routing_key, # RECEIVER\n proto='VIP', # PROTO\n user=auth_token, # USER_ID\n ),\n 'message_id': msg_id, # MSG_ID\n 'type': subsystem, # SUBSYS\n 'content_type': 'application/json'\n }\n properties = pika.BasicProperties(**dct)\n # _log.debug(\"PROXY PUBLISHING TO CHANNEL {0}, {1}, {2}\".format(destination_routing_key, app_id, properties))\n connection.channel.basic_publish(connection.exchange,\n destination_routing_key,\n jsonapi.dumps(args, ensure_ascii=False),\n properties)", "title": "" }, { "docid": "25b8a2cfe8837ec790a23cd4d0106954", "score": "0.52806157", "text": "def outbound_response_handler(self, ch, method, props, body):\n # Strip sender's identity from binding key\n routing_key = str(method.routing_key)\n platform, to_identity = routing_key.split(\".\", 1)\n platform, from_identity = props.app_id.split(\".\", 1)\n userid = props.headers.get('user', '')\n # Reformat message into ZMQ VIP format\n frames = [to_identity, from_identity, 'VIP1', userid,\n props.message_id, props.type]\n try:\n args = jsonapi.loads(body)\n try:\n # This is necessary because jsonrpc request/response is inside a list which the\n # ZMQ agent subsystem does not like\n args = jsonapi.loads(args[0])\n frames.append(jsonapi.dumps(args))\n except ValueError as e:\n if isinstance(args, list):\n for m in args:\n frames.append(m)\n else:\n frames.append(jsonapi.dumps(args))\n except TypeError as e:\n _log.error(\"Invalid json format {}\".format(e))\n return\n\n _log.debug(\"Proxy ZMQ Router Outbound handler {0}, {1}\".format(to_identity, args))\n\n try:\n self.zmq_router.socket.send_multipart(frames, copy=True)\n except ZMQError as ex:\n _log.error(\"ZMQ Error {}\".format(ex))", "title": "" }, { "docid": "45a1e44f840fd4877b28b98bc9790980", "score": "0.5167028", "text": "def on_publish(unused_client, unused_userdata, unused_mid):\n print('on_publish')", "title": "" }, { "docid": "45a1e44f840fd4877b28b98bc9790980", "score": "0.5167028", "text": "def on_publish(unused_client, unused_userdata, unused_mid):\n print('on_publish')", "title": "" }, { "docid": "45a1e44f840fd4877b28b98bc9790980", "score": "0.5167028", "text": "def on_publish(unused_client, unused_userdata, unused_mid):\n print('on_publish')", "title": "" }, { "docid": "8e5562cea810c929866505c4c0897147", "score": "0.51050264", "text": "def main(argv):\n\n if len(argv) != 1:\n print('Usage: python -m impl.step_2_account.app')\n return\n\n # Construct a service for the Partner Procurement API.\n database = JsonDatabase()\n procurement = Procurement(database)\n\n # Get the subscription object in order to perform actions on it.\n subscriber = pubsub_v1.SubscriberClient()\n subscription_path = subscriber.subscription_path(PROJECT_ID,\n PUBSUB_SUBSCRIPTION)\n\n def callback(message):\n \"\"\"Callback for handling Cloud Pub/Sub messages.\"\"\"\n payload = json.loads(message.data)\n\n print('Received message:')\n pprint.pprint(payload)\n print()\n\n ack = False\n if 'entitlement' in payload:\n ack = procurement.handle_entitlement_message()\n elif 'account' in payload:\n ack = procurement.handle_account_message(payload['account'])\n else:\n # If there's no account or entitlement, then just ack and ignore the\n # message. 
This should never happen.\n ack = True\n\n if ack:\n message.ack()\n\n subscription = subscriber.subscribe(subscription_path, callback=callback)\n\n print('Listening for messages on {}'.format(subscription_path))\n print('Exit with Ctrl-\\\\')\n\n while True:\n try:\n subscription.result()\n except Exception as exception:\n print('Listening for messages on {} threw an Exception: {}.'.format(\n subscription_path, exception))", "title": "" }, { "docid": "3f58b29208150c82c6a15982fc757b45", "score": "0.5100719", "text": "async def adapt(self, request_data, request_args):\n correlation_id = uuid.uuid4().hex.encode()\n\n await self.exchange.publish(\n aio_pika.Message(\n json.dumps({\n 'data': request_data,\n 'args': request_args,\n }).encode('utf-8'),\n reply_to=self.response_queue.name,\n correlation_id=correlation_id,\n ),\n routing_key=self.get_endpoint_name(),\n )\n\n logger.info(\n \"waiting for rpc response in {}\".format(self.response_queue.name))\n async with self.response_queue.iterator() as q:\n async for message in q:\n # TODO: timeout!\n with message.process():\n if message.correlation_id == correlation_id:\n result = json.loads(message.body.decode())\n if 'error' in result:\n raise web.HTTPInternalServerError(\n reason=result['error']['reason'])\n return result", "title": "" }, { "docid": "ffc9d2b4cd6288a73b9e5e848c2c3ebb", "score": "0.50737756", "text": "def request_to_pubsub(request, topic=None, validation_func=None):\r\n payload = request.get_json(force=True)\r\n print({\"payload\", payload})\r\n topic = topic or payload.get('topic') or os.environ.get('TOPIC_NAME')\r\n if topic is None:\r\n raise EnvironmentError('topic must be specified in the parameters or request as \"topic\" '\r\n 'or environment as TOPIC_NAME')\r\n if validation_func is not None and callable(validation_func) is True:\r\n validation_func(payload=payload)\r\n result = publish_to_pubsub(topic=topic, message=f\"request forwarded from {request.full_path}\", data=payload)\r\n return jsonify({'message_id': result})", "title": "" }, { "docid": "a95db2efe8a82a5a2189640267aed301", "score": "0.50671524", "text": "def root():\n logging.debug(\"received message\")\n\n try:\n if not request.data:\n raise ValueError(\"No request data received\")\n envelope = json.loads(request.data.decode(\"utf-8\"))\n logging.debug(\"Raw pub/sub message: {}\".format(envelope))\n\n if \"message\" not in envelope:\n raise ValueError(\"No message in envelope\")\n\n if \"messageId\" in envelope[\"message\"]:\n logging.debug(\n \"messageId: {}\".format(envelope[\"message\"].get(\"messageId\", \"\"))\n )\n message_id = envelope[\"message\"][\"messageId\"]\n\n if \"attributes\" not in envelope[\"message\"]:\n raise ValueError(\n \"Attributes such as token and batch_id missing from request\"\n )\n\n # if the pubsub PUBSUB_VERIFICATION_TOKEN isn't included or doesn't match, don't continue\n if \"token\" not in envelope[\"message\"][\"attributes\"]:\n raise ValueError(\"token missing from request\")\n if (\n not envelope[\"message\"][\"attributes\"][\"token\"]\n == config.PUBSUB_VERIFICATION_TOKEN\n ):\n raise ValueError(\n \"token from request doesn't match, received: {}\".format(\n envelope[\"message\"][\"attributes\"][\"token\"]\n )\n )\n\n # if the batch_id isn't included, fail immediately\n if \"batch_id\" not in envelope[\"message\"][\"attributes\"]:\n raise ValueError(\"batch_id missing from request\")\n batch_id = envelope[\"message\"][\"attributes\"][\"batch_id\"]\n logging.debug(\"batch_id: {} \".format(batch_id))\n\n if 
\"batch_start_time\" in envelope[\"message\"][\"attributes\"]:\n batch_start_time = envelope[\"message\"][\"attributes\"][\"batch_start_time\"]\n else:\n batch_start_time = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n if \"src_message_id\" in envelope[\"message\"][\"attributes\"]:\n src_message_id = envelope[\"message\"][\"attributes\"][\"src_message_id\"]\n else:\n src_message_id = \"\"\n\n if \"data\" not in envelope[\"message\"]:\n raise ValueError(\"No data in message\")\n payload = base64.b64decode(envelope[\"message\"][\"data\"])\n logging.debug(\"payload: {} \".format(payload))\n\n metadata = {\n \"batch_id\": batch_id,\n \"message_id\": message_id,\n \"src_message_id\": src_message_id,\n \"batch_start_time\": batch_start_time,\n }\n\n data = json.loads(payload)\n logging.debug(\"data: {} \".format(data))\n\n # Check the input parameters\n if not data:\n raise ValueError(\"No data in Pub/Sub Message to write to BigQuery\")\n\n # See https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries\n write_to_bigquery(data, metadata)\n\n except Exception as e:\n logging.error(\"Error: {}\".format(e))\n return Response(f\"{e}\", status=500)\n\n return Response(\"Ok\", status=200)", "title": "" }, { "docid": "50a8f258fbc8be1ce85d855c7f2e4ed7", "score": "0.5016845", "text": "def subscribe(receiver):", "title": "" }, { "docid": "50a8f258fbc8be1ce85d855c7f2e4ed7", "score": "0.5016845", "text": "def subscribe(receiver):", "title": "" }, { "docid": "516556610113006d855546c763bb0960", "score": "0.50119555", "text": "def call(self, msg):\n self.response = None\n print(\"uuid: \", self.corr_id)\n self.channel.basic_publish(exchange='',\n routing_key='rpc_queue',\n properties=pika.BasicProperties(\n reply_to = self.callback_queue,\n correlation_id = self.corr_id,\n ),\n body=str(msg))\n while self.response is None:\n self.connection.process_data_events()\n return self.response.decode(\"utf-8\")", "title": "" }, { "docid": "09c5300c13cf1e1a5913833fc058fb4e", "score": "0.49774674", "text": "def callback(msg):\n\n print(\"callback %r\" % (msg,))\n\n print(\"properties...\")\n for key, val in msg.properties.items():\n print(\" - %s: %s\" % (key, str(val)))\n\n print(\"delivery_info...\")\n for key, val in msg.delivery_info.items():\n print(\" - %s: %s\" % (key, str(val)))\n\n print(\" - body: %r\" % (msg.body,))\n\n # parse the request, build a response\n request = int(msg.body)\n response = str(request * 10000)\n\n print(\" - response: %r\" % (response,))\n\n # wrap it in a message\n response_msg = amqp.Message(\n response, content_type=\"text/plain\", correlation_id=msg.correlation_id\n )\n print(\" - response_msg: %r\" % (response_msg,))\n\n # send this message back to the caller\n rslt = msg.channel.basic_publish(\n response_msg, exchange=\"\", routing_key=msg.reply_to\n )\n print(\" - basic_publish: %r\" % (rslt,))\n\n # ack that we processed this request\n if not args.noack:\n rslt = msg.channel.basic_ack(msg.delivery_tag)\n print(\" - basic_ack: %r\" % (rslt,))\n else:\n print(\" - no acknowledgement needed\")\n print(\"\")", "title": "" }, { "docid": "f0aac1290b41a9d8f0b354ef5c1a665e", "score": "0.49772206", "text": "def host_rest_api_post_handler(self, request_dispatch):\n content_len = int(request_dispatch.headers.getheader('content-length', 0))\n content = request_dispatch.rfile.read(content_len)\n http_response = httplib.OK\n if content:\n host_data = json.loads(content)\n\n subfunctions = host_data.get('subfunctions', None)\n\n if host_data['hostname'] is None:\n 
DLOG.info(\"Invalid host name received, host_name=%s.\"\n % host_data['hostname'])\n\n elif subfunctions is None:\n DLOG.error(\"Invalid host subfunctions received, \"\n \"host_subfunctions=%s.\" % subfunctions)\n\n else:\n for callback in self._host_add_callbacks:\n success = callback(host_data['uuid'],\n host_data['hostname'])\n if not success:\n http_response = httplib.BAD_REQUEST\n else:\n http_response = httplib.NO_CONTENT\n\n DLOG.debug(\"Host rest-api post path: %s.\" % request_dispatch.path)\n request_dispatch.send_response(http_response)\n request_dispatch.done()", "title": "" }, { "docid": "b050406683c288043ef3c92c9fcf8179", "score": "0.49684668", "text": "def main():\n run(route.build(RoutingWebSocketRequestHandler,\n {'relay': MessageRelay()}))", "title": "" }, { "docid": "5acaea73959206768153e696e606002e", "score": "0.49585545", "text": "def _invoke_api(self, endpoint, payload, params=None, pub=True):\n\n # base64 encode the payload\n payload = str.encode(json.dumps(payload))\n b64 = base64.b64encode(payload)\n\n # sign the requests\n signature = hmac.new(str.encode(self.API_SECRET), b64, hashlib.sha384).hexdigest()\n\n headers = {\n 'Content-Type': 'text/plain',\n 'X-GEMINI-APIKEY': self.API_KEY,\n 'X-GEMINI-PAYLOAD': b64,\n 'X-GEMINI-SIGNATURE': signature\n }\n\n url = self.BASE_URI + endpoint\n\n # build a request object in case there's an error so we can echo it\n request = {'payload': payload, 'headers': headers, 'url': url}\n\n if not pub:\n # private api methods are POSTs\n response = requests.post(url, headers=headers)\n else:\n response = requests.get(url, headers=headers, params=params)\n\n return self._handle_response(request, response)", "title": "" }, { "docid": "7b8d9e461350a37146483f9ae44fa242", "score": "0.4948684", "text": "def _on_message(self, mqtt_client, userdata, msg):\n\n #print(f\"Debug-on_message: Recieved at {msg.topic}: {msg.payload}\")\n\n # Parse topic\n message_prefix = msg.topic.split('/')[0]\n message_type = msg.topic.split('/')[-1]\n plug_id = '/'.join(msg.topic.split('/')[1:-1])\n\n # Get the right message handler according to topic\n for query, params in self._QUERIES.items():\n if message_prefix == params[\"prefix\"] and message_type == params[\"type\"]:\n params[\"target\"](plug_id, msg.payload.decode(\"utf-8\"))", "title": "" }, { "docid": "82a2f1d34896525908c8213cf8b3dd25", "score": "0.49468654", "text": "def __call__(self, environ, start_response):\n request = self.request(environ)\n response = self.publish(request)\n return response(environ, start_response)", "title": "" }, { "docid": "190048651d14f6d8778f1229343edad8", "score": "0.49428186", "text": "async def adapt(self, request_data, request_args):\n await self.exchange.publish(\n aio_pika.Message(json.dumps({\n 'data': request_data,\n 'args': request_args,\n }).encode('utf-8')),\n routing_key='',\n )\n return {}", "title": "" }, { "docid": "c5a4fa52cf8d05753ecb6fa2b9d1d88d", "score": "0.4942561", "text": "def go_1_handler(self, topic, msg):", "title": "" }, { "docid": "ecd29263166039b710ca131a102af53f", "score": "0.49411464", "text": "def serve(self):\n # load balance\n self.channel.basic_qos(prefetch_count=1)\n self.channel.basic_consume(self._on_request, queue='rpc_queue')\n print(\" [x] Awaiting RPC requests\")\n self.channel.start_consuming()", "title": "" }, { "docid": "e8e258b1b82d5cdc7b3e30cb9fe68027", "score": "0.4924368", "text": "def communication_handler(self):\n self.serverObj = ipcAPI.ipcServer(config.SOCKET_FILE)\n\n 
self.serverObj.start_listening(self.unit_communication_handler)", "title": "" }, { "docid": "62f42f7a934fcca2d81403c6acb976eb", "score": "0.49213067", "text": "def handle_message_received(self, client, userdata, msg):\n topic = msg.topic\n m_decode = str(msg.payload.decode(\"utf-8\", \"ignore\"))\n print(\"[ResponseDecoder] Topics received \", topic)\n print(\"[ResponseDecoder] Message received: \", m_decode)\n\n if topic == \"backend/response\":\n print(\"[ResponseDecoder] Handling backend response...\")\n self.handle_backend_response(m_decode)", "title": "" }, { "docid": "957dec41a5c8bdb08842f83961b6e889", "score": "0.49183133", "text": "def agent_prism_main(args: Args) -> int:\n LOGGING.info(\"setup HTTPS connection..\")\n requester_v1 = HTTPSAuthRequester(\n args.server,\n args.port,\n \"PrismGateway/services/rest/v1\",\n args.username,\n args.password,\n )\n requester_v2 = HTTPSAuthRequester(\n args.server,\n args.port,\n \"PrismGateway/services/rest/v2.0\",\n args.username,\n args.password,\n )\n\n LOGGING.info(\"fetch and write container info..\")\n output_containers(requester_v1)\n\n LOGGING.info(\"fetch and write alerts..\")\n output_alerts(requester_v2)\n\n LOGGING.info(\"fetch and write cluster info..\")\n output_cluster(requester_v2)\n\n LOGGING.info(\"fetch and write storage_pools..\")\n output_storage_pools(requester_v1)\n\n LOGGING.info(\"fetch and write vm info..\")\n output_vms(requester_v1)\n\n LOGGING.info(\"fetch and write hosts info..\")\n output_hosts(requester_v2)\n\n LOGGING.info(\"fetch and write protection domain info..\")\n output_protection(requester_v2)\n\n LOGGING.info(\"fetch and write support info..\")\n output_support(requester_v2)\n\n LOGGING.info(\"fetch and write ha state..\")\n output_ha(requester_v2)\n\n LOGGING.info(\"all done. 
bye.\")\n\n sys.exit(0)", "title": "" }, { "docid": "3774704b49ea9f021d9e07d6ac77f99b", "score": "0.49003142", "text": "def main():\n context = zmq.Context()\n sink = context.socket(zmq.PULL)\n sink.bind(os.environ['ZEBU_PUBLISH'])\n fan = context.socket(zmq.PUB)\n fan.bind(os.environ['ZEBU_SUBSCRIBE'])\n while True:\n received = sink.recv()\n fan.send(received)", "title": "" }, { "docid": "54a3c841601ad3d1d2f7517a3963dc3b", "score": "0.48912877", "text": "def reply_handler(self, msg):\n if self.log_api_messages:\n self.output('message: %s %s ' % (repr(msg.typeName), msg))\n\n if msg.typeName in self.handlers.keys():\n self.handlers[msg.typeName](msg)\n else:\n self.output('unhandled: %s' % msg)", "title": "" }, { "docid": "b454971389167276acae1ca194d376b0", "score": "0.48832962", "text": "def __init__(self):\n self._skeleton = server.TrunkSkeleton()\n self._stub = server.TrunkStub()\n\n LOG.debug(\"RPC backend initialized for trunk plugin\")\n\n for event_type in (events.AFTER_CREATE, events.AFTER_DELETE):\n registry.subscribe(self.process_event,\n resources.TRUNK, event_type)\n registry.subscribe(self.process_event,\n resources.SUBPORTS, event_type)", "title": "" }, { "docid": "55a610653c302695b171a2b122e32939", "score": "0.48813748", "text": "def main():\n with pika.BlockingConnection() as conn:\n channel = conn.channel()\n\n # Set up server\n\n channel.queue_declare(\n queue=SERVER_QUEUE, exclusive=True, auto_delete=True)\n channel.basic_consume(SERVER_QUEUE, on_server_rx_rpc_request)\n\n # Set up client\n\n # NOTE Client must create its consumer and publish RPC requests on the\n # same channel to enable the RabbitMQ broker to make the necessary\n # associations.\n #\n # Also, client must create the consumer *before* starting to publish the\n # RPC requests.\n #\n # Client must create its consumer with auto_ack=True, because the reply-to\n # queue isn't real.\n\n channel.basic_consume(\n 'amq.rabbitmq.reply-to',\n on_client_rx_reply_from_server,\n auto_ack=True)\n channel.basic_publish(\n exchange='',\n routing_key=SERVER_QUEUE,\n body='Marco',\n properties=pika.BasicProperties(reply_to='amq.rabbitmq.reply-to'))\n\n channel.start_consuming()", "title": "" }, { "docid": "74e45b58e8eb993021e138648cead3b9", "score": "0.48644948", "text": "def __call__(environ, router):", "title": "" }, { "docid": "2d74987fcd793a43fbeec4af01e0a3d6", "score": "0.4862388", "text": "def _dispatch_to_kernel(self, msg):\n kernel = self.manager.kernel\n if kernel is None:\n raise RuntimeError('Cannot send request. 
No kernel exists.')\n\n stream = DummySocket()\n self.manager.session.send(stream, msg)\n msg_parts = stream.recv_multipart()\n kernel.dispatch_shell(stream, msg_parts)\n\n idents, reply_msg = self.manager.session.recv(stream, copy=False)\n self.call_handlers_later(reply_msg)", "title": "" }, { "docid": "79986bc60be1334170dbe930152fff54", "score": "0.486075", "text": "def handle_incoming(self, request, reply_using=None):\n raise NotImplementedError", "title": "" }, { "docid": "7242c8976a3cfaf15cc02eae32490245", "score": "0.48574805", "text": "def handle_unicast(self, content, rawmsg):\n self.on_client_message(content['data'], content, rawmsg)", "title": "" }, { "docid": "6e681b9cd6878009d947ae9d72c610a4", "score": "0.48499858", "text": "def publishPressRoomInfrastructure():", "title": "" }, { "docid": "ddb7d4019e37b0247df2abf9af9fbbb7", "score": "0.48324448", "text": "def _process_message(self, request):\n self.logger.debug('_request_handler.')\n\n request['type'] = 'external request'\n request['timestamp'] = util.current_time()\n\n try\n request['key'] = util.get_hash(request['key'])\n except:\n pass\n\n coordinator = ExternalRequestCoordinator(server=self._server, request=request)\n self._coordinators.append(coordinator)\n\n self.logger.debug('_request_handler. coordinator appended: {}'.format(coordinator))", "title": "" }, { "docid": "b46188c2ea8683fd30bb2bf797b4c1c0", "score": "0.48249647", "text": "def handleRequest(self, message):\n\n if \"pv\" not in message:\n log.warn(\"Invalid request: missing 'pv' field\")\n return\n try:\n _, _ = message[\"pv\"].split(\"://\")\n except ValueError:\n log.warn(\"Invalid request: invalid 'pv' field\")\n return False\n\n # Verify mandatory fields\n if \"req\" not in message:\n log.warn(\"Invalid request: missing 'req' field\")\n return\n\n try:\n if message[\"req\"] == \"pv_subscribe\":\n self._reqSubscribe(message)\n elif message[\"req\"] == \"pv_unsubscribe\":\n self._reqUnsubscribe(message)\n elif message[\"req\"] == \"pv_get\":\n self._reqGet(message)\n elif message[\"req\"] == \"pv_put\":\n self._reqPut(message)\n else:\n log.warn(\"Unknown request: {0}\".format(message[\"req\"]))\n except Exception, e:\n log.error(\"Request failed: {0}\".format(str(e)))", "title": "" }, { "docid": "faad6603fc3cf611dbc374d67e6c66f1", "score": "0.48216492", "text": "async def _handle_call(self, msg):\n try:\n handlers = self.route_map[msg.action]\n except KeyError:\n raise NotSupportedError(\n details={\"cause\": f\"No handler for {msg.action} registered.\"}\n )\n\n if not handlers.get(\"_skip_schema_validation\", False):\n validate_payload(msg, self._ocpp_version)\n # OCPP uses camelCase for the keys in the payload. It's more pythonic\n # to use snake_case for keyword arguments. Therefore the keys must be\n # 'translated'. 
Some examples:\n #\n # * chargePointVendor becomes charge_point_vendor\n # * firmwareVersion becomes firmwareVersion\n snake_case_payload = camel_to_snake_case(msg.payload)\n\n try:\n handler = handlers[\"_on_action\"]\n except KeyError:\n raise NotSupportedError(\n details={\"cause\": f\"No handler for {msg.action} registered.\"}\n )\n\n try:\n response = handler(**snake_case_payload)\n if inspect.isawaitable(response):\n response = await response\n except Exception as e:\n LOGGER.exception(\"Error while handling request '%s'\", msg)\n response = msg.create_call_error(e).to_json()\n await self._send(response)\n\n return\n\n temp_response_payload = asdict(response)\n\n # Remove nones ensures that we strip out optional arguments\n # which were not set and have a default value of None\n response_payload = remove_nones(temp_response_payload)\n\n # The response payload must be 'translated' from snake_case to\n # camelCase. So:\n #\n # * charge_point_vendor becomes chargePointVendor\n # * firmware_version becomes firmwareVersion\n camel_case_payload = snake_to_camel_case(response_payload)\n\n response = msg.create_call_result(camel_case_payload)\n\n if not handlers.get(\"_skip_schema_validation\", False):\n validate_payload(response, self._ocpp_version)\n\n await self._send(response.to_json())\n\n try:\n handler = handlers[\"_after_action\"]\n # Create task to avoid blocking when making a call inside the\n # after handler\n response = handler(**snake_case_payload)\n if inspect.isawaitable(response):\n asyncio.ensure_future(response)\n except KeyError:\n # '_on_after' hooks are not required. Therefore ignore exception\n # when no '_on_after' hook is installed.\n pass", "title": "" }, { "docid": "6531a935e1ded2607ee94950b3046439", "score": "0.4814897", "text": "def subscriber_dispatcher(self, sub_data):\n\n log = Logger()\n\n try:\n\n result = IDispatcher(Validator(sub_data)).dispatch()\n\n # NETODO what exception can happen here?\n except Exception as e:\n\n import traceback\n print traceback.format_exc()\n\n log.warning('subscriber_dispatcher exception: {}'.format(\n e.message\n ))\n\n else:\n\n if IValidator.providedBy(result):\n log.warning('WAMP Message is invalid: {}'.format(result.message))\n\n if result is not False and IJSONResource.providedBy(result):\n\n fac = None\n\n for sub in subscribers([result], IUserGlobalSubscriber):\n\n sub.subscribe(self)\n fac = True\n\n break\n\n if not fac:\n log.warning('There are no user definition for IUserGlobalSubscriber, message was skipped')", "title": "" }, { "docid": "e3f909c6928ea0158fcf9d2ed907b0a7", "score": "0.48111016", "text": "def onstart(self, sender, **kwargs):\n #Example publish to pubsub\n #self.vip.pubsub.publish('pubsub', \"some/random/topic\", message=\"HI!\")\n\n #Exmaple RPC call\n #self.vip.rpc.call(\"some_agent\", \"some_method\", arg1, arg2)\n while True:\n for i in range(24*4):\n if i%4 == 0:\n self.set_weather_factor()\n\n profile = self.simulate_solar_profile(i)\n request = {\n 'device': 'SolarAgent',\n 'profile': list(profile),\n 'timeout': 0,\n 'id': 0\n }\n self.vip.pubsub.publish('pubsub', \"devices/AGH/D17/Panel/all\", message=\n [{'moc': profile[0],\n 'czas': i/4 },{'moc':{'type':'float','tz':'US/Pacific','units':'Watt'},\n 'czas':{'type':'float','tz':'US/Pacific','units':'Hours'}}])\n\n self.vip.pubsub.publish('pubsub', \"devices/AGH/D17/Panel/profile\", message=[request])\n time.sleep(1)\n\n\n\n\n while True:\n for hour in range(24):\n for minutes in range(0,60,15):\n self.vip.pubsub.publish('pubsub', 
\"devices/AGH/D17/Panel/all\", message=\n [{'moc': self.clipped_power(hour+minutes/60, 0),\n 'czas': hour+minutes/60 },{'moc':{'type':'float','tz':'US/Pacific','units':'Watt'},\n 'czas':{'type':'float','tz':'US/Pacific','units':'Hours'}}])\n time.sleep(0.1)", "title": "" }, { "docid": "22a2583893e4d0f32f42caf0df158c23", "score": "0.47943398", "text": "def subscribe(receiver, catchup):", "title": "" }, { "docid": "5c464e42a5185c25142648b41ac904bc", "score": "0.47856456", "text": "def handle(self, msg):\n if msg.chan == msg.bot.nickname:\n msg.reply_to = msg.user.name\n else:\n msg.reply_to = msg.chan\n if msg.keyword in self.plugins:\n answer = self.plugins[msg.keyword].privmsg(msg)\n self._reply(msg, answer)\n elif msg.keyword == 'help':\n args = [x for x in msg.params.split(' ') if x]\n if len(args) > 0 and args[0] in self.help_topics:\n plugin = args[0]\n if len(args) > 1:\n if args[1] in self.help_topics[plugin]:\n self._reply(msg, self.plugins[plugin].help(args[1]))\n else:\n self._reply(msg, self.plugins[plugin].help())\n else:\n self._reply(msg, _('Help topics: ')\n + ', '.join(self.help_topics.keys()))\n for listener in self.listeners:\n answer = listener._listen_(msg)\n self._reply(msg, answer)", "title": "" }, { "docid": "ada52f526fd29963c7c0b7ff198654cd", "score": "0.47820812", "text": "def _client_handler(self, c, ca, uid):\n while True:\n send_json_message(c, { 'Response' : 'Success', 'Message' : 'Send UID with whom youd like to communicate' } )\n\n # Receive UID with whom to communicate\n uid_msg = receive_json_message(c)\n if uid_msg is None:\n print(f'{uid}:{ca} has disconnected')\n self._connections.remove((c, ca, uid))\n break\n print(f'\\nClient {uid}:{ca} wishes to communicate with {uid_msg[\"UID\"]}')\n\n # Establish P2P connection\n if self._establish_p2p(c, ca, uid, uid_msg['UID']):\n break", "title": "" }, { "docid": "693a957dd1564a8de402a6c91776553d", "score": "0.47814026", "text": "def handle_publish(client, userdata, mid):\n if mid not in messagesInTransit:\n logging.error(\"Message with mid: {} was not in transit but was \"\n \"passed to handle_publish.\")\n return\n topic, payload = messagesInTransit[mid]\n if topic == TOPIC_DISCOVERY:\n logging.info(\"Successfully published to discovery.\")\n del messagesInTransit[mid]", "title": "" }, { "docid": "b780f1b02edd6da25fd2344a7fbff9c9", "score": "0.47772124", "text": "def handle_response(self, payload):\n if \"msgid\" in payload:\n msgid = payload[\"msgid\"]\n if msgid in self.connections:\n if \"cmd\" in payload and \"header\" in payload and \"data\" in payload:\n cmd = payload[\"cmd\"]\n if cmd == \"httpresp\":\n self.send_response(msgid, payload[\"header\"], payload[\"data\"], True)\n return\n elif cmd == \"logresp\":\n self.send_response(msgid, payload[\"header\"], payload[\"data\"], False)\n return\n elif cmd == \"logevent\":\n result = self.send_response(msgid, payload[\"header\"], payload[\"data\"], False)\n if not result:\n msg = {\"cmd\": \"logclose\"}\n self.tunnel.send(msg)\n return\n _log.error(\"Unknown control proxy response %s\" % payload)", "title": "" }, { "docid": "0a6f45a721aec86dbc8ffaeb2a06f0bf", "score": "0.4775482", "text": "def on_msg(self, *args, **kwargs):", "title": "" }, { "docid": "75651fe51b012dfb7b8ee7fd4075baca", "score": "0.47726318", "text": "def _request_handler(self, request):\n self.logger.debug('_request_handler')\n self.logger.debug('_request_handler. 
request: {}'.format(request))\n self._server.internal_request_stage.handle_internal_message(message=request, reply_listener=self._reply_listener)\n self.logger.debug('_request_handler. handle_internal_message called.')\n while True:\n yield self.completed", "title": "" }, { "docid": "ab585f0e04fa45935c57e8f6dba39afe", "score": "0.47700673", "text": "def process_oms_request():\n\n if isinstance(request.json, list):\n # Log the list of Alert & Alarm messages from the OMS Event\n for alert_alarm_dict in request.json:\n aa_publisher.enqueue(alert_alarm_dict)\n log.info('oms_alert_alarm_server: OMS_AA_MSG: %r', alert_alarm_dict)\n\n # Publish the list of Alert & Alarm messages to qpid\n aa_publisher.publish()\n\n else:\n log.error('No data in the POSTed alert/alarm OMS Event ...')\n\n return '', httplib.ACCEPTED", "title": "" }, { "docid": "7049cf1192b219f3dc05200606b35f42", "score": "0.47664332", "text": "def handle_receiver(self):\n pass", "title": "" }, { "docid": "17e23322744090dc8bad4c374e4cf91f", "score": "0.47596255", "text": "def callback(message):\n payload = json.loads(message.data)\n\n print('Received message:')\n pprint.pprint(payload)\n print()\n\n ack = False\n if 'entitlement' in payload:\n ack = procurement.handle_entitlement_message()\n elif 'account' in payload:\n ack = procurement.handle_account_message(payload['account'])\n else:\n # If there's no account or entitlement, then just ack and ignore the\n # message. This should never happen.\n ack = True\n\n if ack:\n message.ack()", "title": "" }, { "docid": "0cf6b09a720e7862e58fb0127d294811", "score": "0.47526675", "text": "def reply_handler(msg):\r\n print \"Server Response: %s, %s\" % (msg.typeName, msg)", "title": "" }, { "docid": "099e1a92370b9c9cfa33061b14859c94", "score": "0.473368", "text": "def _redis_pubsub_listener(self):\n for msg in self._redis_pubsub.listen():\n # We may receive 'subscribe'-type messages to indicate that someone\n # has subscribed. 
Make sure that we only react to 'message' type messages\n # so that we don't trigger the panic event when other processes merely subscribe!\n if msg['type'] == 'message':\n # Abort all polling events by triggering the panic event.\n self.panic_event.set()\n # Alter the status.\n self.status = FlightStatus.PANICKED", "title": "" }, { "docid": "8332fc469f8b317b9b8fbc238bd05ae3", "score": "0.47328022", "text": "def _setup_pub_sub(self):\n # for publishing joint speed to real robot\n self._joint_vel_pub = rospy.Publisher(\n self.cfgs.ARM.JOINT_SPEED_TOPIC,\n JointTrajectory,\n queue_size=2\n )\n\n self._urscript_pub = rospy.Publisher(\n self.cfgs.ARM.URSCRIPT_TOPIC,\n String,\n queue_size=10\n )\n # This is necessary as set_tcp command\n # needs to use the publisher\n time.sleep(1)", "title": "" }, { "docid": "79807a098ede25e57b9d90f4ec201454", "score": "0.4719768", "text": "def __init__(self, subscriber, variables, event_delay, cargo):\r\n log.debug(\"event message\")\r\n\r\n if not variables:\r\n log.error(\"There are no variables to send\")\r\n return\r\n\r\n self.cargo = cargo\r\n\r\n headers = {}\r\n# headers[\"HOST\"] = subscriber.delivery_url\r\n headers[\"HOST\"] = subscriber.host\r\n headers[\"CONTENT-TYPE\"] = 'text/xml'\r\n headers[\"NT\"] = 'upnp:event'\r\n headers[\"NTS\"] = 'upnp:propchange'\r\n headers[\"SID\"] = \"uuid:\" + str(subscriber.subscription_id)\r\n headers[\"SEQ\"] = str(subscriber.event_key)\r\n subscriber.event_key_increment()\r\n\r\n event_body = self._build_message_body(variables)\r\n\r\n headers[\"CONTENT-LENGTH\"] = str(len(event_body))\r\n\r\n log.debug(\"Running http call\")\r\n run_async_call(http_call, success_callback=self.response,\r\n error_callback=self.error, delay=event_delay,\r\n method='NOTIFY', url=subscriber.delivery_url,\r\n body=event_body, headers=headers)", "title": "" }, { "docid": "df828e186fdf83c2f2f2de2bad373922", "score": "0.47143328", "text": "async def handle_message(self, event_msg):\n msg_logger = _utils.GEventMessageLogger(\n self._logger, {'msg_id': event_msg.msg_id})\n msg_logger.info('Publisher received new message.')\n\n if event_msg.data['resourceRecords']:\n for resource_record in event_msg.data['resourceRecords']:\n record_name = resource_record['name']\n if not record_name.endswith('.' 
+ self.config['dns_zone']):\n msg = ('Error when asserting zone for record: '\n f'{record_name}.')\n msg_logger.error(msg)\n raise exceptions.InvalidDNSZoneInMessageError(msg)\n\n await self._dispatch_changes(\n resource_record, self.config['dns_zone'],\n event_msg.data['action'], msg_logger)\n else:\n msg = ('No records published or deleted as no resource records were'\n ' present.')\n msg_logger.info(msg)\n event_msg.append_to_history(msg, self.phase)", "title": "" }, { "docid": "c4b9c116aaeb9cbb15009f1b49d08ffe", "score": "0.47110695", "text": "def __init__(self):\n super().__init__('hydrophone_processor_aserver')\n self.action_server = ActionServer(\n self,\n Hydrophoneraw,\n 'hydrophoneprocessor',\n self.execute_callback\n )\n self.c_one_publisher = self.create_publisher(Int32MultiArray, 'hydrophonepeakcone', 10)\n self.c_two_publisher = self.create_publisher(Int32MultiArray, 'hydrophonepeakctwo', 10)\n self.c_three_publisher = self.create_publisher(Int32MultiArray, 'hydrophonepeakcthree', 10)\n self.raw_data = ''\n self.header_size = 17\n self.converted_data = ''\n self.data_range = 0\n self.check_signum", "title": "" }, { "docid": "08be8935f2816f6fce9080c7c5a8a30f", "score": "0.47105837", "text": "def on_pubmsg(self, connection, event):\n for message in event.arguments():\n self.log(event, message)\n args = filter(None, message.split())\n name = args.pop(0)\n for command in self.events[\"command\"]:\n if command.event[\"args\"][0] == name:\n self.on_command(event, command, args)", "title": "" }, { "docid": "1403cbdf945776ce99abe4d345fbe396", "score": "0.47103208", "text": "def handle_request(self, actor_ids=None):\n if self.server.pending_connections:\n addr, conn = self.server.accept()\n msg_id = calvinuuid.uuid(\"MSGID\")\n self.connections[msg_id] = conn\n _log.debug(\"New connection msg_id: %s\" % msg_id)\n\n for msg_id, connection in self.connections.items():\n if connection.data_available:\n command, headers, data = connection.data_get()\n _log.debug(\"CalvinControlTunnel handle_request msg_id: %s command: %s\" % (msg_id, command))\n msg = {\"cmd\": \"httpreq\",\n \"msgid\": msg_id,\n \"command\": command,\n \"headers\": headers,\n \"data\": data}\n self.tunnel.send(msg)", "title": "" }, { "docid": "c3692388d2f820a8fa3ba54b3fb85f86", "score": "0.47091073", "text": "def execute_callback(self, goal_handle):\n self.get_logger().info('Client request recieved...')\n self.subscription = self.create_subscription(\n String,\n 'hydrophonerawdata',\n self.listener_callback,\n 10\n )\n self.get_logger().info('Subscription created...')\n if not self.raw_data:\n self.get_logger().info('No data received, run hydrophone streamer action server!')\n else:\n self.convert_hydro_data()\n self.get_logger().info('Data converted')\n channel_tuple = self.isolate_sample()\n self.get_logger().info('Samples Isolated.')\n peakone, peaktwo, peakthree = self.peak_detect(channel_tuple)\n self.get_logger().info('Peaks detected. 
Publishing...')\n \n msg_one = Int32MultiArray()\n msg_two = Int32MultiArray()\n msg_three = Int32MultiArray()\n msg_one.data = peakone\n msg_two.data = peaktwo\n msg_three.data = peakthree\n self.c_one_publisher.publish(msg_one)\n self.c_two_publisher.publish(msg_two)\n self.c_three_publisher.publish(msg_three)\n self.get_logger().info('Channels published.')\n \n\n self.get_logger().info('sample One = {0}'.format(channel_tuple))\n \n\n\n result = Hydrophoneraw.Result()\n return result", "title": "" }, { "docid": "f59ab71a62a6a3e6ab7a8b0d8e9efb7f", "score": "0.4704909", "text": "def handle(self):\r\n global auth_dict, nonce_dict, crypto_service, password_hash_dict, user_addr_dict, chatting_service\r\n msg = self.request[0]\r\n sock = self.request[1]\r\n # get auth instance for client\r\n if self.client_address not in auth_dict:\r\n try:\r\n _, msg_parts = PacketOrganiser.process_packet(msg)\r\n except:\r\n return\r\n if msg_parts[0] != c.GREETING:\r\n return\r\n # new client, create an auth entry in the auth dictionary\r\n auth_dict[self.client_address] = Authentication(self.client_address, crypto_service,\r\n password_hash_dict)\r\n else:\r\n auth = auth_dict[self.client_address]\r\n if not PacketOrganiser.isValidTimeStampSeconds(auth.timestamp,c.KEEP_ALIVE_TIME):\r\n auth_dict.pop(self.client_address)\r\n\r\n cur_auth = auth_dict[self.client_address]\r\n assert isinstance(cur_auth, Authentication)\r\n rep = None\r\n if not cur_auth.is_auth():\r\n rep = cur_auth.process_request(msg, user_addr_dict)\r\n\r\n else:\r\n # get decrypted msg\r\n dec_msg = crypto_service.sym_decrypt(cur_auth.dh_key, msg)\r\n n, msg_ps = PacketOrganiser.process_packet(dec_msg)\r\n auth_dict[self.client_address].timestamp = PacketOrganiser.get_new_timestamp() # update timestamp\r\n rep = chatting_service.get_response(self.client_address, msg_ps)\r\n if rep is not None:\r\n rep = PacketOrganiser.prepare_packet(rep, n)\r\n rep = crypto_service.sym_encrypt(cur_auth.dh_key, rep)\r\n\r\n try:\r\n if rep is not None:\r\n sys.stdout.flush()\r\n sock.sendto(rep, self.client_address)\r\n elif cur_auth.is_auth():\r\n cur_auth.loginfailures += 1\r\n except socket.error:\r\n print(c.FAIL_MSG_FWD)\r\n return", "title": "" }, { "docid": "e26addec01820d70d0ff5a76f4e1e169", "score": "0.46985966", "text": "def dispatch(self, **url_args): # pragma: no cover\n\n # fallback to standard dispatch\n if self.realtime.hint not in self.environ:\n return super(RealtimeHandler, self).dispatch(**url_args)\n\n try:\n # websocket upgrade and session\n self.__socket__ = self.realtime.on_connect(self)\n self.realtime.on_message(self, self.__socket__)\n\n except NotImplementedError:\n return self.error(400) # raised when a non-websocket handler is hit", "title": "" }, { "docid": "dc356a3256cd0a3e245c98024d60237f", "score": "0.4698371", "text": "def on_public_message(self, client, userdata, msg):\n\n # pi('on_public_message')\n\n # LOGGER.info('[MosquittoClient] Received message with mid : %s from topic : %s with qos : %s and retain = %s ' % (str(msg.mid), msg.topic, str(msg.qos), str(msg.retain)))\n\n json_decoded_body = json.loads(msg.payload)\n stage = json_decoded_body['stage']\n\n if stage == 'new_participant':\n \n # print '[MosquittoClient] Received stage == new_participant'\n\n if json_decoded_body['msg']['clientid'] != self._clientid: \n\n # print '[MosquittoClient] received stage == new_participant with != self._clientid, thus subscribing to its private status'\n \n topic_list = ('private/' + str(json_decoded_body['msg']['clientid']) + 
'/status', 2)\n \n # print 'MosquittoClient] topic_list to be sent : ', topic_list\n \n # subscribe the new participant's status topic\n self.subscribe(topic_list=topic_list)\n \n else:\n\n\n # pr('on_public_message')\n\n return\n\n\n\n if stage == 'stop' and self._clientid == json_decoded_body['msg']['clientid']:\n\n # print '[MosquittoClient] received stage == stop with == self._clientid, thus sending offline status to subscribers'\n \n # LOGGER.info('[MosquittoClient] skipping sending message to websocket since webscoket is closed.')\n # LOGGER.info('[MosquittoClient] initating closing of rabbitmq Client Connection...')\n\n # avoid sending the message to the corresponding websocket, since its already cloesed. \n # rather sending the offline status message to the subscribers of its private/status topic\n self.send_offline_status()\n\n else:\n # print '[MosquittoClient] received stage != new_participant and != self._clientid, thus sendimg msg to corresponding websocket'\n \n # LOGGER.info('[MosquittoClient] sending the message to corresponsding websoket: %s ' % self.websocket)\n\n self.sendMsgToWebsocket(json_decoded_body)\n\n\n # pr('on_public_message')", "title": "" }, { "docid": "83fea3876494a6512ab24282ecfef9cc", "score": "0.46982637", "text": "def tunnel_recv_handler(self, payload):\n if \"cmd\" in payload:\n if payload[\"cmd\"] == \"httpreq\":\n try:\n self.calvincontrol.route_request(\n payload[\"msgid\"], None, payload[\"command\"], payload[\"headers\"], payload[\"data\"])\n except:\n _log.exception(\"FIXME! Caught exception in calvincontrol when tunneling.\")\n self.calvincontrol.send_response(payload[\"msgid\"], None, None, status=calvinresponse.INTERNAL_ERROR)\n elif payload[\"cmd\"] == \"started\":\n self.calvincontrol.node.external_control_uri = payload[\"controluri\"]\n self.calvincontrol.node.storage.add_node(self.calvincontrol.node)\n return\n elif payload[\"cmd\"] == \"logclose\":\n self.calvincontrol.close_log_tunnel(payload[\"msg_id\"])\n return\n _log.error(\"Tunnel client received unknown command %s\" % payload['cmd'] if 'cmd' in payload else \"\")", "title": "" }, { "docid": "95d86d49a58c5fad10e05b7b97a2c59e", "score": "0.46912205", "text": "def on_message(self, unused_channel, basic_deliver, properties, body):\n response = {\"return_code\": 200, \"msg\": \"Message received\"}\n bad_data = False\n data = None\n\n EVENT_LOGGER.info('[1][RPC]Received message # %s from %s: %s',\n basic_deliver.delivery_tag, properties.app_id, body)\n # response = \"Got the message, ok from server\" + body\n try:\n data = simplejson.loads(body)\n except Exception, e:\n EVENT_LOGGER.info(\"[1e][RPC]Exception onMessage arg %s\" % e)\n LOGGER.error(\"Non-JSON message received, ignoring\")\n bad_data = True\n response = {\"return_code\": 400, \"msg\": \"Non-JSON message received, ignoring\"}\n\n if not bad_data:\n try:\n method = data[\"method\"]\n LOGGER.info(\"method is %s\", method)\n arg = data[\"arg\"]\n LOGGER.info(\"argument is %s\", arg)\n response = {\"return_code\": 500, \"msg\": \"Method not found\"}\n try:\n rpcservice = RPCEndpoint(None, None)\n func = getattr(self, method)\n if callable(func):\n EVENT_LOGGER.info(\"[2][RPC] calling method %s\" % method)\n response = func(arg)\n else:\n EVENT_LOGGER.info(\"[2][RPC] Exception onMessage -MO+_SUCH_METHOD\")\n response = {\"return_code\": 500, \"msg\": \"NoSuchMethod exception occurred \"}\n except Exception, e:\n LOGGER.info(\"exception occurred calling method %s\" % e)\n EVENT_LOGGER.info(\"[2e][RPC] Exception onMessage 
-MO+_SUCH_METHOD %s\" % e)\n except Exception, e:\n LOGGER.error(\"Improperly formatted message received, ignoring\")\n EVENT_LOGGER.info(\"[2e][RPC] Exception onMessage Improperly formatted messag\")\n bad_data = True\n response = {\"return_code\": 400, \"msg\": \"Improperly formatted message received, ignoring\"}\n\n # get function name and args from payload\n LOGGER.info(\"reply to %s\", properties.reply_to)\n unused_channel.basic_publish(exchange=\"\",\n routing_key=properties.reply_to,\n properties=pika.BasicProperties(correlation_id=properties.correlation_id),\n body=simplejson.dumps(response))\n\n LOGGER.info('Received message # %s from %s: %s',\n basic_deliver.delivery_tag, properties.app_id, body)\n self.acknowledge_message(basic_deliver.delivery_tag)", "title": "" }, { "docid": "c8a5c3e11c10b0da0bd8b6f0db95d641", "score": "0.46904498", "text": "async def client(inet_addr='127.0.0.1', inet_port=7000):\n\n uri = f\"ws://{inet_addr}:{inet_port}\"\n\n ws = await websockets.connect(uri)\n\n iface_buf = await ws.recv()\n pubsub_channels_buf = await ws.recv()\n\n iface = unpack(iface_buf)\n pubsub_channels = unpack(pubsub_channels_buf)\n\n id_ = 0\n invoke_events = {}\n subscriptions = {}\n\n async def impl_transport(path, args):\n nonlocal id_\n id_here = id_\n id_ += 1\n event = asyncio.Event()\n invoke_events[id_here] = {\n 'event': event,\n }\n cmd = {\n 'type': 'invoke',\n 'id': id_here,\n 'path': path,\n 'args': args,\n }\n cmd = pack(cmd)\n await ws.send(cmd)\n await event.wait()\n message = invoke_events[id_here]['result_message']\n del invoke_events[id_here]\n\n if 'val' in message:\n return message['val'] # Standard return value\n\n elif 'exception' in message:\n error_text = message['exception']\n # TODO: Rebuild the exception more precisely.\n raise Exception(error_text)\n\n elif 'iface' in message:\n sub_iface = message['iface']\n _, sub_proxy_iface = build_interface(sub_iface, impl_transport)\n return sub_proxy_iface\n\n else:\n raise Exception('Unknown response message...')\n\n return val\n\n _, proxy_interface = build_interface(iface, impl_transport)\n\n async def read_ws():\n try:\n while True:\n message = await ws.recv()\n message = unpack(message)\n type_ = message['type']\n if type_ == 'invoke_result':\n id_ = message['id']\n if id_ in invoke_events:\n invoke_events[id_]['result_message'] = message\n event = invoke_events[id_]['event']\n event.set()\n elif type_ == 'publish':\n channel = message['channel']\n payload = message['payload']\n callbacks = subscriptions.get(channel, [])\n tasks = []\n for c in callbacks:\n task = asyncio.create_task(c(channel, payload))\n tasks.append(task)\n await asyncio.wait(tasks)\n\n except websockets.exceptions.ConnectionClosed:\n pass\n\n asyncio.create_task(read_ws())\n\n async def subscribe_func(channel, callback):\n if channel not in subscriptions:\n subscriptions[channel] = {callback}\n await ws.send(pack({\n 'type': 'subscribe',\n 'channel': channel,\n }))\n\n else:\n subscriptions[channel].add(callback)\n\n async def unsubscribe_func():\n subscriptions[channel].remove(callback)\n\n if len(subscriptions[channel]) == 0:\n del subscriptions[channel]\n await ws.send(pack({\n 'type': 'unsubscribe',\n 'channel': channel,\n }))\n\n return unsubscribe_func\n\n async def close():\n await ws.close()\n\n return proxy_interface, pubsub_channels, subscribe_func, close", "title": "" }, { "docid": "0952960462e993f33020779372c749ff", "score": "0.4681066", "text": "def __trigger__(self, data):\n\n # Attempt to serialise input data.\n #\n # 
Note: Errors encountered during recieve will likely occur in\n # the I/O loop of a network interface - and cannot be\n # caught here.\n try:\n data['payload'] = message_type(data['payload'])\n super(MessageListener, self).__trigger__(data)\n except: # pragma: no cover\n raise", "title": "" }, { "docid": "e424e879f8b8f438acd9705dd46dd858", "score": "0.4664852", "text": "def __call__(self, req):\n LOG.debug(\"APIDispatcher dispatch the request to remote host: (%s), \"\n \"port: (%d)\" % (self._remote_host, self._remote_port))\n utils.set_request_forward_environ(req, self._remote_host,\n self._remote_port)\n return self.app", "title": "" }, { "docid": "0847552b06ecf4d4889594e428c478cb", "score": "0.4664241", "text": "def app_pman_send(self, *args, **kwargs):\n\n d_msg = {}\n\n for k,v in kwargs.items():\n if k == 'msg': d_msg = v\n\n # pudb.set_trace()\n\n str_http = '%s:%s' % (settings.PMAN['host'], settings.PMAN['port'])\n\n str_debugFile = '%s/tmp/debug-purl.log' % os.environ['HOME']\n if self.str_debugFile == '/dev/null':\n str_debugFile = self.str_debugFile\n\n purl = pfurl.Pfurl(\n msg = json.dumps(d_msg),\n http = str_http,\n verb = 'POST',\n contentType = 'application/vnd.collection+json',\n b_raw = True,\n b_quiet = self.b_quiet,\n jsonwrapper = 'payload',\n debugFile = str_debugFile,\n useDebug = self.b_useDebug\n )\n\n # speak to pman...\n d_response = json.loads(purl())\n return d_response", "title": "" }, { "docid": "f42063d8d496a7d1db4983ed58f87783", "score": "0.46641576", "text": "def handle_lower_arm(mqtt_sender):\n print('lower arm')\n mqtt_sender.send_message(\"lower_arm\")", "title": "" }, { "docid": "f7e834cd59bca3c856d495902074d5c4", "score": "0.4662858", "text": "async def handle_protocol_event(self, event):\n if event[\"type\"] == 'application':\n await self.parse_application_data(event[\"data\"])\n elif event[\"type\"] == \"negotiate\":\n await self.execute_iac_negotiation(event[\"command\"], event[\"option\"])\n elif event[\"type\"] == \"command\":\n await self.execute_iac_command(event[\"command\"])\n elif event[\"type\"] == \"subnegotiate\":\n await self.sub_negotiate(event[\"option\"], event[\"data\"])\n else:\n print(f\"how the heck did we get here? 
Unknown Event Type: {event['data']}\")", "title": "" }, { "docid": "4cae8776838343efb751f1981384dcfd", "score": "0.46602744", "text": "def run(self):\n # Create endpoints\n self._incoming_endpoint = self._build_incoming_endpoint(self.context)\n self._outgoing_endpoint = self._build_outgoing_endpoint(self.context)\n\n logger.info(f\"\"\"Started server, waiting for messages\n Topics: {self.incoming_topic} --> {self.outgoing_topic}\n Ports: {self.incoming_port} --> {self.outgoing_port}\"\"\")\n while True:\n message = self._receive_message()\n prediction = self._handle_incoming_message(message)\n self._publish_message(prediction)", "title": "" }, { "docid": "42c860ac53bde32405c84e79f92b5585", "score": "0.4658804", "text": "def dispatch(self, handler):\n\t\tif not ipaddress.ip_address(handler.client_address[0]).is_loopback:\n\t\t\treturn\n\t\tprefix = '/'\n\t\tif self.config.get('server.vhost_directories'):\n\t\t\tprefix += handler.vhost + '/'\n\t\trequest_path = handler.path\n\t\tif request_path.startswith(prefix):\n\t\t\trequest_path = request_path[len(prefix):]\n\t\t\tif request_path == '_/ws/events/json':\n\t\t\t\tEventSocket(handler, self)\n\t\t\t\treturn\n\t\thandler.respond_not_found()\n\t\treturn", "title": "" }, { "docid": "54621f64372a62258211c8e1ca6f4bd9", "score": "0.46497595", "text": "def _handle_request(self):\n # broadcast/unicast frame ?\n if self.serial_w.request.slave_addr == 0 and self.allow_bcast:\n # if config allow it, process a broadcast request (=> process it, but don't respond)\n self.mbus_cli.custom_request(self.serial_w.request.pdu)\n elif self.serial_w.request.slave_addr == self.slave_addr:\n # process unicast request\n resp_pdu = self.mbus_cli.custom_request(self.serial_w.request.pdu)\n # if no error, format a response frame\n if resp_pdu:\n # regular response from Modbus/TCP client\n self.serial_w.response.build(raw_pdu=resp_pdu, slave_addr=self.serial_w.request.slave_addr)\n else:\n # exception response\n exp_pdu = struct.pack('BB', self.serial_w.request.function_code + 0x80,\n EXP_GATEWAY_TARGET_DEVICE_FAILED_TO_RESPOND)\n self.serial_w.response.build(raw_pdu=exp_pdu, slave_addr=self.serial_w.request.slave_addr)", "title": "" }, { "docid": "5755ac2aebac1ca396c0102f867f3c26", "score": "0.46456254", "text": "def bootstrap():\n\tp.subscribe(**{ \n\t\t'perseus:process_finished': process_finished,\n\t\t'test:popen': test_popen,\n\t\t'test:receive': test_receive\n\t})\n\n\ttry:\n\t\tprint \"Starting redis listener thread...\"\n\t\tthread.start_new_thread(check_redis_messages, ())\n\texcept Exception as e:\n\t\tprint e\n\t\tprint \"Error: Cannot start pubsub thread\"", "title": "" }, { "docid": "f35365ada0525f3b682a465685598d8b", "score": "0.46420723", "text": "def packet_in_handler(self, ev):\n msg = ev.msg\n\n req_pkt = packet.Packet(msg.data)\n req_igmp = req_pkt.get_protocol(igmp.igmp)\n if req_igmp:\n self._querier.packet_in_handler(req_pkt, req_igmp, msg)", "title": "" }, { "docid": "5208270978224a1741a7255db4462079", "score": "0.4640691", "text": "def main():\n import args\n import logs\n import message\n\n ap = args.get_parser()\n ap.add_argument('--clean', action=\"store_true\",\n help='Verify message format and add standard fields such as embersId.')\n ap.add_argument('--cat', action=\"store_true\",\n help='Write all published messages to stdout.')\n arg = ap.parse_args()\n logs.init(arg)\n init(arg)\n assert arg.sub or arg.pub, \"Need to subscribe or publish to something.\"\n\n # need to use the raw/utf handler unless we are doing clean\n marshal = 
UnicodeMarshal()\n if arg.clean:\n marshal = JsonMarshal()\n \n subq = None\n if arg.sub: # read a queue\n subq = open(arg.sub, 'sub', marshal=marshal, ssh_key=arg.ssh_key, ssh_conn=arg.tunnel)\n else: # read stdin\n subq = StreamQueue(sys.stdin, marshal=marshal)\n\n pubq = None\n if arg.pub: # send to queue\n pubq = open(arg.pub, 'pub', capture=arg.cat, marshal=marshal)\n else: # send to stdout\n pubq = StreamQueue(sys.stdout, mode='w', marshal=marshal)\n\n rc = 0\n try:\n it = subq.__iter__()\n while True:\n m = ''\n try:\n m = it.next()\n if arg.clean:\n m = message.clean(m)\n \n if m:\n pubq.write(m)\n \n except StopIteration:\n break\n\n except KeyboardInterrupt:\n break\n\n except:\n rc += 1\n if m:\n log.exception('Could not process message %s' % (m,))\n else:\n log.exception('Unknown processing error')\n\n except KeyboardInterrupt:\n pass\n\n except:\n rc = 1\n log.exception('Top level exception')\n\n return rc", "title": "" }, { "docid": "85b6009bfd7e01ddf1dfec26cf83149f", "score": "0.46373102", "text": "def main(request):\n messageJSON = _getMessageJSON(request)\n\n debug=messageJSON.get('debug', None)\n if debug is not None: _logger.setLevel(debug)\n\n _logger.info('Trigger message received is ' + json.dumps(messageJSON))\n\n pathInBucket = messageJSON.get('path', None)\n\n bucket = messageJSON.get('bucket', None)\n if bucket == '': bucket = None\n if bucket is not None:\n _logger.info('Using gs://{bucket}{pathInBucket} for storing records.'.format(bucket=bucket,\n pathInBucket='' if pathInBucket is None else '/' + pathInBucket))\n\n projectId = messageJSON.get('projectId', '')\n if projectId == '': projectId = None\n\n topic = messageJSON.get('topic', '')\n if topic == '': topic = None\n if topic is not None:\n if projectId is None:\n _logger.error('Must include a project ID if you include a topic.')\n return 'Error attempting to access Pub/Sub topic with no project ID.'\n _logger.info('Will submit to {topic}.'.format(topic=topic))\n\n # Grab the expected parameters from the message the function received.\n parameters=dict(filter(lambda key_value:key_value[1] is not None,\n map(lambda field:(field,messageJSON.get(field,None)),_expectedFieldsInFunctionCall)\n ))\n \n # Access API.\n _logger.info('Calling {url} with {params}.'.format(url=_url,params=str(parameters)))\n\n headers = {\n 'Accepts': 'application/json',\n }\n\n data=None\n try:\n session = Session()\n session.headers.update(headers)\n response = session.get(_url, params=parameters)\n data = json.loads(response.text)\n except:\n _logger.error('Error retrieving data.',exc_info=True,stack_info=True)\n \n if data is not None:\n processor=DataProcessor(projectId=projectId,topic=topic,bucket=bucket,path=pathInBucket,debug=debug)\n numWritten,numPublished=processor.process(data)\n _logger.info('Wrote {numWritten:d} records to gs://{bucket}/{path}, published {numPublished:d} messages to {topic}.'.format(\n numWritten=numWritten,\n numPublished=numPublished,\n bucket=bucket,\n path=pathInBucket,\n topic=topic\n ))\n\n return json.dumps(messageJSON)+' completed.'", "title": "" }, { "docid": "410514f328b772acae2aed6a820546f2", "score": "0.4635063", "text": "def subscribe():\n with INSTANCE_ENVIRONMENT():\n bind_port = env.get('API_BIND_HOST_PORT')\n test_bind_port = env.get('TEST_CALLBACK_SERVER_BIND_HOST_PORT')\n url = f'http://localhost:{bind_port}/messages/subscriptions/by_jurisdiction'\n callback_url = f'http://host.docker.internal:{test_bind_port}/callback'\n jurisdiction = env.get('JURISDICTION')\n\n data = {\n 'hub.mode': 
'subscribe',\n 'hub.callback': callback_url,\n 'hub.topic': jurisdiction\n }\n print(f\"url: {url}\")\n print(\"data:\")\n pprint(data)\n response = requests.post(url, data=data)\n if response.status_code != 202:\n print(f\"subscribe failed, status:{response.status_code}, text:{response.text}\")\n return\n print(f'subscribed')", "title": "" }, { "docid": "e3a937526f0537edf05bf68dd60f16fd", "score": "0.46334097", "text": "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n self._logger.debug('Got message %s', strmsg)\n msg = json.loads(strmsg)\n # Find and execute callback function for this message\n msgid = msg['msgid']\n self._logger.debug('Got message %s', msgid)\n\n # Call internal callback is set up\n msg_callback = self._callbacks.get(msgid)\n if msg_callback:\n # Don't send msg to consumers, process it in this class\n msg_callback(msg)\n\n # Call external callback: broker or feed subscriber\n for func in self._subscribers.setdefault(msgid, []):\n func(msg)", "title": "" }, { "docid": "6e51f149956c67ee37c36a3b5ae3bff6", "score": "0.46321607", "text": "def start(self) -> None:\n self.bus.subscribe(\"ip:db:modified\", self.db_modified)\n self.bus.subscribe(\"ip:facts\", self.facts)\n self.bus.subscribe(\"ip:reverse\", self.reverse)", "title": "" }, { "docid": "3423aa4d35abb16dded9a4d804bf6349", "score": "0.4631599", "text": "def process(self, req):\n raise NotImplementedError(\"handler process function must be overridden\")", "title": "" }, { "docid": "d03bd6a0b77a0de0a2be6d0ac420429d", "score": "0.46295926", "text": "def SubscribePeerEvents(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n print(\"XXXX 15\")\n context.set_details('Method not 29!')\n raise NotImplementedError('Method not 30!')", "title": "" }, { "docid": "863d83b736d1750c3cdf5ca102835ba0", "score": "0.46294075", "text": "def _on_msg(self, msg):\n data = msg['content']['data']\n\n # If the message is a call invoke, run the function and send \n # the results.\n if 'callback' in data:\n guid = data['callback']\n callback = callback_registry[guid]\n args = data['arguments']\n args = [self.deserialize(a) for a in args]\n index = data['index']\n\n results = callback(*args)\n return self.serialize(self._send('return', index=index, results=results))\n\n # The message is not a call invoke, it must be an object\n # that is a response to a Python request.\n else:\n index = data['index']\n immutable = data['immutable']\n value = data['value']\n if index in self._callbacks:\n self._callbacks[index].resolve({\n 'immutable': immutable,\n 'value': value\n })\n del self._callbacks[index]", "title": "" }, { "docid": "90fee240386c930102257099e5b0cc1f", "score": "0.46283525", "text": "def host_notification_handler(self, connection, msg):\n if msg is not None:\n try:\n notification = json.loads(msg)\n\n version = notification.get('version', None)\n notify_type = notification.get('notify-type', None)\n notify_data = notification.get('notify-data', None)\n if notify_data is not None:\n notify_data = json.loads(notify_data)\n\n if 1 == version:\n for callback in self._host_notification_callbacks:\n status = callback(connection.ip, notify_type, notify_data)\n notification['status'] = status\n connection.send(json.dumps(notification))\n else:\n DLOG.error(\"Unknown version %s received, notification=%s\"\n % (version, notification))\n\n connection.close()\n\n except ValueError:\n DLOG.error(\"Message received is not valid, msg=%s\" % msg)\n connection.close()", "title": "" }, { "docid": 
"2c4d1626787ff0c9b97ae32441b76073", "score": "0.4619974", "text": "def application_message(self, bus, msg):\n msgtype = msg.structure.get_name()\n if msgtype == 'result':\n res = msg.structure['hyp']\n if res: print res\n self.dispatch_command(res)", "title": "" }, { "docid": "17317a4beb793a2eeb46e2d162d623da", "score": "0.46154484", "text": "def on_pubmsg(self, connection, event):\n message = TwitchChatBot.get_message_from_event(event)\n if self._is_command(message):\n self._execute_command(connection, event)\n else:\n self.process_message(event, TwitchChatBot.get_user_from_event(event), message)", "title": "" }, { "docid": "50eb45b842a41e960418f27ad1d05b23", "score": "0.46055156", "text": "def main():\n #rospy.init_node('chassis_faker')\n \n pub1 = rospy.Publisher(APOLLO_CHASSIS_TOPIC, Chassis, queue_size=10)\n msg1 = Chassis()\n msg1.engine_started = True\n msg1.driving_mode = 1\n\n \n pub2 = rospy.Publisher(APOLLO_CONTROL_PAD_TOPIC, PadMessage, queue_size=10)\n msg2 = PadMessage()\n msg2.action = 1\n msg2.driving_mode = 1 \n \n \n \"\"\" \n pub3 = rospy.Publisher(APOLLO_CHASSIS_DETAIL_TOPIC, ChassisDetail, queue_size=10)\n msg3 = ChassisDetail()\n e = Eps()\n e.is_eps_fail = False\n e.eps_control_state = 1 # TODO: Try 1 if it doesn't work\n msg3.eps.CopyFrom(e) \n\n crs = CheckResponseSignal()\n crs.is_eps_online = True\n\n msg3.check_response.CopyFrom(crs) \n \"\"\" \n\n while not rospy.is_shutdown():\n pub1.publish(msg1)\n pub2.publish(msg2)\n #pub3.publish(msg3)", "title": "" }, { "docid": "2f855ab687d95a97ebc894c5e6368f43", "score": "0.46019715", "text": "def on_message(self, rawmsg):\n content = json.loads(rawmsg)\n msgt = content.get('type')\n func = {\n 'identity': self.handle_identity, 'pong': self.handle_pong,\n 'joined': self.handle_joined, 'who': self.handle_who,\n 'unicast': self.handle_unicast,\n 'broadcast': self.handle_broadcast,\n 'response': self.handle_response, 'left': self.handle_left,\n 'error': self.handle_error\n }.get(msgt, self.on_unknown)\n func(content, rawmsg)", "title": "" }, { "docid": "1b84c9f590e480828b5743bbdffda468", "score": "0.46011394", "text": "def router():\n\n # Allow multiple forms of parameters to be used.\n sys.argv = list(map(proofreader, sys.argv))\n\n # Delayed imports to allow other functionality to work even when some required libraries may be missing.\n\n if const.ALIGN_COMMAND in sys.argv:\n # Run alignment related functionality..\n sys.argv.remove(const.ALIGN_COMMAND)\n from biorun.methods import align\n plac.call(align.run)\n\n elif const.TAXON_COMMAND in sys.argv:\n # Run taxonomy related functionality.\n from biorun.models import taxdb\n sys.argv.remove(const.TAXON_COMMAND)\n plac.call(taxdb.run)\n\n elif const.DBLINK_COMMAND in sys.argv:\n # Run SRA specific functionality.\n from biorun.models import dblink\n sys.argv.remove(const.DBLINK_COMMAND)\n plac.call(dblink.run)\n\n elif const.ONTOLOGY_COMMAND in sys.argv:\n # Run SRA specific functionality.\n from biorun.models import ontology\n sys.argv.remove(const.ONTOLOGY_COMMAND)\n plac.call(ontology.run)\n\n else:\n # Default action is to convert a file.\n from biorun import convert\n\n # Add the help flag if no other information is present.\n if len(sys.argv) == 1:\n sys.argv.append(\"-h\")\n\n # Delegate parameter parsing to converter.\n plac.call(convert.run)", "title": "" }, { "docid": "e14103878306e612749ecd69f704f4a5", "score": "0.4595571", "text": "def init_pubsub():\r\n try:\r\n this.hub = aiopubsub.Hub()\r\n this.publisher = aiopubsub.Publisher(\r\n this.hub, 
prefix=aiopubsub.Key())\r\n except Exception:\r\n logger.exception('Error initiationg pubsub module')\r\n raise", "title": "" }, { "docid": "9d4206488e65dab0a5429f6bd67a09d5", "score": "0.45942616", "text": "def application(env, start_response):\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(host='localhost')\n )\n channel = connection.channel()\n\n exchange = env['PATH_INFO'].replace('/', '')\n\n channel.exchange_declare(\n exchange=exchange, exchange_type='fanout'\n )\n\n # exclusive means the queue should be deleted once the connection is closed\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue # random queue name generated by RabbitMQ\n\n channel.queue_bind(exchange=exchange, queue=queue_name)\n\n uwsgi.websocket_handshake(\n env['HTTP_SEC_WEBSOCKET_KEY'],\n env.get('HTTP_ORIGIN', '')\n )\n\n def keepalive():\n \"\"\"Keep the websocket connection alive (called every 30 seconds).\"\"\"\n print('PING/PONG...')\n try:\n uwsgi.websocket_recv_nb()\n connection.add_timeout(30, keepalive)\n except OSError as error:\n print(error)\n sys.exit(1) # Kill process and force uWSGI to Respawn\n\n keepalive()\n\n while True:\n for method_frame, _, body in channel.consume(queue_name):\n try:\n uwsgi.websocket_send(body)\n except OSError as error:\n print(error)\n sys.exit(1) # Force uWSGI to Respawn\n else:\n # acknowledge the message\n channel.basic_ack(method_frame.delivery_tag)", "title": "" }, { "docid": "9fd67496ea47bc98102338d00b8f377f", "score": "0.4593979", "text": "def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message acked.')", "title": "" }, { "docid": "9fd67496ea47bc98102338d00b8f377f", "score": "0.4593979", "text": "def on_publish(self, unused_client, unused_userdata, unused_mid):\n print('Published message acked.')", "title": "" }, { "docid": "fe08186f17f713198827bad8971e062a", "score": "0.45895454", "text": "def handle_message(context):\n \n send_message(context.copy())\n \n for callback in _subscribers:\n try:\n callback(context.copy())\n except StopIteration:\n break\n except Exception:\n print \"Error when calling subscriber:\"\n \n traceback.print_exc()", "title": "" }, { "docid": "0727a57dfc173909eb6335514714a3bb", "score": "0.458423", "text": "def triggerByPubsub(event, context):\n \n\n def getToken():\n func_url = os.environ['SLACKTOME']\n metadata_url = 'http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience='\n\n token_url = metadata_url + func_url\n token_header = {'Metadata-Flavor': 'Google'}\n\n token_response = requests.get(token_url, headers=token_header)\n jwt = token_response.content.decode('utf-8')\n print('jwt===',jwt)\n return jwt\n \n\n print(\"triggerByPubsub {} published at {}\".format(context.event_id, context.timestamp))\n\n if 'data' in event:\n jwt = getToken()\n\n name = base64.b64decode(event['data']).decode('utf-8')\n #toslack\n dict_headers = {'Content-type': 'application/json', 'Authorization': f'bearer {jwt}'}\n dict_payload = {\n \"text\": 'pubsub來了一個訊息='+name\n }\n json_payload = json.dumps(dict_payload)\n url = os.environ['SLACKTOME']\n rtn = requests.post(url, data=json_payload, headers=dict_headers)\n print(rtn.text)\n else:\n name = 'no data'\n print('Hello {}!'.format(name))", "title": "" }, { "docid": "bd118856f02344903a6947677166aa1a", "score": "0.45839682", "text": "def get_server(publisher, message_encoder, server_name=\"cortex_api\", *flask_args, **flask_kwargs):\n\n ThoughtAPI = Flask(server_name, *flask_args, 
**flask_kwargs)\n publish_func = publisher if callable(publisher) else publisher.publish\n @ThoughtAPI.route(\"/user/<id>\", methods=[\"POST\"])\n def handle_new_thought(id):\n \"\"\"\n this handles a new thought on the user ID. if the user doesn't exits it should be created.\n :param id: the id from the url\n :return: empty string. this happens whether the backend manages to save the thought or not.\n \"\"\"\n message = message_encoder(request.data, user=id)\n publish_func(configuration.topics.snapshot, message)\n return 'OK'\n\n @ThoughtAPI.route(\"/configuration\")\n def get_configuration():\n return configuration.get_config()[configuration.CONFIG_CLIENT_CONFIG]\n\n @ThoughtAPI.route('/users', methods=[\"POST\"])\n def ensure_user():\n user_info = request.json\n # parsing here because this is hella simple\n user_info['id'] = user_info.pop('userId')\n gender_enum = user_info.get('gender', 0)\n user_info['gender'] = 'Male' if gender_enum == 0 else 'Female' if gender_enum == 1 else 'Other'\n publish_func(configuration.get_parsed_data_topic_name(configuration.topics.user_info), json.dumps(user_info))\n return 'OK'\n\n return ThoughtAPI", "title": "" }, { "docid": "99cab2de8dd491c0149b40ea776ccb83", "score": "0.45817417", "text": "def on_webhook(self, environ, url, params):\n for handler in self.events[\"webhook\"]:\n args = handler.event[\"args\"]\n if not args or match(args[0], url):\n response = handler(self, environ, url, params)\n if response:\n return response", "title": "" }, { "docid": "447a9b3d9beced03f53a228b80bf5998", "score": "0.458073", "text": "async def __call__(self, scope, receive, send):\n await WsgiToAsgiInstance(self.wsgi_application)(scope, receive, send)", "title": "" }, { "docid": "48be7bf696fea3de04ee4086eb925c21", "score": "0.45800868", "text": "def handle_msg_main(request, event, destination):\n LINE.log_event(\"Message event\", event=event, dest=destination)\n\n handle_fn = fn_dict.get(_get_message_type(event.message))\n\n try:\n if handle_fn:\n handle_fn(request, event, destination)\n else:\n handle_msg_unhandled(request, event, destination)\n except Exception as ex: # pylint: disable=broad-except\n handle_error(ex, f\"Error occurred in handle_msg_main. Handle function: {handle_fn.__qualname__}\",\n event, destination)", "title": "" } ]
14a168fffb50104ce16e96fe8b2c89cc
Action models for a footstep phase.
[ { "docid": "981216e217c31644d2bc531c762a5df5", "score": "0.5858494", "text": "def createFootstepModels(self, comPos0, feetPos0, stepLength, stepHeight, timeStep, numKnots, supportFootIds,\n swingFootIds):\n numLegs = len(supportFootIds) + len(swingFootIds)\n comPercentage = float(len(swingFootIds)) / numLegs\n\n # Action models for the foot swing\n footSwingModel = []\n for k in range(numKnots):\n swingFootTask = []\n for i, p in zip(swingFootIds, feetPos0):\n # Defining a foot swing task given the step length\n # resKnot = numKnots % 2\n phKnots = numKnots / 2\n if k < phKnots:\n dp = np.array([stepLength * (k + 1) / numKnots, 0., stepHeight * k / phKnots])\n elif k == phKnots:\n dp = np.array([stepLength * (k + 1) / numKnots, 0., stepHeight])\n else:\n dp = np.array(\n [stepLength * (k + 1) / numKnots, 0., stepHeight * (1 - float(k - phKnots) / phKnots)])\n tref = p + dp\n\n swingFootTask += [crocoddyl.FramePlacement(i, pinocchio.SE3(np.eye(3), tref))]\n\n comTask = np.array([stepLength * (k + 1) / numKnots, 0., 0.]) * comPercentage + comPos0\n footSwingModel += [\n self.createSwingFootModel(timeStep, supportFootIds, comTask=comTask, swingFootTask=swingFootTask)\n ]\n\n # Action model for the foot switch\n footSwitchModel = self.createFootSwitchModel(supportFootIds, swingFootTask)\n\n # Updating the current foot position for next step\n comPos0 += [stepLength * comPercentage, 0., 0.]\n for p in feetPos0:\n p += [stepLength, 0., 0.]\n return footSwingModel + [footSwitchModel]", "title": "" } ]
[ { "docid": "21228deb4d6a0ed92944ccbc9a77decf", "score": "0.60571176", "text": "def perform_step(self, action: Action) -> dict:", "title": "" }, { "docid": "df2d28fb4b50d371ca4049bda1ded3e3", "score": "0.5815846", "text": "def step(self, action, **kwargs):\n pass", "title": "" }, { "docid": "3299821fdd551ba37ecc75afb4c78f99", "score": "0.5807211", "text": "def step(self, action):\n for action_k, env in zip(action, self._envs):\n env.step(action_k)\n\n # Return\n return self.observation(), self.reward(), self.done(), self.info()", "title": "" }, { "docid": "ce44346f1e152139465d757616d32092", "score": "0.5764049", "text": "def action_logic(self):", "title": "" }, { "docid": "c182672efb26920e750b57331690e4f9", "score": "0.57517004", "text": "def transition_model(self, state, action):\n ...", "title": "" }, { "docid": "cfeb250fc0b71acc3e3f0e1d0043c9b2", "score": "0.570284", "text": "def step(self, action):\n pass", "title": "" }, { "docid": "cfeb250fc0b71acc3e3f0e1d0043c9b2", "score": "0.570284", "text": "def step(self, action):\n pass", "title": "" }, { "docid": "cfeb250fc0b71acc3e3f0e1d0043c9b2", "score": "0.570284", "text": "def step(self, action):\n pass", "title": "" }, { "docid": "3c9b1716697f4b476f0ac0b1024f4f2a", "score": "0.5656943", "text": "def step(self, action):\n points_before_action = dict(self.player_points)\n #print(\"points before action \" + str(points_before_action))\n #Take action. Need to define action_state, as p1 action is action_state[action]\n p1_action = self.convertActionListToAction(action)\n #p1_action = self.getP1Action()\n p2_action = self.getP2Action()\n #print(self.chance)\n #print(p1_action)\n #print(p2_action)\n self.act(p1_action, p2_action)\n reward = self.getReward(points_before_action)\n #print(\"point before action \" + str(points_before_action) + \" point after \" + str(self.player_points))\n #if reward > 0:\n # print(\"Reward is \" + str(reward))\n obs = self.setObservationState()\n episode_over = self.checkEpisodeOver()\n if episode_over:\n print(self.player_points)\n\n #print(str(self.player_points['p1']) + \":\" + str(self.player_points['p2']))\n return obs, reward, episode_over, {}", "title": "" }, { "docid": "4b14080e21642bd873abd2afae24df57", "score": "0.5654875", "text": "def step(self, target, action):\n\t\tpackets = self.packages(self._traffic_type, self.UEs[target].packet_size)\n\t\treward = self.H_mag(target, action)\n\t\tself.Bit_rate(reward,440.35)\n\t\tself.UEs[target].step(packets, self.R)\n\n\t\t# Step for not target UE's \n\t\tfor i in range(len(self.UEs)):\n\t\t\tif i != target:\n\t\t\t\td_packets = self.packages(self._traffic_type, self.UEs[i].packet_size)\n\t\t\t\tself.UEs[i].step(d_packets, 0)\n\n\t\tself._state += 1\n\t\tdone = self._state >= self.ep_lenght\n\t\tfeedback = self.UE_feedback(target) \n\t\tfeedback.append(float(self.R))# dropped packets, sent packets and bit rate\n\t\tstate = np.concatenate((self.UEs[target].position, feedback), axis=None)\n\t\treturn state, reward, feedback, done", "title": "" }, { "docid": "70e772f2e71339c0ac25be19154bbf01", "score": "0.5603368", "text": "def step(self, action):\n transformed_action = self.action(action)\n self.transformed_action_list.append(transformed_action)\n self.raw_actions_list.append(action)\n self.T.append((self.latest_obs, transformed_action))\n self.latest_obs, rew, done, info = self.env.step(transformed_action)\n\n if done :\n self.T.append((self.latest_obs, None))\n self.Ts.extend([self.T])\n self.T = [] # reset self.T\n\n return self.latest_obs, rew, done, info", 
"title": "" }, { "docid": "a00fcc81effd945815e97f0978a9ff9d", "score": "0.5591679", "text": "def step(self, action):\n\n accel = action[\"accel\"]\n wheel_angle = action[\"wheel_angle\"]\n\n self.vehicle_model.move_accel(accel, wheel_angle)\n\n obs = {\n \"reward\":\n 0,\n \"dist_upcoming_points\":\n self.world.get_dist_upcoming_points(self.vehicle_model.position, self.vehicle_model.orientation),\n }\n\n return obs", "title": "" }, { "docid": "8813ea14f43f5b4d19b4832550033be6", "score": "0.5511492", "text": "def step(self, action):\n raise NotImplementedError", "title": "" }, { "docid": "8813ea14f43f5b4d19b4832550033be6", "score": "0.5511492", "text": "def step(self, action):\n raise NotImplementedError", "title": "" }, { "docid": "8813ea14f43f5b4d19b4832550033be6", "score": "0.5511492", "text": "def step(self, action):\n raise NotImplementedError", "title": "" }, { "docid": "7772f7065a9b1dd07a0cf05428a9ec8d", "score": "0.5510359", "text": "def get_actions(self):\n\n # TODO create concrete Action objects based on the domain action schema for: Load, Unload, and Fly\n # concrete actions definition: specific literal action that does not include variables as with the schema\n # for example, the action schema 'Load(c, p, a)' can represent the concrete actions 'Load(C1, P1, SFO)'\n # or 'Load(C2, P2, JFK)'. The actions for the planning problem must be concrete because the problems in\n # forward search and Planning Graphs must use Propositional Logic\n\n def load_actions():\n \"\"\"Create all concrete Load actions and return a list\n\n :return: list of Action objects\n \n Load(c, p, a), #cargo, plan, airport\n PRECOND: At(c, a) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a)\n EFFECT: ¬ At(c, a) ∧ In(c, p)\n \"\"\"\n loads = []\n # TODO create all load ground actions from the domain Load action\n for airport in self.airports:\n for plane in self.planes:\n for cargo in self.cargos:\n precond_pos = [expr(\"At({}, {})\".format(cargo, airport)),\n expr(\"At({}, {})\".format(plane, airport)),\n ]\n precond_neg = []\n effect_add = [expr(\"In({}, {})\".format(cargo, plane))]\n effect_rem = [expr(\"At({}, {})\".format(cargo, airport))]\n load = Action(expr(\"Load({}, {}, {})\".format(cargo, plane, airport)),\n [precond_pos, precond_neg],\n [effect_add, effect_rem])\n loads.append(load)\n return loads\n\n def unload_actions():\n \"\"\"Create all concrete Unload actions and return a list\n\n :return: list of Action objects\n Unload(c, p, a),\n PRECOND: In(c, p) ∧ At(p, a) ∧ Cargo(c) ∧ Plane(p) ∧ Airport(a)\n EFFECT: At(c, a) ∧ ¬ In(c, p)\n \"\"\"\n unloads = []\n # TODO create all Unload ground actions from the domain Unload action\n for airport in self.airports:\n for plane in self.planes:\n for cargo in self.cargos:\n precond_pos = [expr(\"In({}, {})\".format(cargo, plane)),\n expr(\"At({}, {})\".format(plane, airport)),\n ]\n precond_neg = []\n effect_add = [expr(\"At({}, {})\".format(cargo, airport))]\n effect_rem = [expr(\"In({}, {})\".format(cargo, plane))]\n unload = Action(expr(\"Unload({}, {}, {})\".format(cargo, plane, airport)),\n [precond_pos, precond_neg],\n [effect_add, effect_rem])\n unloads.append(unload)\n return unloads\n\n def fly_actions():\n \"\"\"Create all concrete Fly actions and return a list\n\n :return: list of Action objects\n\n Fly(p, from, to),\n PRECOND: At(p, from) ∧ Plane(p) ∧ Airport(from) ∧ Airport(to)\n EFFECT: ¬ At(p, from) ∧ At(p, to)\n \"\"\"\n flys = []\n for fr in self.airports:\n for to in self.airports:\n if fr != to:\n for p in self.planes:\n precond_pos = 
[expr(\"At({}, {})\".format(p, fr)),\n ]\n precond_neg = []\n effect_add = [expr(\"At({}, {})\".format(p, to))]\n effect_rem = [expr(\"At({}, {})\".format(p, fr))]\n fly = Action(expr(\"Fly({}, {}, {})\".format(p, fr, to)),\n [precond_pos, precond_neg],\n [effect_add, effect_rem])\n flys.append(fly)\n return flys\n\n return load_actions() + unload_actions() + fly_actions()", "title": "" }, { "docid": "cb74441ddd5f424404aa232e0b1309d1", "score": "0.54966354", "text": "def step(self, action):\n\t\tassert self.action_space.contains(action)\n\n\t\tobs = self._get_observation(action)\n\t\t\n\t\t# check if observation state is a touchdown\n\t\tif obs[4] == 1:\n\t\t\t# print(f\"action {self.action_dict[action]} td {obs}\")\n\t\t\tdone = True\n\t\t\treward = 7.\n\t\t# check if observation state is a field goal\n\t\telif obs[5] == 1:\n\t\t\t# print(f\"action {self.action_dict[action]} field goal {obs}\")\n\t\t\tdone = True\n\t\t\treward = 3.\n\t\t# check if it is a turnover\n\t\telif obs[1] <= 0 or obs[3] == 1:\n\t\t\t# print(f\"action {self.action_dict[action]} turnover {obs}\")\n\t\t\tdone = True\n\t\t\treward = -7. * (1 - obs[0]/100)\n\t\t# if not TO or TD then not done and no rewards\n\t\telse:\n\t\t\t# print(f\"action {self.action_dict[action]} continue {obs}\")\n\t\t\tdone = False\n\t\t\treward = 0.\n\t\t\n\t\tprint(f'state: action {self.action_dict[action]}, obs: {obs}, done: {done}, reward: {reward}')\n\t\treturn obs, reward, done, {}", "title": "" }, { "docid": "6c0df03db2eb20ffaa97d69ece39362b", "score": "0.5493983", "text": "def step(self, data):\n step_info = {}\n step_info['id'] = data[0]\n step_info['personnel'] = data[1]\n step_info['conditions_out'] = []\n\n if isinstance(data[2], top.Action): # Step has no attached entry condition\n step_info['condition_in'] = top.Immediate()\n step_info['action'] = data[2]\n deviations = data[3:]\n else: # Step has an attached entry condition\n step_info['condition_in'] = data[2]\n step_info['action'] = data[3]\n deviations = data[4:]\n\n for cond, transition in deviations:\n step_info['conditions_out'].append((cond, transition))\n\n return step_info", "title": "" }, { "docid": "102048cf993524e5a4e4e43716692239", "score": "0.54724634", "text": "def actions(self):\n pass", "title": "" }, { "docid": "b55468c49160c36b9741fe6674a336d5", "score": "0.5469175", "text": "def step(self, state, action):\n # perform action and observe reward & statePrime\n self.execute_action(action)\n status = self.hfo.step()\n stateFeatures = self.hfo.getState()\n statePrime = self.get_transformed_features(stateFeatures)\n stateQuantized = self.quantize_features(state)\n statePrimeQuantized = self.quantize_features(statePrime)\n reward = self.get_reward(status)\n # select actionPrime\n actionPrime = self.select_action(stateFeatures, statePrime)\n\n if self.exploring:\n # calculate TDError\n TDError = reward + self.gamma * self.get_Q(statePrimeQuantized, actionPrime) - self.get_Q(stateQuantized, action)\n\n # update trace value\n self.stateActionTrace[(stateQuantized, action)] = self.stateActionTrace.get((stateQuantized, action), 0) + 1\n \n #Will be used for advice \n self.episodeUpdateTrace.append((stateQuantized,action,statePrimeQuantized,statePrime,reward,(stateQuantized, action),self.stateActionTrace.get((stateQuantized, action), 0)))\n \n for stateAction in self.stateActionTrace:\n # update update ALL Q values and eligibility trace values\n self.qTable[stateAction] = self.qTable.get(stateAction, 0) + TDError * self.alpha * self.stateActionTrace.get(stateAction, 0)\n\n 
# update eligibility trace Function for state and action\n self.stateActionTrace[stateAction] = self.gamma * self.decayRate * self.stateActionTrace.get(stateAction, 0)\n #self.learn(stateQuantized, action, reward,\n # statePrimeQuantized, actionPrime)\n self.training_steps_total += 1\n if status == self.GOAL:\n self.adviseFellow()\n if status != self.IN_GAME:\n self.stateActionTrace = {}\n self.episodeUpdateTrace = []\n return status, statePrime, actionPrime", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.5466336", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.5466336", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.5466336", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "fa4c2561643c18821a4784183ea40834", "score": "0.5466336", "text": "def actions(self, state):\n raise NotImplementedError", "title": "" }, { "docid": "f58183eeabe425606a09ee0f5442f415", "score": "0.54402035", "text": "def actions(self, state):\r\n #\r\n\r\n raise NotImplementedError", "title": "" }, { "docid": "b51545b2895aac4576367240547378c9", "score": "0.5437686", "text": "def predict(self, context: Context, actions: Sequence[Action]) -> Union[Probs,Tuple[Probs,Info]]:\n ...", "title": "" }, { "docid": "b9dd45e951193113b0eda08e56269a04", "score": "0.5425708", "text": "def actions(self):", "title": "" }, { "docid": "bc05903e6f2ea230453027248308cd85", "score": "0.5424545", "text": "def phi(self, observations, actions, modelpath=\"tmp/model.ckpt\", exclude=EXCLUDE,\n meanfile=\"saved_models/mixinfo.npz\"):\n\n loaded_data = np.load(meanfile)\n self.mean_obs = loaded_data['mean_obs']\n self.std_obs = loaded_data['std_obs']\n self.state_step_size = loaded_data['stepsize']\n observations_n, actions_n = self.normalize_data(observations, actions)\n \"\"\"print(self.mean_obs, \" Mean\")\n print(self.std_obs, \"std\")\n print(self.state_step_size, \"stepsize\")\n observations_n = (observations - self.mean_obs) / self.std_obs\n # observations = (observations - self.mean_obs) / self.std_obs\n actions_n = actions / self.state_step_size\"\"\"\n n, s = np.shape(observations)[0:2]\n print(\"number of sequences : \", n)\n states_single = np.zeros((n, s, 4))\n with tf.Session() as sess:\n # Loads the model and calculate the new states\n self.saver.restore(sess, modelpath)\n states = sess.run(self.state,\n feed_dict={self.obs_var: observations_n[0:1, :, :], self.action_var: actions_n[0:1, :, :],\n self.keep_prob: 1.0, self.is_training: False})\n for i in range(1, n):\n states1 = sess.run(self.state, feed_dict={self.obs_var: observations_n[i:i + 1, :, :],\n self.action_var: actions_n[i:i + 1, :, :],\n self.keep_prob: 1.0, self.is_training: False})\n # states1 = sess.run(self.state, feed_dict = {self.obs_var: observations_n[:int(n/2),:,:],self.action_var: actions_n[:int(n/2),:,:], self.keep_prob: 1.0, self.is_training: False})\n # states2 = sess.run(self.state, feed_dict = {self.obs_var: observations_n[int(n/2):,:,:],self.action_var: actions_n[int(n/2):,:,:], self.keep_prob: 1.0, self.is_training: False})\n states = np.concatenate((states, states1), axis=0)\n if (exclude):\n states = states[:, exclude:, :]\n return states", "title": "" }, { "docid": "274f32d234b93f48077022c78d427f44", "score": "0.54175335", "text": "def step(self, actions):\n orders_costs = 
self.move_wagons2routes(actions) # 1.\n self.current_step += 1 # 2.\n # starting step 3\n station_costs = dict()\n for v in self.world_state.vs:\n nw = len(v['info'].wagons)\n cost = v['info'].stay_cost\n station_costs[v['info'].index] = nw * cost\n # station_costs contains costs for each station now\n profits = self.arrival_profit()\n self.update_world_graph()\n return orders_costs, station_costs, profits", "title": "" }, { "docid": "91e4ef31fe9c1ffd4d73f69b124c253d", "score": "0.5403668", "text": "def step(self, action):\n\n # execute the action\n self.gazebo.unpause_sim()\n self._set_action(action)\n self._check_all_systems_are_ready() # get latest system data\n self.gazebo.pause_sim()\n\n # compute the required fields\n obs = self._get_obs()\n done = self._is_done()\n reward = self._compute_reward(obs, done)\n info = {}\n\n return obs, reward, done, info", "title": "" }, { "docid": "9e224435b57548c0fdfe2ea0b83e8381", "score": "0.5399008", "text": "def get_actions(self,model = None):\n actions = []\n \n for ag in range(self.n_agents):\n # get visible state\n state = np.array([self.all_spacing[-1][ag],self.all_vel[-1][ag],self.all_dv[-1][ag]])\n \n # query agent function for action\n if self.agent_types[ag] == \"rand\":\n actions.append(np.random.normal(0,self.sigma))\n \n elif self.agent_types[ag] == \"step_accel\":\n if state[0] > 10: #spacing > goal spacing\n acc = 0.1\n if state[2] > 0: # dv > 0\n acc = acc - state[2]\n else:\n acc = -0.05\n actions.append(acc)\n \n elif self.agent_types[ag] == \"RL\":\n act = model.choose_action(state,EVAL = False)\n act = (act-0.5)*0.2\n actions.append(act)\n \n elif self.agent_types[ag] == \"step\":\n if self.step in [200,400]:\n acc = -0.5\n elif self.step in [300]:\n acc = 0.5\n else:\n acc = 0\n actions.append(acc)\n \n elif self.agent_types[ag] == \"IDM\":\n \n a = self.idm_params[0]\n b = self.idm_params[1]\n v0 = self.idm_params[2]\n delta = self.idm_params[3]\n T = self.idm_params[4]\n s0 = self.idm_params[5]\n \n s =state[0]\n v = state[1]\n dv = state[2]\n \n # in order to deal with ZeroDivisionError\n if abs(s) < 1e-3:\n s = 1e-3\n s_star = s0 + np.max([0, v*T - v*(-dv)/(2*np.sqrt(a*b))])\n acc= a*(1.0-(v/v0)**delta-(s_star/s)**2.0)\n actions.append(acc) \n \n return actions", "title": "" }, { "docid": "0761d6c96930f533dba5ccae7d46f774", "score": "0.53707004", "text": "def step(self, actions, action, current_state, depot):\n new_state = current_state.apppend(actions[action])\n action_reward = self.compute_reward(selected_action=actions[action])\n if action_reward is None:\n raise NotImplementedError('reward computation is not implemented')\n if new_state is depot:\n episode_done = True\n\n diag_info = \"No diagnostics info available\"\n return new_state, action_reward, episode_done, diag_info", "title": "" }, { "docid": "cd3b3af23246b9a43944e85afd098749", "score": "0.53677475", "text": "def step(self, action):\n raise NotImplementedError()", "title": "" }, { "docid": "fe601a53bd62aeb245b067eadf60aeb8", "score": "0.53534955", "text": "def actions(self, obs: Arrayable) -> Tensor:\n pass", "title": "" }, { "docid": "0a2272618c7aa40501b46372068a527a", "score": "0.5316051", "text": "def phases(self):\n raise NotImplementedError(\"Not implemented on backend\" + type(self).backend)", "title": "" }, { "docid": "959ed8ca87a0a2a73893878a175bdde7", "score": "0.531439", "text": "def step(self, action):\n if action == 0:\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=0))\n if action == 1:\n 
self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=-1 * self.STEER_AMT))\n if action == 2:\n self.vehicle.apply_control(carla.VehicleControl(throttle=1.0, steer=1 * self.STEER_AMT))\n\n v = self.vehicle.get_velocity()\n kmh = int(3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2))\n\n if len(self.collision_hist) != 0:\n done = True\n reward = -200\n elif kmh < 50:\n done = False\n reward = -1\n else:\n done = False\n reward = 1\n\n if self.episode_start + SECONDS_PER_EPISODE < time.time():\n done = True\n\n return self.front_camera, reward, done, None", "title": "" }, { "docid": "1bc156dec3b4ac1a16912bc039a04ae3", "score": "0.5311789", "text": "def step(self, action): \n \n self.t += 1\n danger_tax = 0\n if self.use_run_time_assurance:\n probe_state, unsafe = self.probe_step(action)\n # switch to unsafe controller if unsafe\n if unsafe:\n danger_tax = -5\n action = self.get_safe_control()\n \n state, reward, done, info = self.env.step(action)\n \n # if self.testing == False:\n # if done:\n # reward += 75\n # else:\n # reward = -1 + danger_tax\n # Could make a custom reward here if you want\n if self.use_action_masking:\n self.update_avail_actions(state)\n obs = {\n \"action_mask\": self.action_mask,\n \"state\": state,\n }\n else:\n obs = state\n \n if self.t >= 100: # ------ change if using v1. Can make this to find shortest path\n done = True\n return obs, reward, done, info", "title": "" }, { "docid": "16364ad55c901d8cf916dd9b6881105d", "score": "0.53106546", "text": "def step(self, action):\n # get night vision\n if self.episode_step == 1:\n self.agent_host.sendCommand('chat /effect @p night_vision 999 99')\n \n # Get Action\n if action != 'move 1' or (not self.facing_sheep and not self.facing_creeper and not self.facing_zombie and not self.facing_wall):\n command = self.action_dict[action]\n self.agent_host.sendCommand(command)\n self.episode_step += 1\n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs = self.get_observation(world_state)\n\n # Get Done\n done = not world_state.is_mission_running\n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n reward -= 5 * self.new_damage_taken\n self.episode_return += reward\n\n return self.obs, reward, done, dict()", "title": "" }, { "docid": "e8616bc0d72020709e4d9c013f491af6", "score": "0.5308288", "text": "def actions(self, state): \n util.raiseNotDefined()", "title": "" }, { "docid": "7d2f5950b7413aef282476d8a180b009", "score": "0.5301464", "text": "def predict(self, context: Context, actions: Sequence[Action]) -> Probs:\n return self._fixed_pmf", "title": "" }, { "docid": "52ff9ea03de08d972e03623ad64816d4", "score": "0.5281814", "text": "def step(self, actions):\n assert len(actions) == len(self)\n observations = []\n rewards = []\n dones = []\n infos = []\n for i, (env, action) in enumerate(zip(self.list_env, actions)):\n observation, reward, done, info = env.step(action)\n # If done=True, reset environment, store last observation in info and report new initial observation\n if done:\n info['last_observation'] = observation\n observation = env.reset()\n observations.append(observation)\n rewards.append(reward)\n dones.append(done)\n infos.append(info)\n return observations, rewards, dones, infos", "title": "" }, { "docid": "716192525c76ed7df8840908c7c0fd65", "score": "0.5274624", "text": "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action 
in the\n simulation and get the observations result of performing that action.\n \"\"\"\n rospy.logdebug(\"START STEP OpenAIROS\")\n\n self._set_action(action)\n obs = self._get_obs()\n done = self._is_done(obs)\n info = {'is_success' : done}\n reward = self._compute_reward(obs, done)\n self.cumulated_episode_reward += reward\n #print(\"Total reward is\", self.cumulated_episode_reward, \"done is\", done, obs, self.desired_position)\n\n rospy.logdebug(\"END STEP OpenAIROS\")\n\n return obs, reward, done, info", "title": "" }, { "docid": "997bddb43043a88ca2f20883d06fc49c", "score": "0.52739125", "text": "def step(self, action):\n assert self.action_space.contains(action)\n self.sheriff, reward = self.transition(self.sheriff, action)\n done = (reward != 0)\n info = {}\n observation = self.sheriff\n return observation, reward, done, info", "title": "" }, { "docid": "4ab572c1e278fe1109a78102be6acdcb", "score": "0.52714163", "text": "def __onTa(self,phase, state):\n method_name = \"onTaWhen_\"+phase\n method = getattr(self, method_name, \\\n lambda s : exec(\"raise Exception('missing method implementation \\\"\" \\\n +method_name+\"\\\":\\\\nAdd declaration to your model eg:\\\\n\\\\tdef \" \\\n +method_name+\"(self,state):\\\\n\\\\t...')\\nraise NotImplementedError()\" ) )\n return method(state)", "title": "" }, { "docid": "9b35d046b9492583a801e0211c9fe272", "score": "0.5267563", "text": "def step(self, action):\n if self.episode_over:\n raise RuntimeError(\"Episode is done\")\n action = np.array(action).squeeze()\n action = np.atleast_1d(action)\n\n for i, a in enumerate(action):\n self._take_action(i, a)\n\n assert np.all(action <= self.naction), \"Actions should be in the range [0,naction).\"\n\n\n self.episode_over = False\n self.obs = self._get_obs()\n\n debug = {'predator_locs':self.predator_loc,'prey_locs':self.prey_loc}\n return self.obs, self._get_reward(), self.episode_over, debug", "title": "" }, { "docid": "8523438e8d753418f5697d9e29a3587e", "score": "0.52656525", "text": "def step(self, action) -> Tuple[np.ndarray, int, bool, dict]:\n reward = self.env.step(action)\n self.state, self.rendering = self.env.to_numpy()\n if self.state_type == 'binary':\n self.state = rgb_array_to_binary(self.state)\n return self.state, reward, self.episode_is_over(), {} # {} is a generic info dictionary", "title": "" }, { "docid": "b6719d2d78f02802dc8d1b9a018202a9", "score": "0.52581006", "text": "def step(self, action):\n self.forward_dynamics(action)\n comvel = self.get_body_comvel(\"torso\")\n if self._task_config.goal_velocity == -math.inf:\n forward_reward = -1 * comvel[0]\n elif self._task_config.goal_velocity == math.inf:\n forward_reward = comvel[0]\n else:\n forward_reward = -np.abs(comvel[0] - self._task_config.goal_velocity) + 1.0 \n lb, ub = self.action_bounds\n scaling = (ub - lb) * 0.5\n ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))\n contact_cost = 0.5 * 1e-3 * np.sum(\n np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),\n survive_reward = 0.05\n reward = forward_reward - ctrl_cost - contact_cost + survive_reward\n state = self._state\n notdone = np.isfinite(state).all() \\\n and state[2] >= 0.2 and state[2] <= 1.0\n done = not notdone\n ob = self.get_current_obs()\n return Step(ob, float(reward), done)", "title": "" }, { "docid": "d85cd21ca580c62d248720f18c13b3d9", "score": "0.52530533", "text": "def step(self, action):\n\n print(action)\n action[2] = 1 if action[2] > 0 else 0\n for i in range(len(action)):\n commands = {0 : \"move \", 1 : \"turn \", 2 : \"attack \", 3 
: 'pitch '}\n if i == 2:\n if action[2] > 0:\n print(\"Stop moving and look for trees\")\n self.agent_host.sendCommand(\"move 0.0\")\n self.agent_host.sendCommand(\"turn 0.0\")\n self.agent_host.sendCommand(\"pitch 0.0\")\n world_state = self.agent_host.getWorldState()\n obs = self.get_observation(world_state)\n self.checkForWood(world_state)\n else:\n self.agent_host.sendCommand(commands[i] + str(action[i]))\n time.sleep(0.1)\n self.episode_step += 1\n\n # Get Observation\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs = self.get_observation(world_state) \n \n # Get Done\n done = not world_state.is_mission_running \n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n self.episode_return += reward\n\n return self.obs, reward, done, dict()", "title": "" }, { "docid": "c0df9b48b775927a80d44a3c84eb0ef8", "score": "0.5250604", "text": "def forward(self, observation):\n info = {}\n # Select an action.\n state = self.memory.get_recent_state(observation)\n if self.training:\n q_values = self.compute_q_values(state, self.active_model)\n action = self.policy.select_action(q_values=q_values)\n info['q_values'] = q_values\n else:\n q_values_all_nets = self.compute_q_values_all_nets(state)\n action, policy_info = self.test_policy.select_action(q_values_all_nets=q_values_all_nets)\n info['q_values_all_nets'] = q_values_all_nets\n info['mean'] = np.mean(q_values_all_nets[:, :], axis=0)\n info['standard_deviation'] = np.std(q_values_all_nets[:, :], axis=0)\n info['coefficient_of_variation'] = np.std(q_values_all_nets[:, :], axis=0) / \\\n np.mean(q_values_all_nets[:, :], axis=0)\n info.update(policy_info)\n\n # Book-keeping.\n self.recent_observation = observation\n self.recent_action = action\n\n return action, info", "title": "" }, { "docid": "946b118230ff9b70707f72c43bec9718", "score": "0.52412826", "text": "def step(self, action):\n\n new_state, reward, episode_done = self.env.execute_action(action)\n return new_state, reward, episode_done", "title": "" }, { "docid": "2ec049d506bf7382b7a48348c689866a", "score": "0.5237677", "text": "def __onOut(self,phase,state):\n method_name = \"onOutWhen_\"+phase\n method = getattr(self, method_name, \\\n lambda s : exec(\"raise Exception('missing method implementation \\\"\" \\\n +method_name+\"\\\":\\\\nAdd declaration to your model eg:\\\\n\\\\tdef \" \\\n +method_name+\"(self,state):\\\\n\\\\t...')\\nraise NotImplementedError()\" ))\n return method(state)", "title": "" }, { "docid": "a16bfef87f668238e69d2e39f57f58d0", "score": "0.5232372", "text": "def _post_action(self, action):\n reward = self.reward(action)\n\n # done if number of elapsed timesteps is greater than horizon\n self.done = (self.timestep >= self.horizon) and not self.ignore_done\n return reward, self.done, {}", "title": "" }, { "docid": "5119ffab3b1b82a585b7883a0e39b514", "score": "0.5231273", "text": "def step(self):\n \tif not self.is_done():\n actions = [agent.program(self.percept(agent))for agent in self.agents]\n for (agent, action) in zip(self.agents, actions):\n \t\t self.execute_action(agent, action)\n self.exogenous_change()", "title": "" }, { "docid": "d25ea6396d89fed5334c5f2c8d3c8554", "score": "0.5230338", "text": "def on_model_step(self, agent_info, action, observation, reward, done):\n del agent_info, action, reward, done\n if not self._log_current_episode:\n return\n observation = self._preprocess_observation(observation)\n 
self._episode_observations.add(observation)\n self._step_observations.add(observation)", "title": "" }, { "docid": "c252f198f40f9faafcd39032ff7ef7a8", "score": "0.52262706", "text": "def forward(self, state, **kwargs):\n action_sequence = self.solver(state)\n # Return first Step.\n return (\n action_sequence[0, ..., :],\n torch.zeros(self.dim_action[0], self.dim_action[0]),\n )", "title": "" }, { "docid": "0fb60381f457ed476ba2bdd96e70f570", "score": "0.52217394", "text": "def nActions(self):\n if self.action_type == 'DQN':\n return 7\n elif self.action_type == 'DDPG':\n return [[0, 1], [0, 1], [0, 1], [0, 1]] # [beam values, rest values, x values, y values] mapped from 0->1", "title": "" }, { "docid": "008117f1c8c14f3cfab6c8952472ea61", "score": "0.52076733", "text": "def on_step_begin(self, k, action):\r\n pass", "title": "" }, { "docid": "d47fca87b14c8b7ff7d44f92756f3cd1", "score": "0.5197861", "text": "def __init__(self, model, out_map, feature_ex=lambda x: x, gamma=1.0):\n assert (None, len(out_map)) == model.output_shape # Make sure all outputs can be mapped to actions\n self.model = model\n self.phi = feature_ex\n self.out_map = out_map\n self.gamma = gamma", "title": "" }, { "docid": "6c9c498836923fa4a9a1717ca9010f64", "score": "0.51758844", "text": "def step_(\n self,\n action: np.ndarray,\n **obs_kwargs\n ) -> Tuple[np.ndarray, float, bool, dict]:\n\n action = np.array(action).squeeze()\n self.raw_action = action\n\n # Advance the dynamics and update the model and state variables.\n self.model, self.zone_temp = dyn.dynamics(\n self.models,\n self.zone_temp,\n action,\n self.temp_oa,\n self.q_solar,\n self.q_int\n )\n\n self.p_consumed = dyn.get_p_consumed(action, self.temp_oa)\n\n # Get the reward\n rew, _ = self.step_reward()\n\n # Step in time and update the exogenous\n self.time_index += 1\n self.time = self.df.index[self.time_index]\n self._set_exogenous()\n\n # Call get_obs before returning so state dict is updated.\n obs, state = self.get_obs(**obs_kwargs)\n\n return np.array(obs), rew, self.is_terminal(), state", "title": "" }, { "docid": "92661d161104c21bccf0f5f6f6ab60f6", "score": "0.51708907", "text": "def step(self, action):\n\t\tself.current_step += 1\n\n\t\tif action is not None:\n\t\t\tself.robots[0].apply_action(action)\n\n\t\t\n\t\t# before step \n\t\tprev_obj_pos_xy = list()\n\t\tprev_obj_orn_z = list()\n\t\tfor obj in self.scene.interative_objects:\n\t\t\tprev_obj_pos_xy.append(obj.get_xy_position())\n\t\t\tprev_obj_orn_z.append(obj.get_orientation_z())\n\t\t\t\n\n\t\t# step simulator, check collisions, compute current_step_robot_energy_cost\n\t\t#non_interactive_collision_links, interactive_collision_links, self.current_step_robot_energy_cost = self.run_simulation()\n\t\tnon_interactive_collision_links, interactive_collision_links = self.run_simulation()\n\n\t\t# after step\n\t\t# used by reward_termination collision\n\t\tself.non_interactive_collision_links = non_interactive_collision_links\n\t\tself.interactive_collision_links = interactive_collision_links\n\n\t\tself.non_interactive_collision_step += int(len(non_interactive_collision_links) > 0)\n\t\tself.interactive_collision_step += int(len(interactive_collision_links) > 0)\n\n\n\t\t# accumulate robot energy cost at this step\n\t\tself.current_episode_robot_energy_cost += self.current_step_robot_energy_cost\n\t\t\n\t\t#print('Energy cost: %f'%(self.robot_energy_cost_cur_step * self.energy_cost_scale))\n\t\t#print('Action: %s'%(action))\n\t\t#if len(interactive_collision_links) > 
0:\n\t\t#\tprint('Push')\n\t\t#print('--------------------------')\n\n\t\t# accumulate step pushing energy cost to episode pushing energy at this step\n\t\tcurrent_step_pushing_energy_translation, current_step_pushing_energy_rotation = self.compute_pushing_energy_per_action_step(prev_obj_pos_xy, prev_obj_orn_z)\n\t\tself.current_episode_pushing_energy_translation += current_step_pushing_energy_translation\n\t\tself.current_episode_pushing_energy_rotation += current_step_pushing_energy_rotation\n\n\t\t# update running history of min and max step energy (pushing and robot) among all trajectories\n\t\tself.current_step_pushing_energy_cost = current_step_pushing_energy_rotation + current_step_pushing_energy_translation\n\t\tself.max_step_pushing_energy_cost = max(self.max_step_pushing_energy_cost, self.current_step_pushing_energy_cost)\n\t\tself.min_step_pushing_energy_cost = min(self.min_step_pushing_energy_cost, self.current_step_pushing_energy_cost)\n\t\tself.max_step_robot_energy_cost = max(self.max_step_robot_energy_cost, self.current_step_robot_energy_cost)\n\t\tself.min_step_robot_energy_cost = min(self.min_step_robot_energy_cost, self.current_step_robot_energy_cost)\n\n\t\t#print(\"step_energy: %f\"%(self.current_step_pushing_energy_cost))\n\n\t\t# task compute reward\n\t\tstate = self.get_state()\n\t\tinfo = {}\n\n\t\tif self.config['scene'] == 'relocate_different_objects':\n\t\t\tif self.config['use_tier_reward'] == False:\n\t\t\t\treward, done, info, sub_reward = self.task.get_reward_termination(self, info)\n\t\t\telse:\t\n\t\t\t\treward, done, info, sub_reward = self.task.get_reward_termination_different_objects(self, info)\n\t\telse:\n\t\t\treward, done, info, sub_reward = self.task.get_reward_termination(self, info)\n\n\t\t\n\t\t\t\n\t\t# if succeed, update running history of min and max episode energy (pushing and robot) among all successful episodes\n\t\tif info['success']:\n\t\t\t# compute current episode pushing energy\n\t\t\tcurrent_episode_pushing_energy_cost = self.current_episode_pushing_energy_translation + self.current_episode_pushing_energy_rotation\n\t\t\t\n\t\t\tself.max_succeed_episode_robot_energy_cost = max(self.max_succeed_episode_robot_energy_cost, self.current_episode_robot_energy_cost)\n\t\t\tself.max_succeed_episode_pushing_energy_cost = max(self.max_succeed_episode_pushing_energy_cost, current_episode_pushing_energy_cost)\n\t\t\tself.min_succeed_episode_robot_energy_cost = min(self.min_succeed_episode_robot_energy_cost, self.current_episode_robot_energy_cost)\n\t\t\tself.min_succeed_episode_pushing_energy_cost = min(self.min_succeed_episode_pushing_energy_cost, current_episode_pushing_energy_cost)\n\n\t\t\t\n\t\t\t#self.current_succeed_episode_robot_energy_cost = self.current_episode_robot_energy_cost\n\t\t\t#self.current_succeed_episode_pushing_energy_translation = self.current_episode_pushing_energy_translation\n\t\t\t#self.current_succeed_episode_pushing_energy_rotation = self.current_episode_pushing_energy_rotation\n\n\t\t# consider energy cost in reward function when succeed\n\t\t# make sure that current_episode_energy, max_succeed and min_succeed are updated before ratio\n\t\t#if info['success'] and self.use_energy_cost:\n\t\tif info['success'] and self.reward_function_choice == \"0-1-with-per-episode-energy\": # in use\n\t\t\tratio = self.compute_episode_energy_ratio()\n\t\t\treward = reward * (1 - ratio)\n\t\t\n\t\t#print(sub_reward)\n\n\t\t# step task related variables\n\t\tself.task.step(self)\n\n\t\tself.populate_info(info)\n\n\t\tif done and 
self.automatic_reset:\n\t\t\t#info['last_observation'] = state # useless in iGibson\n\t\t\tstate = self.reset()\n\n\t\treturn state, reward, done, info", "title": "" }, { "docid": "beb4bd39ee1b6fc91d7a1e2342b40bf3", "score": "0.51616246", "text": "def step(self, action):\n print(\"Action is\", action)\n self.actions_so_far.append(action)\n logger.info(f\"Action#{len(self.actions_so_far)}={action}\")\n\n if len(self.actions_so_far) == self.max_timesteps:\n H = np.sum(self.hamiltonian(self.actions_so_far), axis=0)\n\n if not np.all(H == np.conjugate(H).T):\n logger.error(\n f\"{H} is not Hermitian with actions as {np.array(self.actions_so_far)}\")\n\n U = expm(-1j*self.dt*H/H_CROSS)\n\n if not np.allclose(np.matmul(U, np.conjugate(U.T)), np.eye(4)):\n logger.error(\n f\"Unitary Invalid-Difference is{np.matmul(U,U.T)-np.eye(4)}\")\n if not np.isclose(np.abs(np.linalg.det(U)), 1):\n logger.error(f\"Det Invalid-{np.abs(np.linalg.det(U))}\")\n\n return len(self.actions_so_far), self.fidelity(U, self.target), True, {}\n\n return len(self.actions_so_far), 0, False, {}", "title": "" }, { "docid": "ca5e89603068d3b1f56d4f08e4502e44", "score": "0.5158376", "text": "def step(self, action):\n observation, reward, done, info = self.env.step(action)\n if reward > 1.0:\n reward = 1.0\n elif reward < -1.0:\n reward = -1.0\n self.last_observations.append(self.preprocess(observation))\n state = np.stack(self.last_observations, axis=2)\n return state, reward, done, info", "title": "" }, { "docid": "f8473be99995bb297f98fd4dd982b241", "score": "0.51542", "text": "def step(self, action_dict):\n action_batch_t = {}\n for i in range(len(self.preys)):\n action_batch_t[i] = random.randint(0, 3)\n obs2, rew2, done2, info2 = self.multi_preys_env.step(action_batch_t)\n action = {}\n action[0] = action_dict\n\n obs, rew, done, info = {}, {}, {}, {}\n for i, action in action.items():\n dist = [10,10]\n #if \"hunter\" in i: #and amount_of_preys_living > 0:\n dist = find_closest(self.agents[i].get_position(), self.multi_preys_env.get_pos())\n obs[i], rew[i], done[i], info[i] = self.agents[i].step(action, dist)\n # rew[i] = 1\n if done[i]:\n self.dones.add(i)\n done[\"__all__\"] = len(self.dones) == len(self.agents)\n return obs[0][0], rew[0], done[0], info[0]", "title": "" }, { "docid": "97dfda468989aa1e45be95d183068302", "score": "0.51540035", "text": "def setup(self):\n self.action_size = len(ACTIONS) #Get size of the action\n \n if self.train:\n self.logger.info(\"Setting up model from scratch.\")\n #self.model = MultiOutputRegressor(LGBMRegressor(n_estimators=100, n_jobs=-1))\n #self.model = KNeighborsRegressor(n_jobs=-1)\n # self.model = MultiOutputRegressor(SVR(), n_jobs=8)\n self.isFit = False\n #self.model = LinearRegression()\n #self.model = MultiOutputRegressor(SGDRegressor( alpha = LEARNING_RATE ))\n self.q_table = np.zeros((4*((s.COLS-2)*(s.ROWS-2)), self.action_size))\n \n else:\n self.logger.info(\"Loading model from saved state.\")\n #with open(\"my-saved-model.pt\", \"rb\") as file:\n # self.model = pickle.load(file)\n self.q_table = np.load(\"my-q-table-longer.npy\")", "title": "" }, { "docid": "72e3d5af2a9beff89b137fd425e5508a", "score": "0.5153865", "text": "def _construct_transition_step(self):\n pass", "title": "" }, { "docid": "caebf6485c1c1c750af77c47f2f04569", "score": "0.51436245", "text": "def step(self, action):\n\n\t\tresult = self.move_model(action)\n\t\treward = self.delta_score()\n\n\t\t# set the reward scalar here \n\t\t#############################\n\t\tif reward <5:\n\t\t\treward = 
-0.5\n\t\telse:\n\t\t\treward = 0.5\n\t\tif result == -1:\n\t\t\treward = -1\n\t\t#############################\n\n\t\t# Increment collisions \n\t\tif result == -1:\n\t\t\tself.collisions = self.collisions + 1\n\t\treturn self.get_observation(), reward, 0, 0", "title": "" }, { "docid": "6bb876e602df3534894963dd452edcca", "score": "0.5130525", "text": "def __call__(self, trajectory: Tensor, actions: Tensor) -> float:\n pass", "title": "" }, { "docid": "d4f1a7304e37f04a5b377bee6aff7eec", "score": "0.5124904", "text": "def step(self, action: List[float]) -> Tuple[List[float], float, bool, dict]:\n self.n_tot_steps += 1\n self.n_steps += 1\n\n for param, a in zip(self.params, action):\n param.load(value=a, session=self.sess)\n\n obs = self._get_obs()\n\n done = self.done_func(self)\n reward = self.calc_reward(env=self, done=done) * self.reward_scale\n for callback in self.step_callbacks:\n callback(self, done=done)\n\n if self.n_steps >= self.max_steps:\n done = True\n\n return obs, reward, done, {}", "title": "" }, { "docid": "58f3c3c16723275375f39a038a920ddf", "score": "0.5123724", "text": "def handle_main_action(self, action, params):\n if action == ActionType.MOVE:\n self.handle_move(params)\n elif action == ActionType.SWITCH or action == ActionType.DRAG:\n self.handle_switch(params)\n elif action == ActionType.FAINT:\n # []|faint|p2a: Heatran\n details = params[2]\n teamID, pokeName = self.strip_team(details) \n poke = self.get_pokemon(None, pokeName, teamID=teamID)\n poke.set_health(0)\n elif action == ActionType.DETAILS_CHANGED:\n self.handle_details_changed(params)\n elif action == ActionType.NEW_TURN:\n turn = params[2]\n print(\"-----------------------------------\\nTurn %s starting\" % turn)", "title": "" }, { "docid": "ae6c5c7cf9d5efffe22a51f1644c77f0", "score": "0.5120506", "text": "def predict(self, states, actions):\n \"\"\" YOUR CODE HERE \"\"\"\n states = states.reshape((-1, states.shape[-1]))\n actions = actions.reshape((-1, actions.shape[-1]))\n return self.sess.run(self.pred_next_obs, feed_dict={self.ob_ph:states, self.ac_ph:actions}).reshape(states.shape)", "title": "" }, { "docid": "a6b9e54ad2cf08d143216a6470def839", "score": "0.5110356", "text": "def action_handler(model, data_input=None, tango_dev=None):\n # TODO (KM 18-01-2016): Need to remove the tango_dev parameter from\n # action handler, currently used for testing functionality of the\n # override class actions.\n temp_variables = {}\n return_value = None\n for action in actions:\n if action[\"behaviour\"] == \"long_running\":\n time.sleep(float(action[\"execution_time_secs\"]))\n if action[\"behaviour\"] == \"input_transform\":\n temp_variables[action[\"destination_variable\"]] = data_input\n if action[\"behaviour\"] == \"side_effect\":\n quantity = action[\"destination_quantity\"]\n temp_variables[action[\"source_variable\"]] = data_input\n model_quantity = model.sim_quantities[quantity]\n model_quantity.set_val(data_input, model.time_func())\n\n if action[\"behaviour\"] == \"output_return\":\n if \"source_variable\" in action and \"source_quantity\" in action:\n raise ValueError(\n \"{}: Either 'source_variable' or 'source_quantity'\"\n \" for 'output_return' action, not both\".format(action_name)\n )\n elif \"source_variable\" in action:\n source_variable = action[\"source_variable\"]\n try:\n return_value = temp_variables[source_variable]\n except KeyError:\n raise ValueError(\n \"{}: Source variable {} not defined\".format(\n action_name, source_variable\n )\n )\n elif \"source_quantity\" in action:\n 
quantity = action[\"source_quantity\"]\n try:\n model_quantity = model.sim_quantities[quantity]\n except KeyError:\n raise ValueError(\n \"{}: Source quantity {} not defined\".format(\n action_name, quantity\n )\n )\n return_value = model_quantity.last_val\n else:\n raise ValueError(\n \"{}: Need to specify one of 'source_variable' \"\n \"or 'source_quantity' for 'output_return' action\".format(\n action_name\n )\n )\n else:\n # Return a default value if output_return is not specified.\n return_value = ARBITRARY_DATA_TYPE_RETURN_VALUES[action_output_type]\n return return_value", "title": "" }, { "docid": "4549513055a5d01214b50b5e01abeaf3", "score": "0.5105185", "text": "def transform(self, action, prev_obs, obs):\n # Concatenate state-action pair\n concat_sa = np.array([])\n for pobs in prev_obs[1:]:\n concat_sa = np.append(concat_sa, pobs)\n concat_sa = np.append(concat_sa, obs)\n concat_sa = np.append(concat_sa, action)\n\n # Normalize\n concat_sa = apply_norm(concat_sa, self.fwd_norms[0])\n\n # Convert to tensor\n concat_sa = torch.tensor(concat_sa).float().to('cpu')\n with torch.no_grad():\n # Apply forward model\n d_next_state = self.fwd_model.forward(concat_sa)\n d_next_state = d_next_state.detach().cpu().numpy()\n # Un-normalize\n d_next_state = unapply_norm(d_next_state, self.fwd_norms[1])\n\n # Concatenate state, predicted next_state\n concat_ss = np.array([])\n for pobs in prev_obs[1:]:\n concat_ss = np.append(concat_ss, pobs)\n concat_ss = np.append(concat_ss, obs)\n concat_ss = np.append(concat_ss, d_next_state)\n # Normalize\n concat_ss = apply_norm(concat_ss, self.inv_norms[0])\n # Convert to tensor\n concat_ss = torch.tensor(concat_ss).float().to('cpu')\n # Apply inverse model\n tf_action = self.inv_model.forward(concat_ss)\n tf_action = tf_action.detach().cpu().numpy()\n # Un-normalize\n tf_action = unapply_norm(tf_action, self.inv_norms[1])\n # Apply alpha term\n tf_action = (1.0-self.alpha)*action + self.alpha*tf_action\n\n return tf_action", "title": "" }, { "docid": "57c95062893c0fc2bebb8f3bb01772b3", "score": "0.51027703", "text": "def step(self, action: Action) -> Tuple[Real, State, bool]:\n pass", "title": "" }, { "docid": "2cf95dd53395459d7ff56bcfde3d48ce", "score": "0.5098981", "text": "def act(self):\n self.logger.info('Picking action according to rule set')\n\n # computing state variable\n state, state_np = get_state(self)\n x, y, _, bombs_left, score = self.game_state['self']\n arena, bomb_map = state_np\n \n # determine valid actions\n directions = [(x,y), (x+1,y), (x-1,y), (x,y+1), (x,y-1)]\n valid_tiles, valid_actions = [], []\n for d in directions:\n if (((arena[d] == 0) or (arena[d] == 2) ) and\n (self.game_state['explosions'][d] <= 1) and\n (bomb_map[d] > 0)):\n valid_tiles.append(d)\n if (x-1,y) in valid_tiles: valid_actions.append('LEFT')\n if (x+1,y) in valid_tiles: valid_actions.append('RIGHT')\n if (x,y-1) in valid_tiles: valid_actions.append('UP')\n if (x,y+1) in valid_tiles: valid_actions.append('DOWN')\n if (x,y) in valid_tiles: valid_actions.append('WAIT')\n if (bombs_left > 0): valid_actions.append('BOMB')\n self.logger.debug(f'Valid actions: {valid_actions}')\n \n if len(valid_actions) == 0:\n return # we're fucked -> can only happen in last step of an episode\n \n # prepare state by stacking the the current state on top of 3 past states\n old_stack = self.state_hist[-1] if len(self.state_hist) > 0 else torch.from_numpy(np.array([np.nan]))\n stacked_state = stack(old_stack,state)\n\n # decide next action\n action= get_action(self, stacked_state, 
valid_actions)\n self.next_action = action\n \n # save state and action such that they are available in reward_update for learning\n self.action_hist.append(action)\n self.state_hist.append(stacked_state)", "title": "" }, { "docid": "dc325791a27da5cb80fc9356f62d1eef", "score": "0.50923723", "text": "def actions(self, state):\n moves = state.moves\n player = state.to_move\n\n phase = check_phase(state.w_no_board, state.b_no_board, state.w_board, state.b_board, player)\n\n # print(\"Phase in actions: \" + str(self.Phase))\n\n if phase == 1:\n moves = filter_phase1(state)\n\n if phase == 2:\n moves = filter_phase2(state)\n\n if phase == 3:\n moves = filter_phase3(state)\n return moves", "title": "" }, { "docid": "877a5660111ed42031253c0eeac7188a", "score": "0.5080673", "text": "def _step(self, action):\n\n if self._episode_ended:\n # The last action ended the episode. Ignore the current action and start\n # a new episode.\n return self.reset()\n\n #print('#### TYPE OF ACTION', type(action))\n #if isinstance(action, np.ndarray):\n action = int(action)\n #print('#### TYPE OF ACTION', type(action))\n observations, reward, done, info = self._env.step(action)\n observation = observations['player_observations'][observations['current_player']]\n\n reward = np.asarray(reward, dtype=np.float32)\n\n obs_vec = np.array(observation['vectorized'], dtype=dtype_vectorized)\n mask_valid_actions = self.get_mask_legal_moves(observation)\n # stores current game score\n info = self._env.state.score()\n \n #used for two-player curiosity implementation\n otherplayer_id=1\n if observations['current_player']==1:\n otherplayer_id=0\n\n state2 = observations['player_observations'][otherplayer_id]\n state2_vec = np.array(state2['vectorized'], dtype=dtype_vectorized)\n\n obs = {'state': obs_vec, 'mask': mask_valid_actions, 'info': info, 'state2': state2_vec}\n\n if done:\n self._episode_ended = True\n step_type = StepType.LAST\n else:\n step_type = StepType.MID\n\n return TimeStep(step_type, reward, discount, obs)", "title": "" }, { "docid": "f6c97ca943245dff5e08f3d25c95b586", "score": "0.50760585", "text": "def step(self, action):\n #self.placement = self.env.placement\n self.state, self.reward, self.done\n #print(done)\n if self.done:\n self.start += 1\n\n return self.state, self.reward, self.done", "title": "" }, { "docid": "db7dc2a8c28b5c50ad40089875b5f1ee", "score": "0.50581133", "text": "def __call__(self, action: float) -> np.ndarray:\n # do the action and save the state and reinforcement\n new_state, new_reinforcement, _, _ = self.env.step([action])\n\n # update object variables\n self.state = new_state\n self.acc_reinf += new_reinforcement\n self.reinforcement = new_reinforcement\n\n # render the environment\n if self.render:\n self.env.render(mode=\"human\")\n\n return (self.get_state(), self.get_reinforcement())", "title": "" }, { "docid": "2c2bf0c768f407090f6befab3650e44a", "score": "0.5057995", "text": "def act(self, action):\r\n self.state, reward, done, info = self.env.step(action)\r\n self.nb_step += 1\r\n\r\n #terminal = False\r\n #if self.nb_step > 2000:\r\n # terminal = True\r\n return (reward, done)", "title": "" }, { "docid": "a20f89d56b218ae5ab8ff159c57d6b16", "score": "0.5054385", "text": "def step(self, action):\n\n env_info = self.env.step(action)[self.brain_name]\n state = self.get_state(env_info)\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n return state, reward, done", "title": "" }, { "docid": "02f1d33c8d8d1e6475bd6fed3a517591", "score": "0.5053472", "text": "def 
step_op(self, model, **kwargs):\n inputs, labels = kwargs[\"inputs\"], kwargs[\"labels\"]\n\n outputs = model(inputs)\n\n def train_op():\n \"\"\"Takes care of the training process.\"\"\"\n pass\n\n def log_op():\n \"\"\"\n Takes care of the logging process.\n Returns\n -------\n A dictionary whose values are to be logged.\n \"\"\"\n return {\"inputs\": inputs, \"labels\": labels}\n\n def eval_op():\n \"\"\"\n Takes care of the evaluation.\n Returns\n -------\n A dictionary with values to be evaluated.\n \"\"\"\n return {\"outputs\": outputs}\n\n return {\"train_op\": train_op, \"log_op\": log_op, \"eval_op\": eval_op}", "title": "" }, { "docid": "e269e8e931d2b0e5de8bf1266082c98f", "score": "0.50426894", "text": "def step(self, action, visualize=False):\n self.leadtime = self.generate_leadtime(0, self.leadtime_dist,self.leadtime_lb, self.leadtime_ub)\n print('-----------------------------------Period :',self.period)\n action, penalty = self._check_action_space(action)\n \n self._initialize_state()\n \n print('Action :',action)\n print('State at start :',self.state)\n if visualize: self._visualize(\"0. IP\")\n \n \n\n if self.case_name == \"General\" or self.case_name=='Linear':\n self._generate_demand() ## order from customer to retail i.e. last leg\n self._receive_incoming_delivery()\n if visualize: self._visualize(\"1. Delivery\")\n self._receive_incoming_orders()\n if visualize: self._visualize(\"2. Demand\")\n #self._place_outgoing_order(1, action)\n self._place_outgoing_order(self.period+1, action)\n elif self.case_name == \"Divergent\":\n # According to the paper:\n # (1) Warehouse places order to external supplier\n self._place_outgoing_order(self.period, action)\n if visualize: self._visualize(\"1. Warehouse order\")\n # (2) Warehouse ships the orders to retailers taking the inventory position into account\n self._recieve_incoming_orders_divergent()\n if visualize: self._visualize(\"2. Warehouse ships\")\n # (3) Warehouse and retailers receive their orders\n self._receive_incoming_delivery()\n if visualize: self._visualize(\"3. Orders received\")\n # (4) Demand from customers is observed\n self._generate_demand()\n self._recieve_incoming_orders_customers()\n if visualize: self._visualize(\"4. 
Demand\")\n else:\n raise NotImplementedError\n \n #CIP = self._code_state()\n holding_costs, backorder_costs,lost_sales_costs,revenue,cost_of_goods = self.calculate_reward()\n reward = revenue-(cost_of_goods+holding_costs + backorder_costs+lost_sales_costs + penalty )\n \n print('Inventory at end of period :',self.INV[self.period])\n \n # update period\n self.period += 1\n \n # determine if simulation should terminate\n if self.period >= self.num_of_periods:\n done = True\n else:\n done = False\n # update stae\n #self._update_state()\n # CIP is next state\n print('Revenue :',revenue)\n print('COGS :',cost_of_goods)\n print('Holding Costs :',holding_costs)\n print('Back Order Costs :',backorder_costs)\n print('Lost Order Sales :',lost_sales_costs)\n print('Reward :',reward)\n \n \n return self.state, reward/self.divide, done,{}", "title": "" }, { "docid": "25f74a6e4756505ed7dfd8bff2550d4b", "score": "0.5040966", "text": "def step(self, state):\n # compute output for each metamodel\n responses = {}\n for m in self.metamodel_simulators:\n m_out = m.step(state)\n responses.update(m_out)\n return responses", "title": "" }, { "docid": "8acb2c15946955e34613e58e55b998bd", "score": "0.5036357", "text": "def step(self, action: int) -> (tuple, VectorDecimal, bool, dict):\n\n # Initialize reward as vector\n reward = self.default_reward.copy()\n\n # Get probability action\n action = self.__probability_action(action=action)\n\n # Update previous state\n self.current_state = self.next_state(action=action)\n\n # Get reward\n reward[0] = self.finals.get(self.current_state, self.default_reward[0])\n\n # Check if is final position\n final = self.is_final(self.current_state)\n\n # Set extra\n info = {}\n\n return self.current_state, reward, final, info", "title": "" }, { "docid": "07e632537e5c97ac20d557b046041c15", "score": "0.50352246", "text": "def actions(self, state):\n return state.moves", "title": "" }, { "docid": "5760963bfef1073b513a485227be92b5", "score": "0.5029714", "text": "def _localTakeAstepRun(self,inDictionary):\n outputs = self.__getOutputs(inDictionary)\n for i in range(len(outputs)):\n if self.actionType[i] == 'Database-dataObjects':\n #inDictionary['Input'][i] is Database, outputs[i] is a DataObjects\n inDictionary['Input'][i].loadIntoData(outputs[i])\n elif self.actionType[i] == 'dataObjects-Database':\n #inDictionary['Input'][i] is a dataObjects, outputs[i] is Database\n outputs[i].saveDataToFile(inDictionary['Input'][i])\n\n elif self.actionType[i] == 'ROM-dataObjects':\n #inDictionary['Input'][i] is a ROM, outputs[i] is dataObject\n ## print information from the ROM to the data set or associated XML.\n romModel = inDictionary['Input'][i]\n # get non-pointwise data (to place in XML metadata of data object)\n ## TODO how can user ask for particular information?\n xml = romModel.writeXML(what='all')\n self.raiseADebug('Adding meta \"{}\" to output \"{}\"'.format(xml.getRoot().tag,outputs[i].name))\n outputs[i].addMeta(romModel.name, node = xml)\n # get pointwise data (to place in main section of data object)\n romModel.writePointwiseData(outputs[i])\n\n elif self.actionType[i] == 'MODEL-FILES':\n #inDictionary['Input'][i] is a ROM, outputs[i] is Files\n ## pickle the ROM\n #check the ROM is trained first\n if isinstance(inDictionary['Input'][i],Models.ROM) and not inDictionary['Input'][i].amITrained:\n self.raiseAnError(RuntimeError,'Pickled rom \"%s\" was not trained! Train it before pickling and unpickling using a RomTrainer step.' 
%inDictionary['Input'][i].name)\n fileobj = outputs[i]\n fileobj.open(mode='wb+')\n cloudpickle.dump(inDictionary['Input'][i], fileobj, protocol=pickle.HIGHEST_PROTOCOL)\n fileobj.flush()\n fileobj.close()\n elif self.actionType[i] == 'MODEL-FMU':\n #check the ROM is trained first (if ExternalModel no check it is performed)\n if isinstance(inDictionary['Input'][i],Models.ROM) and not inDictionary['Input'][i].amITrained:\n self.raiseAnError(RuntimeError,'Pickled rom \"%s\" was not trained! Train it before pickling and unpickling using a RomTrainer step.' %inDictionary['Input'][i].name)\n self.raiseAMessage('Exporting Model \"{}\" as FMU named \"{}\"'.format(inDictionary['Input'][i].name, outputs[i].name))\n from ..utils.fmuExporter import FMUexporter\n fdir = inDictionary['jobHandler'].runInfoDict['FrameworkDir']\n fmuexec = FMUexporter(**{'model': inDictionary['Input'][i],'executeMethod': 'evaluate', 'workingDir': outputs[i].getPath(), 'frameworkDir': fdir, 'keepModule': True})\n fmuexec.buildFMU(outputs[i].getAbsFile())\n\n elif self.actionType[i] == 'FILES-MODEL':\n #inDictionary['Input'][i] is a Files, outputs[i] is ROM or ExternalModel\n ## unpickle the ROM\n fileobj = inDictionary['Input'][i]\n unpickledObj = pickle.load(open(fileobj.getAbsFile(),'rb+'))\n if not isinstance(unpickledObj, (Models.ROM, Models.ExternalModel)):\n ## DEBUGG\n # the following will iteratively check the size of objects being unpickled\n # this is quite useful for finding memory crashes due to parallelism\n # so I'm leaving it here for reference\n # print('CHECKING SIZE OF', unpickledObj)\n # target = unpickledObj# .supervisedEngine.supervisedContainer[0]._macroSteps[2025]._roms[0]\n # print('CHECKING SIZES')\n # from utils.Debugging import checkSizesWalk\n # checkSizesWalk(target, 1, str(type(target)), tol=2e4)\n # print('*'*80)\n # crashme\n ## /DEBUGG\n self.raiseAnError(RuntimeError,'Pickled object in \"%s\" is not a ROM. Exiting ...' %str(fileobj))\n if isinstance(unpickledObj,Models.ROM) and not unpickledObj.amITrained:\n self.raiseAnError(RuntimeError,'Pickled rom \"%s\" was not trained! Train it before pickling and unpickling using a RomTrainer step.' 
%unpickledObj.name)\n # copy model (same for any internal model (Dummy model derived classes)\n outputs[i]._copyModel(unpickledObj)\n\n elif self.actionType[i] == 'FILES-dataObjects':\n #inDictionary['Input'][i] is a Files, outputs[i] is PointSet\n ## load a CSV from file\n infile = inDictionary['Input'][i]\n options = {'fileToLoad':infile}\n outputs[i].load(inDictionary['Input'][i].getPath(),'csv',**options)\n\n else:\n # unrecognized, and somehow not caught by the step reader.\n self.raiseAnError(IOError,\"Unknown action type \"+self.actionType[i])\n\n for output in inDictionary['Output']:\n if isinstance(output, OutStreamEntity):\n output.addOutput()", "title": "" }, { "docid": "11d90c115da0ae23e5b449bcf8e8f50a", "score": "0.50292486", "text": "def step(self, action):\n self.env_info = self.env.step(action)[self.brain_name]\n return self.getStateInfo()", "title": "" }, { "docid": "85c8e9e0188fcc1e7ba276dcd5459a36", "score": "0.50284445", "text": "def end_of_round(self, last_game_state: dict, last_action: str, events: List[str]):\n self.logger.debug(f'Encountered event(s) {\", \".join(map(repr, events))} in final step')\n self.transitions.append(Transition(state_to_features(last_game_state), last_action, None, reward_from_events(self, events)))\n \n #print(np.count_nonzero(self.model==0))\n\n # Store the model\n with open(\"my-saved-model.pt\", \"wb\") as file:\n pickle.dump(self.model, file)", "title": "" }, { "docid": "9d16f3e2c34ec72262f88e4814aa4534", "score": "0.50239325", "text": "def wizard(self) :\n\n\t\t# Variables\n\t\tprint(\"Complete list of state variables, separated by commas :\")\n\t\tself.states = input().replace(\" \", \"\").split(\",\")\n\t\tself.N_states = len(self.states)\n\t\tself.states_map = { s : idx for s, idx in zip(self.states, range(self.N_states)) }\n\n\t\t# Initial condition for each variable\n\t\tprint(\"\\nInitial conditions (integers) :\")\n\t\tself.initconds = { s : int(input(\"%s : \" % s)) for s in self.states }\n\n\t\t# Parameters\n\t\tprint(\"\\nComplete list of parameters, separated by commas :\")\n\t\tparams = input().replace(\" \", \"\").split(\",\")\n\n\t\t# Value of each parameter\n\t\tprint(\"\\nValues of parameters :\")\n\t\tself.parameters = { p : input(\"%s : \" % p) for p in params }\n\n\t\t# State transitions\n\t\tevent = []\n\t\tself.events = []\n\t\tprint(\"\\nEvents, as \\\"<rate>, <state_change>, ...\\\" lists, with commas between state changes and X+1, Y-1 as example changes :\")\n\t\twhile True :\n\n\t\t\t# Grab user input of one event\n\t\t\tevent = input().split(\",\")\n\t\t\tif event == [\"\"] : # if they hit Enter\n\t\t\t\tbreak # stop reading in events\n\n\t\t\tthisevent = {}\n\t\t\tfor e in event[1:] :\n\t\t\t\tif \"+\" in e :\n\t\t\t\t\tst, quant = e.split(\"+\")\n\t\t\t\t\tquant = int(quant)\n\t\t\t\telif \"-\" in e :\n\t\t\t\t\tst, quant = e.split(\"-\")\n\t\t\t\t\tquant = -int(quant)\n\t\t\t\telse :\n\t\t\t\t\traise helpers.InvalidModel(\"The syntax of this event was not recognised.\")\n\t\t\t\tthisevent[st.strip()] = quant\n\n\t\t\tself.events.append([event[0].strip(), thisevent])\n\n\t\t# Model variables\n\t\tself.build()", "title": "" }, { "docid": "8738216d097d3256cfacd54f2b3634e7", "score": "0.5022401", "text": "def _step(self, action):\n # (num_action,) ndarray\n self.robot.act(action)\n # (height, width, channels) ndarray\n self.view = self.robot.get_view()\n\n self.state = np.array(self.view)\n\n done, reward = self._reward(self.state)\n\n return self.state, reward, done, {}", "title": "" }, { "docid": 
"d845bd23d42aa9359b703148f2556de9", "score": "0.50142926", "text": "def create_step_model(self, n_actions, n_state):\n x = Input(shape=(1, n_state))\n x1 = Flatten()(x)\n x2 = (Dense(8, activation='relu'))(x1)\n # x3 = (Dense(48, activation='relu'))(x2)\n x4 = (Dense(8, activation='relu'))(x2)\n x5 = (Dense(n_actions))(x4)\n\n actions_input = Input((n_actions,), name='mask')\n actions_input2 = keras.layers.Reshape((1, n_actions))(actions_input)\n filtered_output = keras.layers.Multiply()([actions_input2, x5])\n filtered_output = keras.layers.Reshape((n_actions, 1))(filtered_output)\n\n step_model = Model(inputs=[x, actions_input], outputs=filtered_output)\n opt = RAdam()\n step_model.compile(optimizer=opt, loss='mean_squared_error')\n\n return step_model", "title": "" }, { "docid": "88258eb6f53d5b0167df32ab3d13f0cb", "score": "0.50115746", "text": "def define_actions(self):\n if self.number_phases==2:\n self.all_or_not = 0\n if self.layers==1:\n self.actions = [np.arange(-self.bound_displacements, 0 + self.resolution, self.resolution)]\n else:\n self.actions = [np.arange(-self.bound_displacements, self.bound_displacements+ 2*self.resolution, 2*self.resolution), np.arange(-self.bound_displacements,self.bound_displacements +self.resolution, 2*self.resolution)]\n self.action_indexes = np.arange(0,len(self.actions[0]))\n\n else:\n self.number_displacements = int(2*(self.bound_displacements)/self.resolution)+1\n\n self.actions = np.zeros((self.number_displacements,self.number_displacements),dtype=complex)\n for index1,action1 in np.ndenumerate(np.arange(-self.bound_displacements,self.bound_displacements+self.resolution,self.resolution)):\n for index2,action2 in np.ndenumerate(1j*np.flip(np.arange(-self.bound_displacements,self.bound_displacements+self.resolution,self.resolution))):\n self.actions[(index1[0],index2[0])] = action1+action2\n\n self.actions_matrix_form = self.actions\n self.actions = self.actions.flatten()\n\n self.action_indexes = np.arange(0,len(self.actions))\n\n return", "title": "" }, { "docid": "3cf9cf0082169041fc21679772897c47", "score": "0.5006769", "text": "def compile(self):\n # inverse model (predics: s_t x s_t+1 -> action)\n predicted_action = self._inverse_prediction()\n iv_model = Model(inputs=[self.state_t0, self.state_t1],\n outputs=predicted_action)\n iv_model.compile(optimizer=\"adam\",\n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"sparse_categorical_accuracy\"])\n\n # forward model (predicts: s_t x a -> s_t+1)\n # trun off training for the embedded model when constructing\n predicted_state = self._forward_prediction()\n fw_model = Model(inputs=[self.state_forward, self.action],\n outputs=predicted_state)\n fw_model = AuxModel(fw_model, trainables=[self.dense_fw_1.variables,\n self.dense_fw_2.variables])\n\n # embedding model\n emb_model = Model(inputs=self.state_embedding,\n outputs=self._inverse_embedding(self.state_embedding))\n emb_model.compile(optimizer=\"adadelta\", loss=\"mean_squared_error\")\n\n return fw_model, iv_model, emb_model", "title": "" }, { "docid": "db68fe3473eb12b6ba80d4afd2127597", "score": "0.5006107", "text": "def step(self, action):\n self._before_step()\n _action = action\n if isinstance(action, list):\n action = {key: val for ac_i in action for key, val in ac_i.items()}\n if isinstance(action, dict):\n action = np.concatenate(\n [action[key] for key in self.action_space.spaces.keys()]\n )\n\n ob, reward, done, info = self._step(action)\n done, info, penalty = self._after_step(reward, done, info)\n reward += penalty\n if 
self._record_demo:\n self._store_state()\n self._demo.add(ob=ob, action=_action, reward=reward)\n return ob, reward, done, info", "title": "" }, { "docid": "f019ab363b9e489388dc1f1e27122f3d", "score": "0.4995139", "text": "def createSwingFootModel(self, timeStep, supportFootIds, comTask=None, swingFootTask=None):\n # Creating a 3D multi-contact model, and then including the supporting\n # foot\n contactModel = crocoddyl.ContactModelMultiple(self.state, self.actuation.nu)\n\n for i in supportFootIds:\n xref = crocoddyl.FrameTranslation(i, np.array([0., 0., 0.]))\n supportContactModel = crocoddyl.ContactModel3D(self.state, xref, self.actuation.nu, np.array([0., 50.]))\n contactModel.addContact(self.rmodel.frames[i].name + \"_contact\", supportContactModel)\n\n # Creating the cost model for a contact phase\n costModel = crocoddyl.CostModelSum(self.state, self.actuation.nu)\n if isinstance(comTask, np.ndarray):\n comTrack = crocoddyl.CostModelCoMPosition(self.state, comTask, self.actuation.nu)\n costModel.addCost(\"comTrack\", comTrack, 1e6)\n for i in supportFootIds:\n cone = crocoddyl.FrictionCone(self.nsurf, self.mu, 4, False)\n frictionCone = crocoddyl.CostModelContactFrictionCone(\n self.state, crocoddyl.ActivationModelQuadraticBarrier(crocoddyl.ActivationBounds(cone.lb, cone.ub)),\n crocoddyl.FrameFrictionCone(i, cone), self.actuation.nu)\n costModel.addCost(self.rmodel.frames[i].name + \"_frictionCone\", frictionCone, 1e1)\n if swingFootTask is not None:\n for i in swingFootTask:\n xref = crocoddyl.FrameTranslation(i.id, i.placement.translation)\n footTrack = crocoddyl.CostModelFrameTranslation(self.state, xref, self.actuation.nu)\n costModel.addCost(self.rmodel.frames[i.id].name + \"_footTrack\", footTrack, 1e6)\n\n stateWeights = np.array([0.] * 3 + [500.] * 3 + [0.01] * (self.rmodel.nv - 6) + [10.] * 6 + [1.] 
*\n (self.rmodel.nv - 6))\n stateReg = crocoddyl.CostModelState(self.state, crocoddyl.ActivationModelWeightedQuad(stateWeights**2),\n self.rmodel.defaultState, self.actuation.nu)\n ctrlReg = crocoddyl.CostModelControl(self.state, self.actuation.nu)\n costModel.addCost(\"stateReg\", stateReg, 1e1)\n costModel.addCost(\"ctrlReg\", ctrlReg, 1e-1)\n\n lb = np.concatenate([self.state.lb[1:self.state.nv + 1], self.state.lb[-self.state.nv:]])\n ub = np.concatenate([self.state.ub[1:self.state.nv + 1], self.state.ub[-self.state.nv:]])\n stateBounds = crocoddyl.CostModelState(\n self.state, crocoddyl.ActivationModelQuadraticBarrier(crocoddyl.ActivationBounds(lb, ub)),\n 0 * self.rmodel.defaultState, self.actuation.nu)\n costModel.addCost(\"stateBounds\", stateBounds, 1e3)\n\n # Creating the action model for the KKT dynamics with simpletic Euler\n # integration scheme\n dmodel = crocoddyl.DifferentialActionModelContactFwdDynamics(self.state, self.actuation, contactModel,\n costModel, 0., True)\n model = crocoddyl.IntegratedActionModelEuler(dmodel, timeStep)\n return model", "title": "" }, { "docid": "34c221e8cbf7970f03c9d317114474b6", "score": "0.4991931", "text": "def predict(self, context: Context, actions: Sequence[Action]) -> Probs:\n return [1/len(actions)] * len(actions)", "title": "" }, { "docid": "a7f3caeba4bd71e86d58c1e6faa83711", "score": "0.49847543", "text": "def _get_rule_actions(self, rule):\n parser = self.switch.dp.ofproto_parser\n return [parser.OFPActionOutput(rule.vnf_id)]", "title": "" }, { "docid": "e66818c76e9fa24a48438e7d00577df9", "score": "0.49841347", "text": "def get_q_values(self, model_out: TensorType, actions: TensorType) -> TensorType:\n return self.q_model(torch.cat([model_out, actions], -1))", "title": "" }, { "docid": "c96550566b6d5549448c0b87f89d34c3", "score": "0.49808475", "text": "def step(self, action):\n\n \"\"\"\n Here we should convert the action num to movement action, execute the action in the\n simulation and get the observations result of performing that action.\n \"\"\"\n rospy.logdebug(\"START STEP ROS\")\n \n self._set_action(action)\n obs = self._get_obs() \n done = self._is_done(obs)\n info = {}\n reward = self._compute_reward(obs, done)\n\n rospy.logdebug(\"END STEP ROS\")\n\n return obs, reward, done, info", "title": "" }, { "docid": "29e9923dabb7362051ead556a5e54f8d", "score": "0.49789146", "text": "def get_perturbed_actions(self, obs, actions, inferred_mdps):\n perturbations = self.forward(obs, actions, inferred_mdps)\n return (actions + perturbations * self.perturbed_scale).clamp(-self.max_action, self.max_action)", "title": "" } ]
53ecc997fde6146d2c03c85ffb7ae81f
Fetch the storage usage from Rucio, which will then be used as part of the data placement mechanism. Also calculate the available quota given the configurable quota fraction and mark RSEs with less than 1TB available as NOT usable.
[ { "docid": "2f001744d41069ae79a899525cc24651", "score": "0.77216226", "text": "def fetchStorageUsage(self, dataSvcObj):\n self.logger.info(\"Using Rucio for storage usage, with acct: %s\", self.dataAcct)\n for item in dataSvcObj.getAccountUsage(self.dataAcct):\n if item['rse'] not in self.nodeUsage:\n self.logger.warning(\"Rucio RSE: %s has data usage but no quota available.\", item['rse'])\n continue\n # bytes_limit is always 0, so skip it and use whatever came from the limits call\n # bytes_remaining is always negative, so calculate it based on the limits\n quota = self.nodeUsage[item['rse']]['quota']\n self.nodeUsage[item['rse']].update({'bytes': item['bytes'],\n 'bytes_remaining': quota - item['bytes']})", "title": "" } ]
[ { "docid": "d79afe470ec012494926e358feb8c972", "score": "0.73003674", "text": "def fetchStorageQuota(self, dataSvcObj):\n self.nodeUsage.clear()\n response = dataSvcObj.getAccountLimits(self.dataAcct)\n for rse, quota in viewitems(response):\n if rse.endswith(\"_Tape\") or rse.endswith(\"_Export\"):\n continue\n self.nodeUsage.setdefault(rse, {})\n self.nodeUsage[rse] = dict(quota=int(quota),\n bytes_limit=int(quota),\n bytes=0,\n bytes_remaining=int(quota), # FIXME: always 0\n quota_avail=0)\n self.logger.info(\"Storage quota filled from Rucio\")", "title": "" }, { "docid": "ea00e2d937964afaf6ecf2d42d84a342", "score": "0.678696", "text": "def do_quota():\n url = settings.XFC_API_URL + \"user?name=\" + settings.USER\n response = requests.get(url, verify=settings.VERIFY)\n if response.status_code == 200:\n data = response.json()\n used = data[\"quota_used\"]\n allocated = data[\"quota_size\"]\n total = data[\"total_used\"]\n hard_limit = data[\"hard_limit_size\"]\n sys.stdout.write(bcolors.MAGENTA+\\\n \"-----------------------------\\n\" +\\\n \"Quota for user: \" + settings.USER + \"\\n\" +\\\n \"-----------------------------\\n\" + bcolors.ENDC +\\\n \" Temporal Quota (TQ)\\n\"\n \" Used : \" + sizeof_fmt(used) + \" days\\n\" +\\\n \" Allocated : \" + sizeof_fmt(allocated) + \" days\\n\")\n if allocated - used < 0:\n sys.stdout.write(bcolors.RED)\n else:\n sys.stdout.write(bcolors.GREEN)\n sys.stdout.write(\" Remaining : \" + sizeof_fmt(allocated - used) + bcolors.ENDC + \" days\\n\")\n\n sys.stdout.write(\"-----------------------------\\n\")\n sys.stdout.write(\" Hard Quota (HQ)\\n\")\n sys.stdout.write(\" Used : \" + sizeof_fmt(total) + \"\\n\")\n sys.stdout.write(\" Allocated : \" + sizeof_fmt(hard_limit) + \"\\n\")\n if hard_limit - total < 0:\n sys.stdout.write(bcolors.RED)\n else:\n sys.stdout.write(bcolors.GREEN)\n sys.stdout.write(\" Remaining : \" + sizeof_fmt(hard_limit - total) + bcolors.ENDC + \"\\n\")\n\n elif response.status_code == 404:\n user_not_initialized_message()\n else:\n error_from_response(response)", "title": "" }, { "docid": "cff67818a353abd8a6b28ec188d546ab", "score": "0.67111605", "text": "def setting_storage_usage(self):\n\n ceph_cluster = CephCluster()\n ceph_capacity = ceph_cluster.get_ceph_capacity()\n log.info(f\"Total storage capacity is {ceph_capacity} GiB\")\n self.total_data_set = int(ceph_capacity * 0.4)\n self.filesize = int(\n self.fio_cr[\"spec\"][\"workload\"][\"args\"][\"filesize\"].replace(\"GiB\", \"\")\n )\n # To make sure the number of App pods will not be more then 50, in case\n # of large data set, changing the size of the file each pod will work on\n if self.total_data_set > 500:\n self.filesize = int(ceph_capacity * 0.008)\n self.fio_cr[\"spec\"][\"workload\"][\"args\"][\"filesize\"] = f\"{self.filesize}GiB\"\n # make sure that the storage size is larger then the file size\n self.fio_cr[\"spec\"][\"workload\"][\"args\"][\n \"storagesize\"\n ] = f\"{int(self.filesize * 1.2)}Gi\"\n self.fio_cr[\"spec\"][\"workload\"][\"args\"][\"servers\"] = int(\n self.total_data_set / self.filesize\n )\n log.info(f\"Total Data set to work on is : {self.total_data_set} GiB\")", "title": "" }, { "docid": "a1a072cb7a8e239f4c0149006e2b5e21", "score": "0.67043", "text": "def getquota(user, used=False):\n column=2\n percent=\"\"\n if used:\n column=1\n size=0\n cmd=\"edquota -u %s -f %s 2>/dev/null\"%(user, HOME)\n p = Popen(cmd, shell=True, bufsize=0, stdout=PIPE, stderr=STDOUT, close_fds=True)\n stdout = p.stdout\n for line in stdout.readlines():\n if line 
!= '\\n':\n line=line.replace('\\n', '')\n #print \"==>%s\"%line\n if REMOTE in line:\n size=line.split()[column]\n if used:\n if int(line.split()[2]) == 0:\n return \"<span style='color:red'>no cuota</span>\"\n percent=float(line.split()[1])/float(line.split()[2])*100\n size=int(size)/1024\n maxsize=int(line.split()[2])/1024\n color=\"black\"\n if size >= maxsize:\n color=\"red\"\n return \"<span style='color:%s'>%sMB / %sMB (%0.2f%%)</span>\"%(color, size, maxsize, percent)\n size=int(size)/1024\n return size", "title": "" }, { "docid": "40bd109ffeace7867670893adcbba7b2", "score": "0.66703975", "text": "def getQuota(user, used=False):\n column = 2\n percent = \"\"\n if used:\n column = 1\n size = 0\n cmd = \"/usr/sbin/edquota -u %s -f %s \" % (user, HOME)\n # sys.stderr.write(\"<? /* getQuota cmd='%s' */ ?>\\n\" % (cmd))\n p = Popen(cmd, shell=True, bufsize=0, stdout=PIPE, stderr=STDOUT, close_fds=True)\n stdout = p.stdout\n for line in stdout.readlines():\n if line != '\\n':\n line = line.replace('\\n', '')\n if 'Filesystem' in line or 'Disk quotas for user' in line:\n continue\n #print \"==>%s\"%line\n part = line.strip().split()[0]\n # sys.stderr.write(\"<? /* getQuota user=%s part='%s' line='%s' */ ?>\\n\" % (user, part, line))\n # if REMOTE in line:\n if part in REMOTES:\n size = line.split()[column]\n if used:\n if int(line.split()[2]) == 0:\n return (False, 0, 0, 0)\n # print \"==>%s\"%line\n percent = float(line.split()[1]) / float(line.split()[2]) * 100\n # print \"percent(%s)=%s/%s/100\" %(percent, line.split()[1], line.split()[2])\n # print \"\\n\\n\"\n size = int(size) / 1024\n # if user == 'pepe':\n # size = 8000\n # percent = 200\n maxsize = int(line.split()[2]) / 1024\n overquota = False\n # print \"%s %s > %s\" % (user, percent, LIMIT)\n # sys.stderr.write(\"%s %s > %s\\n\" % (user, percent, LIMIT))\n if percent > LIMIT:\n overquota = True\n return (overquota, size, maxsize, \"%0.2f%%\" % percent)\n return (False, 0, 0, 0)", "title": "" }, { "docid": "58695f019bb2761ae309163c5f178b7b", "score": "0.6621589", "text": "def storage_usage(self):\n rsp = self._dev.rpc.get_system_storage()\n\n def _name(fs):\n return fs.findtext(\"filesystem-name\").strip()\n\n def _decode(fs):\n r = {}\n r[\"mount\"] = fs.find(\"mounted-on\").text.strip()\n tb = fs.find(\"total-blocks\")\n r[\"total\"] = tb.get(\"format\")\n r[\"total_blocks\"] = int(tb.text)\n ub = fs.find(\"used-blocks\")\n r[\"used\"] = ub.get(\"format\")\n r[\"used_blocks\"] = int(ub.text)\n r[\"used_pct\"] = fs.find(\"used-percent\").text.strip()\n ab = fs.find(\"available-blocks\")\n r[\"avail\"] = ab.get(\"format\")\n r[\"avail_block\"] = int(ab.text)\n return r\n\n re_list = rsp.xpath(\"multi-routing-engine-item\")\n if re_list:\n fs_dict = {}\n for re in re_list:\n re_name = re.findtext(\"re-name\").strip()\n re_fs_dict = dict(\n (_name(fs), _decode(fs))\n for fs in re.xpath(\"system-storage-information/filesystem\")\n )\n fs_dict[re_name] = re_fs_dict\n return fs_dict\n\n return dict((_name(fs), _decode(fs)) for fs in rsp.xpath(\"filesystem\"))", "title": "" }, { "docid": "75fb6708dea1aab01561c26be414873d", "score": "0.64306474", "text": "def test_calculate_custom_quota_size(self):\n\n project = fake_clients.FakeProject(name=\"test_project\", id=\"test_project_id\")\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n admin_headers = {\n \"project_name\": \"test_project\",\n \"project_id\": 
project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"test@example.com\",\n \"user_id\": user.id,\n \"authenticated\": True,\n }\n\n cinderquota = cinder_cache[\"RegionOne\"][\"test_project_id\"][\"quota\"]\n cinderquota[\"gigabytes\"] = 6000\n novaquota = nova_cache[\"RegionOne\"][\"test_project_id\"][\"quota\"]\n novaquota[\"ram\"] = 70000\n neutronquota = neutron_cache[\"RegionOne\"][\"test_project_id\"][\"quota\"]\n neutronquota[\"network\"] = 4\n\n url = \"/v1/openstack/quotas/?regions=RegionOne\"\n\n response = self.client.get(url, headers=admin_headers)\n # First check we can actually access the page correctly\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"regions\"][0][\"current_quota_size\"], \"small\")", "title": "" }, { "docid": "39768bc21e6dd39bee6c76fa3eddc5d8", "score": "0.63918763", "text": "def currentQuotaUse(self, request):\n assert self.isCollection(), \"Only collections can have a quota root\"\n assert self.hasQuotaRoot(request), (\n \"Quota use only on quota root collection\"\n )\n\n # Try to get the cached value property\n if self.hasDeadProperty(TwistedQuotaUsedProperty):\n return succeed(\n int(str(self.readDeadProperty(TwistedQuotaUsedProperty)))\n )\n else:\n # Do brute force size determination and cache the result\n # in the private property\n def _defer(result):\n self.writeDeadProperty(TwistedQuotaUsedProperty(str(result)))\n return result\n d = self.quotaSize(request)\n d.addCallback(_defer)\n return d", "title": "" }, { "docid": "81f799a7b2cd624ad5808a5c6ca8af4f", "score": "0.63672084", "text": "def quota(self, request):\n\n # See if already cached\n if hasattr(request, \"quota\"):\n if self in request.quota:\n return succeed(request.quota[self])\n else:\n request.quota = {}\n\n # Find the quota root for this resource and return its data\n def gotQuotaRootResource(qroot_resource):\n if qroot_resource:\n qroot = qroot_resource.quotaRoot(request)\n if qroot is not None:\n def gotUsage(used):\n available = qroot - used\n if available < 0:\n available = 0\n request.quota[self] = (available, used)\n return (available, used)\n\n d = qroot_resource.currentQuotaUse(request)\n d.addCallback(gotUsage)\n return d\n\n request.quota[self] = None\n return None\n\n d = self.quotaRootResource(request)\n d.addCallback(gotQuotaRootResource)\n return d", "title": "" }, { "docid": "d5032557390ad234ccb615d22afcee5b", "score": "0.62543184", "text": "def quota(self) -> str:\n return pulumi.get(self, \"quota\")", "title": "" }, { "docid": "4aa34de00d2b28b8f197ef849a22a17e", "score": "0.62353826", "text": "def disk_quota(self) -> int:\n return pulumi.get(self, \"disk_quota\")", "title": "" }, { "docid": "59c952a2c06f086aaefb4d040900ced3", "score": "0.6209217", "text": "def used(self):\n return self.total - self.free_storage_pool - self.free_raw_disk", "title": "" }, { "docid": "6f281c9ce8d5aa1748922ec5930162de", "score": "0.6202947", "text": "def _compute_space_utilization(self, datastore_summary):\r\n return (\r\n 1.0 -\r\n datastore_summary.freeSpace / float(datastore_summary.capacity)\r\n )", "title": "" }, { "docid": "678fa91d32942e9e9162972b9a7ea547", "score": "0.619961", "text": "def _get_free_space(self):\n free_space = None\n free_space_output = self.api_handler.tm.util.bash.exec_cmd(\"run\", utilCmdArgs='-c \"vgdisplay -s --units G\"')\n if free_space_output:\n free_space = free_space_output.commandResult\n free_space_regex = r\".*\\s\\/\\s(\\d+\\.?\\d+) GB free\"\n match = re.match(free_space_regex, 
free_space)\n\n if match:\n free_space = float(match.group(1))\n\n return free_space", "title": "" }, { "docid": "474dd0386e484aa1ceb1e4b8f09cdb33", "score": "0.61928236", "text": "def query_storage_resource_quota(self, query_quota_filter=None):\n project_id = self.get_resource_group_id()\n try:\n quotas = self.cinder_client.quotas.get(project_id)\n except Exception as e:\n LOG.exception(e)\n raise OpenstackVimAdapterError('Unable to get storage resource quota - %s' % e)\n virtual_storage_quota = VirtualStorageQuota()\n virtual_storage_quota.resource_group_id = str(project_id)\n resources = {\n 'storage_size': quotas.gigabytes,\n 'num_snapshots': quotas.snapshots,\n 'num_volumes': quotas.volumes\n }\n for item, value in resources.items():\n if value != -1:\n setattr(virtual_storage_quota, item, value)\n return virtual_storage_quota", "title": "" }, { "docid": "57b90838f24cea7f16ce7447b9fa148e", "score": "0.61566186", "text": "def get_remaining_quota(self):\n\t\tif self.disk_quota == 0:\n\t\t\t# Unlimited storage\n\t\t\t# return math.inf\n\t\t\t# Limit space to available disk space\n\t\t\treturn int(shutil.disk_usage(settings.MEDIA_ROOT)[2])\n\t\telse:\n\t\t\treturn self.disk_quota - self.used_quota", "title": "" }, { "docid": "e091fed9ad0ab600749835ee70522554", "score": "0.6135651", "text": "def usage_quota(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"usage_quota\")", "title": "" }, { "docid": "42686b7e333cd287b48ff4706c731073", "score": "0.6134971", "text": "def Quota(self) -> int:", "title": "" }, { "docid": "42686b7e333cd287b48ff4706c731073", "score": "0.6134971", "text": "def Quota(self) -> int:", "title": "" }, { "docid": "ad19c727648e11be30f97bf3b7a9d098", "score": "0.6109208", "text": "def neutron_quota(self):\n try:\n quota = self._nt_drv.show_quota(self.project_id)\n except Exception as e:\n self.log.exception(\"Get Neutron quota operation failed. 
Exception: %s\", str(e))\n raise\n return quota", "title": "" }, { "docid": "88d44d3a801b94e2df1f0cf6c628a8a7", "score": "0.61003107", "text": "def getDiskSpace(self):\n result = S_OK()\n comm = 'df -P -m .'\n spaceDict = shellCall(5,comm)\n if spaceDict['OK']:\n space = string.split(spaceDict['Value'][1]) [10]\n result['Value'] = float(space) # MB\n else:\n result = S_ERROR('Could not obtain disk usage')\n\n return result", "title": "" }, { "docid": "165c804c40a00eb6545e22b622de95b8", "score": "0.60631186", "text": "def evaluateQuotaExceeded(self):\n self.availableRSEs.clear()\n self.outOfSpaceNodes.clear()\n # given a configurable sub-fraction of our quota, recalculate how much storage is left\n for rse, info in viewitems(self.nodeUsage):\n quotaAvail = info['quota'] * self.quotaFraction\n info['quota_avail'] = min(quotaAvail, info['bytes_remaining'])\n if info['quota_avail'] < self.minimumSpace:\n self.outOfSpaceNodes.add(rse)\n else:\n self.availableRSEs.add(rse)\n self.logger.info(\"Currently %d nodes are out of space.\", len(self.outOfSpaceNodes))", "title": "" }, { "docid": "725d7f9b37240d09afa125da5df75138", "score": "0.60235", "text": "def loadQuota(self):\n\n response, = self.client.query( 'get_quota_by_filesys', (self.label, ), version = 14 )\n result = utils.responseToDict(self.quota_query_description, response)\n self.quota, self.quota_lastmod_datetime, self.quota_lastmod_by, self.quota_lastmod_with = result['size'], result['lastmod_datetime'], result['lastmod_by'], result['lastmod_with']", "title": "" }, { "docid": "44ed4528bac9dbc5c22031318c7f7f35", "score": "0.6001022", "text": "def quota(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"quota\")", "title": "" }, { "docid": "5f2e122e587099bd69f317c5f5cc2d3e", "score": "0.59982365", "text": "def _update_capacity_utilisation(self):\n\t\treach = Circle(self.position.center(), self.radius)\n\t\tproviders = self.island.get_providers_in_range(reach, reslist=self.get_needed_resources())\n\t\tproviders = [ p for p in providers if isinstance(p, Field) ]\n\t\tself.capacity_utilisation = float(len(providers))/self.max_fields_possible\n\t\t# sanity checks for theoretically impossible cases:\n\t\tself.capacity_utilisation = min(self.capacity_utilisation, 1.0)\n\t\tself.capacity_utilisation = max(self.capacity_utilisation, 0.0)", "title": "" }, { "docid": "12a07370a1bdd168051a7ddf165210a3", "score": "0.5997863", "text": "def handle_quota(brick_path, volname, pvtype):\n\n volhash = get_volname_hash(volname)\n volpath = get_volume_path(pvtype, volhash, volname)\n subdir_path = os.path.join(brick_path, volpath)\n\n pvinfo_file_path = os.path.join(brick_path, \"info\", volpath + \".json\")\n if os.path.exists(pvinfo_file_path):\n data = {}\n with open(pvinfo_file_path) as pvinfo_file:\n data = json.loads(pvinfo_file.read().strip())\n\n # global dictionary to avoid init to None\n global SIZE_LIMITS\n\n # Add every new entry of volnames\n if volname not in SIZE_LIMITS:\n SIZE_LIMITS[volname] = {}\n\n # Handle PV resize quota updates\n\n # Init existing_size to -1 to handle new quota requests\n # Update existing_size for every update requests\n if data[\"size\"] > SIZE_LIMITS[volname].get('existing_size', -1):\n SIZE_LIMITS[volname]['existing_size'] = data[\"size\"]\n # Quota already set for size, return\n else:\n return\n\n try:\n set_quota(os.path.dirname(brick_path), subdir_path, data[\"size\"])\n logging.info(logf(\n \"Quota set for size\",\n size=data[\"size\"]\n ))\n except CommandException as err:\n 
logging.error(logf(\"Failed to set Quota\",\n err=err.err,\n path=subdir_path.replace(\n brick_path, \"\"),\n size=data[\"size\"]))\n return", "title": "" }, { "docid": "445bc8c158864eaa13a00b626dec2ae9", "score": "0.59468603", "text": "def _update_capacity(self):\r\n tot_bytes, used_bytes = 0, 0\r\n pools = self._client.list_storage_pools()\r\n for pool in pools:\r\n if pool['volumeGroupRef'] in self._objects['disk_pool_refs']:\r\n tot_bytes = tot_bytes + int(pool.get('totalRaidedSpace', 0))\r\n used_bytes = used_bytes + int(pool.get('usedSpace', 0))\r\n self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.GiB\r\n self._stats['total_capacity_gb'] = tot_bytes / units.GiB", "title": "" }, { "docid": "295304fa983c91daae0748b7fc20d92e", "score": "0.593846", "text": "def listQuota(rc,output,outerr,parseParamList,Logger):\n quota=-1\n return quota", "title": "" }, { "docid": "8a48c5c16ebf21c0647324b31ed52dd1", "score": "0.593838", "text": "def setting_storage_usage(self, file_size, files, threads, samples, clients):\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"file_size\"] = file_size\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"files\"] = files\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"threads\"] = threads\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"samples\"] = samples\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"clients\"] = clients\n\n # Calculating the size of the volume that need to be test, it should\n # be at least twice in the size then the size of the files, and at\n # least 100Gi.\n # Since the file_size is in Kb and the vol_size need to be in Gb, more\n # calculation is needed.\n vol_size = int(files * threads * file_size * 3)\n vol_size = int(vol_size / constants.GB2KB)\n if vol_size < 100:\n vol_size = 100\n self.crd_data[\"spec\"][\"workload\"][\"args\"][\"storagesize\"] = f\"{vol_size}Gi\"", "title": "" }, { "docid": "41290b4daeb955a0cc8e079ce3c354b3", "score": "0.5929097", "text": "def pool_usage(mnt_pt):\n cmd = [BTRFS, 'fi', 'usage', '-b', mnt_pt]\n out, err, rc = run_command(cmd)\n\n used = 0\n for line in out:\n fields = re.split('\\W+', line)\n if line.startswith('Data'):\n used += int(fields[5])\n elif re.search('Size', line):\n used += int(fields[3])\n\n return used / 1024", "title": "" }, { "docid": "963467d9a58675e91f8d8e117cf1e83f", "score": "0.59280235", "text": "def quotaSize(self, request):\n unimplemented(self)", "title": "" }, { "docid": "e3675c00a173435f09f1b33b19057d33", "score": "0.5921733", "text": "def hdd_used(self):\n\n response = self.response[0]\n\n # Value transformed from Bytes to MBytes\n free_hdd = int(response[\"free-hdd-space\"])\n total_hdd = int(response[\"total-hdd-space\"])\n used_hdd = total_hdd - free_hdd\n used_MB = truncate(used_hdd / (10**6), 2)\n used_percentage = truncate((used_hdd / total_hdd) * 100, 1)\n\n return used_MB, used_percentage", "title": "" }, { "docid": "1745088844d445d7b5444091d1fb82de", "score": "0.5912309", "text": "def storage_capacity(self) -> int:\n return pulumi.get(self, \"storage_capacity\")", "title": "" }, { "docid": "352115367168822fa7f449b406d0b515", "score": "0.58969355", "text": "def _get_free_capacity(self):\r\n self._update_login_info()\r\n params_conf = self._parse_conf_lun_params()\r\n lun_type = params_conf['LUNType']\r\n pools_conf = params_conf['StoragePool']\r\n pools_dev = self._get_dev_pool_info(lun_type)\r\n total_free_capacity = 0.0\r\n for pool_dev in pools_dev:\r\n for pool_conf in pools_conf:\r\n if ((lun_type == 'Thick') and\r\n (pool_dev[5] 
== pool_conf)):\r\n total_free_capacity += float(pool_dev[3])\r\n break\r\n elif pool_dev[1] == pool_conf:\r\n total_free_capacity += float(pool_dev[4])\r\n break\r\n\r\n return total_free_capacity / 1024", "title": "" }, { "docid": "a2dec0438004bdc97451596f4f05bd35", "score": "0.58937913", "text": "def _get_free_capacity(self):\r\n self._update_login_info()\r\n lun_type = ('Thin' if self.device_type == 'Dorado2100 G2' else 'Thick')\r\n pools_dev = self._get_dev_pool_info(lun_type)\r\n total_free_capacity = 0.0\r\n for pool_dev in pools_dev:\r\n if self.device_type == 'Dorado2100 G2':\r\n total_free_capacity += float(pool_dev[2])\r\n continue\r\n else:\r\n params_conf = self._parse_conf_lun_params()\r\n pools_conf = params_conf['StoragePool']\r\n for pool_conf in pools_conf:\r\n if pool_dev[5] == pool_conf:\r\n total_free_capacity += float(pool_dev[3])\r\n break\r\n\r\n return total_free_capacity / 1024", "title": "" }, { "docid": "1da0a3f48f45d79ec25f5236e24ac1ca", "score": "0.58911145", "text": "def available_tiered_storage_in_bytes(self) -> Optional[float]:\n return pulumi.get(self, \"available_tiered_storage_in_bytes\")", "title": "" }, { "docid": "ee25e867288d833800473112534b4b09", "score": "0.5889891", "text": "def get_used_space_on_mount_point(pod_obj):\n # Verify data's are written to mount-point\n mount_point = pod_obj.exec_cmd_on_pod(command=\"df -kh\")\n mount_point = mount_point.split()\n used_percentage = mount_point[mount_point.index(constants.MOUNT_POINT) - 1]\n return used_percentage", "title": "" }, { "docid": "11ccd44fb4db291c7e7035aa3dc9b218", "score": "0.5880917", "text": "def calculate_quota(user, do_save=True):\n item_container = get_item_container_by_path(get_home_path(user))\n protected = item_container.container.is_protected()\n base_path = get_folder_name(item_container, protected)\n total = 0\n for root, dirs, files in os.walk(base_path):\n total += sum(os.path.getsize(os.path.join(root, name)) for name in files)\n if do_save:\n set_quota(user.username, total)\n return total", "title": "" }, { "docid": "a9a78b7bedad0fd6757998c59d05f240", "score": "0.58649194", "text": "def get_storagestats(self):\n # Creating logger\n logger = logging.getLogger(__name__)\n \n self.stats['bytesfree'] = 0\n self.stats['bytesused'] = 0\n self.stats['quota'] = 0\n # Not required, but is useful for reporting/accounting:\n self.stats['filecount'] = 0", "title": "" }, { "docid": "3cf57ab65a5e3395ffd896b6dbd572a7", "score": "0.5848431", "text": "def list_quota_usage(\n ctx, id, email, user_access_token, admin_access_token, output_format, human_readable\n):\n try:\n response = _get_users(id, email, user_access_token)\n headers = [\"id\", \"email\", \"cpu-used\", \"cpu-limit\", \"disk-used\", \"disk-limit\"]\n health_order = {\n QuotaHealth.healthy.name: 0,\n QuotaHealth.warning.name: 1,\n QuotaHealth.critical.name: 2,\n }\n data = []\n colours = []\n health = []\n for user in response:\n quota_usage = user.get_quota_usage()\n disk, cpu = quota_usage.get(\"disk\"), quota_usage.get(\"cpu\")\n data.append(\n (\n str(user.id_),\n user.email,\n cpu.get(\"usage\").get(human_readable),\n cpu.get(\"limit\", {}).get(human_readable) or \"-\",\n disk.get(\"usage\").get(human_readable),\n disk.get(\"limit\", {}).get(human_readable) or \"-\",\n )\n )\n health_ordered = max(\n [\n disk.get(\"health\", QuotaHealth.healthy.name),\n cpu.get(\"health\", QuotaHealth.healthy.name),\n ],\n key=lambda key: health_order[key],\n )\n colours.append(REANA_RESOURCE_HEALTH_COLORS[health_ordered])\n 
health.append(health_ordered)\n\n if data and colours and health:\n data, colours, _ = (\n list(t)\n for t in zip(\n *sorted(\n zip(data, colours, health),\n key=lambda t: health_order[t[2]],\n reverse=True,\n )\n )\n )\n\n if output_format:\n tablib_data = tablib.Dataset()\n tablib_data.headers = headers\n for row in data:\n tablib_data.append(row)\n\n click.echo(tablib_data.export(output_format))\n else:\n click_table_printer(headers, [], data, colours)\n\n except Exception as e:\n logging.debug(traceback.format_exc())\n logging.debug(str(e))\n click.echo(\n click.style(\"User could not be retrieved: \\n{}\".format(str(e)), fg=\"red\"),\n err=True,\n )", "title": "" }, { "docid": "3b84debd44cccbae50aa8ba088ec6aff", "score": "0.58480465", "text": "def getSpaceUse(self):\r\n stats = {}\r\n try:\r\n out = subprocess.Popen([dfCmd, \"-Pk\"], \r\n stdout=subprocess.PIPE).communicate()[0]\r\n except:\r\n raise Exception('Execution of command %s failed.' % dfCmd)\r\n lines = out.splitlines()\r\n if len(lines) > 1:\r\n for line in lines[1:]:\r\n fsstats = {}\r\n cols = line.split()\r\n fsstats['device'] = cols[0]\r\n fsstats['type'] = self._fstypeDict[cols[5]]\r\n fsstats['total'] = 1024 * int(cols[1])\r\n fsstats['inuse'] = 1024 * int(cols[2])\r\n fsstats['avail'] = 1024 * int(cols[3])\r\n fsstats['inuse_pcent'] = int(cols[4][:-1])\r\n stats[cols[5]] = fsstats\r\n return stats", "title": "" }, { "docid": "c92a96174217b5bf594bb015a49c0bbf", "score": "0.5841492", "text": "def quota(self) -> \"k8sv1.ResourceQuotaSpec\":\n return self.__quota", "title": "" }, { "docid": "0135f606eedead250e80ee4b0d4230c3", "score": "0.58411676", "text": "def updateQuotaUse(self, request, adjust):\n assert self.isCollection(), \"Only collections can have a quota root\"\n\n # Get current value\n def _defer(size):\n size += adjust\n\n # Sanity check the resulting size\n if size >= 0:\n self.writeDeadProperty(TwistedQuotaUsedProperty(str(size)))\n else:\n # Remove the dead property and re-read to do brute\n # force quota calc\n log.info(\"Attempt to set quota used to a negative value: %s \"\n \"(adjustment: %s)\"\n % (size, adjust,))\n self.removeDeadProperty(TwistedQuotaUsedProperty)\n return self.currentQuotaUse(request)\n\n d = self.currentQuotaUse(request)\n d.addCallback(_defer)\n return d", "title": "" }, { "docid": "83c68b348e1344e55f8a495da9b937e1", "score": "0.5836171", "text": "def test_get_available_capacity_with_df(self):\r\n mox = self._mox\r\n drv = self._driver\r\n\r\n df_total_size = 2620544\r\n df_avail = 1490560\r\n df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\\n'\r\n df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \\\r\n (df_total_size, df_avail)\r\n df_output = df_head + df_data\r\n\r\n mox.StubOutWithMock(drv, '_get_mount_point_for_share')\r\n drv._get_mount_point_for_share(self.TEST_EXPORT1).\\\r\n AndReturn(self.TEST_MNT_POINT)\r\n\r\n mox.StubOutWithMock(drv, '_execute')\r\n drv._execute('df', '--portability', '--block-size', '1',\r\n self.TEST_MNT_POINT,\r\n run_as_root=True).AndReturn((df_output, None))\r\n\r\n mox.ReplayAll()\r\n\r\n self.assertEqual((df_avail, df_total_size),\r\n drv._get_available_capacity(self.TEST_EXPORT1))\r\n\r\n mox.VerifyAll()", "title": "" }, { "docid": "c9ba36a34c5f1427dbf2d99eab4138f2", "score": "0.5819184", "text": "def _get_capacity_info(self, nfs_share):\r\n nms = self.share2nms[nfs_share]\r\n ns_volume, ns_folder = self._get_share_datasets(nfs_share)\r\n folder_props = nms.folder.get_child_props('%s/%s' % (ns_volume,\r\n ns_folder), 
'')\r\n free = utils.str2size(folder_props['available'])\r\n allocated = utils.str2size(folder_props['used'])\r\n return free + allocated, free, allocated", "title": "" }, { "docid": "e9dc033a9b0c4fa7aa962e72c7392191", "score": "0.5811417", "text": "def test_calculate_quota_size_zero(self):\n\n project = fake_clients.FakeProject(name=\"test_project\", id=\"test_project_id\")\n\n user = fake_clients.FakeUser(\n name=\"test@example.com\", password=\"123\", email=\"test@example.com\"\n )\n\n setup_identity_cache(projects=[project], users=[user])\n\n admin_headers = {\n \"project_name\": \"test_project\",\n \"project_id\": project.id,\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"test@example.com\",\n \"user_id\": \"user_id\",\n \"authenticated\": True,\n }\n\n setup_quota_cache(\"RegionOne\", project.id, \"small\")\n\n url = \"/v1/openstack/quotas/?regions=RegionOne\"\n\n response = self.client.get(url, headers=admin_headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"regions\"][0][\"current_quota_size\"], \"small\")\n\n cinderquota = cinder_cache[\"RegionOne\"][project.id][\"quota\"]\n cinderquota[\"gigabytes\"] = 0\n\n # Check that the zero value doesn't interfer with being small\n response = self.client.get(url, headers=admin_headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"regions\"][0][\"current_quota_size\"], \"small\")\n\n setup_quota_cache(\"RegionOne\", project.id, \"zero\")\n\n url = \"/v1/openstack/quotas/?regions=RegionOne\"\n\n response = self.client.get(url, headers=admin_headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"regions\"][0][\"current_quota_size\"], \"zero\")\n\n # Check that the zero quota will still be counted even if\n # one value is not zero\n cinderquota = cinder_cache[\"RegionOne\"][project.id][\"quota\"]\n cinderquota[\"gigabytes\"] = 600\n\n response = self.client.get(url, headers=admin_headers)\n # First check we can actually access the page correctly\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"regions\"][0][\"current_quota_size\"], \"zero\")", "title": "" }, { "docid": "6df7fe44a7941472d1690f2394b12f5f", "score": "0.5800447", "text": "def storage_capacity(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"storage_capacity\")", "title": "" }, { "docid": "4ae3be50ea027c90b4cae3d58c0dab46", "score": "0.5796271", "text": "def _usagecapacity(service):\n usage_xpu = Capacity()\n capacity_xpus = Capacity()\n busy = 0\n detail = {}\n for resource in service.list_resources():\n detail[resource] = {'busy': '', 'reserved': ''}\n r_capacity = service.list_resources()[resource]\n detail[resource]['capacity'] = r_capacity\n capacity_xpus += r_capacity\n reserved = redis_db.get(\"reserved:%s:%s\" % (service.name, resource))\n if reserved:\n detail[resource]['reserved'] = reserved\n\n count_map_gpu = Counter()\n count_map_cpu = Counter()\n task_type = {}\n count_used_xpus = Capacity()\n\n r_usage_gpu = redis_db.hgetall(\"gpu_resource:%s:%s\" % (service.name, resource)).values()\n for t in r_usage_gpu:\n if t not in task_type:\n task_type[t] = redis_db.hget(\"task:%s\" % t, \"type\")\n count_map_gpu[t] += 1\n count_used_xpus.incr_ngpus(1)\n\n r_usage_cpu = redis_db.hgetall(\"cpu_resource:%s:%s\" % (service.name, resource)).values()\n for t in r_usage_cpu:\n if t not in task_type:\n task_type[t] = redis_db.hget(\"task:%s\" % t, 
\"type\")\n count_map_cpu[t] += 1\n count_used_xpus.incr_ncpus(1)\n\n detail[resource]['usage'] = [\"%s %s: %d (%d)\" % (task_type[t], t, count_map_gpu[t],\n count_map_cpu[t]) for t in task_type]\n detail[resource]['avail_gpus'] = r_capacity.ngpus - count_used_xpus.ngpus\n detail[resource]['avail_cpus'] = r_capacity.ncpus - count_used_xpus.ncpus\n err = redis_db.get(\"busy:%s:%s\" % (service.name, resource))\n if err:\n detail[resource]['busy'] = err\n busy = busy + 1\n usage_xpu += count_used_xpus\n queued = redis_db.llen(\"queued:\" + service.name)\n return (\"%d (%d)\" % (usage_xpu.ngpus, usage_xpu.ncpus), queued,\n \"%d (%d)\" % (capacity_xpus.ngpus, capacity_xpus.ncpus),\n busy, detail)", "title": "" }, { "docid": "4596f4ce4b8cdfc586dbe0ba7a4c81e2", "score": "0.57897", "text": "def quota_usage_get(context, project_id, resource):\r\n return IMPL.quota_usage_get(context, project_id, resource)", "title": "" }, { "docid": "33a0f4732659dec9493252c9a2b5b87b", "score": "0.57795477", "text": "def _get_utilization(self):\n total_cap = self._total_capacity \n if total_cap <= 0: \n return None \n\n else: \n return self._usage / total_cap", "title": "" }, { "docid": "c29e0eb2dadf38c4bac0cc6fca6b9b55", "score": "0.5772336", "text": "def getStorageType():", "title": "" }, { "docid": "792c8fd4e52a1b9ea8a9f96ff3f37818", "score": "0.5771832", "text": "def ds_space(ds_moref, warning, critical):\n datastore_capacity = float(ds_moref.summary.capacity / 1024 / 1024 / 1024)\n datastore_free = float(ds_moref.summary.freeSpace / 1024 / 1024 / 1024)\n datastore_used_pct = ((1 - (datastore_free / datastore_capacity)) * 100)\n extraOutput = \"(Used {:.1f} GB of {:.1f} GB)\".format((datastore_used_pct * datastore_capacity / 100),\n datastore_capacity)\n print_output_float(datastore_used_pct, 'Datastore Used Space', warning, critical, '%', extraOutput)", "title": "" }, { "docid": "4205844f254c34be85d0a8d9b8bc9b99", "score": "0.5761206", "text": "def _check_quota(self, user: zoe_lib.state.User, application_description):\n quota = self.sql.quota.select(only_one=True, **{'id': user.quota_id})\n\n running_execs = self.sql.executions.select(**{'status': 'running', 'user_id': user.id})\n running_execs += self.sql.executions.select(**{'status': 'starting', 'user_id': user.id})\n running_execs += self.sql.executions.select(**{'status': 'queued', 'user_id': user.id})\n running_execs += self.sql.executions.select(**{'status': 'image download', 'user_id': user.id})\n running_execs += self.sql.executions.select(**{'status': 'submitted', 'user_id': user.id})\n if quota.concurrent_executions != 0 and len(running_execs) >= quota.concurrent_executions:\n raise zoe_api.exceptions.ZoeQuotaException('You cannot run more than {} executions at a time, quota exceeded.'.format(quota.concurrent_executions))\n\n if quota.cores == 0 and quota.memory == 0:\n return\n\n reserved_cores = 0\n reserved_mem = 0\n for e in running_execs:\n for service in e.services:\n reserved_cores += service.resource_reservation.cores.min\n reserved_mem += service.resource_reservation.memory.min\n\n new_exec_cores = 0\n new_exec_memory = 0\n for service in application_description['services']:\n new_exec_cores += service['resources']['cores']['min'] * service['total_count']\n new_exec_memory += service['resources']['memory']['min'] * service['total_count']\n\n if quota.cores != 0 and quota.cores < reserved_cores + new_exec_cores:\n raise zoe_api.exceptions.ZoeQuotaException('You requested {} cores more than your quota allows, quota 
exceeded.'.format((reserved_cores + new_exec_cores) - quota.cores))\n if quota.memory != 0 and quota.memory < reserved_mem + new_exec_memory:\n raise zoe_api.exceptions.ZoeQuotaException('You requested {}B memory more than your quota allows, quota exceeded.'.format((reserved_mem + new_exec_memory) - quota.memory))", "title": "" }, { "docid": "f3ccefe3d5270ffe60a603f994c06edf", "score": "0.5754861", "text": "def _estimate_cockroachdb_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (compaction, backup, repair)\n needed_cores = sqrt_staffed_cores(desires) * 2\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _crdb_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = max(1, needed_cores // zones_per_region)\n needed_disk = max(1, needed_disk // zones_per_region)\n needed_memory = max(1, int(needed_memory // zones_per_region))\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"crdb-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "title": "" }, { "docid": "5203103f6a636d33791a63d6bcb7696e", "score": "0.5735907", "text": "def printQuotaSummary(self):\n self.logger.info(\"Summary of the current quotas in Terabytes:\")\n for node in sorted(self.nodeUsage.keys()):\n msg = \" %s:\\t\\tbytes_limit: %.2f, bytes_used: %.2f, bytes_remaining: %.2f, \"\n msg += \"quota: %.2f, quota_avail: %.2f\"\n self.logger.info(msg, node, teraBytes(self.nodeUsage[node]['bytes_limit']),\n teraBytes(self.nodeUsage[node]['bytes']),\n teraBytes(self.nodeUsage[node]['bytes_remaining']),\n teraBytes(self.nodeUsage[node]['quota']),\n teraBytes(self.nodeUsage[node]['quota_avail']))\n 
self.logger.info(\"List of RSE's out of quota: %s\", self.outOfSpaceNodes)", "title": "" }, { "docid": "7763a167b0e1f6ce427db348b06b93ca", "score": "0.5733195", "text": "def used_space_percent(arg_list, additional_data) -> int:\n\n mount_point = arg_list[0]\n host = additional_data.host\n port = additional_data.port\n username = additional_data.username\n password = additional_data.password\n output = SystemCommandsModule.used_space_percent(mount_point, host=host, port=port,\n user=username, password=password)\n return output", "title": "" }, { "docid": "b47b337e40961f0f0b757f8f41303e20", "score": "0.5722404", "text": "def getStorageStatus():\n\n res = EInterface.sendCommand(\"AT+CPMS?\")[0].split(\",\")\n used = int(res[1])\n total = int(res[2])\n return {\"free\": total - used, \"used\": used, \"total\": total}", "title": "" }, { "docid": "8ac8d0c66e3b8b1cf00c40365063590e", "score": "0.571454", "text": "def quota_get(project_id, region_name=None):\n client = get_neutronclient(region_name)\n limit = client.show_quota(project_id).get('quota')\n\n fip_n = len(floatingip_list(project_id, region_name))\n listener_n = len(listener_list(project_id, region_name))\n lb_n = len(loadbalancer_list(project_id, region_name))\n net_n = len(network_list(project_id, region_name))\n subnet_n = len(subnet_list(project_id, region_name))\n pool_n = len(pool_list(project_id, region_name))\n router_n = len(router_list(project_id, region_name))\n sg_n = len(security_group_list(project_id, region_name))\n pf_n = len(port_forwarding_list(project_id, region_name))\n\n quota = {\n 'floatingip': {'in_use': fip_n, 'limit': limit.get('floatingip')},\n 'listener': {'in_use': listener_n, 'limit': limit.get('listener')},\n 'loadbalancer': {'in_use': lb_n, 'limit': limit.get('loadbalancer')},\n 'network': {'in_use': net_n, 'limit': limit.get('network')},\n 'pool': {'in_use': pool_n, 'limit': limit.get('pool')},\n 'router': {'in_use': router_n, 'limit': limit.get('router')},\n 'security_group': {\n 'in_use': sg_n, 'limit': limit.get('security_group')\n },\n 'subnet': {'in_use': subnet_n, 'limit': limit.get('subnet')},\n 'portforwardings': {\n 'in_use': pf_n, 'limit': limit.get('portforwardings')\n }\n }\n\n return quota", "title": "" }, { "docid": "71d269747719532b5eef378c21822db3", "score": "0.5709514", "text": "def quota(self):\n\n options = dict()\n options[\"api_key\"] = self.api_key\n options[\"server\"] = self.server\n\n path = \"/lookup/rate_limit\"\n\n uri_parts = (options[\"server\"], path)\n\n uri = \"\".join(uri_parts)\n\n results = _query(options, uri, quota=True)\n\n return results", "title": "" }, { "docid": "32cdff47a40290de3bf594b5edecdb66", "score": "0.56860876", "text": "def used(self):\n return float((self.statvfs.f_blocks - self.statvfs.f_bfree) * self.statvfs.f_frsize) / 1024 / 1024 / 1024", "title": "" }, { "docid": "c37118b1e3839d02c18fc45baf70682b", "score": "0.56847066", "text": "def _get_available_capacity(self, share):\n mount_point = self._get_mount_point_for_share(share)\n\n out, _ = self._execute('df', '--portability', '--block-size', '1',\n mount_point, run_as_root=True)\n out = out.splitlines()[1]\n\n size = int(out.split()[1])\n available = int(out.split()[3])\n\n return available, size", "title": "" }, { "docid": "feadc8a0925b0013b0d7a80375b3ff91", "score": "0.5684563", "text": "def test_storage_resource_performance_function(self):\n sc_keys, sr_keys = None, None\n try:\n sc_keys = self.perf.get_storage_container_keys()\n sr_keys = self.perf.get_storage_resource_keys()\n except 
exception.ResourceNotFoundException:\n if not sc_keys:\n self.skipTest('There are no storage containers available in '\n 'this environment.')\n else:\n self.skipTest('There are no storage resources available in '\n 'this environment.')\n sc_id = sc_keys[0].get(pc.STORAGE_CONT_ID)\n response = None\n for time_key in sr_keys:\n sr_id = time_key.get(pc.STORAGE_RES_ID)\n try:\n response = self.perf.get_storage_resource_stats(sc_id, sr_id,\n pc.KPI)\n break\n except exception.VolumeBackendAPIException:\n continue\n self.assertTrue(response)\n self.assertIsInstance(response, dict)\n self.assertIsInstance(response.get(pc.RESULT), list)", "title": "" }, { "docid": "5544a2a2a7c2a3b4e03d8b2c5a137191", "score": "0.56808364", "text": "def check_fs_usage(db, global_config):\n\n max_fs_usage = int(global_config['storage_usage'])\n if max_fs_usage == 0:\n return float('Inf')\n\n # List of records in local disk\n local_records_list = query.get_local_record_files(db)\n local_records_mb = sum(o.size for o in local_records_list)\n\n return max_fs_usage - local_records_mb", "title": "" }, { "docid": "d5570eec872665dd71525ee6f331dcef", "score": "0.5679947", "text": "def check_used_space_percent(arg_list, additional_data) -> str:\n\n mount_point = arg_list[0]\n value = arg_list[1]\n host = additional_data.host\n port = additional_data.port\n username = additional_data.username\n password = additional_data.password\n output = SystemCommandsModule.check_used_space_percent(mount_point, value, host=host, port=port,\n user=username, password=password)\n return output", "title": "" }, { "docid": "07d136c39613b7468e9137650a5b67dc", "score": "0.5675044", "text": "def check_quota(token, project_id):\n auth = check_credentials(token)\n dict_quotas = get_user_quota(auth)\n project_name = auth.get_project(project_id)['name']\n endpoints, user_id = endpoints_and_user_id(auth)\n net_client = init_cyclades_netclient(endpoints['network'],token)\n # Get pending quota for given project id\n pending_quota = retrieve_pending_clusters(token, project_name)\n\n limit_cd = dict_quotas[project_id]['cyclades.disk']['limit'] / Bytes_to_GB\n usage_cd = dict_quotas[project_id]['cyclades.disk']['usage'] / Bytes_to_GB\n project_limit_cd = dict_quotas[project_id]['cyclades.disk']['project_limit'] / Bytes_to_GB\n project_usage_cd = dict_quotas[project_id]['cyclades.disk']['project_usage'] / Bytes_to_GB\n pending_cd = pending_quota['Disk']\n available_cyclades_disk_GB = limit_cd-usage_cd\n if (available_cyclades_disk_GB > (project_limit_cd - project_usage_cd)):\n available_cyclades_disk_GB = project_limit_cd - project_usage_cd\n available_cyclades_disk_GB = available_cyclades_disk_GB - pending_cd\n\n limit_cpu = dict_quotas[project_id]['cyclades.cpu']['limit']\n usage_cpu = dict_quotas[project_id]['cyclades.cpu']['usage']\n project_limit_cpu = dict_quotas[project_id]['cyclades.cpu']['project_limit']\n project_usage_cpu = dict_quotas[project_id]['cyclades.cpu']['project_usage']\n pending_cpu = pending_quota['Cpus']\n available_cpu = limit_cpu - usage_cpu\n if (available_cpu > (project_limit_cpu - project_usage_cpu)):\n available_cpu = project_limit_cpu - project_usage_cpu\n available_cpu = available_cpu - pending_cpu\n\n limit_ram = dict_quotas[project_id]['cyclades.ram']['limit'] / Bytes_to_MB\n usage_ram = dict_quotas[project_id]['cyclades.ram']['usage'] / Bytes_to_MB\n project_limit_ram = dict_quotas[project_id]['cyclades.ram']['project_limit'] / Bytes_to_MB\n project_usage_ram = dict_quotas[project_id]['cyclades.ram']['project_usage'] / 
Bytes_to_MB\n pending_ram = pending_quota['Ram']\n available_ram = limit_ram-usage_ram\n if (available_ram > (project_limit_ram - project_usage_ram)):\n available_ram = project_limit_ram - project_usage_ram\n available_ram = available_ram - pending_ram\n\n limit_vm = dict_quotas[project_id]['cyclades.vm']['limit']\n usage_vm = dict_quotas[project_id]['cyclades.vm']['usage']\n project_limit_vm = dict_quotas[project_id]['cyclades.vm']['project_limit']\n project_usage_vm = dict_quotas[project_id]['cyclades.vm']['project_usage']\n pending_vm = pending_quota['VMs']\n available_vm = limit_vm-usage_vm\n if (available_vm > (project_limit_vm - project_usage_vm)):\n available_vm = project_limit_vm - project_usage_vm\n available_vm = available_vm - pending_vm\n \n pending_net = pending_quota['Network']\n limit_net = dict_quotas[project_id]['cyclades.network.private']['limit']\n usage_net = dict_quotas[project_id]['cyclades.network.private']['usage']\n project_limit_net = dict_quotas[project_id]['cyclades.network.private']['project_limit']\n project_usage_net = dict_quotas[project_id]['cyclades.network.private']['project_usage']\n available_networks = limit_net - usage_net\n if (available_networks > (project_limit_net - project_usage_net)):\n available_networks = project_limit_net - project_usage_net\n available_networks -= pending_net\n \n list_float_ips = net_client.list_floatingips()\n pending_ips = pending_quota['Ip']\n limit_ips = dict_quotas[project_id]['cyclades.floating_ip']['limit']\n usage_ips = dict_quotas[project_id]['cyclades.floating_ip']['usage']\n project_limit_ips = dict_quotas[project_id]['cyclades.floating_ip']['project_limit']\n project_usage_ips = dict_quotas[project_id]['cyclades.floating_ip']['project_usage']\n available_ips = limit_ips-usage_ips\n if (available_ips > (project_limit_ips - project_usage_ips)):\n available_ips = project_limit_ips - project_usage_ips\n available_ips -= pending_ips\n for d in list_float_ips:\n if d['instance_id'] is None and d['port_id'] is None:\n available_ips += 1\n\n quotas = {'cpus': {'limit': limit_cpu, 'available': available_cpu},\n 'ram': {'limit': limit_ram, 'available': available_ram},\n 'disk': {'limit': limit_cd,\n 'available': available_cyclades_disk_GB},\n 'cluster_size': {'limit': limit_vm, 'available': available_vm},\n 'network': {'available': available_networks},\n 'float_ips': {'available': available_ips}}\n return quotas", "title": "" }, { "docid": "b8df1783ed6e0d20f81cac1b2f08ee98", "score": "0.56671363", "text": "def used_space_bytes(self):\n self.lock.acquire()\n try:\n return self.physical_utilisation\n finally:\n self.lock.release()", "title": "" }, { "docid": "b1113f0b2bfaa22f2d9b345afa449139", "score": "0.56643826", "text": "def used_disk_space(self):\n\t\treturn self.total_disk_space-self.free_disk_space", "title": "" }, { "docid": "4429da0dc632329f74308d5f70d12c6f", "score": "0.5662718", "text": "def test_get_capacity_info(self):\r\n mox = self._mox\r\n drv = self._driver\r\n\r\n stat_total_size = 2620544\r\n stat_avail = 2129984\r\n stat_output = '1 %d %d' % (stat_total_size, stat_avail)\r\n\r\n du_used = 490560\r\n du_output = '%d /mnt' % du_used\r\n\r\n mox.StubOutWithMock(drv, '_get_mount_point_for_share')\r\n drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1).\\\r\n AndReturn(self.TEST_MNT_POINT)\r\n\r\n mox.StubOutWithMock(drv, '_execute')\r\n drv._execute('stat', '-f', '-c', '%S %b %a',\r\n self.TEST_MNT_POINT,\r\n run_as_root=True).AndReturn((stat_output, None))\r\n\r\n drv._execute('du', '-sb', 
'--apparent-size',\r\n '--exclude', '*snapshot*',\r\n self.TEST_MNT_POINT,\r\n run_as_root=True).AndReturn((du_output, None))\r\n\r\n mox.ReplayAll()\r\n\r\n self.assertEqual((stat_total_size, stat_avail, du_used),\r\n drv._get_capacity_info(self.TEST_NFS_EXPORT1))\r\n\r\n mox.VerifyAll()", "title": "" }, { "docid": "8e8532b78815a5f2b381489f62506f2d", "score": "0.5659123", "text": "def _get_available_capacity(self, glusterfs_share):\r\n mount_point = self._get_mount_point_for_share(glusterfs_share)\r\n\r\n out, _ = self._execute('df', '--portability', '--block-size', '1',\r\n mount_point, run_as_root=True)\r\n out = out.splitlines()[1]\r\n\r\n size = int(out.split()[1])\r\n available = int(out.split()[3])\r\n\r\n return available, size", "title": "" }, { "docid": "3969d76381ac46bdc4e39005fcf8cca3", "score": "0.56520736", "text": "def test_get_available_capacity_with_df(self):\n drv = self._driver\n\n df_total_size = 2620544\n df_avail = 1490560\n df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\\n'\n df_data = 'glusterfs-host:/export %d 996864 %d 41%% /mnt' % \\\n (df_total_size, df_avail)\n df_output = df_head + df_data\n\n with mock.patch.object(drv, '_get_mount_point_for_share') as \\\n mock_get_mount_point_for_share,\\\n mock.patch.object(drv, '_execute') as mock_execute:\n mock_get_mount_point_for_share.\\\n return_value = self.TEST_MNT_POINT\n mock_execute.return_value = (df_output, None)\n\n result = drv._get_available_capacity(self.TEST_EXPORT1)\n self.assertEqual((df_avail, df_total_size), result)", "title": "" }, { "docid": "5ab251398bb8a439c74fcbc8df9cf92e", "score": "0.5651109", "text": "def quota(self) -> Optional[pulumi.Input['GoogleCloudApigeeV1QuotaArgs']]:\n return pulumi.get(self, \"quota\")", "title": "" }, { "docid": "5ab251398bb8a439c74fcbc8df9cf92e", "score": "0.5651109", "text": "def quota(self) -> Optional[pulumi.Input['GoogleCloudApigeeV1QuotaArgs']]:\n return pulumi.get(self, \"quota\")", "title": "" }, { "docid": "8cd5e7155d988cc2b4280f77450bf214", "score": "0.56462497", "text": "def storage_usage_bytes(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"storage_usage_bytes\")", "title": "" }, { "docid": "e5d8e2d1d57457b4b978107363457bbb", "score": "0.5643819", "text": "def renderResourceUsage(self, used, size):\n if size:\n size = self.convert(size)\n size_value = float(size['value'])\n size_unit = size['unit']\n size_str = '%0.2f %s' % (size_value, size_unit)\n else:\n size_str = '??'\n size_value = None\n size_unit = None\n\n if used:\n core_in_size_unit = self.convert(used, size_unit)\n core_value_in_size_unit = float(core_in_size_unit['value'])\n used = self.convert(used)\n core_value = float(used['value'])\n used_str = '%0.2f %s' % (core_value, used['unit'])\n else:\n core_value = None\n used_str = '??'\n\n if core_value and size_value:\n rel_usage_str = '%0.2f' % (core_value_in_size_unit / size_value * 100.0)\n else:\n rel_usage_str = '??'\n\n return '%s / %s (%s%%)' % (used_str, size_str, rel_usage_str)", "title": "" }, { "docid": "66d307ac1c5b609f6de97c62a8c52763", "score": "0.5642207", "text": "def getDiskUsage(self):\n # Static might be the app install size\n self.staticDisk = self.objc.staticDiskUsage().integerValue()\n # Dynamic might be the storage usage\n self.dynamicDisk = self.objc.dynamicDiskUsage().integerValue()", "title": "" }, { "docid": "8e5c856b936938c3111b82112c1fef54", "score": "0.5639745", "text": "def hdd_used(self):\n\n # NAPALM getter does not collect this data!!!\n used_MB = \"NAPALMnotImplemented\"\n 
used_percentage = \"NAPALMnotImplemented\"\n\n return used_MB, used_percentage", "title": "" }, { "docid": "67e92335fcb3d8bc18e25d558adcec13", "score": "0.56366503", "text": "def storage_usage_bytes(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"storage_usage_bytes\")", "title": "" }, { "docid": "7ddbb4d0a995d06557d926cf4ef60d9f", "score": "0.56357753", "text": "def AvailableFreeSpace(self) -> int:", "title": "" }, { "docid": "7ddbb4d0a995d06557d926cf4ef60d9f", "score": "0.56357753", "text": "def AvailableFreeSpace(self) -> int:", "title": "" }, { "docid": "56debb98f703cef242ac2c1afc6a44b8", "score": "0.56350535", "text": "def get_avail_space(self):\n s = os.statvfs(self.path)\n avail_space = s.f_bsize * s.f_bavail / 1024 / 1024 / 1024\n return round(avail_space, 2)", "title": "" }, { "docid": "e66708b3dd3f0c6bfecaf396cf2bf6e6", "score": "0.5631054", "text": "def _quota_reserve(context, project_id):\r\n def get_sync(resource, usage):\r\n def sync(elevated, project_id, session):\r\n return {resource: usage}\r\n return sync\r\n quotas = {}\r\n resources = {}\r\n deltas = {}\r\n for i, resource in enumerate(('volumes', 'gigabytes')):\r\n quotas[resource] = db.quota_create(context, project_id,\r\n resource, i + 1)\r\n resources[resource] = ReservableResource(resource,\r\n '_sync_%s' % resource)\r\n deltas[resource] = i + 1\r\n return db.quota_reserve(\r\n context, resources, quotas, deltas,\r\n datetime.datetime.utcnow(), datetime.datetime.utcnow(),\r\n datetime.timedelta(days=1), project_id\r\n )", "title": "" }, { "docid": "0c1376ca6881ca7f9c4fb395a6ab5bf5", "score": "0.5628875", "text": "def _get_glacier_storage_charges(volume):\n return volume * glacier_storage_rate", "title": "" }, { "docid": "d924ed72c43042dcb7e86ce23a8df9da", "score": "0.5621362", "text": "def get_utilization(self):\n old_cap = self._total_capacity\n \n use_factor = self._usage / self._sched_factor \n\n util = (use_factor - old_cap) * self.get_nearest_distance() \n logging.info(\"{}: utilization={}, usage={}, capacity={}, dist={}\".format(\n self.get_name(), util, self._usage, self.get_capacity(), \n self.get_nearest_distance()))\n \n return util", "title": "" }, { "docid": "15508262e1b8734219bd9f4d6d4f9930", "score": "0.5601709", "text": "def print_quota_info(quota):\n total_memory_quota = \"INF\" if quota[\n \"total_memory_quota\"] == constant.INF_TOTAL_MEMORY_QUOTA else quota[\n \"total_memory_quota\"]\n total_cpu_quota = \"INF\" if quota[\n \"total_cpu_quota\"] == constant.INF_TOTAL_CPU_QUOTA else quota[\n \"total_cpu_quota\"]\n total_gpu_quota = \"INF\" if quota[\n \"total_gpu_quota\"] == constant.INF_TOTAL_GPU_QUOTA else quota[\n \"total_gpu_quota\"]\n train_count_quota = \"INF\" if quota[\n \"train_count_quota\"] == constant.INF_JOB_COUNT else quota[\n \"train_count_quota\"]\n model_count_quota = \"INF\" if quota[\n \"model_count_quota\"] == constant.INF_JOB_COUNT else quota[\n \"model_count_quota\"]\n dev_count_quota = \"INF\" if quota[\n \"dev_count_quota\"] == constant.INF_JOB_COUNT else quota[\n \"dev_count_quota\"]\n\n print(\"{:16} {}\".format(\"Org id:\", quota[\"org_id\"]))\n print(\"{:16} {:32} {:32} {:32} {:32} {:32}\".format(\n \"\", \"Memory / Used\", \"CPU / Used\", \"GPU / Used\", \"Tensorboard / Used\",\n \"Count / Used\"))\n print(\"{:16} {:32} {:32} {:32} {:32} {:32}\".format(\n \"Train job\", quota[\"train_memory_quota\"] + \" / \" +\n quota[\"train_memory_used\"], quota[\"train_cpu_quota\"] + \" / \" + quota[\n \"train_cpu_used\"], quota[\"train_gpu_quota\"] + \" / \" + quota[\n 
\"train_gpu_used\"], \"- / -\", train_count_quota + \" / \" + quota[\n \"train_count_used\"]))\n print(\"{:16} {:32} {:32} {:32} {:32} {:32}\".format(\n \"Model service\", quota[\"model_memory_quota\"] + \" / \" +\n quota[\"model_memory_used\"], quota[\"model_cpu_quota\"] + \" / \" + quota[\n \"model_cpu_used\"], quota[\"model_gpu_quota\"] + \" / \" + quota[\n \"model_gpu_used\"], \"- / -\", model_count_quota + \" / \" + quota[\n \"model_count_used\"]))\n print(\"{:16} {:32} {:32} {:32} {:32} {:32}\".format(\"Dev environment\", quota[\n \"dev_memory_quota\"] + \" / \" + quota[\"dev_memory_used\"], quota[\n \"dev_cpu_quota\"] + \" / \" + quota[\"dev_cpu_used\"], quota[\n \"dev_gpu_quota\"] + \" / \" + quota[\n \"dev_gpu_used\"], \"- / -\", dev_count_quota + \" / \" + quota[\n \"dev_count_used\"]))\n print(\"{:16} {:32} {:32} {:32} {:32} {:32}\".format(\n \"Tensorboard\", \"- / -\", \"- / -\", \"- / -\", quota[\"tensorboard_quota\"] +\n \" / \" + quota[\"tensorboard_used\"], \"- / -\"))\n\n print(\"{:16} {:32} {:32} {:32} {:32} {:32}\".format(\n \"Total quota\", total_memory_quota + \" / \" + quota[\"total_memory_used\"],\n total_cpu_quota + \" / \" + quota[\"total_cpu_used\"], total_gpu_quota +\n \" / \" + quota[\"total_gpu_used\"], \"- / -\", \"- / -\"))", "title": "" }, { "docid": "23c1f3fac5b26a4976055f151c4f671b", "score": "0.55977225", "text": "def get_utilization(self):\n if self.mark_utilized:\n return 100\n\n if self.status == PrefixStatusChoices.STATUS_CONTAINER:\n queryset = Prefix.objects.filter(\n prefix__net_contained=str(self.prefix),\n vrf=self.vrf\n )\n child_prefixes = netaddr.IPSet([p.prefix for p in queryset])\n utilization = float(child_prefixes.size) / self.prefix.size * 100\n else:\n # Compile an IPSet to avoid counting duplicate IPs\n child_ips = netaddr.IPSet(\n [_.range for _ in self.get_child_ranges()] + [_.address.ip for _ in self.get_child_ips()]\n )\n\n prefix_size = self.prefix.size\n if self.prefix.version == 4 and self.prefix.prefixlen < 31 and not self.is_pool:\n prefix_size -= 2\n utilization = float(child_ips.size) / prefix_size * 100\n\n return min(utilization, 100)", "title": "" }, { "docid": "c52a3e01221585ad83c02488a01b08de", "score": "0.5592142", "text": "def get_lfs_quota(lustre_fs):\n command = \"lfs quota {0}\".format(lustre_fs)\n text = subprocess.Popen(command, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n universal_newlines=True).communicate()[0]\n #print text\n text = text.strip().split(\"\\n\")\n return(text)", "title": "" }, { "docid": "a24091b4d3f980a45bc0d5d8e23c4437", "score": "0.55854064", "text": "def test_get_disk_space_percentage(self):\n TestType = namedtuple('Point', ['used', 'total', 'free'])\n testval = TestType(100, 200, 100)\n self.assertEqual(diskspace.get_disk_space_percentage(testval), 50)\n testval = TestType(100, 400, 300)\n self.assertEqual(diskspace.get_disk_space_percentage(testval), 75)", "title": "" }, { "docid": "360a2415f2764c9030aad7258547228a", "score": "0.5580063", "text": "def get_cephfs_data(self):\n ceph_status = self.ceph_cluster.toolbox.exec_ceph_cmd(ceph_cmd=\"ceph df\")\n total_used = 0\n for pool in ceph_status[\"pools\"]:\n total_used += pool[\"stats\"][\"bytes_used\"]\n return total_used / constants.GB", "title": "" }, { "docid": "02599b643a0eab47e2df0a29655ea7b4", "score": "0.5570196", "text": "def get_rest_quota(self):\n remaining, limit, reset = None, None, None\n if self.response:\n if 'x-rate-limit-remaining' in self.response.headers:\n remaining = int(\n 
self.response.headers['x-rate-limit-remaining'])\n if remaining == 0:\n limit = int(self.response.headers['x-rate-limit-limit'])\n reset = int(self.response.headers['x-rate-limit-reset'])\n reset = datetime.fromtimestamp(reset)\n return {'remaining': remaining, 'limit': limit, 'reset': reset}", "title": "" }, { "docid": "5724adb142a22e5b8ba10fd97582c514", "score": "0.55693066", "text": "def get_quota_differences(self, current_quota):\n quota_differences = {}\n for size, setting in CONF.quota.sizes.items():\n match_percentages = []\n for service_name, values in setting.items():\n if service_name not in current_quota:\n continue\n for name, value in values.items():\n if name not in current_quota[service_name]:\n continue\n if value > 0:\n current = current_quota[service_name][name]\n dividend = float(min(current, value))\n divisor = float(max(current, value))\n match_percentages.append(dividend / divisor)\n elif value < 0:\n # NOTE(amelia): Sub-zero quota means unlimited\n if current_quota[service_name][name] < 0:\n match_percentages.append(1.0)\n else:\n match_percentages.append(0.0)\n elif current_quota[service_name][name] == 0:\n match_percentages.append(1.0)\n else:\n match_percentages.append(0.0)\n # Calculate the average of how much it matches the setting\n difference = abs(\n (sum(match_percentages) / float(len(match_percentages))) - 1\n )\n\n quota_differences[size] = difference\n\n return quota_differences", "title": "" }, { "docid": "571755e3fcbd14a3b3ae08feeea9105f", "score": "0.5568434", "text": "def disk_storage_used_in_bytes(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"disk_storage_used_in_bytes\")", "title": "" }, { "docid": "7437a203fc65589c852e557eb802bffe", "score": "0.55678725", "text": "def test_get_capacity_info_for_share_and_mount_point_with_spaces(self):\r\n mox = self._mox\r\n drv = self._driver\r\n\r\n stat_total_size = 2620544\r\n stat_avail = 2129984\r\n stat_output = '1 %d %d' % (stat_total_size, stat_avail)\r\n\r\n du_used = 490560\r\n du_output = '%d /mnt' % du_used\r\n\r\n mox.StubOutWithMock(drv, '_get_mount_point_for_share')\r\n drv._get_mount_point_for_share(self.TEST_NFS_EXPORT_SPACES).\\\r\n AndReturn(self.TEST_MNT_POINT_SPACES)\r\n\r\n mox.StubOutWithMock(drv, '_execute')\r\n drv._execute('stat', '-f', '-c', '%S %b %a',\r\n self.TEST_MNT_POINT_SPACES,\r\n run_as_root=True).AndReturn((stat_output, None))\r\n\r\n drv._execute('du', '-sb', '--apparent-size',\r\n '--exclude', '*snapshot*',\r\n self.TEST_MNT_POINT_SPACES,\r\n run_as_root=True).AndReturn((du_output, None))\r\n\r\n mox.ReplayAll()\r\n\r\n self.assertEqual((stat_total_size, stat_avail, du_used),\r\n drv._get_capacity_info(self.TEST_NFS_EXPORT_SPACES))\r\n\r\n mox.VerifyAll()", "title": "" }, { "docid": "acf155b833813524d8429b7fba881e30", "score": "0.556706", "text": "def storage_gb(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"storage_gb\")", "title": "" }, { "docid": "acf155b833813524d8429b7fba881e30", "score": "0.556706", "text": "def storage_gb(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"storage_gb\")", "title": "" }, { "docid": "acf155b833813524d8429b7fba881e30", "score": "0.556706", "text": "def storage_gb(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"storage_gb\")", "title": "" }, { "docid": "5f0bd7e59571efad3b2e0ae9e48046cd", "score": "0.55605197", "text": "def get_usage_bound(request):\n disk_sizes = [int(size) for size in\n request.query_params.getlist('disk_sizes[]')]\n raid_level = 
request.query_params.get('raid_level', 'single')\n return Response(usage_bound(disk_sizes, len(disk_sizes), raid_level))", "title": "" }, { "docid": "c8e2a9636c8a78ac2a1c65dba447a8a8", "score": "0.5553052", "text": "def get_quota(self):\n return request(API_LIST.ACCOUNT_QUOTA.value, {'email': self.email, 'token': self.token})", "title": "" }, { "docid": "98bad9719b6b6f547f4756aa9ca29cad", "score": "0.5531073", "text": "def get_os_disk_usage(self, path):\n st = os.statvfs(path)\n free = (st.f_bavail * st.f_frsize)\n total = (st.f_blocks * st.f_frsize)\n used = (st.f_blocks - st.f_bfree) * st.f_frsize\n try:\n percent = (float(used) / total) * 100\n except ZeroDivisionError:\n percent = 0\n # NB: the percentage is -5% than what shown by df due to\n # reserved blocks that we are currently not considering:\n # http://goo.gl/sWGbH\n return self.usage_ntuple(total, used, free, round(percent, 1))", "title": "" }, { "docid": "74076405ec5c5f608d5bdf81b33ed899", "score": "0.5525881", "text": "def __compute_needed_space():\n\n\troot_dev = re.findall('(/[^\\d]*)\\d* / ', open('/proc/mounts').read())[0]\n\n\ttotal = 0\n\tfor number in re.findall('%s\\s+\\d+\\s+(\\d+)\\s' % root_dev, os.popen2('df')[1].read()):\n\t\ttotal += int(number)\n\n\ttry:\n\t\t# if an exclude file is present, substract the space occupied by excluded dirs/files.\n\t\texclude_file = re.findall('EXTERNAL_EXCLUDE_FILE=\"?([^\"])\"?', configuration.backup_config_file)[0]\n\n\t\tfor line in open(exclude_file, 'r'):\n\t\t\tline = line.strip()\n\t\t\tif line[0] == '#' or line == '':\n\t\t\t\tcontinue\n\n\t\t\tif line[0] == '/':\n\t\t\t\tpath = line\n\t\t\telse:\n\t\t\t\t# special, because the rsync is done in 2 phases to retain posix ACLs.\n\t\t\t\t# TODO: configuration.defaults.home_base_path\n\t\t\t\tpath = '/home/' + line\n\n\t\t\ttry:\n\t\t\t\tspace = int(os.popen3('du -kc %s' % path)[1].read().split(' ')[0])\n\t\t\t\ttotal -= space\n\t\t\texcept:\n\t\t\t\t# DU failed, probably because the path contained a shell globbing pattern.\n\t\t\t\t# don't bother now, this is harmless to skip some bytes…\n\t\t\t\tpass\n\n\texcept IndexError:\n\t\t# when no exclude file, just return the total.\n\t\treturn total", "title": "" }, { "docid": "2df56156f6fb904e583eaa8e57258254", "score": "0.5517394", "text": "def storage_size(self) -> str:\n return pulumi.get(self, \"storage_size\")", "title": "" } ]
cdf0260b6ceb5b3e53529a432048a105
Given a list of paths to DCM files, and an RTSTRUCT DCM file, it returns the 3D volumes.
[ { "docid": "9c747aa504fffc61c4a2764c11b1b2dd", "score": "0.0", "text": "def get_ImageAndGroundTruth( dcms , seg_fls_ls='none' ):\n # Get the 3D CT scan\n image = np.stack([s.pixel_array for s in dcms])\n gts = np.zeros_like( image )\n # Get pixel spacings\n pix_spc = [float(a) for a in dcms[0].PixelSpacing]\n # Get Planes\n if seg_fls_ls != 'none':\n planes = get_Planes( seg_fls_ls )\n if not isinstance(planes,str):\n # X-Y Origin\n xy_cent = [float(a) for a in dcms[0].ImagePositionPatient[:2]]\n \n # Fill in the points in GTS\n keys=[float(a) for a in planes.keys()]\n z_pos = [float(x.ImagePositionPatient[2]) for x in dcms]\n for ind , z in enumerate( z_pos ):\n if z in keys:\n points = [[int(np.round(np.abs(float(a[0])-xy_cent[0])/pix_spc[0])),int(np.round(np.abs(float(a[1])-xy_cent[1])/pix_spc[0]))] for a in planes['%.2f' % z]]\n gts[ind] = cv2.fillPoly(gts[ind],[np.asarray(points,dtype='int32')],color=(255))\n else:\n gts = 'none'\n # Add slice spacing to the pix_spc:\n try:\n slice_thickness = np.abs(dcms[0].ImagePositionPatient[2] - \n dcms[1].ImagePositionPatient[2])\n except:\n slice_thickness = np.abs(dcms[0].SliceLocation -\n dcms[1].SliceLocation)\n pix_spc += [slice_thickness]\n \n return image , gts , pix_spc", "title": "" } ]
[ { "docid": "c0828c96158906a06f7db878b1e79b53", "score": "0.5677691", "text": "def calculate_volume(segment_path, centroids, ct_path=None):\n\n mask = np.load(segment_path)\n mask, _ = scipy.ndimage.label(mask)\n labels = [mask[centroid['x'], centroid['y'], centroid['z']] for centroid in centroids]\n volumes = np.bincount(mask.flatten())\n volumes = volumes[labels].tolist()\n\n if ct_path:\n meta = load_ct(ct_path, voxel=False)\n meta = MetaData(meta)\n spacing = np.prod(meta.spacing)\n volumes = [volume * spacing for volume in volumes]\n\n return volumes", "title": "" }, { "docid": "70731e43cb967d503ae4b195c7142c0e", "score": "0.5656902", "text": "def get_volumes():\n volumes = []\n for (datastore, path) in get_datastores():\n for file_name in list_vmdks(path):\n volumes.append({'path': path,\n 'filename': file_name,\n 'datastore': datastore})\n return volumes", "title": "" }, { "docid": "94aff89d5a168c090f9f56ed9c3e7ec3", "score": "0.56414485", "text": "def load_dicom_image_folder(folder):\n volume = np.array([])\n count = 0\n for file in glob.glob(os.path.join(folder, \"*.dcm\")):\n data = pydicom.dcmread(file)\n if hasattr(data, \"pixel_array\"):\n if count == 0:\n volume = np.array([data.pixel_array])\n else:\n volume = np.append(volume, np.array([data.pixel_array]), axis=0)\n count += 1\n return volume", "title": "" }, { "docid": "b57000bc5f87dd849e8084d6e9579fe3", "score": "0.56171745", "text": "def load_series_volume(base_path,series_uid):\r\n series_file_names, series_file_ids = get_sitk_dcm_files(base_path,series_uid)\r\n if len(series_file_names)<10:\r\n return None,[0,0,0],[0,0,0],0,[]\r\n \r\n try:\t\r\n\t\t\r\n #start load the volume data\r\n series_reader = sitk.ImageSeriesReader()\r\n series_reader.SetFileNames(series_file_names)\r\n\t\t\t\r\n im_3D = series_reader.Execute()\r\n\r\n #image array:z,y,x; origin and spacing:x,y,z\r\n volume = sitk.GetArrayFromImage(im_3D)\t\t\r\n org = np.array(im_3D.GetOrigin())\r\n spacing =np.array(im_3D.GetSpacing())\r\n\r\n im_reader = sitk.ImageFileReader()\t\t\r\n im_reader.LoadPrivateTagsOn()\r\n\t\r\n slice_spacing = 0\r\n slice_spacing_tag = '0018|0088'\r\n im_reader.SetFileName(series_file_names[0])\r\n dcm_im = im_reader.Execute()\r\n\t\t\r\n if slice_spacing_tag in dcm_im.GetMetaDataKeys():\t\t\t\r\n slice_spacing = float(dcm_im.GetMetaData(slice_spacing_tag))\r\n\t\t\t\r\n return volume,org,spacing, slice_spacing,series_file_ids\t\t\r\n except (OSError, TypeError) as reason:\r\n print(str(reason))\r\n return None,np.array([0,0,0]),np.array([0,0,0,0]),0, []", "title": "" }, { "docid": "47cc5f3f90de6b35da81a3acd979fb35", "score": "0.55031407", "text": "def retrieveDicomFiles():\n lstFilesDCM = []\n for dirname, dirnames, filenames in os.walk('.', topdown=True, followlinks=True):\n\n for filename in filenames:\n\n if \".dcm\" in filename.lower():\n\n lstFilesDCM.append(os.path.join(dirname,filename))\n\n return lstFilesDCM", "title": "" }, { "docid": "6f97ec0eda191533a289cc330cf36c09", "score": "0.5477922", "text": "def read_fileset_vols(file):\n with open(file) as r:\n filesets = r.readlines()\n\n paths = []\n for fs in filesets:\n fs_parts = fs.strip().split(' ')\n #print(fs_parts)\n path, vol = fs_parts[0], float(fs_parts[1].strip())\n paths.append(path)\n if vol > MAX_FILESET_SIZE:\n print(\"Max fileset size {} exceeded {}:{}\".format(MAX_FILESET_SIZE, path, vol))\n\n return paths", "title": "" }, { "docid": "be6bc6e74479dcb27a6f67154cff8728", "score": "0.54412526", "text": "def get_chaos_volumes(root_chaos_directory, 
modality='CT', is_train=True):\n assert modality == 'CT' or modality == 'MR', f'Modality can either be CT or MR, and not {modality}'\n if is_train:\n modality_path = os.path.join(root_chaos_directory, 'Train_Sets', modality)\n else:\n modality_path = os.path.join(root_chaos_directory, 'Test_Sets', modality)\n \n input_image_dir = 'DICOM_anon'\n gt_image_dir = 'Ground'\n \n volumes = []\n for patient in sorted(os.listdir(modality_path)):\n \n input_image_names = sorted(os.listdir(os.path.join(modality_path, patient, input_image_dir)))\n pairs = []\n\n if is_train:\n \n gt_image_names = sorted(os.listdir(os.path.join(modality_path, patient, gt_image_dir)))\n assert len(input_image_names) == len(gt_image_names), f\"Number of input images and segmentation masks don't match for patient {patient}\"\n for input_name, gt_name in zip(input_image_names, gt_image_names):\n \n input_path = os.path.join(modality_path, patient, input_image_dir, input_name)\n gt_path = os.path.join(modality_path, patient, gt_image_dir, gt_name)\n pairs.append((input_path, gt_path))\n else:\n for name in input_image_names:\n input_path = os.path.join(modality_path, patient, input_image_dir, name)\n pairs.append((input_path, None))\n \n volumes.append(pairs)\n \n return volumes", "title": "" }, { "docid": "7e7748a90f598ebc96c7d197ff222a1e", "score": "0.54216224", "text": "def __getFilesFromCastor(self, meta): \n prodID = meta['ProdID']\n prod = str(prodID).zfill(8)\n energy = meta['Energy']\n bkg = meta[\"EvtType\"]\n detector = meta[\"DetectorType\"]\n path = \"/castor/cern.ch/grid/ilc/prod/%s/%s/%s/%s/SIM/%s/\" % (self.machine, energy, bkg, detector, prod)\n comm = [\"nsls\", \"%s\" % path]\n res = subprocess.Popen(comm, stdout = subprocess.PIPE).communicate()\n dirlist = res[0].rstrip().split(\"\\n\")\n mylist = []\n for mydir in dirlist:\n if mydir.count(\"dirac_directory\"):\n continue\n curdir = path + mydir\n comm2 = [\"nsls\", curdir]\n res = subprocess.Popen(comm2, stdout = subprocess.PIPE).communicate()\n for oFile in res[0].rstrip().split(\"\\n\"):\n if oFile.count(\"dirac_directory\"):\n continue\n mylist.append(path + mydir + \"/\" + oFile)\n if not mylist:\n return S_ERROR(\"File list is empty\")\n return S_OK(mylist)", "title": "" }, { "docid": "c598a8a7874e7a76e093ec573d813030", "score": "0.5360853", "text": "def read_plys(file_list, unit='m'):\n pcd_list = []\n for file_path in file_list:\n pcd_list.append(read_ply(file_path, unit=unit))\n\n return merge_pcd(pcd_list)", "title": "" }, { "docid": "d6225b5d3eb48d9219ac4554fac7dd5d", "score": "0.53323436", "text": "def walk_300vw_3d(tts, path):\n train, test = '', ''\n for (dir_path, dir_names, file_names) in os.walk(path):\n landmarks = []\n for file_name in file_names:\n # search for torch files\n file_path = os.path.join(dir_path, file_name)\n if not file_path.endswith('.t7'):\n continue\n\n # load the points\n points = torchfile.load(file_path)\n\n # search for image name\n if os.path.isfile(file_path[:-2] + 'jpg'):\n csv_list = [file_path[:-2] + 'jpg']\n elif os.path.isfile(file_path[:-2] + 'png'):\n csv_list = [file_path[:-2] + 'png']\n else:\n continue\n\n # add landmarks to our list\n for p in points:\n for n in p:\n csv_list.append(str(n))\n landmarks.append(f\"{','.join(csv_list)}\")\n\n # add landmarks from hole directory to train or path (no two files from one subset in different groups)\n if len(landmarks) > 0:\n if random.random() > tts:\n test += '\\n'.join(landmarks)\n test += '\\n'\n else:\n train += '\\n'.join(landmarks)\n train += '\\n'\n 
return train, test", "title": "" }, { "docid": "2980ececbbea5a036e1024ddf31f214d", "score": "0.5297258", "text": "def read_dicom_stack(filepath):\n\n # from: https://pyscience.wordpress.com/2014/09/08/dicom-in-python-\n # importing-medical-image-data-into-numpy-with-pydicom-and-vtk/\n file_list = []\n # Read in filenames from directory and sort them\n for root, dirs, files in os.walk(filepath):\n for filename in files:\n if \".dcm\" in filename.lower():\n file_list.append(os.path.join(root, filename))\n file_list.sort()\n # Read in first reference file to define shape of data\n ref_data = pydicom.read_file(file_list[0])\n data_shape = (len(file_list), int(ref_data.Rows), int(ref_data.Columns))\n data = np.zeros(data_shape, int)\n # Read in image stack and store in single ndarray\n for filename in file_list:\n data_slice = pydicom.dcmread(filename)\n data[file_list.index(filename), :, :] = data_slice.pixel_array\n # Read and calculate voxel size\n # The voxel size within one slice in given. Between the slices\n # (z-direction) has to be calculated as the difference in the\n # position of two slices.\n ref_data_second = pydicom.read_file(file_list[1])\n spacing3 = (ref_data_second.ImagePositionPatient[2]\n - ref_data.ImagePositionPatient[2])\n spacing = np.asarray([float(spacing3),\n float(ref_data.PixelSpacing[1]),\n float(ref_data.PixelSpacing[0])])\n # Calculate the volume position\n # The stored position in the dicom file points to top center of the\n # lower left last corner of the slice. Center in x,y direction and\n # on end of the z direction. (0.5,0.5,1) for a unit cell\n position = np.asarray([float(ref_data.ImagePositionPatient[2]),\n float(ref_data.ImagePositionPatient[0]),\n float(ref_data.ImagePositionPatient[1])])\n position = position - (np.asarray([1, 0.5, 0.5]) * spacing)\n # To get the real lover left last corner the stated position has to\n # be decreased by half the spacing in x and y direction and a full\n # spacing in z direction\n\n # Delete ref_data*\n del ref_data\n del ref_data_second\n return VolumeData(data, spacing, position)", "title": "" }, { "docid": "64738ebb520249938c0e98b2651b77c1", "score": "0.52403086", "text": "def load_eth3d_data(file_path, current_file):\n limg_name = pjoin(file_path, current_file + '/im0.png')\n #print (\"limg: {}\".format(limg_name))\n rimg_name = pjoin(file_path, current_file + '/im1.png')\n #print (\"rimg: {}\".format(rimg_name))\n \n ldisp_name = pjoin(file_path, current_file + '/disp0GT.pfm')\n #print (\"ldisp: {}\".format(ldisp_name))\n return limg_name, rimg_name, ldisp_name", "title": "" }, { "docid": "d747d0d51450137fc809e87e95f0cfb0", "score": "0.51988864", "text": "def list_volumes(self, pattern = None):\n volumes = []\n Datasets._volumeslock.acquire()\n if Datasets.volumes == None:\n Datasets.volumes = []\n cmd = [ZFSCMD, \"list\", \"-H\", \"-t\", \"volume\", \\\n \"-o\", \"name\", \"-s\", \"name\"]\n try:\n p = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n close_fds=True)\n outdata,errdata = p.communicate()\n err = p.wait()\n except OSError, message:\n raise RuntimeError, \"%s subprocess error:\\n %s\" % \\\n (cmd, str(message))\n if err != 0:\n Datasets._volumeslock.release()\n raise RuntimeError, '%s failed with exit code %d\\n%s' % \\\n (str(cmd), err, errdata)\n for line in outdata.rstrip().split('\\n'):\n Datasets.volumes.append(line.rstrip())\n Datasets._volumeslock.release()\n\n if pattern == None:\n volumes = Datasets.volumes[:]\n else:\n # Regular expression pattern to match 
\"pattern\" parameter.\n regexpattern = \".*%s.*\" % pattern\n patternobj = re.compile(regexpattern)\n\n for volname in Datasets.volumes:\n patternmatchobj = re.match(patternobj, volname)\n if patternmatchobj != None:\n volumes.append(volname)\n return volumes", "title": "" }, { "docid": "8caf0502a343a3c8f146846cb7602213", "score": "0.51945287", "text": "def volumes(filename, **kwds):\n # reads in the tif\n arr = read(filename)\n # binarize the 3D array\n arr = (arr < args.threshold)\n #binarize(arr, threshold=args.threshold, copy=False)\n # determine pore locations\n pores = Porosity(arr)\n # call your COM code\n vol = pore_volume(pores)\n # TODO: write a csv writer\n write(args.ofile, vol)", "title": "" }, { "docid": "5537bfd85770eb72bf5f83aa69db6e8b", "score": "0.5150098", "text": "def _get_cinder_volumes(self):\n # type: () -> []\n volumes = retry(self.cinder.volumes.list, self.retries, self.retry_exceptions)\n return volumes", "title": "" }, { "docid": "996e2cccc6916414baded66c21903072", "score": "0.5149494", "text": "def get_vrfs(file_path):\n with open(file_path) as f:\n data = yaml.load(f, Loader=yaml.FullLoader)\n multicast_vrf = data.get(\"MulticastVRF\", 0)\n unicast_vrf = data.get(\"UnicastVRF\", 254)\n return [multicast_vrf, unicast_vrf]", "title": "" }, { "docid": "c817e565959b02693e2d3c6704cde326", "score": "0.5142595", "text": "def _get_e3sm_vars(self, input_path: str) -> List[str]:\n paths: List[str] = []\n e3sm_vars: List[str] = []\n\n for root, _, files in os.walk(input_path):\n for filename in files:\n if \".nc\" in filename:\n paths.append(str(Path(root, filename).absolute()))\n\n for path in paths:\n ds = xr.open_dataset(path)\n data_vars = list(ds.data_vars.keys())\n\n e3sm_vars = e3sm_vars + data_vars\n\n if len(e3sm_vars) == 0:\n raise IndexError(\n f\"No variables were found in the input file(s) at '{input_path}'.\"\n )\n\n return e3sm_vars", "title": "" }, { "docid": "10f4d08531ea10679e7ae1f52f140adb", "score": "0.509259", "text": "def ncread(filedir, filename_regexp, var_name):\n files = os.listdir(filedir)\n regexp = re.compile(filename_regexp)\n filtered_files = []\n for each_file in files:\n if (regexp.search(each_file)):\n filtered_files.append(each_file)\n\n if (len(filtered_files) == 1):\n if filedir[-1] != '/':\n filedir += '/'\n filename = filedir + filtered_files[0]\n fh = netcdf.netcdf_file(filename,'r',mmap=True)\n return np.array(fh.variables[var_name][:])\n else:\n return NetCDFChain(filedir, filename_regexp, var_name)", "title": "" }, { "docid": "3c1b60ece3c872cd9d0941de6ad2cf10", "score": "0.5086093", "text": "def _get_mount_volumes() -> List[str]:\n volumes = []\n cwd = os.getcwd()\n for module_dir in _get_module_dirs():\n module = os.path.basename(module_dir)\n volumes.extend([\"-v\", \"%s/%s:/magma/%s\" % (cwd, module_dir, module)])\n return volumes", "title": "" }, { "docid": "ed74dab5738fb794ea922a4359271450", "score": "0.5082217", "text": "def list_vmdks(path):\n try:\n files = os.listdir(path)\n return [f for f in files\n if vmdk_is_a_descriptor(os.path.join(path, f))]\n except OSError as e:\n # dockvols may not exists on a datastore, so skip it\n return []", "title": "" }, { "docid": "d4c4a4f67117a562cc6eab7699b2b780", "score": "0.5066945", "text": "def load_volumes(self, db, load_normalized=False,\n load_seg=True, load_bm=True, load_tdm=True):\n \"\"\"\n Arguments:\n db: DatabaseBRATS\n normalized: flag indicating whether to load normalized volumes\n load_seg: flag indicating whether to load segmentation\n load_bm: flag indicating whether 
to load brain mask\n load_tdm: flag indicating whether to load tumor distance map\n Returns:\n list of volumes\n \"\"\"\n if load_normalized:\n volumes = [self.load_normalized_volume(db, m)\n for m in db.modalities[:-1]]\n else:\n volumes = [self.load_volume(db, m) for m in db.modalities[:-1]]\n if load_seg:\n volumes.append(self.load_volume(db, 'seg'))\n if load_bm:\n volumes.append(self.load_brain_mask(db))\n if load_tdm:\n volumes.append(self.load_tumor_dist_maps(db))\n return volumes", "title": "" }, { "docid": "a1690803865c397600af79affaaf87e5", "score": "0.4981326", "text": "def _compute_volumes(self):\n faces = self.faces\n\n volumes = np.zeros(faces.size - 1)\n for i in range(volumes.size):\n volumes[i] = (faces[i+1] - faces[i])*self.dim2*self.dim3\n return volumes", "title": "" }, { "docid": "71c246b0e490c6a76b8516663f24dab0", "score": "0.49811682", "text": "def loadtiff3d(filepath):\n from libtiff import TIFFfile, TIFF\n tiff = TIFF.open(filepath, mode='r')\n stack = []\n for sample in tiff.iter_images():\n stack.append(np.flipud(sample))\n\n out = np.dstack(stack)\n tiff.close()\n\n return out", "title": "" }, { "docid": "8bc3b3a32d25d155c749d5916dffbdcc", "score": "0.49328175", "text": "def get_scene_files():\n file_types = {'file': _file_handler,\n 'cacheFile': _cache_file_handler,\n 'diskCache': _diskCache_handler,\n 'VRayMesh': _vrmesh_handler,\n 'mentalrayTexture': _mrtex_handler,\n 'gpuCache': _gpu_handler,\n 'mentalrayOptions': _mrOptions_handler,\n 'mentalrayIblShape': _mrIbl_handler,\n 'AlembicNode': _abc_handler,\n 'VRaySettingsNode': _vrSettings_handler,\n 'particle': _particle_handler,\n 'VRayLightIESShape': _ies_handler,\n 'FurDescription': _fur_handler,\n 'mib_ptex_lookup': _ptex_handler,\n 'substance': _substance_handler,\n 'imagePlane': _imagePlane_handler,\n 'mesh': _mesh_handler,\n 'dynGlobals': _dynGlobals_handler,\n 'aiStandIn': _aiStandIn_handler,\n 'aiImage': _aiImage_handler,\n 'aiPhotometricLight': _aiPhotometricLight_handler,\n 'ExocortexAlembicFile': _exocortex_handler}\n\n for file_type in file_types:\n handler = file_types.get(file_type)\n nodes = cmds.ls(type=file_type)\n for node in nodes:\n for files in handler(node):\n for scene_file in files:\n if scene_file != None:\n yield scene_file.replace('\\\\', '/')", "title": "" }, { "docid": "d613142c22360c2e9488c85c28f01a1e", "score": "0.49280477", "text": "def find_DCM_files_parallel(rootpath=None):\n time_zero = time.time()\n dcmFileList = []\n\n # ~~ Walk directory, add all '*.dcm' files to list\n for root, dirs, files in os.walk(rootpath):\n for file in files:\n if file.endswith('.dcm'):\n fullpath = os.path.join(root, file)\n dcmFileList.append(fullpath)\n\n print(\"Found {} files\".format(len(dcmFileList)))\n\n if len(dcmFileList) > 300:\n print('Too many files!')\n return {}\n\n # ~~ Create Threadpool same size as dcm list, get modality for each file\n if not bool(dcmFileList):\n return {}\n\n pool = ThreadPool(len(dcmFileList))\n results = pool.map(func=lambda x: (dicom.read_file(x, force=True).Modality, x),\n iterable=dcmFileList)\n pool.close()\n pool.join()\n\n # ~~ sort into a dictionary by modality type\n dcmDict = {}\n for result in results:\n mode, filepath = result\n if mode not in dcmDict:\n dcmDict[mode] = []\n dcmDict[mode].append(filepath)\n\n print(\"parallel took %.2fs to sort DCMs\" % (time.time() - time_zero))\n return dcmDict", "title": "" }, { "docid": "b7c0234bf64e6deede00a442b2b68887", "score": "0.491537", "text": "def storage_volume_get_by_ids(context, ids):\n\n return 
IMPL.storage_volume_get_by_ids(context, ids)", "title": "" }, { "docid": "45430f35faac3bad41b95f405b99fc1b", "score": "0.4912654", "text": "def read_3d_data( camera_frame, rcams, origin_bc=False, augment=False,\n procrustes=False, lowpass=False ):\n dim = 3 # reading 3d data\n print(\"\\n[*] dimensions to use: \")\n print(DIMENSIONS_TO_USE)\n print()\n # Load 3d data\n data3d = load_data( dim, rcams, camera_frame, origin_bc, augment, procrustes, lowpass )\n train_set = split_train_test( data3d, TRAIN_FILES, dim, camera_frame )\n test_set = split_train_test( data3d, TEST_FILES, dim, camera_frame )\n \n # Compute normalization statistics\n complete_train = np.copy( np.vstack( list(train_set.values()) ).reshape((-1, DIMENSIONS*3)) )\n data_mean, data_std, dim_to_ignore, dim_to_use = \\\n normalization_stats( complete_train, origin_bc, dim )\n \n # Divide every dimension independently\n train_set = normalize_data( train_set, data_mean, data_std, dim_to_use, dim )\n test_set = normalize_data( test_set, data_mean, data_std, dim_to_use, dim )\n \n return train_set, test_set, data_mean, data_std, dim_to_ignore, dim_to_use", "title": "" }, { "docid": "b9c8a678ae6ed45d773f8476a90edea4", "score": "0.49126506", "text": "def test_components_3d():\n\n combs = [\n \"012\",\n \"345\",\n \"678\",\n \"9\",\n \"012345\",\n \"012678\",\n \"0129\",\n \"345678\",\n \"3459\",\n \"6789\",\n \"012345678\",\n \"0123459\",\n \"0126789\",\n \"3456789\",\n \"0123456789\",\n ]\n\n size = (1, 50, 50)\n\n create_lsds(size, combs)", "title": "" }, { "docid": "7ce31a23afcc137ee7c2b4f42e3ca2d9", "score": "0.49057105", "text": "def read_c3d_file(file_path):\n try:\n with open(file_path, 'rb') as file_handle:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\") # ignore UserWarning: missing parameter ANALOG:DESCRIPTIONS/LABELS\n reader = c3d.Reader(file_handle)\n \n marker_labels = reader.point_labels\n print(\"Marker Labels:\", \",\".join(marker_labels))\n first_frame = reader.first_frame()\n last_frame = reader.last_frame()\n print(\"First Frame:\", first_frame, \"Last Frame:\", last_frame)\n fps = reader.header.frame_rate\n print(\"FPS:\", fps)\n n_frames = last_frame - first_frame + 1\n total_length = n_frames / fps\n print(\"Clip length in total:\", humanize_time(total_length))\n # Extract positions for each frame.\n pos_array = np.empty([n_frames, len(marker_labels), 3])\n pos_array.fill(np.NAN)\n cond_array = np.empty([n_frames, len(marker_labels)])\n cond_array.fill(np.NAN)\n print(\"Reading frames... \", end=\"\", flush=True)\n for i, points, _ in reader.read_frames():\n # pos_array[frame, marker], e.g. 
pos_array[:,11] all frames for 12th marker\n # Convert to a mirrored coordinate system.\n pos_array[i - first_frame, :, :] = np.vstack([-1.0 * points[:, 0],\n -1.0 * points[:, 2],\n -1.0 * points[:, 1]]).T\n cond_array[i - first_frame, :] = points[:, 3]\n if n_frames is not None and i - first_frame >= n_frames:\n break\n except OSError:\n print(\"ERROR: Could not read file.\")\n raise\n print(\"Done.\")\n \n data = {'marker_labels': marker_labels,\n 'trajectories': pos_array,\n 'conditionals': cond_array,\n 'frame_rate': fps,\n }\n return data", "title": "" }, { "docid": "0e71d70245e5868e624d878633a418e4", "score": "0.489918", "text": "def readUBC_DC3Dobs(fileName, rtype = 'DC'):\n zflag = True # Flag for z value provided\n\n # Load file\n if rtype == 'IP':\n obsfile = np.genfromtxt(fileName,delimiter=' \\n',dtype=np.str,comments='IPTYPE')\n\n elif rtype == 'DC':\n obsfile = np.genfromtxt(fileName,delimiter=' \\n',dtype=np.str,comments='!')\n\n else:\n print \"rtype must be 'DC'(default) | 'IP'\"\n\n # Pre-allocate\n srcLists = []\n Rx = []\n d = []\n wd = []\n\n\n # Countdown for number of obs/tx\n count = 0\n for ii in range(obsfile.shape[0]):\n\n # Skip if blank line\n if not obsfile[ii]:\n continue\n\n # First line or end of a transmitter block, read transmitter info\n if count==0:\n # Read the line\n temp = (np.fromstring(obsfile[ii], dtype=float, sep=' ').T)\n count = int(temp[-1])\n\n # Check if z value is provided, if False -> nan\n if len(temp)==5:\n tx = np.r_[temp[0:2],np.nan,temp[2:4],np.nan]\n\n zflag = False # Pass on the flag to the receiver loc\n\n else:\n tx = temp[:-1]\n\n rx = []\n continue\n\n temp = np.fromstring(obsfile[ii], dtype=float,sep=' ') # Get the string\n\n # Filter out negative IP\n# if temp[-2] < 0:\n# count = count -1\n# print \"Negative!\"\n#\n# else:\n\n # If the Z-location is provided, otherwise put nan\n if zflag:\n\n rx.append(temp[:-2])\n # Check if there is data with the location\n if len(temp)==8:\n d.append(temp[-2])\n wd.append(temp[-1])\n\n else:\n rx.append(np.r_[temp[0:2],np.nan,temp[2:4],np.nan] )\n # Check if there is data with the location\n if len(temp)==6:\n d.append(temp[-2])\n wd.append(temp[-1])\n\n count = count -1\n\n # Reach the end of transmitter block, append the src, rx and continue\n if count == 0:\n rx = np.asarray(rx)\n Rx = DC.RxDipole(rx[:,:3],rx[:,3:])\n srcLists.append( DC.SrcDipole( [Rx], tx[:3],tx[3:]) )\n\n # Create survey class\n survey = DC.SurveyDC(srcLists)\n\n survey.dobs = np.asarray(d)\n survey.std = np.asarray(wd)\n\n return {'DCsurvey':survey}", "title": "" }, { "docid": "73a50e1213d25bed308bf1613c119015", "score": "0.4890817", "text": "def mp3filelist(basedir):\n\n mp3_files_pattern = Match(filetype='f', name='*.mp3')\n\n found_files = find_files(path=basedir, match=mp3_files_pattern)\n\n l = []\n for f in found_files:\n l.append(f)\n return l", "title": "" }, { "docid": "3355dcd18d4ca6394813369947439738", "score": "0.48849115", "text": "def load_vaihingen_data(folderlist):\n file_path, filename = os.path.split(folderlist)\n\n filelist = []\n with open(folderlist) as w :\n content = w.readlines()\n content = [x.strip() for x in content] \n for line in content :\n if line :\n filelist.append(line)\n\n all_left_img = []\n all_right_img = []\n all_left_disp = []\n\n for current_file in filelist :\n filename = file_path + '/' + current_file[0: len(current_file)]\n #print('left image: ' + filename)\n all_left_img.append(filename)\n #index1_dir = current_file.find('/')\n #index2_dir = current_file.find('/', 
index1_dir + 1)\n index2_dir = current_file.rfind('/')\n index1_dir = current_file.rfind('/', 0, index2_dir)\n\n filename = file_path + '/' + current_file[0: index1_dir] + '/colored_1' + current_file[index2_dir: len(current_file)]\n #print('right image: ' + filename)\n all_right_img.append(filename)\n filename = file_path + '/' + current_file[0: index1_dir] + '/disp_occ' + current_file[index2_dir: len(current_file)]\n #print('disp image: ' + filename)\n all_left_disp.append(filename)\n #pdb.set_trace()\n\n return all_left_img, all_right_img, all_left_disp", "title": "" }, { "docid": "35219c90ed9448c10633f003280302f3", "score": "0.48590073", "text": "def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):\n dc_name = get_managed_object_name(dc_ref)\n log.trace(\n \"Retrieving DVSs in datacenter '%s', dvs_names='%s', get_all_dvss=%s\",\n dc_name,\n \",\".join(dvs_names) if dvs_names else None,\n get_all_dvss,\n )\n properties = [\"name\"]\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path=\"networkFolder\",\n skip=True,\n type=vim.Datacenter,\n selectSet=[\n vmodl.query.PropertyCollector.TraversalSpec(\n path=\"childEntity\", skip=False, type=vim.Folder\n )\n ],\n )\n service_instance = get_service_instance_from_managed_object(dc_ref)\n items = [\n i[\"object\"]\n for i in get_mors_with_properties(\n service_instance,\n vim.DistributedVirtualSwitch,\n container_ref=dc_ref,\n property_list=properties,\n traversal_spec=traversal_spec,\n )\n if get_all_dvss or (dvs_names and i[\"name\"] in dvs_names)\n ]\n return items", "title": "" }, { "docid": "a46dad7e4743f87dda333e46f11d6f97", "score": "0.48565346", "text": "def get_tracks_dsi_studio(tracks_file,xsize=2.5,ysize=2.5,zsize=2.5):\n mm_convert = False\n tracks = core.file_reader(tracks_file)\n tracks_new = []\n for track in tracks:\n track_new = []\n track_len = len(track) / 3\n for count in range(track_len):\n start = (count * 3)\n if mm_convert:\n track_new.append((int(track[start]//xsize), int(track[start+1]//ysize), int(track[start+2]//zsize)))\n else:\n track_new.append((int(96-track[start]), int(96-track[start+1]), int(track[start+2])))\n tracks_new.append(track_new)\n return tracks_new", "title": "" }, { "docid": "8cbe8fea5c69a9ddfce446572bc7b060", "score": "0.484402", "text": "def volumes(parameters):\n volumes = sq_volumes(parameters)\n return volumes.mean()", "title": "" }, { "docid": "876ec16e87bbdf1fd803088aea34d9c1", "score": "0.4842539", "text": "def testReadSelectedVolumesLVM(self):\n test_path = self._GetTestFilePath(['lvm.raw'])\n self._SkipIfPathNotExists(test_path)\n\n test_os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=test_path)\n test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)\n test_lvm_container_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_LVM, location='/', parent=test_raw_path_spec)\n\n volume_system = lvm_volume_system.LVMVolumeSystem()\n volume_system.Open(test_lvm_container_path_spec)\n\n input_file_object = io.BytesIO(b'all\\n')\n test_input_reader = command_line.FileObjectInputReader(input_file_object)\n\n test_mediator = command_line.CLIVolumeScannerMediator(\n input_reader=test_input_reader)\n\n selected_volumes = test_mediator._ReadSelectedVolumes(\n volume_system, prefix='lvm')\n self.assertEqual(selected_volumes, ['lvm1', 'lvm2'])", "title": "" }, { "docid": "345de652babdd9a412baa9a0ced10cc5", "score": "0.4836639", "text": "def 
readr3d(filename, gzipped=None, fieldnums=None):\n\n from struct import unpack\n\n # Check whether the r3d file is gzipped or not if\n # not given\n if gzipped is None:\n with open(filename, 'rb') as f:\n magic = f.read(2)\n if magic == '\\037\\213':\n gzipped = True\n else:\n gzipped = False\n\n # Open gzipped or normal file\n if gzipped:\n f = gzip.open(filename, 'rb')\n else:\n f = open(filename, 'rb')\n\n # Read fixed size header\n raw_header = f.read(24)\n nx,ny,nz,nvar,r3dtype,t = unpack(\"iiiiif\",raw_header)\n\n # Issue a warning if the r3dtype is not supported\n if r3dtype not in (1,2,4):\n rawmsg = \"R3d file '{0}' is of type {1}, which is not supported.\"\n msg = rawmsg.format(filename, r3dtype)\n warn(msg)\n\n # Check if only a subset of the fields should be read\n if fieldnums is not None:\n # Make sure it is sorted\n fieldnum_list = list(fieldnums)\n fieldnum_list.sort()\n else:\n fieldnum_list = range(nvar)\n\n # Read the field names\n field_names_size = 32 * nvar\n field_names_format = '32s' * nvar\n rawnames = unpack(field_names_format,f.read(field_names_size))\n # Split off the zero byte and the following padding\n # Skip field names that should not be read (according to fieldnums)\n names = [ rawnames[i].split('\\x00')[0] for i in fieldnum_list ]\n\n # Read coordinates\n # First and last elements are coordinates to boundary, rest are coordinates\n # for midpoint in cell.\n raw_coordinates = f.read(4 * (nx + ny + nz))\n x = np.array(unpack(str(nx) + 'f', raw_coordinates[0:4*nx]))\n y = np.array(unpack(str(ny) + 'f', raw_coordinates[4*nx:4*(nx+ny)]))\n z = np.array(unpack(str(nz) + 'f', raw_coordinates[4*(nx+ny):4*(nx+ny+nz)]))\n\n # Read staggered coordinates if r3dtype == 2\n r3d_kwargs = {}\n if r3dtype == 2:\n staggered_coordinates = f.read(4 * (nx + ny + nz))\n xs = np.array(unpack(str(nx) + 'f', staggered_coordinates[0:4*nx]))\n ys = np.array(unpack(str(ny) + 'f', staggered_coordinates[4*nx:4*(nx+ny)]))\n zs = np.array(unpack(str(nz) + 'f', staggered_coordinates[4*(nx+ny):4*(nx+ny+nz)]))\n r3d_kwargs['xs'] = xs\n r3d_kwargs['ys'] = ys\n r3d_kwargs['zs'] = zs\n\n # Read the 3D-arrays contained in the r3d file\n fields = []\n for i in range(nvar):\n if i in fieldnum_list:\n v = array(unpack(\n str(nx*ny*nz) + 'f',\n f.read(nx*ny*nz * 4)\n ))\n # Without order='F', the order would be (nz,ny,nx)\n v = reshape(v, (nx,ny,nz), order='F')\n fields.append(v)\n else:\n # Skip this field\n f.seek(nx*ny*nz * 4, 1)\n\n # Close r3d file\n f.close()\n\n # Create R3d instance with the read values\n # Note that nx, ny, nz, and nvar are not passed to the constructor.\n # Those values are inferred from the dimension of the others\n r3d = R3d(x, y, z, names, fields, t, r3dtype, gzipped, **r3d_kwargs)\n return r3d", "title": "" }, { "docid": "362ea04173054b6f5fba33931876f246", "score": "0.48273334", "text": "def load_isomorphous_mtzs(*filenames):\n data = []\n print(\"Loading Mtz files...\")\n a,b,c,alpha,beta,gamma=0.,0.,0.,0.,0.,0.\n spacegroup = None\n for i,inFN in enumerate(filenames):\n ds = rs.read_mtz(inFN)\n if spacegroup is not None:\n if ds.spacegroup != spacegroup:\n raise ValueError(f\"Filename: {inFN} has space group {ds.spacegroup}, but {spacegroup} is expected. 
Cannot load non-isomorphous MTZs.\")\n spacegroup = ds.spacegroup\n ds['file_id'] = i\n a += ds.cell.a/len(filenames)\n b += ds.cell.b/len(filenames)\n c += ds.cell.c/len(filenames)\n alpha += ds.cell.alpha/len(filenames)\n beta += ds.cell.beta/len(filenames)\n gamma += ds.cell.gamma/len(filenames)\n data.append(ds)\n data = pd.concat(data)\n data.cell = gemmi.UnitCell(a, b, c, alpha, beta, gamma)\n data.spacegroup = spacegroup \n return data", "title": "" }, { "docid": "629281200d8d3c79ba6f9e3de70e9d2e", "score": "0.48180392", "text": "def compute_delays_slc(cube_filenames:list, wavelength):\n # parse date from filename\n dct_delays = {}\n for f in cube_filenames:\n date = datetime.strptime(os.path.basename(f).split('_')[2], '%Y%m%dT%H%M%S')\n dct_delays[date] = f\n\n sec, ref = sorted(dct_delays.keys())\n\n wet_delays = []\n hyd_delays = []\n phase2range = (-4 * np.pi) / float(wavelength)\n for dt in [ref, sec]:\n path = dct_delays[dt]\n with xr.open_dataset(path) as ds:\n da_wet = ds['wet'] * phase2range\n da_hydro = ds['hydro'] * phase2range\n\n wet_delays.append(da_wet)\n hyd_delays.append(da_hydro)\n\n crs = da_wet.rio.crs\n gt = da_wet.rio.transform()\n\n chunk_sizes = da_wet.shape[0], da_wet.shape[1]/3, da_wet.shape[2]/3\n\n # open one to copy and store new data\n ds_slc = xr.open_dataset(path).copy()\n encoding = ds_slc['wet'].encoding # chunksizes and fill value\n encoding['contiguous'] = False\n encoding['_FillValue'] = 0.\n encoding['chunksizes'] = tuple([np.floor(cs) for cs in chunk_sizes])\n del ds_slc['wet'], ds_slc['hydro']\n\n for i, key in enumerate('reference secondary'.split()):\n ds_slc[f'{key}_{TROPO_NAMES[0]}'] = wet_delays[i]\n ds_slc[f'{key}_{TROPO_NAMES[1]}'] = hyd_delays[i]\n\n model = os.path.basename(path).split('_')[0]\n\n attrs = {\n 'units': 'radians',\n 'grid_mapping': 'crs',\n }\n\n ## no data (fill value?) 
chunk size?\n for name in TROPO_NAMES:\n for key in 'reference secondary'.split():\n descrip = f\"Delay due to {name.lstrip('troposphere')} component of troposphere\"\n da_attrs = {**attrs, 'description':descrip,\n 'long_name':name, 'standard_name':name,\n 'RAiDER version': RAiDER.__version__,\n }\n ds_slc[f'{key}_{name}'] = ds_slc[f'{key}_{name}'].assign_attrs(da_attrs)\n ds_slc[f'{key}_{name}'].encoding = encoding\n\n ds_slc = ds_slc.assign_attrs(model=model, method='ray tracing')\n\n ## force these to float32 to prevent stitching errors\n coords = {coord:ds_slc[coord].astype(np.float32) for coord in ds_slc.coords}\n ds_slc = ds_slc.assign_coords(coords)\n\n return ds_slc.rename(z=DIM_NAMES[0], y=DIM_NAMES[1], x=DIM_NAMES[2])", "title": "" }, { "docid": "c8efcedd20020d2264e4ea9c6d32eec4", "score": "0.48062488", "text": "def reader_from_ts(filepathslist):\n filepaths, zvalues, ts_dates = [], [], [];\n xvalues, yvalues = [], []\n for i in range(len(filepathslist)):\n print(filepathslist[i])\n # Establish timing and filepath information\n filepaths.append(filepathslist[i]);\n datestr = re.findall(r\"\\d\\d\\d\\d\\d\\d\\d\\d\", filepathslist[i])[0];\n ts_dates.append(datetime.strptime(datestr, \"%Y%m%d\"));\n # Read in the data, either netcdf3 or netcdf4\n [xvalues, yvalues, zdata] = rwr.read_netcdf4(filepathslist[i]);\n zvalues.append(zdata);\n if i == round(len(filepathslist) / 2):\n print('halfway done reading files...');\n mydata = data(filepaths=np.array(filepaths), date_pairs_julian=None, date_deltas=None,\n xvalues=np.array(xvalues), yvalues=np.array(yvalues), zvalues=np.array(zvalues),\n date_pairs_dt=None, ts_dates=np.array(ts_dates));\n return mydata;", "title": "" }, { "docid": "7c103e37d2c5f7375ffb30422125f800", "score": "0.48060805", "text": "def read_vcirc_toomre(directory, names):\n toomre = []\n radius = [] # arcsec\n for n in names:\n bNan = False\n f = open(directory + n + '_data.txt', 'r')\n lines = f.readlines()\n R = []\n sigma_R = []\n kappa_vcirc = []\n Sigma = []\n dist = float(lines[1].split()[0])\n for l in lines:\n l = l.split()\n if len(l) > 4:\n try:\n R.append(float(l[0]))\n sigma_R.append(float(l[7]))\n kappa_vcirc.append(float(l[-1]))\n if np.isnan(kappa_vcirc[-1]):\n bNan = True\n print(n)\n break\n Sigma.append(float(l[6]))\n except:\n pass\n if not bNan:\n R = np.array(R)\n sigma_R = np.array(sigma_R)\n Sigma = np.array(Sigma) * const.M_sun.value / const.pc.value**2\n kappa = np.array(kappa_vcirc)\n kappa = kappa / (dist * const.kpc.value *\n np.pi / (3600*180)) # 1/km\n Q = kappa * sigma_R * 1e3 / (3.36 * Sigma * const.G.value)\n radius.append(R)\n toomre.append(Q)\n R = np.array(radius) # arcsec\n Q = np.array(toomre)\n return R, Q", "title": "" }, { "docid": "673ed880ca16c864de361b996cbe4327", "score": "0.4790977", "text": "def retrieve_segments_data(path_dir):\n path_dir = pathlib.Path(path_dir)\n segroot = path_dir / \"segments\"\n segment_data = []\n for se in sorted(segroot.glob(\"[0-1]\")):\n chan_data = []\n chroot = se / \"channels\"\n for ch in chroot.glob(\"*.dat\"):\n chan_data.append(load_dat_unit(ch))\n segment_data.append(chan_data)\n return segment_data", "title": "" }, { "docid": "8ba4cdfd95d15115b7e477e111324086", "score": "0.47809553", "text": "def summon_files(folder_path): \r\n descriptors = ['SpARCS_','.fits']\r\n files_SpA=make_file_list(folder_path, \"SpARCS_IDs.txt\", descriptors)\r\n descriptors = ['SVA1_COADD-','.fits']\r\n files_SVA=make_file_list(folder_path, \"SVA1_IDs.txt\", descriptors)\r\n return files_SpA+files_SVA", 
"title": "" }, { "docid": "915bf565c2ec3a57869e267733f11122", "score": "0.47792825", "text": "def load_dense_as_sparse(files, cube_Dcenter, cameraPOs, min_prob=0.5, rayPool_thresh = 0):\n prediction_list, rgb_list, vxl_ijk_list, rayPooling_votes_list = [], [], [], []\n cube_ijk_np, param_np, viewPair_np = None, None, None\n\n cameraT_folder = '/home/mengqi/dataset/MVS/cameraT/'\n cameraPO_folder = '/home/mengqi/dataset/MVS/pos/'\n\n # TODO: the new load_selected_POs hide the view index\n # cameraPOs = camera.load_selected_cameraPO_files_f64(dataset_name=param_volum.__datasetName, view_list=param_volum.__view_set)\n # cameraPOs = prepare_data.load_cameraPos_as_np(cameraPO_folder)\n cameraTs = camera.cameraPs2Ts(cameraPOs)\n\n\n for file_name in files: \n print file_name\n try:\n with open(file_name) as f:\n npz_file = np.load(f)\n \"\"\"\n prediction_sub: {N,1,D,D,D} float16 --> {N,D,D,D}\n rgb_sub: {N,3,D,D,D} uint8 --> {N,D,D,D,3}\n param_sub: {N,8} float64 # x,y,z,resol,modelIndx,indx_d0,indx_d1,indx_d2\n selected_viewPair_viewIndx_sub: {N, No_viewPairs, 2}\n \"\"\"\n prediction_sub, rgb_sub, param_sub, viewPair_sub = \\\n npz_file[\"prediction\"], npz_file[\"rgb\"], npz_file[\"param\"], npz_file[\"selected_pairIndx\"] \n prediction_list, rgb_list, vxl_ijk_list, rayPooling_votes_list, \\\n cube_ijk_np, param_np, viewPair_np = \\\n append_dense_2sparseList(prediction_sub = prediction_sub, rgb_sub = rgb_sub, param_sub = param_sub,\\\n viewPair_sub = viewPair_sub, min_prob = min_prob, rayPool_thresh = rayPool_thresh,\\\n enable_centerCrop = True, cube_Dcenter = cube_Dcenter,\\\n enable_rayPooling = True, cameraPOs = cameraPOs, cameraTs = cameraTs, \\\n prediction_list = prediction_list, rgb_list = rgb_list, vxl_ijk_list = vxl_ijk_list, \\\n rayPooling_votes_list = rayPooling_votes_list, \\\n cube_ijk_np = cube_ijk_np, param_np = param_np, viewPair_np = viewPair_np)\n except:\n print('Warning: this file not exist / valid')\n return prediction_list, rgb_list, vxl_ijk_list, rayPooling_votes_list, \\\n cube_ijk_np, param_np, viewPair_np", "title": "" }, { "docid": "fb74323f07fb55783a0242f67d1e72cf", "score": "0.47786596", "text": "def read_vol_surface(self):\r\n\r\n vol_surface = pd.read_pickle(\"{0}/{1}\".format(ROOT_DIR, self.SRC_VOL_FILE))\r\n\r\n print(vol_surface)\r\n\r\n # xlwings.view(vol_surface)\r", "title": "" }, { "docid": "4c8fecda8e68e80aa243e050b54a0d1f", "score": "0.47634137", "text": "def return_data_ns_cpmg_2site_3d(self, fname):\n\n r180x = load(fname+\"_r180x\"+\".npy\")\n M0 = load(fname+\"_M0\"+\".npy\")\n r10a = load(fname+\"_r10a\"+\".npy\")\n r10b = load(fname+\"_r10b\"+\".npy\")\n r20a = load(fname+\"_r20a\"+\".npy\")\n r20b = load(fname+\"_r20b\"+\".npy\")\n pA = load(fname+\"_pA\"+\".npy\")\n dw = load(fname+\"_dw\"+\".npy\")\n dw_orig = load(fname+\"_dw_orig\"+\".npy\")\n kex = load(fname+\"_kex\"+\".npy\")\n inv_tcpmg = load(fname+\"_inv_tcpmg\"+\".npy\")\n tcp = load(fname+\"_tcp\"+\".npy\")\n num_points = load(fname+\"_num_points\"+\".npy\")\n power = load(fname+\"_power\"+\".npy\")\n back_calc = load(fname+\"_back_calc\"+\".npy\")\n\n # Once off parameter conversions.\n pB = 1.0 - pA\n k_BA = pA * kex\n k_AB = pB * kex\n\n return r180x, M0, r10a, r10b, r20a, r20b, pA, dw, dw_orig, kex, inv_tcpmg, tcp, num_points, power, back_calc, pB, k_BA, k_AB", "title": "" }, { "docid": "accf8de2e533228aabeb484d48169bdd", "score": "0.4760266", "text": "def find_storm_images_3d(\n top_directory_name, radar_source, radar_field_names,\n radar_heights_m_agl, first_spc_date_string, 
last_spc_date_string):\n\n radar_utils.check_data_source(radar_source)\n first_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(\n first_spc_date_string)\n last_spc_date_unix_sec = time_conversion.spc_date_string_to_unix_sec(\n last_spc_date_string)\n\n if radar_source == radar_utils.GRIDRAD_SOURCE_ID:\n file_dict = storm_images.find_many_files_gridrad(\n top_directory_name=top_directory_name,\n radar_field_names=radar_field_names,\n radar_heights_m_agl=radar_heights_m_agl,\n start_time_unix_sec=first_spc_date_unix_sec,\n end_time_unix_sec=last_spc_date_unix_sec,\n one_file_per_time_step=False, raise_error_if_all_missing=True)\n else:\n file_dict = storm_images.find_many_files_myrorss_or_mrms(\n top_directory_name=top_directory_name, radar_source=radar_source,\n radar_field_names=[radar_utils.REFL_NAME],\n reflectivity_heights_m_agl=radar_heights_m_agl,\n start_time_unix_sec=first_spc_date_unix_sec,\n end_time_unix_sec=last_spc_date_unix_sec,\n one_file_per_time_step=False,\n raise_error_if_all_missing=True, raise_error_if_any_missing=False)\n\n radar_file_name_matrix = file_dict[storm_images.IMAGE_FILE_NAMES_KEY]\n num_file_times = radar_file_name_matrix.shape[0]\n\n if radar_source != radar_utils.GRIDRAD_SOURCE_ID:\n radar_file_name_matrix = numpy.reshape(\n radar_file_name_matrix,\n (num_file_times, 1, len(radar_heights_m_agl))\n )\n\n time_missing_indices = numpy.unique(\n numpy.where(radar_file_name_matrix == '')[0]\n )\n return numpy.delete(\n radar_file_name_matrix, time_missing_indices, axis=0)", "title": "" }, { "docid": "2cdbadd449d8cbd1b9a6da11fb8cf269", "score": "0.47542217", "text": "def load_patient_files(self):\n\n def load_folder(folder):\n\n return [os.path.join(folder, filename) for filename in os.listdir(os.path.join(folder))\n if filename.endswith('.dcm')]\n\n # Sets testing filepaths\n self.test_spc_files, self.test_iv_files = [], []\n for patient in self.test_patient:\n spc_folder = patient[self.contrasts[0].lower()]\n iv_folder = patient[self.contrasts[1].lower()] \n\n self.test_spc_files.extend(load_folder(spc_folder))\n self.test_iv_files.extend(load_folder(iv_folder))\n\n self.test_spc_files.sort()\n self.test_iv_files.sort()\n self.test_spc_files, self.val_iv_files = np.array(self.test_spc_files), np.array(self.test_iv_files)\n\n # Sets validation filepaths\n self.val_spc_files, self.val_iv_files = [], []\n for patient in self.val_patient:\n spc_folder = patient[self.contrasts[0].lower()]\n iv_folder = patient[self.contrasts[1].lower()] \n\n self.val_spc_files.extend(load_folder(spc_folder))\n self.val_iv_files.extend(load_folder(iv_folder))\n\n self.val_spc_files.sort()\n self.val_iv_files.sort()\n self.val_spc_files, self.val_iv_files = np.array(self.val_spc_files), np.array(self.val_iv_files)\n\n # Sets training filepaths\n self.tr_spc_files, self.tr_iv_files = [], []\n for patient in self.patient_list:\n spc_folder = patient[self.contrasts[0].lower()]\n iv_folder = patient[self.contrasts[1].lower()] \n\n self.tr_spc_files.extend(load_folder(spc_folder))\n self.tr_iv_files.extend(load_folder(iv_folder))\n\n self.tr_spc_files.sort()\n self.tr_iv_files.sort()\n self.tr_spc_files, self.tr_iv_files = np.array(self.tr_spc_files), np.array(self.tr_iv_files)", "title": "" }, { "docid": "0d53facf2f3c45299adb88744fc08750", "score": "0.4750663", "text": "def get_volume(self, token, channel,\n x_start, x_stop,\n y_start, y_stop,\n z_start, z_stop,\n resolution=1,\n block_size=DEFAULT_BLOCK_SIZE,\n neariso=False):\n return self.data.get_volume(token, 
channel,\n x_start, x_stop,\n y_start, y_stop,\n z_start, z_stop,\n resolution, block_size, neariso)", "title": "" }, { "docid": "53d0235bd786f745877864cd6de4aa53", "score": "0.4733814", "text": "def load_embeddings(file_list, emb_dir):\r\n embeddings = []\r\n for idx, filename in enumerate(file_list):\r\n emb_path = os.path.join(emb_dir, os.path.splitext(filename)[0] + '.npy')\r\n embeddings.append(np.load(emb_path))\r\n\r\n return embeddings", "title": "" }, { "docid": "75bdc748c708f06bb799d45dc1e6b2d4", "score": "0.47265512", "text": "def open_tiffs(fps: List[str]):\n obj = []\n arrays = []\n for file_path in fps:\n print(f\"opening {file_path}\")\n try:\n img = rs.open(file_path)\n array = img.read()\n obj.append(img)\n #print(img.crs.to_string())\n arrays.append(array) \n except:\n print(\"cannot open file at \" + file_path)\n return arrays, obj", "title": "" }, { "docid": "5eb51fb9640f6260111041407fc29ecc", "score": "0.472566", "text": "def read_dicom_series(directory, filepattern = \"image_*\"):\n \n if not os.path.exists(directory) or not os.path.isdir(directory):\n raise ValueError(\"Given directory does not exist or is a file : \"+str(directory))\n print '\\t\\t Read Dicom',directory\n lstFilesDCM = natsort.natsorted(glob.glob(os.path.join(directory, filepattern)))\n print '\\t\\t Length dicom series',len(lstFilesDCM)\n # Get ref file\n RefDs = dicom.read_file(lstFilesDCM[0])\n # Load dimensions based on the number of rows, columns, and slices (along the Z axis)\n ConstPixelDims = (int(RefDs.Rows), int(RefDs.Columns), len(lstFilesDCM))\n # The array is sized based on 'ConstPixelDims'\n ArrayDicom = np.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)\n\n # loop through all the DICOM files\n for filenameDCM in lstFilesDCM:\n # read the file\n ds = dicom.read_file(filenameDCM)\n # store the raw image data\n ArrayDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array\n\n return ArrayDicom", "title": "" }, { "docid": "c66a2ab5f6949a21816b2d2b9c69caeb", "score": "0.47246492", "text": "def test_select_files_varying_format():\n filename = \"psl_6hrPlev_EC-Earth3_dcppA-hindcast_s1960-r1i1p1f1_gr_\"\n\n files = [\n filename + \"196011010900-196110312100.nc\",\n filename + \"196111010900-196210312100.nc\",\n filename + \"196211010300-196310312100.nc\",\n ]\n\n result_yearly = _select_files(files, '1960/1962')\n result_monthly = _select_files(files, '196011/196210')\n result_daily = _select_files(files, '19601101/19601105')\n\n assert result_yearly == files\n assert result_monthly == files[0:2]\n assert result_daily == [files[0]]", "title": "" }, { "docid": "00dba984fffa1f4c97c1728c79d54ea5", "score": "0.47195655", "text": "def cam3(self):\n impath = os.path.join(self.data_path, 'image_03',\n 'data', '*.{}'.format(self.imtype))\n imfiles = sorted(glob.glob(impath))\n # Subselect the chosen range of frames, if any\n if self.frames is not None:\n imfiles = [imfiles[i] for i in self.frames]\n\n # Return a generator yielding the images\n return utils.get_images(imfiles, self.imformat)", "title": "" }, { "docid": "7c617be5e4b42b94994618431f4e6a2a", "score": "0.47188747", "text": "def test_vcf_load_variant_from_multiple_files(get_created_vcf_tabix_files):\n vcf_file_path, tabix_file_path = get_created_vcf_tabix_files(mock_file_input())\n vcf_loader = VCFReader(vcf=vcf_file_path, bams=[], is_fp=False)\n vcf_loader_2x = VCFReader(vcf=vcf_file_path, bams=[], is_fp=False)\n assert (len(vcf_loader) == len(vcf_loader_2x))", "title": "" }, { "docid": "1cba560b8f5ef87fdde51ce11446c669", "score": 
"0.47163305", "text": "def read_mols(filepath: str):\n assert os.path.exists(filepath), \"Check SDFile name\"\n mols = [mol for mol in Chem.SDMolSupplier(filepath)]\n return mols", "title": "" }, { "docid": "b20c2df16a422c89e3f17ec221096f1e", "score": "0.47146112", "text": "def rasterizeMultiFieldVectorLayer(vector_filepath, output_ascii_dir, dem_transform_data, exlude_list=['id', 'ID'], GDT_DataType={'d1':gdal.GDT_Float32, 'd2':gdal.GDT_Float32, 'd3':gdal.GDT_Float32}, keeptif=True, NoData_value=-9999):\n\n #1) Open layer and instanciate layer\n vector_data = ogr.Open(vector_filepath)\n vector_layer = vector_data.GetLayer()\n layer_definition = vector_layer.GetLayerDefn()\n\n #2) Create schema of filenames and optionally of subfolders\n schema = []\n\n #3) loop through input vector layer and create output schema with raster name and output subfolder\n for n in range(layer_definition.GetFieldCount()):\n field_definition = layer_definition.GetFieldDefn(n)\n if field_definition.name in ['net_number', 'net_energy', 'net_height']:\n schema.append({\"filename\": field_definition.name, \"folder\": None, \"attribute_field\": field_definition.name})\n\n # should catch e.g. net_number_net1, net_energy_net1 etc.\n if field_definition.name.count('_') == 2:\n try:\n schema.append({\"filename\": field_definition.name.rsplit(\"_\",1)[0], \"folder\": field_definition.name.rsplit(\"_\",1)[1], \"attribute_field\": field_definition.name})\n except IndexError as indexE:\n print(\"IndexError in getting the field attribute schema with following error:\")\n print(indexE)\n print(\"We will not use any subfolders...\")\n schema.append({\"filename\": field_definition.name, \"folder\": None, \"attribute_field\": field_definition.name})\n\n else:\n try:\n schema.append({\"filename\": field_definition.name.split('_')[0], \"folder\": field_definition.name.split('_')[1], \"attribute_field\": field_definition.name})\n\n # Exception for handling a simple configuration layer with fields containing only d1, d2, d3 -> not subfolder will be created\n except IndexError as indexE:\n print(\"IndexError in getting the field attribute schema with following error:\")\n print(indexE)\n print(\"We will not use any subfolders...\")\n schema.append({\"filename\": field_definition.name.split('_')[0], \"folder\": None, \"attribute_field\": field_definition.name})\n\n #4) Loop through folders and save each attribute field as Raster layer\n created_rasters = []\n for raster in schema:\n \n # exception for rocdensity -> rockdensity\n if raster['attribute_field'] == 'rocdensity':\n raster['filename'] = 'rockdensity'\n\n if raster['attribute_field'] not in exlude_list:\n subfolder = ''\n\n # 1) create a subfolder if it does not yet exist and if folder is not None\n if raster['folder'] is not None:\n subfolder = os.path.join(output_ascii_dir, raster['folder'])\n\n if os.path.isdir(subfolder):\n print(subfolder+\" already exists...\")\n\n else:\n print(\"Created new subfolder:\\n\"+subfolder)\n os.makedirs(subfolder)\n\n # 2) rasterizeLayer the filename into the specific subfolder or into the main folder if folder == None\n try:\n if subfolder != '':\n ascii_path = createRasterizedAscii(vector_data, os.path.join(output_ascii_dir, subfolder), raster['filename'], raster['attribute_field'], dem_transform_data['geo_transform'], dem_transform_data['x_res'], dem_transform_data['y_res'], GDT_DataType[raster['filename']], keeptif, NoData_value)\n created_rasters.append(ascii_path)\n else:\n ascii_path = createRasterizedAscii(vector_data, 
output_ascii_dir, raster['filename'], raster['attribute_field'], dem_transform_data['geo_transform'], dem_transform_data['x_res'], dem_transform_data['y_res'], GDT_DataType[raster['filename']], keeptif, NoData_value)\n created_rasters.append(ascii_path)\n pass\n except KeyError as ke:\n print(\"The Plugin detected an unrecognized vector field. This will be skipped\")\n print(ke)\n pass\n\n\n return created_rasters", "title": "" }, { "docid": "30d0bab85bfa051cbcd3da3f7cc712bc", "score": "0.47050458", "text": "def get_vcfs(vcf_dir, compressed):\n\n if compressed: suffix = \".gz\"\n else: suffix = \"\"\n\n vcf_data = {}\n found = False\n for vcf_path in glob.glob(os.path.join(vcf_dir, \"*_*.vcf\" + suffix)):\n bas = os.path.basename(vcf_path)\n sample, _ = bas.split(\"_\", maxsplit=1)\n reader = vcf.Reader(filename=vcf_path, compressed=compressed)\n vcf_data[sample] = list(reader)\n found = True\n if not found: return None\n else: return vcf_data", "title": "" }, { "docid": "af7428ccb73b0a3d33f916768295d448", "score": "0.47049767", "text": "def loadFromXYZs(filenames, folder=None):\n result = []\n for filename in filenames:\n if folder is not None:\n filename = os.path.join(folder, filename)\n result.append(Design.fromXYZ(filename))\n return result", "title": "" }, { "docid": "82ed1ec7c88787d3d0b0115fbdb2313c", "score": "0.47028622", "text": "def load_3D( path, par, cam_id=None, subjects='all', actions='all' ):\n \n path = os.path.join(path, '*.pkl')\n fnames = glob.glob( path )\n \n data, cam_par, good_keypts = {}, {}, {}\n for subject in subjects:\n for action in actions:\n \n fname = fnames.copy()\n \n #select files \n if subject!='all':\n fname = [file for file in fname if str(subject) in file] \n \n if action!='all':\n fname = [file for file in fname if action in file]\n \n assert len(fname)!=0, 'No files found. 
Check path!'\n \n for fname_ in fname:\n \n #load\n poses = pickle.load(open(fname_, \"rb\"))\n poses3d = poses['points3d']\n \n #only take data in a specified interval\n if 'interval' in par.keys():\n frames = np.arange(par['interval'][0], par['interval'][1])\n poses3d = poses3d[frames, :,:] #only load the stimulation interval\n \n #remove specified dimensions\n if 'dims_to_exclude' in par.keys():\n dimensions = [i for i in range(par['ndims']) if i not in par['dims_to_exclude']] \n poses3d = poses3d[:, dimensions,:]\n \n #reshape data\n poses3d = np.reshape(poses3d, \n (poses3d.shape[0], poses3d.shape[1]*poses3d.shape[2]))\n \n #collect data\n seqname = os.path.basename( fname_ ) \n data[ (subject, action, seqname[:-4]) ] = poses3d #[:-4] is to get rid of .pkl extension\n \n if 'good_keypts' in poses.keys():\n good_keypts[ (subject, action, seqname[:-4]) ] = poses['good_keypts']\n \n if cam_id is not None:\n cam_par[(subject, action, seqname[:-4])] = [poses[cam_id] for i in range(poses3d.shape[0])]\n \n #sort\n data = dict(sorted(data.items()))\n good_keypts = dict(sorted(good_keypts.items()))\n cam_par = dict(sorted(cam_par.items()))\n\n return data, good_keypts, cam_par", "title": "" }, { "docid": "2f9992ad8bf0faa1d1318e2f79c4da79", "score": "0.470055", "text": "def get_all_objs(root_dset, obj_category, item, obj_file_list=None, offsets=None, is_debug=False, verbose=False):\n norm_factors = []\n pts_list = []\n name_list = []\n target_dir = root_dset + '/objects/' + obj_category + '/' + item\n\n offset = 0\n if obj_file_list is None:\n for k, obj_file in enumerate(glob.glob( target_dir + '/part_objs/*.obj')):\n if offsets is not None:\n offset = offsets[k:k+1, :]\n if is_debug:\n print('obj_file is: ', obj_file)\n try:\n tm = trimesh.load(obj_file)\n vertices_obj = np.array(tm.vertices)\n except:\n dict_mesh, _, _, _ = load_model_split(obj_file)\n vertices_obj = np.concatenate(dict_mesh['v'], axis=0)\n pts_list.append(vertices_obj + offset)\n name_obj = obj_file.split('.')[0].split('/')[-1]\n name_list.append(name_obj)\n else:\n for k, obj_files in enumerate(obj_file_list):\n if offsets is not None:\n offset = offsets[k:k+1, :]\n if obj_files is not None and not isinstance(obj_files, list):\n try:\n tm = trimesh.load(obj_files)\n vertices_obj = np.array(tm.vertices)\n except:\n dict_mesh, _, _, _ = load_model_split(obj_files)\n vertices_obj = np.concatenate(dict_mesh['v'], axis=0)\n pts_list.append(vertices_obj + offset)\n name_obj = obj_files.split('.')[0].split('/')[-1]\n name_list.append(name_obj) # which should follow the right order\n elif isinstance(obj_files, list):\n if verbose:\n print('{} part has {} obj files'.format(k, len(obj_files)))\n part_pts = []\n name_objs = []\n for obj_file in obj_files:\n if obj_file is not None and not isinstance(obj_file, list):\n try:\n tm = trimesh.load(obj_file)\n vertices_obj = np.array(tm.vertices)\n except:\n dict_mesh, _, _, _ = load_model_split(obj_file)\n vertices_obj = np.concatenate(dict_mesh['v'], axis=0)\n name_obj = obj_file.split('.')[0].split('/')[-1]\n name_objs.append(name_obj)\n part_pts.append(vertices_obj)\n part_pts_whole = np.concatenate(part_pts, axis=0)\n pts_list.append(part_pts_whole + offset)\n name_list.append(name_objs) # which should follow the right\n\n if is_debug:\n print('name_list is: ', name_list)\n\n parts_a = []\n parts_a = pts_list\n parts_b = [None] * len(obj_file_list)\n # dof_rootd_Aa001_r.obj dof_rootd_Aa002_r.obj none_motion.obj\n # bike: part2: 'dof_Aa001_Ca001_r', 'dof_rootd_Aa001_r'\n if 
obj_category=='bike':\n part0 = []\n part1 = []\n part2 = []\n part0 = pts_list\n for i, name_obj in enumerate(name_list):\n if name_obj in ['dof_Aa001_Ca001_r', 'dof_rootd_Aa001_r']:\n print('part 2 adding ', name_obj)\n part2.append(pts_list[i])\n else:\n print('part 1 adding ', name_obj)\n part1.append(pts_list[i])\n parts = [part0, part1, part2]\n\n elif obj_category=='eyeglasses':\n for i, name_obj in enumerate(name_list):\n if name_obj in ['none_motion']:\n parts_b[0] = []\n parts_b[0].append(pts_list[i])\n if name_obj in ['dof_rootd_Aa001_r']:\n parts_b[1] = []\n parts_b[1].append(pts_list[i])\n elif name_obj in ['dof_rootd_Aa002_r']:\n parts_b[2] = []\n parts_b[2].append(pts_list[i])\n\n parts = [parts_a] + parts_b\n\n else:\n parts_a = []\n parts_a = pts_list\n parts_b = [None] * len(name_list)\n for i, name_obj in enumerate(name_list):\n parts_b[i] = []\n parts_b[i].append(pts_list[i])\n\n parts = [parts_a] + parts_b\n\n corner_pts = [None] * len(parts)\n\n for j in range(len(parts)):\n if is_debug:\n print('Now checking ', j)\n part_gts = np.concatenate(parts[j], axis=0)\n print('part_gts: ', part_gts.shape)\n tight_w = max(part_gts[:, 0]) - min(part_gts[:, 0])\n tight_l = max(part_gts[:, 1]) - min(part_gts[:, 1])\n tight_h = max(part_gts[:, 2]) - min(part_gts[:, 2])\n corner_pts[j] = np.amin(part_gts, axis=1)\n norm_factor = np.sqrt(1) / np.sqrt(tight_w**2 + tight_l**2 + tight_h**2)\n norm_factors.append(norm_factor)\n corner_pt_left = np.amin(part_gts, axis=0, keepdims=True)\n corner_pt_right= np.amax(part_gts, axis=0, keepdims=True)\n corner_pts[j] = [corner_pt_left, corner_pt_right] # [index][left/right][x, y, z], numpy array\n if is_debug:\n print('Group {} has {} points with shape {}'.format(j, len(corner_pts[j]), corner_pts[j][0].shape))\n if verbose:\n plot3d_pts([[part_gts[::2]]], ['model pts'], s=15, title_name=['GT model pts {}'.format(j)], sub_name=str(j))\n # for k in range(len(parts[j])):\n # plot3d_pts([[parts[j][k][::2]]], ['model pts of part {}'.format(k)], s=15, title_name=['GT model pts'], sub_name=str(k))\n\n return parts[1:], norm_factors, corner_pts", "title": "" }, { "docid": "01b72ad09d790fa78b82811b6193f736", "score": "0.46995047", "text": "def get_img_nvols(in_files):\n\n out = None\n from nibabel import load\n img = load(in_files)\n hdr = img.header\n nvols = None\n if len(hdr.get_data_shape()) > 3:\n nvols = int(hdr.get_data_shape()[3])\n else:\n nvols = 1\n out = nvols\n\n return out", "title": "" }, { "docid": "bf1cf8fd69ef4e8a232168a02d38a4cd", "score": "0.46992147", "text": "def read_xdatcar(self,path):\n try:\n with open(path,'r') as xdatcar:\n self._system=xdatcar.readline()\n self._scale_supercell=float(xdatcar.readline().rstrip('\\n').rstrip());\n self._a_supercell_vector=np.array([float(i)*self._scale_supercell for i in xdatcar.readline().rstrip('\\n').split()])\n self._b_supercell_vector=np.array([float(i)*self._scale_supercell for i in xdatcar.readline().rstrip('\\n').split()])\n self._c_supercell_vector=np.array([float(i)*self._scale_supercell for i in xdatcar.readline().rstrip('\\n').split()])\n self._latticevector_matrix_supercell=np.round(np.stack((self._a_supercell_vector,self._b_supercell_vector,self._c_supercell_vector)),6)\n self._element_names = [name for name in xdatcar.readline().rstrip('\\n').split()]\n self._element_numbers = np.array([int(number) for number in xdatcar.readline().rstrip('\\n').split()])\n self._total_number = np.sum(self._element_numbers)\n self._xdatcar=[]\n self._count = 0\n while True:\n 
line=xdatcar.readline().rstrip('\\n').split();\n if not line:\n break\n if (self._isfloat(*[items for items in line])):\n self._xdatcar.append(line)\n self._count +=1\n #self._xdatcar_fract = np.asarray(self._xdatcar,dtype = float)\n self._steps = int(self._count/self._total_number)\n except FileNotFoundError as e:\n print('XDARCAR file does not exist:{}'.format(e))\n raise e\n \"\"\" reshape the data from XDATCAR to 3D matrix steps * atoms * xyz(direction)\"\"\" \n self._xdatcar_fract = np.zeros((self._steps,self._total_number*3));\n for t in range(self._steps):\n self._xdatcar_fract[t,:] = np.asarray(self._xdatcar,dtype = float)[t*self._total_number:(t+1)*self._total_number,:].flatten();\n \n return self._xdatcar_fract;", "title": "" }, { "docid": "767b8dea39f28ce646534bd8bd4c2ccb", "score": "0.46972263", "text": "def get_files(wkdir='Data'):\n flist = glob.glob(wkdir + \"/[0-9]*.sdf\")\n flist = sorted(flist)\n\n return flist", "title": "" }, { "docid": "798b1c2fa02beb1feef900b5156186a5", "score": "0.46969932", "text": "def read_dicomfolder(foldername):\n dicomfolder = DicomFolder()\n for root, dirs, files in os.walk(foldername):\n for file in files:\n if not file.endswith(\".dcm\"):\n continue\n\n dcmname = os.path.join(root, file)\n # print(dcmname)\n try:\n ds = dcmread(dcmname, force=True)\n # instance attr\n SOPInstanceUID = ds.SOPInstanceUID\n InstanceNumber = (\n int(ds.InstanceNumber) if hasattr(ds, \"InstanceNumber\") else None\n )\n ImagePosition = (\n ds.ImagePosition if hasattr(ds, \"ImagePosition\") else None\n )\n ImageOrientation = (\n ds.ImageOrientation if hasattr(ds, \"ImageOrientation\") else None\n )\n\n # series attr\n SeriesInstanceUID = ds.SeriesInstanceUID\n SeriesNumber = (\n int(ds.SeriesNumber) if hasattr(ds, \"SeriesNumber\") else None\n )\n Modality = ds.Modality if hasattr(ds, \"Modality\") else None\n SeriesDescription = (\n ds.SeriesDescription if hasattr(ds, \"SeriesDescription\") else None\n )\n\n # study attr\n StudyInstanceUID = ds.StudyInstanceUID\n StudyID = ds.StudyID if hasattr(ds, \"StudyID\") else None\n StudyDate = ds.StudyDate if hasattr(ds, \"StudyDate\") else None\n StudyDescription = (\n ds.StudyDescription if hasattr(ds, \"StudyDescription\") else None\n )\n\n # patient attr\n PatientID = ds.PatientID if hasattr(ds, \"PatientID\") else \"Anonymous\"\n PatientName = ds.PatientName if hasattr(ds, \"PatientName\") else None\n\n # Insert into dicomfolder\n instance = Instance(\n dcmname,\n SOPInstanceUID,\n InstanceNumber,\n ImagePosition,\n ImageOrientation,\n )\n patient = dicomfolder.get_patient_record(PatientID)\n if patient is None:\n # print(patient)\n series = Series(\n SeriesInstanceUID, SeriesNumber, Modality, SeriesDescription\n )\n study = Study(\n StudyInstanceUID, StudyID, StudyDate, StudyDescription\n )\n patient = Patient(PatientID, PatientName)\n dicomfolder.add_patient_record(patient)\n patient.add_child(study)\n study.add_child(series)\n series.add_child(instance)\n continue\n\n study = patient.get_child(StudyInstanceUID)\n if study is None:\n series = Series(\n SeriesInstanceUID, SeriesNumber, Modality, SeriesDescription\n )\n study = Study(\n StudyInstanceUID, StudyID, StudyDate, StudyDescription\n )\n patient.add_child(study)\n study.add_child(series)\n series.add_child(instance)\n continue\n\n series = study.get_child(SeriesInstanceUID)\n if series is None:\n series = Series(\n SeriesInstanceUID, SeriesNumber, Modality, SeriesDescription\n )\n study.add_child(series)\n series.add_child(instance)\n continue\n\n 
series.add_child(instance)\n\n except pydicom.errors.InvalidDicomError as e:\n print(e)\n print(f\"{dcmname} is not a valid DICOM file\")\n\n # print(filepath, patient_id, patient_name, study_instance_uid, study_id, series_instance_uid, sop_instance_uid, instance_number,\n # image_position, image_orientation)\n return dicomfolder", "title": "" }, { "docid": "ffd359dfcaeb3041f37dbdc0bfc82659", "score": "0.46903324", "text": "def readUBC_DC2Dobs(fileName):\n\n from SimPEG import np\n\n # Load file\n obsfile = np.genfromtxt(fileName,delimiter=' \\n',dtype=np.str,comments='!')\n\n # Check first line and figure out if 2D or 3D file format\n line = np.array(obsfile[0].split(),dtype=float)\n\n tx_A = []\n tx_B = []\n rx_M = []\n rx_N = []\n d = []\n wd = []\n\n for ii in range(obsfile.shape[0]):\n\n # If len==3, then simple format where tx-rx is listed on each line\n if len(line) == 4:\n\n temp = np.fromstring(obsfile[ii], dtype=float,sep=' ')\n tx_A = np.hstack((tx_A,temp[0]))\n tx_B = np.hstack((tx_B,temp[1]))\n rx_M = np.hstack((rx_M,temp[2]))\n rx_N = np.hstack((rx_N,temp[3]))\n\n\n rx = np.transpose(np.array((rx_M,rx_N)))\n tx = np.transpose(np.array((tx_A,tx_B)))\n\n return tx, rx, d, wd", "title": "" }, { "docid": "0f5586c6255b235a9aee214b89f62321", "score": "0.4689237", "text": "def list_volumes(self):\n if self.__volumes == None:\n result = []\n regexpattern = \"^%s\" % self.name\n patternobj = re.compile(regexpattern)\n for volname in self.__datasets.list_volumes():\n patternmatchobj = re.match(patternobj, volname)\n if patternmatchobj != None:\n result.append(volname)\n result.sort()\n self.__volumes = result\n return self.__volumes", "title": "" }, { "docid": "bb306117ff5bb93b71b01ffd00e17779", "score": "0.46844262", "text": "def list_volumes(self):\n volumes = []\n Message.new(Debug=\"coprhd list_volumes invoked\").write(_logger)\n volumes_dict = self.coprhdcli.list_volume()\n if volumes_dict is None:\n return volumes\n for volume_name,volume_attr in volumes_dict.iteritems():\n attached_to = None\n if volume_attr['attached_to'] is None:\n Message.new(Debug=\"coprhd list_volumes attached None\").write(_logger)\n else:\n attached_to = volume_attr['attached_to']\n size = Decimal(volume_attr['size'])\n size = 1073741824 * int(size)\n volume = BlockDeviceVolume(\n size=size, attached_to=attached_to,\n dataset_id=UUID(volume_name), blockdevice_id=u\"block-{0}\".format(volume_name)\n )\n volumes.append(volume)\n Message.new(Debug=\"coprhd list_volumes returning\").write(_logger)\n return volumes", "title": "" }, { "docid": "56b5853dca60a237b767149423b36f9f", "score": "0.46736243", "text": "def files(\n self,\n ) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "title": "" }, { "docid": "e31a235c370bcf4b1c7c0bb69eb6dbaf", "score": "0.4672557", "text": "def get_products(root_dir, date=''):\n nc = glob.glob(f'{root_dir}/*{date}*/*L2W.nc')\n if len(nc) == 0:\n print(f\"no product found for date {date}\")\n return None\n return nc", "title": "" }, { "docid": "c7f970923dfec935832c75c577ca7dd1", "score": "0.46689236", "text": "def read_d3d_time(netcdf_vars):\n utime = netcdftime.utime(netcdf_vars['time'].units)\n dates = utime.num2date(netcdf_vars['time'][:])\n return dates", "title": "" }, { "docid": "3111169eef90fa1ac4a119bc11f2e6ab", "score": "0.4667888", "text": "def _read_s3(\n self, s3_path: str, seg_id: int, mip: int, rounding: Optional[bool] = True\n ):\n # TODO check header length\n\n # check input\n cv = CloudVolume(\n s3_path, mip=mip, 
fill_missing=self.fill_missing, use_https=self.use_https\n )\n skeleton = cv.skeleton.get(seg_id)\n swc_string = skeleton.to_swc()\n string_io = StringIO(swc_string)\n splitted_string = swc_string.split(\"\\n\")\n in_h = True\n h_len = -1\n while in_h:\n h_len += 1\n line = splitted_string[h_len]\n if len(line) == 0 or line[0] != \"#\":\n in_h = False\n df = pd.read_table(\n string_io,\n names=[\"sample\", \"structure\", \"x\", \"y\", \"z\", \"r\", \"parent\"],\n skiprows=h_len,\n sep=\" \"\n # delim_whitespace=True,\n )\n\n # round swc files when reading\n if rounding == True:\n res = cv.scales[mip][\"resolution\"]\n df[\"x\"] = np.round(df[\"x\"] / res[0])\n df[\"y\"] = np.round(df[\"y\"] / res[1])\n df[\"z\"] = np.round(df[\"z\"] / res[2])\n\n return df", "title": "" }, { "docid": "d2b8fd15f32ece97a6c7cfc27e1a22cd", "score": "0.46656358", "text": "def get_GEOSChem_files_as_ds(file_str='GEOSChem.SpeciesConc.*.nc4', wd=None,\n collection=None,\n parallel=True, data_vars=\"minimal\",\n coords=\"minimal\", compat=\"override\",\n combine='by_coords',\n debug=False):\n import glob\n # Check input\n assert type(wd) == str, 'Working directory (wd) provided must be a string!'\n # Get files\n if isinstance(collection, str):\n glob_pattern = '{}/*.{}.*'.format(wd, collection)\n\n else:\n glob_pattern = '{}/{}'.format(wd, file_str)\n files = glob.glob(glob_pattern)\n assert len(files) >= 1, 'No files found matching-{}'.format(wd+file_str)\n # Sort the files based on their name (which contains a regular datastring)\n files = list(sorted(files))\n # open all of these files as single Dataset\n # NOTE: Updated to use faster opening settings for files sharing the same coords\n # https://github.com/pydata/xarray/issues/1823\n ds = xr.open_mfdataset(files,\n# concat_dim='time',\n combine=combine,\n data_vars=data_vars, coords=coords,\n compat=compat, parallel=parallel)\n return ds", "title": "" }, { "docid": "e6ca0162784e97c389218b278074a63a", "score": "0.46589947", "text": "def get_steam_lib_dirs(steam_dir):\n def parse_library_folders(data):\n \"\"\"\n Parse the Steam library folders in the VDF file using the given data\n \"\"\"\n # VDF key & value pairs have the following syntax:\n # \\t\"<KEY>\"\\t\\t\"<VALUE>\"\n pattern = re.compile(r'\\t\"([^\"]*)\"\\t\\t\"([^\"]*)\"')\n\n lines = data.split(\"\\n\")\n\n # Skip the header and the last line\n lines = lines[2:]\n lines = lines[:-2]\n\n library_folders = []\n\n for line in lines: # Skip the header and the last line\n match = pattern.search(line)\n key, value = match.group(1), match.group(2)\n\n # Keys corresponding to library folders are integers. 
Other keys\n # we can skip.\n try:\n key = int(key)\n except ValueError:\n continue\n\n library_folders.append(value)\n\n print(\n \"[INFO] Found {} Steam library folders\".format(\n len(library_folders)\n )\n )\n return library_folders\n\n # Try finding Steam library folders using libraryfolders.vdf in Steam root\n if os.path.isdir(os.path.join(steam_dir, \"steamapps\")):\n folders_vdf_path = os.path.join(\n steam_dir, \"steamapps\", \"libraryfolders.vdf\")\n elif os.path.isdir(os.path.join(steam_dir, \"SteamApps\")):\n folders_vdf_path = os.path.join(\n steam_dir, \"SteamApps\", \"libraryfolders.vdf\")\n try:\n with open(folders_vdf_path, \"r\") as f:\n library_folders = parse_library_folders(f.read())\n except OSError:\n # libraryfolders.vdf doesn't exist; maybe no Steam library folders\n # are set?\n library_folders = []\n\n return [steam_dir] + library_folders", "title": "" }, { "docid": "dfcf709d4b2b2d86f5e3477b74130bf0", "score": "0.4658664", "text": "def ncdirs_to_vec(ncfolder,varname='swan_DIR',\r\n source_convention='directions to',\r\n target_convention='directions to',\r\n **kwargs):\r\n\r\n nc_dir = netCDF4.Dataset(ncfolder+varname+'.63.nc', 'r', format='NETCDF4')\r\n\r\n # Create the file\r\n if 'savepath' in kwargs:\r\n ncfile = kwargs['savepath']\r\n else:\r\n ncfile = ncfolder+varname+'vec.63.nc'\r\n nc_vec = netCDF4.Dataset(ncfile, 'w', format='NETCDF4')\r\n\r\n # Global attributes\r\n nc_vec.Author = getpass.getuser()\r\n nc_vec.Created = time.ctime()\r\n nc_vec.Software = 'Created with Python ' + sys.version\r\n nc_vec.NetCDF_Lib = str(netCDF4.getlibversion())\r\n\r\n # Copy additional global attributes from source\r\n nc_vec.setncatts({a:nc_dir.getncattr(a) for a in nc_dir.ncattrs() if\r\n a not in ['creation_date','modification_date','host',\r\n 'convention','contact']})\r\n\r\n # Create dimensions\r\n nc_vec.createDimension('time',0) # The unlimited dimension\r\n nc_vec.createDimension('node', len(nc_dir.dimensions['node'])) # Number of nodes\r\n\r\n\r\n # Copy variables\r\n for name, var in nc_dir.variables.items():\r\n if name in ['time','x','y']:\r\n # Create select vars\r\n nc_vec.createVariable(name, var.dtype, var.dimensions)\r\n # Copy the variable attributes\r\n nc_vec.variables[name].setncatts({a:var.getncattr(a) for a in var.ncattrs()})\r\n # Copy the variables values\r\n nc_vec.variables[name][:] = nc_dir.variables[name][:]\r\n\r\n # Create the rest of the variables\r\n nc_vec.createVariable(varname+'_u','f8',('time','node'))\r\n nc_vec.variables[varname+'_u'].long_name = 'e/w direction'\r\n nc_vec.variables[varname+'_u'].units = nc_dir[varname].units\r\n nc_vec.variables[varname+'_u'].convention = target_convention\r\n\r\n nc_vec.createVariable(varname+'_v','f8',('time','node'))\r\n nc_vec.variables[varname+'_v'].long_name = 'n/s direction'\r\n nc_vec.variables[varname+'_v'].units = nc_dir[varname].units\r\n nc_vec.variables[varname+'_v'].convention = target_convention\r\n\r\n for aa in range(len(nc_vec['time'])):\r\n if source_convention != target_convention:\r\n dirs = gangles.wrapto360(nc_dir[varname][aa,:].data + 180) * np.pi/180\r\n else:\r\n dirs = nc_dir[varname][aa,:].data * np.pi/180\r\n nc_vec.variables[varname+'_u'][aa,:] = np.sin(dirs)\r\n nc_vec.variables[varname+'_v'][aa,:] = np.cos(dirs)\r\n\r\n # All done here\r\n nc_dir.close()\r\n nc_vec.close()", "title": "" }, { "docid": "85dfd755aab063499267205e8e3db994", "score": "0.46586072", "text": "def surface_distances(filename, **kwds):\n # reads in the tif\n arr = read(filename)\n # send array 
to surface finder\n shell = sample_surface(arr)\n # get void coms\n pores = Porosity(arr)\n com = pore_com(pores)\n # send surface and void COMS to distance.py\n dists = distance(com, shell)\n # TODO: write a csv writer\n write(args.ofile, vol)", "title": "" }, { "docid": "ec2bffe92aa7e27ffa56ffe4a6a16f60", "score": "0.46469903", "text": "def load_depth(dcFileNameList):\n r=[]; n=[]\n acgt = {'A':0, 'C':1, 'G':2, 'T':3}\n \n loc = []\n refb = {}\n cd = []\n for dcFileName in dcFileNameList:\n with open(dcFileName, 'r') as dcFile:\n header = dcFile.readline().strip()\n dc = dcFile.readlines()\n dc = [x.strip().split(\"\\t\") for x in dc]\n \n loc1 = [x[1]+':'+str(x[2]).strip('\\000') for x in dc if x[4] in acgt.keys()]\n\t loc.append( loc1 )\n \n refb1 = dict(zip(loc1, [x[4] for x in dc if x[4] in acgt.keys()]))\n refb.update(refb1)\n cd.append( dict(zip(loc1, [map(int, x[5:9]) for x in dc if x[4] in acgt.keys()])) )\n \n loc = list(reduce(set.intersection, map(set, loc)))\n\n def stringSplitByNumbers(x):\n r = re.compile('(\\d+)')\n l = r.split(x)\n return [int(y) if y.isdigit() else y for y in l]\n\n loc = sorted(loc,key = stringSplitByNumbers)\n logging.debug(loc)\n refb = [refb[k] for k in loc]\n \n J = len(loc)\n N = len(dcFileNameList)\n for i in xrange(0, N):\n logging.debug(\"Processing %s\" % dcFileNameList[i])\n c = np.array( [cd[i][k] for k in loc] )\n n1 = np.sum(c, 1)\n #r1 = np.zeros(J)\n refIdx=np.zeros(J)\n\n for j in xrange(0,J):\n #r1[j] = n1[j] - c[j, acgt[refb[j]]]\n refIdx[j] = 4*j+acgt[refb[j]]\n c = np.delete(c, refIdx, None)\n c = np.reshape(c, (J, 3) )\n #r.append(r1)\n n.append(n1)\n r.append(c)\n r = np.array(r)\n n = np.array(n)\n\n return (r, n, loc, refb)", "title": "" }, { "docid": "256250337afacccb6ed234dadb74d777", "score": "0.4645479", "text": "def get_data_from_multiple_folders_helper(working_directory, folderlist,\n filename, flip_h=False):\n output = np.array([])\n for foldername in folderlist:\n result = get_data_from_folder_helper(working_directory, foldername,\n filename, flip_h=flip_h)\n output = np.concatenate([output, result.ravel()])\n return output", "title": "" }, { "docid": "f68a188185e949fb5e3e956abd5010d4", "score": "0.46394134", "text": "def __getFilesFromPath(self):\n meta = {}\n return self.fcc.findFilesByMetadata(meta, self.pathToOverlayFiles)", "title": "" }, { "docid": "8d8deac11ed3df15b9d841ecaab6c3a1", "score": "0.46391284", "text": "def list_files(tag=None, sat_id=None, data_path=None, format_str=None):\n\n if format_str is None:\n if tag == '':\n tag = 'L2'\n format_str = ''.join(['SPORT_', tag, '_IVM_{year:04d}-{month:02d}-'\n '{day:02d}_v{version:02d}r{revision:04d}.NC'])\n return pysat.Files.from_os(data_path=data_path, format_str=format_str)", "title": "" }, { "docid": "ee4a6e6af1c3188f4c1181686df257b6", "score": "0.46370262", "text": "def read_csvs(filenames: Iterable[str], **kwargs) -> pd.DataFrame:\n dfs: List[pd.DataFrame] = []\n for filename in filenames:\n dfs.append(pd.read_csv(filename, header=0, **kwargs))\n return pd.concat(dfs, ignore_index=True)", "title": "" }, { "docid": "8296309f7461d4fff16c9e8127034f20", "score": "0.4635949", "text": "def from_multiple_images(cls, path_list: Sequence, dtype=np.uint16, **kwargs):\n obj = cls.from_demo_image()\n # save a combined image to a temporary dir, then load it back in as a PFDicomImage\n with TemporaryDirectory() as tmp:\n filename = osp.join(tmp, 'mydcm.dcm')\n image.load_multiples(path_list, dtype=dtype, **kwargs).save(filename)\n obj.image = PFDicomImage(filename)\n 
return obj", "title": "" }, { "docid": "c36e1c442efa328bee29a21ac5302e11", "score": "0.4635051", "text": "def find_DCM_files_serial(rootpath=None):\n time_zero = time.time()\n dcmList = []\n dcmDict = {}\n\n # ~~ Walk directory, add all '*.dcm' files to dict by modality\n for root, dirs, files in os.walk(rootpath):\n for file in files:\n if file.endswith('.dcm'):\n fullpath = os.path.join(root, file)\n dcmList.append(fullpath)\n\n if len(dcmList) > 300:\n print('Too many files!')\n return {}\n\n for fullpath in dcmList:\n try:\n modality = dicom.read_file(fullpath, force=True).SOPClassUID\n except AttributeError:\n modality = 'unknown'\n if modality not in dcmDict:\n dcmDict[modality] = []\n\n dcmDict[modality].append(fullpath)\n\n return dcmDict", "title": "" }, { "docid": "c2bd076aad9324aff466c2b0cf49c078", "score": "0.46310812", "text": "def GetLayers(mxds):\n\n lyrlist = []\n if type(mxds) is list:\n for mxdpath in mxds:\n print mxdpath\n mxd = arcpy.mapping.MapDocument(mxdpath)\n i = 0\n for lyr in arcpy.mapping.ListLayers(mxd):\n lyrlist.append([os.path.basename(mxdpath), str(lyr.name), i])\n i += 1\n print 'MXD/tLAYER/tLAYER_INDEX'\n for row in lyrlist:\n print row\n return lyrlist\n elif type(mxds) is str:\n mxd = arcpy.mapping.MapDocument(mxds)\n i = 0\n for lyr in arcpy.mapping.ListLayers(mxd):\n lyrlist.append([os.path.basename(mxds), str(lyr.name), i])\n i += 1\n print 'MXD/tLAYER/tLAYER_INDEX'\n for row in lyrlist:\n print row\n return lyrlist\n else:\n print \"The mxd needs to be formatted as a list, not a string. add brackets around the variable ['mxdpath']\"", "title": "" }, { "docid": "2ab570f1ce369c1ec6ada0fbb8ea79e1", "score": "0.4630829", "text": "def viewersPerVolume(self,volumeNodes=None,background=None,label=None,include3D=False):\n import math\n\n if not volumeNodes:\n volumeNodes = slicer.util.getNodes('*VolumeNode*').values()\n\n if len(volumeNodes) == 0:\n return\n\n #\n # construct the XML for the layout\n # - one row per volume\n # - viewers for each orientation\n #\n orientations = ('Axial', 'Sagittal', 'Coronal')\n actualViewNames = []\n index = 1\n layoutDescription = ''\n layoutDescription += '<layout type=\"vertical\">\\n'\n row = 0\n for volumeNode in volumeNodes:\n layoutDescription += ' <item> <layout type=\"horizontal\">\\n'\n column = 0\n for orientation in orientations:\n viewName = volumeNode.GetName() + '-' + orientation\n rgb = [int(round(v*255)) for v in self.lookupTable.GetTableValue(index)[:-1]]\n color = '#%0.2X%0.2X%0.2X' % tuple(rgb)\n layoutDescription += self.sliceViewItemPattern.format(viewName=viewName,orientation=orientation,color=color)\n actualViewNames.append(viewName)\n index += 1\n column += 1\n if include3D:\n print('TODO: add 3D viewer')\n layoutDescription += '</layout></item>\\n'\n row += 1\n layoutDescription += '</layout>'\n self.assignLayoutDescription(layoutDescription)\n\n # let the widgets all decide how big they should be\n slicer.app.processEvents()\n\n # put one of the volumes into each row and set orientations\n layoutManager = slicer.app.layoutManager()\n sliceNodesByViewName = {}\n for volumeNode in volumeNodes:\n for orientation in orientations:\n viewName = volumeNode.GetName() + '-' + orientation\n sliceWidget = layoutManager.sliceWidget(viewName)\n compositeNode = sliceWidget.mrmlSliceCompositeNode()\n compositeNode.SetBackgroundVolumeID(volumeNode.GetID())\n sliceNode = sliceWidget.mrmlSliceNode()\n sliceNode.SetOrientation(orientation)\n sliceWidget.fitSliceToBackground()\n sliceNodesByViewName[viewName] = 
sliceNode\n return sliceNodesByViewName", "title": "" }, { "docid": "d3d2ed9aed3d57ef4c2e086f329041b2", "score": "0.46248406", "text": "def recursive_read_dicom(curr_path):\n tmp_lst = []\n for curr_f in os.listdir(curr_path):\n tmp_path = os.path.join(curr_path, curr_f)\n if os.path.isdir(tmp_path):\n tmp_lst = tmp_lst + recursive_read_dicom(tmp_path)\n else:\n # read in dicom\n curr_dicom_f = dicom.read_file(tmp_path)\n\n tmp_lst.append(curr_dicom_f)\n\n return tmp_lst", "title": "" }, { "docid": "d02c303297655ca59d69476dc65cde95", "score": "0.46229216", "text": "def volumes(self) -> Optional[List[str]]:\n return self.__volumes", "title": "" }, { "docid": "e1b723442cbcc7387c3bea44db62dd1a", "score": "0.4615586", "text": "def get_gds_files_for_path_tokens(volume_name: str, path_tokens: list):\n # TODO: check path_tokens array for minimal length\n qs = GDSFile.objects.filter(volume_name=volume_name, path__contains=path_tokens[0])\n for token in path_tokens[1:]:\n qs = qs.filter(path__contains=token)\n\n return qs", "title": "" }, { "docid": "1accdb7c5c6467e7c866485096eb7d78", "score": "0.46153262", "text": "def testReadSelectedVolumesGPT(self):\n test_path = self._GetTestFilePath(['gpt.raw'])\n self._SkipIfPathNotExists(test_path)\n\n test_os_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_OS, location=test_path)\n test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)\n test_gpt_container_path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_GPT, location='/', parent=test_raw_path_spec)\n\n volume_system = gpt_volume_system.GPTVolumeSystem()\n volume_system.Open(test_gpt_container_path_spec)\n\n input_file_object = io.BytesIO(b'all\\n')\n test_input_reader = command_line.FileObjectInputReader(input_file_object)\n\n test_mediator = command_line.CLIVolumeScannerMediator(\n input_reader=test_input_reader)\n\n selected_volumes = test_mediator._ReadSelectedVolumes(\n volume_system, prefix='p')\n self.assertEqual(selected_volumes, ['p1', 'p2'])", "title": "" }, { "docid": "6dbe9f64ea27bd7dd5b55aba855599d0", "score": "0.46141967", "text": "def getMatchingGCPDirectories(args):\n search_pid, bucket = getGoogleCloudSearchPath(args)\n list_file = 'landsat_scenelist_{}.txt'.format(search_pid)\n list_file = list_file.replace(' ','')\n cmdstr = 'gsutil ls -d {}>{}/{}'.format(bucket, args.basedir, list_file)\n print('\\nUsing gsutil to search directories that match:\\n{}'.format(bucket))\n call(cmdstr, shell=True)\n return list_file", "title": "" }, { "docid": "c47ab2fba734b46e13d045c0d610ba67", "score": "0.46121383", "text": "def readUBC_DC2DMesh(fileName):\n\n from SimPEG import np\n # Open file\n fopen = open(fileName,'r')\n\n # Read down the file and unpack dx vector\n def unpackdx(fid,nrows):\n for ii in range(nrows):\n\n line = fid.readline()\n var = np.array(line.split(),dtype=float)\n\n if ii==0:\n x0= var[0]\n xvec = np.ones(int(var[2])) * (var[1] - var[0]) / int(var[2])\n xend = var[1]\n\n else:\n xvec = np.hstack((xvec,np.ones(int(var[1])) * (var[0] - xend) / int(var[1])))\n xend = var[0]\n\n return x0, xvec\n\n #%% Start with dx block\n # First line specifies the number of rows for x-cells\n line = fopen.readline()\n nl = np.array(line.split(),dtype=float)\n\n [x0, dx] = unpackdx(fopen,nl)\n\n\n #%% Move down the file until reaching the z-block\n line = fopen.readline()\n if not line:\n line = fopen.readline()\n\n #%% End with dz block\n # First line specifies the number 
of rows for z-cells\n line = fopen.readline()\n nl = np.array(line.split(),dtype=float)\n\n [z0, dz] = unpackdx(fopen,nl)\n\n # Flip z0 to be the bottom of the mesh for SimPEG\n z0 = z0 - sum(dz)\n dz = dz[::-1]\n #%% Make the mesh using SimPEG\n\n from SimPEG import Mesh\n tensMsh = Mesh.TensorMesh([dx,dz],(x0, z0))\n return tensMsh", "title": "" }, { "docid": "a2a10051ec99452b24e0626a1992341d", "score": "0.46119982", "text": "def get_rdf_group_volume_list(self, rdf_number):\n response = self.get_resource(\n category=REPLICATION,\n resource_level=SYMMETRIX, resource_level_id=self.array_id,\n resource_type=RDFG, resource_type_id=rdf_number,\n resource=VOLUME)\n return response.get('name', list()) if response else list()", "title": "" }, { "docid": "e0ea5a306ab75aad8a03bd4738033f6f", "score": "0.4608701", "text": "def load_volume(self, db, m):\n \"\"\"\n Arguments:\n db: DatabaseBRATS\n m: MRI modality\n Returns:\n volume as numpy array\n \"\"\"\n volume_path = os.path.join(db.db_path, self.relative_path,\n self.name + '_' + m + '.nii')\n return nib.load(volume_path).get_data().astype('float32')", "title": "" }, { "docid": "ae0569f45fbcc6680099d0c59866363c", "score": "0.4607456", "text": "def make3d(infile, layerfractions, vglvls):\n nz = layerfractions.shape[0]\n outfile = infile.slice(LAY=[0]*nz)\n\n for key, var in outfile.variables.items():\n if key != 'TFLAG':\n var[:] *= layerfractions[None, :, None, None]\n outfile.VGLVLS = vglvls.astype('f')\n outfile.NLAYS = nz\n return outfile", "title": "" }, { "docid": "3aeeeca9ff6cbafbf80de5f0497de4a9", "score": "0.46065032", "text": "def open_mp3(path):\n f = pydub.AudioSegment.from_mp3(path)\n return np.array(f.get_array_of_samples()).reshape(-1)", "title": "" }, { "docid": "33d952c1b894afaf9a2c15a9c24807f0", "score": "0.4606098", "text": "def storage_volume_get_all(context):\n\n return IMPL.storage_volume_get_all(context)", "title": "" }, { "docid": "42b8038f84393666da989782687750e0", "score": "0.46008846", "text": "def loadSubredditVectors(infilename):\n if infilename.endswith(\"json\"):\n return loadSubredditVectorsJson(infilename)\n if infilename.endswith(\"pkl\"):\n return loadSubredditVectorsPkl(infilename)", "title": "" }, { "docid": "5bdbfff425ebf58bea48e05da305b52c", "score": "0.45973298", "text": "def rednet_pipeline(mvs_list):\n print('sample number: ', len(mvs_list))\n\n # create output folder\n output_folder = os.path.join(FLAGS.dense_folder, 'depths_rednet')\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n # testing set\n mvs_generator = iter(MVSGenerator(mvs_list, FLAGS.view_num))\n generator_data_type = (tf.float32, tf.float32, tf.float32, tf.float32, tf.string)\n mvs_set = tf.data.Dataset.from_generator(lambda: mvs_generator, generator_data_type)\n mvs_set = mvs_set.batch(FLAGS.batch_size)\n mvs_set = mvs_set.prefetch(buffer_size=1)\n\n # data from dataset via iterator\n mvs_iterator = mvs_set.make_initializable_iterator()\n croped_images, centered_images, scaled_cams, croped_cams, locations = mvs_iterator.get_next()\n\n # set shapes\n croped_images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3]))\n centered_images.set_shape(tf.TensorShape([None, FLAGS.view_num, None, None, 3]))\n scaled_cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))\n croped_cams.set_shape(tf.TensorShape([None, FLAGS.view_num, 2, 4, 4]))\n\n depth_start = tf.reshape(\n tf.slice(scaled_cams, [0, 0, 1, 3, 0], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])\n depth_interval = tf.reshape(\n 
tf.slice(scaled_cams, [0, 0, 1, 3, 1], [FLAGS.batch_size, 1, 1, 1, 1]), [FLAGS.batch_size])\n depth_num = tf.cast(\n tf.reshape(tf.slice(scaled_cams, [0, 0, 1, 3, 2], [1, 1, 1, 1, 1]), []), 'int32')\n\n depth_end = depth_start + (tf.cast(depth_num, tf.float32) - 1) * depth_interval\n\n # depth map inference using RED\n init_depth_map, prob_map = inference_winner_take_all(centered_images, scaled_cams,\n depth_num, depth_start, depth_end)\n\n # init option\n init_op = tf.global_variables_initializer()\n var_init_op = tf.local_variables_initializer()\n\n # GPU grows incrementally\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n with tf.Session(config=config) as sess: \n\n # initialization\n sess.run(var_init_op)\n sess.run(init_op)\n total_step = 0\n\n # load model\n if FLAGS.model_dir is not None:\n pretrained_model_ckpt_path = os.path.join(FLAGS.model_dir, 'model.ckpt')\n restorer = tf.train.Saver(tf.global_variables())\n restorer.restore(sess, '-'.join([pretrained_model_ckpt_path, str(FLAGS.ckpt_step)]))\n print('Pre-trained model restored from %s' %\n ('-'.join([pretrained_model_ckpt_path, str(FLAGS.ckpt_step)])))\n total_step = FLAGS.ckpt_step\n \n # run inference for each reference view\n sess.run(mvs_iterator.initializer)\n for step in range(len(mvs_list)):\n\n start_time = time.time()\n try:\n out_init_depth_map, out_prob_map, out_images, out_cams, out_croped_cams, out_locations = sess.run(\n [init_depth_map, prob_map, croped_images, scaled_cams, croped_cams, locations])\n except tf.errors.OutOfRangeError:\n print(\"all dense finished\") # ==> \"End of dataset\"\n break\n duration = time.time() - start_time\n\n # squeeze output\n out_init_depth_image = np.squeeze(out_init_depth_map)\n out_prob_map = np.squeeze(out_prob_map)\n out_prob_map[np.isnan(out_prob_map)] = 1e-10\n out_ref_image = np.squeeze(out_images)\n out_ref_image = np.squeeze(out_ref_image[0, :, :, :])\n out_ref_cam = np.squeeze(out_croped_cams)\n out_ref_cam = np.squeeze(out_ref_cam[0, :, :, :])\n\n out_location = np.squeeze(out_locations)\n out_location = np.squeeze(out_location[0, :])\n out_index = out_location[0].decode('utf-8')\n\n init_depth_map_path = output_folder + ('/%s_init.pfm' % out_index)\n prob_map_path = output_folder + ('/%s_prob.pfm' % out_index)\n out_ref_image_path = output_folder + ('/%s.jpg' % out_index)\n out_ref_cam_path = output_folder + ('/%s.txt' % out_index)\n\n # save output\n write_pfm(init_depth_map_path, out_init_depth_image)\n write_pfm(prob_map_path, out_prob_map)\n # color\n if not os.path.isdir(output_folder+'\\color'):\n os.mkdir(output_folder+'\\color')\n size1 = len(out_init_depth_image)\n size2 = len(out_init_depth_image[1])\n e = np.ones((size1, size2), dtype=np.float)\n out_init_depth_image = e * 1200 - out_init_depth_image\n plt.imsave(output_folder + ('/color/%s_init.png' % out_index), out_init_depth_image, format='png')\n plt.imsave(output_folder + ('/color/%s_prob.png' % out_index), out_prob_map, format='png')\n\n out_ref_image = cv2.cvtColor(out_ref_image, cv2.COLOR_RGB2BGR)\n #image_file = file_io.FileIO(out_ref_image_path, mode='w')\n imsave(out_ref_image_path, np.uint8(out_ref_image))\n write_cam(out_ref_cam_path, out_ref_cam, out_location)\n total_step = step + 1\n print('depth inference %d finished, image %s finished, (%.3f sec/step)' %(step, out_index, duration))\n\n print('total %d finished, image %s finished' %(total_step, out_index))", "title": "" }, { "docid": "6cf74f63247b971568d4498c84f8af07", "score": "0.45921466", "text": "def 
read_files(dirs, path):\n imgs = []\n for d in dirs:\n sectionFiles = listdir(join(path + d))\n imgs += [ array(Image.open(\"{}{}/{}\".format(path,d,f))) for f in sectionFiles ]\n return imgs", "title": "" }, { "docid": "9ba4b8816d87eb32b9b35b4782ee93af", "score": "0.4590175", "text": "def get_gds_files_for_regex(volume_name: str, pattern: str):\n qs = GDSFile.objects.filter(volume_name=volume_name, path__regex=pattern)\n\n return qs", "title": "" }, { "docid": "145645ae89abeef4226bde1e4318f77a", "score": "0.45884982", "text": "def getSamples(filepath):\n return sf.read(filepath, dtype='float32')", "title": "" } ]
5647f4aac8d50d08f4c4673a0ac7661a
Get transitions from states.
[ { "docid": "f4f7119e780434b6f5604700929de29e", "score": "0.7659456", "text": "def all_transitions(self):\n transitions = list()\n for src_state in self.states:\n for input_value, dst_state in src_state.items():\n transitions.append((src_state, input_value, dst_state))\n return transitions", "title": "" } ]
[ { "docid": "edb52bb22b35b21c7d670dca2ab5264f", "score": "0.7203781", "text": "def extract(transitions):\n states = torch.cat([t.state for t in transitions])\n actions = torch.cat([t.action for t in transitions])\n rewards = torch.cat([t.reward for t in transitions])\n mask = torch.tensor([t.next_state is not None for t in transitions])\n next_states = torch.cat([t.next_state for t in transitions if t.next_state is not None])\n return states, actions, rewards, next_states, mask", "title": "" }, { "docid": "0fbd1f846f6d5794c4ec62b344a2c936", "score": "0.7174492", "text": "def get_transitions(self, indexes):\n return self.deque.get_transitions(indexes)", "title": "" }, { "docid": "51ba1326d518f2caa40a09b606eace94", "score": "0.71002537", "text": "def transitions(self, s):\n if s in self.goal_states:\n return [[Transition(state=s, prob=1.0, reward=0)] for a in self.ACTIONS]\n\n if s in self.risky_goal_states:\n goal = next(iter(self.goal_states))\n return [[Transition(state=goal, prob=self.risky_p_loss, reward=-50),\n Transition(state=goal, prob=1-self.risky_p_loss, reward=100)] for a in self.ACTIONS]\n\n transitions_full = []\n for a in self.ACTIONS:\n transitions_actions = []\n\n # over all *random* actions\n for a_ in self.ACTIONS:\n s_ = self.target_state(s, a_)\n if s_ in self.cliff_states:\n r = self.FALL_REWARD\n # s_ = self.initial_state\n s_ = next(iter(self.goal_states))\n else:\n r = -1\n p = 1.0 - self.random_action_p if a_ == a else self.random_action_p / 3\n if p != 0:\n transitions_actions.append(Transition(s_, p, r))\n transitions_full.append(transitions_actions)\n\n return transitions_full", "title": "" }, { "docid": "352708d2419de44ff27f5284b8b0f4d9", "score": "0.70946234", "text": "def get_diff_state_transitions(self) -> list:\n transitions = []\n for trans, state in self.transitions.items():\n if state != self:\n transitions.append(trans)\n return transitions", "title": "" }, { "docid": "a7f522dfc15a81ad8cf4c70a92bfacaf", "score": "0.7042979", "text": "def get_transition_map(self):\n return self._state_transitions", "title": "" }, { "docid": "c29556e978aeefa77ede6c9e8c469fb8", "score": "0.69840056", "text": "def get_transitions(self):\n for t in self.transitions:\n ts = []\n for si, st in enumerate(self.store_types):\n if st == BASE:\n ts.append(Transition([t.lhs[si]], [t.rhs[si]]))\n elif st == STREAM:\n assert len(t.rhs[si]) == 0\n ts.append(Transition([t.lhs[si]], []))\n elif st == TAPE:\n b = t.rhs[si].values\n if t.rhs[si].position == -1:\n d = 'L'\n elif t.rhs[si].position == 0:\n d = 'S'\n elif t.rhs[si].position == len(b):\n d = 'R'\n else:\n raise ValueError('No move for length {} and position {}'.format(len(b), t.rhs[si].position))\n ts.append(Transition([t.lhs[si]], [b, d]))\n else:\n assert False\n yield AlignedTransition(ts)", "title": "" }, { "docid": "7e4ad227fc028b0821476a2a8cde8096", "score": "0.6782277", "text": "def transitions(self):\n baseTrans = self._transitions()\n newTrans = {}\n for eventType, nextState in baseTrans.iteritems():\n # Catch the timeout event and replace with our class specific \n # timeout event type\n if eventType == TIMEOUT:\n eventType = self._timeoutEvent\n self._hasTimeout = True\n \n \n if nextState == Next:\n # If the next state is the special Next marker state, swap it \n # out for the real next state\n nextState = self._nextState\n elif nextState == Failure:\n # If that state is the special failure marker state, swap it \n # out for the real failure state\n if self._failureState is None:\n raise \"ERROR: transition to non 
existent failure state\"\n nextState = self._failureState\n \n # Store the event\n newTrans[eventType] = nextState\n \n return newTrans", "title": "" }, { "docid": "c4dff7620d7c66500f2fe9f0b2584fa8", "score": "0.67056125", "text": "def transitions(self):\n return deepcopy(self._deltas)", "title": "" }, { "docid": "7101fee9a2c3b1569a37a12b355aa317", "score": "0.65812445", "text": "def get_transitions( self ):\n transitions = set()\n for key in self._adjacence:\n for end in self._adjacence[key]:\n transitions.add( (key[0], key[1], end) )\n return pretty_set(transitions)", "title": "" }, { "docid": "f2722906ae11852c0f5dc2ee4d65d06d", "score": "0.6573625", "text": "def transition_states(self):\n if self.state_index < len(self.states) - 1:\n self.state_index += 1\n else: \n self.state_index = 0\n self.cur_state = self.states[self.state_index]\n # print(self.cur_state)", "title": "" }, { "docid": "f2722906ae11852c0f5dc2ee4d65d06d", "score": "0.6573625", "text": "def transition_states(self):\n if self.state_index < len(self.states) - 1:\n self.state_index += 1\n else: \n self.state_index = 0\n self.cur_state = self.states[self.state_index]\n # print(self.cur_state)", "title": "" }, { "docid": "7b8bdbfe1ede1aca318e67c7479e2192", "score": "0.65733206", "text": "def transitions():\n return { A : Middle }", "title": "" }, { "docid": "3ddd5b7ee9688ee799582aeb282a453b", "score": "0.64616394", "text": "def get_same_state_transitions(self) -> list:\n dst = self.get_diff_state_transitions()\n all_trans = set(self.transitions.keys())\n return [t for t in all_trans if t not in dst]", "title": "" }, { "docid": "0860a7e67354ce23e0983803cb9f7c99", "score": "0.6457405", "text": "def states(self):\n if self.state is None: raise ValueError(\"This Machine doesn't have a state\")\n return (set(t.lhs[self.state][0] for t in self.transitions) | \n set(t.rhs[self.state][0] for t in self.transitions))", "title": "" }, { "docid": "22ac3d078933f07b03c1a400e3e83fee", "score": "0.64327973", "text": "def _get_valid_transition_previous_states(self, model, state):\n try:\n # return list of states `state` can transition from\n return next(\n t[1]\n for t in model.STATE_TRANSITIONS\n if state == t[0])\n except StopIteration:\n # `state` is not listed in `STATE_TRANSITIONS`, meaning\n # it is allowed to transition from any other state;\n # return all states except value of `state`\n\n # get states that can't be transitioned out of\n fixed_states = list(dict(model.STATE_TRANSITIONS).get(None, []))\n\n return [\n choice[0]\n for choice in next(\n field\n for field in model._meta.fields\n if field.name == 'state'\n ).choices\n if choice[0] not in fixed_states + [state]\n ] + [None]", "title": "" }, { "docid": "ec8c22dcada66c578c860f76938141db", "score": "0.63623875", "text": "def make_state_transition(self, n_states: int) -> np.ndarray:\n return self.state_transition_", "title": "" }, { "docid": "76cbaff030ae0a32b48634f52dec618e", "score": "0.6340387", "text": "def generate_transitions(self):\n for (state, map) in self._transitions.items():\n for (symbol, targets) in map.items():\n for target in targets:\n yield (state, symbol, target)", "title": "" }, { "docid": "be604df75c2905ba23d6b241690deec3", "score": "0.6331591", "text": "def transition_list(self):\n xn_list = []\n xn_list.append(Transition((0, 1, 0), (1, 0, 0), 1.0))\n xn_list.append(Transition((1, 0, 0), (0, 1, 0), 1.0))\n return xn_list", "title": "" }, { "docid": "68010dd11c8a0fe16e51803b941a8f3f", "score": "0.6254279", "text": "def transition(self, aps):\n states = 
[self.graph.transition_states(state, aps) for state in self.states]\n\n return AutomatonSet(self.graph, set().union(*states))", "title": "" }, { "docid": "f1c074f98503995d026c922e0281c5db", "score": "0.6252052", "text": "def transition_states(self, state: int, aps: List[bool]):\n outstates = set()\n\n for u, v, data in self.network.edges(state, data=True):\n label: hoa.Evaluator = data[\"evaluator\"]\n if label(aps):\n outstates.add(int(v))\n\n return outstates", "title": "" }, { "docid": "105475dd706e23edaabbdd46ce29a93e", "score": "0.6233537", "text": "def CombineTransitions(\n transitions: t.Iterable[Transition],\n ) -> t.Tuple[States, Actions, Rewards, States, numpy.ndarray]:\n s_list = [] # type: t.List[State]\n a_list = [] # type: t.List[Action]\n r_list = [] # type: t.List[Reward]\n sp_list = [] # type: t.List[State]\n r_mask_list = [] # type: t.List[int]\n for idx, transition in enumerate(transitions):\n s_list.append(transition.s)\n a_list.append(transition.a)\n r_list.append(transition.r)\n if transition.sp is not None:\n sp_list.append(transition.sp)\n r_mask_list.append(1)\n else:\n sp_list.append(transition.s)\n r_mask_list.append(0)\n return (\n numpy.concatenate(s_list),\n numpy.concatenate(a_list),\n numpy.array(r_list),\n numpy.concatenate(sp_list),\n numpy.array(r_mask_list),\n )", "title": "" }, { "docid": "e39cc208ac2f083b1c100dc9d308e8c2", "score": "0.6223489", "text": "def create_transition_dataframe(states):\n output = states.copy()\n for col in states:\n output[col] = map(one_step_transition, states[col].shift(), states[col])\n return output", "title": "" }, { "docid": "036574de1374924b408de9a0067b5f1a", "score": "0.61272866", "text": "def find_transitions(self, transition):\n transitions = self.get_transitions()\n result = []\n if transition[0] and transition[1]:\n for t in transitions:\n if transition[0] == t[0] and transition[1] == t[1]:\n result.append(t)\n #elif transition[0] == t[0] and \"Committed\" in t[1]:\n elif transition[0] == t[0]:\n for tt in transitions:\n if t[1] == tt[0] and transition[1] == tt[1]:\n result.append(t)\n elif transition[0]:\n result = filter(lambda x: x[0] == transition[0], transitions)\n elif transition[1]:\n for t in transitions:\n #if \"Committed\" not in t[0] and transition[1] == t[1]:\n if transition[1] == t[1]:\n result.append(t)\n else:\n #result = filter(lambda x: \"Committed\" not in x[0], transitions)\n result = transitions\n return result", "title": "" }, { "docid": "10d6cf37bba262ca58157056e654c910", "score": "0.6121095", "text": "def visit_get_states(state=None, states=None, states_range=[0, None, 1]):\n if state is not None:\n states = [state]\n elif states is None:\n if states_range[1] is None:\n states_range[1] = visit.TimeSliderGetNStates()\n else:\n states_range[1] += 1\n states = list(range(*states_range))\n return states", "title": "" }, { "docid": "32317aff5c81e53bb3172811e916a7d7", "score": "0.61147213", "text": "def get_transitions(self, cell_transition, orientation):\n bits = (cell_transition >> ((3 - orientation) * 4))\n return ((bits >> 3) & 1, (bits >> 2) & 1, (bits >> 1) & 1, (bits) & 1)", "title": "" }, { "docid": "1a57bbbd6d5e994dfdc151bea7ab8729", "score": "0.61109346", "text": "def get_transition_matrix(self):\n return np.array([self.distribution_to_vector(self.get_transition_distribution(s))\n for s in self.get_states()])", "title": "" }, { "docid": "636f1cca3d815ca18999f9bd01a1b23d", "score": "0.6080263", "text": "def read_transitions(type, states):\n print('Reading transitions...')\n with 
open('tables/table1.csv', 'r') as file:\n lines = file.readlines()\n\n rows = []\n for line in lines:\n rows.append(line.strip('\\n').split(','))\n\n transitions = {}\n if type == 'dfa':\n for row in rows[1:]:\n for i, cell in enumerate(row[1:]):\n if cell != '':\n try:\n transitions[(row[0], rows[0][i+1])] = cell\n if cell not in states:\n print(f'Detected unknown state {cell}. ಠ_ಠ')\n except IndexError:\n print('\\nFATAL ERROR: You gave multiple states for a dfa. (¬_¬)')\n print('Exiting. ╚(ಠ_ಠ)=┐\\n')\n raise SystemExit\n else:\n for row in rows[1:]:\n for i, cell in enumerate(row[1:]):\n key = (row[0], rows[0][i+1])\n for c in cell.split('|'):\n if c != '':\n transitions.setdefault(key, set()).add(c)\n if c not in states:\n print(f'Detected unknown state {c}. ಠ_ಠ')\n print('Done!')\n return transitions", "title": "" }, { "docid": "170002563e86b2901a6c63359a5d83b2", "score": "0.60591173", "text": "def transitions(self):\n transitions = numpy.zeros((self.n_conditions, self.n_conditions))\n for i, j in zip(self.trials, self.trials[1:]):\n transitions[i-1, j-1] += 1\n return transitions", "title": "" }, { "docid": "853f02caad924881001f83efa6c6a58c", "score": "0.60498476", "text": "def states(self):\n return set(\n chain.from_iterable(\n (inState, outState)\n for\n (inState, inputSymbol, outState, outputSymbol)\n in self._transitions\n )\n )", "title": "" }, { "docid": "f631c848ecddba395dad730ca733c6d1", "score": "0.5999628", "text": "def _get_next_states(self, state):\n next_states_mapping = {\n \"new\": [\"completed\"],\n \"completed\": []\n }\n\n return next_states_mapping[state]", "title": "" }, { "docid": "1b45d17e0d4d3b350d429391b9f93626", "score": "0.59731126", "text": "def get_transition_states_and_probs(self, state, action):\n if self.trans_prob == 1:\n inc = self.neighbors[action]\n nei_s = (state[0] + inc[0], state[1] + inc[1])\n if nei_s[0] >= 0 and nei_s[0] < self.height and nei_s[\n 1] >= 0 and nei_s[1] < self.width and self.grid[nei_s[0]][nei_s[1]] != 'x':\n return [(nei_s, 1)]\n else:\n # if the state is invalid, stay in the current state\n return [(state, 1)]\n else:\n action_states = self.__get_action_states(state)\n inc = self.neighbors[action]\n nei_s = (state[0] + inc[0], state[1] + inc[1])\n res = []\n\n if nei_s[0] >= 0 and nei_s[0] < self.height and nei_s[\n 1] >= 0 and nei_s[1] < self.width and self.grid[nei_s[0]][nei_s[1]] != 'x':\n for i in range(len(action_states)):\n if action_states[i][0] == action:\n res.append((action_states[i][1], self.trans_prob))\n else:\n res.append(\n (action_states[i][1], (1 - self.trans_prob) / (len(action_states) - 1)))\n else:\n # if the action is not valid, then return uniform distribution of the valid moves.\n for i in range(len(action_states)):\n res.append((action_states[i][1], 1.0 / len(action_states)))\n return res", "title": "" }, { "docid": "738f7de87838ed065772a903e68a6d92", "score": "0.5972289", "text": "def Transitions(self):\n all = {}\n for trans in self.transitions:\n all[trans] = all.get(trans, 0) + 1\n # We currently don't use the chain type.\n # TODO(evanm): make this a command-line option.\n # if trans & 0x30000000 != 0:\n # chain = ''\n # if trans & 0x10000000:\n # chain = 'start'\n # if trans & 0x20000000:\n # if len(chain) == 0:\n # chain = 'end'\n # else:\n # chain = ''\n # if len(chain) > 0:\n # edge['chain'] = chain\n return all", "title": "" }, { "docid": "1047aa4f80fbfdce22a04f89e383c617", "score": "0.5934085", "text": "def get_states(self):\n raise NotImplementedError", "title": "" }, { "docid": 
"0ebfdaa44c379eb42d55e6d1424cd99c", "score": "0.59268045", "text": "def retrieve_nice_state(states, transitions=10):\n print 'Retrieving nice state from engine to plot'\n for s in states:\n if len(s.views) == 1:\n state = s\n break\n else:\n state = states[0]\n for _ in xrange(transitions):\n state.transition_dim_hypers()\n return state", "title": "" }, { "docid": "e71ac352d66cf69a1bf48b5a7088548e", "score": "0.5876059", "text": "def getPoints(start_state, transitions):\n points = []\n current_state = start_state\n for transition in transitions:\n value_point = g.states[current_state][transition]['value']\n points.append(value_point)\n current_state = g.states[current_state][transition]['state']\n return points", "title": "" }, { "docid": "eef5b01d1e0fde3dcad5ceb1a684cd0c", "score": "0.5855674", "text": "def _get_transitions(\n self, state: NPDAStateT, input_symbol: str, stack_symbol: str\n ) -> Set[Tuple[str, NPDAStateT, str]]:\n transitions = set()\n if (\n state in self.transitions\n and input_symbol in self.transitions[state]\n and stack_symbol in self.transitions[state][input_symbol]\n ):\n for dest_state, new_stack_top in self.transitions[state][input_symbol][\n stack_symbol\n ]:\n transitions.add((input_symbol, dest_state, new_stack_top))\n return transitions", "title": "" }, { "docid": "6d57e1ea62ada7d18fd6ba9387e529e6", "score": "0.5838807", "text": "def get_states(self):\n states_list = reduce((lambda a_list, b_list: a_list + b_list),\n [list(symbols.values()) for symbols in self.delta.values()])\n\n _states = {self.initial_state} | set(self.delta.keys()) | reduce((lambda a, b: a | b), states_list)\n\n return _states", "title": "" }, { "docid": "fa4af9c324f94d90ace804fa149f9669", "score": "0.58317554", "text": "def next_transitions(self) -> Collection[Tuple[Character, \"Node\"]]:\n return list(self._symbol2child.items())", "title": "" }, { "docid": "c1b4c76a7d90ed022ef6fcf97394987f", "score": "0.5806438", "text": "def getTransitions(p):\n\ttransitions = []\n\tfor pos in xrange(len(p)):\n\t\tif pos == 0:\n\t\t\ttransitions.append(str(pos) + \".\" + p[pos])\n\n\t\telse:\n\t\t\tinsert = p[pos-1] + p[pos]\n\t\t\ttransitions.append(str(pos) + \".\" + insert)\n\n\treturn transitions", "title": "" }, { "docid": "c1b4c76a7d90ed022ef6fcf97394987f", "score": "0.5806438", "text": "def getTransitions(p):\n\ttransitions = []\n\tfor pos in xrange(len(p)):\n\t\tif pos == 0:\n\t\t\ttransitions.append(str(pos) + \".\" + p[pos])\n\n\t\telse:\n\t\t\tinsert = p[pos-1] + p[pos]\n\t\t\ttransitions.append(str(pos) + \".\" + insert)\n\n\treturn transitions", "title": "" }, { "docid": "cb3a77c0b7c585506a24cae60832df01", "score": "0.5790871", "text": "def getPrevStates(self, state, index):\n if state == 0 or (index == 0 and (state-2)%3 != 0):#begin state or non-delete at first index\n return []\n \n elif state == 1: #first insert state\n if index < 2:\n return [0]\n else: #first insert with chance of self as predecessor\n return range(2)\n \n elif state == 2: #first delete state\n return [0]\n\n elif state == 3: #first match state\n return [0, 1]\n\n elif state == len(self.stateorder)-1: #end state\n return [state-i for i in range (1, 4)]\n\n elif (state-2)%3 == 0: #any other delete state\n return [state-3, state-2]\n\n elif (state-2)%3 == 1: #any other match state (deleteN-1, matchN-1, insertN-1, deleteN, matchN)\n return [state-i for i in range(2,5)]\n\n elif (state-2)%3 == 2: #any other insert state\n return [state, state-1]", "title": "" }, { "docid": "396ef787a7e4423e04ad75050262d0de", "score": 
"0.5786214", "text": "def init_transition_map(self):\n count = 0\n for from_state in self._states_set:\n count += 1\n self._transition_map[from_state] = {}\n from_decoded = self.decode_state(from_state)\n for encoded_action in self._actions_set:\n if self._simulator.track.isFinish(\n from_decoded.get(\"x\"), from_decoded.get(\"y\")):\n # Do not record transition for the finish state\n continue\n\n if not self.will_crash(from_state, encoded_action):\n to_state = self.get_cur_state()\n self._transition_map[from_state][encoded_action] = to_state", "title": "" }, { "docid": "359594058b2a3db97bbf01111f915bce", "score": "0.5786028", "text": "def get_all_complete_episodes_transitions(self) -> List[Transition]:\n return self.transitions[:self.num_transitions_in_complete_episodes()]", "title": "" }, { "docid": "d09ca41a3323f40a22a92d6113c833c6", "score": "0.5782922", "text": "def getStateItemList(self, **kw):\n workflow_uri, transitions, transition_id = self.uri.rsplit('/',2)\n helper = self.getDocumentationHelper('DCWorkflowDocumentationHelper', workflow_uri)\n return [state for state in helper.getStateItemList(**kw)\n if transition_id in state.getDocumentedObject().transitions]", "title": "" }, { "docid": "eab465a35f2a8804758e935ce23d788e", "score": "0.5777934", "text": "def _get_tensors(self, transitions: List[Transition]):\n is_convolutional = len(self.dqn.input_shape[0]) == 4\n state_tensor = np.asarray(\n [\n np.asarray(t.state) / 255.0 if is_convolutional else np.array(t.state)\n for t in transitions\n ]\n )\n action_tensor = np.asarray([t.action for t in transitions], dtype=np.int64)\n reward_tensor = np.asarray([t.reward for t in transitions])\n next_state_tensor = np.asarray(\n [\n np.asarray(t.next_state) / 255.0\n if is_convolutional\n else np.asarray(t.next_state)\n for t in transitions\n if t.next_state is not None\n ]\n )\n\n non_terminal_mask = np.asarray(\n [t.next_state is not None for t in transitions], dtype=np.uint8\n ).nonzero()\n return (\n state_tensor,\n action_tensor,\n reward_tensor,\n next_state_tensor,\n non_terminal_mask,\n )", "title": "" }, { "docid": "abf0a8ab4662d22c31297324539c6beb", "score": "0.576342", "text": "def modeTransitions():\n return [ModeTransition(t) for t in MODEL_PROVIDER.getCollection(DEVICE, 'modeTransitions')]", "title": "" }, { "docid": "b18a00a7c08be8e9fde120f85752f2c4", "score": "0.57519543", "text": "def make_transitions(self, name_list):\r\n stringtype = type('')\r\n names = []\r\n transitions = {}\r\n for namestate in name_list:\r\n if type(namestate) is stringtype:\r\n transitions[namestate] = self.make_transition(namestate)\r\n names.append(namestate)\r\n else:\r\n transitions[namestate[0]] = self.make_transition(*namestate)\r\n names.append(namestate[0])\r\n return names, transitions", "title": "" }, { "docid": "0c504bd59ddfffc152856a50fd815542", "score": "0.5741533", "text": "def _get_possible_transitions(self, obj):\n fsm_fields = self._get_fsm_field_list()\n for field in fsm_fields:\n fsmfield = obj._meta.get_field_by_name(field)[0]\n transitions = fsmfield.get_all_transitions(self.model)\n for transition in transitions:\n if transition.source in [getattr(obj, field), '*']:\n yield transition", "title": "" }, { "docid": "49be943f3485bfb52a85f895a60baac3", "score": "0.5725498", "text": "def _fsm_get_transitions(self, obj, request, perms=None):\n user = request.user\n fsm_fields = self._get_fsm_field_list()\n\n transitions = {}\n for field in fsm_fields:\n transitions_func = 'get_available_user_{0}_transitions'.format(field)\n 
transitions_generator = getattr(obj, transitions_func)(user) if obj else []\n transitions[field] = self._filter_admin_transitions(transitions_generator)\n return transitions", "title": "" }, { "docid": "15893f813d4227bd0371d2445a93b577", "score": "0.57086194", "text": "def transition(self, states, selected_action):\n if hasattr(selected_action, 'action'):\n converted = selected_action.action\n else:\n converted = selected_action\n self.game.take_action(self.game.index_to_action(converted))\n new_state_key = self.game.state_key()\n if new_state_key not in states:\n return gt.State(\n states=states,\n game=self.game,\n model=self.model,\n rollout_policy=self.config.rollout_policy,\n opponent_rollout_policy=self.config.opponent_rollout_policy)\n return states[new_state_key]", "title": "" }, { "docid": "5ed9f8b211448790b8067565b7bd8351", "score": "0.5707025", "text": "def act(self, states):\n return [agent.act(state) for agent, state in zip(self.agents, states)]", "title": "" }, { "docid": "b40beb06b71071a8a1cb805e57c2f060", "score": "0.5698983", "text": "def get_transition_states_and_probs(self, state, action):\n if self.is_terminal(tuple(state)):\n return [(tuple(state), 1)]\n\n if self.trans_prob == 1:\n inc = self.neighbors[action]\n nei_s = [state[0] + inc[0], state[1] + inc[1]]\n if nei_s[0] >= 0 and nei_s[0] < self.height and nei_s[1] >= 0 and nei_s[1] < self.width and self.grid[int(nei_s[0])][int(nei_s[1])] != 'x':\n return [(nei_s, 1)]\n else:\n # if the state is invalid, stay in the current state\n return [(state, 1)]\n else:\n # [(0, 1), (0, -1), (1, 0), (-1, 0)]\n mov_probs = np.zeros([self.n_actions])\n mov_probs[action] = self.trans_prob\n mov_probs += (1-self.trans_prob)/self.n_actions\n\n for a in range(self.n_actions):\n inc = self.neighbors[a]\n nei_s = (state[0] + inc[0], state[1] + inc[1])\n if nei_s[0] < 0 or nei_s[0] >= self.height or nei_s[1] < 0 or nei_s[1] >= self.width or self.grid[int(nei_s[0])][int(nei_s[1])] == 'x':\n # if the move is invalid, accumulates the prob to the current state\n mov_probs[self.n_actions-1] += mov_probs[a]\n mov_probs[a] = 0\n\n res = []\n for a in range(self.n_actions):\n if mov_probs[a] != 0:\n inc = self.neighbors[a]\n nei_s = (state[0] + inc[0], state[1] + inc[1])\n res.append((nei_s, mov_probs[a]))\n return res", "title": "" }, { "docid": "cd65481021c120da576690f9266b8a87", "score": "0.5667819", "text": "def transitions(self, stack, with_non_terminals=True):\n for stack_node in reversed(stack):\n for transition, value in stack_node.dfa.transitions.items():\n if not isinstance(transition, ReservedString):\n # A token type\n yield transition, value\n\n if not stack_node.dfa.is_final:\n break", "title": "" }, { "docid": "d9369d7e4fb2a363c5629f5b483b70f6", "score": "0.56635267", "text": "def make_state_transition(self, n_states: int) -> np.ndarray:\n self.state_transition_ = np.ones((n_states, n_states)) / n_states\n\n return self.state_transition_", "title": "" }, { "docid": "b6ed2da172b5f0e95bd29797df38677c", "score": "0.56499314", "text": "def states(self):\n _, s_values = zip(*self.states_)\n return product(*s_values)", "title": "" }, { "docid": "d2723119e4f5dce8b2887d94cf3d332d", "score": "0.5614959", "text": "def dest_states(self, pos, state):\n\t\tassert state in STATES\n\t\tassert pos <= self.length or (pos == self.length + 1 and state is N)\n\t\tsources = set()\n\n\t\tif (state is not B or pos == 0) and state is not N:\n\t\t\tif pos == self.length:\n\t\t\t\tsources.add((self.length, I))\t\t\t\t\t\n\t\t\t\tsources.add((self.length + 
1, N))\n\t\t\t\treturn sources\n\n\t\t\tif pos == 0:\n\t\t\t\tsources.add((0, I))\n\t\t\t\tsources.add((1, M))\n\t\t\t\tsources.add((1, D))\n\t\t\t\treturn sources\n\n\t\t\telif pos > self.length or pos < 0:\n\t\t\t\treturn sources\n\n\t\t\tdest_pos = pos + 1\n\t\t\td_dest = (dest_pos, D)\n\t\t\tm_dest = (dest_pos, M)\n\t\t\ti_dest = (pos, I)\n\t\t\tsources.add(d_dest)\n\t\t\tsources.add(m_dest)\n\t\t\tsources.add(i_dest)\n\n\t\t\treturn sources\n\n\t\treturn sources", "title": "" }, { "docid": "86dba51ae8ea8fbd89ebd51a1a38c3f6", "score": "0.5613415", "text": "def get_target_actions(self, states):\n \n target_actions = torch.zeros(self.batch_size, self.num_agents, self.action_size)\n for i in range(self.num_agents):\n # next_action : [batch_size, action_size]\n next_action = self.agents_list[i].target_actor(states[:,i,:])\n target_actions[:,i,:] = next_action\n\n return target_actions", "title": "" }, { "docid": "0d80d36abf55a99695553904d4d96595", "score": "0.5580872", "text": "def states(self) -> Iterable[State]:\n pass", "title": "" }, { "docid": "339f3bb6a8ee8bc625d73ab56722b956", "score": "0.55682945", "text": "def get_edge_transitions(self) -> List[Tuple[int, int]]:\n\n edges = Mixin.edges\n floor = Element(\"FLOOR\").get_char()\n transitions = []\n for edge in edges:\n _strpos = self._xy2strpos(*edge)\n if self._board[0][_strpos] == floor:\n transitions.append(edge)\n return transitions", "title": "" }, { "docid": "3bd1888ccfc860ad1473846897338c51", "score": "0.55656284", "text": "def get_action(self, states):\n states = states.reshape(-1, 4, self.height, self.width)\n num_states = states.shape[0]\n actions = self.session.run(self.a, {self.state: states})\n return actions[0] if num_states == 1 else actions", "title": "" }, { "docid": "a6b84eeef3c6a773f5086aaf27113a96", "score": "0.5542426", "text": "def transitions(self, state):\n if state.current_player() == self._player_id:\n # Counterfactual reach probabilities exclude the best-responder's actions,\n # hence return probability 1.0 for every action.\n return [(action, 1.0) for action in state.legal_actions()]\n elif state.is_chance_node():\n return state.chance_outcomes()\n else:\n return list(self._policy.action_probabilities(state).items())", "title": "" }, { "docid": "043dbf714dc7bcf463ef9191a819bf8e", "score": "0.5525461", "text": "def get_states(self, n):\n return self.states[len(self.new_states):len(self.new_states) + n]", "title": "" }, { "docid": "b0a7fda801acfaaa2142892492e31936", "score": "0.5522665", "text": "def get_states(self, n=1):\n raise NotImplementedError()", "title": "" }, { "docid": "21d7a8d6dc62f95943d18d46a3b6ca60", "score": "0.550026", "text": "def feed_dict(self, transitions):\n obs_vect = self.online_net.obs_vectorizer\n res = {\n self.obses_ph: obs_vect.to_vecs([t['obs'] for t in transitions]),\n self.actions_ph: [t['model_outs']['actions'][0] for t in transitions],\n self.rews_ph: [self._discounted_rewards(t['rewards']) for t in transitions],\n self.terminals_ph: [t['new_obs'] is None for t in transitions],\n self.discounts_ph: [(self.discount ** len(t['rewards'])) for t in transitions],\n self.weights_ph: [t['weight'] for t in transitions]\n }\n new_obses = []\n for trans in transitions:\n if trans['new_obs'] is None:\n new_obses.append(trans['obs'])\n else:\n new_obses.append(trans['new_obs'])\n res[self.new_obses_ph] = obs_vect.to_vecs(new_obses)\n return res", "title": "" }, { "docid": "0662013acca184fa36486a23b68184ae", "score": "0.5500071", "text": "def UpdateFromTransitions(\n self,\n transitions: 
t.Iterable[Transition],\n ) -> None:\n pass", "title": "" }, { "docid": "183b2ee686b76616ba95159c828d1644", "score": "0.5500025", "text": "def getTransition(self, event):\r\n try:\r\n return self._eventToTransition[event]\r\n except KeyError:\r\n import logging\r\n logging.critical('Cannot find transition for event \"%s\". (Machine %s, State %s)',\r\n event, self.machineName, self.name)\r\n raise UnknownEventError(event, self.machineName, self.name)", "title": "" }, { "docid": "183b2ee686b76616ba95159c828d1644", "score": "0.5500025", "text": "def getTransition(self, event):\r\n try:\r\n return self._eventToTransition[event]\r\n except KeyError:\r\n import logging\r\n logging.critical('Cannot find transition for event \"%s\". (Machine %s, State %s)',\r\n event, self.machineName, self.name)\r\n raise UnknownEventError(event, self.machineName, self.name)", "title": "" }, { "docid": "13f6bd2bc9e9406a280419b296f70776", "score": "0.5487868", "text": "def getPossilbeTransitions(depth):\n transitions = []\n stuff = [1, 2]\n for subset in list(itertools.combinations_with_replacement(['left', 'right'], depth) ):\n transitions.append(list(subset))\n return transitions", "title": "" }, { "docid": "d5de475d6464abc4dde4e2f2c12392e0", "score": "0.5477846", "text": "def get(**kwargs):\n return QueryObject.get(Transition, **kwargs)", "title": "" }, { "docid": "07e632537e5c97ac20d557b046041c15", "score": "0.5476273", "text": "def actions(self, state):\n return state.moves", "title": "" }, { "docid": "07e632537e5c97ac20d557b046041c15", "score": "0.5476273", "text": "def actions(self, state):\n return state.moves", "title": "" }, { "docid": "8ecba9014379af8ec41cdcd7b0a53083", "score": "0.54406077", "text": "def call_transition_cbs(self, from_state, to_state):\n for (cb, args) in self._transition_cbs:\n cb(from_state, to_state, *args)", "title": "" }, { "docid": "d72fb6041e692511e94fd8de4d4d7255", "score": "0.54317576", "text": "def actions(self, state):\n return self.mdp[state].keys()", "title": "" }, { "docid": "df2663a43b7a8456792837fcd000577e", "score": "0.54291487", "text": "def _getTransition(self, machineConfig, transitionConfig):\r\n if machineConfig.name in self.machines and \\\r\n transitionConfig.name in self.machines[machineConfig.name][constants.MACHINE_TRANSITIONS_ATTRIBUTE]:\r\n return self.machines[machineConfig.name][constants.MACHINE_TRANSITIONS_ATTRIBUTE][transitionConfig.name]\r\n \r\n target = self.machines[machineConfig.name][constants.MACHINE_STATES_ATTRIBUTE][transitionConfig.toState.name]\r\n retryOptions = self._buildRetryOptions(transitionConfig)\r\n countdown = transitionConfig.countdown\r\n queueName = transitionConfig.queueName\r\n \r\n return Transition(transitionConfig.name, target, action=transitionConfig.action,\r\n countdown=countdown, retryOptions=retryOptions, queueName=queueName)", "title": "" }, { "docid": "df2663a43b7a8456792837fcd000577e", "score": "0.54291487", "text": "def _getTransition(self, machineConfig, transitionConfig):\r\n if machineConfig.name in self.machines and \\\r\n transitionConfig.name in self.machines[machineConfig.name][constants.MACHINE_TRANSITIONS_ATTRIBUTE]:\r\n return self.machines[machineConfig.name][constants.MACHINE_TRANSITIONS_ATTRIBUTE][transitionConfig.name]\r\n \r\n target = self.machines[machineConfig.name][constants.MACHINE_STATES_ATTRIBUTE][transitionConfig.toState.name]\r\n retryOptions = self._buildRetryOptions(transitionConfig)\r\n countdown = transitionConfig.countdown\r\n queueName = transitionConfig.queueName\r\n \r\n return 
Transition(transitionConfig.name, target, action=transitionConfig.action,\r\n countdown=countdown, retryOptions=retryOptions, queueName=queueName)", "title": "" }, { "docid": "fe9e7f5bc20ac2bcf25cd4601f2a06ce", "score": "0.5429004", "text": "def print_transitions(self):\n\t\tprint(\"Liste des transitions de l'automate : \")\n\t\tprint(self.get_transitions())", "title": "" }, { "docid": "e9704a49a460d416a5321501b0856123", "score": "0.54285073", "text": "def actions(self, state):\n list_actions = []\n for (nextState, length, action, reward) in self.map[state]:\n list_actions.append(action)\n return list_actions", "title": "" }, { "docid": "8fbc48631ab3bdfc940d53294eb17f03", "score": "0.5418652", "text": "def get_states( self ):\n return pretty_set(self._states)", "title": "" }, { "docid": "3aead14429b688c9da0aeeed8eca366e", "score": "0.541704", "text": "def create_state_transitions(self, baseviews: List, menus: List) -> Dict:\r\n state_transitions = {\r\n \"add\": {},\r\n \"del_role_pvm\": set(),\r\n \"del_views\": set(),\r\n \"del_perms\": set(),\r\n }\r\n for baseview in baseviews:\r\n add_all_flag = False\r\n new_view_name = baseview.class_permission_name\r\n permission_mapping = self._get_new_old_permissions(baseview)\r\n if baseview.previous_class_permission_name:\r\n old_view_name = baseview.previous_class_permission_name\r\n add_all_flag = True\r\n else:\r\n new_view_name = baseview.class_permission_name\r\n old_view_name = new_view_name\r\n for new_perm_name in baseview.base_permissions:\r\n if add_all_flag:\r\n old_perm_names = permission_mapping.get(new_perm_name)\r\n old_perm_names = old_perm_names or (new_perm_name,)\r\n for old_perm_name in old_perm_names:\r\n self._add_state_transition(\r\n state_transitions,\r\n old_view_name,\r\n old_perm_name,\r\n new_view_name,\r\n new_perm_name,\r\n )\r\n else:\r\n old_perm_names = permission_mapping.get(new_perm_name) or set()\r\n for old_perm_name in old_perm_names:\r\n self._add_state_transition(\r\n state_transitions,\r\n old_view_name,\r\n old_perm_name,\r\n new_view_name,\r\n new_perm_name,\r\n )\r\n self._update_del_transitions(state_transitions, baseviews)\r\n return state_transitions", "title": "" }, { "docid": "a5923a433ff7cce23bdac2430dc921fc", "score": "0.5414098", "text": "def states(self):\n for y in range(self.height):\n for x in range(self.width):\n s = State(y, x)\n if s in self.cliff_states:\n continue\n yield s", "title": "" }, { "docid": "94741704662cb27c5b8b6a668aa2c718", "score": "0.54031277", "text": "def transition(self):\n return self._transition", "title": "" }, { "docid": "60b57f1e85dc6fe62daeeb4f663fe885", "score": "0.5400679", "text": "def actions(self, state):\r\n return state.moves", "title": "" }, { "docid": "8a0713016879cf42ebf539e5e296ee04", "score": "0.53836054", "text": "def get_states(states: dict):\n reference = {}\n for domain, frame in states.items():\n for slot, values in frame['slot_values'].items():\n if slot != \"requested_slots\":\n reference[slot] = values\n return reference", "title": "" }, { "docid": "ebe1e7c6e113c1be2b011d80e09aba93", "score": "0.53810614", "text": "def next_states(state):\n i = state.index(0)\n if i%3 >= 1:\n yield swap(state, i, i-1)\n if i%3 <= 1:\n yield swap(state, i, i+1)\n if i//3 >= 1:\n yield swap(state, i, i-3)\n if i//3 <= 1:\n yield swap(state, i, i+3)", "title": "" }, { "docid": "ee6c48956fe4c9f217b3c666e32b3089", "score": "0.53761137", "text": "def simulate(transitions: Dict[str, str]) -> List[str]:\n initial_state = 'A'\n random.seed(123)\n visited_states = 
[]\n for i in range(2000):\n visited_states.append(initial_state)\n value = transitions.get(initial_state)\n initial_state = value[random.randint(0, len(value)-1)]\n return visited_states", "title": "" }, { "docid": "1f96b4ff21f0a54f1d3431966cae8d6e", "score": "0.5370913", "text": "def get_states(self, n=1):\n states = {\n 'directions': self._spins_history[-n:],\n 'positions': self._positions_history[-n:],\n 'colors': self._colors_history[-n:],\n 'magnetization': self._magnetization_history[-n:],\n 'time': self._time_history[-n:],\n 'temperature': self._temperature_hist[-n:]\n }\n return states", "title": "" }, { "docid": "7f1b240f2729b517c8441a6eae1dbd9e", "score": "0.5365702", "text": "def from_transitions(transitions, start_state, accept_states):\n\n lhs_sizes = set()\n rhs_sizes = set()\n for lhs, rhs in transitions:\n lhs_sizes.add(len(lhs))\n rhs_sizes.add(len(rhs))\n \n if len(lhs_sizes) != 1:\n raise ValueError('All left-hand sides must have the same size')\n [lhs_size] = lhs_sizes\n if len(rhs_sizes) != 1:\n raise ValueError('All right-hand sides must have the same size')\n [rhs_size] = rhs_sizes\n\n store_types = []\n\n rhs_vocab = [set() for si in range(rhs_size)]\n for lhs, rhs in transitions:\n for si, x in enumerate(rhs):\n rhs_vocab[si].update(rhs[si])\n \n for si in range(lhs_size):\n if si+1 < rhs_size and len(rhs_vocab[si+1]) > 0 and rhs_vocab[si+1].issubset({'L', 'S', 'R'}):\n store_types.append(TAPE)\n rhs_size -= 1\n del rhs_vocab[si+1]\n else:\n store_types.append(BASE)\n \n if lhs_size == rhs_size:\n pass\n elif lhs_size-1 == rhs_size:\n store_types[1] = STREAM\n else:\n raise ValueError(\"Right-hand sides must either be same size or one smaller than left-hand sides\")\n\n m = Machine(store_types, state=0, input=1)\n\n m.set_start_state(start_state)\n m.add_accept_states(accept_states)\n\n for lhs, rhs in transitions:\n m.add_transition(lhs, rhs)\n\n return m", "title": "" }, { "docid": "10f282eb5963c3216243e80622efac10", "score": "0.5349847", "text": "def get_all_possible_states(self, state, colour):\n jump_states =[]\n exit_states = []\n move_states = []\n\n for action in self.get_all_possible_moves(state, colour):\n move = self.get_move_from_tuple(action)\n temp_state = state.copy()\n self.update_node(temp_state, colour, move)\n if move[0]== \"JUMP\":\n jump_states.append((temp_state, action))\n elif move[0]== \"MOVE\":\n move_states.append((temp_state, action))\n elif move[0]== \"EXIT\":\n move_states.append((temp_state, action))\n elif move[0]== \"PASS\":\n move_states.append((temp_state, action))\n #ordered to promote high scoring states first \n return exit_states + jump_states + move_states", "title": "" }, { "docid": "492d7ae7f59e080353d4e439bbd2a6b5", "score": "0.53367645", "text": "def transition_states(self, curr_state):\n\n # start with the derivatives --> create a state where all the magnitudes have changed based on the derivatives\n # only change the derivative if we are at a landmark\n\n subs = defaultdict(list)\n for quantity, values in curr_state.items():\n\n # should add the same, except when?\n subs[quantity].append(values)\n\n mag = values['magnitude']\n der = values['derivative']\n\n # check if the current derivative is 0 and change it\n if der == '0':\n subs[quantity].append({'magnitude': mag, 'derivative': '+'})\n\n subs[quantity].append({'magnitude': mag, 'derivative': '-'})\n\n\n # if there already exists a derivative, change the magnitude in its direction\n # if magnitude reaches landmark, set derivative to 0\n else:\n # get the index of 
the current magnitude\n ind = self.quantities[quantity].index(mag)\n\n # change it according to positive derivative\n if der == '+':\n if quantity == 'I':\n new_mag = self.quantities[quantity][min(1, ind + 1)]\n else:\n new_mag = self.quantities[quantity][ind + 1]\n elif der == '-':\n new_mag = self.quantities[quantity][ind - 1]\n\n # check if new mag falls on landmark, if so derivative becomes 0, else stays on same\n if new_mag in self.landmarks:\n # get which one it is\n subs[quantity].append({'magnitude': new_mag, 'derivative': '0'})\n subs[quantity].append({'magnitude': new_mag, 'derivative': '-'})\n\n else:\n subs[quantity].append({'magnitude': new_mag, 'derivative': der})\n\n # change derivative back to 0\n subs[quantity].append({'magnitude': mag, 'derivative': '0'})\n\n transition_states = []\n\n for perm in itertools.product(*subs.values()):\n\n # Create a new state object\n new_state = {}\n for i, quantity in enumerate(self.quantities):\n new_state[quantity] = perm[i]\n\n # check if the generated state is among the filtered (possible) states\n if new_state in self.valid_states and new_state != curr_state:\n transition_states.append(new_state)\n\n valid_trans = copy.deepcopy(transition_states)\n # check if transition is valid\n for state in transition_states:\n\n # determine whether points comes from or goes to landmark\n from_land = self.from_landmark(curr_state, state, True)\n to_land = self.to_landmark(curr_state, state)\n\n for quan, values in state.items():\n mag = values['magnitude']\n der = values['derivative']\n\n # check whether the magnitude changed\n mag_change = False\n if mag != curr_state[quan]['magnitude']:\n mag_change = True\n\n # check whether the derivative changed\n der_change = False\n if der != curr_state[quan]['derivative']:\n der_change = True\n\n # if they are both changed, the magnitude has to end up in a landmark\n if mag_change and der_change:\n if mag in self.landmarks:\n\n # if the new point is indeed a landmark, then the derivative is allowed to change\n if not (mag == '0' or (quan != 'I' and mag == 'max')):\n valid_trans.remove(state)\n\n # final check for ambiguous state transitions\n if quan == 'I':\n # get index of derivative of both states for volume\n curr_deriv = self.derivatives.index(curr_state['V']['derivative'])\n\n next_deriv = self.derivatives.index(state['V']['derivative'])\n\n\n # if derivative of inflow is positive, next derivative of volume can not be more\n # negative compared to first, except when on max\n if curr_state[quan]['derivative'] == '+' or curr_state[quan]['derivative'] == '0':\n if next_deriv < curr_deriv:\n # when on max, derivative can be lower than previous (since it will no longer increase\n # on max)\n if state['V']['magnitude'] == 'max' and state['V']['derivative'] == '0':\n continue\n\n try:\n valid_trans.remove(state)\n except:\n pass\n\n if curr_state[quan]['derivative'] == '-' or curr_state[quan]['derivative'] == '0':\n if next_deriv > curr_deriv:\n # when on 0, derivative can be higher than previous (since it will no longer decrease on 0)\n if state['V']['magnitude'] == '0' and state['V']['derivative'] == '0':\n continue\n try:\n valid_trans.remove(state)\n except:\n pass\n\n\n # finally, check whether a point is not coming from a landmark and going to a landmark, since this\n # is impossible\n if not from_land ^ to_land:\n try:\n valid_trans.remove(state)\n except:\n pass\n\n # take all valid transitions that are left\n transition_states = valid_trans\n\n # only point to interval changes can occur (except for 
exogenous changes)\n if any(self.from_landmark(curr_state, state, False)\n for state in transition_states):\n transition_states = [state for state in transition_states\n if self.from_landmark(curr_state, state, True)]\n\n return transition_states", "title": "" }, { "docid": "ca3d812ff27a82c908453739bc63c758", "score": "0.53359854", "text": "def __get_action_states(self, state):\n a_s = []\n for i in range(len(self.actions)):\n inc = self.neighbors[i]\n a = self.actions[i]\n nei_s = (state[0] + inc[0], state[1] + inc[1])\n if nei_s[0] >= 0 and nei_s[0] < self.height and nei_s[1] >= 0 and nei_s[\n 1] < self.width and self.grid[nei_s[0]][nei_s[1]] != 'x':\n a_s.append((a, nei_s))\n return a_s", "title": "" }, { "docid": "89e9198ef08d96d56e232b99518795ae", "score": "0.53345406", "text": "def _get_batch(self):\n\n start_states, actions, rewards, end_states, dones = [], [], [], [], []\n\n for transition in self.transitions:\n start_states.append(transition[0])\n actions.append([transition[1]])\n rewards.append([transition[2] / 100.0])\n end_states.append(transition[3])\n dones.append([0.0 if transition[4] else 1.0])\n\n self.transitions = []\n\n return (\n torch.tensor(start_states, dtype=torch.float),\n torch.tensor(actions),\n torch.tensor(rewards, dtype=torch.float),\n torch.tensor(end_states, dtype=torch.float),\n torch.tensor(dones, dtype=torch.float),\n )", "title": "" }, { "docid": "e9751ef91223a2b291d583b68b94dbb3", "score": "0.53334576", "text": "def get_transitions(self):\n newdipole=[]\n for line in self:\n newdipole.append(np.ravel(line[0,0]*line[0,1:]))\n return np.array(newdipole)", "title": "" }, { "docid": "8918fb0c878ba0c5350263ba83fef57b", "score": "0.532645", "text": "def get_all_states(self):\n\t\tret = []\n\t\tret.append((0, B))\n\t\tret.append((0, I))\n\t\tfor k in range(1, self.length + 1):\n\t\t\tret.append((k, M))\n\t\t\tret.append((k, D))\n\t\t\tret.append((k, I))\n\t\tret.append((self.length + 1, N))\n\t\tret.reverse()\n\t\treturn ret", "title": "" }, { "docid": "e5703497d3bf83df25d7a8f2345ac42b", "score": "0.5325383", "text": "def UpdateFromTransitions(\n self,\n transitions: t.Iterable[Transition],\n ) -> None:\n states, actions, rewards, new_states, reward_mask = (\n self.CombineTransitions(transitions))\n # See: https://en.wikipedia.org/wiki/Q-learning\n # axis=1 because action is always assumed to be 1-dimensional.\n new_action_values = numpy.amax(self.GetValues(new_states), axis=1)\n learn_new_action_values = (\n rewards + self._gamma * new_action_values * reward_mask)\n\n if self._alpha < 0.9999999:\n values = self.GetValues(states)\n old_action_values = self.GetActionValues(values, actions)\n self._SetActionValues(\n states, actions,\n ((1.0 - self._alpha) * old_action_values\n + self._alpha * learn_new_action_values),\n values=values)\n else:\n self._SetActionValues(states, actions, learn_new_action_values)", "title": "" }, { "docid": "ba6bd4a092644dad47f93605b94d217a", "score": "0.5322368", "text": "def states(self):\n return deepcopy(self._states)", "title": "" }, { "docid": "45e74b664efc50ec87e25f7673f342d6", "score": "0.53184295", "text": "def add_transitions(self, transitions):\n for t in transitions:\n self.add_transition(t)", "title": "" }, { "docid": "22ee3dceb19d586d347c0b0fc50ded1f", "score": "0.5313657", "text": "def actions(self, state):\n row_zero, col_zero = find(0, state)\n available_actions = []\n\n if row_zero > 0:\n available_actions.append(state[row_zero - 1][col_zero])\n if row_zero < 2:\n available_actions.append(state[row_zero + 
1][col_zero])\n if col_zero > 0:\n available_actions.append(state[row_zero][col_zero - 1])\n if col_zero < 2:\n available_actions.append(state[row_zero][col_zero + 1])\n\n return available_actions", "title": "" }, { "docid": "bacd7b83461d68f251229e430acc8d7b", "score": "0.5295652", "text": "def get_next_states_values(self, state):\n return", "title": "" }, { "docid": "e7ba4c8ba30bdf75125bc46d7fdce806", "score": "0.5290235", "text": "def pull_states():\n # states data\n for state in STATES:\n pull_state(state)", "title": "" }, { "docid": "b1aa639f637a780ac90087f5e2a777af", "score": "0.5269242", "text": "def states(self):\n if self.is_set(\"states\"):\n return self._get(\"states\")\n else:\n return {}", "title": "" } ]
dd87bc8d0603a6b8d9b6379643382c4a
Test backing up the Plone data into a single file
[ { "docid": "1efb6412d4c9eb1b80b09d992a3a46ed", "score": "0.0", "text": "def test_backup_combined(self):\n tmp_path = self.create_dir('combined')\n wheelbarrow = Wheelbarrow(\n tmp_path,\n os.path.join('/', 'opt', 'current-plone', 'zeocluster'),\n verbosity=0,\n combined=True\n )\n wheelbarrow.backup()\n\n backup_date = datetime.now().strftime(\"%Y-%m-%d\")\n self.assertTrue(\n os.path.isfile(os.path.join(tmp_path, backup_date + '.tar.gz')),\n msg='Backup file was not created'\n )\n backup_dir = os.path.join(tmp_path, backup_date)\n os.makedirs(backup_dir)\n call(\n ['tar', '-zxmf', os.path.join('..', backup_date + '.tar.gz')],\n cwd=backup_dir\n )\n self.assertTrue(\n os.path.isfile(os.path.join(backup_dir, 'Data.fs')),\n msg='Backup Data.fs file not included in backup'\n )\n self.assertTrue(\n os.path.isfile(os.path.join(backup_dir, '.layout')),\n msg='Backup blob storage layout file not included in backup'\n )\n self.assertTrue(\n 'tmp' in os.listdir(backup_dir),\n msg='Backup of blob storage incomplete'\n )\n self.remove_dir(tmp_path)", "title": "" } ]
[ { "docid": "94c035edcc158e9cec5a0d1ae38fb429", "score": "0.6593598", "text": "def test_to_file(self):\n fd, fp = mkstemp()\n close(fd)\n pt = qdb.metadata_template.prep_template.PrepTemplate.create(\n self.metadata, self.test_study, self.data_type)\n pt.to_file(fp)\n self._clean_up_files.append(fp)\n with open(fp, newline=None) as f:\n obs = f.read()\n self.assertEqual(obs, EXP_PREP_TEMPLATE.format(pt.id))\n\n # cleaning\n qdb.metadata_template.prep_template.PrepTemplate.delete(pt.id)", "title": "" }, { "docid": "63fc752db638d455f7011455ed90ca2e", "score": "0.6584866", "text": "def test_dump_load_data(self):\r\n # Initialize the test data\r\n create_folder_structure(1,1)\r\n fileobj = self.create_filer_file(Folder.objects.all()[0])\r\n jdata = StringIO()\r\n\r\n # Dump the current data\r\n fobj = tempfile.NamedTemporaryFile(suffix=\".json\", delete=False)\r\n call_command(\"dumpdata\", \"filer\", stdout=jdata, indent=3)\r\n\r\n # Delete database and filesystem data\r\n complete = os.path.join(fileobj.file.storage.location, fileobj.path)\r\n os.unlink(complete)\r\n fileobj.delete()\r\n\r\n # Dump data to json file\r\n fobj.write(jdata.getvalue().encode('utf-8'))\r\n fobj.seek(0)\r\n\r\n # Load data back\r\n call_command(\"loaddata\", fobj.name, stdout=jdata)\r\n\r\n # Database data is restored\r\n self.assertEqual(Folder.objects.all().count(), 1)\r\n self.assertEqual(File.objects.all().count(), 1)\r\n self.assertEqual(File.objects.all()[0].original_filename, self.image_name)\r\n\r\n fileobj = File.objects.all()[0]\r\n complete = os.path.join(fileobj.file.storage.location, fileobj.path)\r\n # Filesystem data is not\r\n self.assertFalse(os.path.exists(complete))", "title": "" }, { "docid": "863fd4013f01ff9ddb58bc7d3fa004ee", "score": "0.653679", "text": "def test_save_and_read_data():\n test_data = src.training_data.PlankData()\n test_data.clean_data()\n test_data.add(dict(datum=\"11.10.1975\", name=\"Thomas\"))\n test_data.add(dict(datum=\"10.09.2012\", name=\"Tim\"))\n test_data.add(dict(datum=\"10.11.1980\", name=\"Andrea\"))\n file_name = test_data.save()\n assert (os.path.exists(file_name) == True)\n\n assert (test_data.size() == 3)", "title": "" }, { "docid": "839cbb83cce4d9284d97ade70a241b7f", "score": "0.64743096", "text": "def test_write_data(self):\n # if you need more tests, add sections to test.conf & specify them here\n # todo: incorporate multiple-file scenarios\n # todo: allow incremental file suffixes when files named the same\n for section_name, num_records, fpath in [\n (\"test_record_i18n\", 74, \"fixed_test_raiffeisen_01.csv\"),\n (\"test_record_headers\", 74, \"fixed_test_headers.csv\"),\n ]:\n config = fix_conf_params(self.cp, section_name)\n b = B2YBank(config)\n for f in b.get_files():\n output_data = b.read_data(f)\n self.assertEqual(len(output_data), num_records)\n result_file = b.write_data(f, output_data)\n # check the file is where we expect it to be\n expected_file = abspath(join(self.test_data, fpath))\n self.assertTrue(exists(expected_file))\n self.assertEqual(expected_file, result_file)\n # todo: check actual contents are what we expect\n os.unlink(expected_file)", "title": "" }, { "docid": "2b4e3ee7f9e5b8402d3113e0fd4ebbba", "score": "0.64500505", "text": "def test_dump_load_data_content(self):\r\n with SettingsOverride(filer_settings, FILER_DUMP_PAYLOAD=True):\r\n # Initialize the test data\r\n create_folder_structure(1,1)\r\n fileobj = self.create_filer_file(Folder.objects.all()[0])\r\n jdata = StringIO()\r\n\r\n # Dump the current data\r\n fobj = 
tempfile.NamedTemporaryFile(suffix=\".json\", delete=False)\r\n call_command(\"dumpdata\", \"filer\", stdout=jdata, indent=3)\r\n\r\n # Delete database and filesystem data and\r\n complete = os.path.join(fileobj.file.storage.location, fileobj.path)\r\n os.unlink(complete)\r\n fileobj.delete()\r\n\r\n # Dump data to json file\r\n fobj.write(jdata.getvalue().encode('utf-8'))\r\n fobj.seek(0)\r\n\r\n # Load data back\r\n call_command(\"loaddata\", fobj.name, stdout=jdata)\r\n\r\n # Database data is restored\r\n self.assertEqual(Folder.objects.all().count(), 1)\r\n self.assertEqual(File.objects.all().count(), 1)\r\n self.assertEqual(File.objects.all()[0].original_filename, self.image_name)\r\n\r\n fileobj = File.objects.all()[0]\r\n complete = os.path.join(fileobj.file.storage.location, fileobj.path)\r\n # Filesystem data too!\r\n self.assertTrue(os.path.exists(complete))", "title": "" }, { "docid": "eb65671f78f27d78b20eb757a6c02a1b", "score": "0.6431671", "text": "def test_dump_data_base(self):\r\n fileobj = self.create_filer_file()\r\n jdata, jdata2 = StringIO(), StringIO()\r\n call_command(\"dumpdata\", \"filer\", stdout=jdata)\r\n fileobj.delete()\r\n call_command(\"dumpdata\", \"filer\", stdout=jdata2)\r\n data = json.loads(jdata.getvalue())\r\n data2 = json.loads(jdata2.getvalue())\r\n self.assertEqual(len(data), 1)\r\n self.assertEqual(len(data2), 0)", "title": "" }, { "docid": "432b14f438651b6153a115c3a85971b8", "score": "0.6303791", "text": "def test_write_data(dbh):\n mock_path = Path(\"/tmp/test.json\")\n if mock_path.exists():\n os.remove(mock_path)\n assert not mock_path.exists()\n assert dbh.write_data(mock_path)\n assert mock_path.is_file()", "title": "" }, { "docid": "8e41d0294963cc258cf192ba10525b92", "score": "0.6272746", "text": "def test_asset_database_export(self):\n pass", "title": "" }, { "docid": "4ff85979326fb2c2e4114301b4186edf", "score": "0.623599", "text": "def testFileOutput(self):\n request = rdfvalue.RekallRequest()\n request.plugins = [\n # Run procdump to create one file.\n rdfvalue.PluginRequest(\n plugin=\"procdump\", args=dict(pid=2860))]\n\n with test_lib.Instrument(transfer.MultiGetFile,\n \"StoreStat\") as storestat_instrument:\n self.LaunchRekallPlugin(request)\n # Expect one file to be downloaded.\n self.assertEqual(storestat_instrument.call_count, 1)", "title": "" }, { "docid": "353d25f2abc322e940c9e521e926ef78", "score": "0.62145245", "text": "def testDumpData(self):\r\n self.assertCommandSucceeds(\"dumpdata\")", "title": "" }, { "docid": "a7b8ca243939021570545e9fb009f215", "score": "0.61173964", "text": "def testPublishJSONCreate(self):\n\n # Set up uploading and write them elsewhere since the test deletes them.\n self.uploadPublishInfo = True\n self.uploadPublishDir = self.testDir\n\n # Insert some DBSFiles\n testFileChildA = DBSBufferFile(lfn = \"/this/is/a/child/lfnA\", size = 1024, events = 20)\n testFileChildA.setAlgorithm(appName = \"cmsRun\", appVer = \"CMSSW_2_1_8\",\n appFam = \"RECO\", psetHash = \"GIBBERISH\",\n configContent = \"MOREGIBBERISH\")\n testFileChildB = DBSBufferFile(lfn = \"/this/is/a/child/lfnB\", size = 1024, events = 20)\n testFileChildB.setAlgorithm(appName = \"cmsRun\", appVer = \"CMSSW_2_1_8\",\n appFam = \"RECO\", psetHash = \"GIBBERISH\",\n configContent = \"MOREGIBBERISH\")\n testFileChildC = DBSBufferFile(lfn = \"/this/is/a/child/lfnC\", size = 1024, events = 20)\n testFileChildC.setAlgorithm(appName = \"cmsRun\", appVer = \"CMSSW_2_1_8\",\n appFam = \"RECO\", psetHash = \"GIBBERISH\",\n configContent = 
\"MOREGIBBERISH\")\n\n testFileChildA.setDatasetPath(\"/Cosmics/USER-DATASET1-v1/USER\")\n testFileChildB.setDatasetPath(\"/Cosmics/USER-DATASET1-v1/USER\")\n testFileChildC.setDatasetPath(\"/Cosmics/USER-DATASET2-v1/USER\")\n\n testFileChildA.create()\n testFileChildB.create()\n testFileChildC.create()\n\n testFile = DBSBufferFile(lfn = \"/this/is/a/lfn\", size = 1024, events = 10)\n testFile.setAlgorithm(appName = \"cmsRun\", appVer = \"CMSSW_2_1_8\",\n appFam = \"RECO\", psetHash = \"GIBBERISH\",\n configContent = \"MOREGIBBERISH\")\n testFile.setDatasetPath(\"/Cosmics/CRUZET09-PromptReco-v1/RECO\")\n testFile.create()\n\n testFileChildA.addParents([testFile[\"lfn\"]])\n testFileChildB.addParents([testFile[\"lfn\"]])\n testFileChildC.addParents([testFile[\"lfn\"]])\n\n myThread = threading.currentThread()\n self.dbsDaoFactory = DAOFactory(package=\"WMComponent.DBS3Buffer\", logger=myThread.logger, dbinterface=myThread.dbi)\n self.insertWorkflow = self.dbsDaoFactory(classname=\"InsertWorkflow\")\n workflowID = self.insertWorkflow.execute(requestName='TestWorkload', taskPath='TestWorkload/Production',\n blockMaxCloseTime=100, blockMaxFiles=100,\n blockMaxEvents=100, blockMaxSize=100)\n myThread.dbi.processData(\"update dbsbuffer_file set workflow=1 where id < 4\")\n\n # Run the test again\n self.testA_BasicFunctionTest()\n\n # Reset default values\n self.uploadPublishInfo = False\n self.uploadPublishDir = None\n\n # Make sure the files are there\n self.assertTrue(os.path.exists( os.path.join(self.testDir, 'TestWorkload_publish.json')))\n self.assertTrue(os.path.getsize(os.path.join(self.testDir, 'TestWorkload_publish.json')) > 100)\n self.assertTrue(os.path.exists( os.path.join(self.testDir, 'TestWorkload_publish.tgz' )))\n\n return", "title": "" }, { "docid": "37efbfccee531f269f05607dda644d91", "score": "0.61070186", "text": "def test_export_bundle(self):\n pass", "title": "" }, { "docid": "07f294d0770af690795a7790249e0d36", "score": "0.609392", "text": "def backup_file_fixture(self):\n self.backup_directory_fixture()\n\n from datetime import date\n name = self.settings['anuket.brand_name']\n directory = self.settings['anuket.backup_directory']\n today = date.today().isoformat()\n filename = '{0}-{1}.sql.bz2'.format(name, today)\n self.file_fixture_path = os.path.join(directory, filename)\n backup_file = open(self.file_fixture_path, 'w')\n backup_file.close()", "title": "" }, { "docid": "d52a125173395fe1a13d97d71d5ede0a", "score": "0.6093293", "text": "def test_db2file(self):\n db_message = self.create_message()\n db_message_data = db_message.message_data\n settings.MAILER_STORAGE_BACKEND = 'django_yubin.storage_backends.FileStorageBackend'\n\n db2file()\n\n file_message = Message.objects.get(pk=db_message.pk)\n self.assertEqual(file_message.message_data, db_message_data)\n\n settings.MAILER_STORAGE_BACKEND = 'django_yubin.storage_backends.DatabaseStorageBackend'\n self.assertFalse(Message.objects.filter(storage=settings.MAILER_STORAGE_BACKEND).exists())", "title": "" }, { "docid": "905f22c6156fdbca18d8602f84f9bdaf", "score": "0.60883987", "text": "def test_export_plasma_source(self):\n\n test_plasma = paramak.Plasma()\n\n os.system(\"rm plasma.stp\")\n\n test_plasma.export_stp(\"plasma.stp\")\n\n assert Path(\"plasma.stp\").exists()\n os.system(\"rm plasma.stp\")", "title": "" }, { "docid": "dc78487bbeae6eda7fbe177e5f90540a", "score": "0.60801506", "text": "def testSmallWrite(self):\n now = time.asctime()\n filename = TESTNAME + \"/file.2.\" + \"_\".join(now.split()) + \".tmp\"\n 
self.file = self.kfsClient.create(filename)\n assert self.file is not None\n part1 = self.datamap.read(139)\n res = self.file.write(part1)\n assert res is None\n self.file.sync()\n\n filename2 = TESTNAME + \"/file.3.\" + \"_\".join(now.split()) + \".tmp\"\n self.file2 = self.kfsClient.create(filename2)\n assert self.file2 is not None\n part3 = self.datamap.read(128)\n res = self.file2.write(part3)\n assert res is None\n self.file2.sync()\n \n part2 = self.datamap.read(22)\n res = self.file.write(part2) \n self.file.close()\n self.file2.close()\n msg = part1 + part2\n self.file = self.kfsClient.open(filename, 'r')\n res = self.file.data_verify(msg)\n assert res == 1\n data1 = self.file.read(139)\n assert len(data1) == 139\n data2 = self.file.read(22)\n assert len(data2) == 22\n data = data1 + data2\n assert msg == data", "title": "" }, { "docid": "45747721241b7d6f193064711fdc8eb9", "score": "0.6054459", "text": "def test_000_upload_files_from_disk( self ):\n self.logout()\n self.login( email='tst@bx.psu.edu' )\n history1 = galaxy.model.History.query().order_by( desc( galaxy.model.History.table.c.create_time ) ).first()\n self.upload_file( '1.bed' )\n hda1 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda1 is not None, \"Problem retrieving hda1 from database\"\n self.verify_dataset_correctness( '1.bed', hid=str( hda1.hid ) )\n self.upload_file( '2.bed', dbkey='hg17' )\n hda2 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda2 is not None, \"Problem retrieving hda2 from database\"\n self.verify_dataset_correctness( '2.bed', hid=str( hda2.hid ) )\n self.upload_file( '3.bed', dbkey='hg17', ftype='bed' )\n hda3 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda3 is not None, \"Problem retrieving hda3 from database\"\n self.verify_dataset_correctness( '3.bed', hid=str( hda3.hid ) )\n self.upload_file( '4.bed.gz', dbkey='hg17', ftype='bed' )\n hda4 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda4 is not None, \"Problem retrieving hda4 from database\"\n self.verify_dataset_correctness( '4.bed', hid=str( hda4.hid ) )\n self.upload_file( '1.scf', ftype='scf' )\n hda5 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda5 is not None, \"Problem retrieving hda5 from database\"\n self.verify_dataset_correctness( '1.scf', hid=str( hda5.hid ) )\n self.upload_file( '1.scf.zip', ftype='binseq.zip' )\n hda6 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda6 is not None, \"Problem retrieving hda6 from database\"\n self.verify_dataset_correctness( '1.scf.zip', hid=str( hda6.hid ) )\n self.delete_history( id=str( history1.id ) )", "title": "" }, { "docid": "e0944ae39e5ea2af7bc0864ef5090ef4", "score": "0.60246867", "text": "def test_3_save_to_2(self):\n Base.save_to_file([])\n with open('Base.json', 'r') as f:\n self.assertEqual(f.read(), \"[]\")", "title": "" }, { "docid": "219dff8a8e7724775a3976c218e78d3b", "score": "0.60196084", "text": "def test_files():\n\n # TODO: carregar automaticamente 
arquivos do diretorio data\n here = os.path.abspath(os.path.dirname(__file__))\n DATA_FOLDER = os.path.join(here, './data')\n\n # find all files in data folder\n files = []\n for path in os.listdir(DATA_FOLDER):\n full_path = os.path.join(DATA_FOLDER, path)\n if os.path.isfile(full_path):\n files.append(full_path)\n\n # Tenta abrir todos arquivos para ver se da algum erro\n for file in files:\n print('Opening: {}'.format(file))\n title, serial_number, header, extra = hobo.get_info(file)\n table = hobo.get_data(file)\n proc_extra = hobo.process_details(extra)\n\n\n # Foi tudo ok. Lanca um True - não precisa...just in case.\n assert True", "title": "" }, { "docid": "b52a6fb394cbf48213a860ff52a0f4da", "score": "0.6017931", "text": "def test_create_sprayday_druid_json_file(self):\n queryset = SprayDay.objects.order_by(\"spray_date\")[:10]\n\n try:\n os.remove(\"/tmp/somefile.json\")\n except OSError:\n pass\n\n lines = []\n for record in queryset_iterator(queryset, 1000):\n data = SprayDayDruidSerializer(record).data\n line = JSONRenderer().render(data)\n lines.append(line)\n\n expected_content = b\"\\n\".join(lines)\n\n default_file_storage = \"django.core.files.storage.FileSystemStorage\"\n with self.settings(\n DEFAULT_FILE_STORAGE=default_file_storage, MEDIA_ROOT=\"/tmp/\"\n ):\n result = create_sprayday_druid_json_file(\n queryset=queryset, filename=\"somefile.json\"\n )\n self.assertEqual(result, \"/tmp/somefile.json\")\n with open(result, \"r\") as f:\n content = f.read()\n self.assertEqual(content.encode(), expected_content)", "title": "" }, { "docid": "614b7da5083590b38e77d1a241f25ed8", "score": "0.60147345", "text": "def test_data():\n assert os.path.exists(data_path)\n assert os.path.exists(conmat_file)\n assert os.path.exists(coords_file)\n assert os.path.exists(Z_list_file)\n assert os.path.exists(Pajek_net_file)", "title": "" }, { "docid": "67715622ffc085ad2e48c86417c9f7d1", "score": "0.59992874", "text": "def test_io(self):\n db0 = get_example()\n dump_list = [(f'test.sqlite', None),\n (f'test.txt', 'dump'),\n (f'test.gz', None)]\n # Save.\n for fn, fmt in dump_list:\n print(f'Writing {fn}')\n db0.to_file(fn, fmt=fmt)\n print(' -- output has size {}'.format(os.path.getsize(fn)))\n t0 = time.time()\n db1 = metadata.ObsDb.from_file(fn, fmt=fmt)\n dt = time.time() - t0\n self.assertEqual(len(db1.query()), len(db0.query()))\n print(' -- removing.')\n os.remove(fn)", "title": "" }, { "docid": "c5313d9ddb574336bfbe5d8dd3b68345", "score": "0.59978944", "text": "def test_save(self):\n\n # Create plugin save it\n plugin = models.Plugin(module_filename='testfilename.py', requirements_file='testfilename.txt')\n plugin.save()\n\n # Test that a task was created to prepare this datasource\n # TODO. Check that task has been created and recieved. We will need to mock celery. 
Not sure how we can make\n # this a valid test.", "title": "" }, { "docid": "d4df40cd0d7349a0b4e85cc14fb608db", "score": "0.5977062", "text": "def test_update_file(self):\n pass", "title": "" }, { "docid": "6bc2411bf89f3f13666ad90e9f34c72a", "score": "0.5969495", "text": "def test_write_loadTheSameSimpleData(self):\n pc = SimpleTestData.get_point_cloud()\n export(pc, self.test_file_path)\n file = _get_file_from_path(self.test_file_path)\n _assert_all_attributes_in_file(pc[keys.point], file)", "title": "" }, { "docid": "32ab691bc4d622fc8749d8b03feea428", "score": "0.5923298", "text": "def test_save(self):", "title": "" }, { "docid": "32ab691bc4d622fc8749d8b03feea428", "score": "0.5923298", "text": "def test_save(self):", "title": "" }, { "docid": "7f16ec7e1f212ea628be78fb894488d3", "score": "0.59003514", "text": "def test_write_sameFileTwice(self):\n pc = SimpleTestData.get_point_cloud()\n export(pc, self.test_file_path)\n with pytest.raises(FileExistsError):\n export(pc, self.test_file_path)", "title": "" }, { "docid": "af2e1523d9e45a8d952eed7929f55a9a", "score": "0.58900225", "text": "def test_file2db(self):\n settings.MAILER_STORAGE_BACKEND = 'django_yubin.storage_backends.FileStorageBackend'\n file_message = self.create_message()\n file_message_data = file_message.message_data\n settings.MAILER_STORAGE_BACKEND = 'django_yubin.storage_backends.DatabaseStorageBackend'\n\n file2db(delete=True)\n\n db_message = Message.objects.get(pk=file_message.pk)\n self.assertEqual(db_message.message_data, file_message_data)\n self.assertFalse(Message.objects.filter(\n storage='django_yubin.storage_backends.FileStorageBackend').exists())", "title": "" }, { "docid": "7d45710da0c8e57afba802b01d93cb57", "score": "0.5860313", "text": "def test_save(self):\n\n expected_file_location = (\n PyFunceble.OUTPUT_DIRECTORY\n + PyFunceble.OUTPUTS.parent_directory\n + PyFunceble.OUTPUTS.logs.directories.parent\n + PyFunceble.OUTPUTS.logs.filenames.execution_time\n )\n\n PyFunceble.helpers.File(expected_file_location).delete()\n\n PyFunceble.CONFIGURATION.show_execution_time = True\n PyFunceble.CONFIGURATION.logs = True\n PyFunceble.CONFIGURATION.show_percentage = False\n PyFunceble.INTERN[\"file_to_test\"] = \"this_is_a_ghost\"\n\n start_time = datetime.now()\n PyFunceble.INTERN[\"start\"] = start_time.timestamp()\n PyFunceble.INTERN[\"end\"] = (start_time + timedelta(seconds=15)).timestamp()\n\n expected = {\n \"current_total\": \"00:00:00:15.0\",\n \"data\": [\n [PyFunceble.INTERN[\"start\"], PyFunceble.INTERN[\"end\"]],\n [PyFunceble.INTERN[\"start\"], PyFunceble.INTERN[\"end\"]],\n ],\n \"final_total\": \"00:00:00:15.0\",\n }\n\n ExecutionTime(\"start\")\n ExecutionTime(\"stop\")\n\n expected_stdout = (\n f\"{Fore.MAGENTA}{Style.BRIGHT }\\n\"\n f\"Execution time: {expected['final_total']}\\n\"\n )\n self.assertEqual(expected_stdout, sys.stdout.getvalue())\n\n ExecutionTime(\"start\")\n ExecutionTime(\"stop\", last=True)\n\n expected_stdout += expected_stdout\n expected_stdout += (\n f\"{Fore.MAGENTA}{Style.BRIGHT }\"\n f\"Global execution time: {expected['final_total']}\\n\"\n )\n self.assertEqual(\n expected_stdout,\n sys.stdout.getvalue(),\n f\"{repr(sys.stdout.getvalue())}\\n{repr(expected_stdout)}\",\n )\n\n actual = PyFunceble.helpers.Dict().from_json_file(expected_file_location)\n self.assertEqual(expected, actual)\n\n del expected[\"final_total\"]\n\n ExecutionTime(\"start\")\n ExecutionTime(\"stop\", last=True)\n\n expected[\"data\"].extend(\n [[PyFunceble.INTERN[\"start\"], PyFunceble.INTERN[\"end\"]]]\n 
)\n expected[\"final_total\"] = \"00:00:00:15.0\"\n\n actual = PyFunceble.helpers.Dict().from_json_file(expected_file_location)\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "19b394f2b21f460ed3a9428d0d72c55c", "score": "0.58559406", "text": "def saveTestingData():\n summaries, dataInfo = gensimSummarizer()\n tokenizedSummaries = getSummarizedSents()\n summaryFile = '../Data/test_data.dat'\n dataFile = '../Data/test_dataset_info.dat'\n openedSumFile = open(summaryFile, 'wb')\n pickle.dump(tokenizedSummaries, openedSumFile)\n openedSumFile.close()\n openedDataFile = open(dataFile, 'wb')\n pickle.dump(dataInfo, openedDataFile)\n openedDataFile.close()", "title": "" }, { "docid": "ded69175d5efd29f6a3efd469f2e3a9b", "score": "0.5846448", "text": "def write_fixture(data):\n with open(fixture_name(\"tmp\"), \"w\") as tmp_file:\n print(data, file=tmp_file)", "title": "" }, { "docid": "1f739f41162acd89102c144e03824469", "score": "0.580859", "text": "def setUp(self):\n\n # insert test data\n from demo.save_data import save_data\n save_data()", "title": "" }, { "docid": "394bca50b2cd476afd101de26d43c899", "score": "0.5780821", "text": "def test_save_template(self):\n pass", "title": "" }, { "docid": "14f3216358a5f73354f3abaa47001cf2", "score": "0.576901", "text": "def test_and_save():\n#\n#--- check whether the local file exist before start checking\n#\n if not os.path.isfile(temp_opfile):\n return False \n#\n#--- check whether there are any differences from the current mta limit database\n#\n cmd = 'diff ' + mta_op_limit + ' ' + temp_opfile + ' > ' + zspace\n os.system(cmd)\n data = mcf.read_data_file(zspace, remove=1)\n#\n#--- if so, save a copy of op_limit.db and glimon to Past_data directory and also \n#--- update the main mta limit database\n#\n if len(data) < 1:\n cmd = 'rm ' + temp_opfile\n os.system(cmd)\n else:\n cmd = 'mv ' + temp_opfile + ' ' + main_dir +'op_limits.db'\n os.system(cmd)\n\n tail = time.strftime(\"%m%d%y\", time.gmtime())\n cmd = 'cp ' + main_dir + 'op_limits.db ' + main_dir + 'Past_data/op_limits.db_' + tail\n os.system(cmd)\n\n cmd = 'cp -f ' + main_dir + glimmon + '/data/mta4/MTA/data/op_limits/.'\n os.system(cmd)\n\n cmd = 'cp ' + main_dir + glimmon + ' ' + main_dir + 'Past_data/' + glimmon + '_' + tail\n os.system(cmd)\n#\n#--- notify the changes to admin person\n#\n line = 'There are some changes in mta limit database; '\n line = line + 'check /data/mta/Script/MSID_limit/* '\n line = line + 'and /data/mta4/MTA/data/op_limits/op_limits.db.\\n'\n\n with open(zspace, 'w') as fo:\n fo.write(line)\n\n cmd = 'cat ' + zspace + '| mailx -s \"Subject: MTA limit database updated \" ' + admin\n os.system(cmd)\n\n mcf.rm_files(zspace)\n\n cmd = 'chgrp mtagroup ./*'\n os.system(cmd)\n cmd = 'chgrp mtagroup ./Past_data/*'\n os.system(cmd)\n cmd = 'chgrp mtagroup ' + mta_op_limit\n os.system(cmd)\n\n return True", "title": "" }, { "docid": "574d26c7afa040f5bd42cdc8f1277566", "score": "0.5768174", "text": "def test_list_create_failure(self):\n\tview_url = \"/uploadeXe/list/\"\n\t#try:\n\tif True:\n\t\tself.c = Client();\n \tself.user = User.objects.get(username=\"testuser1\")\n \tself.user.set_password('hello')\n \tself.user.save()\n \tself.user = authenticate(username='testuser1', password='hello')\n \tlogin = self.c.login(username='testuser1', password='hello')\n\n with open('/opt/UMCloudDj/test.epub',\"r\") as myfile:\n post_data={'exefile': myfile}\n response = self.client.post(view_url, post_data)\t\t\n\t\t\tself.assertEqual(\"YES\", 
Document.objects.get(name=\"LUL-Boys\").success)\n\t\t\t\n\t\t\t\n #except:\n\telse:\n print(\"TEST ELP FILE NOT INCLUDED\")", "title": "" }, { "docid": "b246addea6dd95fb97990c55ca6e6dac", "score": "0.57546026", "text": "def test_main_download(self):\n with open(os.path.join(testpath, 'aoi1.geojson')) as f:\n aoi = json.dumps(json.load(f))\n config.DATADIR = os.path.join(testpath, \"${eo:platform}\")\n items = main.main(datetime='2017-01-05/2017-01-21', intersects=aoi, download=['thumbnail', 'MTL'], **{'collection': 'landsat-8-l1'})\n for item in items:\n bname = os.path.splitext(item.get_filename(config.DATADIR))[0]\n assert(os.path.exists(bname + '_thumbnail.jpg'))\n if not os.path.exists(bname + '_MTL.txt'):\n import pdb; pdb.set_trace()\n assert(os.path.exists(bname + '_MTL.txt'))\n shutil.rmtree(os.path.join(testpath,'landsat-8'))\n config.DATADIR = testpath", "title": "" }, { "docid": "ae23c0fa2b75b3fe8f7981a0abb09745", "score": "0.57529944", "text": "def test_data(self):\n pass", "title": "" }, { "docid": "fbb472176a175473a5e3f7ea4db07d0f", "score": "0.5745818", "text": "def test():\n protocol_path = r\"\\\\akrgis.nps.gov\\inetApps\\observer\\protocols\\sample.obsprot\"\n fgdb_folder = r\"C:\\tmp\\observer\"\n csv_folder = r\"C:\\tmp\\observer\\test1\"\n database, protocol_json = database_creator.database_for_protocol_file(\n protocol_path, fgdb_folder\n )\n process_csv_folder(csv_folder, protocol_json, database)", "title": "" }, { "docid": "4649ba4dd56b7bdabc93d2b1172b7d40", "score": "0.57398015", "text": "def test_backup_separate(self):\n tmp_path = self.create_dir('separate')\n wheelbarrow = Wheelbarrow(\n tmp_path,\n os.path.join('/', 'opt', 'current-plone', 'zeocluster'),\n verbosity=0,\n combined=False\n )\n wheelbarrow.backup()\n\n backup_date = datetime.now().strftime(\"%Y-%m-%d\")\n self.assertTrue(\n os.path.isfile(os.path.join(tmp_path, backup_date + '_data.fs')),\n msg='Backup Data.fs file was not created'\n )\n self.assertTrue(\n os.path.isfile(os.path.join(tmp_path, backup_date + '_blobstorage.tar.gz')), # NOQA\n msg='Backup blob storage file was not created'\n )\n backup_dir = os.path.join(tmp_path, backup_date)\n os.makedirs(backup_dir)\n call(\n ['tar', '-zxmf', os.path.join('..', backup_date + '_blobstorage.tar.gz')], # NOQA\n cwd=backup_dir\n )\n self.assertTrue(\n os.path.isfile(os.path.join(backup_dir, '.layout')),\n msg='Backup blob storage layout file not included in backup'\n )\n self.assertTrue(\n 'tmp' in os.listdir(backup_dir),\n msg='Backup of blob storage incomplete'\n )\n self.assertFalse(\n 'Data.fs' in os.listdir(backup_dir),\n msg='Backup Data.fs file was in blob storage backup'\n )\n self.remove_dir(tmp_path)", "title": "" }, { "docid": "5f911288eff749211a670a7c176f7300", "score": "0.5738023", "text": "def test_save_and_load(self, tmp_path, dummy_dataframe):\n filepath = (tmp_path / FILENAME).as_posix()\n data_set = ParquetDataSet(filepath=filepath)\n data_set.save(dummy_dataframe)\n reloaded = data_set.load()\n assert_frame_equal(dummy_dataframe, reloaded)\n\n files = [child.is_file() for child in tmp_path.iterdir()]\n assert all(files)\n assert len(files) == 1", "title": "" }, { "docid": "0a4f534b456cf2142b6d70cdec4dd46f", "score": "0.57237786", "text": "def test_to_json():\n start = time.time()\n bulletin = SEAPBulletin(EXAMPLE_FILE_PATH, date='2020-08-11')\n bulletin.to_file(\n output_file=OUTPUT_DIR + '/[YYYY][MM][DD]_SEAPRJ.json',\n tablename='facilities'\n )\n expected_results = [\n OUTPUT_DIR + '/20200811_SEAPRJ_facilities.json',\n ]\n for 
_file in expected_results:\n # check there is a file in the expected path\n log.info(f\"Checking whether {_file} exists...\")\n assert os.path.isfile(_file)\n #assert _file in os.listdir(OUTPUT_DIR)\n # check file was updated in this test (is not a preexisting file)\n log.info(f\"Checking whether {_file} was updated...\")\n assert os.path.getmtime(_file) >= start", "title": "" }, { "docid": "3516512adbba82039486da021d765c1d", "score": "0.57218546", "text": "def test_content_file_create(self):\n pass", "title": "" }, { "docid": "9a075f95461fa9c9362a304546f56c65", "score": "0.5714154", "text": "def test_file_save(self):\n b3 = BaseModel()\n b3.save()\n with open(\"file.json\", \"r\") as f:\n self.assertIn(b3.id, f.read())", "title": "" }, { "docid": "82292f343282b846b9cc30171a704578", "score": "0.57141185", "text": "def test_dump(self):\n\n # Create paths to files.\n log_file = os.path.join(LOG_PATH, 'UnitTestMessageA.log')\n csv_file = os.path.join(TMP_PATH, 'data.csv')\n\n # Dump data to CSV file.\n keys = ['name', 'data', 'timestamp']\n dump_to_csv(log_file, csv_file, keys)\n\n # Read data from CSV file and reference.\n with open(csv_file, 'r') as f:\n write_data = f.read()\n with open(os.path.join(LOG_PATH, 'UnitTestMessageA.csv'), 'r') as f:\n expected_data = f.read()\n\n # Ensure CSV data is in the expected format.\n self.assertEqual(write_data, expected_data)", "title": "" }, { "docid": "f4b1f32d0f7670145570c2cce2a666f1", "score": "0.5713959", "text": "def test3(self):\n\n # FIXME include abstractions\n (save_bundle, vt_save_dir) = open_bundle_from_zip_xml( \\\n DBVistrail.vtType,\n os.path.join(vistrails.core.system.vistrails_root_directory(),\n 'tests/resources/dummy_new.vt'))\n assert save_bundle.vistrail is not None", "title": "" }, { "docid": "05eb40db62584100e7358e61b7d521d0", "score": "0.56913155", "text": "def testLoadData(self):\r\n self.assertCommandSucceeds(\"loaddata\")", "title": "" }, { "docid": "05eb40db62584100e7358e61b7d521d0", "score": "0.56913155", "text": "def testLoadData(self):\r\n self.assertCommandSucceeds(\"loaddata\")", "title": "" }, { "docid": "9ddbd1e55dc6c623cdd1fd3f72a9d18b", "score": "0.56909704", "text": "def test_upload_download_object(self):\n\n for name, (storage_provider, bucket, test_file, destination, s3_access_key_id, s3_secret_access_key) in self.storage_cases.items():\n with self.subTest(name=name):\n from cstash.storage.storage import Storage\n Storage(storage_provider, s3_access_key_id, s3_secret_access_key).upload(bucket, test_file)\n retrieved_file = Storage(storage_provider, s3_access_key_id, s3_secret_access_key).download(bucket, self.single_file, destination)\n\n retrieved_file_contents = open(retrieved_file, \"r\").read()\n self.assertEqual(self.file_contents, retrieved_file_contents)", "title": "" }, { "docid": "e850ce3bba3ef4d55a9b2489a249b86c", "score": "0.5688836", "text": "def testSaveAndLoad(self):\n\n # It has been noted in the comments in DataStructs/File.py that\n # the save and load functions exist only to be overridden by\n # descendents of the DataStruct/File object, so I am not\n # testing this functionality. 
This is just a placeholder in\n # case those requirements change.\n\n return", "title": "" }, { "docid": "a3f05a4e4e8f1833160fdf9ce7a89aad", "score": "0.5681014", "text": "def test_export_stp(self):\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")\n\n self.test_shape.export_stp(\"test_solid.stp\", mode=\"solid\")\n self.test_shape.export_stp(\"test_solid2.stp\")\n self.test_shape.export_stp(\"test_wire.stp\", mode=\"wire\")\n\n assert Path(\"test_solid.stp\").exists() is True\n assert Path(\"test_solid2.stp\").exists() is True\n assert Path(\"test_wire.stp\").exists() is True\n\n assert Path(\"test_solid.stp\").stat().st_size == Path(\"test_solid2.stp\").stat().st_size\n assert Path(\"test_wire.stp\").stat().st_size < Path(\"test_solid2.stp\").stat().st_size\n\n os.system(\"rm test_solid.stp test_solid2.stp test_wire.stp\")", "title": "" }, { "docid": "1a91213e85609d9517a2490dc489fc5f", "score": "0.56773025", "text": "def test_upload_working(self):\n\n pass", "title": "" }, { "docid": "dcf5c0e9ab439df4eb6f4331e538c0f4", "score": "0.5674069", "text": "def test_save_proper_pipeline(mk_tmp_dirs):\n args = [\n 'jwst.stpipe.tests.steps.ProperPipeline',\n data_fn_path,\n ]\n Step.from_cmdline(args)\n\n assert isfile('ppbase_pp.fits')", "title": "" }, { "docid": "f1436e13cae4e1f298c6e7f62323c902", "score": "0.56642896", "text": "def test_replicate_pg_to_sf_with_archive_load_files(self):\n\n self.delete_dangling_files_from_archive()\n\n assertions.assert_run_tap_success(\n self.tap_id, self.target_id, ['fastsync', 'singer']\n )\n\n expected_archive_files_count = {\n 'public.city': 2, # INCREMENTAL: fastsync and singer\n 'public.country': 1, # FULL_TABLE : fastsync only\n 'public2.wearehere': 1, # FULL_TABLE : fastsync only\n }\n\n # Assert expected files in archive folder\n for (\n schema_table,\n expected_archive_files,\n ) in expected_archive_files_count.items():\n\n schema, table = schema_table.split('.')\n files_in_s3_archive = self.get_files_from_s3_for_table(table)\n\n if (\n files_in_s3_archive is None\n or len(files_in_s3_archive) != expected_archive_files\n ):\n raise Exception(\n f'files_in_archive for {table} is {files_in_s3_archive}.'\n f'Expected archive files count: {expected_archive_files}'\n )\n\n # Assert expected metadata\n archive_metadata = self.s3_client.head_object(\n Bucket=self.s3_bucket, Key=(files_in_s3_archive[0]['Key'])\n )['Metadata']\n\n expected_metadata = {\n 'tap': 'postgres_to_sf_archive_load_files',\n 'schema': schema,\n 'table': table,\n 'archived-by': 'pipelinewise_fastsync_postgres_to_snowflake',\n }\n\n if archive_metadata != expected_metadata:\n raise Exception(f'archive_metadata for {table} is {archive_metadata}')\n\n # Assert expected file contents\n with tempfile.NamedTemporaryFile() as tmpfile:\n with open(tmpfile.name, 'wb') as tmpf:\n self.s3_client.download_fileobj(\n self.s3_bucket, files_in_s3_archive[0]['Key'], tmpf\n )\n with gzip.open(tmpfile, 'rt') as gzipfile:\n rows_in_csv = len(gzipfile.readlines())\n\n rows_in_table = self.e2e_env.run_query_tap_postgres(\n f'SELECT COUNT(1) FROM {schema_table}'\n )[0][0]\n\n if rows_in_csv != rows_in_table:\n raise Exception(\n f'Rows in csv and db differ: {rows_in_csv} vs {rows_in_table}'\n )", "title": "" }, { "docid": "f152059513f56682638927e5437b6643", "score": "0.56628644", "text": "def test_save(self):\n objects = self.storage.all()\n self.storage.save()\n with open(\"file.json\", 'r') as f:\n from_file = f.read()\n self.assertEqual(from_file, json.dumps({key: value.to_dict()\n for 
key, value in\n objects.items()}))", "title": "" }, { "docid": "ee11cd65456da496b3e6086631d62d35", "score": "0.56482595", "text": "def test_010_upload_encode_data( self ):\n # Deleting the current history should have created a new history\n self.check_history_for_string( 'Your history is empty' )\n history3 = galaxy.model.History.query().order_by( desc( galaxy.model.History.table.c.create_time ) ).first()\n self.run_tool( 'encode_import_chromatin_and_chromosomes1', hg17=['cc.EarlyRepSeg.20051216.bed'] )\n self.wait()\n hda7 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda7 is not None, \"Problem retrieving hda7 from database\"\n self.verify_dataset_correctness( 'cc.EarlyRepSeg.20051216.bed', hid=str( hda7.hid ) )\n self.run_tool('encode_import_gencode1', hg17=['gencode.CDS.20051206.bed'])\n self.wait()\n hda8 = galaxy.model.HistoryDatasetAssociation.query() \\\n .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ).first()\n assert hda8 is not None, \"Problem retrieving hda8 from database\"\n self.verify_dataset_correctness( 'sc_3D_cds.bed', hid=str( hda8.hid ) )\n self.delete_history( id=str( history3.id ) )", "title": "" }, { "docid": "6c42182061f1f06d8a7e9d243f77e7c1", "score": "0.56421024", "text": "def testNewFileObject(self):\n resolver_helper_object = (\n sqlite_blob_resolver_helper.SQLiteBlobResolverHelper())\n self._TestNewFileObject(resolver_helper_object, self._sqlite_blob_path_spec)", "title": "" }, { "docid": "4a37636ce08840a8a96b4afb8c84fa90", "score": "0.5640938", "text": "def test_save_model2():\n model_file = join(get_test_data_path(), \"gclda_model.pklz\")\n temp_file = join(get_test_data_path(), \"temp.pklz\")\n model = Model.load(model_file)\n model.save(temp_file)\n file_found = isfile(temp_file)\n assert file_found\n\n # Perform cleanup\n remove(temp_file)", "title": "" }, { "docid": "09314d35f8b9b3a42574a153e058f702", "score": "0.5638447", "text": "def test_files_created(self, file_format):\n\n fn = bqfl.WriteRecordsToFile(\n schema=_ELEMENTS_SCHEMA, file_format=file_format)\n self.tmpdir = self._new_tempdir()\n\n def check_files_created(output_pcs):\n dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]\n\n files = dest_file_pc | \"GetFiles\" >> beam.Map(lambda x: x[1][0])\n file_count = files | \"CountFiles\" >> combiners.Count.Globally()\n\n _ = files | \"FilesExist\" >> beam.Map(\n lambda x: hamcrest_assert(os.path.exists(x), is_(True)))\n assert_that(file_count, equal_to([3]), label='check file count')\n\n destinations = (\n dest_file_pc\n | \"GetDests\" >>\n beam.Map(lambda x: bigquery_tools.get_hashable_destination(x[0])))\n assert_that(\n destinations,\n equal_to(list(_DISTINCT_DESTINATIONS)),\n label='check destinations ')\n\n self._consume_input(fn, check_files_created)", "title": "" }, { "docid": "06817a962ec46140dc0c713eb7a2038d", "score": "0.56319547", "text": "def setUp(cls):\n with open(PATH_FILE + '\\\\' + FILE_BK, 'w') as f:\n f.write(str(data_old))\n with open(PATH_FILE + '\\\\' + FILE_DATA, 'w') as f:\n f.write(str(data_old) + str(data))", "title": "" }, { "docid": "2ccbcdef73b9378315b3f366ecfc15d1", "score": "0.56265515", "text": "def setUp(self):\n super().setUp()\n self.temp_dir = tempfile.mkdtemp()\n self.csv_col_names = pd.read_csv(self.csv_path, nrows=0).columns\n data_frame = pd.read_csv(self.csv_path)\n self.parquet_file_name = \"test.parquet\"\n self.output_file = 
f\"{self.temp_dir}/{self.parquet_file_name}\"\n data_frame.to_parquet(self.output_file, allow_truncated_timestamps=True, coerce_timestamps=\"ms\")\n\n self.manifest_id = 1\n self.account = \"org1234567\"\n self.s3_path = self.temp_dir\n self.provider_uuid = str(uuid.uuid4())\n self.local_parquet = self.output_file\n self.date_columns = [\"date1\", \"date2\"]\n self.numeric_columns = [\"numeric1\", \"numeric2\"]\n self.boolean_columns = [\"bool_col\"]\n self.other_columns = [\"other\"]\n self.table_name = \"test_table\"\n self.column_types = {\n \"numeric_columns\": self.numeric_columns,\n \"date_columns\": self.date_columns,\n \"boolean_columns\": self.boolean_columns,\n }\n self.processor = ReportParquetProcessorBase(\n self.manifest_id,\n self.account,\n self.s3_path,\n self.provider_uuid,\n self.local_parquet,\n self.column_types,\n self.table_name,\n )\n self.log_base = \"masu.processor.report_parquet_processor_base\"\n self.log_output_info = f\"INFO:{self.log_base}:\"", "title": "" }, { "docid": "cc1e1f3a0e01d24546b56f32ffb4fbe4", "score": "0.5605927", "text": "def test_save(self):\n\n expected = {\n self.file_to_test: {\n \"example.com\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.invalid,\n },\n },\n }\n\n self.inactive_db.database = expected.copy()\n self.inactive_db.save()\n\n self.assertEqual(\n expected, PyFunceble.helpers.Dict().from_json_file(self.storage_file)\n )", "title": "" }, { "docid": "bf4931320e64b6b0e5747dbc067995d1", "score": "0.56022817", "text": "def test_otoroshi_controllers_adminapi_import_export_controller_full_import_from_file(self):\n pass", "title": "" }, { "docid": "5ea0a83a6d3c974ebbd06a8c95e079a5", "score": "0.56019866", "text": "def test_populated_database(self):\n pass", "title": "" }, { "docid": "2a3c38ba9daf7fb3647bab2708e433fa", "score": "0.5600723", "text": "def make_test_data(outfile=None):\n\n if outfile is None:\n outfile = default_test_data_path\n\n print green(\"Saving test data from %s to %s\" % (test_data_apps, outfile))\n args = ' '.join(test_data_apps + ('--exclude=auth.Permission',))\n if fabutils.manage_py(\"dumpdata --indent=2 %s > %s\" % (args, outfile)):\n print \"Make test data successful.\"", "title": "" }, { "docid": "c7d056361f4a0800346472283eaa0088", "score": "0.55994165", "text": "def test_to_file(self):\n tempfilename = os.path.join(self.tempdir, 'boros.shp')\n self.df.to_file(tempfilename)\n # Read layer back in?\n df = GeoDataFrame.from_file(tempfilename)\n self.assertTrue('geometry' in df)\n self.assertTrue(len(df) == 5)\n self.assertTrue(np.alltrue(df['BoroName'].values == self.boros))", "title": "" }, { "docid": "1619b91017f202294b9a89b22d572cdd", "score": "0.5596699", "text": "def default_to_test(app: Application):\n\n import textwrap\n\n temp_dir = tempfile.mkdtemp()\n try:\n # Convertion Contexts\n cvt_ctxs = [\n AttrDict(\n {\n \"file_name\": \"db.sql\",\n \"src_db\": \"bb2_default\",\n \"dst_db\": \"bb2_test\",\n }\n ),\n AttrDict(\n {\n \"file_name\": \"db_sg.sql\",\n \"src_db\": \"bb2_default_sg\",\n \"dst_db\": \"bb2_test_sg\",\n }\n ),\n ]\n\n for actx in cvt_ctxs:\n click.echo(\n \" * [Transfer Data From %s To %s]\" % (actx.src_db, actx.dst_db)\n )\n\n click.echo(\n textwrap.dedent(\n \"\"\"\\\n Exporting data from %s and it will take several minutes,\n please wait...\n \"\"\"\n ).replace(\"\\n\", \" \")\n % actx.src_db\n )\n\n apath = 
os.path.join(temp_dir, actx.file_name)\n with open(apath, \"w\") as f:\n p = app.db_dump(\n f,\n databases=[actx.src_db],\n force=True,\n without_db_name=True,\n )\n if p.returncode != 0:\n click.echo(\"Failed to export data!\", err=True)\n return p.returncode\n\n click.echo(\"Export database %s successfully!\" % actx.src_db)\n\n click.echo(\n textwrap.dedent(\n \"\"\"\\\n Importing data to %s and it will take several minutes,\n please wait...\n \"\"\"\n ).replace(\"\\n\", \" \")\n % actx.dst_db\n )\n\n p = app.db_import(apath, actx.dst_db)\n if p.returncode != 0:\n click.echo(\"Import data to test fail!\", err=True)\n return p.returncode\n\n click.echo(\"Import data to %s successfully\" % actx.dst_db)\n\n click.echo(\"Transfter data from default to test successfully!\\n\")\n\n finally:\n shutil.rmtree(temp_dir)", "title": "" }, { "docid": "ad26ee4616b6fdabaa2c67d9cf7dbd66", "score": "0.55947965", "text": "def test_creation(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n self.assertTrue(os.path.exists('population_backup/storage/test_scenario_neat_gru/delete_me/'))", "title": "" }, { "docid": "a150b1175033817e286d6902b4708c8c", "score": "0.55843407", "text": "def test_get_db_filepath(dbh):\n assert str(dbh.get_db_filepath()) == \"test/test_data/trading_log.json\"\n\n mock_path = Path(\"/tmp/test.json\")\n assert dbh.write_data(mock_path)\n assert mock_path.is_file()\n dbh.read_data(mock_path)\n assert dbh.get_db_filepath() == mock_path", "title": "" }, { "docid": "64f14c05dffd9290deb38fc1f0885b41", "score": "0.55734134", "text": "def export_data(self, model, dumpdata = True):\n model_export_name = model\n if dumpdata == True:\n try:\n sysout = sys.stdout\n sys.stdout = open(VPATH+'vulture/fixtures/'+ model_export_name +'.json','w')\n call_command('dumpdata', model_export_name)\n sys.stdout = sysout\n except django.db.utils.DatabaseError as e:\n os.system(\"cp \"+VPATH+\"vulture/models_final.py \"+VPATH+\"vulture/models.py\")#Error : restoring target model\n sys.stdout = sysout\n print e\n print \"error on model : \" + model_export_name\n sys.exit(0)\n else:\n f = open(VPATH+'vulture/fixtures/'+ model_export_name +'.json','w')\n json_data = json.dumps(dumpdata)\n f.write(json_data)", "title": "" }, { "docid": "a599e9553e0b47daa0b98831af34fea9", "score": "0.5572478", "text": "def test_minimal_stp_creation(self):\n\n os.system(\"rm -r *.stp\")\n output_filenames = [\n \"plasma.stp\",\n \"center_column_shield.stp\",\n \"outboard_firstwall.stp\",\n \"blanket.stp\",\n \"divertor_upper.stp\",\n \"divertor_lower.stp\",\n \"supports.stp\",\n \"outboard_rear_blanket_wall.stp\",\n \"inboard_tf_coils.stp\",\n ]\n self.test_reactor.export_stp(filename=output_filenames)\n\n for output_filename in output_filenames:\n assert Path(output_filename).exists() is True\n os.system(\"rm -r *.stp\")", "title": "" }, { "docid": "5cd1e8a0bda283614b80332de1fa2d7f", "score": "0.5561122", "text": "def test_one_file(self):\n # create model file with name that matches target name\n for name in ['cal', 'alias']:\n fname = os.path.join(self.lsm_dir, name + '.txt')\n with open(fname, 'w') as f:\n print(self.file_model, file=f)\n\n expected_params = ['S0, radec, 08:25:26.87, -50:10:38.49, (800 1712'\n ' 5 6 7 8)']\n\n model_params, model_file = pipelineprocs.get_model(self.target, self.lsm_dir, 'l')\n self.assertEqual(fname, model_file)\n np.testing.assert_equal(model_params, expected_params)\n\n os.remove(fname)", "title": "" }, { "docid": 
"8b362cc50a12f0314aeca0d0fb8d3bc6", "score": "0.55606043", "text": "def test_export_config(self):\n pass", "title": "" }, { "docid": "17d71f842d2bd21fe5cfcb82f9468cff", "score": "0.5557499", "text": "def test_serialization(valid_data):\n process_spec: ProcessSpec = ProcessSpec.build(valid_data)\n serialized = process_spec.dump()\n # Audit info & dataset are not included in the dump\n serialized['audit_info'] = valid_data['audit_info']\n assert serialized == valid_data", "title": "" }, { "docid": "32977a43e238f91e64a20f6fc3647900", "score": "0.55560416", "text": "def test_asmnt_procedure_export(self, model):\n with factories.single_commit():\n program = factories.ProgramFactory()\n audit = factories.AuditFactory(program=program)\n import_queries = []\n for i in range(3):\n import_queries.append(collections.OrderedDict([\n (\"object_type\", model),\n (\"Assessment Procedure\", \"Procedure-{}\".format(i)),\n (\"Title\", \"Title {}\".format(i)),\n (\"Code*\", \"{}-{}\".format(model, i)),\n (\"Admin\", \"user@example.com\"),\n (\"Assignees\", \"user@example.com\"),\n (\"Creators\", \"user@example.com\"),\n (\"Description\", \"{} description\".format(model)),\n (\"Program\", program.slug),\n (\"Audit\", audit.slug),\n (\"Start Date\", \"\"),\n (\"End Date\", \"\"),\n ]))\n self.check_import_errors(self.import_data(*import_queries))\n\n model_cls = inflector.get_model(model)\n objects = model_cls.query.order_by(model_cls.test_plan).all()\n self.assertEqual(len(objects), 3)\n for num, obj in enumerate(objects):\n self.assertEqual(obj.test_plan, \"Procedure-{}\".format(num))\n\n obj_dicts = [\n {\n \"Code*\": obj.slug,\n \"Assessment Procedure\": \"Procedure-{}\".format(i)\n } for i, obj in enumerate(objects)\n ]\n search_request = [{\n \"object_name\": model_cls.__name__,\n \"filters\": {\n \"expression\": {},\n \"order_by\": {\"name\": \"id\"}\n },\n \"fields\": [\"slug\", \"test_plan\"],\n }]\n exported_data = self.export_parsed_csv(search_request)[model]\n self.assertEqual(exported_data, obj_dicts)", "title": "" }, { "docid": "f48904a3176e4db43b3375bfcb2c5622", "score": "0.5552713", "text": "def snapshot_export():", "title": "" }, { "docid": "29255180b306351249473297f25df96e", "score": "0.5549613", "text": "def test_to_csv():\n start = time.time()\n bulletin = SEAPBulletin(EXAMPLE_FILE_PATH, date='2020-08-11')\n bulletin.to_file(\n output_file=OUTPUT_DIR + '/[YYYY][MM][DD]_SEAPRJ.csv',\n tablename='facilities'\n )\n expected_results = [\n OUTPUT_DIR + '/20200811_SEAPRJ_facilities.csv',\n ]\n for _file in expected_results:\n # check there is a file in the expected path\n log.info(f\"Checking whether {_file} exists...\")\n assert os.path.isfile(_file)\n # check file was updated in this test (is not a preexisting file)\n log.info(f\"Checking wheter {_file} was updated...\")\n assert os.path.getmtime(_file) >= start", "title": "" }, { "docid": "ff60cb9f59d262c400c6d5cad44d2c20", "score": "0.5546362", "text": "def test_save_and_load(self, biosequence_data_set, dummy_data):\n biosequence_data_set.save(dummy_data)\n reloaded = biosequence_data_set.load()\n assert dummy_data[0].id, reloaded[0].id\n assert dummy_data[0].seq, reloaded[0].seq\n assert len(dummy_data) == len(reloaded)\n assert biosequence_data_set._fs_open_args_load == {\"mode\": \"r\"}\n assert biosequence_data_set._fs_open_args_save == {\"mode\": \"w\"}", "title": "" }, { "docid": "882ec5facd9c9fbe0746a984c0e588bb", "score": "0.5545566", "text": "def test_export_brep():\n\n if TEST_OUT_PATH.exists():\n shutil.rmtree(TEST_OUT_PATH)\n 
TEST_OUT_PATH.mkdir()\n\n cpacs = CPACS(CPACS_IN_PATH)\n\n export_brep(cpacs, TEST_OUT_PATH)\n\n brep_files = list(TEST_OUT_PATH.glob(\"*.brep\"))\n brep_file_names = [brep_file.name for brep_file in brep_files]\n\n assert len(brep_files) == 3 # simpletest_cpacs.xml containt only 3 parts\n assert \"Wing.brep\" in brep_file_names\n assert \"Wing_mirrored.brep\" in brep_file_names\n assert \"SimpleFuselage.brep\" in brep_file_names\n\n # Erase brep file generated by the test\n for brep_file in brep_files:\n brep_file.unlink()\n\n with pytest.raises(FileNotFoundError):\n with patch(\"ceasiompy.CPACS2GMSH.func.exportbrep.export_shapes\", return_value=True):\n export_brep(cpacs, TEST_OUT_PATH)", "title": "" }, { "docid": "f1f696cd1a01e8cfbb5bffd8c804d8b6", "score": "0.5544684", "text": "def test_create_from_file_no_relation():\n with TempDirectory() as d:\n # Create a file in the temp directory\n d.write('test.sql',\n 'SELECT * FROM test_table;'.encode('utf8'))\n Database(files=[os.path.join(d.path, 'test.sql')])", "title": "" }, { "docid": "c271934a4cc5a26063025ab40c2d7314", "score": "0.5540516", "text": "def setUp(self):\n self.backup = WMFBackup('test', {'type': 'null'})", "title": "" }, { "docid": "ac157dfdc3d26439d0865e2fc26e03d0", "score": "0.5535402", "text": "def test_save(self):\n self.modelgrid.save('test.p')\n os.system('rm test.p')", "title": "" }, { "docid": "c50b300044bd22ce44980862171fb57b", "score": "0.5535128", "text": "def test():\n # NOTE: Disable store command so far\n # store()\n check_results()", "title": "" }, { "docid": "79e8ca57282e89a89c7b0fdd4687d0bd", "score": "0.55340123", "text": "def test_write_binary_creates_file(self):\n time = np.random.rand(10)\n current = np.random.rand(10)\n file_name = \"./files/IOTest6.npz\"\n # File should not exist before we begin.\n if os.path.exists(file_name):\n os.remove(file_name)\n io.write_time_current_data(file_name, time, current)\n self.assertTrue(os.path.exists(file_name))\n os.remove(file_name)\n self.assertFalse(os.path.exists(file_name))", "title": "" }, { "docid": "06afa454bb5081b78d9902e758754597", "score": "0.5530934", "text": "def test_database(client):\n tester = Path(TEST_DB).is_file()\n assert tester", "title": "" }, { "docid": "e001553a354a4956d024828a3cfac2c4", "score": "0.55277544", "text": "def setUp(self):\n\n try:\n os.makedirs(self.test_files_directory, exist_ok=True)\n os.makedirs(self.two_directory_tieres, exist_ok=True)\n\n for this_directory in [self.test_files_directory, self.two_directory_tieres]:\n with open(f\"{this_directory}/{self.single_file}\", \"w+\") as test_file:\n test_file.write(\"Some amazing things, right here\")\n\n except Exception as e:\n print(f\"Couldn't create fixture: {e}\")", "title": "" }, { "docid": "fa909fdd00916ec6e6cbb838b5ac28e2", "score": "0.5526885", "text": "def test_supplemental_output_created(self):\n settings = OutputSettings.objects.first()\n settings.save_daily_unit_states = True\n settings.save()\n close_all_connections()\n output_file = os.path.join(self.scenario_directory, 'states_1.csv')\n\n sim = Simulation(1)\n sim.start()\n sim.join()\n\n self.assertTrue(os.access(output_file, os.F_OK))", "title": "" }, { "docid": "31fea51ec72b97c8c19ea6a19ed23d1e", "score": "0.552055", "text": "def test_simple_noextension(self, tmpdir):\n filename = str(tmpdir.join('test_simple'))\n t1 = Table(self.data)\n t1.write(filename, overwrite=True, format='fits')\n t2 = Table.read(filename)\n assert equal_data(t1, t2)", "title": "" }, { "docid": "7a29b4f5bed8b34a4e1d5510244ea2b4", 
"score": "0.5517719", "text": "def single_case(test_id: str, test_object: Any):\n # Dump to disk\n path = os.path.join(TESTPATH, f\"{time.strftime('%Y%m%d-%H%M%S')} - {test_id}\")\n success = dumps(filename=path, data=test_object)\n\n # Ensure that dumping is successful\n assert_equal(True, success)\n\n # Read from disk\n reloaded_object = loads(path)\n\n assert_equal(True, test_object == reloaded_object)\n\n # Cleanup\n os.remove(f\"{path}.pkl\")", "title": "" }, { "docid": "69f8370918c2e2259d58c2c53afc47f9", "score": "0.55171764", "text": "def test_write_loadTheSameComplexData(self):\n test_data = ComplexTestData()\n pc = test_data.get_point_cloud()\n export(pc, self.test_file_path)\n file = _get_file_from_path(self.test_file_path)\n _assert_all_attributes_in_file(pc[keys.point], file)", "title": "" }, { "docid": "c69598094c64532b216d4e83b32e407a", "score": "0.5513966", "text": "def test_write_db_load_db2(self):\n with TemporaryDirectory(dir=\".\") as dirname:\n outpath = join(dirname, \"delme\")\n writer = write_db(outpath, create=True, if_exists=\"ignore\")\n data = dict(a=[1, 2], b=\"string\")\n m = writer(data, identifier=join(\"blah\", \"delme.json\"))\n writer.data_store.db.close()\n dstore = io_app.get_data_store(f\"{outpath}.tinydb\", suffix=\"json\")\n reader = io_app.load_db()\n got = reader(dstore[0])\n dstore.close()\n self.assertEqual(got, data)", "title": "" }, { "docid": "185130a08a7d1e67f55111fca5c08c9e", "score": "0.5512545", "text": "def test_write_db_parallel(self):\n dstore = io_app.get_data_store(self.basedir, suffix=\"fasta\")\n members = dstore.filtered(callback=lambda x: \"brca1.fasta\" not in x.split(\"/\"))\n reader = io_app.load_unaligned()\n aligner = align_app.align_to_ref()\n writer = write_db(\"delme.tinydb\", create=True, if_exists=\"overwrite\")\n process = reader + aligner + writer\n\n r = process.apply_to(members, logger=False, show_progress=False, parallel=True)\n\n expect = [str(m) for m in process.data_store]\n process.data_store.close()\n\n # now get read only and check what's in there\n result = io_app.get_data_store(\"delme.tinydb\")\n got = [str(m) for m in result]\n result.close()\n\n self.assertEqual(got, expect)", "title": "" }, { "docid": "b51faf18927e2bf83901f8acf4ac98d8", "score": "0.5511329", "text": "def test_output_to_file(self):\n train_df = pd.read_csv(\"../data/training_set_test.txt\", sep=\" \", header=None)\n train_df.columns = [\"source\", \"target\", \"label\"]\n\n node_information_df = pd.read_csv(\"../data/node_information_test.csv\", sep=\",\", header=None)\n node_information_df.columns = [\"ID\", \"year\", \"title\", \"authors\", \"journalName\", \"abstract\"]\n node_information_df = node_information_df.reset_index().set_index(\"ID\")\n node_information_df[\"authors\"].fillna(\"\", inplace=True)\n node_information_df[\"journalName\"].fillna(\"\", inplace=True)\n\n features = [\"original\"]\n feature = features[0]\n exporter = FeatureExporter()\n\n exporter.computeFeature(train_df, node_information_df, feature)\n exporter.exportTo('training_set_test.txt', feature)\n\n self.assertTrue(os.path.isfile(\"featureEngineering/originalFeatures/output/training_set_test.txt\"))", "title": "" }, { "docid": "3d59318418974ced929b1ed6a78ab509", "score": "0.5510073", "text": "def test_APIDeltaWriter(self):\r\n (fileDes, tempFileName) = tempfile.mkstemp()\r\n old_db = os.path.join(os.environ['TEST_DATA'], 'data', 'docs', 'sample_old_db.xml') \r\n new_db = os.path.join(os.environ['TEST_DATA'], 'data', 'docs', 'sample_new_db.xml') \r\n writer = 
APIDeltaWriter(open(old_db), open(new_db))\r\n saveout = sys.stdout\r\n sys.stdout = sys.stderr\r\n writer.write(tempFileName)\r\n os.close(fileDes)\r\n sys.stdout = saveout\r\n tempFile = open(tempFileName, 'r')\r\n content = tempFile.readlines()\r\n tempFile.close()\r\n os.unlink(tempFileName)\r\n assert len(content) == 13", "title": "" }, { "docid": "1c210043b697d9b14b573d367540f3b3", "score": "0.55095917", "text": "def testSaveFamily(self):\n os.makedirs(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy'))\n self.family.save(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy'))\n try:\n self.assertTrue(filecmp.cmp(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_migration/groups.py'),\n os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy/groups.py')))\n self.assertTrue(filecmp.cmp(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_migration/rules.py'),\n os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy/rules.py')))\n self.assertTrue(filecmp.cmp(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_migration/training/reactions.py'),\n os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy/training/reactions.py')))\n self.assertTrue(filecmp.cmp(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_migration/training/dictionary.txt'),\n os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy/training/dictionary.txt')))\n finally:\n shutil.rmtree(os.path.join(settings['test_data.directory'], 'testing_database/kinetics/families/intra_H_copy'))", "title": "" }, { "docid": "5e5cb26eed96373bd168462fc8987ed4", "score": "0.55088717", "text": "def test_write_sameFileTwiceOverwrite(self):\n pc = SimpleTestData.get_point_cloud()\n export(pc, self.test_file_path)\n export(pc, self.test_file_path, overwrite=True)\n self.assertTrue(os.path.isfile(self.test_file_path))", "title": "" }, { "docid": "b7fee2e9f25837a7d3b8c5e237df19cc", "score": "0.55071664", "text": "def test_save_to_disk():\n donor = Donor(\"Abby Lin\", 5000)\n donor.save_to_disk(\"thank_you_letters\")\n\n os.chdir(\"thank_you_letters\")\n assert os.path.isfile(\"Abby_Lin.txt\")\n with open(\"Abby_Lin.txt\") as f:\n size = len(f.read())\n assert size > 0", "title": "" }, { "docid": "96df76c4608292fc162203f7d7c4a992", "score": "0.5496512", "text": "def test_upload_using_import_export_generic_view(self):\n response = self.upload_file('abc.xml', '/abc_import_export/')\n self.assertEqual(response.status_code, 201)\n self.assertEqual(ABC.objects.count(), 2)\n self.assertTrue(ABC.objects.filter(name='cheetah').exists())", "title": "" }, { "docid": "ad90ad9287f7758a8d0918deb98648a3", "score": "0.5490607", "text": "def setUp(self):\n testutils.GanetiTestCase.setUp(self)\n\n self.plain_output_old_ok = \\\n testutils.ReadTestData(\"bdev-rbd/plain_output_old_ok.txt\")\n self.plain_output_old_no_matches = \\\n testutils.ReadTestData(\"bdev-rbd/plain_output_old_no_matches.txt\")\n self.plain_output_old_extra_matches = \\\n testutils.ReadTestData(\"bdev-rbd/plain_output_old_extra_matches.txt\")\n self.plain_output_old_empty = \\\n testutils.ReadTestData(\"bdev-rbd/plain_output_old_empty.txt\")\n self.plain_output_new_ok = \\\n 
testutils.ReadTestData(\"bdev-rbd/plain_output_new_ok.txt\")\n self.plain_output_new_no_matches = \\\n testutils.ReadTestData(\"bdev-rbd/plain_output_new_no_matches.txt\")\n self.plain_output_new_extra_matches = \\\n testutils.ReadTestData(\"bdev-rbd/plain_output_new_extra_matches.txt\")\n # This file is completely empty, and as such it's not shipped.\n self.plain_output_new_empty = \"\"\n self.json_output_ok = testutils.ReadTestData(\"bdev-rbd/json_output_ok.txt\")\n self.json_output_no_matches = \\\n testutils.ReadTestData(\"bdev-rbd/json_output_no_matches.txt\")\n self.json_output_extra_matches = \\\n testutils.ReadTestData(\"bdev-rbd/json_output_extra_matches.txt\")\n self.json_output_empty = \\\n testutils.ReadTestData(\"bdev-rbd/json_output_empty.txt\")\n self.output_invalid = testutils.ReadTestData(\"bdev-rbd/output_invalid.txt\")\n\n self.volume_name = \"d7ab910a-4933-4ffe-88d0-faf2ce31390a.rbd.disk0\"\n self.test_unique_id = (\"rbd\", self.volume_name)\n self.test_params = {\n constants.LDP_POOL: \"fake_pool\"\n }", "title": "" }, { "docid": "1cf53e2335438b2ffe85c8ad25581d39", "score": "0.54901284", "text": "def test_write_empty(mp_tmpdir, cleantopo_tl):\n with mapchete.open(cleantopo_tl.path) as mp:\n # process and save\n mp.write(mp.config.process_pyramid.tile(5, 0, 0), None)", "title": "" }, { "docid": "999bb2f39caa5e65f667cd541f9ff3d7", "score": "0.54894716", "text": "def test_records_are_spilled(self, file_format):\n\n fn = bqfl.WriteRecordsToFile(\n schema=_ELEMENTS_SCHEMA,\n max_files_per_bundle=2,\n file_format=file_format)\n self.tmpdir = self._new_tempdir()\n\n def check_many_files(output_pcs):\n dest_file_pc = output_pcs[bqfl.WriteRecordsToFile.WRITTEN_FILE_TAG]\n spilled_records_pc = output_pcs[\n bqfl.WriteRecordsToFile.UNWRITTEN_RECORD_TAG]\n\n spilled_records_count = (spilled_records_pc | combiners.Count.Globally())\n assert_that(spilled_records_count, equal_to([3]), label='spilled count')\n\n files_per_dest = (\n dest_file_pc\n | beam.Map(lambda x: x).with_output_types(\n beam.typehints.KV[str, Tuple[str, int]])\n | combiners.Count.PerKey())\n files_per_dest = (\n files_per_dest\n | \"GetDests\" >> beam.Map(\n lambda x: (bigquery_tools.get_hashable_destination(x[0]), x[1])))\n\n # Only table1 and table3 get files. table2 records get spilled.\n assert_that(\n files_per_dest,\n equal_to([('project1:dataset1.table1', 1),\n ('project1:dataset1.table3', 1)]),\n label='file count')\n\n # Check that the files exist\n _ = dest_file_pc | beam.Map(lambda x: x[1][0]) | beam.Map(\n lambda x: hamcrest_assert(os.path.exists(x), is_(True)))\n\n self._consume_input(fn, check_many_files)", "title": "" } ]
3ef7615d0965c054880db2854b139b13
Process a string into a shapely polygon
[ { "docid": "3060d8a552a97cef7923be4ccde69727", "score": "0.5660084", "text": "def proc_polystr(polys, llcrnrlat, llcrnrlon, urcrnrlat, urcrnrlon, tolerance=-1):\n\t\n\tif len(polys) == 0:\n\t\treturn []\n\n\tall_polys = []\t\n\n\tfor i in polys:\n\t\tji = ast.literal_eval(i[0])\n\t\tfor p in ji['coordinates']:\n\t\t\tif isinstance(p[0], list):\n\t\t\t\tfor x in p:\n\t\t\t\t\tif len(x) == 1:\n\t\t\t\t\t\tpoly = Point(x[0]);\n\t\t\t\t\telse:\n\t\t\t\t\t\tpoly = Polygon(x);\n\n\t\t\t\t\tif poly.bounds[0] > llcrnrlat and poly.bounds[1] > llcrnrlon and poly.bounds[2] < urcrnrlat and poly.bounds[3] < urcrnrlon:\n\t\t\t\t\t\tif poly.is_valid:\n\t\t\t\t\t\t\tall_polys.append( poly );\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tall_polys.append( poly.buffer(0) );\n\t\t\telse:\t\n\t\t\t\tif len(p) == 1:\n\t\t\t\t\tpoly = Point(p[0]);\n\t\t\t\telse:\t\t\n\t\t\t\t\tpoly = Polygon(p);\n\n\t\t\t\tif poly.bounds[0] > llcrnrlat and poly.bounds[1] > llcrnrlon and poly.bounds[2] < urcrnrlat and poly.bounds[3] < urcrnrlon:\n\t\t\t\t\tif poly.is_valid:\n\t\t\t\t\t\tall_polys.append( poly );\n\t\t\t\t\telse:\n\t\t\t\t\t\tall_polys.append( poly.buffer(0) );\n\t\n\tall_polys = cascaded_union(all_polys);\t\n\tif tolerance > 0: all_polys = all_polys.simplify(tolerance, preserve_topology=False);\n\n\tif all_polys.geom_type == \"MultiPolygon\":\n\t\treturn [p for p in all_polys if p.geom_type == \"Polygon\"];\n\t\t\n\tif all_polys.geom_type == \"GeometryCollection\":\n\t\treturn [ p for p in all_polys ];\n\t\t\t\n\treturn [all_polys];", "title": "" } ]
[ { "docid": "e25e0406c63c139422244c19be5ab765", "score": "0.68376374", "text": "def _polygon(s, r):\n return MyPolygon(s, r)", "title": "" }, { "docid": "12f2ba308ea094d0ea76e8448d640467", "score": "0.6781244", "text": "def str2polygon(strdata):\n pts = []\n partial = None\n\n # We have two potential formats, one with 4 or 5 places and one\n # with eight!\n vals = re.findall(LAT_LON, strdata)\n for val in vals:\n if len(val) == 8:\n lat = float(val[:4]) / 100.00\n lon = float(val[4:]) / 100.00\n if lon < 40:\n lon += 100.0\n lon = 0 - lon\n pts.append(checker(lon, lat, strdata))\n else:\n fval = float(val) / 100.00\n if partial is None: # we have lat\n partial = fval\n continue\n # we have a lon\n if fval < 40:\n fval += 100.0\n fval = 0 - fval\n pts.append(checker(fval, partial, strdata))\n partial = None\n\n if not pts:\n return None\n if pts[0][0] != pts[-1][0] and pts[0][1] != pts[-1][1]:\n pts.append(pts[0])\n return Polygon(pts)", "title": "" }, { "docid": "b371882fa21554e877ba742709e9f618", "score": "0.63780403", "text": "def polygon(points, **kwargs):\n points_str = \" \".join(f\"{p.x},{p.y}\" for p in points)\n return Shape(tag=\"polygon\", points=points_str, **kwargs)", "title": "" }, { "docid": "43f74a777979bf1c3315d6a60dfb785e", "score": "0.63326424", "text": "def split_polygon(polygon):\n\n if len(polygon) < 3:\n raise ValueError(\n 'At least 3 lat/lon float value pairs must be provided')\n\n polygon_string = ''\n\n for poly in polygon:\n polygon_string += ' '.join(map(str, poly))\n polygon_string += ' '\n\n return polygon_string.strip()", "title": "" }, { "docid": "1cbd3f6ef5dd29b3bb96afbe7bdfd0fb", "score": "0.6218961", "text": "def polygon(geom):\n\n\traw_points = parse_args(geom,['x','y'])\n\t#now a giant list of points [x,y,x,y,x,y,x,y...]\n\n\tx = raw_points[0::2]\n\ty = raw_points[1::2]\n\n\tlx = len(x)\n\tly = len(y)\n\n\n\tif lx != ly:\n\t\tprint(\"coordinates must match in length\")\n\n\tif lx < 1 or ly < 1:\n\t\tprint(\"must have at least one coordinate pair\")\n\n\tpx = (c_int * lx)(*x)\n\tpy = (c_int * ly)(*y)\n\n\treturn lib.ovg_polygon(px, py, lx)", "title": "" }, { "docid": "6ef45e82fc305595bdefa09d6cd7b99d", "score": "0.61822665", "text": "def segments_to_polygons(couplets):\n if len(couplets) > 0:\n runs = [[]]\n runs[0].append(couplets[0]) #Initialize the first polygon with the first line\n couplets = process_segments(couplets,runs)\n polys = form_shapes(runs)\n return polys\n return []", "title": "" }, { "docid": "e85786a936fc4920e130d4662b353d58", "score": "0.6099025", "text": "def __cap_gis_location_add_polygon(element, cap_polygon_text, fallback=False):\n # Make a gis_location_tag.\n tag_resource = SubElement(element, RESOURCE)\n tag_resource.set(NAME, tag_tablename)\n tag_field = SubElement(tag_resource, DATA)\n # Add tag and value children.\n tag_field.set(FIELD, tag_fieldname)\n if fallback:\n tag_field.text = fallback_polygon_tag\n else:\n tag_field.text = polygon_tag\n val_field = SubElement(tag_resource, DATA)\n val_field.set(FIELD, val_fieldname)\n val_field.text = cap_polygon_text", "title": "" }, { "docid": "2fc6f19a1109d0a279ffc7c61afdab04", "score": "0.6083418", "text": "def _GetPolygon(poly):\n out_ring = poly.outerBoundaryIs.LinearRing.coordinates.text.strip().split(' ')\n out_points = [_SplitCoordinates(coord) for coord in out_ring]\n int_points = []\n try:\n for inner_boundary in poly.innerBoundaryIs:\n inner_ring = inner_boundary.LinearRing.coordinates.text.strip().split(' ')\n int_points.append([_SplitCoordinates(coord) for coord in 
inner_ring])\n except AttributeError:\n pass\n return sgeo.Polygon(out_points, holes=int_points)", "title": "" }, { "docid": "36afb019479db3eed355b3cab6bfab95", "score": "0.5982674", "text": "def generateGeometry(in_shp):\n resultingGeometry = []\n if in_shp.header['Shape Type'] == 1:\n for i in range(len(in_shp)):\n resultingGeometry.append(Point(in_shp.get_shape(i)['X'], in_shp.get_shape(i)['Y']))\n elif in_shp.header['Shape Type'] == 3:\n for i in range(len(in_shp)):\n resultingGeometry.append(LineString(in_shp.get_shape(i)['Vertices']))\n elif in_shp.header['Shape Type'] == 5:\n for i in range(len(in_shp)):\n resultingGeometry.append(Polygon(in_shp.get_shape(i)['Vertices']))\n return resultingGeometry", "title": "" }, { "docid": "cbd99e340925074e261b5fc10a0d9dc8", "score": "0.59528816", "text": "def polygon(name, material_name, pts, minimal=False):\n __base_string = \"%s polygon %s\\n0\\n0\\n%d\\n%s\"\n\n assert len(pts) >= 3, \\\n \"Insufficient number of points for %s: %d\" % (name, len(pts))\n\n try:\n pt_coordinates = \"\\n\".join(\n [\" \".join(map(str, (pt.X, pt.Y, pt.Z))) for pt in pts])\n except AttributeError:\n pt_coordinates = \"\\n\".join([\" \".join(map(str, pt)) for pt in pts])\n\n definition = __base_string % (\n __norm_name(material_name),\n __norm_name(name),\n 3 * len(pts),\n pt_coordinates\n )\n\n return definition.replace(\"\\t\", \"\").replace(\"\\n\", \" \") if minimal else definition", "title": "" }, { "docid": "933461bb56e61634782f4cce8931aa3b", "score": "0.5944176", "text": "def polygon_to_geometry(polygon):\n if not isinstance(polygon, Polygon):\n raise TypeError, \"The input data should be a valid Polygon object!\"\n listvert = [\"%s %s\" % (x, y) for (x, y) in polygon.get_verts()]\n listvert.append(listvert[0])\n return ogr.CreateGeometryFromWkt(\"POLYGON ((%s))\" % ','.join(listvert))", "title": "" }, { "docid": "bed5f7e09da937105c91bb4a2e826c91", "score": "0.59364104", "text": "def make_polygon(x, y, name=None):\n x, y = list(x), list(y)\n poly = ij.gui.PolygonRoi(x, y, len(x), ij.gui.Roi.POLYGON)\n if name:\n poly.setName(name)\n return poly", "title": "" }, { "docid": "ad9c5c692af2070daf8e2819995d149b", "score": "0.5840841", "text": "def shape(pyshp_shpobj):\n types = {5: Polygon,\n 3: LineString,\n 1: Point}\n hataripy_geometype = types[pyshp_shpobj.shapeType]\n return hataripy_geometype(pyshp_shpobj.points)", "title": "" }, { "docid": "4a9960933811032672f69b3250ab403b", "score": "0.5822652", "text": "def CreateMongoPolygon(theFeature):\n \n arrayCoordinates = []\n dataset = [ list(pointpair) for pointpair in theFeature.exterior.coords ]\n #ringCoordinates = [list(pointPair) for ring in theFeaturePoints for pointPair in ring] \n arrayCoordinates.append(dataset)\n #Adding any interior polygons \n if list(theFeature.interiors): \n for interiorRings in list(theFeature.interiors):\n rings = [ list(pointPair) for pointPair in interiorRings.coords ]\n arrayCoordinates.append(rings)\n \n return(arrayCoordinates)", "title": "" }, { "docid": "ac5ab4729bb63d84c5dac0a2bedbbdef", "score": "0.58194", "text": "def read_polygon(f):\n\tcoords = []\n\twhile True:\n\t\tline = f.readline()\n\t\t# stop on EOF\n\t\tif not line:\n\t\t\tbreak\n\t\tline = line.strip()\n\t\t# stop on polygon end\n\t\tif line == 'END':\n\t\t\tbreak\n\t\t# skip whitespace lines\n\t\tif not line:\n\t\t\tcontinue\n\t\t# append coords\n\t\tords = line.split()\n\t\tcoords.append(\"%f %f\" % (float(ords[0]), float(ords[1])))\n\tif len(coords) < 3:\n\t\treturn None\n\tif coords[0] != 
coords[-1]:\n\t\tcoords.append(coords[0])\n\treturn '({})'.format(','.join(coords))", "title": "" }, { "docid": "a74f71326d2de3b9f611dddedc044371", "score": "0.58178675", "text": "def test_concave_polygon(self):\n concave_polygon = LineString([(500, 0), (0, 0), (0, 500),\n (500, 500)]).buffer(100)\n label = polylabel(concave_polygon)\n self.assertTrue(concave_polygon.contains(label))", "title": "" }, { "docid": "190eb7f746f14190a29c116d65bef49e", "score": "0.5813182", "text": "def convert_to_polygon(input_array, trim_invalid_geometry=False, autocorrect_invalid_geometry=False):\n input_array = np.array(input_array, dtype=np.dtype('O'))\n if input_array.size == 0:\n return 'undefined', input_array\n\n if (len(input_array.shape) == 1 or len(input_array.shape) > 2) and \\\n (not len(input_array.shape) == 5 and not len(input_array.shape) == 3):\n raise ValueError('Invalid array number of dimensions: '\n 'Expected a 2D array, found {}D.'.format(len(input_array.shape)))\n\n if len(input_array.shape) == 5 and not input_array.shape[4] == 2:\n raise ValueError('Invalid array fifth dimension: '\n 'Expected 2, found {}.'.format(len(input_array.shape)))\n\n elif len(input_array.shape) == 3 and not input_array.shape[2] == 1:\n raise ValueError('Invalid array third dimension: '\n 'Expected 1, found {}.'.format(len(input_array.shape)))\n\n object_array = np.ndarray((input_array.shape[0], input_array.shape[1]), dtype=np.dtype('O'))\n for i, coordinate in enumerate(input_array[:, 0]):\n line = [polygons(linearrings(coordinate[0]), holes=[linearrings(hole) for hole in coordinate[1:]])] \\\n if len(coordinate) > 1 else [polygons(linearrings(coordinate[0]))]\n line.extend(input_array[i, 1:])\n object_array[i] = np.array(line, dtype=np.dtype('O'))\n\n if autocorrect_invalid_geometry:\n object_array[:, 0] = _clean_multi_geometries(make_valid(object_array[:, 0]))\n\n if trim_invalid_geometry:\n object_array = object_array[is_valid(object_array[:, 0]), :]\n\n if not np.all(is_type(object_array[:, 0], GeometryType.POLYGON)):\n raise ValueError('Conversion is impossible: Some geometries could not be converted to valid polygons.')\n\n return object_array", "title": "" }, { "docid": "0178e413b95930f31a1c8595fa6fad75", "score": "0.5792328", "text": "def __init__(self, text):\n that = caplib.Polygon.fromString(text)\n super(ShadowPolygon, self).__init__(list(that))", "title": "" }, { "docid": "bfd0e654537b25aed1f8bd47db618eae", "score": "0.57899034", "text": "def struct_polygon(a, oid_fld, shp_fld, SR):\r\n pts = a #_arr_common(a, oid_fld, shp_fld)\r\n f = []\r\n for pt in pts:\r\n f.append(arcpy.Polygon(arcpy.Array([arcpy.Point(*p)\r\n for p in pt.tolist()]), SR))\r\n return f", "title": "" }, { "docid": "8151837df243272b8ebb1b9d16a34a94", "score": "0.5775394", "text": "def convert_poly(input_file, cur):\n\twith open(input_file, 'r') as f:\n\t\tname = f.readline().strip()\n\t\twkt = read_multipolygon(f)\n\tprint ' ', name\n\ttry:\n\t\tcur.execute('INSERT INTO borders (name, geom, modified) VALUES (%s, ST_GeomFromText(%s), now())', (name, wkt))\n\texcept psycopg2.Error as e:\n\t\tprint wkt\n\t\traise e", "title": "" }, { "docid": "e46beaa125fb02309d94be03939172f0", "score": "0.57261074", "text": "def test_CSD_with_polygon():\n name = 'CSD'\n test_name = 'test_CSD_with_polygon'\n check_feature(NS, test_name, name, image=NS.image_uri, gobject=NS.gobject_uri)", "title": "" }, { "docid": "a3f69fe22044613d1511539276d62b63", "score": "0.5724232", "text": "def read_shape(spec: str) -> Polygon:\n if os.path.exists(spec):\n ext 
= os.path.splitext(spec)[1].lower()\n if ext == '.wkt':\n return read_wkt(spec)\n elif ext == '.wkb':\n return read_wkb(spec)\n try:\n return read_wkt(spec)\n except shapely.errors.ReadingError:\n pass\n try:\n return read_wkb(spec)\n except shapely.errors.ReadingError:\n pass\n raise ValueError(\"Unknown shape file format: {!r}\".format(spec))\n if isinstance(spec, str):\n try:\n minx, miny, maxx, maxy = map(float, spec.split(','))\n return shapely.geometry.box(minx, miny, maxx, maxy)\n except ValueError:\n pass\n try:\n return shapely.wkt.loads(spec)\n except shapely.errors.ReadingError:\n pass\n raise ValueError(\"Unknown shape definition: {!r}\".format(spec))", "title": "" }, { "docid": "81e90164632af4d6fd6e4213b8f0b82c", "score": "0.57132894", "text": "def process_latlon(self):\n data = self.unixtext.replace(\"\\n\", \" \")\n search = LAT_LON_PREFIX.search(data)\n if search is None:\n return None\n pos = search.start()\n newdata = data[pos + 9 :]\n # Go find our next non-digit, non-space character, if we find it, we\n # should truncate our string, this could be improved, I suspect\n search = re.search(r\"[^\\s0-9]\", newdata)\n if search is not None:\n pos2 = search.start()\n newdata = newdata[:pos2]\n\n poly = str2polygon(newdata)\n if poly is None:\n return None\n\n # check 0, PGUM polygons are east longitude akrherz/pyIEM#74\n if self.tp.source == \"PGUM\":\n newpts = [[0 - pt[0], pt[1]] for pt in poly.exterior.coords]\n poly = Polygon(newpts)\n\n # check 1, is the polygon valid?\n if not poly.is_valid:\n self.tp.warnings.append(\n (\"LAT...LON polygon is invalid!\\n%s\") % (poly.exterior.xy,)\n )\n return None\n # check 2, is the exterior ring of the polygon clockwise?\n if poly.exterior.is_ccw:\n # No longer a warning as it was too much noise\n LOG.info(\n \"LAT...LON polygon exterior is CCW, reversing\\n%s\",\n poly.exterior.xy,\n )\n poly = Polygon(\n zip(poly.exterior.xy[0][::-1], poly.exterior.xy[1][::-1])\n )\n self.giswkt = \"SRID=4326;%s\" % (\n dumps(MultiPolygon([poly]), rounding_precision=6),\n )\n return poly", "title": "" }, { "docid": "7a0cf07413836797c3dbb715839002ff", "score": "0.56990635", "text": "def polygons_to_shapely_format(supplied_polygons: Union[str, Polygon, MultiPolygon, List[Polygon], np.ndarray]):\n if isinstance(supplied_polygons, str):\n # Assume file\n assert os.path.exists(supplied_polygons)\n if supplied_polygons[-4:] == \".shp\":\n # Read shapefile\n df = gpd.GeoDataFrame.from_file(supplied_polygons)\n poly_ls = list(df.geometry)\n assert all([isinstance(a, Polygon) for a in poly_ls])\n elif supplied_polygons[-4:] == \".kml\":\n raise NotImplementedError(\"KML import not implemented\")\n\n elif supplied_polygons[-4:] == \".csv\":\n df = pd.read_csv(supplied_polygons)\n # Assumes one polygon, with x y coordinates in first columns\n x = list(df.iloc[:, 0])\n y = list(df.iloc[:, 1])\n poly = Polygon([(xi, yi) for xi, yi in zip(x, y)])\n poly_ls = [poly]\n\n elif isinstance(supplied_polygons, np.ndarray):\n # Assumes one polygon, with x y coordinates in first two columns\n assert supplied_polygons.shape[1] == 2, \"Two columns expected\"\n poly = Polygon([(coord[0], coord[1]) for coord in supplied_polygons])\n poly_ls = [poly]\n\n elif isinstance(supplied_polygons, Polygon):\n poly_ls = [supplied_polygons]\n\n elif isinstance(supplied_polygons, MultiPolygon):\n poly_ls = list(supplied_polygons)\n\n elif isinstance(supplied_polygons, list):\n assert all([isinstance(a, Polygon) for a in supplied_polygons])\n poly_ls = [supplied_polygons]\n\n else:\n 
raise TypeError(\"Unexpected input: supply shp, csv, numpy array, shapely polygon, list of polygons or multipol\")\n\n return poly_ls", "title": "" }, { "docid": "4eb7452fdd579ffd89dbaf1c844d4461", "score": "0.5676479", "text": "def polygon(inputsides, inputlength):\n side_nums = int(inputsides)\n side_length = inputlength\n new_string = \"\"\n if side_nums != 0:\n angle_btwn_sides = str(int(360/side_nums))\n while side_nums !=0:\n new_string += \"F\" + side_length + \" L\" + angle_btwn_sides + \" \"\n side_nums -=1\n return new_string", "title": "" }, { "docid": "e77e4a331e0283ea2b23d49f30b8ca92", "score": "0.56572044", "text": "def polygonal(s,n):\n return (n**2 * (s - 2) - n * (s - 4)) // 2", "title": "" }, { "docid": "970daeb0c7e9d73980f8ef98e242ea4a", "score": "0.56489086", "text": "def triangle_encoding(svgdoc, xpos, ypos, blockx, blocky, code):\n svgdoc.add(svgdoc.polygon(points=[(xpos * blockx, ypos * blocky), \n (xpos * blockx + blockx, ypos * blocky + blocky),\n (xpos * blockx, ypos * blocky + blocky) if code\n else (xpos * blockx + blockx, ypos * blocky),\n (xpos * blockx, ypos * blocky)],\n fill='black'))", "title": "" }, { "docid": "a1306736e053d40c3fef2b475827c48b", "score": "0.5628904", "text": "def test_CLD_with_polygon():\n name = 'CLD'\n test_name = 'test_CLD_with_polygon'\n check_feature(NS, test_name, name, image=NS.image_uri, gobject=NS.gobject_uri)", "title": "" }, { "docid": "08849839de43e8c429f74db78bf5d2fd", "score": "0.5553377", "text": "def create_polygon(self, shape_polygon, alpha=np.nan):\n if np.isfinite(alpha):\n try:\n polygon = alphashape.alphashape(self.gdf, alpha)\n polygon.crs = self.crs\n polygon.to_file(shape_polygon)\n print('Polygon *.shp saved successfully.')\n except FileNotFoundError as e:\n print(e)\n else:\n try:\n polygon = alphashape.alphashape(self.gdf)\n except FileNotFoundError as e:\n print(e)\n else:\n polygon.crs = self.crs\n polygon.to_file(shape_polygon)\n print('Polygon *.shp saved successfully.')", "title": "" }, { "docid": "d3332c24c11f5289d36c14e1d6692057", "score": "0.55448765", "text": "def gen_polygon_from_layout(rp, extra_points=[], filename='scene.ply'):\n ceil = np.delete(rp, range(1, rp.shape[0],2),axis=0)\n ceil = np.delete(ceil,1,axis=1)\n tri = Delaunay(ceil)\n points = tri.simplices\n points_ceil = points*2+1\n new_points_ceil = np.array([points_ceil[:,1], points_ceil[:,2], points_ceil[:,0]]).transpose()\n points_floor = points*2\n new_points_floor = np.array([points_floor[:,2], points_floor[:,1], points_floor[:,0]]).transpose()\n size_pol = np.ones((new_points_ceil.shape[0],1))+2\n new_points_ceil = np.append(size_pol, new_points_ceil, axis=1)\n new_points_floor = np.append(size_pol, new_points_floor, axis=1)\n points_wall = generateTriplePoints(len(rp))\n n_faces = len(new_points_ceil) + len(new_points_floor) + len(points_wall)\n with open(filename, \"w\") as a_file:\n a_file.write(\"ply\\nformat ascii 1.0\\nelement vertex \"+str(len(rp)+len(extra_points))+\"\\nproperty float x\\nproperty float y\\nproperty float z\\nelement face \"+str(n_faces)+\"\\nproperty list uchar int vertex_index\\nend_header\\n\")\n with open(filename, \"ab\") as a_file:\n np.savetxt(a_file, rp, fmt=\"%.3f\")\n if extra_points != []:\n np.savetxt(a_file, extra_points, fmt=\"%d\")\n np.savetxt(a_file, new_points_ceil, fmt=\"%d\")\n np.savetxt(a_file, new_points_floor, fmt=\"%d\")\n np.savetxt(a_file, points_wall, fmt=\"%d\")", "title": "" }, { "docid": "8753e1ee828365fa9e2267867770b050", "score": "0.55190265", "text": "def polygonize(self, 
lines):\n source = getattr(lines, 'geoms', None) or lines\n obs = [self.shapeup(l) for l in source]\n geom_array_type = c_void_p * len(obs)\n geom_array = geom_array_type()\n for i, line in enumerate(obs):\n geom_array[i] = line._geom\n product = lgeos.GEOSPolygonize(byref(geom_array), len(obs))\n collection = geom_factory(product)\n for g in collection.geoms:\n clone = lgeos.GEOSGeom_clone(g._geom)\n g = geom_factory(clone)\n g._owned = False\n yield g", "title": "" }, { "docid": "d2f54192ebdb3dcc14869184c767dcb2", "score": "0.54879344", "text": "def polygon(self, coordinates):\n\n if not coordinates:\n return self\n \n # make sure we were passed something iterable\n try:\n iter(coordinates)\n except TypeError:\n raise ValueError(\"A line must be an iterable of coordinate tuples. Ex: [(90,90), (91, 90), ...]\")\n\n # polygon requires at least 4 pairs of coordinates\n if len(coordinates) < 4:\n raise ValueError(\"A polygon requires at least 4 pairs of coordinates.\")\n\n # convert to floats\n as_floats = []\n for lon, lat in coordinates:\n as_floats.extend([float(lon), float(lat)])\n\n # last point must match first point to complete polygon\n if as_floats[0] != as_floats[-2] or as_floats[1] != as_floats[-1]:\n raise ValueError(\"Coordinates of the last pair must match the first pair.\")\n\n # convert to strings\n as_strs = [str(val) for val in as_floats]\n\n self.params[\"polygon\"] = \",\".join(as_strs)\n\n return self", "title": "" }, { "docid": "856325035d4c18cb2f4cad4a3b6cb0f1", "score": "0.5476853", "text": "def shape_to_polygons(lines, idKeyword):\n from itertools import tee, izip\n def pairwise(iterable):\n a,b = tee(iterable)\n next(b, None)\n return izip(a, b)\n polygons = [[tuple(lines[0]['shape'])]]\n inhabitants = [lines[0]['population']]\n for a, b in pairwise(lines):\n if a[idKeyword] != b[idKeyword]:\n polygons.append([])\n inhabitants.append(b['population'])\n polygons[-1].append(tuple(b['shape']))\n assert len(polygons) == len(inhabitants)\n return polygons, inhabitants", "title": "" }, { "docid": "820d0bc7e1e54c6cbdd4bcd8884144e5", "score": "0.5464006", "text": "def PolygonOnTriangulation(*args):\n return _BRep.BRep_Tool_PolygonOnTriangulation(*args)", "title": "" }, { "docid": "bcde3349b9b6806be8ab3da7fc8dd640", "score": "0.5462849", "text": "def newpolygon(self, **kwargs):\r\n return self.document.newpolygon(**kwargs)", "title": "" }, { "docid": "e9ae9620b1c3e00df879f812ce3a213b", "score": "0.5454923", "text": "def _polygon_check(self, coordinate: str, storage: Storage) -> bool:\n polygon = 'POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'\n self.assertEqual(storage.coordinates, polygon)\n polygon = wkt.loads(polygon)\n point = wkt.loads(f'POINT ({coordinate})')\n return polygon.contains(point) or polygon.covers(point)", "title": "" }, { "docid": "641247cb5466019c3a18a85036aecdd7", "score": "0.5442807", "text": "def generate_polygon():\n with open('sweden.json') as f:\n data = json.load(f)\n\n arr = data['geometry']['coordinates']\n dt = []\n res = []\n for x in arr:\n for poly in x:\n for p in poly:\n dt.append(p)\n res.append(dt)\n dt = []\n \n\n\n for i,p in enumerate(res):\n res[i] = np.array([[ xtile(x), ytile(y)] for x,y in p])\n\n return res", "title": "" }, { "docid": "46d09ff2226d7fecb9355e58116bfcb3", "score": "0.54418343", "text": "def to_polygon(self):\n from .polys import Polygon\n return Polygon(self.coords, label=self.label)", "title": "" }, { "docid": "4f68f94a1f47059430fb11b78fc38b1c", "score": "0.5434426", "text": "def polygonize(self, data=None, 
mask=None, connectivity=4, transform=None):\n if not _HAS_RASTERIO:\n raise ImportError('Requires rasterio module')\n if data is None:\n data = self.mask.astype(np.uint8)\n if mask is None:\n mask = self.mask\n if transform is None:\n transform = self.affine\n shapes = rasterio.features.shapes(data, mask=mask, connectivity=connectivity,\n transform=transform)\n return shapes", "title": "" }, { "docid": "d135b3d2a945cd59fc8742e6e90d93be", "score": "0.5425607", "text": "def reshape_polygon(polygon):\n \n latitudes = polygon.latitudes\n longitudes = polygon.longitudes\n polygon_vertices = []\n\n for x in xrange(0,len(latitudes)):\n polygon_vertices.append([latitudes[x], longitudes[x]])\n\n return polygon_vertices", "title": "" }, { "docid": "2eb3acbbbd074f42272b9f5f081f4126", "score": "0.5410999", "text": "def shapely_insert(x, y):\n return shapely.geometry.Polygon(np.transpose((x, y)))", "title": "" }, { "docid": "5cd324845d056d9862232d00acefe501", "score": "0.5402253", "text": "def makePolygonFrom(r1, r2, r3) :\n case = [r1, r2, r3]\n case.sort(key=lambda r : r[0]) # lexicographic order\n if case[0][0] == 'aa' :\n if case[1][0] == 'aa' and case[2][0] == 'aa' : # Pattern 1\n return []\n if case[1][0] == 'ab' and case[2][0] == 'ab' : # Pattern 2\n return [case[1][1], case[2][1],\n Line(case[1][1].p1, case[2][1].p1)]\n if case[1][0] == 'ac' and case[2][0] == 'ac' : # Pattern 3\n return [case[1][1], case[2][1],\n Line(case[1][1].p1, case[2][1].p1),\n Line(case[1][1].p2, case[2][1].p2)]\n elif case[0][0] == 'ab' :\n if case[1][0] == 'ab' and case[2][0] == 'bb' : # Pattern 4\n return [case[0][1], case[1][1], case[2][1],\n Line(case[0][1].p1, case[1][1].p1)]\n if case[1][0] == 'ac' and case[2][0] == 'bc' : # Pattern 5\n return [case[0][1], case[1][1], case[2][1],\n Line(case[1][1].p1, case[0][1].p1),\n Line(case[1][1].p2, case[2][1].p2)]\n elif case[0][0] == 'ac' :\n if case[1][0] == 'ac' and case[2][0] == 'cc' : # Pattern 6\n return [case[0][1], case[1][1],\n Line(case[0][1].p1, case[1][1].p1),\n Line(case[0][1].p2, case[1][1].p2)]\n elif case[0][0] == 'bb' :\n if case[1][0] == 'bb' and case[2][0] == 'bb' : # Pattern 7\n return [case[0][1], case[1][1], case[2][1]]\n if case[1][0] == 'bc' and case[2][0] == 'bc' : # Pattern 8\n return [case[0][1], case[1][1], case[2][1],\n Line(case[1][1].p2, case[2][1].p2)]\n elif case[0][0] == 'bc' :\n if case[1][0] == 'bc' and case[2][0] == 'cc' : # Pattern 9\n return [case[0][1], case[1][1],\n Line(case[0][1].p2, case[1][1].p2)]\n elif case[0][0] == 'cc' :\n if case[1][0] == 'cc' and case[2][0] == 'cc' : # Pattern 10\n return []", "title": "" }, { "docid": "e2de4cbbb21ce2f9e494032393f55d02", "score": "0.5396145", "text": "def make_shapes_for_poly(self, poly):\n\n # TODO: handle curves\n opoly = Polygon()\n for vertex in poly.vertex:\n opoly.add_point(self.make_length(vertex.x),\n self.make_length(vertex.y))\n yield opoly", "title": "" }, { "docid": "df9781b700eaf1bc4cbe5a8f69cd5c9d", "score": "0.539061", "text": "def test_DCD_with_polygon():\n name = 'DCD'\n test_name = 'test_DCD_with_polygon'\n check_feature(NS, test_name, name, image=NS.image_uri, gobject=NS.gobject_uri)", "title": "" }, { "docid": "0f7a7daee31cedc4e479848cf772a937", "score": "0.5386784", "text": "def _convert_to_polygon(gpd_obj):\n for i in gpd_obj.index:\n if gpd_obj.loc[i, 'geometry'].type is not 'Polygon':\n geom = gpd_obj.loc[i, 'geometry']\n area = [glac.area for glac in geom]\n gpd_obj.set_value(i, 'geometry', geom[np.argmax(area)])\n for j in range(len(geom)):\n if not j == 
np.argmax(area) and geom[j].area > 1:\n gpd_obj, bool = _merge_sliver(gpd_obj, geom[j])\n return gpd_obj", "title": "" }, { "docid": "c05868e5c10cfc8305a84b6385e33d50", "score": "0.53851527", "text": "def make_poly_coords(xarr, yarr, zpnt=None, triangles=False):\n\n def process_input(array):\n flat = numpy.hstack([array[0, :], array[1, ::-1]])\n return flat[~numpy.isnan(flat)]\n\n x = process_input(xarr)\n y = process_input(yarr)\n if (not isinstance(xarr, numpy.ma.MaskedArray) or xarr.mask.sum() == 0 or\n (triangles and len(x) == 3)):\n if zpnt is None:\n coords = numpy.vstack([x, y]).T\n else:\n z = numpy.array([zpnt] * x.shape[0])\n coords = numpy.vstack([x, y, z]).T\n\n else:\n coords = None\n\n return coords", "title": "" }, { "docid": "af93bdd8a83733f68584d84edb1864a6", "score": "0.5383818", "text": "def construct_sql_array_polygons(layer):\n # Initialization of the sql array containing the study area's features geometry\n array_polygons = \"array[\"\n # Retrieve the CRS of the layer\n crs = layer.sourceCrs().authid()\n if crs.split(':')[0] != 'EPSG':\n raise QgsProcessingException(\"\"\"Le SCR (système de coordonnées de référence) de votre couche zone d'étude n'est pas de type 'EPSG'.\n Veuillez choisir un SCR adéquat.\n NB : 'EPSG:2154' pour Lambert 93 !\"\"\")\n else:\n crs = crs.split(':')[1]\n # For each entity in the study area...\n for feature in layer.getFeatures():\n # Retrieve the geometry\n area = feature.geometry() # QgsGeometry object\n # Retrieve the geometry type (single or multiple)\n geomSingleType = QgsWkbTypes.isSingleType(area.wkbType())\n # Increment the sql array\n if geomSingleType:\n array_polygons += \"ST_transform(ST_PolygonFromText('{}', {}), 2154), \".format(area.asWkt(), crs)\n else:\n array_polygons += \"ST_transform(ST_MPolyFromText('{}', {}), 2154), \".format(area.asWkt(), crs)\n # Remove the last \",\" in the sql array which is useless, and end the array\n array_polygons = array_polygons[:len(array_polygons)-2] + \"]\"\n return array_polygons", "title": "" }, { "docid": "0cbfb83e4cd3e8299558e7b28199366d", "score": "0.5380678", "text": "def write_tri_to_shapefile(triangulation,output_point_shapefile = 'SuperTri_points.shp',output_poly_shapefile = 'SuperTri_poly.shp'):\n \n super_tri = triangulation\n \n print('creating shapely point list ..')\n points = [shp.Point(super_tri['vertices'][i]) for i in range(len(super_tri['vertices']))]\n print('creating shapely polygon list ..')\n polygons = [shp.Polygon(super_tri['vertices'][super_tri['triangles'][i]]) for i in range(len(super_tri['triangles']))]\n\n\n print('writing point shapefile ..')\n t1=time()\n\n # Write a new Shapefile - points\n schema = {\n 'geometry': 'Point',\n 'properties': {'id': 'int'},\n }\n \n with fiona.open(output_point_shapefile, 'w', 'ESRI Shapefile', schema) as c:\n for i in range(len(points)):\n c.write({\n 'geometry': mapping(points[i]),\n 'properties': {'id': i,},\n })\n print(str(time()-t1)[0:8]+' seconds') \n\n\n print('writing polygon shapefile ..')\n t1=time()\n\n # Write a new Shapefile - polygons\n schema = {\n 'geometry': 'Polygon',\n 'properties': {'id': 'int','vertices':'str'},\n }\n\n with fiona.open(output_poly_shapefile, 'w', 'ESRI Shapefile', schema) as c:\n for i in range(len(polygons)):\n c.write({\n 'geometry': mapping(polygons[i]),\n 'properties': {'id': i,'vertices':str(super_tri['triangles'][i]).strip('[').strip(']')},\n })\n print(str(time()-t1)[0:8]+' seconds')\n \n return 'patience is a virtue'", "title": "" }, { "docid": "ed59e80b5efce153e4838f69fca0b97b", 
"score": "0.5375103", "text": "def obj_polygon(pnts, SR=None):\r\n f = []\r\n for pt in pnts:\r\n f.append(arcpy.Polygon(arcpy.Array([arcpy.Point(*p)\r\n for p in pt.tolist()]), SR))\r\n return f", "title": "" }, { "docid": "48bc05967ce80361d64ec06f60f225e9", "score": "0.5358353", "text": "def getType(self):\n return \"GPolygon\"", "title": "" }, { "docid": "5581196b5593a1aed007cf92b9e51fb2", "score": "0.5356674", "text": "def polygon_to_points( poly ):\n xlist = []\n ylist = []\n for vertex in poly:\n xlist.append(vertex[0])\n ylist.append(vertex[1])\n xlist.append(xlist[0])\n ylist.append(ylist[0])\n return (xlist, ylist)", "title": "" }, { "docid": "0d1256cf391b0a075380ae3c6f7c8bcf", "score": "0.53517836", "text": "def export_polygon(pnum, coordinates):\n print(\" <Placemark>\")\n print(\" <name>[\" + one_name + \"] Polygon \" + str(pnum) + \"</name>\")\n print(\" <styleUrl>#StyleMap\" + one_style + \"</styleUrl>\")\n print(\" <Polygon>\")\n print(\" <outerBoundaryIs>\")\n print(\" <LinearRing>\")\n print(\" <tessellate>1</tessellate>\")\n print(\" <coordinates>\")\n for pair in coordinates:\n print(\" \" + str(pair[0]) + \",\" + str(pair[1]) + \",0\")\n print(\" </coordinates>\")\n print(\" </LinearRing>\")\n print(\" </outerBoundaryIs>\")\n print(\" </Polygon>\")\n print(\" </Placemark>\")", "title": "" }, { "docid": "e46a464bab639d4f751488d8443b6726", "score": "0.53506297", "text": "def arr_polygon_fc(a, out_fc, SR):\r\n f = obj_polygon(a, SR)\r\n return shapes_fc(f, out_fc)", "title": "" }, { "docid": "9512e783c330398d77ec1cec73845b92", "score": "0.5349517", "text": "def test_gmlobject_string(self):\n with open('tests/data/util/location/polygon_single_31370.gml',\n 'r') as gmlfile:\n gml = gmlfile.read()\n\n gml_element = etree.fromstring(gml.encode('utf8'))\n gml_element = gml_element.find(\n './/{http://www.opengis.net/gml}Polygon')\n\n gml_object = GmlObject(etree.tostring(gml_element).decode('utf8'))\n\n assert clean_xml(etree.tostring(\n gml_object.get_element()).decode('utf8')) == clean_xml(\n '<gml:Polygon '\n 'srsName=\"urn:ogc:def:crs:EPSG::31370\"><gml:exterior><gml'\n ':LinearRing><gml:posList>108636.150020818 194960.844295764 '\n '108911.922161617 194291.111953824 109195.573506438 '\n '195118.42837622 108636.150020818 '\n '194960.844295764</gml:posList></gml:LinearRing></gml'\n ':exterior></gml:Polygon>')", "title": "" }, { "docid": "2c16be0fbaca2401206cab7bb7a805f5", "score": "0.53356624", "text": "def _make_polygon(gpd_obj):\n\n gpd_obj = gpd_obj[(gpd_obj.type == 'GeometryCollection') |\n (gpd_obj.type == 'Polygon') |\n (gpd_obj.type == 'MultiPolygon')]\n if not gpd_obj.empty:\n collection = gpd_obj[(~gpd_obj.is_empty) &\n (gpd_obj.type == 'GeometryCollection')]\n # choose only polygons or multipolygons\n for c in collection.index:\n geo = collection.loc[c, 'geometry']\n new = MultiPolygon()\n for obj in geo:\n if obj.type in ['Polygon', 'MultiPolygon']:\n new = new.union(obj)\n gpd_obj = gpd_obj.copy()\n gpd_obj.loc[c, 'geometry'] = new\n return gpd_obj", "title": "" }, { "docid": "f557169bd47c3b097c7952ad213f70d4", "score": "0.532676", "text": "def read_multipolygon(f):\n\tpolygons = []\n\tcur_poly = []\n\twhile True:\n\t\ttitle = f.readline().strip()\n\t\tif not title:\n\t\t\treturn None\n\t\tif title == 'END':\n\t\t\tbreak\n\t\touter = title.strip()[0] != '!'\n\t\tpolygon = read_polygon(f)\n\t\tif polygon != None:\n\t\t\tif outer:\n\t\t\t\tif cur_poly:\n\t\t\t\t\tpolygons.append('({})'.format(','.join(cur_poly)))\n\t\t\t\tcur_poly = 
[polygon]\n\t\t\telse:\n\t\t\t\tcur_poly.append(polygon)\n\tif cur_poly:\n\t\tpolygons.append('({})'.format(','.join(cur_poly)))\n\t\t\n\tif len(polygons) == 1:\n\t\treturn \"POLYGON\" + polygons[0]\n\telse:\n\t\treturn \"MULTIPOLYGON({})\".format(','.join(polygons))", "title": "" }, { "docid": "3b523afd5765c7286552765329912ef5", "score": "0.52952707", "text": "def geometry(self):\n ring = ogr.Geometry(ogr.wkbLinearRing)\n for point in self.corner_points():\n ring.AddPoint(point[0], point[1])\n ring.CloseRings()\n polygon = ogr.Geometry(ogr.wkbPolygon)\n polygon.AddGeometry(ring)\n return polygon", "title": "" }, { "docid": "c8dad11a1e7fdbaa085e6eae794d9bee", "score": "0.5288707", "text": "def point_to_poly_apply(geo, side_len):\n\n half_side = side_len * 0.5\n l1 = Polygon([[geo.x + half_side, geo.y + half_side], [geo.x + half_side, geo.y - half_side],\n [geo.x - half_side, geo.y - half_side], [geo.x - half_side, geo.y + half_side]])\n return (l1)", "title": "" }, { "docid": "b0e55267ed24879f68d3598f497ff947", "score": "0.5280317", "text": "def polygon_to_wkt(geojson):\n return f\"SRID=4326;{shape(geojson).wkt}\"", "title": "" }, { "docid": "8a09813f0d51f7134031b237e069f1a3", "score": "0.52732164", "text": "def calc_coordinates(in_string, sw_set, mw_dict):\n\tcoordinate_list = list()\n\tword = \"\"\n\tfor i in range(len(in_string)):\n\t\tif in_string[i].isalpha():\n\t\t\tword += in_string[i].lower()\n\t\telse:\n\t\t\tif (word in sw_set) or (word[-3:] == \"ing\" and catch_continuous):\n\t\t\t\tcoordinate_list.append((i-len(word), len(word)))\n\t\t\telif word in mw_dict:\n\t\t\t\tfor target_word in mw_dict[word]:\n\t\t\t\t\tspotlight_word = in_string[i:i+len(target_word)]\n\t\t\t\t\tspotlight_word.replace('\\n', ' ')\n\t\t\t\t\tif target_word == spotlight_word:\n\t\t\t\t\t\tcoordinate_list.append((i - len(word), len(word + target_word)))\n\t\t\tword = \"\"\n\treturn coordinate_list", "title": "" }, { "docid": "5558099f6e8a9e1769e848ce6abb7887", "score": "0.5271385", "text": "def BRep_Tool_PolygonOnTriangulation(*args):\n return _BRep.BRep_Tool_PolygonOnTriangulation(*args)", "title": "" }, { "docid": "27d941982992ad29f96584d1ac0f39ec", "score": "0.52703243", "text": "def to_polygon_style(self):\r\n\t\treturn PolygonStyle(self.line_pattern, self.line_width, self.line_color,\r\n\t\t\t\t\t\t\tfill_color=\"none\", dash_pattern=self.dash_pattern,\r\n\t\t\t\t\t\t\tlabel_style=self.label_style, label_anchor=self.label_anchor,\r\n\t\t\t\t\t\t\talpha=self.alpha,\r\n\t\t\t\t\t\t\tthematic_legend_style=self.thematic_legend_style)", "title": "" }, { "docid": "dd7253e4ca65252a69fded2f3f60bae5", "score": "0.525891", "text": "def _line_to_polygon(p1, p2, width, divergence=0):\n if divergence < 0 or divergence >= np.pi:\n raise ValueError(\"Divergence value is {}. 
It should be >= 0 and < pi.\".format(divergence))\n # increase line length in case of line rotation\n p1 = np.array(p1)\n p2 = np.array(p2)\n r = p2 - p1\n r = r / np.cos(divergence)\n p2 = p1 + r\n # take into account width and divergence\n line = shapely.geometry.LineString([p1, p2])\n ll = line.parallel_offset(width/2, 'left')\n p_top = ll.coords[0]\n ll = shapely.affinity.rotate(ll, divergence/2, origin=p_top, use_radians=True)\n lr = line.parallel_offset(width/2, 'right')\n p_top = lr.coords[1]\n lr = shapely.affinity.rotate(lr, -divergence/2, origin=p_top, use_radians=True)\n p1, p2 = ll.coords\n p3, p4 = lr.coords\n return p1, p2, p3, p4", "title": "" }, { "docid": "c1b70c98b4d587bfc672962e06c40fd7", "score": "0.5256694", "text": "def test_RSD_with_polygon():\n name = 'RSD'\n test_name = 'test_RSD_with_polygon'\n check_feature(NS, test_name, name, image=NS.image_uri, gobject=NS.gobject_uri)", "title": "" }, { "docid": "ca0041bb53e0a6d41816a6299072b1a0", "score": "0.5254453", "text": "def get_polygon(self, vv_x, vv_y, poly):\n self._get_polygon(vv_x, vv_y, poly)", "title": "" }, { "docid": "ee597e01033ca74d42ca45335a6aea75", "score": "0.5251572", "text": "def testRectObjToPolygon(self):\n r = mapscript.rectObj(-1.0, -2.0, 3.0, 4.0)\n s = r.toPolygon()\n assert s.numlines == 1, s.numlines\n line = self.getLineFromShape(s, 0)\n assert line.numpoints == 5, line.numpoints\n point = self.getPointFromLine(line, 0)\n self.assertAlmostEqual(point.x, -1.0)\n self.assertAlmostEqual(point.y, -2.0)", "title": "" }, { "docid": "795c1d19e17ac21d23144f2216d88431", "score": "0.52244776", "text": "def to_shapely(self, frame_size=None):\n if self.closed:\n points = []\n for shape in self.points: # pylint: disable=not-an-iterable\n if shape:\n shape = list(shape) + [shape[0]]\n\n points.append(shape)\n else:\n points = self.points\n\n if frame_size is not None:\n w, h = frame_size\n points = [[(x * w, y * h) for x, y in shape] for shape in points]\n\n if len(points) == 1:\n if self.filled:\n return sg.Polygon(points[0])\n\n return sg.LineString(points[0])\n\n if self.filled:\n return sg.MultiPolygon(list(zip(points, itertools.repeat(None))))\n\n return sg.MultiLineString(points)", "title": "" }, { "docid": "bbf78f330948c561cbc21b4ca072a013", "score": "0.5223984", "text": "def triangulatePolygon(polygon):\n\ttry:\n\t\timport Polygon\n\t\treturn triangulatePolygon_gpc(polygon)\n\texcept ImportError:\n\t\tpass\n\tif givePP2TWarning:\n\t\twarnings.warn('Using pypoly2tri for triangulation; for non-commercial use, consider'\n\t\t ' installing the faster Polygon3 library (pip install Polygon3)')\n\treturn triangulatePolygon_pypoly2tri(polygon)", "title": "" }, { "docid": "abe3510c3599efb22dfabe6c52f8c63b", "score": "0.52053684", "text": "def test_polygon():\n verts = [[0,0],[1,0],[1,1],[0,1]]\n p = Polygon(verts)\n assert 1.0 == p.area\n assert verts == p.vertices\n assert [([0, 0], [1, 0]), ([1, 0], [1, 1]), ([1, 1], [0, 1]), ([0, 1], [0, 0])] == p._segments\n assert 4.0 == p.perimiter\n\n verts = [[0,0],[1,0],[1,1]]\n segments = p._find_segments(verts=verts)\n assert [([0, 0], [1, 0]), ([1, 0], [1, 1]), ([1, 1], [0, 0])] == segments\n assert 0.5 == p._find_area(segments=segments)\n assert np.allclose(3.41421356237,p._find_permimter(segments=segments))", "title": "" }, { "docid": "b40c37dbc1570015f55fe07be80049d5", "score": "0.5204401", "text": "def test_extract_coordinates_from_polygon(self):\n polygon = Polygon([\n Point(-30.0, 151.0), Point(-30.0, 151.5), Point(-30.5, 151.5),\n Point(-30.5, 151.0), 
Point(-30.0, 151.0)\n ])\n latitude, longitude = GeoJsonDistanceHelper.\\\n extract_coordinates(polygon)\n self.assertAlmostEqual(latitude, -30.2, 1)\n self.assertAlmostEqual(longitude, 151.2, 1)", "title": "" }, { "docid": "ece9d53d9c324fcd0eb8fa2c1573b89f", "score": "0.5203871", "text": "def test_points_in_polygon_shp_unknownPath():\n assert_unknown_path_raises_value_error(points_in_polygon_shp_file)", "title": "" }, { "docid": "b71a34580ed26a54d099383096f3e8a5", "score": "0.5189735", "text": "def polygon(*points):\n p = []\n a = 2 * pi / len(points) - 1\n indices = []\n for i in range(len(points)):\n p.append(points[i][0])\n p.append(points[i][1])\n p.append(points[i][0])\n p.append(points[i][1])\n indices.append(i)\n\n _draw_graphic(Mesh(\n vertices=p,\n indices=indices,\n mode=\"triangle_fan\"\n ))", "title": "" }, { "docid": "905aa8bdeab61318dee4a963c971d523", "score": "0.51863706", "text": "def _get_polygons(self, geom):\n polygons = []\n for pos in geom.iter('*'):\n # get leaf nodes. Treat LinearRing and MultiSurface the same way\n if len(pos) == 0:\n positions = list(filter(None, pos.text.split(' ')))\n points = []\n points_as_pairs = zip(positions[1::2], positions[::2])\n for latitude, longitude in points_as_pairs:\n points.append(Point(float(latitude), float(longitude)))\n polygons.append(Polygon(points))\n return MultiPolygon(polygons)", "title": "" }, { "docid": "e3c1f1f1e446371be49ad6196ef0a567", "score": "0.51838887", "text": "def setPolygon (self):\n\t\tprint(\"Creating polygon\")\n\t\tvenuePoints = self.venue['geometry']['coordinates']\n\t\tvenuePoints = self.getTuple(venuePoints)\n\t\t#print(venuePoints)\n\t\tvenuePolygons = createPolygon(venuePoints)\n\t\tself.polygon = venuePolygons", "title": "" }, { "docid": "02b4df846905ccd688e80b94b952363c", "score": "0.5183187", "text": "def toposimplify(geojson, p):\n proc_out = subprocess.run(\n ['geo2topo'],\n input=bytes(\n json.dumps(geojson),\n 'utf-8'),\n stdout=subprocess.PIPE\n )\n proc_out = subprocess.run(\n ['toposimplify', '-P', p],\n input=proc_out.stdout,\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL\n )\n return json.loads(proc_out.stdout)", "title": "" }, { "docid": "fbb12a4c1d26b4301a5aa9ed5b022395", "score": "0.51823664", "text": "def split_line2poly(verts):\n \n verts = verts[:-1] # drop closing point\n\n n = int((verts.__len__()-2)/2) # number of resulting polygons\n out = []\n for i in range(n):\n out.append(np.array([verts[i],verts[i+1],verts[-i-2],verts[-i-1],verts[i]]))\n return out", "title": "" }, { "docid": "07fc70d9ef14282e36a0ca3817546e0d", "score": "0.51768976", "text": "def define_source_polygon():\n global source_transformation\n source_transformation = np.float32(\n [[(img_size[0] / 2) - 55, img_size[1] / 2 + 100],\n [((img_size[0] / 6) - 10), img_size[1]],\n [(img_size[0] * 5 / 6) + 60, img_size[1]],\n [(img_size[0] / 2 + 55), img_size[1] / 2 + 100]])\n # print(\"source_transformation polygon defined :\", source_transformation)", "title": "" }, { "docid": "faabb6520cdded1bd58aaddbafc05baf", "score": "0.51743233", "text": "def polygon(self,image,i,j):\r\n if(i+j>=391 and j-i<=265 and i+0.81*j<=425.66 and i+0.17*j<=200 and 0.89*j -i >=148.75) or (13.5*j+i<=5256.72 and 1.43*j-i>=368.82 and i+0.81*j >=425.66):\r\n image[299-i,j,:]=0,0,0\r\n #print(2)\r\n self.obstacle.append(int(self.string(i,j)))", "title": "" }, { "docid": "38b6dce6b4c8cb3d4604284be6218c43", "score": "0.51692265", "text": "def shape_df_to_polygon(shape_df: pd.DataFrame) -> PolygonShape:\n\n # Ensure that data points are 
sorted in ascending format\n shape_df = shape_df.sort_values(by=location_constants.shape_sequence,\n ascending=True)\n # Create a placeholder to add points\n points = []\n\n # Iterate over polygonal sequence and extract points\n for index, row in shape_df.iterrows():\n # Extract UTM locations\n easting, northing = row[location_constants.shape_easting], row[location_constants.shape_northing]\n\n # Convert UTM to lat/long and add to list of points\n points.append((utm.to_latlon(easting, northing,\n date_time_constants.utm_zone, date_time_constants.utm_zone_letter)))\n\n return PolygonShape(points)", "title": "" }, { "docid": "77dbb7c69539e9095eb320939fb5d018", "score": "0.5168699", "text": "def __init__(self, parent: _Graphics, points: Array[Point2D]) -> None:\r\n from apysc.display.graphics import Graphics\r\n from apysc.expression import expression_variables_util\r\n from apysc.expression import var_names\r\n parent_graphics: Graphics = parent\r\n variable_name: str = expression_variables_util.get_next_variable_name(\r\n type_name=var_names.POLYGON)\r\n super(Polygon, self).__init__(\r\n parent=parent, x=0, y=0, variable_name=variable_name)\r\n self.points = points\r\n self._set_initial_basic_values(parent=parent)\r\n self._append_constructor_expression()\r\n self._set_line_setting_if_not_none_value_exists(\r\n parent_graphics=parent_graphics)", "title": "" }, { "docid": "9a4258700acaaca1f60c0135f6dba636", "score": "0.5161945", "text": "def make_feature(feature):\n geometry = None\n sr = arcpy.SpatialReference(4326)\n if 'paths' in feature['geometry']:\n paths = feature['geometry']['paths']\n if len(paths) == 1:\n geometry = arcpy.Polyline(arcpy.Array([arcpy.Point(*coords) for coords in paths[0]]), sr)\n else:\n parts = []\n for path in paths:\n parts.append(arcpy.Array([arcpy.Point(*coords) for coords in path]))\n geometry = arcpy.Polyline(arcpy.Array(parts), sr)\n elif 'rings' in feature['geometry']:\n rings = feature['geometry']['rings']\n if len(rings) == 1:\n geometry = arcpy.Polygon(arcpy.Array([arcpy.Point(*coords) for coords in rings[0]]), sr)\n else:\n parts = []\n for ring in rings:\n parts.append(arcpy.Array([arcpy.Point(*coords) for coords in ring]))\n geometry = arcpy.Polygon(arcpy.Array(parts), sr)\n elif 'points' in feature['geometry']:\n points = feature['geometry']['points']\n if len(points) == 1:\n geometry = arcpy.Multipoint(arcpy.Array([arcpy.Point(*coords) for coords in points[0]]), sr)\n else:\n parts = []\n for point in points:\n parts.append(arcpy.Array([arcpy.Point(*coords) for coords in point]))\n geometry = arcpy.Multipoint(arcpy.Array(parts), sr)\n\n if geometry:\n return geometry\n else:\n raise NullGeometry", "title": "" }, { "docid": "270a52dc712ec8df6d49ac60bd1ab933", "score": "0.5159117", "text": "def test_points_in_polygon_shp_nonePath():\n assert_none_path_raises_value_error(points_in_polygon_shp_file)", "title": "" }, { "docid": "286abf418a59dc4fd2e72f1e385dd996", "score": "0.51510787", "text": "def convert_polygon_to_lines(polygon_obj):\r\n lines = []\r\n partnum = 0\r\n\r\n # Step through each part of the feature\r\n #\r\n for part in polygon_obj:\r\n\r\n # Step through each vertex in the feature\r\n #\r\n start_point = polygon_obj.firstPoint\r\n line_start_point = None\r\n current_line = []\r\n for pnt in polygon_obj.getPart(partnum):\r\n if pnt:\r\n if line_start_point is None:\r\n line_start_point = pnt\r\n current_line = [line_start_point]\r\n continue\r\n\r\n # take the first two points since they will always be a line, and then check if the third 
point belong to the same line\r\n # if the point belong to the same line then add the point to the line and move on the next point\r\n # if the point does not belong to the same line then we need to take the last point of the the current line as the start point in the new line and the current point will be the second point\r\n # of the new line and then we do the same again by taking the next point and checking if it belongs to the same line or not\r\n if pnt != line_start_point:\r\n if len(current_line) < 2:\r\n current_line.append(pnt)\r\n else:\r\n if is_point_in_line(current_line, pnt):\r\n current_line.append(pnt)\r\n else:\r\n lines.append(current_line)\r\n line_start_point = current_line[-1]\r\n current_line = [line_start_point, pnt]\r\n if current_line:\r\n lines.append(current_line)\r\n\r\n partnum += 1\r\n return lines", "title": "" }, { "docid": "27c89d410f92464518adfe5a8719d6b8", "score": "0.5150738", "text": "def find_polygon_at_point(gdf: GeoDataFrame, points: np.ndarray) -> np.ndarray:\n # This seems to be slightly slower than using shapely.strtree.STRtree,\n # but it is a bit more natural and simpler.\n index = gdf.sindex\n shapes = gdf.geometry.iloc\n return np.array([\n next((\n i for i in index.query(p)\n if shapes[i].intersects(p)), -1)\n for p in MultiPoint(points[:, :2])\n ])", "title": "" }, { "docid": "a121c23faecf4ba3fd77ac5391bd5342", "score": "0.51444983", "text": "def parse_vertices(vertices_str):\n s = [float(t) for t in vertices_str.split(',')]\n return zip(s[::2], s[1::2])", "title": "" }, { "docid": "0a40c31d728a2f177ee599f2445d1155", "score": "0.5143645", "text": "def test_points_in_polygon_wkt_unknownPath():\n assert_unknown_path_raises_value_error(points_in_polygon_wkt_file)", "title": "" }, { "docid": "efda4be33928829fc2d0ad39fe6401d5", "score": "0.51420707", "text": "def test_points_in_polygon_wkt_invalidPolygon():\n pc_in = read_las.read(\"testdata/AHN2.las\")\n with pytest.raises(ValueError):\n points_in_polygon_wkt(pc_in,\n \"POLYGON(( 243590.0 572110.0, 243640.0 572160.0, 243700.0 572110.0, 243640.0 572060.0 ))\")", "title": "" }, { "docid": "f23330fc54df203e3d68dcd8f1770b2c", "score": "0.5141163", "text": "def __getitem__(self, s):\n if isinstance(s, int):\n if s < 3 or s > self.n:\n raise IndexError\n else:\n return self._polygon(s, self.r)", "title": "" }, { "docid": "5c2bbd2d5b94b3583199308e7f82a401", "score": "0.5138454", "text": "def polygon_area(poly):\n # Thanks to StackExchange for this one.\n # https://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python\n\n def sin_proj(lon, lat):\n \"\"\"\n Returns the x & y coordinates in km using a sinusoidal projection\n\n latitude and longitude are numpy arrays\n \"\"\"\n lat_dist = np.pi * c.re_km / 180.0\n y = lat * lat_dist\n x = lon * lat_dist * cos(radians(lat))\n return x, y\n\n def area(x, y):\n \"\"\"\n Calculates the area of an arbitrary polygon given its verticies\n in sinusoidal coordinates (in km), result is in km^2\n \"\"\"\n area = 0.0\n for i in range(-1, len(x)-1):\n area += x[i] * (y[i+1] - y[i-1])\n return abs(area) / 2.0\n\n # if poly has vertices at (0,0), (0,1), (1,1), (1,0)\n # this will give (0,0,1,1,0), (0,1,1,0,0)\n lon, lat = poly.exterior.coords.xy\n lon = array(lon)\n lat = array(lat)\n # for the functions above, we don't need that last repeated vertex.\n x, y = sin_proj(lon, lat)\n return area(x, y)", "title": "" }, { "docid": "172f7e89e1e7748a66fc8ce8252a2b05", "score": "0.5132722", "text": "def test_wkt(self):\n 
polygon_example = [[7941214.280214552,6657013.482674685],[7941285.222167995,6657275.094877264],\n [7941646.696438557,6657174.27908272],[7941576.155582538,6656914.224977171],\n [7941214.280214552,6657013.482674685]]\n wkt = get_wkt(polygon_example)\n polygon_wkt = \"POLYGON((7941214.280214552 6657013.482674685,7941285.222167995 6657275.094877264,7941646.696438557 6657174.27908272,7941576.155582538 6656914.224977171,7941214.280214552 6657013.482674685,))\" \n self.assertEqual(wkt,polygon_wkt,\"Should equal\")", "title": "" }, { "docid": "a913bffc778275559e6287e640cdd531", "score": "0.51321507", "text": "def _project(s):\n # Is s an appropriate sequence or just a single point?\n try:\n return _unzip(map(_project_point, s))\n except TypeError:\n return _project_point(s)\n except IndexError: # for numpy arrays\n return _project_point(s)", "title": "" }, { "docid": "a6c95464b5f97328c756c6b4f7a08b50", "score": "0.5131346", "text": "def select_shape(shpfile, category, name):\r\n s = shpfile\r\n polygon = s[s[category] == name]\r\n polygon = polygon.geometry[:].unary_union\r\n return polygon", "title": "" }, { "docid": "9eb96de3ede28395aa80e326376f0e4f", "score": "0.51294744", "text": "def multipolygon_str(*polygons):\n poly_strs = []\n for polygon in polygons:\n v_strs = [\"{:.10f} {:.10f}\".format(*_recast(v)) for v in polygon]\n v_strs.append(v_strs[0]) # auto close the polygon\n poly_str = \"(( {} ))\".format(\",\".join(v_strs))\n poly_strs.append(poly_str)\n return 'MULTIPOLYGON({})'.format(\",\".join(poly_strs))", "title": "" }, { "docid": "151dc560bbcb8fb8e2b52d84732f0a32", "score": "0.51242393", "text": "def draw_polygon(self, points):\n \n # check points\n if len(points) < 3:\n return\n \n # apply scaling and offset\n points = (numpy.array(points) + self._offset) * numpy.array((self._scale, self._scale))\n \n # format\n points = (\"%s,%s\" % (x,y) for x,y in points)\n \n # get pen and brush\n pen = self._get_pen_attrs()\n brush = self._get_brush_attrs()\n \n # make command\n command = self._indent + '<polygon points=\"%s\" %s %s fill-rule=\"evenodd\" />' % (\" \".join(points), pen, brush)\n \n # add command\n self._commands.append(command)", "title": "" }, { "docid": "641e5f1dc9933e0498cce346d5628d28", "score": "0.51227885", "text": "def createPolygonObjects(self, layersForOutput):\n scripts = []\n template = \" var object{index} = topojson.feature(json{index}, json{index}.objects.l{index});\\n\"\n i = 0\n for o in layersForOutput:\n script = template.format(\n index = i\n )\n scripts.append(script)\n i += 1\n \n return \"\".join(scripts)", "title": "" }, { "docid": "83c2c1bd0b71a2a7fa61095d59abd6e4", "score": "0.5121203", "text": "def test_wkt_polygons_contains(self):\n test_data = ComplexTestData()\n path = os.path.join(self._test_dir, 'wkt_test.wkt')\n with open(path, 'w') as f:\n f.write(test_data.get_wkt_polygon_around_first_point_only())\n pc_in = test_data.get_point_cloud()\n expected_point = np.array(pc_in[point]['x']['data'][0], pc_in[point]['y']['data'][0])\n\n pc_out = points_in_polygon_wkt_file(pc_in, path)\n\n assert_equal(len(pc_out[point]['x']['data']), 1)\n selected_point = np.array(pc_out[point]['x']['data'][0], pc_out[point]['y']['data'][0])\n np.testing.assert_allclose(selected_point, expected_point)", "title": "" }, { "docid": "4fa5f8e2079c240aaa60e22d706926c3", "score": "0.5115571", "text": "def geo_within_stage_2(data_input, settings):\n spindex = settings['spindex']\n frag = settings['id_frag']\n shp_object = settings['shp_object']\n\n attributes = 
settings.get('attributes', None)\n if attributes is None:\n attributes = list(shp_object.columns)\n alias = settings.get('alias', '_shp')\n col_lon_lat = [settings['lon_col'], settings['lat_col']]\n polygon_col = settings.get('polygon', 'points')\n polygon_col_idx = shp_object.columns.get_loc(polygon_col)\n\n data_input.reset_index(drop=True, inplace=True)\n sector_position = [-1] * len(data_input)\n\n if len(data_input) > 0:\n points = data_input[col_lon_lat].to_numpy().tolist()\n\n def get_first_polygon(y, x):\n # first, find the squares where point is inside (coarse-grained)\n # (xmin,ymin,xmax,ymax)\n matches = spindex.intersect([x, y, x, y])\n\n # then, to all selected squares, check if point is in polygon\n # (fine-grained)\n for shp_inx in matches:\n row = shp_object.iat[shp_inx, polygon_col_idx].tolist()\n polygon = Path(row)\n if polygon.contains_point([y, x]):\n return shp_inx\n return None\n\n for i, point in enumerate(points):\n x, y = point\n sector_position[i] = get_first_polygon(y, x)\n\n cols = data_input.columns.tolist()\n col_tmp1 = create_auxiliary_column(cols)\n data_input[col_tmp1] = sector_position\n\n if polygon_col not in attributes:\n shp_object = shp_object.drop([polygon_col], axis=1)\n\n shp_object.columns = [\"{}{}\".format(c, alias) for c in attributes]\n # shp_object.reset_index(inplace=True)\n\n # merge with idx of each point\n data_input = data_input.merge(shp_object, how='left',\n left_on=col_tmp1, right_index=True,\n copy=False)\n\n del shp_object\n\n data_input = data_input.drop([col_tmp1], axis=1)\\\n .reset_index(drop=True)\n info = generate_info(data_input, frag)\n return data_input, info", "title": "" }, { "docid": "1c879494066297a17dee5959f79289ab", "score": "0.51147014", "text": "def polygon(t,n,length):\n angle=360.0/n\n polyline(t,n,length,angle)", "title": "" }, { "docid": "c0c16285227c58cfb4b53d7677d24184", "score": "0.5111245", "text": "def kml2slu(file, write=False):\n class Segment:\n # Stores info needed for each line segment making up the polygon\n def __init__(self, x0, x1, y0, y1):\n self.x = x0\n self.y = y0\n self.ylower = np.min([y0, y1])\n self.yupper = np.max([y0, y1])\n self.slope = (y1 - y0) / (x1 - x0)\n\n polygons = {}\n\n placemark = False\n name = None\n polygon = False\n grab_next = False\n\n threats = []\n\n with open(file, \"r\") as kml_file:\n for num, line in enumerate(kml_file, 1):\n if \"<Placemark>\" in line:\n placemark = True\n\n elif placemark:\n if \"<name>\" in line:\n name = line[line.find(\">\") + 1:line.find(\"</\")]\n elif \"<Polygon>\" in line:\n polygon = True\n elif (\"<coordinates>\" in line) and polygon:\n grab_next = True\n elif grab_next:\n tmp_threats = [\"\\n\" + name]\n # Grabs coordinates of points and puts them in dataframe with [lat, lon]\n s = pd.Series(np.array(line.strip().split(\" \"))).str.split(\",\")\n df = pd.concat([s.str.get(0).astype(float), s.str.get(1).astype(float)], axis=1)\n df.columns = [\"lon\", \"lat\"]\n\n # Identify polygon segments\n segments = [\n Segment(df[\"lon\"].iloc[i], df[\"lon\"].iloc[i + 1], df[\"lat\"].iloc[i], df[\"lat\"].iloc[i + 1])\n for i in df.index.to_list() if i != len(df) - 1]\n\n # Get longitudes for each latitude\n df[\"lon 0\"] = None\n df[\"lon 1\"] = None\n for i in df.index.to_list():\n lat = df[\"lat\"].iloc[i]\n lon_orig = df[\"lon\"].iloc[i]\n for segment in segments:\n if segment.ylower <= lat <= segment.yupper:\n lon = ((lat - segment.y) / segment.slope) + segment.x\n if lon != lon_orig:\n if df[\"lon 0\"].iloc[i] is not None:\n # Found three 
points with same latitude\n tmp_threats.append(str(lat))\n elif lon > lon_orig:\n df[\"lon 1\"].iloc[i] = lon\n df[\"lon 0\"].iloc[i] = lon_orig\n else:\n df[\"lon 1\"].iloc[i] = lon_orig\n df[\"lon 0\"].iloc[i] = lon\n\n # For top and bottom verticies\n if df[\"lon 0\"].iloc[i] is None and df[\"lon 1\"].iloc[i] is None:\n df[\"lon 0\"].iloc[i] = lon_orig\n df[\"lon 1\"].iloc[i] = lon_orig\n\n polygons[name] = df[[\"lat\", \"lon 0\", \"lon 1\"]].drop_duplicates(subset=['lat']).sort_values(\n by=['lat']).reset_index(drop=True).to_numpy()\n\n if len(tmp_threats) > 1:\n threats += tmp_threats\n\n polygon = False\n grab_next = False\n placemark = False\n\n if threats:\n raise TypeError(\"The following polygons have three points with the same latitude. Each latitude \"\n \"can belong to at most two points on a polygon.\" + '\\n'.join(threats))\n\n if write:\n with open(\"slu_ouputs.txt\", \"w\") as slu_file:\n [slu_file.writelines([p, \":\\n\", str(polygons.get(p)), \"\\n\\n\"]) for p in polygons]\n\n return polygons", "title": "" }, { "docid": "5d6d97accae99b268b56416b306cb951", "score": "0.5105927", "text": "def getAsGeoPoint(word):\n\treturn GeoPoint.initFromString(word, True)", "title": "" } ]
e3ad1ecc0b94296802f80087c1b85c0e
Read telemetry data from a PACE HKT product.
[ { "docid": "5f5c4e404086d52f704ac63219548881", "score": "0.0", "text": "def from_hkt(self, flnames: Path | list[Path], *,\n instrument: str | None = None, dump: bool = False):\n if isinstance(flnames, Path):\n flnames = [flnames]\n if instrument is None:\n instrument = 'spx'\n elif instrument not in ['spx', 'sc', 'oci', 'harp']:\n raise KeyError(\"instrument not in ['spx', 'sc', 'oci', 'harp']\")\n\n self.file_list = flnames\n ccsds_hk: tuple[np.ndarray] | tuple = ()\n for name in flnames:\n hkt = HKTio(name)\n self.set_coverage(hkt.coverage())\n ccsds_hk += hkt.housekeeping(instrument)\n\n if not ccsds_hk:\n return\n\n if dump:\n dump_numhk(flnames[0].stem + '_hk.dump', ccsds_hk)\n\n epoch = datetime.datetime(1958, 1, 1,\n tzinfo=datetime.timezone.utc)\n ii = len(ccsds_hk) // 2\n leap_sec = get_leap_seconds(ccsds_hk[ii]['hdr']['tai_sec'][0])\n epoch -= datetime.timedelta(seconds=leap_sec)\n self._hk = extract_l0_hk(ccsds_hk, epoch)", "title": "" } ]
[ { "docid": "3ec696c850c588d537fed90f96a8bf3f", "score": "0.55891937", "text": "def _get_data(self):\n self._data = system_info(self.payload)\n if self._data:\n self._target_temperature = data['setpoint']\n self._current_temperature = data['temperature']\n self._current_operation_mode = data['authorization']\n self._device_id = data['endpoint']\n _LOGGER.debug(\"Tydum value: {}\".format(self._data))\n else:\n _LOGGER.error(\"Could not get data from Tydum. {}\".format(self._data))", "title": "" }, { "docid": "9153acbef7069e9006c9076c6db36978", "score": "0.53883314", "text": "def _acquire_eeg_data_mock():\n json_file = open(\"to_send.json\")\n k = json.loads(json_file.read())\n _eeg_data = k[0][\"eeg_data\"]\n _timestamp = k[0][\"timestamp\"]\n return _eeg_data, _timestamp\n\n # Obtain EEG data from the LSL stream\n # eeg_data, timestamp = _acquire_eeg_data(inlet)", "title": "" }, { "docid": "127b299e6497e39f9d4644590b1561d1", "score": "0.5373777", "text": "def read_gps_data(gps_file, harbor_data):\n pass", "title": "" }, { "docid": "586e6f1dd3c7b4b7cda4a18560dac3b4", "score": "0.53023297", "text": "def read_telemetry(path):\n tele_df=pd.read_csv(path,sep='\\s+',names=['vessel_n','esn','month','day','Hours','minutes','fracyrday',\\\n 'lon','lat','dum1','dum2','depth','rangedepth','timerange','temp','stdtemp','year'])\n if len(tele_df)<6000:\n print('Warning: the emolt.dat file is not complete at this time.')\n #sys.exit()\n \n return tele_df", "title": "" }, { "docid": "08dc236c84c0372151f2dd79d14d989c", "score": "0.5231698", "text": "def read():\n ina = INA219(SHUNT_OHMS)\n ina.configure()\n current = \"%.3f\" % ina.voltage()\n power = \"%.3f\" % ina.power()\n shunt = \"%.3f\" % ina.shunt_voltage()\n\n point = Point(\"rpi\").tag(\"host\", \"rpi4\").field(\"current\", current)\n point2 = Point(\"rpi\").tag(\"host\", \"rpi4\").field(\"power\", power)\n point3 = Point(\"rpi\").tag(\"host\", \"rpi4\").field(\"shunt\", shunt)\n\n print(point.to_line_protocol())\n print(point2.to_line_protocol())\n print(point3.to_line_protocol())\n write_api.write(bucket=bucket, record=point3)\n write_api.write(bucket=bucket, record=point2)\n write_api.write(bucket=bucket, record=point)\n write_api.__del__()\n\n print(\"Bus Voltage: %.3f V\" % ina.voltage())\n try:\n print(\"Bus Current: %.3f mA\" % ina.current())\n print(\"Power: %.3f mW\" % ina.power())\n print(\"Shunt voltage: %.3f mV\" % ina.shunt_voltage())\n except DeviceRangeError as e:\n print(e)", "title": "" }, { "docid": "63fb49efee20544750d6472ebd0290fb", "score": "0.51485413", "text": "def read_data(self):\n pass", "title": "" }, { "docid": "63fb49efee20544750d6472ebd0290fb", "score": "0.51485413", "text": "def read_data(self):\n pass", "title": "" }, { "docid": "85d566be2d09aeb8ccbe283635e4ca63", "score": "0.5075995", "text": "def readAccData(self):\n return self.lis3dh.acceleration()", "title": "" }, { "docid": "5efcba58dc9912072f1a904d24d4fef2", "score": "0.5071989", "text": "def get_data(self):\n params = {\n \"serialnumber\": self.serial_number\n }\n data = self._session.request(\n url=self._url,\n params=params,\n )\n _LOGGER.debug(f\"Get Data: Loaded response {data}\")\n self._data = data\n\n self.heating = data.get(\"Heating\")\n self.online = data.get(\"Online\")\n self.room = data.get(\"Room\")\n self.serial_number = data.get(\"SerialNumber\")\n self.temperature = data.get(\"Temperature\")\n self.manual_temperature = data.get(\"ManualTemperature\")\n self.min_temperature = data.get(\"MinTemp\")\n self.max_temperature = 
data.get(\"MaxTemp\")\n self.target_temperature = data.get(\"SetPointTemp\")\n self._schedule_mode = data.get(\"RegulationMode\")\n # hold_time_str = data.get(\"HoldSetPointDateTime\")\n # self._hold_time = datetime.fromisoformat(hold_time_str)", "title": "" }, { "docid": "126885106ffabb391c61b45f891d55f2", "score": "0.50552154", "text": "def read(self):\n raw_data = self._read_raw(PARAM_AM2321_READ, REG_AM2321_HUMIDITY_MSB, 4)\n self.temperature = unpack('>H', raw_data[-2:])[0]/10.0\n self.humidity = unpack('>H', raw_data[-4:2])[0]/10.0", "title": "" }, { "docid": "950aff92e283ddc7367afd922972657a", "score": "0.50378305", "text": "def read(self, path):\n print(\"\\tRead, path={}\".format(path))\n\n self.product = ProductIO.readProduct(path)\n return self.product", "title": "" }, { "docid": "192f0471de464648e437b4406656f964", "score": "0.49859804", "text": "def get_data(self):\n humidity, temperature = Adafruit_DHT.read_retry(self.sensor, self.pin)\n \n if temperature is not None and humidity is not None:\n return humidity, round(temperature, 2)\n else:\n return self.get_data()", "title": "" }, { "docid": "d597b0a8ddc5a1ad1855be71e5e5a0d6", "score": "0.49791327", "text": "def read_sensor(self):\n\n # with open(\"pressuretestdata.csv\", 'rU') as f:\n # tempdata = [list(map(int, rec)) for rec in reader(f, delimiter=',')]\n\n # Recording sensor time\n t = time.process_time()\n\n processed_data = self.adc_reading()\n volts = np.array([processed_data[0], processed_data[1], processed_data[2]])\n # data_unit = np.array([tempdata[0][0], tempdata[0][1], tempdata[0][2]])\n # Converts all pressure sensor readings from volts to psi\n data_unit = self.volt_to_unit(volts)\n avg = self.vote(data_unit)\n\n # Appends temporary data to sensor data array\n # raw_data = np.append([t], [data_unit])\n # self.data.append(raw_data)\n # self.avg_data.append(avg)\n return avg, t", "title": "" }, { "docid": "80fa8f8981c566c84977db6fc8b79d76", "score": "0.49461246", "text": "def read_compensated_data(self, result=None):\n self.read_raw_data(self._l3_resultarray)\n raw_temp, raw_press, raw_hum = self._l3_resultarray\n # temperature\n var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)\n var2 = (((((raw_temp >> 4) - self.dig_T1) *\n ((raw_temp >> 4) - self.dig_T1)) >> 12) * self.dig_T3) >> 14\n self.t_fine = var1 + var2\n temp = (self.t_fine * 5 + 128) >> 8\n\n # pressure\n var1 = self.t_fine - 128000\n var2 = var1 * var1 * self.dig_P6\n var2 = var2 + ((var1 * self.dig_P5) << 17)\n var2 = var2 + (self.dig_P4 << 35)\n var1 = (((var1 * var1 * self.dig_P3) >> 8) +\n ((var1 * self.dig_P2) << 12))\n var1 = (((1 << 47) + var1) * self.dig_P1) >> 33\n if var1 == 0:\n pressure = 0\n else:\n p = 1048576 - raw_press\n p = (((p << 31) - var2) * 3125) // var1\n var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25\n var2 = (self.dig_P8 * p) >> 19\n pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)\n\n # humidity\n h = self.t_fine - 76800\n h = (((((raw_hum << 14) - (self.dig_H4 << 20) -\n (self.dig_H5 * h)) + 16384)\n >> 15) * (((((((h * self.dig_H6) >> 10) *\n (((h * self.dig_H3) >> 11) + 32768)) >> 10) +\n 2097152) * self.dig_H2 + 8192) >> 14))\n h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)\n h = 0 if h < 0 else h\n h = 419430400 if h > 419430400 else h\n humidity = h >> 12\n\n if result:\n result[0] = temp\n result[1] = pressure\n result[2] = humidity\n return result\n\n return array(\"i\", (temp, pressure, humidity))", "title": "" }, { "docid": "e090dae73d0a7de52c99424ab049100e", "score": 
"0.49202776", "text": "def extract_data_particle(self, raw_data, timestamp):\r\n sample = self._extract_sample(CtdpfCklWfpParserDataParticle, None, raw_data, timestamp)\r\n return sample", "title": "" }, { "docid": "8d2cc1cef9f02a1a836bf6adb5a4bd17", "score": "0.49034235", "text": "def read_wx_data(wx_file, harbor_data):\n pass", "title": "" }, { "docid": "0afe14490f96f381fa11d6c6af71d6ac", "score": "0.48979148", "text": "def get_htc_data(htc_file, metadata_dict, rolling_window=None):\n \n if htc_file:\n if 'hfy' in htc_file:\n htc_cube = iris.load_cube(htc_file, 'zonal sum ocean heat y transport convergence globe')\n else:\n htc_cube = iris.load_cube(htc_file)\n metadata_dict[htc_file] = htc_cube.attributes['history']\n\n htc_cube = timeseries.convert_to_annual(htc_cube)\n if rolling_window:\n y_axis_name = get_y_axis_name(htc_cube)\n htc_cube = htc_cube.rolling_window(y_axis_name, iris.analysis.MEAN, rolling_window)\n\n htc_trend = calc_trend_cube(htc_cube)\n htc_mean = htc_cube.collapsed('time', iris.analysis.MEAN)\n\n htc_trend.attributes = htc_cube.attributes\n htc_mean.atributes = htc_cube.attributes\n else:\n htc_trend = None\n htc_mean = None\n \n return htc_trend, htc_mean, metadata_dict", "title": "" }, { "docid": "93dcf46210215506f8638115a9271b85", "score": "0.4890831", "text": "def read_sensor(self):\n (row, self.frame_index, self.old_frame, self.p0_nose) = self.detector.getOneEvent(\n self.camera, \n self.frame_index, \n self.old_frame, \n self.p0_nose\n )\n\n row['timestamp'] = time.time()\n \n return row", "title": "" }, { "docid": "2d5534170f1c354e80aec59396ea8038", "score": "0.48718184", "text": "def read (self):\n debug('read()')\n \n if self.instrument == None: return\n else: \n #if self.model == 'RIGOLDE': _t.sleep(RIGOLDE_DELAY)\n return self.instrument.read()", "title": "" }, { "docid": "817885eccc2369a4f4c0b4ff0725196e", "score": "0.48350632", "text": "def _read_trajectory(self,fname):\n if fname is None:\n return None\n\n if not fname.startswith(self.prefix):\n fname = os.path.join(self.prefix,fname)\n if not os.path.isfile(fname):\n # tracking probably hasn't been performed before\n return None\n\n try:\n data = np.loadtxt(fname,delimiter=',')\n except IOError:\n print('Failed to read',fname)\n return None\n\n if len(data.shape) == 1:\n Nread = 1\n data = data.reshape((1,len(data)))\n else:\n Nread = len(data)\n if not Nread == self.Ntimes:\n print('Incorrect number of time steps in',fname)\n print(' found',Nread,'but expected',self.Ntimes)\n return None\n\n # data[:,0] is just an index\n self.xh_wake = data[:,1]\n self.xv_wake = data[:,2]\n self._update_inertial()\n self.wake_tracked = True\n if self.verbose:\n print('Trajectory loaded from',fname)\n\n return data", "title": "" }, { "docid": "72ee940914940a5964381ce4fb2cec1f", "score": "0.48345608", "text": "def _gwpy_read(self, det, channel, start_time, end_time, dtype=\"float64\"):\n\n logger.debug(\"data-dict provided, attempt read of data\")\n\n if det not in self.data_dict:\n logger.info(f\"Detector {det} not found in data-dict\")\n return None\n else:\n source = self.data_dict[det]\n format_ext = os.path.splitext(source)[1]\n\n # If the source contains a glob-path, e.g. 
*gwf, glob it first\n if \"*\" in source:\n logger.info(f\"Globbing {source}\")\n source = glob.glob(source)\n logger.info(f\"Setting source={source}\")\n\n if \"gwf\" in format_ext:\n kwargs = dict(\n source=source, channel=channel, dtype=dtype, format=\"gwf.lalframe\"\n )\n type_kwargs = dict(dtype=dtype, subok=True, copy=False)\n elif \"hdf5\" in format_ext:\n kwargs = dict(source=source, start=start_time, end=end_time, format=\"hdf5\")\n elif \"txt\" in format_ext:\n data = kwargs = dict(source=source)\n else:\n # Generic best try\n kwargs = dict(\n source=source, channel=channel, start=start_time, end=end_time\n )\n\n if self.data_format is not None:\n kwargs[\"format\"] = self.data_format\n\n try:\n kwargs_string = \"\"\n for key, val in kwargs.items():\n if isinstance(val, str):\n val = f\"'{val}'\"\n kwargs_string += f\"{key}={val}, \"\n\n if \"gwf\" in format_ext:\n type_kwargs_string = \"\"\n for key, val in type_kwargs.items():\n if isinstance(val, str):\n val = f\"'{val}'\"\n type_kwargs_string += f\"{key}={val}, \"\n logger.info(\n f\"Running: gwpy.timeseries.TimeSeries.read({kwargs_string}).astype({type_kwargs_string})\"\n )\n data = gwpy.timeseries.TimeSeries.read(**kwargs).astype(**type_kwargs)\n\n else:\n logger.info(\n f\"Running: gwpy.timeseries.TimeSeries.read({kwargs_string})\"\n )\n data = gwpy.timeseries.TimeSeries.read(**kwargs)\n\n data = data.crop(start=start_time, end=end_time)\n\n if data.duration.value < end_time - start_time:\n logger.warning(\n \"Unable to read in requested {}s duration of data from {}\"\n \" only {}s available: returning None\".format(\n end_time - start_time, source, data.duration.value\n )\n )\n data = None\n elif data.duration.value > end_time - start_time:\n logger.info(\n \"Read in {}s of data from {}, but {}s requested, truncating\".format(\n data.duration.value, source, end_time - start_time\n )\n )\n data = data[data.times.value >= start_time]\n data = data[data.times.value < end_time]\n\n return data\n except ValueError as e:\n logger.info(f\"Reading of data failed with error {e}\")\n return None", "title": "" }, { "docid": "fe08802162afe6bf85ad0c9ff9ab401c", "score": "0.48255622", "text": "def read_temperature(self):\n\n self._log.debug(\"Reading temperature...\")\n return self.data.temperature", "title": "" }, { "docid": "ef7e4639e0d9c5d3015ebbc29bf16c2b", "score": "0.4824415", "text": "def get_data(self):\n\n try:\n self.illumination = self.get_moon_illumination()\n self.phase = self.get_moon_phase()\n self.access_data = True\n except:\n self.access_data = False", "title": "" }, { "docid": "b7a29ec6e3d6de9d4f4d2fe02cb19545", "score": "0.4798389", "text": "def test_reading(test_data):\n retrieval_file = GPROFRetrievalFile(test_data[\"retrieval_file\"])\n\n assert retrieval_file.satellite == \"GPM\"\n assert retrieval_file.sensor == \"GMI\"\n\n start_date = retrieval_file.start_date\n end_date = retrieval_file.end_date\n\n assert start_date.year == 2019\n assert start_date.month == 1\n assert start_date.day == 1\n assert end_date.year == 2019\n assert end_date.month == 1\n assert end_date.day == 1", "title": "" }, { "docid": "97ae8f6e8e604002bd27e4fc6c78f54c", "score": "0.4787155", "text": "def import_gt3x(filepath):\n\n print(\"\\nImporting data from {}...\".format(filepath))\n\n accel, ts, meta_data = gt3x.read_gt3x(filepath)\n\n start_stamp = str(ts[0])\n stop_stamp = str(ts[-1])\n tz = meta_data[\"TimeZone\"]\n sample_rate = meta_data[\"Sample_Rate\"]\n acc_min = meta_data[\"Acceleration_Min\"]\n acc_max = 
meta_data[\"Acceleration_Max\"]\n\n header_info = {\"Start_time\": start_stamp, \"Stop_time\": stop_stamp, \"Timezone\": tz, \"Sample_rate\": int(sample_rate),\n \"Accel_range\": (float(acc_min), float(acc_max))}\n\n print(\"Complete.\")\n\n return header_info, accel", "title": "" }, { "docid": "ce94b9a91247880b9e415a67c1a811b7", "score": "0.4780171", "text": "def getTelemetry(self, telemetry: str, args: str) -> Dict[str, Any]:\n demisto.debug('SensorClient.getTelemetry method has been called.')\n\n return self._http_request(\n method='GET',\n url_suffix='telemetry/' + telemetry.lower() + args\n )", "title": "" }, { "docid": "287e2456ea48b1fdbd74d207f7312c63", "score": "0.47729036", "text": "def test_readin_traces():\n data_dir = os.environ['PATDATA']\n patient_file_path = os.environ['PATSTAT']\n traces = data_manipulation.readin_traces(data_dir, patient_file_path,\n start_index = 0)\n\n assert len(traces) == 794\n assert isinstance(traces[0], data_manipulation.EyeTrace)", "title": "" }, { "docid": "b38b1ce4529d0f12b33d254bf76ebf3c", "score": "0.47604117", "text": "def extract_metadata_particle(self, raw_data, timestamp):\r\n sample = self._extract_sample(CtdpfCklWfpMetadataParserDataParticle, None,\r\n raw_data, timestamp)\r\n return sample", "title": "" }, { "docid": "7fda64ab54f5eb5b781260018b14d2b3", "score": "0.47558072", "text": "def _read_trajectory(self,fname):\n if python_version < 3:\n data = super(ContourWakeTracker,self)._read_trajectory(fname)\n else:\n data = super()._read_trajectory(fname)\n if data is not None:\n # assume load was successful\n self.Clevels = data[:,3]\n self.Cfvals = data[:,4]\n try:\n self.Careas = data[:,5]\n except IndexError:\n pass\n return data", "title": "" }, { "docid": "30878d8a694f4bfd5b0bbe3ff8978f49", "score": "0.4749064", "text": "def read(platform, tle_file=None, line1=None, line2=None):\n return Tle(platform, tle_file=tle_file, line1=line1, line2=line2)", "title": "" }, { "docid": "30878d8a694f4bfd5b0bbe3ff8978f49", "score": "0.4749064", "text": "def read(platform, tle_file=None, line1=None, line2=None):\n return Tle(platform, tle_file=tle_file, line1=line1, line2=line2)", "title": "" }, { "docid": "7681d66e782d002d0b336ce45bf05e75", "score": "0.47459388", "text": "def extract_data(self):\n try:\n r = requests.get(self.url, headers=self.headers, params=self.payload)\n self.result = r.json()\n return self.result[\"products\"]\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)", "title": "" }, { "docid": "10217ced74e5827f4389edc964bb96ef", "score": "0.47424054", "text": "def get_data():\n token = get_token()\n url = 'https://'+ORION_HOST+':2026/v2/entities?id=plugsense:PS02'\n headers = {'fiware-service': SERVICE, 'fiware-servicepath': SERVICE_PATH, 'X-Auth-Token': token} # Maybe the token changed\n response = requests.get(url, headers=headers, verify=False)\n response.encoding = 'utf-8'\n #print(response.text)\n response_dict=json.loads(response.text)\n print(\"Status code: \", response.status_code)\n # print(response.text)\n noise = response_dict[0]['NOISE']['value']\n datetime = response_dict[0]['NOISE']['metadata']['TimeInstant']['value']\n print(noise, datetime)\n return noise, datetime", "title": "" }, { "docid": "81e57d3bd532ba5b3f3f08583bc26608", "score": "0.47408956", "text": "def read(self):\n\n UT = self.readRawTemp()\n UP = self.readRawPressure()\n\n # True Temperature Calculations\n X1 = ((UT - self.__cal_AC6) * self.__cal_AC5) >> 15\n X2 = (self.__cal_MC << 11) / (X1 + self.__cal_MD)\n B5 = X1 + X2\n temp = 
((B5 + 8) >> 4) / 10.0\n\n # Pressure Calculations\n B6 = B5 - 4000\n X1 = (self.__cal_B2 * (B6 * B6) >> 12) >> 11\n X2 = (self.__cal_AC2 * B6) >> 11\n X3 = X1 + X2\n B3 = (((self.__cal_AC1 * 4 + X3) << self.mode) + 2) / 4\n X1 = (self.__cal_AC3 * B6) >> 13\n X2 = (self.__cal_B1 * ((B6 * B6) >> 12)) >> 16\n X3 = ((X1 + X2) + 2) >> 2\n B4 = (self.__cal_AC4 * (X3 + 32768)) >> 15\n B7 = (UP - B3) * (50000 >> self.mode)\n if (B7 < 0x80000000):\n p = (B7 * 2) / B4\n else:\n p = (B7 / B4) * 2\n \n X1 = (p >> 8) * (p >> 8)\n X1 = (X1 * 3038) >> 16\n X2 = (-7357 * p) >> 16\n\n p = p + ((X1 + X2 + 3791) >> 4)\n\n return (1. * p, temp)", "title": "" }, { "docid": "934f144e5b96458526f7a0819f2331f2", "score": "0.47347844", "text": "def get_reading(self):\n if self.verbose:\n print('getting reading')\n block = self.get_block()\n if self.verbose:\n print('type:',block[1])\n if block[1] != 0x18:\n raise DustSensorException('Invalid block type')\n return None\n rate = block[3] + block[4]/10.0\n return rate", "title": "" }, { "docid": "035c7b633c22ec99942903ebdd100a7f", "score": "0.4727366", "text": "def rxt_dict(self, context, name, timestamp, approximate=False):\n rxt_file = self.read_mgr.resolve(context, name, timestamp, approximate)\n\n with open(rxt_file) as f_handle:\n data = json.load(f_handle)\n return data", "title": "" }, { "docid": "172736b7a9a619dfce6ecf1091d4bcef", "score": "0.47208276", "text": "def _read_cweeds_data(metadata):\n # Parse the weather data to a frame of series.\n # Unfortunately, seems pd can't handle appending, so we build lists.\n times = []\n ghi = []\n dni = []\n dhi = []\n dni_extra = []\n wind_speed = []\n temp_air = []\n pressure = []\n albedo = []\n\n # we use these a lot, save some lookups\n albedo_soil = pvlib.irradiance.SURFACE_ALBEDOS['soil']\n albedo_snow = pvlib.irradiance.SURFACE_ALBEDOS['snow']\n\n zipname = '../data/{territory}.zip'.format(**metadata)\n # the station name we use here is the ugly name, not the pretty name in metadata['name']\n # most stations have the territory name but some don't.\n wy2name = '{wban}.WY2'.format(**metadata).format(metadata.name)\n\n latitude = metadata['latitude']\n longitude = metadata['longitude']\n timezone = datetime.timezone(datetime.timedelta(hours=metadata['timezone']))\n\n with zipfile.ZipFile(zipname) as zipf:\n def openwy2():\n # Find the wy2name in the archive. 
The names aren't consistent enough\n # to just get the right one in one or two tries.\n for zipitem in zipf.infolist():\n if wy2name in zipitem.filename:\n return zipf.open(zipitem)\n raise KeyError(\"Could not find {} in {}\".format(wy2name, zipname))\n with openwy2() as f:\n for line in f:\n # yyyymmddhh but hh is 01-24; shift to 00-23\n times.append(datetime.datetime(int(line[6:10]), int(line[10:12]),\n int(line[12:14]), int(line[14:16]) - 1, tzinfo=timezone))\n\n # values in kJ/m^2 for the entire hour; later we divide by 3.6 to get W/m^2\n dni_extra.append(int(line[16:20])) # extraterrestrial irradiance (sun at ToA)\n ghi.append(int(line[20:24])) # global horizontal irradiance\n dni.append(int(line[26:30])) # direct normal irradiance\n dhi.append(int(line[32:36])) # diffuse horizontal irradiance (ghi - dni)\n\n # pressure in 10 Pa ; divide by 100 to get kPa\n pressure.append(int(line[85:90]))\n # value in 0.1 C ; divide by 10 to get C\n temp_air.append(int(line[91:95]))\n # value in 0.1 m/s ; divide by 10 to get m/s.\n wind_speed.append(int(line[105:109]))\n\n # 0 => no snow; 1 => snow; 9 => missing\n str_snow = chr(line[116])\n if str_snow == '0':\n albedo.append(albedo_soil)\n elif str_snow == '1':\n albedo.append(albedo_snow)\n else:\n # Missing. Shitty guess: assume it's snowy if temp < -3 (bad guess!)\n # we probably should guess based on a model that includes precip data and\n # recent temps, which we have access to\n if temp_air[-1] < -3:\n albedo.append(albedo_snow)\n else:\n albedo.append(albedo_soil)\n\n # Pack the data now, before using it below. Also convert to the units we\n # expect (W/m^2 rather than J/(m^2 h), m/s rather than dm/s, etc)\n # And convert the times to np.datetime64 so pandas can run faster.\n times = np.asarray(times, dtype=np.datetime64)\n ghi = np.asarray(ghi, dtype=np.float32) * (1 / 3.6)\n dni = np.asarray(dni, dtype=np.float32) * (1 / 3.6)\n dhi = np.asarray(dhi, dtype=np.float32) * (1 / 3.6)\n dni_extra = np.asarray(dni_extra, dtype=np.float32) * (1 / 3.6)\n wind_speed = np.asarray(wind_speed, dtype=np.float32) * 0.1\n temp_air = np.asarray(temp_air, dtype=np.float32) * 0.1\n pressure = np.asarray(pressure, dtype=np.float32) * 0.01\n albedo = np.asarray(albedo, dtype=np.float32)\n\n # We don't get zenith/azimuth from the data. 
Calculate it.\n solpos = pvlib.solarposition.get_solarposition(times, latitude, longitude)\n solar_zenith = np.asarray(solpos['apparent_zenith'], dtype=np.float32)\n solar_azimuth = np.asarray(solpos['azimuth'], dtype=np.float32)\n\n # Get the air mass (?)\n airmass = pvlib.atmosphere.relativeairmass(solar_zenith)\n am_abs = pvlib.atmosphere.absoluteairmass(airmass, pressure)\n am_abs = np.asarray(am_abs, dtype=np.float32)\n\n return pd.DataFrame({\n 'ghi' : ghi,\n 'dni' : dni,\n 'dhi' : dhi,\n 'dni_extra': dni_extra,\n 'wind_speed': wind_speed,\n 'temp_air' : temp_air,\n 'pressure' : pressure,\n 'albedo' : albedo,\n 'solar_zenith' : solar_zenith,\n 'solar_azimuth' : solar_azimuth,\n 'absolute_airmass': am_abs\n }, index = times)", "title": "" }, { "docid": "ad6bde947a0a186782c471c75c360f15", "score": "0.47155017", "text": "def _get_temperature_reading(device_config):\r\n #vne::tbd:: get mode value from a common place to adapt to future changes\r\n if device_config['sim_mode'] is True:\r\n (temp_reading, user_data) = _get_sim_temperature(device_config)\r\n else:\r\n (temp_reading, user_data) = _get_udo_temperature_sensor(device_config)\r\n\r\n #Temperature Unit conversion \r\n final_reading = dtemperature.convert_unit(temp_reading, device_config['default_unit'], device_config['unit'])\r\n \r\n return (final_reading, user_data)", "title": "" }, { "docid": "866819fc9f27a517a3c09c2463fa5930", "score": "0.47137517", "text": "def get_sensor_data(serial_obj):\n\traw_data = serial_obj.readline().rstrip().split(\",\")\n\tdata = map(float, raw_data)\n\t# split into gyro and accel readings\n\taccel = np.array(data[:3])*g - np.array([AX_BIAS, AY_BIAS, AZ_BIAS])\n\t# account for gyro bias\n\tgyro = np.array(data[3:6]) - np.array([GX_BIAS, GY_BIAS, GZ_BIAS])\n\t# pressure\n\tbaro = data[-1]\n\treturn accel, gyro, baro", "title": "" }, { "docid": "74e3817e637db90bc5b57600b3ecfadf", "score": "0.47003913", "text": "def sample_data(self):\n elec_price_data = toy_data.price\n\n if self.evaluation_mode:\n charging_data = utils.sample_charging_data(\n self.eval_charging_data,\n self.config.EVAL_EPS_LEN,\n self.time_step,\n self.random_state\n )\n else:\n charging_data = utils.sample_charging_data(\n self.train_charging_data,\n self.episode_length,\n self.time_step,\n self.random_state\n )\n return charging_data, elec_price_data", "title": "" }, { "docid": "17430cee857def715237fb3034085387", "score": "0.46990985", "text": "def test_missing_metadata(self):\n lines = []\n\n with open(\"trace.txt\", \"r\") as fil:\n lines += fil.readlines()\n lines = lines[7:]\n fil.close()\n\n with open(\"trace.txt\", \"w\") as fil:\n fil.write(\"\".join(lines))\n fil.close()\n\n trace = trappy.FTrace()\n self.assertEqual(trace._cpus, None)\n self.assertEqual(trace._version, None)\n self.assertTrue(len(trace.thermal.data_frame) > 0)", "title": "" }, { "docid": "e80bbd654812c2af36a416dbc106938f", "score": "0.4686552", "text": "def read_temperature(self):\n return self._imu.read_temperature()", "title": "" }, { "docid": "8f50808de8e29bd4a510cdfc57198a81", "score": "0.46863773", "text": "def _get_data(self, mode: str):\n raise NotImplementedError", "title": "" }, { "docid": "f4d661cfb5d6905643a35b5c50f88444", "score": "0.46850336", "text": "def test_reader():\n fn = (\n f\"{os.path.dirname(__file__)}/../data/product_examples/\"\n \"1hrad.20130920.190000.gz\"\n )\n metadata, _ = mrms.reader(fn)\n assert abs(metadata[\"ul_lat\"] - 54.99) < 0.01", "title": "" }, { "docid": "d92a6f4f94f09ae7093b8f05aaf85a78", "score": "0.4681387", 
"text": "def extract_data_particle(self, raw_data, timestamp):\r\n sample = self._extract_sample(self._instrument_data_particle_class, None, raw_data, internal_timestamp=timestamp)\r\n return sample", "title": "" }, { "docid": "d8f5e89bd663c046a1254884619f7418", "score": "0.4666092", "text": "def on_telemetry_sample(self, msg):\n # Check if it's drone's ", "title": "" }, { "docid": "5bf1eb80088deeaae4fc3dc08ecd76ef", "score": "0.46645346", "text": "def main():\n tle_data = read('Noaa-19')\n print(tle_data)", "title": "" }, { "docid": "20818b914d06cf8a8b28c5c08831b450", "score": "0.4657693", "text": "def do_local_sensor_reading():\n # Get from sensor payload: data as json\n #jotter.get_jotter().jot(\"Acquiring sensor data.\", source_file=__name__)\n sensor = sensor_payload.get_sensor_payload_instance()\n sensor.start_acquisition()\n while not sensor.is_completed():\n sensor.process_acquisition()\n\n sensor_data_json = sensor.get_latest_data_as_json()\n # sensor_data_str = json.dumps(sensor_data_json)\n # print(sensor_data_str)\n\n wifi_connected = is_wifi_connected()\n\n #\n # Send Sensor Readings to server\n #\n if wifi_connected:\n # Put to server: sensor payload data\n #jotter.get_jotter().jot(\"Sending sensor data to server.\", source_file=__name__)\n import mainloop.main.httputil as httputil\n http_client = httputil.HttpClient()\n import gc\n gc.collect()\n response = http_client.post('http://192.168.4.1:3000/sensors/', json=sensor_data_json)\n # Check for success - resend/queue and resend - TODO\n response = None\n gc.collect()", "title": "" }, { "docid": "d976ac8f737a437a983f9a9b864666d6", "score": "0.46518788", "text": "def getData(self, ttype: ThreadType):\n if ttype == ThreadType.PLASMA:\n return self.praData\n elif ttype == ThreadType.TEMPERATURE:\n return self.tData\n elif ttype == ThreadType.PRESSURE1:\n return self.p1Data\n elif ttype == ThreadType.PRESSURE2:\n return self.p2Data\n else:\n return", "title": "" }, { "docid": "7de06dd06883ea6727fccac18c665a4b", "score": "0.4646653", "text": "def get_sense_data():\n global sense_data\n sense_data=[]\n \n sense_data.append(sense.get_temperature_from_humidity())\n sense_data.append(sense.get_temperature_from_pressure())\n sense_data.append(sense.get_humidity())\n sense_data.append(sense.get_pressure())\n\n # temperature corrected\n t = os.popen('vcgencmd measure_temp')\n cputemp = t.readline()\n cputemp = cputemp.replace('temp=','')\n cputemp = cputemp.replace(\"'C\\n\",\"\")\n cputemp = float(cputemp) \n temperature = sense.get_temperature()\n tempp = sense.get_temperature_from_pressure()\n temph = sense.get_temperature_from_humidity()\n temperatc = ((temperature + tempp + temph)/3) - (cputemp/5)\n sense_data.append(temperatc)\n \n\n\n o = sense.get_orientation()\n yaw = o[\"yaw\"]\n pitch = o[\"pitch\"]\n roll = o[\"roll\"]\n sense_data.extend([pitch,roll,yaw])\n\n mag = sense.get_compass_raw()\n mag_x = mag[\"x\"]\n mag_y = mag[\"y\"]\n mag_z = mag[\"z\"]\n sense_data.extend([mag_x,mag_y,mag_z])\n \n acc = sense.get_accelerometer_raw()\n x = acc[\"x\"]\n y = acc[\"y\"]\n z = acc[\"z\"]\n sense_data.extend([x,y,z])\n \n gyro = sense.get_gyroscope_raw()\n gyro_x = gyro[\"x\"]\n gyro_y = gyro[\"y\"]\n gyro_z = gyro[\"z\"]\n\n \n sense_data.extend([gyro_x,gyro_y,gyro_z])\n\n \n return sense_data", "title": "" }, { "docid": "0be55c9bb3108baab9068d0c2eda14d4", "score": "0.4641798", "text": "def read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines", "title": "" }, { "docid": 
"8db190ac4e044060969a6d55c7fd5ec1", "score": "0.46407166", "text": "def read_accel():\n\tglobal D\n\tx = (D.accel.readU8(D.OUT_X_H_A)<<8)|D.accel.readU8(D.OUT_X_L_A)\n\ty = (D.accel.readU8(D.OUT_Y_H_A)<<8)|D.accel.readU8(D.OUT_Y_L_A)\n\tz = (D.accel.readU8(D.OUT_Z_H_A)<<8)|D.accel.readU8(D.OUT_Z_L_A)\n\tdata = [x,y,z]\n\tdata = map(twos_complement,data)\t# convert to two's complement\n\tdata = map(accel_to_analog,data)\t\t# convert to m/s\n\treturn data", "title": "" }, { "docid": "8c8cd4da61ad632e38a9c1f99dad43e9", "score": "0.46369693", "text": "def read_data():\n\n url = \"http://observability-thanos-query-frontend.open-cluster-management-observability.svc.cluster.local:9090\"\n token = \"doesnotmatteryet\"\n #connects to Thanos or Prometheus as dictated by the URL\n pc = PrometheusConnect(url=url, headers={\"Authorization\": \"Bearer {}\".format(token)}, disable_ssl=True);\n\n start_time=(datetime.datetime.now() - datetime.timedelta(minutes=2160))\n end_time=datetime.datetime.now()\n #interval between data points gathered\n step='10m'\n\n alert_total = pc.custom_query_range(\n query='count(ALERTS{alertstate=\"firing\"}) by (cluster,alertname)',\n start_time=start_time,\n end_time=end_time,\n step=step,\n )\n\n alert_total_df = MetricRangeDataFrame(alert_total);\n alert_total_df[\"value\"]=alert_total_df[\"value\"].astype(float)\n alert_total_df.index= pandas.to_datetime(alert_total_df.index, unit=\"s\")\n\n #st.dataframe(alert_total_df.head())\n #st.dataframe(alert_total_df.count())\n\n return alert_total_df", "title": "" }, { "docid": "b4c086babdc7898792e71cd6c17adba8", "score": "0.46329975", "text": "def read_pressure(self):\n\n self._log.debug(\"Reading pressure...\")\n return self.data.pressure", "title": "" }, { "docid": "da1be17b04c74469b80ab0e12e0c0f67", "score": "0.46302512", "text": "def test_data_on(self):\n self.assert_initialize_driver()\n self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE,\n DataParticleType.THSPH_PARSED,\n self.assert_particle_sample,\n delay=15)", "title": "" }, { "docid": "7428f3817b6b2533b894f33640853a80", "score": "0.4625345", "text": "def GetRealPhyInfo(time, need_info=None):\n if os.path.exists(\"./openfoam_cylinder_re100\") is False:\n data_set = (\n \"https://dataset.bj.bcebos.com/PaddleScience/cylinder3D/\"\n \"openfoam_cylinder_re100/cylinder3d_openfoam_re100.zip\"\n )\n wget.download(data_set)\n with zipfile.ZipFile(\"cylinder3d_openfoam_re100.zip\", \"r\") as zip_ref:\n zip_ref.extractall(\"openfoam_cylinder_re100\")\n real_data = np.load(\"openfoam_cylinder_re100/flow_re100_\" + str(int(time)) + \"_xyzuvwp.npy\")\n real_data = real_data.astype(np.float32)\n if need_info == \"cord\":\n return real_data[:, 0:3]\n elif need_info == \"physic\":\n return real_data[:, 3:7]\n else:\n return real_data", "title": "" }, { "docid": "291160cfab513812081c748c06ab7ca7", "score": "0.46245205", "text": "def test_missing_metadata(self):\n lines = []\n\n with open(\"trace.txt\", \"r\") as fil:\n lines += fil.readlines()\n lines = lines[7:]\n fil.close()\n\n with open(\"trace.txt\", \"w\") as fil:\n fil.write(\"\".join(lines))\n fil.close()\n\n run = trappy.Run()\n self.assertEquals(run._cpus, None)\n self.assertEquals(run._version, None)\n self.assertTrue(len(run.thermal.data_frame) > 0)", "title": "" }, { "docid": "5cfff8af605c68a3ea83faa6190daafd", "score": "0.46234939", "text": "def ReadTestData(self):\n \n \n \n\n # read csv file into panda data dataframe\n temp_data = pd.read_csv(self.InputFile)\n # now we drop some of the columns, pay attention 
to white space\n drop_list =['server id','jitter','package','latency measured']\n print(temp_data)\n try:\n lcwa_data = temp_data.drop(columns = drop_list)\n except:\n print('error in pandas')\n return\n # convert date and time back to datetime\n lcwa_data[\"Time\"] = pd.to_datetime(lcwa_data['time']) \n\n # Create an iper and a speedtest frame\n\n iperf_opt = [' iperf3']\n self.lcwa_iperf = lcwa_data[lcwa_data['server name'].isin(iperf_opt)] #all the iperf values\n self.lcwa_speed = lcwa_data[~lcwa_data['server name'].isin(iperf_opt)] #all the not iperf values \n\n self.PlotData()", "title": "" }, { "docid": "41ab912d447cd1def706a3f2538c108e", "score": "0.461807", "text": "def read_compensated_data(self, result=None):\n self.read_raw_data(self._l3_resultarray)\n raw_temp, raw_press = self._l3_resultarray\n # temperature\n var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)\n var2 = (((((raw_temp >> 4) - self.dig_T1) *\n ((raw_temp >> 4) - self.dig_T1)) >> 12) * self.dig_T3) >> 14\n self.t_fine = var1 + var2\n temp = (self.t_fine * 5 + 128) >> 8\n\n # pressure\n var1 = self.t_fine - 128000\n var2 = var1 * var1 * self.dig_P6\n var2 = var2 + ((var1 * self.dig_P5) << 17)\n var2 = var2 + (self.dig_P4 << 35)\n var1 = (((var1 * var1 * self.dig_P3) >> 8) +\n ((var1 * self.dig_P2) << 12))\n var1 = (((1 << 47) + var1) * self.dig_P1) >> 33\n if var1 == 0:\n pressure = 0\n else:\n p = 1048576 - raw_press\n p = (((p << 31) - var2) * 3125) // var1\n var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25\n var2 = (self.dig_P8 * p) >> 19\n pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)\n\n if result:\n result[0] = temp\n result[1] = pressure\n return result\n\n return array(\"i\", (temp, pressure))", "title": "" }, { "docid": "2cbf0c1cd077db78ae2bc61bc296172f", "score": "0.46098584", "text": "def eeRead(self):\n # if self.devInfo['type'] == 4:\n # version = 1\n # elif self.devInfo['type'] == 5:\n # version = 2\n # else:\n # version = 0\n progdata = _ft.ft_program_data(\n Signature1=0,\n Signature2=0xFFFFFFFF,\n Version=2,\n Manufacturer=c.cast(c.c_buffer(256), c.c_char_p),\n ManufacturerId=c.cast(c.c_buffer(256), c.c_char_p),\n Description=c.cast(c.c_buffer(256), c.c_char_p),\n SerialNumber=c.cast(c.c_buffer(256), c.c_char_p),\n )\n\n call_ft(_ft.FT_EE_Read, self.handle, c.byref(progdata))\n return progdata", "title": "" }, { "docid": "80740abd29923540d9521dec59e1e1c5", "score": "0.4607617", "text": "def read_trace(self, channel):\n # Set some parameters\n self.write(\"COMM:HEAD 0\")\n self.write(\"WAVEFORM:DATASELECT ACQDATA\")\n self.write(\"WAVEFORM:FORMAT WORD;BYTEORDER LSBFIRST\")\n samplerate = float(self.query(\"WAVEFORM:SRATE?\"))\n self.write(\"WAVEFORM:TRACE \"+str(channel))\n\n # Get the parameters of the trace\n vdiv = float(self.query(\"WAVEFORM:VDIV?\"))\n offset = float(self.query(\"WAVEFORM:OFFSET?\"))\n data_length = int(self.query(\"WAVEFORM:LENGTH?\"))\n\n # Number of blocks to fetch\n data_per_block = 5000\n numblocks = int(data_length/data_per_block) + \\\n int(data_length % data_per_block > 0)\n this_ch_data = []\n for i in xrange(0, numblocks):\n # Get the block of data\n startblock = i*data_per_block\n if i == (numblocks-1):\n endblock = int(data_length)\n else:\n endblock = (i+1)*data_per_block-1\n if DEBUG:\n print \"Getting data from \" + str(startblock) + \\\n ' to ' + str(endblock)\n self.write('WAV:START ' + str(startblock) +\n ';END '+str(endblock)+';SEND?')\n data = self.read(12000)\n # The scope first outputs the number of bytes of the length 
string\n trace_data, num_lenbytes, num_databytes = \\\n self.__handle_binary_response(data)\n numeric_data = struct.unpack(\"<\"+str(num_databytes/2)+\"h\",\n trace_data)\n numeric_data = list(numeric_data)\n this_ch_data += map(lambda x: float(x)*vdiv/3200+offset,\n numeric_data)\n\n t = 1/samplerate*np.arange(0, data_length, 1)\n return this_ch_data, t", "title": "" }, { "docid": "bd1bf8202fb5bccaf7c5d08205292d28", "score": "0.45964098", "text": "def readElStress(self, line, rfstr, time):\n\n elId, intp, sxx, syy, szz, sxy, syz, szx = self._getVals(rfstr, line)\n\n return elId, intp, sxx, syy, szz, sxy, syz, szx", "title": "" }, { "docid": "3117cdcdbfd4d755feb7137d8f0a4442", "score": "0.45947748", "text": "def get_data_from_provider(self):\n geometry = self.campaign.geometry\n start_date = self.campaign.start_date\n end_date = self.campaign.end_date\n return OsmchaFeaturesProvider().get_data(\n geometry, self.current_page,\n start_date=start_date, end_date=end_date)", "title": "" }, { "docid": "c20051f710c3760290868e983521546b", "score": "0.45919028", "text": "async def import_device_data(self):\n for position in self._positions:\n device = next(\n (dev for dev in self._devices if dev.id == position.device_id), None\n )\n\n if not device:\n continue\n\n attr = {\n ATTR_TRACKER: \"traccar\",\n ATTR_ADDRESS: position.address,\n ATTR_SPEED: position.speed,\n ATTR_ALTITUDE: position.altitude,\n ATTR_MOTION: position.attributes.get(\"motion\", False),\n ATTR_TRACCAR_ID: device.id,\n ATTR_GEOFENCE: next(\n (\n geofence.name\n for geofence in self._geofences\n if geofence.id in (device.geofence_ids or [])\n ),\n None,\n ),\n ATTR_CATEGORY: device.category,\n ATTR_STATUS: device.status,\n }\n\n skip_accuracy_filter = False\n\n for custom_attr in self._custom_attributes:\n if device.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n if position.attributes.get(custom_attr) is not None:\n attr[custom_attr] = position.attributes[custom_attr]\n if custom_attr in self._skip_accuracy_on:\n skip_accuracy_filter = True\n\n accuracy = position.accuracy or 0.0\n if (\n not skip_accuracy_filter\n and self._max_accuracy > 0\n and accuracy > self._max_accuracy\n ):\n _LOGGER.debug(\n \"Excluded position by accuracy filter: %f (%s)\",\n accuracy,\n attr[ATTR_TRACCAR_ID],\n )\n continue\n\n await self._async_see(\n dev_id=slugify(device.name),\n gps=(position.latitude, position.longitude),\n gps_accuracy=accuracy,\n battery=position.attributes.get(\"batteryLevel\", -1),\n attributes=attr,\n )", "title": "" }, { "docid": "40c5e4399bba9c5ee6740c973bc5d91f", "score": "0.45878667", "text": "def readSegyData(data, SH, nd, bps, index, endian='>'): # added by A Squelch\n\n # Calulate number of dummy samples needed to account for Trace Headers\n ndummy_samples = int(240 / bps)\n printverbose(\"readSegyData : ndummy_samples=\" + str(ndummy_samples), 6)\n\n # READ ALL SEGY TRACE HEADRES\n STH = getAllSegyTraceHeaders(SH, data)\n\n printverbose(\"readSegyData : Reading segy data\", 1)\n\n # READ ALL DATA EXCEPT FOR SEGY HEADER\n # Data = np.zeros((SH['ns'],ntraces))\n\n revision = SH[\"SegyFormatRevisionNumber\"]\n if (revision == 100):\n revision = 1\n if (revision == 256): # added by A Squelch\n revision = 1\n\n dsf = SH[\"DataSampleFormat\"]\n\n try: # block added by A Squelch\n DataDescr = SH_def[\"DataSampleFormat\"][\"descr\"][revision][dsf]\n except KeyError:\n print(\"\")\n print(\" An error has 
ocurred interpreting a SEGY binary header key\")\n print(\" Please check the Endian setting for this file: \", SH[\"filename\"])\n sys.exit()\n\n printverbose(\"readSegyData : SEG-Y revision = \" + str(revision), 1)\n printverbose(\"readSegyData : DataSampleFormat=\" + str(dsf) + \"(\" + DataDescr + \")\", 1)\n\n if (SH[\"DataSampleFormat\"] == 1):\n printverbose(\"readSegyData : Assuming DSF=1, IBM FLOATS\", 2)\n Data1 = getValue(data, index, 'ibm', endian, nd)\n elif (SH[\"DataSampleFormat\"] == 2):\n printverbose(\"readSegyData : Assuming DSF=\" + str(SH[\"DataSampleFormat\"]) + \", 32bit INT\", 2)\n Data1 = getValue(data, index, 'l', endian, nd)\n elif (SH[\"DataSampleFormat\"] == 3):\n printverbose(\"readSegyData : Assuming DSF=\" + str(SH[\"DataSampleFormat\"]) + \", 16bit INT\", 2)\n Data1 = getValue(data, index, 'h', endian, nd)\n elif (SH[\"DataSampleFormat\"] == 5):\n printverbose(\"readSegyData : Assuming DSF=\" + str(SH[\"DataSampleFormat\"]) + \", IEEE\", 2)\n Data1 = getValue(data, index, 'float', endian, nd)\n elif (SH[\"DataSampleFormat\"] == 8):\n printverbose(\"readSegyData : Assuming DSF=\" + str(SH[\"DataSampleFormat\"]) + \", 8bit CHAR\", 2)\n Data1 = getValue(data, index, 'B', endian, nd)\n else:\n printverbose(\"readSegyData : DSF=\" + str(SH[\"DataSampleFormat\"]) + \", NOT SUPORTED\", 2)\n\n Data = Data1[0]\n\n printverbose(\"readSegyData : - reshaping\", 2)\n printverbose(\" ns=\" + str(SH['ns']),-2)\n Data = np.reshape(np.array(Data), (SH['ntraces'], SH['ns'] + ndummy_samples))\n printverbose(\"readSegyData : - stripping header dummy data\", 2)\n Data = Data[:, ndummy_samples:(SH['ns'] + ndummy_samples)]\n printverbose(\"readSegyData : - transposing\", 2)\n Data = np.transpose(Data)\n\n # SOMEONE NEEDS TO IMPLEMENT A NICER WAY DO DEAL WITH DSF=8\n if (SH[\"DataSampleFormat\"] == 8):\n for i in np.arange(SH['ntraces']):\n for j in np.arange(SH['ns']):\n if Data[i][j] > 128:\n Data[i][j] = Data[i][j] - 256\n\n printverbose(\"readSegyData : Finished reading segy data\", 1)\n\n return Data, SH, STH", "title": "" }, { "docid": "19c4f34c81c1e61de09198d838fa9f3a", "score": "0.4586619", "text": "def _extract_sample(self, particle_class, regex, line, timestamp, publish=True, internal_timestamp=None):\n if regex.match(line):\n\n particle = particle_class(line, port_timestamp=timestamp, internal_timestamp=internal_timestamp)\n parsed_sample = particle.generate()\n\n # grab the internal timestamp from the particle\n new_internal_timestamp = parsed_sample.get(DataParticleKey.INTERNAL_TIMESTAMP)\n\n if new_internal_timestamp is not None:\n if internal_timestamp is None:\n self.last_header_timestamp = new_internal_timestamp\n # this timestamp came from the instrument, check if we need to update our offset\n if self.offset_timestamp is not None:\n self.offset = self.offset_timestamp - new_internal_timestamp\n log.info('Setting new offset: %r', self.offset)\n self.offset_timestamp = None\n else:\n # bump the last_header_timestamp value by 1/8th of a second (sample rate)\n self.last_header_timestamp += 1.0 / 8\n\n parsed_sample[DataParticleKey.INTERNAL_TIMESTAMP] = new_internal_timestamp + self.offset\n\n if publish and self._driver_event:\n self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)\n\n return parsed_sample", "title": "" }, { "docid": "f65a1378c17b2b40b935142f3815baca", "score": "0.45793483", "text": "def _read(filename, dataformat=None, headonly=False, **kwargs):\n debug = kwargs.get('debug')\n\n stream = DataStream([],{})\n format_type = None\n foundapproptiate 
= False\n if not dataformat:\n # auto detect format - go through all known formats in given sort order\n for format_type in PYMAG_SUPPORTED_FORMATS:\n # check format\n if debug:\n print(\"_read: Testing format: {} ...\".format(format_type))\n if debug:\n logger.info(\"_read: Testing format: {} ...\".format(format_type))\n #try:\n # readsucc = isFormat(filename, format_type)\n #except:\n # readsucc = False\n if isFormat(filename, format_type):\n if debug:\n logger.info(\" -- found: {}\".format(format_type))\n print (\" -- found: {}\".format(format_type))\n foundapproptiate = True\n break\n if not foundapproptiate:\n temp = open(filename, 'rt').readline()\n if temp.startswith('# MagPy Absolutes'):\n logger.warning(\"_read: You apparently tried to open a DI object - please use the absoluteAnalysis method\")\n else:\n logger.error(\"_read: Could not identify a suitable data format\")\n return DataStream([LineStruct()],{},np.asarray([[] for el in KEYLIST]))\n else:\n # format given via argument\n dataformat = dataformat.upper()\n try:\n formats = [el for el in PYMAG_SUPPORTED_FORMATS if el == dataformat]\n format_type = formats[0]\n except IndexError:\n msg = \"Format \\\"%s\\\" is not supported. Supported types: %s\"\n logger.error(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))\n raise TypeError(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))\n\n \"\"\"\n try:\n # search readFormat for given entry point\n readFormat = load_entry_point(format_ep.dist.key,\n 'obspy.plugin.waveform.%s' % (format_ep.name), 'readFormat')\n except ImportError:\n msg = \"Format \\\"%s\\\" is not supported. Supported types: %s\"\n raise TypeError(msg % (format_ep.name,\n ', '.join(WAVEFORM_ENTRY_POINTS)))\n \"\"\"\n stream = readFormat(filename, format_type, headonly=headonly, **kwargs)\n\n return stream", "title": "" }, { "docid": "c0768b661637f470d6ac761a87c56e59", "score": "0.45752004", "text": "def ReadTrajOutfile(trajfile='default',header_only=False):\n from sys import stdin\n from htools import strl2numl\n\n if trajfile == 'default':\n\ttrajfile = '/media/sda4/back-trajectories-data/GDAS/0607312100-96-bt.dat'\n\n fid=open(trajfile)\n trajectories=[]\n\n str_cnts=fid.readline().split()\n num_grids=int(str_cnts[0])\n\n try: format_id=int(str_cnts[1])\n except ValueError: pass\n\n # There is some info in the header about the datasets used to generate the trajectories. 
These \n # are ignored for the time being.\n for k in range(0,num_grids):\n\tfid.readline()\n\t# str_cnts=fid.readline().split()\n\t\n\n str_cnts=fid.readline().split()\n num_trajs=int(str_cnts[0])\n\n a={}\n\n for k in range(0,num_trajs):\n\tstr_cnts=fid.readline().split()\n\t\n\ttrajectories.append(a.copy())\n\ttrajectories[k]['endtime']\t= strl2numl(str_cnts[0:4])\n\ttrajectories[k]['endpoint']\t= strl2numl(str_cnts[4:],format='float')\n\n \n str_cnts=fid.readline().split()\n num_fields=int(str_cnts[0])\n \n for k in range(0,num_trajs):\n\ttrajectories[k]['userfields'] = str_cnts[1:]\n\n # return here if only header info is required\n if header_only:\n\ttrajectories_header=trajectories\n\treturn trajectories_header\n \n # Read ALL of the remaining data\n data = fid.readlines()\n\n # d=data[:].split()\n\n num_lines=len(data)\n num_points=num_lines/num_trajs\n\n # preallocate memory for efficiency\n for k in range(0,num_trajs):\n\ttrajectories[k]['timestamp']\t= n.zeros([num_points,5])\n\ttrajectories[k]['age']\t\t= n.zeros([num_points,1])\n\ttrajectories[k]['position']\t= n.zeros([num_points,3])\n\ttrajectories[k]['userdata']\t= n.zeros([num_points,num_fields])\n\n for k in range(0,num_lines):\n\t# print 'k=',k\n\tdata_list=data[k].split()\n\ttraj_num=int(data_list[0])\n\tpoint_num=k/num_trajs\n\n\t# newitem=n.array( strl2numl(data_list[13:],'float') )\n\t# print 'newitem=',newitem\n\t# print 'olditem=',trajectories[traj_num-1]['userdata'][point_num]\n\n#\tprint data_list[0:2]\n#\tprint data_list[2:7]\n\ttry:\n\t trajectories[traj_num-1]['timestamp'][point_num]= n.array( strl2numl(data_list[2:7]) )\n\t trajectories[traj_num-1]['age'][point_num]\t= n.array( strl2numl([data_list[8]],'float') )\n\t trajectories[traj_num-1]['position'][point_num] = n.array( strl2numl(data_list[9:12],'float') )\n\t trajectories[traj_num-1]['userdata'][point_num] = n.array( strl2numl(data_list[12:],'float') )\n\texcept IndexError:\n\t pass\n\n return trajectories", "title": "" }, { "docid": "8fd3594c3e8e1525f58c371878bce6d3", "score": "0.45693105", "text": "def retrive_data(self, market_p, product_id):\n\t\t#creating the request url for product\n\t\tprod_request = self._API_STRING.replace('{prod_id}',product_id)\n\t\tprod_request = prod_request.replace('{market_place}',market_p)\n\t\t\n\t\ttry:\n\t\t\t#api response for product name\n\t\t\tprod_response = requests.get(prod_request,auth = (self._AUTH_KEY,''))\n\t\texcept KeyboardInterrupt:\n\t\t\texit()\n\t\texcept:\n\t\t\traise ProdEqualHttpRequestError('bad request')\n\n\t\t#data object to be returned\n\t\tprod_obj = None\n\n\t\tif(prod_response.status_code == self._ok_status):\n\t\t\t\n\t\t\t#parse response to python object\n\t\t\ttry:\n\t\t\t\tprod_obj = prod_response.json()\n\n\t\t\t\tif(prod_obj['status'] == 'failed'):\n\t\t\t\t\traise ZincAPIError('Unable to fetch product data from the Zinc API.'+\n\t\t\t\t\t\t\t\t\t\t' Make sure the product_id and the market_place values are correct.')\n\t\t\texcept ValueError:\n\t\t\t\traise ProdEqualInternalError('Cannot load the product details due to an internal error (json issue)')\n\t\t\n\t\treturn prod_obj", "title": "" }, { "docid": "eab2b12528bff3aacd40f0dde427b2fb", "score": "0.4568199", "text": "def ecwmf_temperature(input_path, lonlat, time):\n # TODO; have swfo convert the files to HDF5\n raise AncillaryError(\"No ECWMF Temperature profile data\")\n # product = DatasetName.TEMPERATURE.value.lower()\n # search = pjoin(input_path, DatasetName.ECMWF_PATH_FMT.value)\n # files = 
glob.glob(search.format(product=product, year=time.year))\n # data = None\n # required_ymd = datetime.datetime(time.year, time.month, time.day)\n # for f in files:\n # url = urlparse(f, scheme='file').geturl()\n # ymd = splitext(basename(f))[0].split('_')[1]\n # ancillary_ymd = datetime.datetime.strptime(ymd, '%Y-%m-%d')\n # if ancillary_ymd == required_ymd:\n # bands = list(range(1, 38))\n # data = get_pixel(f, lonlat, bands)[::-1]\n\n # metadata = {'data_source': 'ECWMF Temperature',\n # 'url': url,\n # 'query_date': time}\n\n # # ancillary metadata tracking\n # md = extract_ancillary_metadata(f)\n # for key in md:\n # metadata[key] = md[key]\n\n # # internal file metadata (and reverse the ordering)\n # df = read_metadata_tags(f, bands).iloc[::-1]\n # df.insert(0, 'Temperature', data)\n\n # return df, metadata\n\n # if data is None:\n # raise AncillaryError(\"No ECWMF Temperature profile data\")", "title": "" }, { "docid": "50ff0097831c1c1d88155e545d0eb49e", "score": "0.4567876", "text": "def read_data_from(self, path, tidx = None):\n self.logger.debug('Getting data from {}'.format(path))\n node = self.get_node(path)\n\n try:\n data, meta = node['data']\n except KeyError:\n try:\n data, meta = node['data_static']\n except KeyError:\n self.logger.error('Could not find \"data\" or \"data_static\" in {}'.format(path))\n raise ValueError('Path {} does not exist in snapshot'.format(path))\n\n unit = meta['unit']\n\n # if tidx is None:\n # return PhysicalField(np.atleast_1d(data), unit)\n # else:\n # return PhysicalField(np.atleast_1d(data[tidx]), unit)\n\n return PhysicalField(np.atleast_1d(data), unit)", "title": "" }, { "docid": "e82790f2fcdf68c5903a38a6ea00c078", "score": "0.45663935", "text": "def readPT(PT_file):\n\n # Read abundance data and convert to array:\n f = open(PT_file, 'r')\n data = []\n for line in f.readlines():\n if line.startswith('#'):\n continue\n else:\n l = [value for value in line.split()]\n data.append(l)\n data = np.asarray(data)\n f.close()\n\n # Size of the data array (number of layers in the atmosphere)\n ndata = len(data)\n\n # Allocate arrays of pressure and temperature\n pressure = []\n temperature = []\n\n # Read lines and store pressure and temperature data\n for i in np.arange(ndata):\n pressure = np.append(pressure, data[i][0])\n temperature = np.append(temperature, data[i][1])\n pres = pressure.astype(float)\n temp = temperature.astype(float)\n\n return pres, temp", "title": "" }, { "docid": "294b92b872aa49c0e391d281edb12fb9", "score": "0.45649868", "text": "def get_sensor_data():\n gas_sensor_value = convert(GPIO.input(gas_sensor))\n vibration_sensor_value = convert(GPIO.input(vibration_sensor))\n time_returned, date = time_stamp()\n return (gas_sensor_value, vibration_sensor_value, time_returned, date)", "title": "" }, { "docid": "13b97eedf116c9340e613ca6719ff0c3", "score": "0.45638812", "text": "def read(self):\r\n try:\r\n return self.dev.read()\r\n except Exception as e:\r\n raise TelepythicError(self.dev, e)", "title": "" }, { "docid": "b8c659fb5fc2830cb3938c65b8e5d2d2", "score": "0.4560022", "text": "def extract_metadata_particle(self, raw_data, timestamp):\r\n sample = self._extract_sample(self._metadata_particle_class, None, raw_data, internal_timestamp=timestamp)\r\n return sample", "title": "" }, { "docid": "e936cc4c2eca3f3f6b7d4eb9a6cc6ab2", "score": "0.4558974", "text": "def _load_reading_row(self, row):\n \n try:\n reading_datetime = datetime.datetime.strptime(row[TemperatureReadingManager.TIMESTAMP_INDEX], \"%Y-%m-%d %H:%M:%S.%f\")\n 
temp_reading = TemperatureReading(reading_datetime,\n int(row[TemperatureReadingManager.SEQ_NUM_INDEX]),\n row[TemperatureReadingManager.SENSOR_MODEL_INDEX],\n float(row[TemperatureReadingManager.MIN_INDEX]),\n float(row[TemperatureReadingManager.AVG_INDEX]),\n float(row[TemperatureReadingManager.MAX_INDEX]),\n row[TemperatureReadingManager.STATUS_INDEX])\n except:\n raise ValueError(\"Invalid data entry\")\n\n return temp_reading", "title": "" }, { "docid": "8443c83f431c9b0ce5433d8c61b44219", "score": "0.455847", "text": "def get_data(self):\n\n buffer = []\n\n while True:\n data = self.uart.read(32) # read up to 32 bytes\n data = list(data)\n\n buffer += data\n\n while buffer and buffer[0] != 0x42:\n buffer.pop(0)\n print(\"Popping to start, remaining =\", len(buffer))\n\n if len(buffer) > 200:\n buffer = [] # avoid an overrun if all bad data\n print(\"OVERRUN\")\n if len(buffer) < 32:\n continue\n\n if buffer[1] != 0x4d:\n print(\"MISSING 2ND START CHARACTER\")\n buffer.pop(0)\n continue\n\n frame_len = struct.unpack(\">H\", bytes(buffer[2:4]))[0]\n if frame_len != 28:\n buffer = []\n print(\"WRONG FRAME LENGTH\")\n continue\n\n frame = struct.unpack(\">HHHHHHHHHHHHHH\", bytes(buffer[4:]))\n\n pm10_standard, pm25_standard, pm100_standard, \\\n pm10_env, pm25_env, pm100_env, \\\n particles_03um, particles_05um, particles_10um, \\\n particles_25um, particles_50um, particles_100um, \\\n skip, checksum = frame\n\n check = sum(buffer[0:30])\n\n if check != checksum:\n buffer = []\n print(\"CHECKSUM FAILED\")\n continue\n\n buffer = buffer[32:]\n\n return pm10_standard, pm25_standard, pm100_standard, \\\n pm10_env, pm25_env, pm100_env, \\\n particles_03um, particles_05um, particles_10um, \\\n particles_25um, particles_50um, particles_100um", "title": "" }, { "docid": "96d28358f01e74db7f7e2a89101e2113", "score": "0.4556465", "text": "def _get_temperature_from_buffer(data):\n unadjusted = (ord(data[0]) << 8) + ord(data[1])\n unadjusted &= SHT21._STATUS_BITS_MASK # zero the status bits\n unadjusted *= 175.72\n unadjusted /= 1 << 16 # divide by 2^16\n unadjusted -= 46.85\n return unadjusted", "title": "" }, { "docid": "96d28358f01e74db7f7e2a89101e2113", "score": "0.4556465", "text": "def _get_temperature_from_buffer(data):\n unadjusted = (ord(data[0]) << 8) + ord(data[1])\n unadjusted &= SHT21._STATUS_BITS_MASK # zero the status bits\n unadjusted *= 175.72\n unadjusted /= 1 << 16 # divide by 2^16\n unadjusted -= 46.85\n return unadjusted", "title": "" }, { "docid": "ef40dabb92d2efa1173f878434be372a", "score": "0.45563015", "text": "def read(self):\n\n self.execution_trace += \"- \" + self.tape_actual[self.index]\n return self.tape_actual[self.index]", "title": "" }, { "docid": "6e44222bb61fa3fecf7dffa3122cfa37", "score": "0.45561683", "text": "def read():\n exobj = (putil.exh.get_exh_obj()\n if putil.exh.get_exh_obj() else\n putil.exh.ExHandle())\n exobj.add_exception(\n exname='illegal_read_call',\n extype=TypeError,\n exmsg='Cannot call read'\n )", "title": "" }, { "docid": "17c09450d2b5c62308f2a713aa2a30c3", "score": "0.4555184", "text": "def read_device_data(dtype, did, client = None):\r\n device_config = config.read_device_config(dtype, did)\r\n print 'reading device: ', device_config['sensor']['type'], device_config['id']\r\n if not common.equals_ignore_case(device_config['sensor']['type'], dtype) :\r\n print 'Device type mismatch', device_config['sensor']['type'], dtype\r\n return\r\n if client is None: \r\n client = epmqtt.get_mqtt_client()\r\n \r\n if 
common.equals_ignore_case(device_config['sensor']['type'], 'temperature') :\r\n publish_temperature_data(dtype, did, client)\r\n #publish_temperature_data(type, id, client, device_config)\r", "title": "" }, { "docid": "6d5ad690c8bdeb9f61bb5268a6b83402", "score": "0.4548556", "text": "def collect_from_data_source(self) -> Dict[str, str]:\n temper = Temper()\n retries = 0\n degrees = {}\n while retries < MAX_RETRIES:\n results = temper.read()\n if \"internal temperature\" in results[0].keys():\n degrees = {\"thermometer_data\": str(results[0][\"internal temperature\"])}\n break\n self.context.logger.debug(\"Couldn't read the sensor I am re-trying.\")\n time.sleep(0.5)\n retries += 1\n return degrees", "title": "" }, { "docid": "25cef4301a876757bbb689af3642ac98", "score": "0.45414534", "text": "def process_data(self):\n timestamp = int(time.time() * 1000)\n try:\n data = self.buffer.decode().strip()\n if len(data) > 0:\n print(data)\n self.data_available(timestamp, data)\n except:\n # decoding error\n pass", "title": "" }, { "docid": "537cb366dce2bedb48cc355e5d33636a", "score": "0.45352104", "text": "def read_temp(self):\r\n\t\tdata = bus.read_i2c_block_data(HP206C_DEFAULT_ADDRESS, HP206C_READ_T, 3)\r\n\t\t\r\n\t\tcTemp = (((data[0] & 0x0F) * 65536) + (data[1] * 256) + data[2]) / 100.00\r\n\t\tfTemp = (cTemp * 1.8) + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "title": "" }, { "docid": "883f5868bd966d5e18963f41acf68375", "score": "0.45344928", "text": "def get_acceleration_data(self):\n x_data0 = self.bus.read_byte_data(ACC_ADDRESS, DATA_X0)\n x_data1 = self.bus.read_byte_data(ACC_ADDRESS, DATA_X1)\n\n y_data0 = self.bus.read_byte_data(ACC_ADDRESS, DATA_Y0)\n y_data1 = self.bus.read_byte_data(ACC_ADDRESS, DATA_Y1)\n\n z_data0 = self.bus.read_byte_data(ACC_ADDRESS, DATA_Z0)\n z_data1 = self.bus.read_byte_data(ACC_ADDRESS, DATA_Z1)\n\n # Combine respective registers\n x_data = ((x_data1 & 0x03) * 256) + x_data0\n y_data = ((y_data1 & 0x03) * 256) + y_data0\n z_data = ((z_data1 & 0x03) * 256) + z_data0\n\n # Apply 2s comp\n x_data = twos_comp(x_data, 16)\n y_data = twos_comp(y_data, 16)\n z_data = twos_comp(z_data, 16)\n\n # Convert to m/s^2 and round\n x_data *= CONV_FULLR * EARTH_GRAVITY\n y_data *= CONV_FULLR * EARTH_GRAVITY\n z_data *= CONV_FULLR * EARTH_GRAVITY\n\n # Round the values\n x_data = round(x_data, OUTPUT_PRECISION)\n y_data = round(y_data, OUTPUT_PRECISION)\n z_data = round(z_data, OUTPUT_PRECISION)\n\n return {\"x\": x_data, \"y\": y_data, \"z\": z_data}", "title": "" }, { "docid": "4fc17f4c03cf6bfe0f73ed92f118b66d", "score": "0.45320854", "text": "def get_temperature():\n read_sensors()\n return mcp9808_temp", "title": "" }, { "docid": "5936d601dc81bdae89fa3ff940592958", "score": "0.45212543", "text": "def read_raw(self):\n debug('read_raw()')\n \n if self.instrument == None: return \n else: \n #if self.model == 'RIGOLDE': _t.sleep(RIGOLDE_DELAY)\n return self.instrument.read_raw()", "title": "" }, { "docid": "67663b60dce1c921a4710400dd504859", "score": "0.45200938", "text": "def _read_sensors(self):\n\n # Read differents sensors\n for s in self._sensors_to_read:\n\n if s == 'a':\n # Accelerometer sensor in a non filtered way\n if self._accelerometer_filtered:\n parameters = ('A', 12, '@III')\n\n else:\n parameters = ('a', 6, '@HHH')\n\n self._debug('WARNING: Accelerometer not yet implemented!')\n\n elif s == 'n':\n # Proximity sensors\n res, prox = vrep.simxGetStringSignal(self._clientID, 'EPUCK_proxSens', vrep.simx_opmode_streaming)\n if res != 
vrep.simx_return_ok:\n self._debug(\"WARNING: Proximity sensors readout failed: \", res)\n else:\n proxVals = vrep.simxUnpackFloats(prox)\n # TODO: Find out actual needed scaling factor\n proxVals = [int(x * 1000) for x in proxVals]\n self._proximity = tuple(proxVals)\n\n elif s == 'm':\n # Floor sensors\n res, floor1 = vrep.simxGetFloatSignal(self._clientID, 'EPUCK_mylightSens_0', vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Floor 1 sensor readout failed: \", res)\n res, floor2 = vrep.simxGetFloatSignal(self._clientID, 'EPUCK_mylightSens_1', vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Floor 2 sensor readout failed: \", res)\n res, floor3 = vrep.simxGetFloatSignal(self._clientID, 'EPUCK_mylightSens_2', vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Floor 3 sensor readout failed: \", res)\n # Scale returned values to mimic real robot; current factor is just guessed\n self._floor_sensors = (floor1 * 1800, floor2 * 1800, floor3 * 1800)\n\n elif s == 'q':\n # Motor position sensor\n # First: Get the handles of both motor joints\n res, leftMotor = vrep.simxGetObjectHandle(self._clientID, 'ePuck_leftJoint',\n vrep.simx_opmode_oneshot_wait)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Unable to get handle of left motor: \", res)\n continue\n res, rightMotor = vrep.simxGetObjectHandle(self._clientID, 'ePuck_rightJoint',\n vrep.simx_opmode_oneshot_wait)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Unable to get handle of right motor: \", res)\n continue\n\n # Second: Get the actual motor position (in radians)\n res, leftPos = vrep.simxGetJointPosition(self._clientID, leftMotor, vrep.simx_opmode_oneshot_wait)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Readout of left motor failed: \", res)\n continue\n res, rightPos = vrep.simxGetJointPosition(self._clientID, rightMotor, vrep.simx_opmode_streaming)\n if res != vrep.simx_return_ok:\n self._debug(\"WARNING: Readout of left motor failed: \", res)\n continue\n\n self._motor_position = (leftPos, rightPos)\n\n elif s == 'o':\n # Light sensors\n parameters = ('O', 16, '@HHHHHHHH')\n self._debug('WARNING: Light sensors not yet implemented!')\n\n elif s == 'u':\n # Microphone\n parameters = ('u', 6, '@HHH')\n self._debug('WARNING: Microphones not yet implemented!')\n\n elif s == 'e':\n # Motor Speed\n parameters = ('E', 4, '@HH')\n self._debug('WARNING: Motor speed not yet implemented!')\n\n elif s == 'i':\n # Do nothing for the camera, is an independent process\n pass\n\n else:\n self._debug('Unknow type of sensor to read')", "title": "" }, { "docid": "687031cf0ca5b722f30bbd9b11873fa9", "score": "0.45189866", "text": "def read(self):\n self.logger.info('Readig data')\n return self.instr.read()", "title": "" }, { "docid": "fb2a9d9c1c5a777a1bcd6f83d6bed5b8", "score": "0.451842", "text": "def read_data():\n\t# data\n\tgyroData = [0,0,0]\n\taccelData = [0,0,0]\n\t\n\t# print \"\\n new\"\n\n\t# check if gyro is ready\n\ttry:\n\t\t# get the 4th LSB\n\t\treadyG = (D.gyro.readU8(D.STATUS_REG_G) & (1<<3)) >> 3 \n\t\tif readyG==1:\n\t\t\t# read\n\t\t\tgyroData = read_gyro()\n\t\t\tpublish_gyro(gyroData)\n\texcept:\n\t\tprint \"Error reading gyro data\"\n\t\tpass\n\n\n\t# check if the accel is ready\n\ttry:\n\t\t# get the 4th LSB\n\t\treadyA = (D.gyro.readU8(D.STATUS_REG_A) & (1<<3)) >> 3 \n\t\tif readyA==1:\n\t\t\t# read\n\t\t\taccelData = read_accel()\n\t\t\tpublish_accel(accelData)\n\texcept:\n\t\tprint 
\"Bad reading\"\n\t\tpass\n\n\n\t#return gyroData,accelData", "title": "" }, { "docid": "47f3933e0ce5870f070bb20c19ae92f7", "score": "0.45157492", "text": "def read(self, ggpkfile):\n pass", "title": "" }, { "docid": "1d1486befb6614a6e28192cc6fdf6f5e", "score": "0.45077908", "text": "def read_data(c, pattern=None):\n measurements = get_measurements(c, pattern)\n queries = get_queries(measurements)\n return query_data(c, queries)", "title": "" }, { "docid": "ffaaca830849adb2d7c9ff3ab60a0bd4", "score": "0.45075893", "text": "def ReadData(self):\r\n\t\tf = open(self.src, \"r\")\r\n\t\tself.raw_data = f.read()\r\n\t\tf.close()", "title": "" }, { "docid": "cda16b3e5ad4d7eb4861575e3cf44423", "score": "0.45068067", "text": "def read_data(self):\n with open(self.json_file) as fp:\n for line in fp:\n line = line.strip()\n if not line:\n continue\n jdata = json.loads(line)\n shopglat = jdata.get('shopGlat')\n shopglng = jdata.get('shopGlng')\n ap = jdata.get('average_price')\n m = re.match('[^0-9]*?([0-9]+)[^0-9]*', ap)\n if m:\n p = int(m.group(1))\n self.geo_info.append(\n {\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [shopglng, shopglat]\n },\n 'count': p\n })", "title": "" }, { "docid": "21cf5ad9e6a6d9fc08117729760f1771", "score": "0.45054647", "text": "def read_trace(filename, dataname, k=100):\n tracedef_path = os.path.abspath(filename + '.dat')\n with open(tracedef_path, \"rb\") as f1:\n text = f1.read()\n blocks = text.split(b'#BEGINCHANNELHEADER')\n time_block = blocks[1]\n if time_block.find(b\"Zeit\") == -1:\n pass\n ts = float(\n (time_block[time_block.find(b'241,') + len(\"241,\"):].splitlines())[0]\n )\n n = len(blocks[2:])\n column_index = None\n for block in blocks[2:]:\n lines = block.splitlines()\n if lines[1] != b'200,' + dataname.encode(\"ascii\"):\n continue\n for line in lines:\n if line.find(b'221,') == -1:\n continue\n index = line.find(b'221,') + len(\"221,\")\n column_index = int(line[index:])\n break\n break\n if column_index is None:\n print('target signal is not found')\n result = None\n ts = None\n return result, ts\n tracedata_path = os.path.abspath(filename + \".r64\")\n with open(tracedata_path, \"rb\") as f2:\n data_all = f2.read()\n data_length = int(len(data_all) * (k/100.0))\n result_all = [\n struct.unpack(\"d\", data_all[i:i+8])[0]\n for i in range(0, data_length-8, 8)\n ]\n result = np.array(result_all[column_index-1::n])\n return result, ts", "title": "" }, { "docid": "af664ae99aa92acafac6ae8d98bd559f", "score": "0.4504879", "text": "async def get_reading(self) -> Reading:", "title": "" }, { "docid": "e1bafabdf7eaf62b1ef7b79f94bcee18", "score": "0.4504824", "text": "def read_telemetrystatus(path_name):\r\n data=pd.read_csv(path_name)\r\n #find the data lines number in the file('telemetry_status.csv')\r\n for i in range(len(data['vessel (use underscores)'])):\r\n if data['vessel (use underscores)'].isnull()[i]:\r\n data_line_number=i\r\n break\r\n #read the data about \"telemetry_status.csv\"\r\n telemetrystatus_df=pd.read_csv(path_name,nrows=data_line_number)\r\n as_list=telemetrystatus_df.columns.tolist()\r\n idex=as_list.index('vessel (use underscores)')\r\n as_list[idex]='Boat'\r\n telemetrystatus_df.columns=as_list\r\n for i in range(len(telemetrystatus_df)):\r\n telemetrystatus_df['Boat'][i]=telemetrystatus_df['Boat'][i].replace(\"'\",\"\")\r\n if not telemetrystatus_df['Lowell-SN'].isnull()[i]:\r\n telemetrystatus_df['Lowell-SN'][i]=telemetrystatus_df['Lowell-SN'][i].replace(',',',')\r\n if not 
telemetrystatus_df['logger_change'].isnull()[i]:\r\n telemetrystatus_df['logger_change'][i]=telemetrystatus_df['logger_change'][i].replace(',',',')\r\n return telemetrystatus_df", "title": "" }, { "docid": "e9e7c91eff4b73616f81095b1cdde6d1", "score": "0.4503076", "text": "def read(self):\n return self.tape[0]", "title": "" }, { "docid": "d5b71b53bc17768415aaadf677097d2c", "score": "0.4501596", "text": "def download_observation(self):\n retrieval_time = timezone.now().replace(microsecond=0)\n logger.info('Trying to connect to {}'.format(self.name))\n client, output = None, False\n try:\n client = telnetlib.Telnet(self.ip_address, self.port)\n if self.manufacturer == 'vaisala':\n pattern = '(^0[Rr]0),([A-Za-z]{2}=-?(\\d+(\\.\\d+)?)[A-Za-z#],?)+'\n clean_response = False\n while not clean_response:\n response = client.read_until('\\r\\n'.encode('utf8'), 60)\n m = re.search(pattern, response)\n if m:\n response = response.strip()\n clean_response = True\n else: # Default to using the Telvent station format.\n response = client.read_until('\\r\\n'.encode('utf8'), 60)\n response = response[2:]\n if client:\n client.close()\n except Exception as e:\n logger.error('Failed to read weather data from {}'.format(self.name))\n logger.exception(e)\n return None\n\n logger.info('PERIODIC READING OF {}'.format(self.name))\n logger.info(force_text(response))\n output = (self.pk, force_text(response), retrieval_time)\n return output", "title": "" }, { "docid": "a82a856d2592164fe618eb74732d057e", "score": "0.45006034", "text": "def get_product_info(self, product):\n raise NotImplementedError", "title": "" } ]
b8952633b24641cb5bcf4dabccd9c01a
Return the class containing all connections to the gateway.
[ { "docid": "3a17156ee575bc122fc50d9f4e910d08", "score": "0.5476334", "text": "def gateway_device(self):\n return self._gateway_device", "title": "" } ]
[ { "docid": "66be67a4d16a295d168036e8901ac466", "score": "0.6589074", "text": "def get_connections(self):\n connection_list = []\n for c in self._classical_connections:\n connection_list.append({'type': 'classical', 'connection': c})\n for q in self._quantum_connections:\n connection_list.append({'type': 'quantum', 'connection': q})\n return connection_list", "title": "" }, { "docid": "2eaa0defb3c90a685fbd52ecf444482b", "score": "0.65477216", "text": "def classical_connections(self):\n return self._classical_connections", "title": "" }, { "docid": "d2d8bb645f3a1b9af6496a3822b67a58", "score": "0.6325104", "text": "def get_gateway(self):\n return Gateway()", "title": "" }, { "docid": "e1093c4383cf3cac3fd3913130390ddc", "score": "0.6267506", "text": "def connections(self) -> \"Connections\":\n return () # Return no connections by default", "title": "" }, { "docid": "c8722bb421cb8627a5e7dc38a2c5c56e", "score": "0.6182516", "text": "def get_connection(self):\n return self.connection_class", "title": "" }, { "docid": "ed57b97e73f44fc5d8f4e21e91a7d5de", "score": "0.6056934", "text": "def get_connections(self):\n return self._connected_to", "title": "" }, { "docid": "8515fb5e5121146cbf6671614716b0c9", "score": "0.60421574", "text": "def get_gateway(self):\r\n return Server(**self.__config._sections['gateway'])", "title": "" }, { "docid": "20a42ef8ae6d1c25ca2d9b737f751068", "score": "0.6029156", "text": "def gateway(self):\n return self._gateway", "title": "" }, { "docid": "7d438152b643e61c8c7768664dd0f61d", "score": "0.60137606", "text": "def getConnections(self):", "title": "" }, { "docid": "5fbb751daae968d81ac6874dd4581c02", "score": "0.6010646", "text": "def connection(self) -> List[Connection]:\n return self._connection", "title": "" }, { "docid": "733321e6570d72e99632a5db893fb7c9", "score": "0.5835855", "text": "def getProxyClass(self):\n raise NotImplementedError", "title": "" }, { "docid": "292c0a8c39e3340ae6ef779c9c292654", "score": "0.5789348", "text": "def getConnections(self):\n return self.neighbour.keys()", "title": "" }, { "docid": "cabd2d6cf6b9eed13ee5525bc83ed2af", "score": "0.57642055", "text": "def gateway_info(self):\n return self._gateway_info", "title": "" }, { "docid": "ace578ef9c4801c42bf32cce42b0d36e", "score": "0.5727875", "text": "def host_gateway(self):\n return self._host_gateway", "title": "" }, { "docid": "aaa5198817ab62d9c39fbd9cd7ceb9ed", "score": "0.5693891", "text": "def _list_gateways():\n l_ret = netifaces.gateways()\n return l_ret", "title": "" }, { "docid": "19571aee3a3b81bec24935ae7a3e10d1", "score": "0.56753665", "text": "def list(self):\n resourceList = super(GatewayController, self).list()\n gateways = [gw for gw in resourceList.json if gw[\"purpose\"] == \"proxy\"]\n return ResourceList(self.resource_class, gateways)", "title": "" }, { "docid": "87d7fe0e2640c7dfb678ab58f29e7c28", "score": "0.5669583", "text": "def get_connections(self):\r\n with _subscribe_lock:\r\n d = _subscriptions.get(self)\r\n result = set()\r\n if d is not None:\r\n for con in d.keys():\r\n if isinstance(con, _MethodRef):\r\n con = con.resolve()\r\n if con is not None:\r\n result.add(con)\r\n return result", "title": "" }, { "docid": "d424b6d8fd3cfbe8120fa9d4ff09a96b", "score": "0.5660371", "text": "def connection_model(self):\n return self._connection_model", "title": "" }, { "docid": "2bd5ce43278dbfc975036d1f633206a0", "score": "0.56595266", "text": "def _class(self) -> List[str]:\n return self.__class", "title": "" }, { "docid": "ceea7453d021d003f9bdf787a93051b2", "score": 
"0.5658242", "text": "def getAllConns(self):\n oDriver = self.transform[0]\n All_incomingConn = []\n All_DestinationsConn = []\n All_Outputs = []\n\n print('{} :'.format(oDriver))\n # Incoming Connections\n for DriverSource, DriverDestination in oDriver.inputs(c=1, p=1):\n All_incomingConn.append(DriverDestination)\n\n if All_incomingConn:\n print(' Incomming Connections Are : ')\n for i, each in enumerate(All_incomingConn):\n print(' {} {}'.format(i, each))\n else:\n print(' No Incoming Connections')\n\n for DriverSource, DriverDestination in oDriver.outputs(c=1, p=1):\n All_DestinationsConn.append(DriverDestination)\n\n # Outputs\n for DriverSource, DriverDestination in oDriver.outputs(c=1, p=1):\n All_Outputs.append(DriverSource)\n\n if All_Outputs:\n print(' Outputs are : ')\n for i, each in enumerate(All_Outputs):\n print(' {} {}'.format(i, each))\n else:\n print(' No Outputs')\n\n # Destinations connections\n if All_DestinationsConn:\n print(' Destinations connections are : ')\n for i, each in enumerate(All_DestinationsConn):\n print(' {} {}'.format(i, each))\n else:\n print(' No destinations connections')", "title": "" }, { "docid": "c10442cefc80a6ccb7e8a1e03cd297b9", "score": "0.55696225", "text": "def connections(self) -> aws_cdk.aws_ec2.Connections:\n return jsii.get(self, \"connections\")", "title": "" }, { "docid": "7c0dd74abd152484c87a95500881ef25", "score": "0.5560098", "text": "def type(self):\n return Connection.Type", "title": "" }, { "docid": "6f38e826049e55a11f1f8fac30bf7a3f", "score": "0.55548894", "text": "def socket_instances(self):\n return self._socketInstances", "title": "" }, { "docid": "1026efca0d72cfdbd8a0eab12b0674cc", "score": "0.5548453", "text": "def connections(self):\n self.method = \"get_connections\"\n self.payload = json_rpc_node.payload\n self.payload['method']=self.method\n self.respData = node.getjson(self.payload, self.url).get('result')\n self.ipv4OutList = []\n self.ipv4InList = []\n self.resultDict={}\n try:\n for ipv4 in self.respData['connections']:\n if ipv4['incoming'] == False:\n self.ipv4OutList.append((ipv4['host'], ipv4['height']))\n else:\n self.ipv4InList.append((ipv4['host'], ipv4['height']))\n self.resultDict['outgoing'] = self.ipv4OutList\n self.resultDict['incoming'] = self.ipv4InList\n return self.resultDict\n except (KeyError, TypeError):\n return node.errorMessage", "title": "" }, { "docid": "926fd4376f96f96d299acf684fd0871c", "score": "0.55439365", "text": "def get_all_dns_connection(self):\n return self.m_connection.all_dns", "title": "" }, { "docid": "bc08ca9d1729e7a59336cb9782067130", "score": "0.5527111", "text": "def get_classes(self):\n return self.classes", "title": "" }, { "docid": "297695a0560747e5bbc5c5063aad3d87", "score": "0.55037576", "text": "async def get_gateway(self) -> None:\n self._logger.debug(\"Requesting Gateway\")\n req = await self._http_session.request(\"GET\", ApiPath(\"/gateway/bot\"))\n self._logger.debug(\"Received Gateway\")\n self.url = req[\"url\"]\n self.shards = req[\"shards\"]\n self.session_start_limit = req[\"session_start_limit\"]", "title": "" }, { "docid": "74a1f7410abac0b170d1e5f72376b3ac", "score": "0.5499863", "text": "def __init__(self):\r\n self.connections = {}", "title": "" }, { "docid": "6eb0c45f8e24cf79e315b65bc3f8bd3f", "score": "0.54956794", "text": "def quantum_connections(self):\n return self._quantum_connections", "title": "" }, { "docid": "2292e310da70f1d6dbdd422b0165403c", "score": "0.54873943", "text": "def connection_type(self):\n return self._connection_type", "title": 
"" }, { "docid": "00a37032b943a5b3cd55ec25acb3a70e", "score": "0.5484907", "text": "def get_classes(self):\n pass", "title": "" }, { "docid": "1f1b78945e5d92ad006aab49e9c4c44a", "score": "0.54755664", "text": "def __init__(self):\n self.connections = {}", "title": "" }, { "docid": "6566b6bb7039f95008d578ef6eade51e", "score": "0.5470466", "text": "def get_class(self):\n return self._class", "title": "" }, { "docid": "ff32e756c8120049eaa849bd441ee99d", "score": "0.54589885", "text": "def getServiceClass(self):\n svc = self.serviceclass()\n if svc:\n return {'protocol': self.protocol, 'port': self.port }\n return {}", "title": "" }, { "docid": "24a741e5c09b9a2836f9da2304c2caaa", "score": "0.54568744", "text": "def pqtotclientconnections(self) :\n try :\n return self._pqtotclientconnections\n except Exception as e:\n raise e", "title": "" }, { "docid": "7f34fb79ad5926a5efaf1b600d94454c", "score": "0.54326695", "text": "def gateway_type(self) -> str:\n return pulumi.get(self, \"gateway_type\")", "title": "" }, { "docid": "689b8fbf1bfefbca37681078bfed9394", "score": "0.54097486", "text": "def connectivity_service(self) -> List[ConnectivityService]:\n return self._connectivity_service", "title": "" }, { "docid": "fd2ca142d6e3c662758d770153a4e193", "score": "0.54079705", "text": "def get_connections():\n return cursor().execute('SELECT * FROM connections').fetchall()", "title": "" }, { "docid": "ba82efd6ba6a67ef0fd18bfb3290555a", "score": "0.540082", "text": "def collect_conn_info(self):\r\n self.conn_info = {}\r\n\r\n for eq in self:\r\n eq.collect_conn_info(self.conn_info)\r\n\r\n return self.conn_info", "title": "" }, { "docid": "2dbd2739c6299fac80278057e1ab855c", "score": "0.53974664", "text": "def connections(self) -> Optional[Sequence['outputs.ExpressRouteCircuitConnectionResponse']]:\n return pulumi.get(self, \"connections\")", "title": "" }, { "docid": "efe494fc0db6478bbb5f2fedf9a27c81", "score": "0.5393919", "text": "def get_connection(self):\n return self.conn_method", "title": "" }, { "docid": "9d1e1d3e1964f11508accb047af81128", "score": "0.538388", "text": "def get_network_adapters():\n ret = __proxy__[\"cimc.get_config_resolver_class\"](\"networkAdapterEthIf\", True)\n\n return ret", "title": "" }, { "docid": "de6cd72d5ba9abfcba0f52eaf7f43c7a", "score": "0.53749126", "text": "def gateway(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"gateway\")", "title": "" }, { "docid": "c549bb000a0907c8299abc24c8b03af5", "score": "0.5360984", "text": "def NetworkBackend():\n class NetworkBackend(get_superclass_dynamic()):\n pass\n\n return NetworkBackend", "title": "" }, { "docid": "e8d00c6fbb5217089d6abe46df40812b", "score": "0.5356791", "text": "def instance(self) -> ConnectionBase:\n return self._instance", "title": "" }, { "docid": "b997da74ca94c74eb18917e5a1ee0f71", "score": "0.53360647", "text": "def GetClass(self):\n return _pcbnew.NETCLASS_GetClass(self)", "title": "" }, { "docid": "9ead1bf71d241fa2fa2353a9474a237c", "score": "0.5323142", "text": "def _create_connection(self):\n created_connection = Connection(self) # creating an instance of connection\n self.CONNECTIONS.append(created_connection)\n self.actual_conn += 1\n return self.CONNECTIONS", "title": "" }, { "docid": "9b95623d35572ed3d7588c57a5dd42ea", "score": "0.5314401", "text": "def get():\n if Network.this is None:\n Network.this = Network()\n return Network.this", "title": "" }, { "docid": "6a219aec663d9a78d36db137e5a28258", "score": "0.531285", "text": "def max_connections(self):\n raise NotImplementedError", 
"title": "" }, { "docid": "c2b94e30454cc9e36b6eb08e39222aa1", "score": "0.53108895", "text": "def get_qos_forwarding_class_refs(self):\n return getattr(self, 'qos_forwarding_class_refs', None)", "title": "" }, { "docid": "c2b94e30454cc9e36b6eb08e39222aa1", "score": "0.53108895", "text": "def get_qos_forwarding_class_refs(self):\n return getattr(self, 'qos_forwarding_class_refs', None)", "title": "" }, { "docid": "2365277148d7b30b39c5a67bc233587f", "score": "0.52982914", "text": "def connection_type(self):\n return 0", "title": "" }, { "docid": "d23a99bdfe1c01e8673e73f2895ef39c", "score": "0.52920395", "text": "def python_class(self):\n return self._python_class", "title": "" }, { "docid": "663ac8e4533b219373ad05e5d4bfe747", "score": "0.52849853", "text": "def gateway_type(self) -> Optional[pulumi.Input['GatewayConfigGatewayType']]:\n return pulumi.get(self, \"gateway_type\")", "title": "" }, { "docid": "b23e380cc238b9baceefc05f136765d0", "score": "0.52737397", "text": "def multi_connection(self):\n return self.model.multi_connection", "title": "" }, { "docid": "f015a4fbb3e23f858ab42d0b4ff4c537", "score": "0.5271898", "text": "def get_classes(self):\n raise NotImplementedError", "title": "" }, { "docid": "bafcf721eefb11fe3da9a64349cd523f", "score": "0.52692896", "text": "def get_all_ips_connection(self):\n return self.m_connection.all_ips", "title": "" }, { "docid": "b0004e7c98488e20b149549172cce49d", "score": "0.5265435", "text": "def outputConnections(self):\n return self.connections.get('output').values()", "title": "" }, { "docid": "95246842785afbdb8314d2e30e25dc0a", "score": "0.526521", "text": "def list_gateways(\n connection: Connection, to_dictionary: bool = False, **filters\n) -> list['Gateway'] | list[dict]:\n return Gateway.list(connection, to_dictionary, **filters)", "title": "" }, { "docid": "f73d75baba67e8dedc54e869b828e121", "score": "0.5252852", "text": "def _get_gw_type(self):\n return self.__gw_type", "title": "" }, { "docid": "50b60cd103c2ebd90e58e16cdb4f7bd4", "score": "0.5248068", "text": "def classes(self):\n return self._classes", "title": "" }, { "docid": "50b60cd103c2ebd90e58e16cdb4f7bd4", "score": "0.5248068", "text": "def classes(self):\n return self._classes", "title": "" }, { "docid": "38053c8c2e23d95c3621f046e9f5c0b7", "score": "0.52471095", "text": "def _get_conn_class(self, loc):\r\n return {'http': httplib.HTTPConnection,\r\n 'https': httplib.HTTPSConnection}[loc.scheme]", "title": "" }, { "docid": "6d6f1a664e2f61680aeea71b435702a8", "score": "0.5240168", "text": "def get_connection(self):\r\n return Connection(**self.__config._sections['connection'])", "title": "" }, { "docid": "af7b9f8a6ca1fde7d92190b00b6934a0", "score": "0.52265334", "text": "def get_connection(self):\n self._pool_generator().next()\n return self.OUTPUT[-1]", "title": "" }, { "docid": "02b05b665ccc9a868ef82625bc01eb4c", "score": "0.5225401", "text": "def get_network_gateway(self, view, network):\n # Get the pool\n pool = self._get_pool(view, network)\n\n # Return the network gateway\n return pool['gateway']", "title": "" }, { "docid": "6f1068f8acf8f1f5377c28b8699ba1ad", "score": "0.52204734", "text": "def ConnectionType(self):\n return self._get_attribute('connectionType')", "title": "" }, { "docid": "68eeffa2e20894a8042c5d77099b0939", "score": "0.5191421", "text": "def connect_type(self):\n return self._connect_type", "title": "" }, { "docid": "7ae9c45a7aac6df8067da7c2edeb2e63", "score": "0.51897347", "text": "def sccurrentclientconnections(self) :\n\t\ttry :\n\t\t\treturn 
self._sccurrentclientconnections\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "37761df165917962b887f2796baa32c6", "score": "0.5188748", "text": "def GetClass(self):\n return _pcbnew.EDGE_MODULE_GetClass(self)", "title": "" }, { "docid": "821183b66028ef2142e0ed244a9cd9c7", "score": "0.5188384", "text": "def getClasses(self):\n return self.classes", "title": "" }, { "docid": "566b62b729c7f9ecff4349d7ec61de85", "score": "0.5184886", "text": "def configured_networks(self):\n return self._configured_networks", "title": "" }, { "docid": "d9ef15558b1e3c89e87d42cf1130624a", "score": "0.5183968", "text": "def network_list(self):\n nt_ks = self.compute_conn\n return [network.__dict__ for network in nt_ks.networks.list()]", "title": "" }, { "docid": "36c59e05b3d0d81a33849a9a537ad9bd", "score": "0.5174951", "text": "def connections(self):\n if 'connections' not in self.intel:\n return\n for edge in self.intel['connections']:\n yield self.build_for_row(self.api_client, edge)", "title": "" }, { "docid": "ab97c919a65116afdda86785c2dc6fac", "score": "0.5173148", "text": "def get_socket_list(self):\n return self._complete_socket_set", "title": "" }, { "docid": "751f921576f6d58873840ba591c88982", "score": "0.5163921", "text": "def getSocksEndpointClass(self):\n socks_endpoint_class = None\n if self.factory.method_spec.protocol == 'socks4':\n socks_endpoint_class = SOCKS4ClientEndpoint\n elif self.factory.method_spec.protocol == 'socks5':\n socks_endpoint_class = SOCKS5ClientEndpoint\n else:\n raise ValueError(\"Pluggable transport requires unknown protocol %s. Supported protocols are %s\" %\n (self.factory.method_spec.protocol, ('socks4', 'socks5')))\n return socks_endpoint_class", "title": "" }, { "docid": "07df523851d65f0175fb3792d39a170d", "score": "0.51616454", "text": "def get_networks(self):\n return self.networks.values()", "title": "" }, { "docid": "674033c778b96168978bf2fbc9c41e04", "score": "0.51604384", "text": "def list_connections(self, show_passthrough=True):\n conns = []\n for u,v,data in self._graph.edges(data=True):\n link = data['link']\n if v == '@bin' or u == '@bout': # leave out external connections to boundary\n continue\n elif u == '@bin':\n if show_passthrough:\n conns.extend([(src, '.'.join([v,dest])) \n for dest,src in link._dests.items() if not '.' in src])\n elif v == '@bout':\n if show_passthrough:\n conns.extend([('.'.join([u,src]), dest) \n for dest,src in link._dests.items() if not '.' 
in dest])\n else:\n conns.extend([('.'.join([u,src]), '.'.join([v,dest])) for dest,src in link._dests.items()])\n return conns", "title": "" }, { "docid": "cf1b4b7d3cd084cf109c5c8ffca0f1c2", "score": "0.5151478", "text": "def connection(self):\n return self.get_connection()", "title": "" }, { "docid": "eac7791f68773bc91e6827a59b6a6fa4", "score": "0.51511735", "text": "def get_connections():\n g.pg_conn = pg_pool.getconn()\n g.cursor = g.pg_conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n g.start_time = datetime.datetime.now()", "title": "" }, { "docid": "6ed72588a6131f5308ed284488e01831", "score": "0.5150315", "text": "def pool(self) -> aioredis.ConnectionsPool:\n return self._pool", "title": "" }, { "docid": "409b5d156f67f889958c31d0660a2f05", "score": "0.5146637", "text": "def connection_type(self) -> str:\n return pulumi.get(self, \"connection_type\")", "title": "" }, { "docid": "1daae040b29949545e50442d51e1bcc3", "score": "0.5143721", "text": "def getConnections(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "67e3562d9a0680e07aecae55965c5715", "score": "0.51433986", "text": "def get_num_connections(self):\n return self.impl.get_num_connections()", "title": "" }, { "docid": "eb85444e2fc6739a44c2ef6d4fa7b53f", "score": "0.5131379", "text": "def get_gateway_helper(gateway):\n\n\tassert isinstance(gateway, str) or isinstance(gateway, unicode)\n\n\tgateway = BeetleGateway.objects.get(name=gateway)\n\tconn_gateway = ConnectedGateway.objects.get(gateway=gateway)\n\n\treturn gateway, conn_gateway", "title": "" }, { "docid": "5aecbc721c9a7174f349932ffdde1c6b", "score": "0.5129398", "text": "def list_connections():\r\n global all_connections\r\n global active_clients\r\n\r\n for i, conn in enumerate(all_connections):\r\n try:\r\n # Check'number'of'alive'connections'\r\n # send a dummy connection request to check if the connection is still active\r\n conn.send(str.encode('test'))\r\n # because we don't know how much data we will get back, need to set the size (201480) high enough\r\n conn.recv(1024)\r\n except:\r\n # if do not receive anything back, the connection is not active, delete the connection\r\n del all_connections[i]\r\n del all_address[i]\r\n continue\r\n act_client = str(i) + \" \" + str(all_address[i][0]) + \" \" + str(all_address[i][1]) + \"\\n\"\r\n active_clients.append(act_client)", "title": "" }, { "docid": "65134003117b40a646f171b2e86cef6a", "score": "0.51251566", "text": "def get_connection(self):\n return self.connection_pool.getconn()", "title": "" }, { "docid": "81788c615ddb9e0ada7e2cd721d40d91", "score": "0.5123038", "text": "def device_class(self) -> str:\n return DEVICE_CLASS_CONNECTIVITY", "title": "" }, { "docid": "42efbcbf94334dd893d5bb136f1061e5", "score": "0.5115517", "text": "def sctotalclientconnections(self) :\n\t\ttry :\n\t\t\treturn self._sctotalclientconnections\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "4c3bf898d28fe144608903dbe2b8e2e2", "score": "0.51123667", "text": "def tunnels(self):\n return self._tunnels", "title": "" }, { "docid": "1424d75275116267407d2d575270e3ff", "score": "0.511049", "text": "def type_of_network(self):\n\t\treturn self.__type_of_network", "title": "" }, { "docid": "f0bbcd83db71320ca7690e2589aed444", "score": "0.5109286", "text": "def get_user_connections(self, status=None):\n q = Session.query(self.connection_class).filter(getattr(self.connection_class, self.object_id_name)==self.id)\n if status and isinstance(status, basestring):\n q = 
q.filter(self.connection_class.status==status)\n if status and isinstance(status, (list, tuple)):\n q = q.filter(self.connection_class.status.in_(status))\n return q.all()", "title": "" }, { "docid": "3f8371644ba3c796e74f6d2c5150acbc", "score": "0.5108533", "text": "def network(self):\n pass", "title": "" }, { "docid": "bff24831db33a261cf92e36209b5ff6f", "score": "0.51077676", "text": "def _get_network_instance(self):\n return self.__network_instance", "title": "" }, { "docid": "526395fda4a42872dc5b0f5ba7ea8e8b", "score": "0.51001394", "text": "def net(self):\n return", "title": "" }, { "docid": "6e9c00a1c859b640f88d933818812881", "score": "0.5094912", "text": "def get_current_network(self):\n constants = NetworkManager.const\n\n for conn in NetworkManager.NetworkManager.ActiveConnections:\n devices_types = [constants('device_type', x.DeviceType) == \"wifi\" for x in conn.Devices]\n if not any(devices_types):\n continue\n\n settings = conn.Connection.GetSettings()\n return WifiNetwork(ssid=settings[\"802-11-wireless\"][\"ssid\"], strength=-1)", "title": "" }, { "docid": "ce691e7be5eb63356b9e121815a878c1", "score": "0.50804216", "text": "def class_(self):\n\n sr = gxapi.str_ref()\n self.gdb.gxdb.get_chan_class(self._symb, sr)\n return sr.value", "title": "" }, { "docid": "f232afd81132bec7cabbefa44cdb826b", "score": "0.5077741", "text": "def bt_class(self):\n return self.remote_device_props.Get(\n constants.DEVICE_INTERFACE, 'Class')", "title": "" }, { "docid": "ddee35b30a8e5eecc7d8cb6aa953491d", "score": "0.5072032", "text": "def get_connectors(self):\n raise NotImplementedError()", "title": "" }, { "docid": "cf09c9285f1e3ae72cc1873824f1645e", "score": "0.50583756", "text": "def GetClass(self):\n return _pcbnew.VIA_GetClass(self)", "title": "" } ]
350b7523904a95a2ed8c1e0fa8b9a114
Concatenate two lists, always return a list of list
[ { "docid": "4ddec1d2b2a03bc4480dd6525386e2d3", "score": "0.78962314", "text": "def concatenateList(list1, list2):\n outputList = []\n\n ## list1\n # if it's an empty list\n if len(list1) == 0:\n outputList.append(list1)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list1[0], list):\n for i in range(len(list1)):\n outputList.append(list1[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list1)\n\n ## list2\n # if it's an empty list\n if len(list2) == 0:\n outputList.append(list2)\n # if it's already a list of list (from previous __add__)\n elif isinstance(list2[0], list):\n for i in range(len(list2)):\n outputList.append(list2[i])\n # first use of __add__, not a list of list\n else:\n outputList.append(list2)\n\n return outputList", "title": "" } ]
[ { "docid": "d76a43f7db550ba5c7436b8bff4a4884", "score": "0.844466", "text": "def concat_lists(list1, list2):\n\n # return list1 + list2\n for item in list2:\n list1.append(item)\n\n return list1", "title": "" }, { "docid": "dbd51aa3117026d4eabce506f4f5ae8e", "score": "0.8156493", "text": "def merge_lists(l1, l2):\n return [ *l1, *l2 ]", "title": "" }, { "docid": "4a73b4859cfa79f4b5d9f6cf214f07cf", "score": "0.79208654", "text": "def concat(a: list[int], b: list[int]) -> list[int]:\n result: list[int] = list()\n\n for x in a:\n result.append(x)\n for y in b: \n result.append(y)\n return result", "title": "" }, { "docid": "1516fee0bd213668176fe1a46a5f26b2", "score": "0.7868668", "text": "def concat_list(in_list):\n return list(itertools.chain(*in_list))", "title": "" }, { "docid": "f070aa57a78de4d8cee855a628af2cbc", "score": "0.7517699", "text": "def add_lists(first, second):\n\n copy = []\n for (i, j) in zip(first, second):\n i += j\n copy.append(i)\n\n return copy", "title": "" }, { "docid": "c937aaa79e159afe780020fbd20215ed", "score": "0.7501335", "text": "def _merge_lists(cls, li1, li2):\n if not li1:\n return li2[:]\n elif not li2:\n return li1[:]\n else:\n li = li1[:]\n for el in li2:\n if el not in li:\n li.append(el)\n return li", "title": "" }, { "docid": "9a6bbebb812182f15e8e6b321fb01b0d", "score": "0.7324203", "text": "def concat(list_of_lists):\n return reduce(lambda a,b : a+b,list_of_lists,[])", "title": "" }, { "docid": "09905b3caf5d61806966455e21fbd22a", "score": "0.72376263", "text": "def merge(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> List[Any]:\n return [*list1, *list2]", "title": "" }, { "docid": "00585d43b595797343fb54225068050e", "score": "0.72165537", "text": "def merge_lists(lists1, lists2):\n merged_lists = []\n for list1, list2 in zip(lists1, lists2):\n merged_lists.append(list1 + list2)\n return merged_lists", "title": "" }, { "docid": "bed29ae407ba4de22a958cba6f332f0c", "score": "0.72154635", "text": "def join(self: \"_List[_List[T]]\") -> \"_List[T]\":\n return ListMonad(*[element for lists in self for element in lists])", "title": "" }, { "docid": "b7562e324cc76007b9c9f548acac3d84", "score": "0.7214517", "text": "def add_lists (list1, list2):\n \n output = []\n for item1, item2 in zip(list1, list2):\n output.append(item1 + item2)\n return output", "title": "" }, { "docid": "7015854dc23e5d4a21782ff2a2bc7a89", "score": "0.71183556", "text": "def concat(seqs):\n return itertools.chain.from_iterable(seqs)", "title": "" }, { "docid": "d60e949c880a5afd3ce42adf3b32f726", "score": "0.7087358", "text": "def listExpend(input_list_1, input_list_2):\r\n output_list = []\r\n for element_1, element_2 in zip(input_list_1, input_list_2):\r\n output_list += [element_1]*element_2\r\n return output_list", "title": "" }, { "docid": "e45c4992ec7310585d29eb4720c3b06f", "score": "0.7076649", "text": "def concat(iterables):\n return itertools.chain.from_iterable(iterables)", "title": "" }, { "docid": "d4a5aa87f3ab458861cf62bb6c6d5519", "score": "0.70228696", "text": "def append(x, ys):\n return list(ys) + [x]", "title": "" }, { "docid": "37d8c8f938145e018ea52b3e7cc37721", "score": "0.70197886", "text": "def concat(*seqs):\n return itertools.chain.from_iterable(seqs)", "title": "" }, { "docid": "467847eb0baee17518f7970e65ae5b55", "score": "0.69950074", "text": "def cat_lists(*list_args):\n result = []\n for List in list_args:\n result.extend(List)\n\n return result", "title": "" }, { "docid": "70fc1a174925b046cfb17f523fbe30e5", "score": 
"0.6992389", "text": "def add_lists(a,b):\r\n\r\n for i in range(len(a)):\r\n a[i] += b[i]\r\n return a", "title": "" }, { "docid": "5edaafbbef8e8bb14bb120485e8f6ac8", "score": "0.69878733", "text": "def concat(x: list[int], y: list[int]) -> list[int]:\n con = list()\n i: int = 0\n count: int = 0\n while len(y) > i: \n con.append(y[i])\n i += 1\n while len(x) > count: \n con.append(x[count])\n count += 1 \n return con", "title": "" }, { "docid": "316412385c6081156db58b9012dc6887", "score": "0.6983133", "text": "def merge(list_of_lists):\n merged = list()\n for l in list_of_lists:\n merged.extend(l)\n return merged", "title": "" }, { "docid": "b9d610916aadc9f5de4e3e65f2feaff1", "score": "0.6964189", "text": "def _merge_lists(list1, list2):\n for v2 in reversed(list2):\n if isinstance(v2, Descriptor):\n if v2 in list1:\n v1 = list1.pop(list1.index(v2))\n list1.insert(0, v1.merge(v2))\n else:\n list1.insert(0, v2)\n elif isinstance(v2, list):\n raise CekitError(\"Cannot merge list of lists\")\n else:\n if v2 not in list1:\n list1.insert(0, v2)\n\n return list1", "title": "" }, { "docid": "f7d043de3eba87ad23d867773639e3fa", "score": "0.69377655", "text": "def append_list(list_a, list_b):\n for i, a in enumerate(list_a):\n list_b[i].append(a)\n\n return list_b", "title": "" }, { "docid": "50aefd0876a2d313234aaae1bdf904ca", "score": "0.69340515", "text": "def cat_arrays(arr1, arr2):\n return [x for x in arr1+arr2]", "title": "" }, { "docid": "53e04e03def62b459743aa4105c39e1a", "score": "0.6924937", "text": "def combinelists(oldlst, newlst):\n combined = oldlst\n if newlst not in oldlst:\n combined.append(newlst)\n return combined", "title": "" }, { "docid": "e5cb42047eb6436c7f43031955e4e510", "score": "0.69139725", "text": "def add_lists(*args):\n out = []\n for arg in args:\n out.extend(arg)\n return out", "title": "" }, { "docid": "3b5bcd37435f20cc1821ed4ffef786f2", "score": "0.68916386", "text": "def _add_list_values(a, b):\n new_list = []\n for i in range(len(a)):\n new_list.append(a[i] + b[i])\n return new_list", "title": "" }, { "docid": "66d6961727dac4121ffde5f7db4a58d0", "score": "0.6883244", "text": "def add(a: list, b: list) -> list:\n temp = zip(a, b)\n print(list(temp))", "title": "" }, { "docid": "bcdff456f457f344ee9af40c5a679c17", "score": "0.6877123", "text": "def add_two_lists(list1, list2):\n return list(map(lambda m: m[0] + m[1], list(zip(list1, list2))))", "title": "" }, { "docid": "25ac632322e6fda70bec1b77c08f7350", "score": "0.6818532", "text": "def concatenate_lists(*layers, **kwargs):\n ...", "title": "" }, { "docid": "37c46a724f0718ccdb9185478115d35d", "score": "0.67466223", "text": "def merge(list1, list2): \r\n if len(list1) == 0 or len(list2) == 0:\r\n new_list = [item for item in list1]\r\n new_list.extend(list2)\r\n return new_list\r\n else:\r\n if list1[0] <= list2[0]:\r\n new_list = list([list1[0]])\r\n new_list.extend(merge(list1[1:], list2))\r\n return new_list\r\n else:\r\n new_list = list([list2[0]])\r\n new_list.extend(merge(list1, list2[1:]))\r\n return new_list", "title": "" }, { "docid": "d956f85d22c06be847450ba3fb0aeaf5", "score": "0.67187357", "text": "def merger(self, *lists):\n\t\tself.merged=[]\n\t\tfor i in range(len(lists[0][0])):\n\t\t\tself.temp=[]\n\t\t\tfor j in range(len(lists[0])):\n\t\t\t\tself.temp.append(lists[0][j][i])\n\t\t\tself.merged.append(self.temp)\n\t\treturn self.merged", "title": "" }, { "docid": "2bcf2fce689901e02ecc74c242ed978d", "score": "0.66872436", "text": "def listops_union(list_a,list_b):\r\n\r\n retlist = list_a[:]\r\n 
for item in list_b: \r\n if item not in list_a:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)", "title": "" }, { "docid": "6eb6d97a576adcffc9bbd1010da79a7f", "score": "0.6676652", "text": "def union(self, other: list) -> 'List':\n if not isinstance(other, list):\n raise ValueError('The comparing element is not a list')\n\n return List(self + other).unique()", "title": "" }, { "docid": "28e8e4a6bc790064cf45fb13c87d7da4", "score": "0.66340876", "text": "def merge(list1, list2):\n holding = list1.to_list()\n [holding.append(i) for i in list2.to_list()]\n # for i in list2.to_list():\n # holding.append(i)\n holding = sorted(holding)\n\n output = LinkedList(Node(holding[0]))\n for i in holding[1:]:\n output.append(i)\n return output", "title": "" }, { "docid": "c09f6157678c82931eed70b1bc57f05a", "score": "0.66319495", "text": "def listify(lhs, rhs):\n if not lhs:\n return rhs\n if not rhs:\n return lhs\n if is_listing(lhs) and is_listing(rhs):\n return lhs + rhs\n if is_listing(lhs):\n return lhs + [rhs]\n if is_listing(rhs):\n return [lhs] + rhs\n return [lhs, rhs]", "title": "" }, { "docid": "e6339d6207cf3971631f47c38504fbb4", "score": "0.65963334", "text": "def merge ( list1, list2 ):\n new_list = []\n while len(list1)>0 and len(list2)>0:\n if list1[0] < list2[0]:\n new_list.append (list1[0])\n del list1[0]\n else:\n new_list.append (list2[0])\n del list2[0]\n return new_list + list1 + list2", "title": "" }, { "docid": "d41993b0142914edcbec72d45f4e83e3", "score": "0.65705204", "text": "def combine_list(lines):\n results = []\n for l in lines:\n results.extend(l)\n return list(filter(_remove, results))", "title": "" }, { "docid": "c4ff598408a91b571b772b8ead22951c", "score": "0.6569385", "text": "def flatten_list(lol):\n return list(itertools.chain.from_iterable(lol))", "title": "" }, { "docid": "22120f47e45d9c03013b2afd293f4462", "score": "0.6568194", "text": "def merge(*args):\n return reduce(list.__add__, args, list())", "title": "" }, { "docid": "af647709facd296143e5868254416293", "score": "0.6558392", "text": "def cartn_append(*list_of_lists: list) -> list:\n def aux(list_of_lists: list) -> list:\n if len(list_of_lists) == 0:\n return []\n elif len(list_of_lists) == 1:\n return list_of_lists[0]\n else:\n return aux([cart2_append(list_of_lists[0], list_of_lists[1])] + tail(list_of_lists[1:]))\n \n return aux(list(list_of_lists))", "title": "" }, { "docid": "c22b43e4fb4dbf7b29d60d87f9f0f6df", "score": "0.65140444", "text": "def flatten(listOfLists):\n return list(chain.from_iterable(listOfLists))", "title": "" }, { "docid": "9c1b97d7d1e0370d04f194c150833ce5", "score": "0.65105766", "text": "def merge_lists(list_1, list_2):\n if len(list_1) == 0:\n return list_2\n if len(list_2) == 0:\n return list_1\n\n new_list = []\n length = len(list_1) + len(list_2)\n while len(new_list) < length:\n if len(list_1) == 0:\n new_list = new_list + list_2\n elif len(list_2) == 0:\n new_list = new_list + list_1\n\n elif list_1[0] < list_2[0]:\n new_list.append(list_1[0])\n list_1.remove(list_1[0])\n elif list_1[0] >= list_2[0]:\n new_list.append(list_2[0])\n list_2.remove(list_2[0])\n return new_list", "title": "" }, { "docid": "f8b5f712e705920e00308ce2dda1b2ff", "score": "0.6494053", "text": "def merge_lists(a_lst, b_lst):\n\n i = 0\n j = 0\n merged_list = []\n while i < len(a_lst) and j < len(b_lst):\n \n if a_lst[i] < b_lst[j]:\n merged_list.append(a_lst[i])\n i += 1\n else:\n merged_list.append(b_lst[j])\n j += 1\n if i < 
len(a_lst):\n merged_list.extend(a_lst[i:])\n if j < len(b_lst):\n merged_list.extend(b_lst[j:])\n return merged_list", "title": "" }, { "docid": "fdae49e70daff128c952f2308b53d045", "score": "0.64696246", "text": "def union(list_a: list, list_b: list) -> list:\n if list_a is None:\n list_a = [None]\n if list_b is None:\n list_b = [None]\n return list(set(list_a) | set(list_b))", "title": "" }, { "docid": "7560458d5e70bcb2b023eb9977e5138d", "score": "0.6450688", "text": "def add(a, b):\n if len(a) < len(b):\n aa = itertools.chain(reversed(a), itertools.repeat(0))\n bb = reversed(b)\n elif len(b) < len(a):\n bb = itertools.chain(reversed(b), itertools.repeat(0))\n aa = reversed(a)\n else:\n aa = reversed(a)\n bb = reversed(b)\n cc = _add_stream(aa, bb)\n return list(reversed(cc))", "title": "" }, { "docid": "dce5cb8064849277baa067b53eeacea2", "score": "0.64493525", "text": "def merge(list1: list, list2: list) -> list:\r\n result = []\r\n i = 0\r\n j = 0\r\n # Iterate through each element and append the smaller element of each list to the resulting list.\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] < list2[j]:\r\n result.append(list1[i])\r\n i += 1\r\n else:\r\n result.append(list2[j])\r\n j += 1\r\n\r\n # Append the remaining lists to the resulting list.\r\n result.extend(list1[i:])\r\n result.extend(list2[j:])\r\n return result", "title": "" }, { "docid": "e8945d268dc4791f4a64de1744e67fee", "score": "0.64185613", "text": "def concatenate(self, other):\n return as_stream_iterator(_flatten_stream_from_reversed_list([other, self]))", "title": "" }, { "docid": "986c0b8485b444a49f1fd6ad2999286d", "score": "0.6401963", "text": "def union(llist_1 : LinkedList, llist_2 : LinkedList) -> LinkedList:\n # Convert to set to remove repeated entries in each list\n lset_1 = list_to_set(llist_1)\n lset_2 = list_to_set(llist_2)\n \n # Combine the two sets to create a union\n union_list = LinkedList()\n list_of_added = []\n for item in lset_1:\n union_list.append(item)\n list_of_added.append(item)\n\n for item in lset_2:\n if item not in list_of_added:\n union_list.append(item)\n\n return union_list", "title": "" }, { "docid": "031b040099193dfed46d670959545c30", "score": "0.6401917", "text": "def extend(*, list1 : Union[List[Any], ConduitVariable], list2 : Union[List[Any], ConduitVariable]) -> None:\n list1.extend(list2)", "title": "" }, { "docid": "1ff5f2afcf87151d67af130cf0be1b6b", "score": "0.6398544", "text": "def interleave(list1, list2):\r\n result = [] #Create an empty list which later we use it to add our result in it.\r\n extra = [] #Create an empty list which later we use it to sort out the extra cards.\r\n if len(list2) > len(list1):\r\n new_list = zip(list2, list1)\r\n for idx in range(len(list1),len(list2)):\r\n extra.append(list2[idx])\r\n else:\r\n new_list = zip(list1, list2)\r\n for idx in range(len(list2),len(list1)):\r\n extra.append(list1[idx])\r\n for item1, item2 in new_list:\r\n result.append(item1)\r\n result.append(item2)\r\n for item in extra:\r\n result.append(item)\r\n return result", "title": "" }, { "docid": "87fda664e8810b1b1177603e55c9a564", "score": "0.6389034", "text": "def concat(xss):\n return list(anyconfig.compat.from_iterable(xs for xs in xss))", "title": "" }, { "docid": "f9ea8ecc1af485e72cba80a985842b28", "score": "0.6383177", "text": "def interleave_lists(before, after):\n if len(before) != len(after):\n print \"Error: arrays must of same length in interleave_lists\"\n return None\n else:\n output = before + after\n output[::2] = before\n output[1::2] 
= after\n return output", "title": "" }, { "docid": "7836fdfeb18d808572028c51fdad97d4", "score": "0.6359892", "text": "def concatv(*seqs):\n return concat(seqs)", "title": "" }, { "docid": "79768f9bd597bb5d25e5b0e8f9422839", "score": "0.6334127", "text": "def flatten_lists(lst):\n return list(chain(*lst))", "title": "" }, { "docid": "92ebcd31ee14caad756c8b8ce6e38760", "score": "0.63247985", "text": "def concat_succ(L):\n if len(L) < 2:\n return L\n res = []\n last = L.pop()\n othe = L.pop()\n for i in last:\n for j in othe:\n if type(i) is list:\n if type(j) is list:\n res.append(i+j)\n else:\n res.append(i+[j])\n elif type(j) is list:\n res.append([i] + j)\n else:\n res.append([i] + [j])\n L = [res] + L\n return concat_succ(L)", "title": "" }, { "docid": "b44ba90e643f0e3e1981ba8d1e104e4c", "score": "0.6322912", "text": "def merge(x, y):\n try:\n from itertools import izip\n except ImportError:\n izip = zip\n from numpy import concatenate\n return (concatenate((a, b)) for a, b in izip(x, y))", "title": "" }, { "docid": "fd6b751869c61d64f2e14743af4c787a", "score": "0.6311606", "text": "def cat_arrays(arr1, arr2):\n res = []\n for i in arr1:\n res.append(i)\n for j in arr2:\n res.append(j)\n return (res)", "title": "" }, { "docid": "744117a5ae60366ebc3d3d51e24f6be9", "score": "0.62960863", "text": "def merge(lst1, lst2):\n if not lst1 or not lst2:\n return lst1 + lst2\n elif lst1[0] < lst2[0]:\n return [lst1[0]] + merge(lst1[1:], lst2)\n else:\n return [lst2[0]] + merge(lst1, lst2[1:])", "title": "" }, { "docid": "ed2ae58951f9a9a7a3808e43f22c0adc", "score": "0.6291317", "text": "def flatten(list_of_lists: List[List]) -> List:\n return list(itertools.chain(*list_of_lists))", "title": "" }, { "docid": "127eaf78ae0ce870610b3dffc0a2acad", "score": "0.628373", "text": "def _extend(cls, li1, li2):\n return li1 + li2", "title": "" }, { "docid": "9c3092bf1d7233163225059bfa39814d", "score": "0.6272074", "text": "def data_list_wdl_merge(data_list1:list, data_list2:list) -> list:\n list_size = len(data_list1)\n merged_data_list = []\n for i in range(list_size):\n merged_data_list.append(pd.concat([data_list1[i],data_list2[i]]))\n return merged_data_list", "title": "" }, { "docid": "320c1004d51010d62c04d88d4faced15", "score": "0.627036", "text": "def merge(list1, list2):\n merged = []\n if len(list1) < 1 or len(list2) <1:\n return list1 + list2\n else:\n ind_1 = 0\n ind_2 = 0\n while ind_1 < len(list1) and ind_2 < len(list2):\n #some appends to lists\n if list1[ind_1] < list2[ind_2]:\n merged.append(list1[ind_1])\n ind_1 += 1\n elif list2[ind_2] < list1[ind_1]:\n merged.append(list2[ind_2])\n ind_2 += 1\n elif list1[ind_1] == list2[ind_2]:\n merged.append(list1[ind_1])\n merged.append(list2[ind_2])\n ind_1 += 1\n ind_2 += 1\n #if reach end of one list, copy the remainder of the other\n if ind_1 >= len(list1) and ind_2 < len(list2):\n merged += list2[ind_2:]\n ind_2 = len(list2)\n elif ind_2 >= len(list2) and ind_1 < len(list1):\n merged += list1[ind_1:]\n ind_1 = len(list1)\n return merged", "title": "" }, { "docid": "b1f6bdbb0bf110ca12635a0193969395", "score": "0.6269036", "text": "def merge(list1, list2):\n answer = []\n assert answer == sorted(answer)\n\n idx1 = 0\n idx2 = 0\n while (idx1 < len(list1)) and (idx2 < len(list2)):\n if list1[idx1] < list2[idx2]:\n answer.append(list1[idx1])\n idx1 += 1\n elif list1[idx1] > list2[idx2]:\n answer.append(list2[idx2])\n idx2 += 1\n else:\n answer.append(list1[idx1])\n answer.append(list2[idx2])\n idx1 += 1\n idx2 += 1\n assert answer == sorted(answer)\n\n 
answer.extend(list1[idx1:])\n answer.extend(list2[idx2:])\n\n assert answer == sorted(answer)\n return answer", "title": "" }, { "docid": "b97b93fd51b4e7d639bbbff3213bbf71", "score": "0.6257212", "text": "def right_merge(self,list_to_merge):\n self.items = self.items + list_to_merge\n return self.items", "title": "" }, { "docid": "8ff00fd16abf935de1f1482c10bdd111", "score": "0.6239411", "text": "def __cross(self,A, B):\n return [s+t for s in A for t in B]", "title": "" }, { "docid": "d1b5da95e7811b7a950b1efc249bd392", "score": "0.61969376", "text": "def merge(list_1, list_2):\n l1, l2 = len(list_1), len(list_2) # Store the length of each list\n merged_output = [None for i in range(l1 + l2)]\n i, j = 0, 0\n # Compare each element of the two lists till one of them is exhausted\n while i < l1 and j < l2:\n if list_1[i] <= list_2[j]:\n merged_output[i + j] = list_1[i]\n i += 1\n else:\n merged_output[i + j] = list_2[j]\n j += 1\n\n # Check if list_1 is exhausted, add remaining element to the output\n for j in range(j, l2):\n merged_output[i + j] = list_2[j]\n\n # Check if list_2 is exhausted, add remaining element to the output\n for i in range(i, l1):\n merged_output[i + j] = list_1[i]\n\n # print(merged_output)\n return merged_output", "title": "" }, { "docid": "00c94ac1b4bb89bece4317d4b0b4587d", "score": "0.61935925", "text": "def __add__(self, other):\n return self + [other]", "title": "" }, { "docid": "a7528656e6e5d312de2ea1eb7eaf916d", "score": "0.61896914", "text": "def merge(list1: list, list2: list) -> list:\n output = []\n i, j = 0, 0\n while i < len(list1) and j < len(list2):\n if list1[i][1] <= list2[j][1]:\n output += [list1[i]]\n i += 1\n else:\n output += [list2[j]]\n j += 1\n return output + list1[i:] + list2[j:]", "title": "" }, { "docid": "615db0f99cac2819b5cacccdd28df587", "score": "0.61785376", "text": "def merge(list_a, list_b):\n new_list = []\n i = 0\n j = 0\n while (i < len(list_a) and j < len(list_b)):\n if(list_a[i] < list_b[j]):\n new_list.append(list_a[i])\n i += 1\n else:\n new_list.append(list_b[j])\n j += 1\n new_list += list_a[i:]\n new_list += list_b[j:]\n\n return new_list", "title": "" }, { "docid": "dd69c6780b365995836f50ff71bb5426", "score": "0.6173941", "text": "def concat_all(self):\n return self.merge(1)", "title": "" }, { "docid": "0027b3d932b633ad651909765679b702", "score": "0.6171826", "text": "def merge(list1, list2):\n result_list = []\n list1_length = len(list1)\n list2_length = len(list2)\n list1_index = 0\n list2_index = 0\n while list1_index < list1_length and list2_index < list2_length:\n if list1[list1_index] <= list2[list2_index]:\n result_list.append(list1[list1_index])\n list1_index = list1_index + 1\n else:\n result_list.append(list2[list2_index])\n list2_index = list2_index + 1\n \n if list1_index < list1_length:\n result_list.extend(list1[list1_index:])\n if list2_index < list2_length:\n result_list.extend(list2[list2_index:])\n \n return result_list", "title": "" }, { "docid": "851f03e1083dff8f3d86d243af030766", "score": "0.61513937", "text": "def ijoin_lists(l):\n if l:\n try:\n if not all(ymap(isinstance, l, list)):\n from tek.errors import MooException\n raise MooException('Some elements aren\\'t lists!')\n for i in cumsum([0] + list(map(len, l[:-1]))):\n l[i:i+1] = l[i]\n except Exception as e:\n logger.debug('ijoin_lists failed with: ' + str(e))\n return l", "title": "" }, { "docid": "743663ddfcc21c03cdecebffe45be906", "score": "0.6149276", "text": "def flatten(list_of_lists: List[Any]) -> List[Any]:\n return 
list(itertools.chain.from_iterable(list_of_lists))", "title": "" }, { "docid": "d249d1b9cba0b86553e4f281da9d2aa2", "score": "0.6135357", "text": "def flatten(llst):\n res = []\n for lst in llst:\n res += lst\n return res", "title": "" }, { "docid": "032d7ecfbe0f540a35f64525617dbf81", "score": "0.6134397", "text": "def merge(list1, list2): \n result = []\n copy1, copy2 = list1[:], list2[:]\n \n while min(copy1, copy2):\n if copy1[0] < copy2[0]:\n result.append(copy1[0])\n copy1.pop(0)\n else:\n result.append(copy2[0])\n copy2.pop(0)\n \n if copy1:\n result += copy1\n elif copy2:\n result += copy2\n \n return result", "title": "" }, { "docid": "34cd5c4b060669e779d796f173c4c978", "score": "0.61237276", "text": "def merge_down(lists):\r\n lst1 = transpose(lists)\r\n lst2 = merge_AllRight(lst1)\r\n lst3 = transpose(lst2)\r\n\r\n lists = lst3\r\n\r\n return lists", "title": "" }, { "docid": "cf14509b20936812587c466e31b9fdad", "score": "0.6121381", "text": "def combinations( l1, l2 ):\n for i in range(len(l1)):\n yield zip( l1,l2)\n l1.insert(0,l1.pop())", "title": "" }, { "docid": "7d720d61f4db72677d81a616785aa9cf", "score": "0.610107", "text": "def lists_combinations(list_1, list_2):\n return [x[0] + ' ' + x[1] for x in itertools.product(list_1, list_2)]", "title": "" }, { "docid": "a4448a64525fd394f29ea23993b88cda", "score": "0.60966784", "text": "def add_sort_list(\n l1: list,\n l2: list,\n ) -> list:\n \n # Add two lists\n l = l1 + l2\n\n # Sort the added lists\n l.sort()\n\n return l", "title": "" }, { "docid": "6324cae8ab9436d627a9d2255eba195b", "score": "0.6094901", "text": "def combine(a, b):\n assert len(a) == len(b)\n out = \"\"\n for i in range(len(a)):\n out += a[i]\n out += b[i]\n return out", "title": "" }, { "docid": "0d5f767d8af8a306a4494a37dd9b7f01", "score": "0.60926", "text": "def list_update(l1, l2):\n return filter(lambda e : e not in l2, l1) + list(l2)", "title": "" }, { "docid": "1e3e8edf1795b4280935d2f13f0939fe", "score": "0.60863525", "text": "def merge_many(*lsts):\n if not lsts:\n return []\n elif len(lsts) == 1:\n return lsts[0][:]\n elif len(lsts) == 2:\n return merge(*lsts)\n else:\n left = lsts[len(lsts) // 2:]\n right = lsts[:len(lsts) // 2]\n\n return merge(merge_many(*left), merge_many(*right))", "title": "" }, { "docid": "43ff9ef08c89031980473107669ac0c8", "score": "0.6081372", "text": "def flatten(ls):\n return sum(ls, [])", "title": "" }, { "docid": "ab98cbc4e6caa26a3b0bd5b278fa73fd", "score": "0.6070062", "text": "def listExtend(lst, items):\n if lst is None:\n return list(items)\n else:\n lst.extend(items)\n return lst", "title": "" }, { "docid": "a571a48ee8af10094e5f6ad0901ed9fe", "score": "0.60634154", "text": "def merge_lists_w_ordering(a: List[Any], b: List[Any]) -> List[Any]:\n overlap = set(a).intersection(b)\n\n result = []\n\n current, other = iter(a), iter(b)\n\n while True:\n for element in current:\n if element in overlap:\n overlap.discard(element)\n other, current = current, other\n break\n\n result.append(element)\n else:\n result.extend(other)\n break\n\n return result", "title": "" }, { "docid": "6fce3b857e95823eac8e23861f399231", "score": "0.60589546", "text": "def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return (None)\n newList = []\n for i in range(len(arr1)):\n newList.append(arr1[i] + arr2[i])\n return (newList)", "title": "" }, { "docid": "324b036dfb1b911861f35da508c341dc", "score": "0.60567653", "text": "def _addVectors(X1,X2):\n _checkSize(X1,X2)\n return [ X1[i] + X2[i] for i in range(len(X1))]", "title": "" }, { 
"docid": "95147cb71d7b9ed3dd98b3faca8a57a1", "score": "0.6056276", "text": "def cons(l, x):\n return l + [x]", "title": "" }, { "docid": "b1a81dec92100a4f0d4d799f23ea74f1", "score": "0.6054157", "text": "def union(a, b):\r\n return list(set(a) | set(b))", "title": "" }, { "docid": "04405f29de874b09a18bef6510104526", "score": "0.6041565", "text": "def concat_map(f, xs):\n return concat(imap(f, xs))", "title": "" }, { "docid": "84634e8d9c7aaa604add1cf607c18dd7", "score": "0.6039649", "text": "def merge(items1, items2):\r\n # TODO: Repeat until one list is empty\r\n # TODO: Find minimum item in both lists and append it to new list\r\n # TODO: Append remaining items in non-empty list to new list\r", "title": "" }, { "docid": "bd2008e84608f3ea171bd4a1f7ac02b9", "score": "0.60380715", "text": "def add(a, b):\n return [a[i] + b[i] for i in range(2)]", "title": "" }, { "docid": "c32c9342186d26f00fc69b0d7aa60d74", "score": "0.6029572", "text": "def union(a, b):\n return list(set(a) | set(b))", "title": "" }, { "docid": "c32c9342186d26f00fc69b0d7aa60d74", "score": "0.6029572", "text": "def union(a, b):\n return list(set(a) | set(b))", "title": "" }, { "docid": "fb361b3c104479d08d95805fec5bd082", "score": "0.6022447", "text": "def __listunion(self, c1, c2):\n s1 = {}\n for delta in c1:\n s1[delta] = 1\n\n\tc = c1[:]\n\tfor delta in c2:\n if not s1.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c", "title": "" }, { "docid": "dc945f0538de2ef8ed5ba1296ebcfc8a", "score": "0.60213846", "text": "def concat(cls, *args):\n \n if isinstance(args[0], list):\n sources = args[0]\n else:\n sources = list(args)\n \n return concat(Enumerable.for_each(sources))", "title": "" }, { "docid": "c7b013acc8394ef615d2d2c77b62b6ef", "score": "0.60160774", "text": "def concat_list(lst, batch_flags=None):\n slices = [slice(0)] * len(lst)\n datas = []\n row_flag = 0\n for i, r in enumerate(lst):\n if r is None:\n slices[i] = None\n continue\n j = -1\n if batch_flags is None or batch_flags[i]:\n for j, d in enumerate(r):\n datas.append(d)\n slices[i] = slice(row_flag, row_flag + j + 1)\n else:\n datas.append(r)\n slices[i] = row_flag\n row_flag += j + 1\n return datas, slices", "title": "" }, { "docid": "a2da069eeba49332a871dd30f9c48d70", "score": "0.6007684", "text": "def merge(l1, l2):\n\n #Reverse the lists\n l1 = list(reversed(l1))\n l2 = list(reversed(l2))\n\n ret = []\n\n while True:\n # If either list is empty, reverse the other one and append it to the end\n if not l1:\n ret.extend(reversed(l2))\n return ret\n if not l2:\n ret.extend(reversed(l1))\n return ret\n\n # Append the lowest last element of the two lists\n ret.append(l1.pop() if l1[-1] < l2[-1] else l2.pop())", "title": "" }, { "docid": "40ab95624c89bf58b04cf318dc5baf12", "score": "0.599734", "text": "def cross(a, b):\n return [s + t for s in a for t in b]", "title": "" }, { "docid": "dd8fb7c5db15808f1316e9bc1b911026", "score": "0.5985908", "text": "def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])", "title": "" } ]
15db5a05cb5149df307187fc4ce51663
General rank order filter. Applies a callback to each pixel in the image. The callback receives the sorted pixel values in the neighborhood around the pixel and has to return a new pixel value.
[ { "docid": "a5cd938a258b0d16ef7cc5243fb736da", "score": "0.7132174", "text": "def rank_order(img, callback):\n result = np.zeros(img.shape)\n height, width = img.shape\n for y in range(height):\n for x in range(width):\n y_start = y-1 if y > 0 else 0\n y_end = y+1 if y < height else height-1\n x_start = x-1 if x > 0 else 0\n x_end = x+1 if x < width else width-1\n neighborhood = img[y_start:y_end+1, x_start:x_end+1].flatten()\n neighborhood = np.array(sorted(list(neighborhood)))\n result[y, x] = callback(neighborhood)\n return result", "title": "" } ]
[ { "docid": "e16b5919ccebda07a9c2f8c9cd3f220f", "score": "0.5808636", "text": "def erosion(img):\n return rank_order(img, lambda neighborhood: neighborhood[0])", "title": "" }, { "docid": "dfb6ecf1a5601a0acb5831583b1d8255", "score": "0.5299369", "text": "def zrank(self, name, value, callback=None):\n self._execute_command('ZRANK', name, value, callback=callback)", "title": "" }, { "docid": "9b413621cbe1503cc39ec4a9d0fc98b9", "score": "0.5260944", "text": "def softranks(x, direction='ASCENDING', axis=-1, zero_based=True, **kwargs):\n if direction not in DIRECTIONS:\n raise ValueError('`direction` should be one of {}'.format(DIRECTIONS))\n\n descending = (direction == 'DESCENDING')\n z = _preprocess(x, axis)\n sorter = soft_quantilizer.SoftQuantilizer(z, descending=descending, **kwargs)\n ranks = sorter.softcdf * tf.cast(tf.shape(z)[1], dtype=x.dtype)\n if zero_based:\n ranks -= tf.cast(1.0, dtype=x.dtype)\n\n return _postprocess(ranks, x.shape, axis)", "title": "" }, { "docid": "a08ab0cfe61f9cfa99c7caca4c54a5c2", "score": "0.517549", "text": "def feature_ranking(score):\r\n idx = np.argsort(score, 0)\r\n print(idx[::-1])\r\n return idx[::-1]", "title": "" }, { "docid": "f3866a379fab13ac62a8bcb0266ae331", "score": "0.5096664", "text": "def gf2_rank(rows):\n rank = 0\n while rows:\n pivot_row = rows.pop()\n if pivot_row:\n rank += 1\n lsb = pivot_row & -pivot_row\n for index, row in enumerate(rows):\n if row & lsb:\n rows[index] = row ^ pivot_row\n return rank", "title": "" }, { "docid": "0ba05192073837b321c2e8bb89f9e024", "score": "0.5083015", "text": "def median(img):\n return rank_order(img, np.median)", "title": "" }, { "docid": "bce2c126f3943a87ce5e680bc31eb044", "score": "0.50806916", "text": "def rank(self, p, i):\n return self.bv.rank(tobits(p), i)", "title": "" }, { "docid": "90761b27e49175472db7b3130472c939", "score": "0.50763357", "text": "def zrevrank(self, name, value, callback=None):\n self._execute_command('ZREVRANK', name, value, callback=callback)", "title": "" }, { "docid": "ede26f46774657648a03c879118f4201", "score": "0.5074885", "text": "def percentile(scores, your_rank):\n scores.sort()\n for score in scores:\n if percentile_rank(scores, score) >= your_rank:\n return score", "title": "" }, { "docid": "ff6ae4f59d3af61561d50f42ade3cdb5", "score": "0.5030034", "text": "def dilation(img):\n return rank_order(img, lambda neighborhood: neighborhood[-1])", "title": "" }, { "docid": "4d0cc9ab929ab8d144c12e1b3c294a39", "score": "0.5002172", "text": "def predict_rank():\n pass", "title": "" }, { "docid": "20dcc3ab05d393262170af01eb48fa32", "score": "0.4989585", "text": "async def on_map_ranked(self, event):\n pass", "title": "" }, { "docid": "3c49003c8062f5e2f65dad0d1484976f", "score": "0.49844098", "text": "def avg_rank(real, percentiles):\n return np.sum(real*percentiles)/np.sum(real)", "title": "" }, { "docid": "aac42e0e75e7d872de11855822409d2d", "score": "0.49325675", "text": "def unrank(self, n, rnk):", "title": "" }, { "docid": "6c22b5d7691e21338bfc9bccb770accc", "score": "0.49263936", "text": "def rank(input_list, variant_name=\"AVERAGE\", inverse=False):\n input_copy = input_list[:]\n len_input_list = len(input_list)\n rank_list = [0] * len_input_list\n previous_ties = 0\n if not inverse: # high values get high ranks\n # obtain a value above the maximum value contained in input_list,\n # so it will never be picked as the minimum element of the list\n above_max_input = max(input_list) + 1\n curr_idx = 1\n while curr_idx <= len_input_list:\n # get the list of indices of the 
min elements of input_copy.\n # note that we might have ties.\n bottom_indices = argwhere(\n input_copy == amin(input_copy)).flatten().tolist()\n bottom_amount = len(bottom_indices)\n for bottom_idx in bottom_indices:\n if variant_name == \"AVERAGE\":\n # e.g., if 4 inputs are equal, and they would receive\n # ranks from 3 to 6, all of them will obtain rank 4.5\n # Afterwards the ranks increase from 7 on.\n rank_list[bottom_idx] = \\\n (2 * curr_idx + bottom_amount - 1) / 2.0\n elif variant_name == \"MIN\":\n # e.g., if 4 inputs are equal, and they would receive\n # ranks from 3 to 6, all of them will obtain rank 3.\n # Afterwards the ranks increase from 7 on.\n rank_list[bottom_idx] = curr_idx\n elif variant_name == \"MAX\":\n # e.g., if 4 inputs are equal, and they would receive\n # ranks from 3 to 6, all of them will obtain rank 6\n # Afterwards the ranks increase from 7 on.\n rank_list[bottom_idx] = curr_idx + bottom_amount - 1\n elif variant_name == \"DENSE\":\n # the same as for \"MIN\", but instead of ranking the\n # next elements counting from 7 on, the ranks will\n # increase from 4 on (no \"jumps\").\n rank_list[bottom_idx] = curr_idx - previous_ties\n elif variant_name == \"ORDINAL\":\n # the ties are ranked in a \"ordinal\" way, assigning\n # the smallest rank to the leftmost tie found, and so on.\n rank_list[bottom_idx] = curr_idx\n curr_idx += 1\n else:\n raise NotImplementedError(\n \"%s variant not implemented\" % variant_name)\n # \"discard\" the tie, so it will not be found as the min in\n # the next iteration (instead of removing it, which would\n # modify the list indices, I make it bigger than the\n # biggest input)\n input_copy[bottom_idx] = above_max_input\n if variant_name != \"ORDINAL\":\n # necessary adjustment in case of \"ORDINAL\"\n curr_idx += bottom_amount\n # necessary for the \"DENSE\" variant, to keep a count of ties\n previous_ties += bottom_amount - 1\n\n else: # inverse, i.e., small inputs get high ranks\n # same as the direct methods, but proceeding top->down\n below_min_input = min(input_list) - 1\n curr_idx = len_input_list\n while curr_idx > 0:\n top_indices = argwhere(\n input_copy == amax(input_copy)).flatten().tolist()\n top_amount = len(top_indices)\n for top_idx in top_indices:\n if variant_name == \"AVERAGE\":\n rank_list[top_idx] = \\\n len_input_list - (2 * curr_idx - top_amount - 1) / 2.0\n elif variant_name == \"MIN\":\n rank_list[top_idx] = len_input_list - curr_idx + 1\n elif variant_name == \"MAX\":\n rank_list[top_idx] = len_input_list - curr_idx + top_amount\n elif variant_name == \"DENSE\":\n rank_list[top_idx] = \\\n len_input_list - curr_idx - previous_ties + 1\n elif variant_name == \"ORDINAL\":\n rank_list[top_idx] = len_input_list - curr_idx + 1\n curr_idx -= 1\n else:\n raise NotImplementedError(\n \"%s variant not implemented\" % variant_name)\n input_copy[top_idx] = below_min_input\n if variant_name != \"ORDINAL\":\n curr_idx -= top_amount\n previous_ties += top_amount - 1\n return rank_list", "title": "" }, { "docid": "6fbc53c840ca2f84768031358ce2e64c", "score": "0.4878717", "text": "def iter_callback(self, loss):\r\n pass", "title": "" }, { "docid": "a5afd909b740cc6381124c458688c6c1", "score": "0.48740616", "text": "def feature_ranking(score):\n feature_order = np.argsort(score)\n return feature_order[::-1]", "title": "" }, { "docid": "a5afd909b740cc6381124c458688c6c1", "score": "0.48740616", "text": "def feature_ranking(score):\n feature_order = np.argsort(score)\n return feature_order[::-1]", "title": "" }, { "docid": 
"3982720cc4cfb44c66b798b0bc19a7ab", "score": "0.4871918", "text": "def arrayRankTransform(self, arr: List[int]) -> List[int]:\n elements = list(set(arr))\n sorted_ranks = sorted(elements)\n\n d = {}\n for i, x in enumerate(sorted_ranks): \n d[x] = i + 1\n \n res = []\n for x in arr: \n res.append(d[x])\n \n return res", "title": "" }, { "docid": "7dbf930dda74beaf3d428576f02d0df1", "score": "0.48600328", "text": "def _rank(x, order):\n\n assert order in ['asc', 'desc']\n\n ranks = []\n\n for i in range(len(x)):\n rank = 1\n\n for j in range(len(x)):\n if (order == 'asc' and x[i] < x[j]) or (order == 'desc' and x[i] > x[j]):\n rank += 1\n\n ranks.append(rank)\n\n # resolve ties\n for rank in range(1, max(ranks) + 1):\n count = 0\n indices = []\n\n for i in range(len(x)):\n if ranks[i] == rank:\n count += 1\n indices.append(i)\n\n for index in indices:\n ranks[index] = rank + 0.5 * (count - 1)\n\n return ranks", "title": "" }, { "docid": "aa0d16584ac3c02aed68e4466b97321a", "score": "0.48406768", "text": "def mean_reciprocal_rank(self, r):\n r = np.asarray(r).nonzero()[0]\n return 1. / (r[0] + 1) if r.size else 0.", "title": "" }, { "docid": "55a9cd9286f0a653b4a792d2a72c79b2", "score": "0.48369327", "text": "def _init_global_to_logical_rank_mapping(self,\n mapping: Dict,\n tensor: torch.Tensor,\n index_list: List[int] = []) -> Dict[int, List[int]]:\n for index, inner_tensor in enumerate(tensor):\n # index means the local rank in the current axis\n # inner_tensor refers to the processes with the same local rank\n\n if inner_tensor.numel() == 1:\n # if the inner_tensor only has one element, it means that\n # it already reaches the last axis\n # we append its local_rank in the last axis to the index_list\n # and assign to the mapping\n # the value of the mapping is the the local rank at the indexed axis of the device mesh\n mapping[int(inner_tensor)] = index_list + [index]\n else:\n # we recursively go into the function until we reach the last axis\n # meanwhile, we should add the local rank in the current axis in the index_list\n self._init_global_to_logical_rank_mapping(mapping, inner_tensor, index_list + [index])", "title": "" }, { "docid": "cf82b9698ed096d43cdfc74cd8ddcf35", "score": "0.48320302", "text": "def make_rank_func(weights):\n def rank(matchinfo):\n # matchinfo is defined as returning 32-bit unsigned integers\n # in machine byte order\n # http://www.sqlite.org/fts3.html#matchinfo\n # and struct defaults to machine byte order\n bufsize = len(matchinfo) # Length in bytes.\n matchinfo = [struct.unpack(b'I', matchinfo[i:i + 4])[0]\n for i in range(0, bufsize, 4)]\n it = iter(matchinfo[2:])\n return sum(x[0] * w / x[1]\n for x, w in zip(zip(it, it, it), weights)\n if x[1])\n return rank", "title": "" }, { "docid": "dc4f637174da6e14add1ccfa5e60dd88", "score": "0.4827901", "text": "def rank_population(self):\n scores = self.pool.map(lambda x: self.fittness_function(x), self.population)\n\n self.population = [x for x,_ in sorted(zip(self.population, scores), key = lambda x: x[1])]\n self.scores = sorted(scores)", "title": "" }, { "docid": "57079909ef369101c106d18009c51d2a", "score": "0.48251548", "text": "def rank(self):\n r = ([(i[0], i[2]) for i in self.m])\n return sorted(r, reverse=True)", "title": "" }, { "docid": "f89dcda1a9ffe6572ab22913c660c766", "score": "0.48248476", "text": "def bin_function(i, window_level, window_width, high, low):\n # Binning pixel\n if i < low:\n i = 0\n elif i > high:\n i = 255\n else:\n i = (((i - (window_level - 0.5)) / (window_width - 1)) + 0.5) * 255;\n 
return i", "title": "" }, { "docid": "a8cdbdd3f1535e2daac4b64880f9e6b2", "score": "0.48189706", "text": "def _get_matches_hook(self, y_pred_click_ranks):\n return math_ops.reciprocal(tf.cast(y_pred_click_ranks, tf.float32))", "title": "" }, { "docid": "89e97708c88e0e9e75420d94ba47c985", "score": "0.48075658", "text": "def apply(self, img):\n orig_shape = img.shape\n filtered = flatten_image_array(self.filterbank.apply(im))\n retv, res, neigh, dists = self.knn_eng.find_nearest(filtered, 1)\n return np.array(res, dtype=np.uint32)", "title": "" }, { "docid": "bfec27cc2cc3a0f7b25df017454e7e0c", "score": "0.4791486", "text": "def zrank(self, name, value):\r\n return self.execute_command('ZRANK', name, value)", "title": "" }, { "docid": "68487a87810c7ece72a7cf7965e61a39", "score": "0.4780964", "text": "def rank_zero_only(fn: Callable):\n\n @wraps(fn)\n def wrapped_fn(self, *args, **kwargs):\n if self.rank == 0:\n fn(self, *args, **kwargs)\n\n return wrapped_fn", "title": "" }, { "docid": "04bbdf7fdc5e2593ade7ea77bbfc5612", "score": "0.47293428", "text": "def test_softranks(self, axis, direction):\n shape = tf.TensorShape((3, 8, 6))\n n = shape[axis]\n p = int(np.prod(shape) / shape[axis])\n\n # Build a target tensor of ranks, of rank 2.\n # Those targets are zero based.\n target = tf.constant(\n [np.random.permutation(n) for _ in range(p)], dtype=tf.float32)\n\n # Turn it into a tensor of desired shape.\n target = ops._postprocess(target, shape, axis)\n\n # Apply a monotonic transformation to turn ranks into values\n sign = 2 * float(direction == 'ASCENDING') - 1\n x = sign * (1.2 * target - 0.4)\n\n # The softranks of x along the axis should be close to the target.\n eps = 1e-3\n sinkhorn_threshold = 1e-3\n tolerance = 0.5\n for zero_based in [False, True]:\n ranks = ops.softranks(\n x, direction=direction, axis=axis, zero_based=zero_based,\n epsilon=eps, sinkhorn_threshold=sinkhorn_threshold)\n targets = target + 1 if not zero_based else target\n self.assertAllClose(ranks, targets, tolerance, tolerance)", "title": "" }, { "docid": "64ef1b4a95004c2a7d59ce34b4c810c3", "score": "0.47105777", "text": "def rankdata(self, axis='sample', inplace=True, method='average'):\n def f(val, id_, _):\n return scipy.stats.rankdata(val, method=method)\n return self.transform(f, axis=axis, inplace=inplace)", "title": "" }, { "docid": "cb2d3b1510d630bc606b0d03df9d829b", "score": "0.47105446", "text": "def _score_based_global_pruning(weights, masks, prune_ratios, score_func, normalize=False):\n\n\t# compute scores for all layers\n\tscores = []\n\tfor layer in range(len(weights)):\n\t\tscore = score_func(weights, layer)\n\t\tif normalize:\n\t\t\tscore /= score.norm() # normalize to norm 1\n\t\tscores.append(score.view(-1))\n\tscores = torch.cat(scores, dim=0)\n\n\t# get linearized pruned index\n\t_, idx = torch.sort(scores, descending=True)\n\tsurv_ratio = 1 - prune_ratios[0] # only one scalar\n\tcutoff_index = round(surv_ratio * len(scores))\n\tpruned_idx = idx[cutoff_index:].tolist()\n\n\t# get new masks for each layer\n\tnew_masks = []\n\tfor layer in range(len(weights)):\n\t\t# print(f'Global pruning... 
layer: {layer}')\n\t\tnew_mask = torch.ones(masks[layer].shape)\n\t\tnew_mask_linearized = new_mask.view(-1)\n\n\t\ttotal = len(new_mask_linearized)\n\t\tpruned_idx_layer = [idx for idx in pruned_idx if idx < total]\n\t\tpruned_idx = [idx - total for idx in pruned_idx if idx >= total]\n\n\t\tnew_mask_linearized[pruned_idx_layer] = 0\n\t\tnew_masks.append(new_mask)\n\n\treturn new_masks", "title": "" }, { "docid": "8762b96772d9cb029a8946a9f0765699", "score": "0.47099948", "text": "def rankImplemented(self):", "title": "" }, { "docid": "81318c5b7c473bc6497b1d17ea0c9ea8", "score": "0.47085252", "text": "def test_compute_rank(\n rank_range,\n A_shape,\n num_runs,\n accept_threshold=False,\n logs=False):\n\n print(\"Running test_compute_rank ...\")\n\n results = np.zeros(\n (num_runs, rank_range[1] - rank_range[0] + 1),\n dtype=int)\n\n for rank in range(rank_range[0], rank_range[1] + 1):\n for run in range(num_runs):\n A = _build_matrix_rank_k(A_shape[0], A_shape[1], rank)\n estimated_rank = compute_rank(A)\n\n if logs:\n print(\n \"A rank:\", rank,\n \"estimated rank:\", estimated_rank,\n \"num_run:\", run + 1\n )\n\n if rank == estimated_rank:\n results[run, rank - rank_range[0]] = 1\n\n success_percent = results.mean() * 100\n print(\"(From test_compute_rank) Success %: \", success_percent)\n if accept_threshold is not False:\n assert(success_percent >= accept_threshold)\n return", "title": "" }, { "docid": "ccc534f43e1f188c4be0847e5c17ab29", "score": "0.47035596", "text": "def zrank(self, key, value):\n\n offset = self.get_node_offset(key)\n return self.servers[offset].zrank(key, value)", "title": "" }, { "docid": "b82100ca0e3cd3cef37b1dd5793a8540", "score": "0.47012287", "text": "def rank(self):\n pass", "title": "" }, { "docid": "3c96f9a0334ed230304069c2c80cca5c", "score": "0.46910423", "text": "def intensity_to_iter_index(pixel_val, iterable):\n index = int(pixel_val // (255 / len(iterable)))\n if index == len(iterable):\n index -= 1\n return index", "title": "" }, { "docid": "433b22cbb1c6be11d3e61f1d235681a9", "score": "0.46665376", "text": "def _get_matches_hook(self, y_pred_click_ranks):\n return tf.cast(y_pred_click_ranks, tf.float32)", "title": "" }, { "docid": "b2bad612b29e942a30e03033bd49761d", "score": "0.4663446", "text": "def compute_rank(byte_probabilities, real_byte):\n\n sorted_results = np.argsort(-byte_probabilities)\n for i in range(256):\n if sorted_results[i] == real_byte:\n return i\n raise ValueError(\"Invalid byte value: \", real_byte)", "title": "" }, { "docid": "5abf97a49cf055526aa04640c24982de", "score": "0.46626925", "text": "def __call__(self, y, r):\n\n filt_r = self._ref_prefilter(r)\n\n return filt_r, self._c_fb(filt_r - y)", "title": "" }, { "docid": "4f6d491b76108ae9432d3af38238d5bd", "score": "0.46603632", "text": "def mean_reciprocal_rank(rs):\n rs = (np.asarray(r).nonzero()[0] for r in rs)\n return np.mean([1. / (r[0] + 1) if r.size else 0. 
for r in rs])", "title": "" }, { "docid": "784a3c94d34f45c84ef71b675cb97883", "score": "0.46560594", "text": "def compute_ranks(x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks", "title": "" }, { "docid": "784a3c94d34f45c84ef71b675cb97883", "score": "0.46560594", "text": "def compute_ranks(x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks", "title": "" }, { "docid": "784a3c94d34f45c84ef71b675cb97883", "score": "0.46560594", "text": "def compute_ranks(x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks", "title": "" }, { "docid": "784a3c94d34f45c84ef71b675cb97883", "score": "0.46560594", "text": "def compute_ranks(x):\n assert x.ndim == 1\n ranks = np.empty(len(x), dtype=int)\n ranks[x.argsort()] = np.arange(len(x))\n return ranks", "title": "" }, { "docid": "f2efcb34ea7f71bea5c812d16db6e32c", "score": "0.46499085", "text": "def apply_per_pixel(image, f):\n result = make_image(width(image), height(image))\n for x in range(height(result)):\n for y in range(width(result)):\n color = pixel(image, x, y)\n set_pixel(result, x, y, f(color))\n return result", "title": "" }, { "docid": "d7c3cca64418722e25f276b24212175a", "score": "0.46344262", "text": "def medianrank(im: np.ndarray, padding=0) -> np.ndarray:\n print(\"Warning: medianrank uses simple padding and WILL not work on edge pixels\")\n padded = padding * np.ones((im.shape[0]+2, im.shape[1]+2), dtype=int)\n padded[1:-1, 1:-1] = im\n newim = np.zeros_like(im)\n for i in range(1, im.shape[0]+1):\n for j in range(1, im.shape[1]+1):\n newim[i-1, j-1] = np.median(padded[i-1:i+2, j-1:j+2])\n return newim", "title": "" }, { "docid": "d32abf6eb72aabc0906f66906dca440f", "score": "0.463292", "text": "def batch_roi_func(self):\n return algorithms[self._algo_sel.index].batch_roi_func", "title": "" }, { "docid": "8635e9816d37ad6597198fb47ce1a8f8", "score": "0.4609403", "text": "def sort_pixels(img: Image) -> Image:\n img_width, img_height = img.size\n pixels = img.load() # create the pixel map\n\n j = 0\n while j < img_height:\n d = {}\n for i in range(img_width):\n d[avg(pixels[i,j])] = pixels[i,j]\n x = sorted(d.items())\n i = 0\n while i < (len(x)):\n pixels[i,j] = x[i][1]\n i+=1\n j+=1\n print(len(x))\n return img", "title": "" }, { "docid": "fed568530b65c1c7d5317261fe433201", "score": "0.4607072", "text": "def reRank(self, offset=1):\n\n for x in range(len(self.mMatches)):\n self.mMatches[x].mRank = offset + x", "title": "" }, { "docid": "132301c22286c838d67099c981ac0047", "score": "0.46062937", "text": "def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):\n q_g_dist = q_g_dist.cpu().numpy()\n q_q_dist = q_q_dist.cpu().numpy()\n g_g_dist = g_g_dist.cpu().numpy()\n\n original_dist = np.concatenate(\n [np.concatenate([q_q_dist, q_g_dist], axis=1),\n np.concatenate([q_g_dist.T, g_g_dist], axis=1)],\n axis=0)\n original_dist = np.power(original_dist, 2).astype(np.float32)\n original_dist = np.transpose(1. 
* original_dist / np.max(original_dist, axis=0))\n V = np.zeros_like(original_dist).astype(np.float32)\n initial_rank = np.argsort(original_dist).astype(np.int32)\n\n query_num = q_g_dist.shape[0]\n gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]\n all_num = gallery_num\n\n for i in range(all_num):\n # k-reciprocal neighbors\n forward_k_neigh_index = initial_rank[i, :k1 + 1]\n backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]\n fi = np.where(backward_k_neigh_index == i)[0]\n k_reciprocal_index = forward_k_neigh_index[fi]\n k_reciprocal_expansion_index = k_reciprocal_index\n for j in range(len(k_reciprocal_index)):\n candidate = k_reciprocal_index[j]\n candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(k1 / 2.)) + 1]\n candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,\n :int(np.around(k1 / 2.)) + 1]\n fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2. / 3 * len(\n candidate_k_reciprocal_index):\n k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n\n k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])\n V[i, k_reciprocal_expansion_index] = 1. * weight / np.sum(weight)\n original_dist = original_dist[:query_num, ]\n if k2 != 1:\n V_qe = np.zeros_like(V, dtype=np.float32)\n for i in range(all_num):\n V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)\n V = V_qe\n del V_qe\n del initial_rank\n invIndex = []\n for i in range(gallery_num):\n invIndex.append(np.where(V[:, i] != 0)[0])\n\n jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)\n\n for i in range(query_num):\n temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float32)\n indNonZero = np.where(V[i, :] != 0)[0]\n indImages = [invIndex[ind] for ind in indNonZero]\n for j in range(len(indNonZero)):\n temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(V[i, indNonZero[j]],\n V[indImages[j], indNonZero[j]])\n jaccard_dist[i] = 1 - temp_min / (2. 
- temp_min)\n\n final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value\n del original_dist\n del V\n del jaccard_dist\n final_dist = final_dist[:query_num, query_num:]\n return final_dist", "title": "" }, { "docid": "38a595b721cc8c03ae66d9d0702fec8e", "score": "0.46053797", "text": "def rank_zero(self, f):\n def wrapper(f, *args, **kwargs):\n if self.comm.rank == 0:\n return f(*args, **kwargs)\n return decorator(wrapper, f)", "title": "" }, { "docid": "4aed564c6cee1f7083420e4352688028", "score": "0.46018898", "text": "def __rank_hist(self):\r\n self.ranks = dict()\r\n for crd in self.cards:\r\n self.ranks[crd.rank] = self.ranks.get(crd.rank, 0) + 1", "title": "" }, { "docid": "95f196c87d3df95e3a3fcbf9f89be585", "score": "0.4585896", "text": "def get_rank_vote(r, rank, vote_threshold=0.5):\n\n # Create dataframe for unique taxids filtered at this rank threshold\n taxid_counts = pd.DataFrame(dict.fromkeys(r.staxids.unique(), 1), index=[\"count\"]).T\n # Add taxid for rank being investigated\n rank_df = r.groupby(\"staxids\").first().reset_index()[[rank, \"staxids\"]].set_index(\"staxids\")\n rank_df = pd.merge(taxid_counts, rank_df, left_index=True, right_index=True)\n # Sum counts for current rank\n rank_sum = rank_df.groupby(rank).sum()\n rank_norm = rank_sum.div(rank_sum.sum())\n rank_norm = rank_norm.sort_values(\"count\", ascending=False)\n votes = rank_norm.loc[rank_norm[\"count\"] > vote_threshold]\n if len(votes) > 0:\n return r.loc[r[rank].isin(votes.index)]\n return []", "title": "" }, { "docid": "606c577884ba3a2579ae922305a4fd11", "score": "0.4572619", "text": "def zrank(self, *args):\n if self._cluster:\n return self.execute('ZRANK', *args, shard_key=args[0])\n return self.execute('ZRANK', *args)", "title": "" }, { "docid": "8e54577bf4e6bb53907f65c95aa8930f", "score": "0.45685503", "text": "def zrevrank(self, name, value):\r\n return self.execute_command('ZREVRANK', name, value)", "title": "" }, { "docid": "90c6a03edd64bec9cb86a7f4e216650d", "score": "0.45661572", "text": "def preprocess_fn(self, img):\n img = img / 255.\n return img", "title": "" }, { "docid": "a597bc1285becc2729f05d4ad8d95c56", "score": "0.4562209", "text": "def percentileRank(df, col):\n return (df[col].rank() / float(len(df[col][pd.notnull(df[col])==True]))*100).map(math.floor)", "title": "" }, { "docid": "8b9d1bf7afb21031c609c858d92862bb", "score": "0.45452452", "text": "def _rank_extracted_hotspots(self):\n hotspot_by_score = {hotspot.score_value: hotspot for hotspot in self.extracted_hotspots}\n score = sorted([f[0] for f in hotspot_by_score.items()], reverse=True)\n\n for i, key in enumerate(score):\n hotspot_by_score[key].rank = int(i + 1)\n\n extracted_hotspots_by_rank = {h.rank: h for h in self.extracted_hotspots}\n self.extracted_hotspots = [value for (key, value) in sorted(extracted_hotspots_by_rank.items())]\n\n for i, hs in enumerate(self.extracted_hotspots):\n hs.identifier = hs.score_value #\"rank_{}\".format(hs.rank)\n print(\"rank\", hs.rank, \"score\", hs.score_value)", "title": "" }, { "docid": "bd2065f937f3874f428f4235bdcba662", "score": "0.45439038", "text": "def ranks():\n return range(WORLD_SIZE)", "title": "" }, { "docid": "7e6d046de5fa034ce2513f33cdc423c4", "score": "0.45380482", "text": "def _compute_rank_reduction(matrix):\r\n return min(matrix.shape[0] / 3, 300)", "title": "" }, { "docid": "d5a8766a8e790ea29371c693d3a96a03", "score": "0.45284724", "text": "def rank_features(data,\n nb_bins=20,\n rank_threshold=80,\n z_file=None,\n metric='euclidean',\n 
redundant_threshold=1,\n nb_consensus_methods = 3):\n print(f\"*** Computing 1D feature ranking ...\")\n t1 = time.time()\n mad = statistics.mean_abs_difference(data)\n dispersion = statistics.dispersion(data)\n mean_median = statistics.mean_median(data)\n amam = statistics.amam(data)\n spec = statistics.spec_scores(data)\n\n t2 = time.time()\n print(f\"Dispersion tests took {round(t2-t1, 2)} sec\")\n\n entropy = np.array([\n statistics.compute_entropy(data[:, i], nb_bins=20)\n for i in range(data.shape[1])\n ])\n t3 = time.time()\n print(f\"Entropy computation {round(t3-t2, 2)} sec\")\n\n # compute 3 nearest neighbors and get feature type\n nbrs = NearestNeighbors(n_neighbors=4, algorithm='brute',\n metric=metric).fit(data.T)\n distances, indices = nbrs.kneighbors(data.T)\n\n meta_features = pd.concat([\n pd.DataFrame(data=distances[:, 1:], columns=[\"d1\", \"d2\", \"d3\"]),\n pd.DataFrame(data=indices[:, 1:], columns=[\"f1\", \"f2\", \"f3\"])\n ],\n axis=1)\n # meta_features = pd.DataFrame(index = np.arange(data.shape[1]))\n t4 = time.time()\n print(f\"KNN computation {round(t4-t3, 2)} sec\")\n\n meta_features[\"f\"] = meta_features.index\n meta_features[\"mad\"] = mad\n meta_features[\"dispersion\"] = dispersion\n meta_features[\"amam\"] = amam\n meta_features[\"mean_median\"] = mean_median\n meta_features[\"entropy\"] = entropy\n meta_features[\"spec\"] = -spec\n # meta_features[\"kstest\"] = kstest\n meta_features[\"uniform\"] = (meta_features[\"entropy\"] > 0.95).astype(int)\n features_to_scale = [\"mad\", \"dispersion\", \"amam\", \"mean_median\", \"spec\"]\n meta_features = meta_features.replace([np.inf, -np.inf], 0)\n scaled_values = preprocessing.MinMaxScaler().fit_transform(\n meta_features[meta_features[\"uniform\"] == 0][features_to_scale].values)\n\n thresholds = np.percentile(scaled_values, rank_threshold, axis=0)\n scaled_values = (scaled_values > thresholds).astype(int)\n\n meta_features[\"rank_mad\"] = -1\n meta_features[\"rank_dispersion\"] = -1\n meta_features[\"rank_amam\"] = -1\n meta_features[\"rank_mean_median\"] = -1\n meta_features[\"rank_spec\"] = -1\n\n meta_features.loc[meta_features[meta_features[\"uniform\"] == 0].index, [\n \"rank_mad\", \"rank_dispersion\", \"rank_amam\", \"rank_mean_median\",\n \"rank_spec\"\n ]] = scaled_values\n\n meta_features[\"relevance\"] = meta_features[[\n \"rank_mad\", \"rank_dispersion\", \"rank_amam\", \"rank_mean_median\",\n \"rank_spec\"\n ]].sum(axis=1)\n\n t5 = time.time()\n print(f\"Sorting and thresholds {round(t5-t4, 2)} sec\")\n if z_file is not None and os.path.isfile(z_file):\n print(\"Loading clustering from file\")\n Z = np.load(z_file)\n else:\n print(\"Performing hierarchical clustering...\")\n Z = linkage(data.T, method='complete', metric=metric)\n\n pred = fcluster(Z, redundant_threshold, criterion='distance')\n redundant = np.zeros_like(pred)\n\n t6 = time.time()\n print(f\"Hierarchical clustering {round(t6-t5, 2)} sec\")\n for c in np.unique(pred):\n idx = np.where(pred == c)[0]\n if len(idx) > 1:\n redundant[idx] = 1\n # select the features with the highest relevance score\n representative_features = meta_features[meta_features[\"f\"].isin(\n idx)].sort_values(by=\"relevance\",\n ascending=False)[\"f\"].values[:2]\n redundant[representative_features] = 0\n\n else:\n redundant[idx] = 0\n t7 = time.time()\n print(f\"Handle redundant features {round(t7-t6, 2)} sec\")\n\n meta_features[\"clusters\"] = pred\n meta_features[\"redundant\"] = redundant\n\n meta_features[\"1d\"] = 
meta_features[\"relevance\"].apply(lambda x: 1\n if x >= nb_consensus_methods else -1)\n t8 = time.time()\n print(f'Returning {meta_features[\"redundant\"].value_counts().get(1, 0)} redundant features ' +\n f'and {meta_features[\"1d\"].value_counts().get(1, 0)} important features')\n return meta_features", "title": "" }, { "docid": "1d39184677f4c834492a49f12cb8b360", "score": "0.45182067", "text": "def set_spatial_rank(self, rank: Optional[List[int]] = None) -> None:\n if rank is None and self.rank_fn is not None:\n rank = self.rank_fn()\n self.cur_rank = rank", "title": "" }, { "docid": "28110918ef18582768596a46818a465e", "score": "0.45178455", "text": "def _evaluate(self):\n filter = self.neuron.filters[self.channel]\n center = np.array(filter.shape) / 2\n\n ra_pix, dec_pix = self.coords.coords[\"offsets-neuron\"]\n\n # Recall C-style ordering of two-dimensional arrays. Shouldn't matter\n # too much as PINK enforces equal image dimensions for x/y axes\n self.ra_pix = ra_pix.value + center[1]\n self.dec_pix = dec_pix.value + center[0]\n\n # TODO: Consider desired behaviour when it comes to rounding pixel coordinates\n # TODO: Resolve the index value to the labels\n self.product_labels = filter[self.dec_pix.astype(int), self.ra_pix.astype(int)]\n self.coord_labels = list(map(self.neuron.resolve_label, self.product_labels))", "title": "" }, { "docid": "b5a1e36910a9228c1531ff3c2735240f", "score": "0.45113224", "text": "def compute_ranking(self) -> List[Tuple[Player, int]]:\n ranking = []\n for player in self.lst_player:\n points = player.get_points()\n ranking.append((player, points))\n ranking = sorted(ranking, key=lambda x: x[1], reverse=True)\n return ranking", "title": "" }, { "docid": "066721da4d37dc192062f4ba9ae1201c", "score": "0.45077705", "text": "def rerank(recos_df, user_id, rank_col):\n recos_df = groupby_rank(recos_df, user_id, rank_col, rank_col)\n return recos_df", "title": "" }, { "docid": "937dd58178f0b25f45bad44edebb783c", "score": "0.45075577", "text": "def rank_2d(confidences_2d: List[List[float]]) -> List[int]:\n raise NotImplementedError(\"Subclasses must implement this method\")", "title": "" }, { "docid": "bdedbd27b4722919383c45ab29d9cb47", "score": "0.45074695", "text": "def getRank(self) -> int:\n ...", "title": "" }, { "docid": "ef4b3db186e88a9172eac017507b3778", "score": "0.45048597", "text": "def map_i(self, r):\n return closest_index(self.R, r)", "title": "" }, { "docid": "da950a8a566564779e48b55fd25030e7", "score": "0.4485877", "text": "def _do_global_compute_points(cls, index_to_ri_scores, **kwargs):\n # This implementation doesn't do anything global.\n # Sub classes should override this if they need different\n # behaviour.\n for ri_scores in index_to_ri_scores:\n ri_scores.compute_points()", "title": "" }, { "docid": "0e8f60d34f0c9717e3090c1e1b5bcff9", "score": "0.44778588", "text": "def run(self, image):\n image = image.copy()\n image = np.moveaxis(image, -1, 0)\n result = []\n image *= 255\n image = image.astype(np.uint8)\n for i, stack in enumerate(image):\n local_otsu = filters.rank.otsu(stack, disk(self.radius))\n mask = stack > local_otsu\n stack = stack * mask\n if self.parameters['open_radius']:\n stack = ndimage.grey_opening(stack, self.parameters['open_radius'])\n result.append(stack)\n result = np.stack(result, axis=0)\n result = np.moveaxis(result, 0, -1)\n self.mask = result > 0\n self.image = self.apply_mask(result, self.mask)\n return self.image", "title": "" }, { "docid": "754d8d551e5cd0d0eadcd44f3ae6ad20", "score": "0.44767392", "text": "def 
ranks(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Rank]:", "title": "" }, { "docid": "c01accf8614aba55924b8fcac9c75f3d", "score": "0.44740134", "text": "def pre_process_image(image: np.ndarray) -> np.ndarray:\n image[:, :, 0] = cv2.equalizeHist(image[:, :, 0])\n image[:, :, 1] = cv2.equalizeHist(image[:, :, 1])\n image[:, :, 2] = cv2.equalizeHist(image[:, :, 2])\n image = image/255.-.5\n return image", "title": "" }, { "docid": "02bfab41d7e38262653ff60a851077af", "score": "0.44735047", "text": "def zrank(self, name, value, **options):\n return self.execute_command('ZRANK', name, value, **options)", "title": "" }, { "docid": "ea4ed5f93b3a7b1d470c39b5551d1a17", "score": "0.44732857", "text": "def zipf(rank, C=1, alpha=1):\n return C / (rank**alpha)", "title": "" }, { "docid": "1e460a0cc63c872a266d9419edbb76fe", "score": "0.44732565", "text": "def rank_2d(confidences_2d: List[List[float]]) -> List[int]:\n all_ordered_sample_indices = [\n heuristic.rank_2d(confidences_2d)\n for heuristic in EnsembleSampling.get_heuristics_2d()\n ]\n return Heuristic.ordered_indices_list_to_final_rank(all_ordered_sample_indices)", "title": "" }, { "docid": "ef209ef8e1a8756cd2ba9d436fdd817f", "score": "0.44552544", "text": "def zremrangebyrank(self, name, min, max, callback=None):\n self._execute_command('ZREMRANGEBYRANK', name, min, max,\n callback=callback)", "title": "" }, { "docid": "a213e79d362979ed2bc42363abf91f83", "score": "0.44440314", "text": "def test_neighbor_allgather_order(self):\n size = bf.size()\n rank = bf.rank()\n if size <= 1:\n fname = inspect.currentframe().f_code.co_name\n warnings.warn(\"Skip {} due to size 1\".format(fname))\n return\n dtypes = [torch.FloatTensor]\n if TEST_ON_GPU:\n dtypes += [torch.cuda.FloatTensor]\n for dtype in dtypes:\n tensor = torch.FloatTensor([rank])\n tensor = self.cast_and_place(tensor, dtype)\n gathered = bf.neighbor_allgather(tensor)\n # The order of gathered value is always the same as the in_neighbor_ranks.\n np.testing.assert_allclose(gathered.cpu().numpy(), bf.in_neighbor_ranks())", "title": "" }, { "docid": "4a90a6b9d0779ea97c8d98eba2079a99", "score": "0.44403872", "text": "def _create_ranked_list(self):\r\n # For now, just get negatively classified data and rank it in accordance with predict_proba values\r\n # (Lower value is higher ranked)\r\n self.ranked_list = sorted(self.data_points, key=lambda x: abs(x.score))\r\n return self.ranked_list", "title": "" }, { "docid": "995c5947eca7e83cc9b54ac3771c78c6", "score": "0.44387415", "text": "def calculate_percentiles_from_raster(raster_uri, percentiles):\n raster = gdal.Open(raster_uri, gdal.GA_ReadOnly)\n\n def numbers_from_file(fle):\n \"\"\"Generates an iterator from a file by loading all the numbers\n and yielding\n\n fle = file object\n \"\"\"\n arr = np.load(fle)\n for num in arr:\n yield num\n\n # List to hold the generated iterators\n iters = []\n\n band = raster.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n\n n_rows = raster.RasterYSize\n n_cols = raster.RasterXSize\n\n # Variable to count the total number of elements to compute percentile\n # from. 
This leaves out nodata values\n n_elements = 0\n\n #Set the row strides to be something reasonable, like 256MB blocks\n row_strides = max(int(2**28 / (4 * n_cols)), 1)\n\n for row_index in xrange(0, n_rows, row_strides):\n #It's possible we're on the last set of rows and the stride\n #is too big, update if so\n if row_index + row_strides >= n_rows:\n row_strides = n_rows - row_index\n\n # Read in raster chunk as array\n arr = band.ReadAsArray(0, row_index, n_cols, row_strides)\n\n tmp_uri = pygeoprocessing.geoprocessing.temporary_filename()\n tmp_file = open(tmp_uri, 'wb')\n # Make array one dimensional for sorting and saving\n arr = arr.flatten()\n # Remove nodata values from array and thus percentile calculation\n arr = np.delete(arr, np.where(arr == nodata))\n # Tally the number of values relevant for calculating percentiles\n n_elements += len(arr)\n # Sort array before saving\n arr = np.sort(arr)\n\n np.save(tmp_file, arr)\n tmp_file.close()\n tmp_file = open(tmp_uri, 'rb')\n tmp_file.seek(0)\n iters.append(numbers_from_file(tmp_file))\n arr = None\n\n # List to store the rank/index where each percentile will be found\n rank_list = []\n # For each percentile calculate nearest rank\n for perc in percentiles:\n rank = math.ceil(perc/100.0 * n_elements)\n rank_list.append(int(rank))\n\n # A variable to burn through when doing heapq merge sort over the\n # iterators. Variable is used to check if we've iterated to a\n # specified rank spot, to grab percentile value\n counter = 0\n # Setup a list of zeros to replace with percentile results\n results = [0] * len(rank_list)\n\n LOGGER.debug('Percentile Rank List: %s', rank_list)\n\n for num in heapq.merge(*iters):\n # If a percentile rank has been hit, grab percentile value\n if counter in rank_list:\n LOGGER.debug('percentile value is : %s', num)\n results[rank_list.index(counter)] = int(num)\n counter += 1\n\n band = None\n raster = None\n return results", "title": "" }, { "docid": "2c61787a4a4811b986c66f86e8747d44", "score": "0.4432996", "text": "def global_rank_to_local_rank(self, rank: int, axis: int = None) -> Union[List[int], int]:\n if self._is_init_from_process_group:\n raise RuntimeError(\n \"The logical device mesh is create with DeviceMesh.from_process_group, this method is not supported for this creation method as no global rank information is known.\"\n )\n\n local_ranks = self._global_to_local_rank_mapping[rank]\n if axis:\n return local_ranks[axis]\n else:\n return local_ranks", "title": "" }, { "docid": "8cfe65f89b528ef5ac04815b59686ad2", "score": "0.44320697", "text": "def rank_sort(a):\n return(rank.index(a[1])*10 + suit.index(a[0]))", "title": "" }, { "docid": "3f5c768dfc359ecf31627ef92cc62446", "score": "0.44309822", "text": "def rank_freq(hist):\n # sort the list of frequencies in decreasing order\n freqs = list(hist.values())\n freqs.sort(reverse=True)\n\n # enumerate the ranks and frequencies\n rf = [(r + 1, f) for r, f in enumerate(freqs)]\n return rf", "title": "" }, { "docid": "8b63c446018b3a77cd4131c8e54698e1", "score": "0.4429732", "text": "def get_ranks(d):\n raise NotImplementedError(\"Problem 3 Incomplete\")", "title": "" }, { "docid": "4e0ba9964a9832e38820feaf45af0715", "score": "0.4426355", "text": "def zremrangebyrank(self, *args):\n if self._cluster:\n return self.execute('ZREMRANGEBYRANK', *args, shard_key=args[0])\n return self.execute('ZREMRANGEBYRANK', *args)", "title": "" }, { "docid": "2e92f8ce6c242bd1cf4cf4169fe2a643", "score": "0.44156924", "text": "def rank_zero_only(fn: Callable) -> Callable:\n\n 
@wraps(fn)\n def wrapped_fn(*args: Any, **kwargs: Any) -> Optional[Any]:\n if rank_zero_only.rank == 0:\n return fn(*args, **kwargs)\n return None\n\n return wrapped_fn", "title": "" }, { "docid": "41c2d467a48dcc86fd12408dfcd6dfa7", "score": "0.44154748", "text": "def print_ranks(hist):\n for r, f in rank_freq(hist):\n print(r, f)", "title": "" }, { "docid": "d972c4df2937e92a1fea20b3b0427d8f", "score": "0.44146284", "text": "def conv2d_rank_weight_l1norm_fan_out(module: nn.Conv2d):\n _, idx = torch.sort(torch.sum(torch.abs(module.weight.data), dim=(1, 2, 3)), dim=0, descending=True)\n return idx", "title": "" }, { "docid": "e97168b5c781416de588ddcec4ddf49d", "score": "0.44128537", "text": "def dispatch(rank=0):\n\n def _sort_by_value(d):\n items = list(d.items())\n random.shuffle(items)\n items.sort(key=lambda x: x[1])\n return list(item[0] for item in items)\n\n for k, v in rpc_lb.items():\n logging.info(\"######rpc_lb[%s]=%f\", rpc_machine.get(k)[0], v)\n lb_list = _sort_by_value(rpc_lb)\n if len(lb_list) > rank:\n return lb_list[rank]\n return lb_list[len(lb_list) - 1]", "title": "" }, { "docid": "0d4a14d473d1193b2e09a86a6536cb19", "score": "0.44116879", "text": "def popularity_rank_history(self):\n return self._get_popularity_history(lambda x: (x.score_rank, x.total_number))", "title": "" }, { "docid": "07264c02afbd12b63b83a2a78edf6d40", "score": "0.44116503", "text": "def scan_neighbours(self, row, column):\n\n # are we still within the shape?\n if row < 0:\n return 'out'\n if row >= self.shape[0]:\n return 'out'\n if column < 0:\n return 'out'\n if column >= self.shape[1]:\n return 'out'\n\n # get ownership for this pixel if not used yet\n # with LockContext(self.region.region_lock):\n if True:\n if self.region.done[row, column] == self.cluster_id:\n return 'done'\n if self.region.done[row, column] > 0:\n return '%s' % self.region.done[row, column]\n\n self.region.done[row, column] = self.cluster_id\n\n # now this pixel is ours => we may use it\n if self.region.below[row, column]:\n return 'below'\n\n # add this pixel to the filtered image\n # with LockContext(self.region.image_lock):\n self.region.image[row, column] = self.region.region[row, column]\n\n # accumulate statistics\n self.cluster.add(row, column, self.region.region[row, column])\n\n # recurse\n if column < self.shape[1]:\n status = self.scan_neighbours(row, column+1)\n if status not in ('done', 'below', 'out'):\n self.cluster.neighbours[status] = True\n if row > 0:\n status = self.scan_neighbours(row-1, column)\n if status not in ('done', 'below', 'out'):\n self.cluster.neighbours[status] = True\n if column > 0:\n status = self.scan_neighbours(row, column-1)\n if status not in ('done', 'below', 'out'):\n self.cluster.neighbours[status] = True\n if row < self.shape[0]:\n status = self.scan_neighbours(row+1, column)\n if status not in ('done', 'below', 'out'):\n self.cluster.neighbours[status] = True\n\n return 'done'", "title": "" }, { "docid": "884681881dbe4537c2f1d730265822eb", "score": "0.44037852", "text": "def zrevrank(self, *args):\n if self._cluster:\n return self.execute('ZREVRANK', *args, shard_key=args[0])\n return self.execute('ZREVRANK', *args)", "title": "" }, { "docid": "df9d8d4031e1740103f0ff12a7dd72d3", "score": "0.44014186", "text": "def percentile_rank(scores, your_score):\n count = 0\n for score in scores:\n if score <= your_score:\n count += 1\n\n percentile_rank = 100.0 * count / len(scores)\n return percentile_rank", "title": "" }, { "docid": "9f48ed21fe726e5e641bf46978b7f273", "score": "0.43997282", 
"text": "def zrevrank(self, key, value):\n\n offset = self.get_node_offset(key)\n return self.servers[offset].zrevrank(key, value)", "title": "" }, { "docid": "0eec2d5fdb17d8413bb4e1ff1027e142", "score": "0.43948343", "text": "def rank(self, user_index, known_items = None): \n \n u_pref_score = np.array(self.score(user_index))\n if known_items is not None:\n u_pref_score[known_items] = None\n \n rank_item_list = (-u_pref_score).argsort() # ordering the items (in decreasing order) according to the preference score\n\n return rank_item_list", "title": "" }, { "docid": "c2f25f629ba23ddf59c5fe0c964f945d", "score": "0.43946314", "text": "def remove_rank(target, ranks):\n return tuple(filter(lambda x: x != target, ranks))", "title": "" }, { "docid": "af88adb4309a7813d8972ab78fb3d78f", "score": "0.43913612", "text": "def _ranks_from_score_columns(\n pred,\n *,\n true_modified_node_ilocs,\n unmodified_node_ilocs,\n true_rel_ilocs,\n modified_object,\n known_edges_graph,\n tie_breaking,\n):\n batch_size = len(true_modified_node_ilocs)\n assert pred.shape == (known_edges_graph.number_of_nodes(), batch_size)\n assert unmodified_node_ilocs.shape == true_rel_ilocs.shape == (batch_size,)\n\n # the score of the true edge, for each edge in the batch (this indexes in lock-step,\n # i.e. [pred[true_modified_node_ilocs[0], range(batch_size)[0]], ...])\n true_scores = pred[true_modified_node_ilocs, range(batch_size)]\n\n # for each column, compare all the scores against the score of the true edge\n greater = pred > true_scores\n greater_equal = pred >= true_scores\n\n # the raw rank is the number of elements scored higher than the true edge\n raw_rank = _ranks_from_comparisons(greater, greater_equal, tie_breaking)\n\n # the filtered rank is the number of unknown elements scored higher, where an element is\n # known if the edge (s, r, n) (for modified-object) or (n, r, o) (for modified-subject)\n # exists in known_edges_graph.\n if modified_object:\n neigh_func = known_edges_graph.out_nodes\n else:\n neigh_func = known_edges_graph.in_nodes\n\n for batch_column, (unmodified, r) in enumerate(\n zip(unmodified_node_ilocs, true_rel_ilocs)\n ):\n this_neighs = neigh_func(unmodified, edge_types=[r], use_ilocs=True)\n greater[this_neighs, batch_column] = False\n greater_equal[this_neighs, batch_column] = False\n\n # the actual elements should be counted as equal, whether or not it was a known edge or not\n greater_equal[true_modified_node_ilocs, range(batch_size)] = True\n\n filtered_rank = _ranks_from_comparisons(greater, greater_equal, tie_breaking)\n\n assert raw_rank.shape == filtered_rank.shape == (batch_size,)\n return raw_rank, filtered_rank", "title": "" }, { "docid": "c9a9b19e5e10d41fe7838b2c71d501d9", "score": "0.43907857", "text": "def rec_pin_callback(self, _):\n ori = self.read_bin_pins()\n \n self.photos = self.photos_dict[ori]\n\n if self.verbose:\n print('ori trial set ', intensity)", "title": "" }, { "docid": "384423abc152dcdf620ab91a52b0d7c1", "score": "0.43874735", "text": "def computeMappingFunc(self, I, outNum=10):\n assert (outNum <= 10)\n I = to_numpy(I) # convert to double\n feature = self.encode(self.rgbuv_hist(I))\n if outNum < len(self.wb_photo_finishing):\n wb_pf = rnd.sample(self.wb_photo_finishing, outNum)\n inds = []\n for j in range(outNum):\n inds.append(self.wb_photo_finishing.index(wb_pf[j]))\n\n else:\n wb_pf = self.wb_photo_finishing\n inds = list(range(0, len(wb_pf)))\n mfs = []\n\n D_sq = np.einsum('ij, ij ->i', self.features,\n self.features)[:, None] + np.einsum(\n 'ij, ij 
->i', feature, feature) - 2 * self.features.dot(feature.T)\n\n # get smallest K distances\n idH = D_sq.argpartition(self.K, axis=0)[:self.K]\n dH = np.sqrt(\n np.take_along_axis(D_sq, idH, axis=0))\n weightsH = np.exp(-(np.power(dH, 2)) /\n (2 * np.power(self.sigma, 2))) # compute weights\n weightsH = weightsH / sum(weightsH) # normalize blending weights\n for i in range(len(inds)): # for each of the retried training examples,\n ind = inds[i] # for each WB & PF style,\n # generate a mapping function\n mf = sum(np.reshape(np.matlib.repmat(weightsH, 1, 27),\n (self.K, 1, 9, 3)) *\n self.mappingFuncs[(idH - 1) * 10 + ind, :])\n mfs.append(mf.reshape(9, 3, order=\"F\")) # reshape it to be 9 * 3\n return mfs", "title": "" } ]
897e1dc8a4d5d109f8f194759653c2ed
Replace the volume with the average of its left half and right half.
[ { "docid": "646a1178452e416d9a31392814ab4d31", "score": "0.0", "text": "def symmetricalize_volume(prob_vol):\n\n zc = prob_vol.shape[2]/2\n prob_vol_symmetric = prob_vol.copy()\n left_half = prob_vol[..., :zc]\n right_half = prob_vol[..., -zc:]\n left_half_averaged = (left_half + right_half[..., ::-1])/2.\n prob_vol_symmetric[..., :zc] = left_half_averaged\n prob_vol_symmetric[..., -zc:] = left_half_averaged[..., ::-1]\n return prob_vol_symmetric", "title": "" } ]
[ { "docid": "09b27b1c83e6077463596ea13e3703fe", "score": "0.64357704", "text": "def normalize_volume_in_place(vol):\n\n assert isinstance(vol, pydeform.Volume)\n\n # Convert the volume object into a numpy array\n # `copy=False` means that the array object only holds a reference to the \n # data within the volume allowing direct manipulation of elements.\n \n data = np.array(vol, copy=False)\n \n # Here we need to watch out since the typical\n # `data = data / data.max()`\n # will actually create a new array, not bound to the volume.\n # `out=data` makes sure we write to our original array.\n np.divide(data, np.max(data), out=data)", "title": "" }, { "docid": "e31c57cbc785972b4259be497091e176", "score": "0.6376455", "text": "def averageOptionVolumeAbove_0(self, volume):\r\n self.m_averageOptionVolumeAbove = volume", "title": "" }, { "docid": "67d021042ad5bad5031a8d597d812a1d", "score": "0.6302006", "text": "def adj_low_volume(mean_volume):\n return 0.2 * mean_volume", "title": "" }, { "docid": "6aac503a8073c0a858e9bc9337407f0d", "score": "0.6288235", "text": "def volume(self) -> float:\n x_min, x_max = self.x_range\n y_min, y_max = self.y_range\n z_min, z_max = self.z_range\n return (x_max - x_min) * (y_max - y_min) * (z_max - z_min)", "title": "" }, { "docid": "e5dfd717944a0b9fa572a16dfc1006b6", "score": "0.62233734", "text": "def volume(self): \n return (4/3) * math.pi * (self.radius ** 3)", "title": "" }, { "docid": "db283a3c544270f43489ecb423e9b9d0", "score": "0.6195064", "text": "def trap(self, A):\r\n left_maxs = [0 for _ in A] # on the left including itself\r\n right_maxs = [0 for _ in A] # on the right including itself\r\n\r\n left_max = 0\r\n for ind, val in enumerate(A):\r\n left_max = max(left_max, val)\r\n left_maxs[ind] = left_max\r\n\r\n right_max = 0\r\n # for ind, val in enumerate(reversed(A)): # ind incorrect\r\n for ind, val in reversed(list(enumerate(A))):\r\n right_max = max(right_max, val)\r\n right_maxs[ind] = right_max\r\n\r\n # calculate the volume\r\n volume = 0\r\n for ind, val in enumerate(A):\r\n volume += max(0, min(left_maxs[ind], right_maxs[ind]) - val)\r\n\r\n return volume", "title": "" }, { "docid": "fde41c65fdff7fd9b97eaafa38fcd909", "score": "0.6194961", "text": "def volume_equalizer(wav):\n control_vol = 0.1\n\n chunks = np.array_split(wav,8)\n vol = np.array([np.sqrt(np.mean(chunk**2)) for chunk in chunks])\n max_vol = vol.max()\n\n if max_vol == 0:\n return wav\n wav = wav * control_vol/max_vol\n wav = np.clip(wav,-1.0,1.0)\n return wav", "title": "" }, { "docid": "c68c5629569d1f9cb0e736ac34114d33", "score": "0.61137545", "text": "def calc_volume(self):\n vol = 0.\n for i, face in enumerate(self.faces):\n xc = self.get_tri_center(i)\n face_area = self.get_hs(i) * 0.5\n face_normal = self.get_normal(i)\n vol += np.dot(xc, face_normal) * face_area\n vol /= 3.0\n return vol", "title": "" }, { "docid": "896617b8517b08f623937461ef42e63b", "score": "0.61090404", "text": "def _handler_compute_volume(self, event):\n if self.test_valid_mask_selection_a():\n names_a = self._view_frame.get_selected_mask_names_a()\n union_masksA = self.compute_mask_union(names_a)\n\n spacing = union_masksA.data.GetSpacing()\n voxel_volume = spacing[0] * spacing[1] * spacing[2]\n \n accumulator = vtk.vtkImageAccumulate()\n accumulator.SetInput(union_masksA.data)\n accumulator.Update()\n nonzero_count = accumulator.GetMean()[0] * accumulator.GetVoxelCount()\n\n volume = voxel_volume * nonzero_count / 1000.0\n\n print \"Volume = %.2f ml\" % (volume)\n copy_to_clipboard = 
self._view_frame.dialog_yesno('Volume = %f ml\\n\\nCopy to clipboard?' % volume, 'Volume = %.1f%% ml' % (volume))\n if copy_to_clipboard:\n self._view_frame.copy_text_to_clipboard('%f' % volume)", "title": "" }, { "docid": "b1fc9582b3ebcef59d1fa5c3a152075b", "score": "0.60599655", "text": "def itensity_normalize_one_volume(volume):\n \n pixels = volume[volume > 0]\n mean = pixels.mean()\n std = pixels.std()\n out = (volume - mean)/std\n out_random = np.random.normal(0, 1, size = volume.shape)\n out[volume == 0] = out_random[volume == 0]\n return out", "title": "" }, { "docid": "92ad0ed85a94119bcf0db84206171afd", "score": "0.6051924", "text": "def volume(self):\n return np.prod(np.diff(self.bounds))", "title": "" }, { "docid": "c18686e5c002b8c82c939f1714d5e4b6", "score": "0.60364354", "text": "def normalize_volume(vol):\n\n assert isinstance(vol, pydeform.Volume)\n\n # Create a new array with a copy of the volume data\n data = np.array(vol)\n\n # Create a new normalized array\n data = data / np.max(data)\n\n # Create a new volume\n out = pydeform.Volume(data)\n # Copy original meta data (origin, spacing, direction)\n out.copy_meta_from(vol)\n\n return out", "title": "" }, { "docid": "0594713ba96899eb1981d34694c1f4cc", "score": "0.59745955", "text": "def volume(self):\n return (4 / 3) * math.pi * (self.radius ** 3)", "title": "" }, { "docid": "3d488728b93a2eb0d53fcf7e436e6935", "score": "0.59629804", "text": "def averageOptionVolumeAbove(self):\r\n return self.m_averageOptionVolumeAbove", "title": "" }, { "docid": "6db35ec2647535d59e0e0fad62bbe67c", "score": "0.59208107", "text": "def volume(self):\n return round((4 / 3) * math.pi * (self.radius ** 3), 2)", "title": "" }, { "docid": "16db50df2c6df033cde4157db8e03c81", "score": "0.59183574", "text": "def onchange_l_b_h(self):\n self.volume = float(self.ks_length if self.ks_length else 0) * float(\n self.ks_width if self.ks_width else 0) * float(\n self.ks_height if self.ks_height else 0)", "title": "" }, { "docid": "2ecbf1f5810740e40f774a8b29f9a104", "score": "0.5848753", "text": "def update_mv_avg(self, code):\n \n \n # new mvg average is equal to sum of the close prices divided by the number of prices stored\n new_mv_avg = sum(self.get_price(code)['close']) / len(self.get_price(code)['close'])\n self.set_mv_avg(code, new_mv_avg)", "title": "" }, { "docid": "7d10c06d7c7851caf108baaa0f4f5442", "score": "0.58411497", "text": "def M_average(self):\n volume_Ms = df.assemble(self._Ms_dg * df.dx)\n volume = df.assemble(self._Ms_dg * df.dx)\n return self.m_average * volume_Ms / volume", "title": "" }, { "docid": "b79dff999fb21b42c08cfd3ca9ab93c4", "score": "0.5836245", "text": "def prismVolume(length, height, width):\n volume = (width * length) * height\n\n return volume", "title": "" }, { "docid": "06f8d622e1efd756406596021139039b", "score": "0.58252376", "text": "def volume(length=10, width=5, height=2):\n return length * width * height", "title": "" }, { "docid": "971f885b4664ef792c594493b248595a", "score": "0.5803729", "text": "def average(self):\n if not self:\n raise(\"empty list\")\n \n av = self[0]\n\n for d in self[1:]:\n av.u += d.u\n av.v += d.v\n \n av.u /= len(self)\n av.v /= len(self)\n\n return av", "title": "" }, { "docid": "cd8ddc74b523bce0352105294fbfac72", "score": "0.5789603", "text": "def test_cut_volume_in_half(self):\n # There is a lot going on under the hood with .get_current_volume() and\n # .change_volume_using_percentage() . 
Check out /pagemodels/videopage.py\n video_page = pagemodels.videopage.VideoPage(self.driver)\n\n current_volume = video_page.get_current_volume() # returns a float 0 <= x <= 1\n\n target_volume = current_volume/2\n video_page.change_volume_using_percentage(target_volume)\n\n new_volume = video_page.get_current_volume()\n self.assertAlmostEqual(new_volume, target_volume, delta=0.02)\n\n # TEST CLEANUP- default state is 50% volume(not strictly enfored but recommended state).\n video_page.change_volume_using_percentage(.5)", "title": "" }, { "docid": "028cf90697a05fcb91f9834e82e826ce", "score": "0.5783894", "text": "def calc_db(self, volume):\n return self._min_volume + round(\n abs(self._min_volume - self._max_volume) * volume)", "title": "" }, { "docid": "80ec03b64ae6a986001515776b13aff9", "score": "0.57761586", "text": "def calc_volume(self):\n v_i = self.lat.matrix[0] \n v_j = self.lat.matrix[1] \n v_k = self.lat.matrix[2]\n \n v_ij = np.cross(v_i,v_j)\n self._property['volume'] = np.dot(v_ij,v_k)\n \n return", "title": "" }, { "docid": "913496a69dcaea84eba4a8b96582d8c6", "score": "0.577537", "text": "def vwap(csv_data, ticker):\n df=csv_data.loc[lambda x: x.Symbol==ticker, :]\n df.loc[:,'Average_vol']=((df.High+df.Low+df.Close)/3)*df.Volume\n return (np.sum(df['Average_vol'])/np.sum(df.Volume))", "title": "" }, { "docid": "bd88f9cecf7e87d0cf2b4aa8ebd3ba00", "score": "0.5772497", "text": "def volume(aabb):\n if not is_valid(aabb):\n return None\n n = dimension(aabb)\n vol = 1\n for i in range(n):\n vol *= aabb[1][i] - aabb[0][i]\n return vol", "title": "" }, { "docid": "ba352371da8a35012caea75bfe4bc8f7", "score": "0.5769134", "text": "def cub_volume(side):\r\n return float(side)**3", "title": "" }, { "docid": "52039a522ce48636378feb79a4bd7176", "score": "0.57661915", "text": "def volume(self):\n return sum(morphmath.segment_volume(s) for s in iter_segments(self))", "title": "" }, { "docid": "f7b7f0c64b7ecc859d2149b65e7948c9", "score": "0.5740939", "text": "def alt_volume_average(edges_x, edges_y, edges_z, values,\n new_edges_x, new_edges_y, new_edges_z, new_values):\n\n # Get cell indices.\n # First and last edges ignored => first and last cells extend to +/- infty.\n ix_l = np.searchsorted(edges_x[1:-1], new_edges_x, 'left')\n ix_r = np.searchsorted(edges_x[1:-1], new_edges_x, 'right')\n iy_l = np.searchsorted(edges_y[1:-1], new_edges_y, 'left')\n iy_r = np.searchsorted(edges_y[1:-1], new_edges_y, 'right')\n iz_l = np.searchsorted(edges_z[1:-1], new_edges_z, 'left')\n iz_r = np.searchsorted(edges_z[1:-1], new_edges_z, 'right')\n\n # Get number of cells.\n ncx = len(new_edges_x)-1\n ncy = len(new_edges_y)-1\n ncz = len(new_edges_z)-1\n\n # Working arrays for edges.\n x_edges = np.empty(len(edges_x)+2)\n y_edges = np.empty(len(edges_y)+2)\n z_edges = np.empty(len(edges_z)+2)\n\n # Loop over new_grid cells.\n for iz in range(ncz):\n hz = np.diff(new_edges_z[iz:iz+2])[0] # To calc. 
current cell volume.\n\n for iy in range(ncy):\n hyz = hz*np.diff(new_edges_y[iy:iy+2])[0] # \" \"\n\n for ix in range(ncx):\n hxyz = hyz*np.diff(new_edges_x[ix:ix+2])[0] # \" \"\n\n # Get start edge and number of cells of original grid involved.\n s_cx = ix_r[ix]\n n_cx = ix_l[ix+1] - s_cx\n\n s_cy = iy_r[iy]\n n_cy = iy_l[iy+1] - s_cy\n\n s_cz = iz_r[iz]\n n_cz = iz_l[iz+1] - s_cz\n\n # Get the involved original grid edges for this cell.\n x_edges[0] = new_edges_x[ix]\n for i in range(n_cx):\n x_edges[i+1] = edges_x[s_cx+i+1]\n x_edges[n_cx+1] = new_edges_x[ix+1]\n\n y_edges[0] = new_edges_y[iy]\n for j in range(n_cy):\n y_edges[j+1] = edges_y[s_cy+j+1]\n y_edges[n_cy+1] = new_edges_y[iy+1]\n\n z_edges[0] = new_edges_z[iz]\n for k in range(n_cz):\n z_edges[k+1] = edges_z[s_cz+k+1]\n z_edges[n_cz+1] = new_edges_z[iz+1]\n\n # Loop over each (partial) cell of the original grid which\n # contributes to the current cell of the new grid and add its\n # (partial) value.\n for k in range(n_cz+1):\n dz = np.diff(z_edges[k:k+2])[0]\n k += s_cz\n\n for j in range(n_cy+1):\n dyz = dz*np.diff(y_edges[j:j+2])[0]\n j += s_cy\n\n for i in range(n_cx+1):\n dxyz = dyz*np.diff(x_edges[i:i+2])[0]\n i += s_cx\n\n # Add this cell's contribution.\n new_values[ix, iy, iz] += values[i, j, k]*dxyz\n\n # Normalize by new_grid-cell volume.\n new_values[ix, iy, iz] /= hxyz", "title": "" }, { "docid": "4553c25baba1403ac2b7d17651ee675e", "score": "0.57287747", "text": "def volume(self) -> int:\n raise NotImplementedError", "title": "" }, { "docid": "b318d9f0dd39d22561ce7275f3dd8c69", "score": "0.570143", "text": "def volume(self):\n return float(self.get()[2])", "title": "" }, { "docid": "711a781ef8c898b3d0b59e6f93908a02", "score": "0.56853807", "text": "def box_volume(a, b, c):\n return a * b * c", "title": "" }, { "docid": "ac1947d000945fe9259e3121a43539c8", "score": "0.5682677", "text": "def volume(self): \n return 4*np.pi", "title": "" }, { "docid": "1dd5f2bff88655acb51fe468e83474fb", "score": "0.5667204", "text": "def get_volume(self) -> float:\n length = self.GENBOX.length\n return length[0] * length[1] * length[2]", "title": "" }, { "docid": "5e544e3bd078f1ab61eadd7ea7432073", "score": "0.56579477", "text": "def volume(self):\n area = self.area\n if self.unit.lower() == \"mm\":\n factor = area/1000.\n elif self.unit.lower() == \"m\":\n factor = area\n else:\n raise ValueError(\"self.unit should be either 'mm' or 'm'\")\n \n self.data.multiply(factor)\n self.unit = \"m3\"", "title": "" }, { "docid": "ffbce55377102f61066869e2701cb392", "score": "0.5644579", "text": "def average(self) -> float:\n return pulumi.get(self, \"average\")", "title": "" }, { "docid": "ffbce55377102f61066869e2701cb392", "score": "0.5644579", "text": "def average(self) -> float:\n return pulumi.get(self, \"average\")", "title": "" }, { "docid": "865009d74a76f22bf0c95b43eec9c556", "score": "0.5638755", "text": "def test_absolute_volume(self):\n\n assert self.test_shape.volume == (pytest.approx((10 * 20 * math.pi * (2 * 100)) + (\n 15 * 25 * math.pi * (2 * 200)) + (5 * 30 * math.pi * (2 * 300))))", "title": "" }, { "docid": "5ff969cb804b87c31d3374283a187528", "score": "0.56287867", "text": "def average(a):\r\n return sum(a,0.0)/len(a)", "title": "" }, { "docid": "e9d9bde9147dd8d9b4561effed658a84", "score": "0.5609936", "text": "def volume_avg(res, today):\n current_day = today\n days_counted = 0\n total = 0\n day10vol = 0\n day90vol = 0\n while days_counted < 90:\n try:\n current_day = next_day_back(res, current_day)\n except 
av_loader.TimeoutException:\n break\n total = total + float(res['Time Series (Daily)'][str(current_day)][\"5. volume\"])\n days_counted = days_counted + 1\n if days_counted == 10:\n day10vol = total / 10\n elif days_counted == 90:\n day90vol = total / 90\n return day10vol, day90vol", "title": "" }, { "docid": "90e588056c79ebfd4219a87cef0c7853", "score": "0.5604544", "text": "def vapor_volfrac(self):\n return fraction(self._vol[3]).view(tuple_array)", "title": "" }, { "docid": "28cdcd0c853c95057bdede9f9536529f", "score": "0.55953336", "text": "def volume(radii, ball, dimension):\n p = float(dimension)\n b = getball(ball)\n return (2*gamma(1/b + 1)*radii)**p / gamma(p/b + 1)", "title": "" }, { "docid": "6821151971a0b8e49f320ba81fa17978", "score": "0.5594544", "text": "def average(self):\n return pq.Quantity(\n float(self._get('average')),\n self.units\n )", "title": "" }, { "docid": "37e0265f38e889748603262ae8c44837", "score": "0.5589852", "text": "def mov_average(st):\n return sum(st)/len(st)", "title": "" }, { "docid": "90299ba8eaafedd2186df16277e0a0b4", "score": "0.55614257", "text": "def volume(self):\n return sum(s.volume for s in self.iter_sections())", "title": "" }, { "docid": "aa32ae7225f7eeb905c393597d98bd54", "score": "0.5526001", "text": "def normalize_volume(audio: np.array, volume_level: float) -> np.array:\n if not (0.0 <= volume_level <= 1.0):\n raise ValueError(f\"Volume must be in range [0.0, 1.0], received {volume_level}\")\n\n max_sample = np.max(np.abs(audio))\n if max_sample == 0:\n return audio\n\n return volume_level * (audio / np.max(np.abs(audio)))", "title": "" }, { "docid": "5965aee2dbb0de7d96b6340ec48c2647", "score": "0.55202496", "text": "def update_average(old_avg: float, old_num: int, new_avg: float, new_num: int):\r\n old_sum = old_avg * old_num\r\n new_sum = new_avg * new_num\r\n updated_sum = old_sum + new_sum\r\n updated_num = old_num + new_num\r\n updated_avg = updated_sum / updated_num\r\n return updated_avg", "title": "" }, { "docid": "8c8224f721a42b65b360220c8d3b0eba", "score": "0.55144864", "text": "def trap(height: List[int]) -> int:\n if not height:\n return 0\n\n l, r = 0, len(height) - 1\n l_max, r_max = height[0], height[-1]\n volume = 0\n\n while l <= r:\n l_max = max(l_max, height[l])\n r_max = max(r_max, height[r])\n\n if l_max < r_max:\n volume += l_max - height[l]\n l += 1\n else:\n volume += r_max - height[r]\n r -= 1\n\n return volume", "title": "" }, { "docid": "0bd4944120ebceba6b47b93a9267a9d6", "score": "0.55132073", "text": "def mean(self):\n return 0.5 * (self.lb + self.ub)", "title": "" }, { "docid": "c8a625ce838bf511e85fe79b2ef7f8ab", "score": "0.5509434", "text": "def average(self) -> float:\n if self.is_empty():\n return 0.0\n else:\n nodes = len(self)\n total = self.sum_values()\n return total / nodes", "title": "" }, { "docid": "cbaf35850c5ff6a0cd474fb3f77f6bdc", "score": "0.5505576", "text": "def _yz_average(self, var):\n return var", "title": "" }, { "docid": "3e1ccd6f65c8ebe67df55b313232bc81", "score": "0.5502571", "text": "def calc_vert_average(self, u):\n H = self.S - self.B\n uhat = self.vert_integrate(u)\n s = \"::: calculating vertical average :::\"\n print_text(s, self.color)\n ubar = project(uhat/H, self.Q)\n print_min_max(ubar, 'ubar')\n ubar = self.extrude(ubar, [2,6], 2)\n return ubar", "title": "" }, { "docid": "402527f288c94618bc1ba595e6c3b50a", "score": "0.54967636", "text": "def get_average(data):\n return sum(copy.copy(data))/len(data)", "title": "" }, { "docid": "b165e9704cb75d7a88abb84e94f71730", "score": 
"0.54942125", "text": "def middle_value(self) -> float:\n return (self.left + self.right) / 2", "title": "" }, { "docid": "b08bd806a9710d6ab80e48de47f431f7", "score": "0.54914445", "text": "def volume_ellipsoid(aspect):\n return 4.0*np.pi*aspect[0]*aspect[1]*aspect[2]/3.0", "title": "" }, { "docid": "1901d440bd07b8701e1a6f0cf4382794", "score": "0.5490675", "text": "def test_volume_invariance(self):\n volume = np.abs(self.tfr[self.doppler == 0, self.lag == 0]) ** 2\n volume_integral = (np.abs(self.tfr) ** 2).sum().sum()\n self.assertAlmostEqual(volume[0],\n volume_integral / (self.signal.shape[0] / 2))", "title": "" }, { "docid": "88685c4415c7d52b3df5f7704ea47ad8", "score": "0.54899865", "text": "def LIQUID_volfrac(self):\n return fraction(self._vol[2]).view(tuple_array)", "title": "" }, { "docid": "dcb200b056cf4fe8e9ded45810b8c905", "score": "0.54838204", "text": "def vol_mean(data):\n mean_list = []\n # Loop over the each volume and outputs the mean of each dimension\n for i in range(data.shape[-1]):\n mean = np.mean(data[...,i])\n mean_list.append(mean)\n return np.asarray(mean_list)", "title": "" }, { "docid": "9c64b84818dec0d3a2711e07209d6e8f", "score": "0.54822034", "text": "def normalize_volume(audio: np.array, volume_level: float) -> np.array:\n if not (0.0 <= volume_level <= 1.0):\n raise ValueError(f\"Volume must be in range [0.0, 1.0], received {volume_level}\")\n\n if audio.size == 0:\n return audio\n\n max_sample = np.max(np.abs(audio))\n if max_sample == 0:\n return audio\n\n return volume_level * (audio / np.max(np.abs(audio)))", "title": "" }, { "docid": "6a6005ba7b8932fdc47271865f312ffa", "score": "0.5478313", "text": "def get_volume(self):\r\n if self._software_mixing:\r\n return int(round(self._playbin.get_property('volume') * 100))\r\n\r\n if self._mixer is None:\r\n return None\r\n\r\n volumes = self._mixer.get_volume(self._mixer_track)\r\n avg_volume = float(sum(volumes)) / len(volumes)\r\n\r\n internal_scale = (0, 100)\r\n\r\n if self._volume_set is not None:\r\n volume_set_on_mixer_scale = self._rescale(\r\n self._volume_set, old=internal_scale, new=self._mixer_scale)\r\n else:\r\n volume_set_on_mixer_scale = None\r\n\r\n if volume_set_on_mixer_scale == avg_volume:\r\n return self._volume_set\r\n else:\r\n return self._rescale(\r\n avg_volume, old=self._mixer_scale, new=internal_scale)", "title": "" }, { "docid": "0b90988951b403c91f19250beb8e468c", "score": "0.54727787", "text": "def avg_current(w_in: np.ndarray, length: float, w_out: np.ndarray) -> None:\n\n w_out[:] = np.nan\n\n if np.isnan(w_in).any():\n return\n\n if not length >= 0 or not length < len(w_in):\n raise DSPFatal(\n \"length is out of range, must be between 0 and the length of the waveform\"\n )\n\n w_out[:] = w_in[int(length) :] - w_in[: -int(length)]\n w_out /= length", "title": "" }, { "docid": "abc131900aeb0241e47b8ebf243a7ff5", "score": "0.54701877", "text": "def normalize(self):\n max_possible = float(_get_max_value(self.sample_width))\n scale_factor = max_possible/self.max_amplitude\n self.adjust_volume(scale_factor)", "title": "" }, { "docid": "4688e27b19b53015062ae0302adcab45", "score": "0.54645807", "text": "def average(values):\n\treturn sum(values)/len(values)", "title": "" }, { "docid": "eb2b960d8283ad7e14b0c9c3dee16f57", "score": "0.54626715", "text": "def set_base_volume(self):\r\n test_recording = self.volume_test_recording()\r\n dB = self.calculate_volume_rms(test_recording)\r\n print(\"Registered volume: \" + str(dB))\r\n dB += 5\r\n print(\"Setting treshold: \" + str(dB))\r\n 
self.minimal_volume = dB", "title": "" }, { "docid": "b8980ace088fc8c564bbe4a6b06c843b", "score": "0.5460143", "text": "def calc_volume(self, decibel):\n return abs(self._min_volume - decibel) / abs(\n self._min_volume - self._max_volume)", "title": "" }, { "docid": "c9441d5be1e1a3a0028bebbe5d243571", "score": "0.5429265", "text": "def liquid_volfrac(self):\n return fraction(self._vol[1]).view(tuple_array)", "title": "" }, { "docid": "b2e18c72ddd08f03f507616174f34dfe", "score": "0.5419798", "text": "def __call__(self):\n if settings.get(\"sensor\", \"direction\") == \"+\":\n self.sensor.values.put(np.sum(self.avg_vector * self.sensor.buffer.data[0:self.avg]) / self.avg_div)\n else:\n self.sensor.values.put(1-np.sum(self.avg_vector * self.sensor.buffer.data[0:self.avg]) / self.avg_div)", "title": "" }, { "docid": "bd53c1cfc19a31792046bb3673fbffed", "score": "0.54193914", "text": "def average(data):\n return sum(data) / len(data)", "title": "" }, { "docid": "7c8a1b149a14e32a4d349570fc5ce7dc", "score": "0.541639", "text": "def rolling_average(oldAverage, newItem, oldAverageCount):\n return float((newItem + (oldAverageCount * (oldAverage))) /\n (oldAverageCount + 1))", "title": "" }, { "docid": "67fc5838cfa75b42fa352d7b6de43210", "score": "0.5411967", "text": "def volume_l(cfg, height_mm):\n if cfg['volume_method'] == 'linear':\n return height_mm * float(cfg['max_volume_l']) / float(cfg['max_height_mm'])\n elif cfg['volume_method'] == 'oval':\n return waterbag_cut_mm2(height_mm, float(cfg['flat_width_mm'])) \\\n * float(cfg['max_volume_l']) \\\n / waterbag_cut_mm2(float(cfg['max_height_mm']), float(cfg['flat_width_mm']))\n return -1", "title": "" }, { "docid": "e222a92269b6c29433a2271bd67fb54d", "score": "0.5408909", "text": "def inner_volume(self, u = Constant((0.0, 0.0, 0.0))) :\n\n # current inner volume\n domain = self.domain\n X = SpatialCoordinate(self.mesh)\n N = FacetNormal(self.mesh)\n\n x = X + u\n F = grad(x)\n n = cofac(F) * N\n\n ds_endo = ds(self.markers[\"lv\"], subdomain_data = self.subdomains)\n \n Vendo_form = -1/float(3) * inner(x, n) * ds_endo\n\n return assemble(Vendo_form)", "title": "" }, { "docid": "fa881da3ef440335a1c0271e1aa1df2e", "score": "0.54074514", "text": "def calc_average(last, current, share_of_use):\n return last + current * share_of_use", "title": "" }, { "docid": "6166d3b9fac73d1973fe6b15057d0d31", "score": "0.540639", "text": "def total_volume(self):\n total_vol = 0\n all_vols = []\n # updated 2014-01-21 for compatibility with new btstructs\n for Node in self._all_nodes:\n n = Node.content['p3d']\n if Node.index not in (1, 2, 3):\n p = Node.parent.content['p3d']\n H = np.sqrt(np.sum((n.xyz - p.xyz) ** 2))\n vol = np.pi * n.radius * n.radius * H\n all_vols.append(vol)\n total_vol = total_vol + vol\n return total_vol, all_vols", "title": "" }, { "docid": "70251333cebc428fa964c34df8cba44f", "score": "0.53989965", "text": "def total_volume(self):\n total_vol = 0\n all_vols = []\n # upated 2014-01-21 for compatibility with new btstructs2\n for node in self._all_nodes:\n n = node.content['p3d']\n if not node.index in (1, 2, 3):\n p = node.parent.content['p3d']\n H = np.sqrt(np.sum((n.xyz - p.xyz) ** 2))\n vol = np.pi * n.radius * n.radius * H\n all_vols.append(vol)\n total_vol = total_vol + vol\n return total_vol, all_vols", "title": "" }, { "docid": "01946ec54440dfa5009979333ec31708", "score": "0.5398465", "text": "def total_volume(self, value):\n self._total_volume = round(value, 1)", "title": "" }, { "docid": "343ab1bc11c4c41163005d2fd1f8efb3", "score": 
"0.5387557", "text": "def average(values):\r\n return sum(values) / len(values) if values else 0", "title": "" }, { "docid": "e2a4ac06e137cb5cd369023852da1cde", "score": "0.5387263", "text": "def get_mean_curverad(left_radii, right_radii):\n return np.mean(list(left_radii) + list(right_radii))", "title": "" }, { "docid": "719ad5b2e8b448311f7e1dd10d58c848", "score": "0.5386788", "text": "def get_mean(self):\n if sys.platform == 'win32':\n nul = \"NUL\"\n else:\n nul = \"/dev/null\"\n\n cmd = '\"' + self.ffmpeg_cmd + '\" -i \"' + self.input_file + '\" -filter:a \"volumedetect\" -vn -sn -f null ' + nul\n\n output = run_command(cmd)\n\n logger.debug(output)\n\n mean_volume_matches = re.findall(r\"mean_volume: ([\\-\\d\\.]+) dB\", output)\n if mean_volume_matches:\n self.mean_volume = float(mean_volume_matches[0])\n else:\n raise ValueError(\"could not get mean volume for \" + self.input_file)\n\n max_volume_matches = re.findall(r\"max_volume: ([\\-\\d\\.]+) dB\", output)\n if max_volume_matches:\n self.max_volume = float(max_volume_matches[0])\n else:\n raise ValueError(\"could not get max volume for \" + self.input_file)\n\n logger.info(\"mean volume: \" + str(self.mean_volume))\n logger.info(\"max volume: \" + str(self.max_volume))", "title": "" }, { "docid": "5391f10382d2862c6f007093d141e878", "score": "0.53862816", "text": "def normalize(self):\n source = self.internal_pop()\n M = source.max()\n M = 1.0 if M == 0.0 else M\n self.internal_push(source/M)", "title": "" }, { "docid": "7759393d90af4741c27b65cf8017a393", "score": "0.5382786", "text": "def avg(x):\n \n temp = sum(x) / len(x)\n temp = round(temp, 3)\n return temp", "title": "" }, { "docid": "da976ddf5c7879e4a4e8acd3bb94998d", "score": "0.53807884", "text": "def normalise(self):\n\t\tmin = sp.amin(self.data)\n\t\tmax = sp.amax(self.data)\n\t\tmean = (max + min) / 2.0\n\t\trange = max - min\n\t\tself.data = (self.data - mean) / (range / 2.0)", "title": "" }, { "docid": "c78c178a206f6c587eba8ae7fa2ec186", "score": "0.53789306", "text": "def volume_level(self):\n return int(self._volume) / MAX_VOL", "title": "" }, { "docid": "b4d1489f68a89b7f7d1e82b398c5437a", "score": "0.53781295", "text": "def update_volume(self, volume_=1.0):\r\n # SET THE VOLUME IN CASE OF AN INPUT ERROR\r\n if 0.0 >= volume_ >= 1.0:\r\n volume_ = 1.0\r\n # SET THE VOLUME FOR ALL SOUNDS\r\n for c in self.channels:\r\n c.set_volume(volume_)", "title": "" }, { "docid": "5811404f7e1d8c175ea67c539bceddd3", "score": "0.5375449", "text": "def average(l):\r\n return sum(l)/len(l)", "title": "" }, { "docid": "39dac7d46e62e05ef9626e47e693d9c2", "score": "0.53708494", "text": "def inv_sig_rescale(volume, width, level):\r\n\r\n epsilon = np.finfo(np.float).eps\r\n # Shifts the extramas values {0, 1} by epsilon to avoid ZeroDivision and log(0)\r\n volume[volume <= epsilon] = epsilon\r\n volume[volume >= 1 -epsilon] = 1 -epsilon\r\n\r\n return (-width *np.log(1./volume -1) +level)", "title": "" }, { "docid": "e68bf0690967668cf7533077a783ef12", "score": "0.53695846", "text": "def update_avg(self):\n avg_val = f'{np.round(np.average(self.gv.data_queue[self.ch]), 2)} Vrms'\n self.label.setText(avg_val)", "title": "" }, { "docid": "dbbe38d85a99c045e994e2efc4f62167", "score": "0.53654575", "text": "def get_volume(self):\n if self.volume == 0:\n self.gather_pixels()\n \n return self.volume", "title": "" }, { "docid": "c3431432e3ff7ea7f5e848e4bdc3033b", "score": "0.53653103", "text": "def get_avg_v_3d(self):\n n = Node.from_array(np.mean(self.velocity, axis=0)).as_unit_vector()\n if 
n.x() < -0.1:\n n.x(-0.1)\n elif n.x() > 0.1:\n n.x(0.1)\n if n.y() < -0.1:\n n.y(-0.1)\n elif n.y() > 0.1:\n n.y(0.1)\n return n", "title": "" }, { "docid": "3c2eac08a3e46bf43dd2a6156d5aac3e", "score": "0.5361149", "text": "def normed_to_average(self, data=None):\n if data is None:\n data = self.get_raw_data()\n series_list = []\n for i in data:\n series_list.append(data[i] / data[i].mean())\n return pandas.concat(series_list, axis=1)", "title": "" }, { "docid": "ec62729f10bdef8edecb5e0f2496d4b6", "score": "0.53606987", "text": "def sphere_volume(r):\n return (4 * 3.14159 / 3)*r**3", "title": "" }, { "docid": "5a2f7771405f707eac411765bccb735e", "score": "0.53474855", "text": "def volume_function(self, x: numpy.ndarray, dv: numpy.ndarray) -> float:\n # Filter design variables\n self.filter_variables(x)\n\n # Volume sensitivities\n dv[:] = 1.0\n\n # Sensitivity filtering\n self.filter.filter_volume_sensitivities(self.xPhys, dv)\n\n return self.xPhys.sum() - self.volfrac * x.size", "title": "" }, { "docid": "71dbc5ce4a858a4f9163a56c5be8fdee", "score": "0.53466153", "text": "def getAverage(self):\r\n if self.count == 0:\r\n return 0\r\n return self.total/self.count", "title": "" }, { "docid": "e0ca830353a6db41260d66ebc7e25c97", "score": "0.5344648", "text": "def value_volumes(self):\n scores = {\n (float(\"-inf\"), 10000): 0,\n (10000, 30000): 1,\n (30000, 50000): 2,\n (50000, 100000): 3,\n (100000, 250000): 4,\n (250000, 500000): 5,\n (500000, 750000): 6,\n (750000, 1000000): 8,\n (1000000, 2000000): 9,\n (2000000, float(\"inf\")): 10,\n }\n try:\n for lims in scores.keys():\n if (self.volume >= lims[0]) and (self.volume < lims[1]):\n print(f\"Volumes: {self.volume} -> {scores[lims]}\")\n return scores[lims]\n except (AttributeError, JSONDecodeError):\n print(\"value_volumes failed.\\nSetting value to 0 and moving on...\")\n return 0\n return 0", "title": "" }, { "docid": "0b78bf4f07e2b9aa765547ffbcffa46e", "score": "0.53427577", "text": "def get_average_color(self, blocks):\n total = np.array([(0,0,0) for _ in range(self.vector_size)])\n count = 0\n \n for vector in blocks:\n total += vector\n count += 1\n\n if count == 0:\n return random.choice(self.blocks)\n\n return total//count", "title": "" }, { "docid": "eb829e24f3d356cd68b58153ad6bcaed", "score": "0.53392994", "text": "def signal_loss(normalized_file, mask_file, roi_mask_file, verbose=0):\n\n mask_roi_data = nibabel.load(roi_mask_file).get_data()\n mask_data = nibabel.load(mask_file).get_data()\n data = nibabel.load(normalized_file).get_data()\n\n if len(data.shape) == 4:\n if verbose > 0:\n print \"Compute mean image from a 4D-volume\"\n mean_volume = np.mean(data, axis=3)\n else:\n mean_volume = data\n\n if verbose:\n print \"the volume shape is {}\".format(mean_volume.shape)\n\n return float(np.mean(data * mask_roi_data) / np.mean(data * mask_data))", "title": "" }, { "docid": "b22f08fca073812064411a7f551a24bc", "score": "0.5335305", "text": "def set_storage_value(self, volume):\n self.tank_reader.set_calibrate(volume)", "title": "" }, { "docid": "169ef6060fca7202f5294284892771a5", "score": "0.5334305", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "169ef6060fca7202f5294284892771a5", "score": "0.5334305", "text": "def average(values):\n return sum(values) / len(values)", "title": "" }, { "docid": "cf91fa7fb90d3fcc809765a38e9640f3", "score": "0.5330706", "text": "def filter_volume(self, value):\n\n if (self._volume - value >= 0):\n self._volume -= value\n return float(value)\n else:\n 
print('<<!!>> The tank does not have this volume available <<!!>>')", "title": "" }, { "docid": "00582896c308cda2f85ed1075c809065", "score": "0.5328858", "text": "def centered_average(nums):\n pass", "title": "" }, { "docid": "192d12d6b322c0587aefa97f52a76b58", "score": "0.5328372", "text": "def average(values):\n return sum(values) / len(values)", "title": "" } ]
5249c3119d444b60b8c04a101e4826a4
Gets the number_of_downloads of this DownloadableDataLinkInterface. Number of downloads per user
[ { "docid": "0d12b58cb5ce76b3128a478964dc9dea", "score": "0.7901032", "text": "def number_of_downloads(self) -> int:\n return self._number_of_downloads", "title": "" } ]
[ { "docid": "8d49586cfa39b76befcca53789bce95b", "score": "0.6652608", "text": "def download_count(self):\n pass", "title": "" }, { "docid": "663700e1e8ba46404a911ca9c9716260", "score": "0.63293475", "text": "def user_count(self):\n return self._n_users", "title": "" }, { "docid": "b04d75e05e23f995170c2e1cb9166d50", "score": "0.60958946", "text": "def get_links_count(self):\n return sum(n * count for n, count in self._n_links_counter.items()) / 2", "title": "" }, { "docid": "c18a23524d9ebd05e91aa99795490994", "score": "0.6088981", "text": "def userCount(self):\n return len(self._users)", "title": "" }, { "docid": "c18a23524d9ebd05e91aa99795490994", "score": "0.6088981", "text": "def userCount(self):\n return len(self._users)", "title": "" }, { "docid": "f048d2d8f880e47d71d11a5c889fc906", "score": "0.6045305", "text": "def number_of_links(self) -> int:\n\t\treturn len(self.links_dict)", "title": "" }, { "docid": "09861ae62097fc870c3324befa6a2540", "score": "0.60167664", "text": "def total_linked_devices(self) -> int:\n total = 0\n\n try:\n total = self.get_linked_devices().count()\n except TypeError:\n # queryset wasn't returned\n pass\n\n return total", "title": "" }, { "docid": "f210d7baceae6d21065bbfe3a8611421", "score": "0.5985679", "text": "def number_of_downloads(self, number_of_downloads: int):\n\n self._number_of_downloads = number_of_downloads", "title": "" }, { "docid": "0b356bfdd4bba523d98d9bbe7965af2a", "score": "0.5983417", "text": "def nb_downlinks(self) -> int:\n return len(self._downlink)", "title": "" }, { "docid": "6a4d52dc170de389767d2bf7db2ee055", "score": "0.5974247", "text": "def download_count(self, val):\n pass", "title": "" }, { "docid": "152a3903d48d3e24c3feb241e9199ab6", "score": "0.5965236", "text": "def num_of_files_transferred(self):\r\n self.is_finished\r\n return self._details['jobDetail']['progressInfo']['numOfFilesTransferred']", "title": "" }, { "docid": "a702aca471f084a240e638a35fc35129", "score": "0.5852101", "text": "def number_users(self):\n return len(self.users)", "title": "" }, { "docid": "d6682dc615559d12f22b10fae56211b1", "score": "0.58327174", "text": "def getMaximumNumOfFilesToCache(self):\r\n return _osgSim.DatabaseCacheReadCallback_getMaximumNumOfFilesToCache(self)", "title": "" }, { "docid": "fdba61bda019a10f8f17b945c9ae3c3e", "score": "0.5831231", "text": "def get_cyberlink_count(self, user_id: int):\n with self.connection:\n return self.cursor.execute(\n f\"SELECT count(*) FROM cyberlinks WHERE user_id={user_id}\").fetchall()[0][0]", "title": "" }, { "docid": "ac5430097e827ce0d463e6694f0f5758", "score": "0.5785017", "text": "def get_num_files(self):\r\n return self.nfile", "title": "" }, { "docid": "58ca4507d86ac35b9adb5d35802a6cc0", "score": "0.57831645", "text": "def get_downloaded(self):\n return self.stats.downloaded / 1000.0 / 1000", "title": "" }, { "docid": "740da9a9b693da6b525ba232abaafa22", "score": "0.5779381", "text": "def dataset_count(self):\n\n return len(self.unique_datasets)", "title": "" }, { "docid": "35a49ad273fec3260734fa2320cb4f79", "score": "0.57741046", "text": "def get_number_runs(self):\n return self._myProject.get_number_data_files()", "title": "" }, { "docid": "fc8f459f80d2ca8247cae4925348af3b", "score": "0.5761456", "text": "def followers_number(self):\n return self.user.followers.count()", "title": "" }, { "docid": "0984af12a79b411a7184f957f1f8457d", "score": "0.57577395", "text": "def users_daily_length(self):\n transactions = self.usertrans\n fr = []\n for user in transactions:\n usertrans = 
transactions[user]\n for day in usertrans:\n userdaytrans = usertrans[day]\n fr.append(len(userdaytrans))\n return fr", "title": "" }, { "docid": "430acd1fa9e7842139bd5c568bc16908", "score": "0.575681", "text": "def get_access_count(self):\n return self.access_count", "title": "" }, { "docid": "24b943e84559c64312c05adf9fd7e428", "score": "0.5705114", "text": "def count_users(self) -> int:\n return self._count_model(User)", "title": "" }, { "docid": "f0c176d625786c0fb87cecf31269726e", "score": "0.56616217", "text": "def data_count(self):\n return(len(self.data))", "title": "" }, { "docid": "aeb251966013b58193db9560eab9fa4f", "score": "0.56588066", "text": "def data_file_count(self) -> Optional[int]:\n return pulumi.get(self, \"data_file_count\")", "title": "" }, { "docid": "c4fcbb8a91942edf387c9c5793a45dae", "score": "0.565819", "text": "def count_resources(self):\n total = 0\n for dataset in self.datasets:\n distribution = dataset.get('distribution', [])\n total += len(distribution)\n return total", "title": "" }, { "docid": "40841445418eb97559e0953783605b94", "score": "0.5643116", "text": "def num_of_files(self):\n return len(self.file_list)", "title": "" }, { "docid": "e6df52855167a848b7b9597f3325512f", "score": "0.56399703", "text": "def __len__(self):\n if self.train:\n return usps_dataset_multiplier * self.dataset_size\n else:\n return self.dataset_size", "title": "" }, { "docid": "2979badb75a85792e0f90cf589044632", "score": "0.563319", "text": "def number_of_assets(self) -> int:\n return self.__number_of_assets", "title": "" }, { "docid": "f39d078c8b562e44324e25d467847486", "score": "0.5630828", "text": "def inThreads(self):\n return len(self._download_threads)", "title": "" }, { "docid": "7c1d9b736cc99e61b0fca04528041e16", "score": "0.5623697", "text": "def count(self):\n return self.get_count()", "title": "" }, { "docid": "742fdb38b2a8d1cb8fdbefd243c42f1b", "score": "0.56230986", "text": "def getNumberOfCopiedFiles(self):\n return self._numCopiedFiles", "title": "" }, { "docid": "6e32f140d5241e5e8007c6d00ee210fa", "score": "0.5618403", "text": "def count(self) -> int:\n return pulumi.get(self, \"count\")", "title": "" }, { "docid": "46f64a371dec489aeb2df9f4cd2a8127", "score": "0.56174767", "text": "def getAccessCount(self, flags: EventFileFlags=None):\n if not flags:\n return len(self.accesses)\n\n cnt = 0\n for access in self.accesses:\n if access.evflags & flags:\n cnt += 1\n return cnt", "title": "" }, { "docid": "57c505f149a6686d8400a2939dbd32f4", "score": "0.56170243", "text": "def get_number_of_transfering_threads(self) -> int:\n return self._j_rocks_db_state_backend.getNumberOfTransferingThreads()", "title": "" }, { "docid": "f360e369c488826e05642f0cb8073ed5", "score": "0.5613187", "text": "def getNoOfUrls(self):\n return self.noUrls", "title": "" }, { "docid": "8dbf727505771b8c11ea8728ea7a90f8", "score": "0.5612988", "text": "def num_followers(self) -> int:\n return self.followers.count()", "title": "" }, { "docid": "0768e1848ff8d02999a52b704ad0eae6", "score": "0.56100965", "text": "def max_data_disk_count(self):\r\n return self._max_data_disk_count", "title": "" }, { "docid": "9ae550671e25b686f636c19e2af32b2f", "score": "0.5595734", "text": "def lb_counts(self):\n return sum(iov.length for iov in self)", "title": "" }, { "docid": "4da3dda2fb341216fbe84b4f25e25213", "score": "0.55653983", "text": "def __len__(self) -> int:\n\n\t\treturn np.ceil(len(self._access_cuboids) / self.__batch_size).astype(int)", "title": "" }, { "docid": "a9163ccfa252cdf64fdc09ffa5658578", "score": 
"0.55627435", "text": "def number_of_installments(self):\n return self.__number_of_installments", "title": "" }, { "docid": "2a942dadca581f8945949842412bd201", "score": "0.55583656", "text": "def size(self) -> int:\r\n\r\n if 'urls' in self.metadata:\r\n return len(self.metadata[\"urls\"]) \r\n else:\r\n return 0", "title": "" }, { "docid": "9339cf2481f67ba1b5fc8e99e6031de2", "score": "0.5547967", "text": "def file_count(self) -> int:\n return self.__file_count", "title": "" }, { "docid": "6f2c6b42e0f76becbbb300edf1be0d6d", "score": "0.5545125", "text": "def get_length(self):\r\n\r\n count = 0\r\n itr = self.head\r\n while itr:\r\n count += 1\r\n itr = itr.next\r\n return count", "title": "" }, { "docid": "eae5718568f54f9ac9516bc2539d6a57", "score": "0.5534047", "text": "def device_count(self):\n return len(self.devices)", "title": "" }, { "docid": "348a41311e397e8b00a38450b096a637", "score": "0.5533946", "text": "def num_training_accounts(self):\n return self.__proxy__['num_open_accounts']", "title": "" }, { "docid": "a9123133ff8c92f6cf59a93b73b8079b", "score": "0.5533492", "text": "def count(self):\n return len(self._data)", "title": "" }, { "docid": "6400623e81a482b7b17dcb268cddb0a9", "score": "0.55275077", "text": "def n_data(self) -> int:\n return self.data_set.count()", "title": "" }, { "docid": "ab380f085f709bacea396f541d0c5de9", "score": "0.55212903", "text": "def nfiles(self):\n return len(self.table)", "title": "" }, { "docid": "d227fc9c659d2e178f8d6439b2f010d9", "score": "0.552125", "text": "def get_counts(self):\n count = self.aio.read()\n return count", "title": "" }, { "docid": "8ea4332e119c5d7c98152ab9bb9e0a7e", "score": "0.5519515", "text": "def count(self):\n if self._loaded:\n return len(self._data)\n return 0", "title": "" }, { "docid": "b8bbb2aaf12a04aa9e9f24ed9ad2224c", "score": "0.55123925", "text": "def get_count(self):\n return self.count", "title": "" }, { "docid": "b8bbb2aaf12a04aa9e9f24ed9ad2224c", "score": "0.55123925", "text": "def get_count(self):\n return self.count", "title": "" }, { "docid": "9b5aefc4967759aa6c983614203983e9", "score": "0.5512131", "text": "def tot_count(self) -> int:\n return self._n", "title": "" }, { "docid": "b8302f716443dbbcc63c1eaf0965500f", "score": "0.55064225", "text": "def get_number_of_packages(self):\n\n info = Request(self.url_base).get(self.packages_path)[0]\n message = f\"Info request returned: {info}\"\n logging.debug(message)\n return int(info.get('package_count_human', 0))", "title": "" }, { "docid": "dbaffb9ec18789ef16824409a0ad7dfe", "score": "0.55040157", "text": "def count(self) -> int:\n return self._count", "title": "" }, { "docid": "dbaffb9ec18789ef16824409a0ad7dfe", "score": "0.55040157", "text": "def count(self) -> int:\n return self._count", "title": "" }, { "docid": "4a0bfc2cf3f1427b4c6187bed74ed7d0", "score": "0.55019236", "text": "def num_accounts(self):\n # return self._num_accounts\n return self.__proxy__['num_accounts']", "title": "" }, { "docid": "c4ced44c23b4c60bb7ee6c77020fd460", "score": "0.54939896", "text": "def get_number_of_transfer_threads(self) -> int:\n return self._j_state_backend.getNumberOfTransferThreads()", "title": "" }, { "docid": "c241b056e4c9f50b05dc05f28df6969f", "score": "0.5492786", "text": "def get_member_count(self):\n return self.users.all().count()", "title": "" }, { "docid": "0fc9d0ba063825ba96eabd978a4cdd20", "score": "0.54805374", "text": "def __len__(self) -> int:\n return math.ceil(len(self._dataset) / self._batch_size)", "title": "" }, { "docid": 
"0d112a917779916b34db39996fedc9b6", "score": "0.5478255", "text": "def __get_count(self):\n return self.__count", "title": "" }, { "docid": "69e7a9f28475df237c927b2d3dfe1d7e", "score": "0.547524", "text": "def size(self):\r\n return self.counts", "title": "" }, { "docid": "31458751f14c621ab4b7152bf4007ec9", "score": "0.5474895", "text": "def count(self):\n\t\treturn len(self.datasets)", "title": "" }, { "docid": "b0c28e2bd8afb605a29f0a33789ef1a6", "score": "0.54729074", "text": "def feeds(self) -> int:\n return self.__feeds", "title": "" }, { "docid": "ccf9c4fb323fb544e60917d38a23d3d3", "score": "0.5471404", "text": "def nbytes(self):\n dx = self.to_dask_array()\n if math.isnan(dx.size):\n logger.debug(\"Computing data nbytes: Performance may be degraded\")\n dx.compute_chunk_sizes()\n\n return dx.nbytes", "title": "" }, { "docid": "09d50acb3e5e8bdb7fada66b3980c9f8", "score": "0.54704034", "text": "def __len__(self):\n return int(np.floor(len(self.user_baskets) / self.batch_size))", "title": "" }, { "docid": "9aa32a8b790ccf3c1a9031512cbcc068", "score": "0.5468815", "text": "def number_of_members(self):\n\n return np.size(self.cluster_members)", "title": "" }, { "docid": "ab70b61d2f456be6aacc67db9a68a120", "score": "0.54666215", "text": "def cpu_share_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"cpu_share_count\")", "title": "" }, { "docid": "30959678e030fddef3cbf22ca9172727", "score": "0.54632616", "text": "def size(self):\n return self.count", "title": "" }, { "docid": "30959678e030fddef3cbf22ca9172727", "score": "0.54632616", "text": "def size(self):\n return self.count", "title": "" }, { "docid": "30959678e030fddef3cbf22ca9172727", "score": "0.54632616", "text": "def size(self):\n return self.count", "title": "" }, { "docid": "30959678e030fddef3cbf22ca9172727", "score": "0.54632616", "text": "def size(self):\n return self.count", "title": "" }, { "docid": "7d83df90aa775a0404a5674485e1c3dd", "score": "0.54563457", "text": "def n_bytes(self):\n return self.activity.nbytes", "title": "" }, { "docid": "c73f21718a7a0e76b22773ceb78000c9", "score": "0.545463", "text": "def count(self):\r\n info = self.describe()\r\n return info['Table'].get('ItemCount', 0)", "title": "" }, { "docid": "2638db285d898ddf6c762003fe9029f5", "score": "0.5452076", "text": "def getCount(self):\n return self.count", "title": "" }, { "docid": "e9b19ac9aacb4fa0a2d99d338f567d2a", "score": "0.54499453", "text": "def __len__(self) -> int:\n\n return len(self.__links)", "title": "" }, { "docid": "1d0a34e02d2060b1bbe316efe7600053", "score": "0.5446073", "text": "def file_count(self):\n\n return len(self.unique_files)", "title": "" }, { "docid": "fcd4a0786e858cd48fcf75a55e197d92", "score": "0.5442953", "text": "def size(self):\n return self._count", "title": "" }, { "docid": "fcd4a0786e858cd48fcf75a55e197d92", "score": "0.5442953", "text": "def size(self):\n return self._count", "title": "" }, { "docid": "fcd4a0786e858cd48fcf75a55e197d92", "score": "0.5442953", "text": "def size(self):\n return self._count", "title": "" }, { "docid": "fcd4a0786e858cd48fcf75a55e197d92", "score": "0.5442953", "text": "def size(self):\n return self._count", "title": "" }, { "docid": "d7d0ada8916202dcac23f138dfccf45c", "score": "0.54414815", "text": "def count(self) -> float:\n return self._count", "title": "" }, { "docid": "2078cffbf5352fdf9384d61190f7189f", "score": "0.5439156", "text": "def item_count(self):\n return self._n_items", "title": "" }, { "docid": "8a6cc9439a430260ec475d60542ec5df", "score": "0.5437559", "text": 
"def size(self):\n return self.__count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "f5e9158a3865880857b84f79a896ee6b", "score": "0.5433171", "text": "def count(self):\n return self._count", "title": "" }, { "docid": "fb183e3f7f57a69a1cb81a4a8df80324", "score": "0.54328126", "text": "def size(self):\n return self.num_items", "title": "" }, { "docid": "fb183e3f7f57a69a1cb81a4a8df80324", "score": "0.54328126", "text": "def size(self):\n return self.num_items", "title": "" }, { "docid": "fb183e3f7f57a69a1cb81a4a8df80324", "score": "0.54328126", "text": "def size(self):\n return self.num_items", "title": "" }, { "docid": "fb183e3f7f57a69a1cb81a4a8df80324", "score": "0.54328126", "text": "def size(self):\n return self.num_items", "title": "" }, { "docid": "6e0b2db04e3e6c69f074f708ca8f2337", "score": "0.5431334", "text": "def count(self) -> int:\n return len(self.files)", "title": "" }, { "docid": "cda4eceb01f10cc20ded5afab311e589", "score": "0.5430262", "text": "def NumSessions(self):\n return self._get_attribute('numSessions')", "title": "" }, { "docid": "10465f53fca638c24a2c51f878526b35", "score": "0.5423426", "text": "def num_fibers(self):\n return self._num_fibers", "title": "" }, { "docid": "95ff127d3c56810e84bcf9e2260029bb", "score": "0.5421042", "text": "def count(self):\n return self.__count", "title": "" } ]
80629d85ef87ffbba484cd8f9fefc5e2
Uses buildin landlab candy to get aspect values of the model grid
[ { "docid": "77d6d5ae26f283302d810124b22ecf64", "score": "0.58663845", "text": "def calcAspect(self):\n\n aspect = self._grid.calc_aspect_at_node()\n #this was moved to the .reshapeGrid() function for consitency.\n #need to reshape here directly...\n #aspect = aspect.reshape(self._grid.number_of_node_rows,\n # self._grid.number_of_node_columns)\n self._aspect = aspect.reshape(self._grid.number_of_node_rows, self._grid.number_of_node_columns)", "title": "" } ]
[ { "docid": "669f89cb45e23612c55003ca5c1b00e9", "score": "0.6027867", "text": "def _getGridInformation(self):\n # Are there cell areas associated with this model?\n if \"areacella\" not in self.variables.keys(): return\n f = Dataset(self.variables[\"areacella\"][0])\n self.cell_areas = f.variables[\"areacella\"][...]\n self.lat = f.variables[\"lat\"][...]\n self.lon = f.variables[\"lon\"][...]\n self.lat_bnds = np.zeros(self.lat.size+1)\n self.lat_bnds[:-1] = f.variables[\"lat_bnds\"][:,0]\n self.lat_bnds[-1] = f.variables[\"lat_bnds\"][-1,1]\n self.lon_bnds = np.zeros(self.lon.size+1)\n self.lon_bnds[:-1] = f.variables[\"lon_bnds\"][:,0]\n self.lon_bnds[-1] = f.variables[\"lon_bnds\"][-1,1]\n\n # Now we do the same for land fractions\n if \"sftlf\" not in self.variables.keys():\n self.land_areas = self.cell_areas \n else:\n self.land_fraction = (Dataset(self.variables[\"sftlf\"][0]).variables[\"sftlf\"])[...]\n # some models represent the fraction as a percent \n if np.ma.max(self.land_fraction) > 1: self.land_fraction *= 0.01\n np.seterr(over='ignore')\n self.land_areas = self.cell_areas*self.land_fraction\n np.seterr(over='warn')\n self.land_area = np.ma.sum(self.land_areas)\n return", "title": "" }, { "docid": "ff6f9ddae502c5de5e22fcd4e33e9f68", "score": "0.5718548", "text": "def calc_aspect_ratios(self, minor): \n if self.plates:\n # if the crystals are plates do the plate version\n self.ratios = [ cluster.aspect_ratio(method='plate', minor=minor) for cluster in self.clusters ] \n self.major_axis['z'] = [ cl.major_axis['z']/2 for cl in self.clusters ] #this has to be after ratios\n self.depth = [ cl.depth/2 for cl in self.clusters ] #this has to be after ratios -- defined in aspect ratio\n self.req = np.power((np.power(self.major_axis['z'],2)*self.depth),(1./3.)) #major axis from fit ellipse\n \n else:\n self.ratios = [ cluster.aspect_ratio(method='column', minor=minor) for cluster in self.clusters ]\n self.major_axis['z'] = [ cl.major_axis['z']/2 for cl in self.clusters ]\n self.depth = [ cl.depth/2 for cl in self.clusters ]\n self.req = np.power((np.power(self.depth,2)*self.major_axis['z']),(1./3.)) \n \n self.minor = minor", "title": "" }, { "docid": "cd079f118cf728d49ce3224b4076971d", "score": "0.56510586", "text": "def __init__(self):\n self.aspect_ratio = 1.5\n self.aspect_ratio_relative_deviation = 0.35\n\n self.min_area = 20 * 40", "title": "" }, { "docid": "2e26540f0262baf8af714f53e1631b84", "score": "0.5557481", "text": "def interpolate_grid(model, latentBounds, latents : np.array, site_name, coverage_threshold, check_site_boundary, num = 10):\n ##reset the latent grid stored\n grid.reset()\n site_grid.reset()\n \n print(\"generating latent grid...\")\n fig, axs = plt.subplots(num, num, figsize=(16, 16))\n fig.tight_layout()\n \n\n #axes\n xAx = np.linspace(latentBounds.xMin, latentBounds.xMax, num)\n yAx = np.linspace(latentBounds.yMin, latentBounds.yMax, num)\n\n #find the closest grid item to seed design\n closestLatents = find_seeds(latentBounds, xAx, yAx, latents)\n\n #loop over each latent vector\n for index, i in enumerate(xAx):\n for jindex, j in enumerate(yAx) :\n \n latent = torch.tensor( [float(i), float(j)]).to(device)\n # plot_sdf_from_latent( latent, model.forward, show_axis = False, ax = axs[index,jindex])\n \n im, coverage, site_excess = process_latent(model, latent, site_name, grid)\n\n grid.addValue( num - 1 - jindex, index, coverage)\n site_grid.addValue( num - 1 - jindex, index, site_excess)\n\n\n if([i, j] in closestLatents):\n #starts top left\n 
axs[num -1 - jindex, index].imshow(im, cmap = \"copper\")\n\n\n else:\n # im = 255 - im #inverting\n\n #create 4 channel image, set alpha to zero\n im4 = np.stack([im,im,im, np.zeros(im.shape).astype(np.int32)], axis = 2)\n\n #find black pixels and set their alpha to 1\n blacks = im4[:,:,0] == 0\n im4[:,:,3][blacks] = int(255)\n \n from random import randint\n axs[num -1- jindex, index].imshow(im4, vmin=0, vmax=255, cmap=\"RdBu\")\n\n axs[num - 1 - jindex, index].axis(\"off\")\n \n\n #handles heat maps\n grid_is_transparent = False\n\n if(coverage_threshold != False or check_site_boundary != False):\n\n create_heatmap(coverage_threshold)\n grid_is_transparent = True\n \n fig.savefig(os.path.join(dir_image, 'latent_grid.png'), transparent=grid_is_transparent)", "title": "" }, { "docid": "ce901d9c9d82c3c7b7d3578090fc01b0", "score": "0.54688954", "text": "def GetPixelAspectRatio(self) -> float:\n ...", "title": "" }, { "docid": "4e0f39f6cbcd629390bd4b0c8f569e4d", "score": "0.54367006", "text": "def filter1Goodwin_as_aspect_v3(walls, scale, a, feedback, total):\n\n row = a.shape[0]\n col = a.shape[1]\n\n filtersize = np.floor((scale + 0.0000000001) * 9)\n if filtersize <= 2:\n filtersize = 3\n else:\n if filtersize != 9:\n if filtersize % 2 == 0:\n filtersize = filtersize + 1\n\n filthalveceil = int(np.ceil(filtersize / 2.))\n filthalvefloor = int(np.floor(filtersize / 2.))\n\n filtmatrix = np.zeros((int(filtersize), int(filtersize)))\n buildfilt = np.zeros((int(filtersize), int(filtersize)))\n\n filtmatrix[:, filthalveceil - 1] = 1\n n = filtmatrix.shape[0] - 1\n buildfilt[filthalveceil - 1, 0:filthalvefloor] = 1\n buildfilt[filthalveceil - 1, filthalveceil: int(filtersize)] = 2\n\n y = np.zeros((row, col)) # final direction\n z = np.zeros((row, col)) # temporary direction\n x = np.zeros((row, col)) # building side\n walls[walls > 0] = 1\n\n for h in range(0, 180): # =0:1:180 #%increased resolution to 1 deg 20140911\n feedback.setProgress(int(h * total))\n if feedback.isCanceled():\n feedback.setProgressText(\"Calculation cancelled\")\n break\n filtmatrix1temp = sc.rotate(filtmatrix, h, order=1, reshape=False, mode='nearest') # bilinear\n filtmatrix1 = np.round(filtmatrix1temp)\n # filtmatrix1temp = sc.imrotate(filtmatrix, h, 'bilinear')\n # filtmatrix1 = np.round(filtmatrix1temp / 255.)\n # filtmatrixbuildtemp = sc.imrotate(buildfilt, h, 'nearest')\n filtmatrixbuildtemp = sc.rotate(buildfilt, h, order=0, reshape=False, mode='nearest') # Nearest neighbor\n # filtmatrixbuild = np.round(filtmatrixbuildtemp / 127.)\n filtmatrixbuild = np.round(filtmatrixbuildtemp)\n index = 270 - h\n if h == 150:\n filtmatrixbuild[:, n] = 0\n if h == 30:\n filtmatrixbuild[:, n] = 0\n if index == 225:\n # n = filtmatrix.shape[0] - 1 # length(filtmatrix);\n filtmatrix1[0, 0] = 1\n filtmatrix1[n, n] = 1\n if index == 135:\n # n = filtmatrix.shape[0] - 1 # length(filtmatrix);\n filtmatrix1[0, n] = 1\n filtmatrix1[n, 0] = 1\n\n for i in range(int(filthalveceil) - 1, row - int(filthalveceil) - 1): # i=filthalveceil:sizey-filthalveceil\n for j in range(int(filthalveceil) - 1, col - int(filthalveceil) - 1): # (j=filthalveceil:sizex-filthalveceil\n if walls[i, j] == 1:\n wallscut = walls[i - filthalvefloor:i + filthalvefloor + 1,\n j - filthalvefloor:j + filthalvefloor + 1] * filtmatrix1\n dsmcut = a[i - filthalvefloor:i + filthalvefloor + 1, j - filthalvefloor:j + filthalvefloor + 1]\n if z[i, j] < wallscut.sum(): # sum(sum(wallscut))\n z[i, j] = wallscut.sum() # sum(sum(wallscut));\n if np.sum(dsmcut[filtmatrixbuild == 
1]) > np.sum(dsmcut[filtmatrixbuild == 2]):\n x[i, j] = 1\n else:\n x[i, j] = 2\n\n y[i, j] = index\n\n y[(x == 1)] = y[(x == 1)] - 180\n y[(y < 0)] = y[(y < 0)] + 360\n\n grad, asp = get_ders(a, scale)\n\n y = y + ((walls == 1) * 1) * ((y == 0) * 1) * (asp / (math.pi / 180.))\n\n dirwalls = y\n\n return dirwalls", "title": "" }, { "docid": "9ed6b1bab00b91ed511e5befc3838781", "score": "0.5414015", "text": "def updateGrid(self):\n\n self._dem = self._grid.at_node['topographic__elevation']\n self._slope = np.rad2deg(self._grid.at_node['topographic__steepest_slope'])\n self._aspect = self._grid.calc_aspect_at_node()", "title": "" }, { "docid": "7ca91db5a7456bbf0317215d53bd55eb", "score": "0.53400224", "text": "def cam(model, x, threshold=0.3, layer=3, classes=('Fire', 'Neutral', 'Smoke')):\n x = np.expand_dims(x, axis=0)\n\n last_conv_layer = model.get_layer('conv2d_{}'.format(layer))\n predict = model.predict(x)\n class_idx = np.argmax(predict[0])\n print(predict)\n print(class_idx)\n print(classes[int(class_idx)])\n\n class_output = model.output[:, class_idx]\n gap_weights = model.get_layer(\"global_average_pooling2d_1\")\n\n grads = K.gradients(class_output, gap_weights.output)[0]\n iterate = K.function([model.input], [grads, last_conv_layer.output[0]])\n pooled_grads_value, conv_layer_output_value = iterate([x])\n pooled_grads_value = np.squeeze(pooled_grads_value, axis=0)\n # print(pooled_grads_value)\n for i in range(len(pooled_grads_value)):\n conv_layer_output_value[:, :, i] *= pooled_grads_value[i]\n\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n # plt.matshow(heatmap)\n # plt.show()\n heatmap = cv2.resize(heatmap, (x.shape[2], x.shape[1]))\n\n class_area = np.sum(heatmap > threshold)\n\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n\n x = np.squeeze(x, axis=0) * 255.0\n x = x.astype('uint8')[:, :, ::-1] # BGR to RGB\n\n superimposed_img = heatmap * 0.4 + x\n\n combine = np.concatenate((x, heatmap, superimposed_img), axis=1)\n cv2.imwrite('origin_heatmap_cam.png', combine)\n\n cv2.imwrite('Class Activation Map/origin.jpg', x)\n cv2.imwrite('Class Activation Map/heatmap.jpg', heatmap)\n cv2.imwrite('Class Activation Map/cam.jpg', superimposed_img)\n\n return classes[int(class_idx)], class_area", "title": "" }, { "docid": "08a7076368c0624c1ccd88908e94801d", "score": "0.53243625", "text": "def _beam_area_pix(self):\n return self._beam_area() / self._pixel_area()", "title": "" }, { "docid": "177b2257376c0d1cbee8e4cdb43f1e72", "score": "0.53220063", "text": "def visualize_model(self, ax):\n ax.imshow(self.w[1:].reshape(28, -1, order='F').T, cmap='bone')", "title": "" }, { "docid": "3d9217ba9dffc8733711ece687c01427", "score": "0.53156143", "text": "def GetImageAspectRatio(self) -> float:\n ...", "title": "" }, { "docid": "a435cd3f96e6576795281ec5eb6f86bc", "score": "0.5249101", "text": "def plot_fitstatus():\n if (c.niter == 1):\n pylab.figure(1,figsize=(7.5,5.0))\n pylab.subplot(221)\n image1=pylab.imshow(numarray.mlab.rot90(numarray.swapaxes(c.galaxy,0,1)),aspect='preserve')\n image1.autoscale() # This line is necessary to set image1.norm.v*\n c.vmin= image1.norm.vmin\n c.vmax= image1.norm.vmax\n pylab.title(c.imagefile)\n \n pylab.subplot(222)\n pylab.imshow(numarray.mlab.rot90(numarray.swapaxes(c.model_galaxy,0,1)),aspect='preserve',vmin=c.vmin,vmax=c.vmax)\n if (c.niter == 1):\n pylab.colorbar()\n pylab.title('Model Galaxy, Iteration='+str(c.niter))\n 
pylab.subplot(223)\n residual = numarray.zeros((c.nxpts,c.nypts))\n residual2 = numarray.zeros((c.nxpts,c.nypts))\n validpix = where ((c.galaxy-c.background) !=0.0)\n residual[validpix] = c.galaxy[validpix]-c.model_galaxy[validpix]\n residual2[validpix] = c.galaxy[validpix]-c.model_galaxy[validpix]\n notvalidpix = where ((c.galaxy-c.background)==0.0)\n residual2[notvalidpix] = 0.0\n histrange=(c.vmax-c.vmin)/5.0\n pylab.imshow(numarray.mlab.rot90(numarray.swapaxes(residual2,0,1)),aspect='preserve',vmin=-1.0*histrange,vmax=histrange)\n if (c.niter == 1):\n pylab.colorbar()\n pylab.title('Residual = Real-Model')\n \n pylab.subplot(224)\n # the histogram of the data\n \n pylab.hist(residual,bins=arange(-1.0*histrange,histrange),hold=False)\n pylab.title('Difference Histogram')\n if (c.make_movie):\n iternum = \"%04d\" % (c.niter)\n pylab.savefig('iter'+iternum+'.png')", "title": "" }, { "docid": "c144cc23ae03c49b15b0b7c92f685e80", "score": "0.523963", "text": "def _calc(self):\n im = self.res.get_image().copy()\n for src_res, src_map, cells in self.replacements:\n for dst_name, src_name in cells.iteritems():\n dst_box = self.map.get_box(dst_name)\n src_box = src_map.get_box(src_name)\n src_im = src_res.get_image().crop(src_box)\n im.paste(src_im, dst_box)\n return im", "title": "" }, { "docid": "efcc38ab62f01017940433c8c1e38828", "score": "0.5235238", "text": "def compute_model(self, id=0, x=588.28, y=40.54, sh=[10,10], xspec=None, yspec=None, beam='A', verbose=False, in_place=True, outdata=None):\n \n from . import disperse\n import unicorn.utils_c\n \n xc, yc = int(x), int(y)\n xcenter = x - xc\n \n ### Get dispersion parameters at the reference position\n dy, lam = self.conf.get_beam_trace(x=x-self.pad, y=y-self.pad, dx=self.conf.dxlam[beam]+xcenter, beam=beam)\n dyc = np.cast[int](dy+20)-20+1 # 20 for handling int of small negative numbers\n \n ### Account for pixel centering of the trace\n yfrac = dy-np.floor(dy)\n \n ### Interpolate the sensitivity curve on the wavelength grid. 
\n ysens = lam*0\n so = np.argsort(lam)\n ysens[so] = unicorn.utils_c.interp_conserve_c(lam[so], self.conf.sens[beam]['WAVELENGTH'], self.conf.sens[beam]['SENSITIVITY'])/1.e17*np.abs(lam[1]-lam[0])\n if xspec is not None:\n yspec_int = ysens*0.\n yspec_int[so] = unicorn.utils_c.interp_conserve_c(lam[so], xspec, yspec)\n ysens *= yspec_int\n \n #print 'XXX', id, x, y, beam, ysens.max()\n \n x0 = np.array([yc, xc])\n slx = self.conf.dxlam[beam]+xc\n ok = (slx < self.sh_pad[1]) & (slx > 0)\n \n if in_place:\n #self.modelf *= 0\n outdata = self.modelf\n else:\n if outdata is None:\n outdata = self.modelf*0\n \n ### This is an array of indices for the spectral trace\n try:\n idxl = self.idx[dyc[ok]+yc,slx[ok]]\n except:\n if verbose:\n print 'Dispersed trace falls off the image: x=%.2f y=%.2f' %(x, y)\n \n return False\n \n ### Loop over pixels in the direct FLT and add them into a final 2D spectrum \n ### (in the full (flattened) FLT frame)\n ## adds into the output array, initializing full array to zero would be very slow\n status = disperse.disperse_grism_object(self.clip, self.seg, id, idxl, yfrac[ok], ysens[ok], outdata, x0, np.array(self.clip.shape), np.array(sh), np.array(self.sh_pad))\n \n if not in_place:\n return outdata\n else:\n return True", "title": "" }, { "docid": "e7148fa3e15b6496badaa7406d292a60", "score": "0.5230302", "text": "def GetResolutionAspectRatio(self) -> float:\n ...", "title": "" }, { "docid": "10a22ae534b8504d48d8ee8d38c2a5cc", "score": "0.5229373", "text": "def compute_aspect_ratio(tl, tr, bl, br, frame_shape):\n\n h1 = bl[1] - tl[1]\n h2 = br[1] - tr[1]\n w1 = tr[0] - tl[0]\n w2 = br[0] - bl[0]\n h = max(h1, h2)\n w = max(w1, w2)\n\n # image center\n u0 = frame_shape[1] / 2\n v0 = frame_shape[0] / 2\n\n ar_vis = w / h # visible aspect ratio\n\n m1 = np.append(tl, 1)\n m2 = np.append(tr, 1)\n m3 = np.append(bl, 1)\n m4 = np.append(br, 1)\n\n # cross product = prodotto vettoriale\n # dot product = prodotto scalare\n k2 = np.dot(np.cross(m1, m4), m3) / np.dot(np.cross(m2, m4), m3)\n k3 = np.dot(np.cross(m1, m4), m2) / np.dot(np.cross(m3, m4), m2)\n\n n2 = k2 * m2 - m1\n n3 = k3 * m3 - m1\n\n n21, n22, n23 = n2\n n31, n32, n33 = n3\n\n if n23 != 0 and n33 != 0:\n f_squared = -((1 / (n23 * n33)) * ((n21 * n31 - (n21 * n33 + n23 * n31) * u0 + n23 * n33 * (u0 ** 2)) + (\n n22 * n32 - (n22 * n33 + n23 * n32) * v0 + n23 * n33 * (v0 ** 2))))\n\n global focal_length, f_tot, num_f\n if 0 < f_squared < 2000 ** 2 and num_f < 300:\n f = np.sqrt(f_squared) # focal-lenght in pixels\n f_tot += f\n num_f += 1\n focal_length = f_tot / num_f\n f = focal_length\n\n A = np.array([[f, 0, u0], [0, f, v0], [0, 0, 1]], dtype=np.float32)\n\n At = np.transpose(A)\n Ati = np.linalg.inv(At)\n Ai = np.linalg.inv(A)\n\n # calculate the real aspect ratio\n ar_real = np.sqrt(np.dot(np.dot(np.dot(n2, Ati), Ai), n2) / np.dot(np.dot(np.dot(n3, Ati), Ai), n3))\n else:\n ar_real = np.sqrt((n21 ** 2 + n22 ** 2) / (n31 ** 2 + n32 ** 2))\n\n if ar_real < ar_vis:\n w = int(w)\n h = int(w / ar_real)\n else:\n h = int(h)\n w = int(ar_real * h)\n\n return h, w", "title": "" }, { "docid": "57d19e33e1f943213eff5f8b6eb7734c", "score": "0.52072287", "text": "def fixture_grid():\n return load_static_earth_relief()", "title": "" }, { "docid": "4f1397c5c5f169701bf38e0cc51ce37d", "score": "0.52037567", "text": "def viewModel(self):\n # Model dimentions\n x_coord = len(self.root.cellGrid.grid)\n y_coord = len(self.root.cellGrid.grid[0])\n z_coord = self.root.config[\"model\"][\"dimentions\"][\"height\"] \n # Return 
if 2d\n if(z_coord < 2):\n return\n # Plot the result in 3D view\n cells = self.extractCells() # Extract cells from the grid\n matrix = np.zeros((x_coord,y_coord,z_coord)) # define a 3d matrix\n colours = []\n for cell in cells:\n if(not (cell[\"state\"][\"type\"] == -100)): \n x = cell[\"cell_id\"][0]\n y = cell[\"cell_id\"][1]\n if (z_coord > 1): # if 3d add z cell\n if(cell[\"cell_id\"][2] != z_coord-1): #(condition to hide ceiling) \n z = cell[\"cell_id\"][2]\n else:\n continue\n else:\n z = 0 \n matrix[x, y, z] = 1\n for key, clr in self.root.config[\"model\"][\"colours\"].items():\n if(cell[\"state\"][\"type\"] == clr[\"type\"]):\n colours.append(cl.to_rgba(key, alpha=clr[\"alpha\"]/100))\n # Plot the matrix as 3d\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.set_aspect('auto')\n \n x,y,z = np.indices((x_coord,y_coord,z_coord))\n positions = np.c_[x[matrix==1],y[matrix==1],z[matrix==1]]\n pc = self.plotCubeAt(positions, colors=colours,edgecolor=\"black\")\n ax.add_collection3d(pc)\n \n ax.set_xlim([0,x_coord])\n ax.set_ylim([0,y_coord])\n ax.set_zlim([0,max(x_coord,y_coord)])\n\n plt.show()", "title": "" }, { "docid": "553c1c9e93bb3542ac5354a4b2f2daa4", "score": "0.5197436", "text": "def __init__(self, env, img_size=(84,84), crop_height=(0, 210), \n crop_width=(0, 160), channel_weights=[1.0, 1.0, 1.0]):\n super().__init__(env)\n self.img_size = (1,) + img_size\n self.crop_height = crop_height\n self.crop_width = crop_width\n self.channel_weights = channel_weights\n self.observation_space = Box(0.0, 1.0, self.img_size)", "title": "" }, { "docid": "8aff5c263bfe5a4c24bb5221cba0048e", "score": "0.51730174", "text": "def model_parameters(self,verbose=True):\n if verbose:\n print('cx: x position of the centre (in pixels)')\n print('cy: y position of the centre (in pixels)')\n print('q: axis ratio (a/b)')\n print('pa: position angle (in degrees)')\n print('z_grad : gradient in dex/kpc')\n print('z_0: central metallicity')\n return ['cx','cy','q','pa','z_grad','z_0']", "title": "" }, { "docid": "cfb733bafb1f50517d86e6ea26a5ab8a", "score": "0.51656216", "text": "def aug_imgs(model, batch, chooseaugs=[0,1,2,3], rand_angle=10, angle=None, save_dir=None):\n #apply same aug as during input\n # no softmax applied\n #receive a batch of size BXCXHXW\n # generate a batch of size augXBXCXHXW\n #after passing through model, get output of augXBXCXHXW, softmax\n # group to BXCXHXW after averaging\n batch = batch.detach()\n # aug=4\n if angle is None:\n angle = np.random.randint(rand_angle)\n\n topil = transforms.ToPILImage()\n totens = transforms.ToTensor()\n # rota = np.random.randint(10)\n scale = round(random.uniform(0.8, 1.4),2)\n shear = list(np.round(np.random.uniform(0, 4, size=2), 2))\n # translate = np.random.randint(2,size=2)\n\n if save_dir is not None:\n no_cols = 1+len(chooseaugs)\n fig, ax = plt.subplots(2, no_cols)\n fig.set_size_inches(6*no_cols,20)\n [axeach.axis(\"off\") for axeach in ax.flatten()]\n\n ax_plt = ax[:, 0] if save_dir is not None else None\n actual = varres(batch, model, 0, 0, axes=ax_plt)#actual output, same as rotation 0 \n augs = [actual]\n if 0 in chooseaugs:\n aug_ind = 0\n ax_plt = ax[:, 1+chooseaugs.index(aug_ind)] if save_dir is not None and aug_ind in chooseaugs else None\n rot = varres(batch, model, 0, param=angle, axes=ax_plt)\n augs.append(rot)\n if 1 in chooseaugs:\n aug_ind = 1\n ax_plt = ax[:, 1+chooseaugs.index(aug_ind)] if save_dir is not None and aug_ind in chooseaugs else None\n # ax_plt = [ax[0, 1+chooseaugs.index(aug_ind)],ax[1, 
1+chooseaugs.index(1)]] if save_dir is not None and 1 in chooseaugs else None\n shearimg = varres(batch, model, aug_ind, param=shear, axes=ax_plt)\n augs.append(shearimg)\n if 2 in chooseaugs:\n aug_ind = 2\n ax_plt = ax[:, 1+chooseaugs.index(aug_ind)] if save_dir is not None and aug_ind in chooseaugs else None\n flip = varres(batch, model, aug_ind, param=None, axes=ax_plt)\n augs.append(flip)\n if 3 in chooseaugs:\n aug_ind = 3\n ax_plt = ax[:, 1+chooseaugs.index(aug_ind)] if save_dir is not None and aug_ind in chooseaugs else None\n scaleaug = varres(batch, model, 3, param=scale, axes=ax_plt)\n augs.append(scaleaug)\n\n if save_dir is not None:\n ax[1, 0].remove()\n plt.savefig(os.path.join(save_dir, \"check.png\"), bbox_inches = 'tight')#, padding=0)\n plt.close(\"all\")\n avg_out = torch.mean(torch.stack(augs,dim=0),dim=0)\n # torch.mean(torch.stack([batchout, img_aug1.view_as(batchout), img_aug2.view_as(batchout), \n # img_aug3.view_as(batchout), img_aug4.view_as(batchout)], dim=0), dim=0)\n return avg_out", "title": "" }, { "docid": "9c3a72b3e95ae61b959b04e9001b7a43", "score": "0.51566494", "text": "def test_get_norm(self):\n self.assertEqual(self.hawkes_kernel_exp.get_norm(), self.intensity)", "title": "" }, { "docid": "b4ed141face6bac8ba3fcfb3b31d15ef", "score": "0.51555634", "text": "def eval_metrics_CV():\n\n target_ims, input_ims = utils.load_phantom_ds(folder_path='PhantomsRadialNoisy/')\n # target_ims, input_ims = utils.load_walnut_ds()\n\n ds = MultiOrbitDataset(input_ims, target_ims, data_augmentation=False)\n norm_dl = DataLoader(ds, batch_size=50, sampler=ValSampler(len(ds), 500)) \n\n models_dir = Path('/media/beta/florian/model_weights')\n metrics = ['SSIM', 'DSC', 'PSNR']\n\n models, model_names = get_models()\n [model.set_normalization(norm_dl) for model in models]\n\n print(eval_init_metrics(metrics, ds))\n metrics_te = []\n for cv in ['01', '03', '05']:\n # if not cv == '01': continue\n task = 'meanVarInit' \n\n [model.msd.load_state_dict(\n torch.load(sorted(models_dir.glob(f'MSD_phantoms/MSD_d{d}_P_{task}_CV{cv}_*/best_*.h5'), key=_nat_sort)[0], map_location='cpu'))\n for model, d in zip(models[:2], [30, 80])]\n\n [model.msd.load_state_dict(\n torch.load(sorted(models_dir.glob(f'UNet_phantoms/UNet_f{f}_P_{task}_CV{cv}_*/best_*.h5'), key=_nat_sort)[0], map_location='cpu'))\n for model, f in zip(models[-4:], [8, 16, 32, 64])] \n\n metrics_te.append(eval_metrics(models, metrics, ds, 50))\n\n plot_metrics_CV(metrics_te, model_names, metrics, ref_metrics=eval_init_metrics(metrics, ds), filename=f'metrics_P_CV{cv}_best.png')", "title": "" }, { "docid": "888f51bb69a4765dfafe3a8b8a8fc2a9", "score": "0.5155248", "text": "def __init__(self, mode=\"bilinear\"):\n super().__init__()\n self.mode = mode\n self.ndims = settings.get_ndims()", "title": "" }, { "docid": "2badb2db8fb00c7a41c0cc92d03518eb", "score": "0.51364374", "text": "def ensemble_grid():\n \n lat_values = numpy.arange(-89.5, 90, 1.0)\n\n latitude = iris.coords.DimCoord(lat_values,\n var_name='lat',\n standard_name='latitude',\n long_name='latitude',\n units='degrees_north',\n coord_system=iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS))\n\n dummy_data = numpy.zeros(len(lat_values))\n new_cube = iris.cube.Cube(dummy_data, dim_coords_and_dims=[(latitude, 0)])\n\n new_cube.coord('latitude').guess_bounds()\n\n return new_cube", "title": "" }, { "docid": "f815fa237589ae2cb01084b4ca4ace69", "score": "0.51313686", "text": "def setup_grid():\n i0t,imt,j0t,jmt = (0000 ,8640, 0, 4320)\n incr = 360.0/imt\n jR = 
np.arange(j0t, jmt)\n iR = np.arange(i0t, imt)\n latvec = ( 90 - jR*incr - incr/2)[::-1]\n lonvec = (-180 + iR*incr + incr/2)\n lons,lats = np.meshgrid(lonvec, latvec)\n grid = pr.geometry.GridDefinition(lons=lons, lats=lats)\n grid.ivec = np.arange(grid.shape[1])\n grid.jvec = np.arange(grid.shape[0])\n grid.iarr,grid.jarr = np.meshgrid(grid.ivec, grid.jvec)\n return grid", "title": "" }, { "docid": "46fdaaa2522966766da40140e6032bc4", "score": "0.51272047", "text": "def GetModelBounds(self):\n ...", "title": "" }, { "docid": "c9dde46a5cc65ad5edb59de59d7b7a7d", "score": "0.512222", "text": "def get_inst_parameters(self):\n model = self.model\n data = self.data\n args = self.args\n (x_train, y_train), (x_test, y_test) = data\n if not os.path.exists(args.save_dir+\"/check\"):\n os.makedirs(args.save_dir+\"/check\")\n \n if not os.path.exists(args.save_dir+\"/check/x_inst.npy\"):\n get_digitcaps_output = K.function([model.layers[0].input],[model.get_layer(\"digitcaps\").output])\n\n get_capsnet_output = K.function([model.layers[0].input],[model.get_layer(\"capsnet\").output])\n\n if (x_train.shape[0]%args.num_cls==0):\n lim = int(x_train.shape[0]/args.num_cls)\n else:\n lim = int(x_train.shape[0]/args.num_cls)+1\n\n for t in range(0,lim):\n if (t==int(x_train.shape[0]/args.num_cls)):\n mod = x_train.shape[0]%args.num_cls\n digitcaps_output = get_digitcaps_output([x_train[t*args.num_cls:t*args.num_cls+mod]])[0]\n capsnet_output = get_capsnet_output([x_train[t*args.num_cls:t*args.num_cls+mod]])[0]\n else:\n digitcaps_output = get_digitcaps_output([x_train[t*args.num_cls:(t+1)*args.num_cls]])[0]\n capsnet_output = get_capsnet_output([x_train[t*args.num_cls:(t+1)*args.num_cls]])[0]\n masked_inst = []\n inst = []\n where = []\n for j in range(0,digitcaps_output.shape[0]):\n ind = capsnet_output[j].argmax()\n inst.append(digitcaps_output[j][ind])\n where.append(ind)\n for z in range(0,args.num_cls):\n if (z==ind):\n continue\n else:\n digitcaps_output[j][z] = digitcaps_output[j][z].fill(0.0)\n masked_inst.append(digitcaps_output[j].flatten())\n masked_inst = np.asarray(masked_inst)\n masked_inst[np.isnan(masked_inst)] = 0\n inst = np.asarray(inst)\n where = np.asarray(where)\n if (t==0):\n x_inst = np.concatenate([inst])\n pos = np.concatenate([where])\n x_masked_inst = np.concatenate([masked_inst])\n else:\n x_inst = np.concatenate([x_inst,inst])\n pos = np.concatenate([pos,where])\n x_masked_inst = np.concatenate([x_masked_inst,masked_inst])\n np.save(args.save_dir+\"/check/x_inst\",x_inst)\n np.save(args.save_dir+\"/check/pos\",pos)\n np.save(args.save_dir+\"/check/x_masked_inst\",x_masked_inst)\n else:\n x_inst = np.load(args.save_dir+\"/check/x_inst.npy\")\n pos = np.load(args.save_dir+\"/check/pos.npy\")\n x_masked_inst = np.load(args.save_dir+\"/check/x_masked_inst.npy\")\n return x_inst,pos,x_masked_inst", "title": "" }, { "docid": "2cb1e2c5c9f5494b8590c129e8f6d246", "score": "0.5120587", "text": "def _set_dimensionless_parameters(self):\n\n # Macroscale Geometry\n self.l_cn = self.L_cn / self.L_x\n self.l_n = self.L_n / self.L_x\n self.l_s = self.L_s / self.L_x\n self.l_p = self.L_p / self.L_x\n self.l_cp = self.L_cp / self.L_x\n self.l_x = self.L_x / self.L_x\n self.l_y = self.L_y / self.L_z\n self.l_z = self.L_z / self.L_z\n self.l_Li = self.L_Li / self.L_x\n self.a_cc = self.l_y * self.l_z\n self.a_cooling = self.A_cooling / (self.L_z ** 2)\n self.v_cell = self.V_cell / (self.L_x * self.L_z ** 2)\n\n self.l = self.L / self.L_x\n self.delta = self.L_x / self.L_z # Aspect ratio\n\n # Tab 
geometry\n self.l_tab_n = self.L_tab_n / self.L_z\n self.centre_y_tab_n = self.Centre_y_tab_n / self.L_z\n self.centre_z_tab_n = self.Centre_z_tab_n / self.L_z\n self.l_tab_p = self.L_tab_p / self.L_z\n self.centre_y_tab_p = self.Centre_y_tab_p / self.L_z\n self.centre_z_tab_p = self.Centre_z_tab_p / self.L_z\n\n # Particle-size distribution geometry\n self.R_min_n = self.R_min_n_dim / self.R_n_typ\n self.R_min_p = self.R_min_p_dim / self.R_p_typ\n self.R_max_n = self.R_max_n_dim / self.R_n_typ\n self.R_max_p = self.R_max_p_dim / self.R_p_typ\n self.sd_a_n = self.sd_a_n_dim / self.R_n_typ\n self.sd_a_p = self.sd_a_p_dim / self.R_p_typ", "title": "" }, { "docid": "df8730d5d37fc91df685fff4466f1e26", "score": "0.5102993", "text": "def test_lens_model_correspondence(self):\n chameleon_lens = ChameleonLens()\n chameleon = Chameleon()\n\n x, y = util.make_grid(numPix=100, deltapix=0.1)\n e1, e2 = 0.0, 0\n w_c, w_t = 0.5, 1.0\n kwargs_light = {\"amp\": 1.0, \"w_c\": w_c, \"w_t\": w_t, \"e1\": e1, \"e2\": e2}\n kwargs_lens = {\"alpha_1\": 1.0, \"w_c\": w_c, \"w_t\": w_t, \"e1\": e1, \"e2\": e2}\n flux = chameleon.function(x=x, y=y, **kwargs_light)\n f_xx, f_xy, f_yx, f_yy = chameleon_lens.hessian(x=x, y=y, **kwargs_lens)\n kappa = 1 / 2.0 * (f_xx + f_yy)\n\n # flux2d = util.array2image(flux)\n # kappa2d = util.array2image(kappa)\n\n npt.assert_almost_equal(flux / np.mean(flux), kappa / np.mean(kappa), decimal=3)", "title": "" }, { "docid": "c4a223cad7b0b7f92799e55e221186fc", "score": "0.5090399", "text": "def resolve_aspect_ratio(self, info, **kwargs):\n return self.width / self.height", "title": "" }, { "docid": "8c99861b3c637af322dd544194d903c7", "score": "0.50894517", "text": "def _computeLightBounds(self):", "title": "" }, { "docid": "e5ea384ba1cb486765f94265f3dcc036", "score": "0.5069659", "text": "def get_params_grid(self):\n alphas_l1 = np.logspace(-6, 3, 50)\n for alpha_l1 in alphas_l1:\n yield {'alpha_l1': alpha_l1}", "title": "" }, { "docid": "cfdc095e7a1a0587a84730b8a38dae79", "score": "0.50689703", "text": "def __init__(self):\n self.samples = []\n self.grid = []\n self.best = 1.0\n \n self.setLogGrid(-4.0, 1.0, 128)", "title": "" }, { "docid": "9715e385214a77cbb9dfa027e69f5204", "score": "0.5066061", "text": "def _set_dimensional_parameters(self):\n\n # Macroscale geometry\n self.L_cn = pybamm.Parameter(\"Negative current collector thickness [m]\")\n self.L_n = pybamm.Parameter(\"Negative electrode thickness [m]\")\n self.L_s = pybamm.Parameter(\"Separator thickness [m]\")\n self.L_p = pybamm.Parameter(\"Positive electrode thickness [m]\")\n self.L_cp = pybamm.Parameter(\"Positive current collector thickness [m]\")\n self.L_x = (\n self.L_n + self.L_s + self.L_p\n ) # Total distance between current collectors\n self.L = self.L_cn + self.L_x + self.L_cp # Total cell thickness\n self.L_y = pybamm.Parameter(\"Electrode width [m]\")\n self.L_z = pybamm.Parameter(\"Electrode height [m]\")\n self.L_Li = pybamm.Parameter(\"Lithium counter electrode thickness [m]\")\n self.A_cc = self.L_y * self.L_z # Area of current collector\n self.A_cooling = pybamm.Parameter(\"Cell cooling surface area [m2]\")\n self.V_cell = pybamm.Parameter(\"Cell volume [m3]\")\n\n # Tab geometry\n self.L_tab_n = pybamm.Parameter(\"Negative tab width [m]\")\n self.Centre_y_tab_n = pybamm.Parameter(\"Negative tab centre y-coordinate [m]\")\n self.Centre_z_tab_n = pybamm.Parameter(\"Negative tab centre z-coordinate [m]\")\n self.L_tab_p = pybamm.Parameter(\"Positive tab width [m]\")\n self.Centre_y_tab_p = 
pybamm.Parameter(\"Positive tab centre y-coordinate [m]\")\n self.Centre_z_tab_p = pybamm.Parameter(\"Positive tab centre z-coordinate [m]\")\n self.A_tab_n = self.L_tab_n * self.L_cn # Area of negative tab\n self.A_tab_p = self.L_tab_p * self.L_cp # Area of negative tab\n\n # Microscale geometry\n # Note: for li-ion cells, the definition of the surface area to\n # volume ratio is overwritten in lithium_ion_parameters.py to be computed\n # based on the assumed particle shape\n self.a_n_dim = pybamm.Parameter(\n \"Negative electrode surface area to volume ratio [m-1]\"\n )\n self.a_p_dim = pybamm.Parameter(\n \"Positive electrode surface area to volume ratio [m-1]\"\n )\n self.b_e_n = pybamm.Parameter(\n \"Negative electrode Bruggeman coefficient (electrolyte)\"\n )\n self.b_e_s = pybamm.Parameter(\"Separator Bruggeman coefficient (electrolyte)\")\n self.b_e_p = pybamm.Parameter(\n \"Positive electrode Bruggeman coefficient (electrolyte)\"\n )\n self.b_s_n = pybamm.Parameter(\n \"Negative electrode Bruggeman coefficient (electrode)\"\n )\n self.b_s_p = pybamm.Parameter(\n \"Positive electrode Bruggeman coefficient (electrode)\"\n )\n\n # Particle-size distribution geometry\n self.R_min_n_dim = pybamm.Parameter(\"Negative minimum particle radius [m]\")\n self.R_min_p_dim = pybamm.Parameter(\"Positive minimum particle radius [m]\")\n self.R_max_n_dim = pybamm.Parameter(\"Negative maximum particle radius [m]\")\n self.R_max_p_dim = pybamm.Parameter(\"Positive maximum particle radius [m]\")\n self.sd_a_n_dim = pybamm.Parameter(\n \"Negative area-weighted particle-size standard deviation [m]\"\n )\n self.sd_a_p_dim = pybamm.Parameter(\n \"Positive area-weighted particle-size standard deviation [m]\"\n )", "title": "" }, { "docid": "8fdaa5d7f73cada6d8c19fa504a0b164", "score": "0.50514126", "text": "def getArea(self):\n\t\treturn self.surf.getArea()", "title": "" }, { "docid": "7496a20d766697db207e04e082650340", "score": "0.5046926", "text": "def CLalpha_models(AR, wingtype, model):\n AR = np.asarray(AR)\n\n if wingtype.lower()=='rectangular_flatplate':\n if model.lower()=='prandtl':\n # Prandtl [3]\n # TODO\n a0=5.989;\n #a0=2*np.pi\n xp=AR\n tau=0.0000055;\n Clalpha=a0/(1+a0/(np.pi*xp)*(1+tau));\n\n elif model.lower()=='helmbold':\n # Helmbold 1942 [3]\n a0=2*np.pi\n xh=AR\n Clalpha=a0/(np.sqrt(1+(a0/(np.pi*xh))**2)+ a0/(np.pi*xh));\n elif model.lower()=='linear':\n # Linear slope\n Clalpha=0.5*np.pi*AR\n\n elif model.lower()=='tuck':\n # Tuck 1993 [1]\n Clalpha=2*np.pi-np.pi*1/AR*(np.log(AR)+2.5620)+1.404*(1/AR)**2 *(np.log(AR)+3.645)\n elif model.lower()=='jones':\n # TODO I might have extracted that from a plot\n # [3]\n Jones =np.array([\n [1.1211,1.4700,1.8628,2.2775,2.6050,2.9435,3.3912,3.9700,4.6692,5.3248,5.7620,6.2867,6.7348,7.3252],\n [1.7463,2.0740,2.3799,2.6967,2.8935,3.0794,3.2981,3.5825,3.8344,4.0426,4.1414,4.2512,4.3392,4.4382]])\n AR = Jones[0,:]+0.05\n Clalpha = Jones[1,:]\n elif model.lower()=='exp1':\n # [3]\n FlatPlateExp = np.array([\n [1.9937,1.9823,1.9941,2.6271,3.0199,3.0087,4.5055,5.0077,5.1065,4.9753,5.9918,6.0023,5.9692,0.5225,0.7067,0.9574,0.9577,0.9570],\n [2.5109,2.6526,2.3801,2.8063,3.1231,3.2103,3.7251,4.0203,3.9223,3.8785,4.1090,4.2508,4.3598,0.7968,1.3313,1.6261,1.5171,1.7461]])\n AR = FlatPlateExp[0,:]+0.05\n Clalpha = FlatPlateExp[1,:]\n\n else:\n raise NotImplementedError('model', model)\n\n else:\n raise NotImplementedError('wingtype', wingtype)\n\n return AR,Clalpha", "title": "" }, { "docid": "b276ae3e069fb6b985675291b8ce19d2", "score": "0.504204", "text": 
"def main():\n model = ESANetOneModality()\n print(model)\n\n model.eval()\n # rgb_image = torch.randn(1, 3, 480, 640)\n rgb_image = torch.randn(1, 3, 1080, 1920)\n\n from torch.autograd import Variable\n\n inputs_rgb = Variable(rgb_image)\n with torch.no_grad():\n output = model(inputs_rgb)\n print(output.shape)", "title": "" }, { "docid": "5bea0e621f0fa281dce4cc1c1e640dd3", "score": "0.5036274", "text": "def doParametersOfInterest(self):\n xsecs = {\n \"sigma1_HZZ\": 290.58626,\n \"sigma3_HZZ\": 581.17253,\n# \"sigma3_HZZ\": 44.670158,\n \"sigma1_VBF\": 968.674,\n \"sigma3_VBF\": 10909.54,\n \"sigma1_ZH\": 9022.36,\n \"sigma3_ZH\": 434763.7,\n \"sigma1_WH\": 30998.54,\n \"sigma3_WH\": 2028656, \n \"sigma2_HZZ\": 105.85594,\n \"sigmaL1_HZZ\": 1.9846071e-06,\n \"sigma2_VBF\": 13102.71,\n \"sigmaL1_VBF\": 2.08309E-4,\n \"sigma2_ZH\": 713123,\n \"sigmaL1_ZH\": 33652.46e-6,\n \"sigma2_WH\": 3106339,\n \"sigmaL1_WH\": 11234.91e-5\n }\n\n self.modelBuilder.doVar(\"CMS_zz4l_fai1[0.0,-1.0,1.0]\");\n self.modelBuilder.doVar(\"muf[1.0,0,10]\");\n self.modelBuilder.doVar(\"muV[1.0,0.0,10.0]\");\n #self.modelBuilder.doSet(\"POI\",\"CMS_zz4l_fai1,muV,muf\")\n self.modelBuilder.doSet(\"POI\",\"CMS_zz4l_fai1,muV,muf\")\n# self.modelBuilder.out.var(\"muf\").setAttribute(\"flatParam\")\n \n self.modelBuilder.doVar('expr::a1(\"sqrt(1-abs(@0))\", CMS_zz4l_fai1)')\n self.modelBuilder.doVar('expr::a3(\"(@0>0 ? 1 : -1) * sqrt(abs(@0)*{sigma1_HZZ}/{sigma3_HZZ})\", CMS_zz4l_fai1)'.format(**xsecs))\n\n self.modelBuilder.factory_('expr::smCoupling_VBF(\"@0*@1**2 - @0*@1*@2*sqrt({sigma3_VBF}/{sigma1_VBF})\", muV,a1,a3)'.format(**xsecs))\n self.modelBuilder.factory_('expr::smCoupling_ZH(\"@0*@1**2 - @0*@1*@2*sqrt({sigma3_ZH}/{sigma1_ZH})\", muV,a1,a3)'.format(**xsecs))\n self.modelBuilder.factory_('expr::smCoupling_WH(\"@0*@1**2 - @0*@1*@2*sqrt({sigma3_WH}/{sigma1_WH})\", muV,a1,a3)'.format(**xsecs))\n \n self.modelBuilder.factory_('expr::bsmCoupling_VBF(\"@0*@1**2*{sigma3_VBF}/{sigma1_VBF} - @0*@1*@2*sqrt({sigma3_VBF}/{sigma1_VBF})\", muV,a3,a1)'.format(**xsecs))\n self.modelBuilder.factory_('expr::bsmCoupling_ZH(\"@0*@1**2*{sigma3_ZH}/{sigma1_ZH} - @0*@1*@2*sqrt({sigma3_ZH}/{sigma1_ZH})\", muV,a3,a1)'.format(**xsecs))\n self.modelBuilder.factory_('expr::bsmCoupling_WH(\"@0*@1**2*{sigma3_WH}/{sigma1_WH} - @0*@1*@2*sqrt({sigma3_WH}/{sigma1_WH})\", muV,a3,a1)'.format(**xsecs))\n\n self.modelBuilder.factory_('expr::intCoupling_VBF(\"@0*@1*@2*sqrt({sigma3_VBF}/{sigma1_VBF})*2\", muV,a1,a3)'.format(**xsecs))\n self.modelBuilder.factory_('expr::intCoupling_ZH(\"@0*@1*@2*sqrt({sigma3_ZH}/{sigma1_ZH})*2\", muV,a1,a3)'.format(**xsecs))\n self.modelBuilder.factory_('expr::intCoupling_WH(\"@0*@1*@2*sqrt({sigma3_WH}/{sigma1_WH})*2\", muV,a1,a3)'.format(**xsecs))", "title": "" }, { "docid": "a25b739c1c8daa0df2c82bc3e8b7ceda", "score": "0.5033106", "text": "def __init__(self, mode=\"bilinear\"):\n super().__init__()\n self.identity = Identity()\n self.grid_sampler = GridSampler(mode=mode)\n self.ndims = settings.get_ndims()", "title": "" }, { "docid": "07cdb665c194c5597eebaa804cd3d2d8", "score": "0.50314647", "text": "def getState(env):\n global env_model\n global blinky_pos\n global pinky_pos\n global clyde_pos\n global inky_pos\n global pacman_pos\n\n grid = copy.deepcopy(env_model)\n for i in range(14):\n for j in range(20):\n pixels = []\n for k in range(12):\n for l in range(8):\n pixels.append(env[i*12+k+2][j*8+l])\n histogram = Counter(pixels)\n color_count = 0\n color_key = None\n for key in histogram:\n # if the tile is 
empty\n if(key == 144):\n continue\n if(color_count < histogram[key]):\n color_count = histogram[key]\n color_key = key\n # Change dominant color to tile state\n if(color_key is None): # No majority color\n color_key = 0\n elif(color_key == 74): # If pink-ish is dominant in tile\n if(color_count == 8): # Corresponds to a dot, 4x2 pixels of value 74\n color_key = 1\n elif(color_count == 28): # Corresponds to a capsule, 4x7 pixels of value 74\n color_key = 2\n else: # Corresponds to a wall\n color_key = 6\n elif(color_key == 42): # If yellow is dominant in tile\n pacman_pos.append([i,j])\n continue\n # Corresponds to active ghosts\n elif(color_key == 70):\n blinky_pos.append([i, j, 1])\n continue\n elif(color_key == 184):\n clyde_pos.append([i, j, 1])\n continue\n elif(color_key == 38):\n inky_pos.append([i, j, 1])\n continue\n elif(color_key == 88):\n pinky_pos.append([i, j, 1])\n continue\n elif(color_key == 150):\n if(abs(i-blinky_pos[0])+abs(j-blinky_pos[1]) < 2):\n blinky_pos.append([i, j, 0])\n elif(abs(i-clyde_pos[0])+abs(j-clyde_pos[1]) < 2):\n clyde_pos.append([i, j, 0])\n elif(abs(i-inky_pos[0])+abs(j-inky_pos[1]) < 2):\n inky_pos.append([i, j, 0])\n elif(abs(i-pinky_pos[0])+abs(j-pinky_pos[1]) < 2):\n pinky_pos.append([i, j, 0]) \n continue\n\n grid[i][j] = color_key\n \n # Update Pacman positions in memory\n for i in range(2, len(pacman_pos)):\n if(pacman_pos[0] != pacman_pos[i][0] or pacman_pos[1] != pacman_pos[i][1]):\n pacman_pos = [pacman_pos[i][0], pacman_pos[i][1]]\n break\n pacman_pos = [pacman_pos[0], pacman_pos[1]]\n # Update the Ghosts positions in memory\n dist = float('Inf')\n idx = None\n for i in range(3, len(blinky_pos)):\n dx = abs(blinky_pos[i][0]-pacman_pos[0])\n dy = abs(blinky_pos[i][1]-pacman_pos[1])\n delta = 0\n if dx != 0 and dy != 0:\n delta = dx+dy-1\n else:\n delta = dx+dy\n if dist > delta:\n dist = delta\n idx = i\n if(idx is not None):\n blinky_pos = [blinky_pos[idx][0], blinky_pos[idx][1], blinky_pos[idx][2]]\n \n dist = float('Inf')\n idx = None\n for i in range(3, len(inky_pos)):\n dx = abs(inky_pos[i][0]-pacman_pos[0])\n dy = abs(inky_pos[i][1]-pacman_pos[1])\n delta = 0\n if dx != 0 and dy != 0:\n delta = dx+dy-1\n else:\n delta = dx+dy\n if dist > delta:\n dist = delta\n idx = i\n if(idx is not None):\n inky_pos = [inky_pos[idx][0], inky_pos[idx][1], inky_pos[idx][2]]\n \n dist = float('Inf')\n idx = None\n for i in range(3, len(pinky_pos)):\n dx = abs(pinky_pos[i][0]-pacman_pos[0])\n dy = abs(pinky_pos[i][1]-pacman_pos[1])\n delta = 0\n if dx != 0 and dy != 0:\n delta = dx+dy-1\n else:\n delta = dx+dy\n if dist > delta:\n dist = delta\n idx = i\n if(idx is not None):\n pinky_pos = [pinky_pos[idx][0], pinky_pos[idx][1], pinky_pos[idx][2]]\n\n dist = float('Inf')\n idx = None\n for i in range(3, len(clyde_pos)):\n dx = abs(clyde_pos[i][0]-pacman_pos[0])\n dy = abs(clyde_pos[i][1]-pacman_pos[1])\n delta = 0\n if dx != 0 and dy != 0:\n delta = dx+dy-1\n else:\n delta = dx+dy\n if dist > delta:\n dist = delta\n idx = i\n if(idx is not None):\n clyde_pos = [clyde_pos[idx][0], clyde_pos[idx][1], clyde_pos[idx][2]] \n\n # Place last known locations of the entities in the state\n grid[pacman_pos[0]][pacman_pos[1]] = 3\n grid[blinky_pos[0]][blinky_pos[1]] = 4 + blinky_pos[2]\n grid[inky_pos[0]][inky_pos[1]] = 4 + inky_pos[2]\n grid[pinky_pos[0]][pinky_pos[1]] = 4 + pinky_pos[2]\n grid[clyde_pos[0]][clyde_pos[1]] = 4 + clyde_pos[2]\n\n return grid", "title": "" }, { "docid": "851f56a661ad3d23663b0bbccef4acb4", "score": "0.5027792", "text": "def 
guess_areas(cube):\n# if cube.coord('latitude').bounds is None:\n try:\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n except:\n pass\n\n# if cube.coord('longitude').bounds is None:\n# cube.coord('longitude').guess_bounds()\n \n #slice = cube.slices(['latitude','longitude']).next()\n #grid_areas = iris.analysis.cartography.area_weights(slice)\n grid_areas = iris.analysis.cartography.area_weights(cube)\n\n return grid_areas", "title": "" }, { "docid": "400482dc19d09a477efde28d27f1da7b", "score": "0.50146616", "text": "def estimate_grids(self):\n if self.coordsys == 'polar':\n return self.params['c1arr'].min(), self.params['c2arr'].max()\n rvals = np.hypot(self.params['c1arr'], self.params['c2arr'])\n return rvals.min(), rvals.max()", "title": "" }, { "docid": "3c2a580e981df9cb6da71147cd8fc947", "score": "0.5004791", "text": "def init_grid():\n lat0 = iemre.SOUTH\n lat1 = iemre.NORTH\n lon0 = iemre.WEST\n lon1 = iemre.EAST\n x0, y0 = lalo2pt(lat1, lon0)\n x1, y1 = lalo2pt(lat0, lon1)\n\n fp = \"/home/ldm/data/gis/images/4326/q2/p48h.png\"\n q2 = gdal.Open(fp, 0)\n q2d = numpy.flipud( q2.ReadAsArray()[y0:y1:22,x0:x1:25] )\n\n return q2d / 25.4 # hard code snow ratio!", "title": "" }, { "docid": "d9b0d7bbe2c9974bbe80473aedf5e358", "score": "0.50044894", "text": "def aspect_ratio(boxes, aspect_ratio):\n boxes_ar = boxes.copy()\n boxes_ar[:, 0::4] = aspect_ratio * boxes[:, 0::4]\n boxes_ar[:, 2::4] = aspect_ratio * boxes[:, 2::4]\n return boxes_ar", "title": "" }, { "docid": "3bf68d4bfd6d2b31e8501c5c8b4fb27a", "score": "0.49981898", "text": "def gridplane(X,most,pi_arguments={},**kwargs):\n \n \n if (('biascorr' not in kwargs) and ('biascorr' not in pi_arguments)):\n biascorr = False\n else:\n biascorr = kwargs.get('biascorr')\n \n if len(pi_arguments) == 0:\n \n pi_arguments = {\n 'alpha': 0,\n 'ndir': 1000,\n 'trimming': 0,\n 'biascorr': biascorr, \n 'dmetric' : 'euclidean',\n 'alphamat': None,\n 'optrange': (-1,1),\n 'square_pi': False\n }\n \n if ('y' in kwargs):\n y = kwargs.pop('y')\n pi_arguments['y'] = y\n \n optrange = pi_arguments['optrange']\n optmax = optrange[1]\n \n alphamat = kwargs.pop('alphamat',pi_arguments['alphamat'])\n if (alphamat != None):\n optrange = np.sign(optrange)\n stop0s = np.arcsin(optrange[0])\n stop1s = np.arcsin(optrange[1])\n stop1c = np.arccos(optrange[0])\n stop0c = np.arccos(optrange[1])\n anglestart = max(stop0c,stop0s)\n anglestop = max(stop1c,stop1s)\n nangle = np.linspace(anglestart,anglestop,pi_arguments['ndir'],endpoint=False) \n alphamat = np.array([np.cos(nangle), np.sin(nangle)])\n if optmax != 1:\n alphamat *= optmax\n \n tj = np.matmul(X,alphamat)\n if pi_arguments['square_pi']:\n meas = [most.fit(tj[:,i],**pi_arguments)**2 \n for i in np.arange(0,pi_arguments['ndir'])]\n else:\n meas = [most.fit(tj[:,i],**pi_arguments) \n for i in np.arange(0,pi_arguments['ndir'])]\n \n maximo = np.max(meas)\n indmax = np.where(meas == maximo)[0]\n if len(indmax)>0:\n indmax = indmax[0]\n wi = np.array(alphamat[:,indmax]).reshape((2,1))\n \n return(wi,maximo)", "title": "" }, { "docid": "398d26a46330bd1f4e14b31aaf88909c", "score": "0.49935377", "text": "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu_fid[1.0,-15.0,15.0]\");\n self.modelBuilder.doVar(\"rho_0_45[1.0,-25.0,25.0]\");\n self.modelBuilder.doVar(\"rho_45_80[1.0,-25.0,25.0]\");\n self.modelBuilder.doVar(\"rho_80_120[1.0,-25.0,25.0]\");\n self.modelBuilder.doVar(\"rho_120_200[1.0,-25.0,25.0]\");\n 
self.modelBuilder.doVar(\"rho_200_350[1.0,-25.0,25.0]\");\n self.modelBuilder.doVar(\"rho_350_450[1.0,-25.0,25.0]\");\n pois = 'mu_fid,rho_0_45,rho_45_80,rho_80_120,rho_120_200,rho_200_350,rho_350_450'\n self.modelBuilder.doSet(\"POI\",pois)\n if self.options.mass != 0:\n if self.modelBuilder.out.var(\"MH\"):\n self.modelBuilder.out.var(\"MH\").removeRange()\n self.modelBuilder.out.var(\"MH\").setVal(self.options.mass)\n else:\n self.modelBuilder.doVar(\"MH[%g]\" % self.options.mass);\n\tself.modelBuilder.factory_('expr::scale_0_45(\"@0*@1\",mu_fid,rho_0_45)')\n self.modelBuilder.factory_('expr::scale_45_80(\"@0*@1\",mu_fid,rho_45_80)')\n self.modelBuilder.factory_('expr::scale_80_120(\"@0*@1\",mu_fid,rho_80_120)')\n self.modelBuilder.factory_('expr::scale_120_200(\"@0*@1\",mu_fid,rho_120_200)')\n self.modelBuilder.factory_('expr::scale_200_350(\"@0*@1\",mu_fid,rho_200_350)')\n self.modelBuilder.factory_('expr::scale_350_450(\"@0*@1\",mu_fid,rho_350_450)')\n self.modelBuilder.factory_('expr::scale_GT450(\"@0*(486.1-@1*287.2-@2*85.3-@3*48.0-@4*43.8-@5*18.8-@6*2.26)/0.735\",mu_fid,rho_0_45,rho_45_80,rho_80_120,rho_120_200,rho_200_350,rho_350_450)')", "title": "" }, { "docid": "475e2a5ade7220fb969b8606b0d00146", "score": "0.49831626", "text": "def _dimensions(self) -> None:\n #reference image\n self.width = abs(self.a.x - self.b.x)\n self.height = abs(self.a.y - self.b.y)\n\n #workspace spritesheet\n self.sheet_w = abs(self.sheet_a.x - self.sheet_b.x)\n self.sheet_h = abs(self.sheet_a.y - self.sheet_b.y)", "title": "" }, { "docid": "cc1026ead32aa90a8555f40a8e724d11", "score": "0.49772802", "text": "def gridplane_2(X,most,q,div,pi_arguments={},**kwargs):\n \n if (('biascorr' not in kwargs) and ('biascorr' not in pi_arguments)):\n biascorr = False\n else:\n biascorr = kwargs.get('biascorr')\n \n if len(pi_arguments) == 0:\n \n pi_arguments = {\n 'alpha': 0,\n 'ndir': 1000,\n 'trimming': 0,\n 'biascorr': biascorr, \n 'dmetric' : 'euclidean',\n 'alphamat': None,\n 'optrange': (-1,1),\n 'square_pi': False\n }\n\n \n if 'y' in kwargs:\n y = kwargs.pop('y')\n pi_arguments['y'] = y\n\n optrange = pi_arguments['optrange']\n optmax = optrange[1]\n \n alphamat = kwargs.pop('alphamat',pi_arguments['alphamat'])\n if (alphamat != None).any():\n anglestart = min(pi_arguments['_stop0c'],pi_arguments['_stop0s'])\n anglestop = min(pi_arguments['_stop1c'],pi_arguments['_stop1s'])\n nangle = np.linspace(anglestart,anglestop,pi_arguments['ndir'],endpoint=True)\n alphamat = np.array([np.cos(nangle), np.sin(nangle)])\n if optmax != 1:\n alphamat *= optmax\n alpha1 = alphamat\n divisor = np.sqrt(1 + 2*np.multiply(alphamat[0,:].reshape(1,-1),alphamat[1,:].reshape(1,-1))*q[0])\n alpha1 = np.divide(alphamat,np.repeat(divisor,2,0))\n tj = np.dot(X,alpha1)\n \n if pi_arguments['square_pi']:\n meas = [most.fit(tj[:,i],**pi_arguments)**2 \n for i in np.arange(0,pi_arguments['ndir'])]\n else:\n meas = [most.fit(tj[:,i],**pi_arguments) \n for i in np.arange(0,pi_arguments['ndir'])]\n\n maximo = np.max(meas)\n indmax = np.where(meas == maximo)[0]\n if len(indmax)>0:\n indmax = indmax[0]\n wi = np.array(alpha1[:,indmax]).reshape((2,1))\n \n return(wi,maximo)", "title": "" }, { "docid": "3e0e4299b06b48a30956f637b8d6286b", "score": "0.49764493", "text": "def get_paramgrid_2(self, param_grid):\n self.ramdonized_search_enable = False\n self.randomized_search_n_iter = 150\n self.grid_search_display_result = True\n \n param_grid['eta'] = [0.01] #train-mape:-0.448062+0.00334926 test-mape:-0.448402+0.00601761\n# 
param_grid['max_depth'] = [7] #train-mape:-0.363007+0.00454276 test-mape:-0.452832+0.00321641\n# param_grid['colsample_bytree'] = [0.8]\n param_grid['max_depth'] = range(5,8) #train-mape:-0.363007+0.00454276 test-mape:-0.452832+0.00321641\n param_grid['colsample_bytree'] = [0.6,0.8,1.0]\n \n# param_grid['lambda'] = range(1,15)\n# param_grid['max_depth'] = [3,4]\n# param_grid['eta'] = [0.01,0.1] # 0.459426+0.00518875\n# param_grid['subsample'] = [0.5] #0.458935+0.00522205\n# param_grid['eta'] = [0.005] #0.457677+0.00526401\n return param_grid", "title": "" }, { "docid": "a1962934270aeb078a9ef5487378e6e3", "score": "0.49700838", "text": "def projected_aspect_ratio(self, dim=2, direction=None):\n\n proj_grid = self.project_on_dim(dim=dim, direction=direction)\n\n x_proj = proj_grid.any(axis=0)\n y_proj = proj_grid.any(axis=1)\n x0 = np.arange(len(x_proj))[x_proj][0]\n x1 = np.arange(len(x_proj))[x_proj][-1]\n y0 = np.arange(len(y_proj))[y_proj][0]\n y1 = np.arange(len(y_proj))[y_proj][-1]\n return float(y1-y0+1)/float(x1-x0+1)", "title": "" }, { "docid": "c306741390be1e630df2b0d04ce734cf", "score": "0.49659324", "text": "def _get_outputs(self, multiplier, img):\n\n heatmap_avg = np.zeros((img.shape[0], img.shape[1], 18))\n bbox_all = []\n # max_scale = multiplier[-1]\n # max_size = max_scale * img.shape[0]\n # # padding\n # max_cropped, _, _ = crop_with_factor(\n # img, max_size, factor=32)\n\n for m in range(len(multiplier)):\n scale = multiplier[m]\n inp_size = scale * img.shape[0]\n\n # padding\n im_cropped, im_scale, real_shape = crop_with_factor(\n img, inp_size, factor=32, pad_val=128)\n im_data = resnet_preprocess(im_cropped)\n\n im_data = np.expand_dims(im_data, 0)\n with torch.no_grad():\n im_data = torch.from_numpy(im_data).type(torch.FloatTensor).cuda(device=self.params.gpus[0])\n\n heatmaps, [scores, classification, transformed_anchors] = self.model([im_data, self.params.subnet_name])\n # heatmaps, [scores, classification, transformed_anchors] = self.model_onnx(None, {'input.1':im_data})\n heatmaps = heatmaps.cpu().detach().numpy().transpose(0, 2, 3, 1)\n scores = scores.cpu().detach().numpy()\n classification = classification.cpu().detach().numpy()\n transformed_anchors = transformed_anchors.cpu().detach().numpy()\n\n heatmap = heatmaps[0, :int(im_cropped.shape[0] / 4), :int(im_cropped.shape[1] / 4), :]\n heatmap = cv2.resize(heatmap, None, fx=4, fy=4, interpolation=cv2.INTER_CUBIC)\n heatmap = heatmap[0:real_shape[0], 0:real_shape[1], :]\n heatmap = cv2.resize(\n heatmap, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)\n\n heatmap_avg = heatmap_avg + heatmap / len(multiplier)\n\n # bboxs\n idxs = np.where(scores > 0.5)\n bboxs=[]\n for j in range(idxs[0].shape[0]):\n bbox = transformed_anchors[idxs[0][j], :]/im_scale\n if int(classification[idxs[0][j]]) == 0: # class0=people\n bboxs.append(bbox.tolist())\n bbox_all.append(bboxs)\n\n return heatmap_avg, bbox_all", "title": "" }, { "docid": "4ec8ee48a3f957f5345048d23b8b7ca3", "score": "0.49635157", "text": "def _beam_area(self):\n return np.pi * self.bmaj * self.bmin / (4.0 * np.log(2.0))", "title": "" }, { "docid": "b7150b223e6965d768222d38dc6de560", "score": "0.49631506", "text": "def get_prepared_objp(self):\n objp = np.zeros( (self.m_ny * self.m_nx, 3), np.float32 )\n objp[:,:2] = np.mgrid[0:self.m_nx, 0:self.m_ny].T.reshape(-1,2)\n return objp", "title": "" }, { "docid": "370d6186e11d0c8838fe63abb6ba2029", "score": "0.49630952", "text": "def update(self, frame):\n size_x_scales = self.s_x * self.scales\n pyramid 
= get_pyramid_instance_image(frame, self.target_center, config.instance_size, size_x_scales, self.img_mean)\n\n instance_imgs = torch.cat([self.transforms(x)[None, :, :, :] for x in pyramid], dim=0)\n instance_imgs_var = Variable(instance_imgs.type(self.dtype))\n response_maps = self.model.forward((None, instance_imgs_var))\n response_maps = response_maps.data.cpu().numpy().squeeze()\n\n # visualize the response map\n #plt.imshow(response_maps[0])\n #plt.show()\n\n response_maps_up = [cv2.resize(x, (self.interp_response_sz, self.interp_response_sz), cv2.INTER_CUBIC) for x in response_maps]\n # get max score\n max_score = np.array([x.max() for x in response_maps_up]) * self.penalty\n\n # penality scale change\n scale_idx = max_score.argmax()\n response_map = response_maps_up[scale_idx]\n # make the response map sum to 1 namely normalization\n response_map -= response_map.min()\n response_map /= response_map.sum()\n\n # apply windowing\n response_map = (1 - config.window_influence) * response_map + config.window_influence * self.cosine_window\n max_r, max_c = np.unravel_index(response_map.argmax(), response_map.shape)\n\n # Convert the crop-relative coordinates to frame coordinates\n # displacement from the center in instance to final representation\n p = np.array([max_c, max_r]) # position of max response in response_maps_up\n center = (self.interp_response_sz - 1) / 2 # center of response_maps_up\n disp_response_interp = p - center # displacement in the interpolation response map\n # displacement in instance input\n disp_instance_input = disp_response_interp * config.total_stride / config.response_up_stride ###??????what want to compute??\n # displacement in instance frame\n scale = self.scales[scale_idx]\n disp_instance_frame = disp_instance_input * (self.s_x * scale) / config.instance_size\n # position within frame in frame coordinates\n self.target_center += disp_instance_frame\n # scale damping and saturation #????? 
what is damping and saturation?\n self.s_x *= ((1 - config.scale_lr) + config.scale_lr * scale)\n self.s_x = max(self.min_s_x, min(self.max_s_x, self.s_x))\n self.target_sz = ((1 - config.scale_lr) + config.scale_lr * scale) * self.target_sz\n bbox = (self.target_center[0] - self.target_sz[0] / 2 + 1, # xmin conver to 1-based\n self.target_center[1] - self.target_sz[1] / 2 + 1, # ymin\n self.target_center[0] + self.target_sz[0] / 2 + 1, # xmax\n self.target_center[1] + self.target_sz[1] / 2 + 1) # ymax\n return bbox", "title": "" }, { "docid": "658690cfa0b65dc79efea8cfafb43679", "score": "0.49626464", "text": "def quick_inspect(cellid=\"chn020f-b1\", batch=271,\n modelname=\"ozgf100ch18_wc18x1_fir15x1_lvl1_dexp1_fit01\"):\n xf, ctx = load_model_baphy_xform(cellid, batch, modelname, eval_model=True)\n\n modelspec = ctx['modelspec']\n est = ctx['est']\n val = ctx['val']\n nplt.quickplot(ctx)\n\n return modelspec, est, val", "title": "" }, { "docid": "c6cdcfe7d00503ac41314942aa93c831", "score": "0.49614468", "text": "def getLayeredModel():\n s = fillfloat(0.5,nx,nz)\n for iz in range(nz/3,2*nz/3):\n for ix in range(nx):\n s[iz][ix] = 0.35\n for iz in range(2*nz/3,nz):\n for ix in range(nx):\n s[iz][ix] = 0.2\n return s", "title": "" }, { "docid": "afd1740ac6392e4aa5ad515f4f50285f", "score": "0.49586898", "text": "def get_hyperparameter_grid(self):\n self.config.model_components_param = self.apply_auto_arima_model_components_defaults(\n model_components=self.config.model_components_param)\n # Returns a set of parameters for grid search\n hyperparameter_grid = {\n # Additional parameters\n \"estimator__freq\": self.config.metadata_param.freq,\n # pmdarima fit parameters\n \"estimator__start_p\": self.config.model_components_param.custom[\"start_p\"],\n \"estimator__d\": self.config.model_components_param.custom[\"d\"],\n \"estimator__start_q\": self.config.model_components_param.custom[\"start_q\"],\n \"estimator__max_p\": self.config.model_components_param.custom[\"max_p\"],\n \"estimator__max_d\": self.config.model_components_param.custom[\"max_d\"],\n \"estimator__max_q\": self.config.model_components_param.custom[\"max_q\"],\n \"estimator__start_P\": self.config.model_components_param.custom[\"start_P\"],\n \"estimator__D\": self.config.model_components_param.custom[\"D\"],\n \"estimator__start_Q\": self.config.model_components_param.custom[\"start_Q\"],\n \"estimator__max_P\": self.config.model_components_param.custom[\"max_P\"],\n \"estimator__max_D\": self.config.model_components_param.custom[\"max_D\"],\n \"estimator__max_Q\": self.config.model_components_param.custom[\"max_Q\"],\n \"estimator__max_order\": self.config.model_components_param.custom[\"max_order\"],\n \"estimator__m\": self.config.model_components_param.custom[\"m\"],\n \"estimator__seasonal\": self.config.model_components_param.custom[\"seasonal\"],\n \"estimator__stationary\": self.config.model_components_param.custom[\"stationary\"],\n \"estimator__information_criterion\": self.config.model_components_param.custom[\"information_criterion\"],\n \"estimator__alpha\": self.config.model_components_param.custom[\"alpha\"],\n \"estimator__test\": self.config.model_components_param.custom[\"test\"],\n \"estimator__seasonal_test\": self.config.model_components_param.custom[\"seasonal_test\"],\n \"estimator__stepwise\": self.config.model_components_param.custom[\"stepwise\"],\n \"estimator__n_jobs\": self.config.model_components_param.custom[\"n_jobs\"],\n \"estimator__start_params\": 
self.config.model_components_param.custom[\"start_params\"],\n \"estimator__trend\": self.config.model_components_param.custom[\"trend\"],\n \"estimator__method\": self.config.model_components_param.custom[\"method\"],\n \"estimator__maxiter\": self.config.model_components_param.custom[\"maxiter\"],\n \"estimator__offset_test_args\": self.config.model_components_param.custom[\"offset_test_args\"],\n \"estimator__seasonal_test_args\": self.config.model_components_param.custom[\"seasonal_test_args\"],\n \"estimator__suppress_warnings\": self.config.model_components_param.custom[\"suppress_warnings\"],\n \"estimator__error_action\": self.config.model_components_param.custom[\"error_action\"],\n \"estimator__trace\": self.config.model_components_param.custom[\"trace\"],\n \"estimator__random\": self.config.model_components_param.custom[\"random\"],\n \"estimator__random_state\": self.config.model_components_param.custom[\"random_state\"],\n \"estimator__n_fits\": self.config.model_components_param.custom[\"n_fits\"],\n \"estimator__out_of_sample_size\": self.config.model_components_param.custom[\"out_of_sample_size\"],\n \"estimator__scoring\": self.config.model_components_param.custom[\"scoring\"],\n \"estimator__scoring_args\": self.config.model_components_param.custom[\"scoring_args\"],\n \"estimator__with_intercept\": self.config.model_components_param.custom[\"with_intercept\"],\n # pmdarima predict parameters\n \"estimator__return_conf_int\": self.config.model_components_param.custom[\"return_conf_int\"],\n \"estimator__dynamic\": self.config.model_components_param.custom[\"dynamic\"],\n }\n\n # Overwrites values by `model_components.hyperparameter_override`\n # This may produce a list of dictionaries for grid search.\n hyperparameter_grid = update_dictionaries(\n hyperparameter_grid,\n overwrite_dicts=self.config.model_components_param.hyperparameter_override,\n allow_unknown_keys=False\n )\n\n # Ensures all items have the proper type for\n # `sklearn.model_selection.RandomizedSearchCV`.\n # List-type hyperparameters are specified below\n # with their accepted non-list type values.\n hyperparameter_grid = dictionaries_values_to_lists(\n hyperparameter_grid)\n\n return hyperparameter_grid", "title": "" }, { "docid": "49f5fa4351c1c327437f478d3ea53612", "score": "0.49551862", "text": "def __init__(self, config):\n self.window_zoom_crop_size = thelper.utils.str2size(thelper.utils.get_key_def(\"window_zoom_crop_size\", config, \"250x250\"))\n self.window_zoom_size = thelper.utils.str2size(thelper.utils.get_key_def(\"window_zoom_size\", config, \"500x500\"))\n self.window_zoom_name = \"zoom\"\n self.image_crop = np.empty((self.window_zoom_crop_size[1], self.window_zoom_crop_size[0], 3), dtype=np.uint8)\n self.image_zoom = np.empty((self.window_zoom_size[1], self.window_zoom_size[0], 3), dtype=np.uint8)\n self.mask_crop = np.empty((self.window_zoom_crop_size[1], self.window_zoom_crop_size[0]), dtype=np.uint8)\n self.mask_crop_color = np.empty((self.window_zoom_crop_size[1], self.window_zoom_crop_size[0], 3), dtype=np.uint8)\n self.mask_zoom = np.empty((self.window_zoom_size[1], self.window_zoom_size[0], 3), dtype=np.uint8)\n self.display_zoom = np.empty((self.window_zoom_size[1], self.window_zoom_size[0], 3), dtype=np.uint8)\n self.zoom_interp_type = thelper.utils.import_class(thelper.utils.get_key_def(\"zoom_interp_type\", config, \"cv2.INTER_NEAREST\"))\n cv.namedWindow(self.window_zoom_name, cv.WINDOW_AUTOSIZE | cv.WINDOW_GUI_NORMAL)", "title": "" }, { "docid": 
"bec0ce8d64f1e7ced2e6662f5c948993", "score": "0.49488515", "text": "def _calcAspect(self, mode):\n\n aspect = (180/np.pi) * np.arctan2(self.dzdy, -self.dzdx)\n aspect = np.mod(450 - aspect, 360)\n aspect[aspect == 360] = 0\n\n self._aspect = aspect", "title": "" }, { "docid": "564658685d23d9c42c0b8809c1556fbd", "score": "0.49482635", "text": "def __create_grid_objects(self):\n\n # Initial conditions\n xz_lines = []\n xy_lines = []\n yz_lines = []\n camera_axes = self.camera_axes\n # Locate centre of axes\n if self.__relative_cam:\n x_origin, y_origin, z_origin = round(self.__scene.center.x),\\\n round(self.__scene.center.y),\\\n round(self.__scene.center.z)\n self.__focal_point = [x_origin, y_origin, z_origin]\n else:\n x_origin, y_origin, z_origin = self.__focal_point[0], \\\n self.__focal_point[1], \\\n self.__focal_point[2]\n\n # CAMERA AXES | DISPLAYED GRID | XZ PLANE | XY PLANE | YZ PLANE\n # x,y,z | x,y,z | x,z | x,y | y,z\n # -------------+-----------------+----------+----------+----------\n # -,-,- | +,+,+ | +,+ | +,+ | +,+\n # -,-,+ | +,+,- | +,- | +,+ | +,-\n # -,+,- | +,-,+ | +,+ | +,- | -,+\n # -,+,+ | +,-,- | +,- | +,- | -,-\n # +,-,- | -,+,+ | -,+ | -,+ | +,+\n # +,-,+ | -,+,- | -,- | -,+ | +,-\n # +,+,- | -,-,+ | -,+ | -,- | -,+\n # +,+,+ | -,-,- | -,- | -,- | -,-\n # min = -num_squares or 0, around the default position\n # max = +num_squares or 0, around the default position\n # e.g. at the origin, for negative axes: -10 -> 0, positive axes: 0 -> 10\n min_x_coord = x_origin + int(-(self.__num_squares / 2) + (sign(camera_axes.x) * -1) * (self.__num_squares / 2))\n max_x_coord = x_origin + int((self.__num_squares / 2) + (sign(camera_axes.x) * -1) * (self.__num_squares / 2))\n\n min_y_coord = y_origin + int(-(self.__num_squares / 2) + (sign(camera_axes.y) * -1) * (self.__num_squares / 2))\n max_y_coord = y_origin + int((self.__num_squares / 2) + (sign(camera_axes.y) * -1) * (self.__num_squares / 2))\n\n min_z_coord = z_origin + int(-(self.__num_squares / 2) + (sign(camera_axes.z) * -1) * (self.__num_squares / 2))\n max_z_coord = z_origin + int((self.__num_squares / 2) + (sign(camera_axes.z) * -1) * (self.__num_squares / 2))\n\n # XZ plane\n for x_point in range(min_x_coord, max_x_coord + 1):\n # Draw a line across for each x coord, along the same y-axis, from min to max z coord\n xz_lines.append(create_line(\n vector(x_point, y_origin, min_z_coord),\n vector(x_point, y_origin, max_z_coord),\n self.__scene\n ))\n for z_point in range(min_z_coord, max_z_coord + 1):\n # Draw a line across each z coord, along the same y-axis, from min to max z coord\n xz_lines.append(create_line(\n vector(min_x_coord, y_origin, z_point),\n vector(max_x_coord, y_origin, z_point),\n self.__scene\n ))\n\n # XY plane\n for x_point in range(min_x_coord, max_x_coord + 1):\n # Draw a line across each x coord, along the same z-axis, from min to max y coord\n xy_lines.append(create_line(\n vector(x_point, min_y_coord, z_origin),\n vector(x_point, max_y_coord, z_origin),\n self.__scene\n ))\n for y_point in range(min_y_coord, max_y_coord + 1):\n # Draw a line across each y coord, along the same z-axis, from min to max x coord\n xy_lines.append(create_line(\n vector(min_x_coord, y_point, z_origin),\n vector(max_x_coord, y_point, z_origin),\n self.__scene\n ))\n\n # YZ plane\n for y_point in range(min_y_coord, max_y_coord + 1):\n # Draw a line across each y coord, along the same x-axis, from min to max z coord\n yz_lines.append(create_line(\n vector(x_origin, y_point, min_z_coord),\n vector(x_origin, y_point, 
max_z_coord),\n self.__scene\n ))\n for z_point in range(min_z_coord, max_z_coord + 1):\n # Draw a line across each z coord, along the same x-axis, from min to max y coord\n yz_lines.append(create_line(\n vector(x_origin, min_y_coord, z_point),\n vector(x_origin, max_y_coord, z_point),\n self.__scene\n ))\n\n # Compound the lines together into respective objects\n xz_plane = compound(xz_lines)\n xy_plane = compound(xy_lines)\n yz_plane = compound(yz_lines)\n\n # Combine all into one list\n grid = [None, None, None]\n grid[self.__xy_plane_idx] = xy_plane\n grid[self.__xz_plane_idx] = xz_plane\n grid[self.__yz_plane_idx] = yz_plane\n\n return grid", "title": "" }, { "docid": "04d94de7d9071454cc034bdab47bbe29", "score": "0.49439356", "text": "def __init__(self, image_height, image_width,\n camera_params=CameraParams(horizontal_fov=54.13, vertical_fov=42.01, z=1.68, pitch=-3.02, yaw=0.2)):\n degrees_per_x_pixel = camera_params.horizontal_fov / image_width\n degrees_per_y_pixel = camera_params.vertical_fov / image_height\n\n vanishing_point_x = (image_width / 2) + (camera_params.yaw / degrees_per_x_pixel)\n vanishing_point_y = (image_height / 2) - (camera_params.pitch / degrees_per_y_pixel)\n\n # Expected vehicle size. Note this does not need to be spot on to get detections although it might be good to\n # search for a few different sizes.\n vehicle_width_meters = 3.0\n vehicle_height_meters = 3.0\n\n # Defines a conic search corridor in front of vehicle, with grids placed at specific distances.\n roi_widths_meters = [28.0, 30.0, 38.0, 46.0, 54.0, 62.0, 70.0]\n roi_heights_meters = [7.375, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0]\n roi_distances_meters = [15.0, 20.0, 40.0, 60.0, 80.0, 100.0, 120.0]\n\n # Grid settings to use when doing a manual camera alignment calibration based on lanes width and dash length.\n # dash_length = 3.048\n # between_dash_length = 3 * dash_length\n # roi_distances_meters = [10, 10 + dash_length, 10 + dash_length + between_dash_length, 10 + dash_length * 2 + between_dash_length, 10 + dash_length * 2 + between_dash_length*2]\n\n self.generators = []\n\n # Calculate grid parameters for each search distance\n for roi_distance_meters, roi_width_meters, roi_heigth_meters in zip(roi_distances_meters,\n roi_widths_meters,\n roi_heights_meters):\n # roi_width_meters = 3.6576\n # roi_heigth_meters = 3.0\n\n degrees_per_meter = np.degrees(np.arctan(1.0 / roi_distance_meters))\n\n # Region of interest\n roi_width_pixels = roi_width_meters * degrees_per_meter / degrees_per_x_pixel\n roi_height_pixels = roi_heigth_meters * degrees_per_meter / degrees_per_y_pixel\n roi_left = (vanishing_point_x - roi_width_pixels / 2).astype(np.int)\n roi_right = np.minimum(image_width - 1, roi_left + roi_width_pixels).astype(np.int)\n roi_left = np.maximum(0, roi_left).astype(int)\n\n # Place 1/4 of the ROI below estimated road surface, skip this when calibrating\n vertical_offset = (1*roi_heigth_meters / 4)\n roi_bottom = vanishing_point_y + ((vertical_offset + camera_params.z) * degrees_per_meter /\n degrees_per_y_pixel)\n roi_top = np.maximum(0, roi_bottom - roi_height_pixels + 1).astype(np.int)\n roi_bottom = np.minimum(image_height - 1, roi_bottom).astype(np.int)\n\n # Search window size\n vehicle_width_pixels = (vehicle_width_meters * degrees_per_meter / degrees_per_x_pixel).astype(int)\n vehicle_height_pixels = (vehicle_height_meters * degrees_per_meter / degrees_per_y_pixel).astype(int)\n\n self.generators.append(GridGenerator(roi=RegionOfInterest(top=roi_top, left=roi_left,\n bottom=roi_bottom, 
right=roi_right),\n window_size=WindowSize(height=vehicle_height_pixels,\n width=vehicle_width_pixels),\n window_overlap=WindowOverlap(vertical=0.75, horizontal=0.75)))", "title": "" }, { "docid": "92ce8b28a06cfea1e114bb1b169e9b7d", "score": "0.49431401", "text": "def visualize_model(self, ax):\n tr = np.c_[[model.w[1:] for model in self.models]].T\n tr2 = tr.reshape(28, 28, 10, order='F')\n tr3 = np.transpose(tr2, axes=[1, 0, 2])\n ax.imshow(tr3.reshape(28, -1, order='F'), cmap='bone')", "title": "" }, { "docid": "e1673d4d7c1991544eb1b9a0d847afc4", "score": "0.4938665", "text": "def init_dispersion(self, xoff=0, yoff=0):\n import unicorn.utils_c\n \n self.dy, self.lam = self.conf.get_beam_trace(x=self.x-xoff-self.pad, y=self.y+yoff-self.pad, dx=self.conf.dxlam[self.beam]+self.xcenter-xoff, beam=self.beam)\n \n self.dy += yoff\n \n # 20 for handling int of small negative numbers\n self.dyc = np.cast[int](self.dy+20)+-20+1\n self.yfrac = self.dy-np.floor(self.dy)\n \n dl = np.abs(self.lam[1]-self.lam[0])\n self.ysens = unicorn.utils_c.interp_conserve_c(self.lam, self.conf.sens[self.beam]['WAVELENGTH'], self.conf.sens[self.beam]['SENSITIVITY'])/1.e17*dl\n \n self.idxl = np.arange(np.product(self.shg)).reshape(self.shg)[self.dyc+self.cutout_dimensions[0], self.dx-self.dx[0]+self.cutout_dimensions[1]]", "title": "" }, { "docid": "a28f7a935b92da6af397c6423f320a18", "score": "0.4925643", "text": "def __init__(self,CMei_model):\n self.model = CMei_model\n\n # create M, a grid of distorted pixel coordinates\n M = []\n\n for i in range(-100,self.model.height,5):\n for j in range(-100,self.model.width,5):\n # order as in Scaramuzza!?\n M.append(np.array([float(j),float(i)]))\n\n self.M = np.array(M)\n\n self.Md = self.undistort_points(self.M.T).T\n \n\n self.M = np.array([self.M[i,0] + self.M[i,1]*1j for i in range(len(self.M))])\n\n self.interpolator = scipy.interpolate.LinearNDInterpolator(self.Md, self.M)", "title": "" }, { "docid": "d08e0fedbb8f2877724f71485190061c", "score": "0.49216688", "text": "def _update_image_aspect(self):\n\t\tw = int(self._app.settings.get_string(\"export-width\"))\n\t\th = int(self._app.settings.get_string(\"export-height\"))\n\t\tself.image_aspect = w / h\n\t\tlogger.debug(\"New image aspect: %s\", self.image_aspect)", "title": "" }, { "docid": "f19910560c87d03bec44ac87cb94a9f5", "score": "0.49213117", "text": "def __move_grid_objects(self):\n camera_axes = self.camera_axes\n # Locate centre of axes\n if self.__relative_cam:\n x_origin, y_origin, z_origin = round(self.__scene.center.x), \\\n round(self.__scene.center.y), \\\n round(self.__scene.center.z)\n self.__focal_point = [x_origin, y_origin, z_origin]\n # Convert focal point for 2D rendering. 
Puts focus point in centre of the view\n if not self.__is_3d:\n self.__focal_point = [val - int(self.__num_squares / 2) for val in self.__focal_point]\n x_origin = self.__focal_point[0]\n y_origin = self.__focal_point[1]\n z_origin = 0\n self.__focal_point[2] = z_origin\n else:\n x_origin, y_origin, z_origin = self.__focal_point[0], \\\n self.__focal_point[1], \\\n self.__focal_point[2]\n\n # CAMERA AXES | DISPLAYED GRID | XZ PLANE | XY PLANE | YZ PLANE\n # x,y,z | x,y,z | x,z | x,y | y,z\n # -------------+-----------------+----------+----------+----------\n # -,-,- | +,+,+ | +,+ | +,+ | +,+\n # -,-,+ | +,+,- | +,- | +,+ | +,-\n # -,+,- | +,-,+ | +,+ | +,- | -,+\n # -,+,+ | +,-,- | +,- | +,- | -,-\n # +,-,- | -,+,+ | -,+ | -,+ | +,+\n # +,-,+ | -,+,- | -,- | -,+ | +,-\n # +,+,- | -,-,+ | -,+ | -,- | -,+\n # +,+,+ | -,-,- | -,- | -,- | -,-\n # min = -num_squares or 0, around the default position\n # max = +num_squares or 0, around the default position\n # e.g. at the origin, for negative axes: -10 -> 0, positive axes: 0 -> 10\n min_x_coord = x_origin + int(-(self.__num_squares / 2) + (sign(camera_axes.x) * -1) * (self.__num_squares / 2))\n max_x_coord = x_origin + int((self.__num_squares / 2) + (sign(camera_axes.x) * -1) * (self.__num_squares / 2))\n\n min_y_coord = y_origin + int(-(self.__num_squares / 2) + (sign(camera_axes.y) * -1) * (self.__num_squares / 2))\n max_y_coord = y_origin + int((self.__num_squares / 2) + (sign(camera_axes.y) * -1) * (self.__num_squares / 2))\n\n min_z_coord = z_origin + int(-(self.__num_squares / 2) + (sign(camera_axes.z) * -1) * (self.__num_squares / 2))\n max_z_coord = z_origin + int((self.__num_squares / 2) + (sign(camera_axes.z) * -1) * (self.__num_squares / 2))\n\n # Compound origins are in the middle of the bounding boxes. 
Thus new pos will be between max and min.\n x_middle = (max_x_coord + min_x_coord) / 2\n y_middle = (max_y_coord + min_y_coord) / 2\n z_middle = (max_z_coord + min_z_coord) / 2\n\n # XY Plane\n if camera_axes.z < 0:\n self.grid_object[self.__planes_idx][self.__xy_plane_idx].pos = vector(x_middle, y_middle, min_z_coord)\n else:\n self.grid_object[self.__planes_idx][self.__xy_plane_idx].pos = vector(x_middle, y_middle, max_z_coord)\n\n # XZ Plane\n if camera_axes.y < 0:\n self.grid_object[self.__planes_idx][self.__xz_plane_idx].pos = vector(x_middle, min_y_coord, z_middle)\n else:\n self.grid_object[self.__planes_idx][self.__xz_plane_idx].pos = vector(x_middle, max_y_coord, z_middle)\n\n # YZ Plane\n if camera_axes.x < 0:\n self.grid_object[self.__planes_idx][self.__yz_plane_idx].pos = vector(min_x_coord, y_middle, z_middle)\n else:\n self.grid_object[self.__planes_idx][self.__yz_plane_idx].pos = vector(max_x_coord, y_middle, z_middle)", "title": "" }, { "docid": "33b6c06a7a2cd713988b47955691bbb7", "score": "0.49176052", "text": "def change_panorama_fov(self):\n self.pano_alpha_min = int(self.parent.ui.min_pano.text())\n self.pano_alpha_max = int(self.parent.ui.max_pano.text())\n # self.rho = self.parent.moildev.getRhoFromAlpha(self.pano_alpha_min)\n self.process_to_panorama()", "title": "" }, { "docid": "346753261956fd2b158093cd8a5f4dcb", "score": "0.49174315", "text": "def representation_grid_values(self) -> np.ndarray:\n return np.arange(self.min, self.max + 1, self.step)", "title": "" }, { "docid": "3992040dd2b18dc71eaae738bf2c3cbf", "score": "0.49137956", "text": "def __init__(self, wavel1, nfr):\n self.isz = isz = 256 # image size (in pixels)\n self.xx, self.yy = np.meshgrid(np.arange(isz)-isz/2, np.arange(isz)-isz/2)\n self.nfr = nfr # number of frames\n \n self.pscale = 5 # image plate scale (in mas/pixel)\n self.wavel1 = wavel1#1.55e-6 # wavelength for image (in meters)\n self.tdiam = 1.075 # telescope diameter (in meters)\n \n dtor = np.pi / 180.0 # degree to radian conversion factor\n self.rad2mas = 3.6e6 / dtor # radian to milliarcsecond conversion factor\n \n ld_r = self.wavel1 / self.tdiam # lambda/D (in radians)\n ld_p = ld_r * self.rad2mas / self.pscale # lambda/D (in pixels)\n self.prad = prad = np.round(isz / ld_p / 2.0) # simulated aperture radius (in pixels)\n self.ll = self.tdiam * isz / (2 * prad) # wavefront extent (in meters)\n\n \n self.spatial_scale = (np.arange(isz)-isz/2)*self.wavel1/isz*self.rad2mas/self.pscale # in meter, for pupil plane\n self.angular_scale = (np.arange(isz)-isz/2) * self.pscale # in mas, for PSF or image\n \n pupil = np.zeros((isz,isz)) # array of zeros\n \n # Apperture A, D, C, B, resp beam 2, 1, 3, 4\n x_mcoord = [2.725, -2.812, -2.469, -0.502] # x-coordinates of N telescopes in meter\n y_mcoord = [2.317, 1.685, -1.496, -2.363] # y-coordinates of N telescopes in meter\n \n x_pcoord = []\n y_pcoord = []\n for i in range(len(x_mcoord)):\n x0 = x_mcoord[i]/self.wavel1/self.rad2mas * self.pscale*isz\n y0 = y_mcoord[i]/self.wavel1/self.rad2mas * self.pscale*isz\n x_pcoord.append(x0)\n y_pcoord.append(y0)\n pupil[(self.xx-x0)**2 + (self.yy-y0)**2 < prad**2] = 1.0\n \n x_pcoord = np.array(x_pcoord)\n y_pcoord = np.array(y_pcoord)\n \n self.x_pcoord, self.y_pcoord = x_pcoord, y_pcoord\n\n self.pupil = pupil/np.sum(pupil)", "title": "" }, { "docid": "3ffa76a22fd2d35ab123fd1d48470db2", "score": "0.49037802", "text": "def __init__(\r\n self,\r\n deflections_y,\r\n deflections_x,\r\n image_plane_grid,\r\n preload_grid=None,\r\n 
preload_blurring_grid=None,\r\n # normalization_scale: float = 1.0,\r\n ):\r\n super().__init__()\r\n\r\n self.deflections_y = deflections_y\r\n self.deflections_x = deflections_x\r\n\r\n self.image_plane_grid = image_plane_grid\r\n\r\n self.centre = image_plane_grid.origin\r\n\r\n self.preload_grid = preload_grid\r\n self.preload_deflections = None\r\n self.preload_blurring_grid = preload_blurring_grid\r\n self.preload_blurring_deflections = None\r\n\r\n if self.preload_grid is not None:\r\n self.normalization_scale = 1.0\r\n self.preload_deflections = self.deflections_2d_from_grid(grid=preload_grid)\r\n\r\n if self.preload_blurring_grid is not None:\r\n self.normalization_scale = 1.0\r\n self.preload_blurring_deflections = self.deflections_2d_from_grid(\r\n grid=preload_blurring_grid\r\n )\r\n\r\n self.normalization_scale = 1.0 # normalization_scale\r", "title": "" }, { "docid": "43546bf0512fc3c19bebe26c9b6f52ef", "score": "0.49024853", "text": "def test_interpolate_effective_area_per_energy_and_fov():\n n_en = 20\n n_th = 1\n en = np.logspace(-2, 2, n_en)\n # applying a simple sigmoid function\n aeff0 = 1.e4 / (1 + 1 / en**2) * u.Unit('m2')\n\n # assume that for parameters 'x' and 'y' the Aeff scales x*y*Aeff0\n x = [0.9, 1.1]\n y = [8., 11.5]\n n_grid = len(x) * len(y)\n aeff = np.empty((n_grid, n_th, n_en))\n pars = np.empty((n_grid, 2))\n i_grid = 0\n for xx in x:\n for yy in y:\n aeff[i_grid, 0, :] = aeff0 * xx * yy / 10\n pars[i_grid, :] = np.array([xx, yy])\n i_grid += 1\n aeff *= u.Unit('m2')\n pars0 = (1, 10)\n min_aeff = 1 * u.Unit('m2')\n aeff_interp = interp.interpolate_effective_area_per_energy_and_fov(aeff, pars, pars0, min_effective_area=min_aeff, method='linear')\n # allowing for 3% accuracy except of close to the minimum value of Aeff\n assert np.allclose(aeff_interp[:, 0], aeff0, rtol=0.03, atol=min_aeff)", "title": "" }, { "docid": "f3a01401cef047944d7a375d7c5e87cc", "score": "0.48992178", "text": "def images(self, outputs):", "title": "" }, { "docid": "c80a04731c860562f499c9e5b46ad8c7", "score": "0.48952866", "text": "def CIC_density_assignment(box):\r\n N_g = box.N_g\r\n box.rho = np.zeros((box.N_g, box.N_g, box.N_g))\r\n \r\n for i in range(len(box.x)):\r\n x_cell = int(box.x[i] // 1) % N_g\r\n y_cell = int(box.y[i] // 1) % N_g\r\n z_cell = int(box.z[i] // 1) % N_g\r\n \r\n #print (x_cell,y_cell,z_cell)\r\n \r\n x_cell_f = (x_cell + 1) % N_g\r\n y_cell_f = (y_cell + 1) % N_g\r\n z_cell_f = (z_cell + 1) % N_g\r\n \r\n #print (x_cell_f,y_cell_f,z_cell_f)\r\n \r\n dx = (box.x[i] - x_cell) % N_g\r\n dy = (box.y[i] - y_cell) % N_g\r\n dz = (box.z[i] - z_cell) % N_g\r\n \r\n #print (dx,dy,dz)\r\n \r\n tx = 1 - dx\r\n ty = 1 - dy\r\n tz = 1 - dz\r\n \r\n #print (tx,ty,tz)\r\n \r\n box.rho[x_cell, y_cell, z_cell] += box.m[i]*tx*ty*tz\r\n \r\n box.rho[x_cell_f, y_cell, z_cell] += box.m[i]*dx*ty*tz\r\n box.rho[x_cell, y_cell_f, z_cell] += box.m[i]*tx*dy*tz\r\n box.rho[x_cell, y_cell, z_cell_f] += box.m[i]*tx*ty*dz\r\n \r\n box.rho[x_cell_f, y_cell_f, z_cell] += box.m[i]*dx*dy*tz\r\n box.rho[x_cell_f, y_cell, z_cell_f] += box.m[i]*dx*ty*dz\r\n box.rho[x_cell, y_cell_f, z_cell_f] += box.m[i]*tx*dy*dz \r\n \r\n box.rho[x_cell_f, y_cell_f, z_cell_f] += box.m[i]*dx*dy*dz", "title": "" }, { "docid": "4d90a504e1896631acbb00d08a18ab55", "score": "0.4893819", "text": "def __call__(self, params):\n # Parameters array is set out as follows\n amplitude, frequency, width, splitting, inclination, background = params\n\n # First calculate mode height\n height = 2.0 * amplitude **2 / (np.pi 
* width)\n\n # Set up model array\n m = np.zeros(len(freq))\n epsilon = splitting/width\n X = (self.freq-(frequency+0.5*splitting))/(0.5*width)\n denominator = 2.0*epsilon/(1.-epsilon**2+X**2)\n \n # to get flat top rather than drop in middle of model we must make sure that arctan(*)>0\n m += height/(2.0*epsilon)*np.arctan(np.abs(denominator))\n \n return m + background #modelled flattened lorentzian + background", "title": "" }, { "docid": "f0373e18ae0f411d2cb1aa63048c4d5f", "score": "0.48928544", "text": "def getParams(self):\n # Change thresholds \n self.params.minThreshold = cv2.getTrackbarPos('minThresh','bars')\n self.params.maxThreshold = cv2.getTrackbarPos('maxThresh','bars')\n self.params.thresholdStep = cv2.getTrackbarPos('step','bars')\n\n # Filter by Area. \n self.params.filterByArea = True\n self.params.minArea = cv2.getTrackbarPos('minArea','bars')\n self.params.maxArea = cv2.getTrackbarPos('maxArea','bars')\n\n # Circularity\n self.params.filterByCircularity = True\n self.params.minCircularity = cv2.getTrackbarPos('minCircularity','bars')/100.\n self.params.maxCircularity = cv2.getTrackbarPos('maxCircularity','bars')/100.\n\n # Convexity\n self.params.filterByConvexity = True\n self.params.minConvexity = cv2.getTrackbarPos('minConvexity','bars')/100.\n self.params.maxConvexity = cv2.getTrackbarPos('maxConvexity','bars')/100.\n\n # Inertia\n self.params.filterByInertia = False\n #print self.params.maxInertiaRatio\n #self.params.minInertiaRatio = cv2.getTrackbarPos('minIertia','bars')/100.\n #self.params.maxInertiaRatio = cv2.getTrackbarPos('maxIertia','bars')/100.\n\n # Distance\n self.params.minDistBetweenBlobs = cv2.getTrackbarPos('minDistance','bars')", "title": "" }, { "docid": "82c8a018c71f89bb4413fbb02bb2a44b", "score": "0.48915738", "text": "def blended_grid(self):\n \n nc = Dataset('DATA/blended_grid_new.nc', 'r') \n #print nc\n \n lon = nc.variables['lon_rho'][:]\n lat = nc.variables['lat_rho'][:]\n mask = nc.variables['mask_rho'][:]\n x_bg = np.zeros_like(lon)\n y_bg = np.zeros_like(lat)\n for i in range(lon.shape[0]):\n for j in range(lon.shape[1]):\n (y_bg[i,j],x_bg[i,j])=utm.from_latlon(lat[i,j],lon[i,j])[0:2]\n \n #### subset Blend_grid for interpolation ####\n def findNearset(x,y,lon,lat):\n \"\"\"\n Return the J,I indices of the nearst grid cell to x,y\n \"\"\"\n \n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n \n return np.argwhere(dist==dist.min())\n\n #### Step 1) subset for SUNTANS interpolation\n NE = (29.868007, -94.175217) \n SW = (28.361303, -95.073081) \n \n #### searching for the index of the subset domain for interpolation\n ind = findNearset(SW[1], SW[0], lon, lat)\n J0=ind[0][0] \n I0=ind[0][1] \n \n ind = findNearset(NE[1], NE[0], lon, lat)\n J1=ind[0][0] +22 #-2\n I1=ind[0][1]\n \n self.yss = y_bg[J0:J1,I0:I1] ##subset x,y\n self.xss = x_bg[J0:J1,I0:I1]\n self.maskss = mask[J0:J1,I0:I1] \n \n #### Step 2) subset for ROMS interpolation\n #### Prepare for interpolate original ROMS velocity\n SW0 = (self.lat_rho[0,0], self.lon_rho[0,0])\n NE0 = (self.lat_rho[-1,-1], self.lon_rho[-1,-1])\n ind0 = findNearset(SW0[1], SW0[0], lon, lat)\n JJ0=ind0[0][0] \n II0=ind0[0][1] \n \n ind0 = findNearset(NE0[1], NE0[0], lon, lat)\n JJ1=ind0[0][0] + 6\n II1=ind0[0][1]\n \n #self.yss0 = y_bg[JJ0:JJ1,II0:II1] ##subset x,y for ROMS velocity\n #self.xss0 = x_bg[JJ0:JJ1,II0:II1]\n #self.maskss0 = mask[JJ0:JJ1,II0:II1]\n #self.lon0 = lon[JJ0:JJ1,II0:II1]\n #self.lat0 = lat[JJ0:JJ1,II0:II1]\n \n self.yss0 = y_bg ##subset x,y for ROMS velocity\n self.xss0 = x_bg\n 
self.maskss0 = mask\n self.lon0 = lon\n self.lat0 = lat\n \n self.lon_rho0 = nc.variables['lon_rho'][:]\n self.lat_rho0 = nc.variables['lat_rho'][:]\n self.angle_rho0 = nc.variables['angle_rho'][:]\n self.pm0 = nc.variables['pm'][:]\n self.pn0 = nc.variables['pn'][:]\n self.lon_vert0 = nc.variables['lon_vert'][:]\n self.lat_vert0 = nc.variables['lat_vert'][:]\n self.h0 = nc.variables['h'][:]\n\n self.lat_u = nc.variables['lat_u'][:]\n self.lon_u = nc.variables['lon_u'][:]\n self.mask_u = nc.variables['mask_u'][:]\n self.lat_v = nc.variables['lat_v'][:]\n self.lon_v = nc.variables['lon_v'][:]\n self.mask_v = nc.variables['mask_v'][:]\n self.angle = nc.variables['angle'][:]\n \n ## subset weight variables ##\n #self.w_sun = self.w_sun[JJ0:JJ1,II0:II1]\n #self.w_roms = self.w_roms[JJ0:JJ1,II0:II1]\n \n #self.JJJ0 = J0-JJ0\n #self.JJJ1 = J1-JJ0\n \n #self.III0 = I0-II0\n #self.III1 = I1-II0\n\n self.JJJ0 = J0\n self.JJJ1 = J1\n \n self.III0 = I0\n self.III1 = I1 \n #pdb.set_trace()", "title": "" }, { "docid": "aa685a9d0e1d8106a9ccfc77ef14b603", "score": "0.48910135", "text": "def __init__(self, ox, oy, resolution, rr, fc_x, fc_y, tc_x, tc_y):\n\n self.resolution = resolution # get resolution of the grid\n self.rr = rr # robot radis\n self.min_x, self.min_y = 0, 0\n self.max_x, self.max_y = 0, 0\n self.obstacle_map = None\n self.x_width, self.y_width = 0, 0\n self.motion = self.get_motion_model() # motion model for grid search expansion\n self.calc_obstacle_map(ox, oy)\n\n self.fc_x = fc_x\n self.fc_y = fc_y\n self.tc_x = tc_x\n self.tc_y = tc_y\n#model\n ############you could modify the setup here for different aircraft models (based on the lecture slide) ##########################\n self.C_F = C_F\n self.Delta_F = d_F\n self.C_T = C_T\n self.Delta_T = d_T\n self.C_C = 10\n \n# self.Delta_F_A = 2 # additional fuel\n# self.Delta_T_A = 5 # additional time \n \n \n\n self.costPerGrid = self.C_F * self.Delta_F + self.C_T * self.Delta_T + self.C_C", "title": "" }, { "docid": "8eafdd54c26511a2903fe1210d2d6a11", "score": "0.48901153", "text": "def test_mom_and_cice_input_grids(self):\n\n helper = ExpTestHelper('minimal_01deg_jra55_ryf_control')\n fo = nc.Dataset(os.path.join(helper.mom_input, 'ocean_hgrid.nc'))\n fi = nc.Dataset(os.path.join(helper.cice_input, 'grid.nc'))\n with nc.Dataset(os.path.join(helper.mom_input, 'ocean_mask.nc')) as f:\n mask = 1 - f.variables['mask'][:]\n\n x = fo.variables['x'][:, :]\n y = fo.variables['y'][:, :]\n\n # t-cell centres\n xo_t = x[1::2, 1::2]\n yo_t = y[1::2, 1::2]\n\n xi_t = np.rad2deg(fi.variables['tlon'][:])\n yi_t = np.rad2deg(fi.variables['tlat'][:])\n\n assert np.allclose(xi_t, xo_t, atol=0, rtol=1e-15)\n assert np.allclose(yi_t, yo_t, atol=0, rtol=1e-15)\n\n # u-cell centres\n xo_u = x[2::2, 2::2]\n yo_u = y[2::2, 2::2]\n\n xi_u = np.rad2deg(fi.variables['ulon'][:])\n yi_u = np.rad2deg(fi.variables['ulat'][:])\n\n #assert np.allclose(xi_u, xo_u, atol=0, rtol=1e-15)\n #assert np.allclose(yi_u, yo_u, atol=0, rtol=1e-15)\n\n #del x, y, xo_t, yo_t, xi_t, yi_t, xo_u, yo_u, xi_u, yi_u\n\n # cell area\n # Add up areas, going clockwise from bottom left.\n area = fo.variables['area'][:]\n areao_t = area[0::2, 0::2] + area[1::2, 0::2] + area[1::2, 1::2] + area[0::2, 1::2]\n\n # These need to wrap around the globe. 
Copy ocn_area and\n # add an extra column at the end.\n area_ext = np.append(area[:], area[:, 0:1], axis=1)\n areao_u = area_ext[0::2, 1::2] + area_ext[1::2, 1::2] + area_ext[1::2, 2::2] + area_ext[0::2, 2::2]\n\n areai_t = fi.variables['tarea'][:]\n areai_u = fi.variables['uarea'][:]\n\n assert np.array_equal(areao_t, areai_t)\n assert np.array_equal(areao_u, areai_u)\n\n # cell edge length\n # Grab the Northern and Eastern edge distances, not the centre distance,\n # this is what cice should be using.\n dx = fo.variables['dx'][:]\n dy = fo.variables['dy'][:]\n dxo_t = dx[2::2, ::2] + dx[2::2, 1::2]\n dyo_t = dy[::2, 2::2] + dy[1::2, 2::2]\n\n dxi_t = fi.variables['htn'][:] / 100\n dyi_t = fi.variables['hte'][:] / 100\n\n assert np.allclose(dxi_t, dxo_t, atol=0, rtol=1e-15)\n assert np.allclose(dyi_t, dyo_t, atol=0, rtol=1e-15)\n\n import pdb\n pdb.set_trace()\n\n # angle wrt East\n angleo_t = fo.variables['angle_dx'][1::2, 1::2]\n angleo_u = np.ma.array(fo.variables['angle_dx'][2::2, 2::2], mask=mask)\n\n anglei_t = np.rad2deg(fi.variables['angleT'][:])\n anglei_u = np.ma.array(np.rad2deg(fi.variables['angle'][:]), mask=mask)\n\n #import pdb\n #pdb.set_trace()\n\n assert np.allclose(anglei_t, angleo_t, atol=0, rtol=1e-15)\n # Compare U using the land mask because MOM sets some land only longitudes to zero.\n assert np.ma.allclose(anglei_u, angleo_u, atol=0, rtol=1e-15)\n\n fo.close()\n fi.close()", "title": "" }, { "docid": "a947a6f52bf08faceccbbc5dbe3c37f6", "score": "0.48877594", "text": "def LV_PP_canon():\n xm = -3\n xM = 2.5\n ym = -3\n yM = 2.5\n \n X, Y = np.meshgrid(np.arange(xm-1, xM+1, .15), np.arange(ym-1, yM+1, .15))\n \n U = (a-np.exp(Y))\n V = (np.exp(X)-b)\n \n return (X, Y, U, V, xm,xM,ym,yM)", "title": "" }, { "docid": "757e947bc78e05131986a47f81b1568e", "score": "0.48859343", "text": "def rescale_arch_params(self, model):\n\n def comp_rescale_value(old_weights, new_weights, index, block_id, branch_id):\n old_exp_sum = old_weights.exp().sum()\n new_drop_arch_params = [new_weights[block_id][branch_id][h_idx\n ] for h_idx in index]\n new_exp_sum = torch.stack(new_drop_arch_params).exp().sum()\n rescale_value = torch.log(old_exp_sum / new_exp_sum)\n\n return rescale_value\n\n if hasattr(model, 'module'):\n model = model.module\n \n alpha_head_index = model.alpha_head_index\n alpha_head_weights_drop = model.alpha_head_weights_drop\n alpha_stack_index = model.alpha_stack_index\n alpha_stack_weights_drop = model.alpha_stack_weights_drop\n\n # rescale the arch params for head layers\n for i, (alpha_head_weights_drop_block, alpha_head_index_block) in enumerate(\n zip(alpha_head_weights_drop, alpha_head_index)):\n for j, (alpha_head_weights_drop_branch, alpha_head_index_branch) in enumerate(\n zip(alpha_head_weights_drop_block, alpha_head_index_block)):\n rescale_value = comp_rescale_value(alpha_head_weights_drop_branch,\n model.alpha_head_weights,\n alpha_head_index_branch, i, j)\n for idx in alpha_head_index_branch:\n model.alpha_head_weights[i].data[j][idx] += rescale_value\n\n # rescale the arch params for stack layers\n for i, (alpha_stack_weights_drop_block, alpha_stack_index_block) in enumerate(\n zip(alpha_stack_weights_drop, alpha_stack_index)):\n for j, (alpha_stack_weights_drop_branch, alpha_stack_index_branch) in enumerate(\n zip(alpha_stack_weights_drop_block, alpha_stack_index_block)):\n rescale_value = comp_rescale_value(alpha_stack_weights_drop_branch,\n model.alpha_stack_weights,\n alpha_stack_index_branch, i, j)\n for idx in alpha_stack_index_branch:\n 
model.alpha_stack_weights[i].data[j][idx] += rescale_value", "title": "" }, { "docid": "11d8ad6b38ac9cf3b3fe1f9ac4fe7d0a", "score": "0.4883358", "text": "def calc_commanded_azel():\n observer.date = ephem.now()\n body.compute(observer)\n return body.az*convert, body.alt*convert", "title": "" }, { "docid": "872bd17bc5f6562280dd634e3b002708", "score": "0.4881484", "text": "def compute_parameters(self): ################ SOLO AL PRINCIPIO\n # Auto mode\n # Compute binarization threshold from percentile\n self.binarize_threshold.value = int(\n round(self.percentile_stdev / 16.0 + 5, 0))\n\n # Compute minimal area from camera height.\n # Divided into two lines, because it is not linear.\n if self.camera_height < 2400:\n min_area = 110\n elif 2400 <= self.camera_height < 2850:\n min_area = -0.0875 * self.camera_height + 320\n min_area = 5 * round(min_area / 5)\n elif 2850 <= self.camera_height <= 3200:\n min_area = -0.0571 * self.camera_height + 223\n min_area = 5 * round(min_area / 5)\n else:\n min_area = 40\n self.min_area.value = int(min_area)\n print('\tDRIVERRRR ---> self.min_area.value = ', self.min_area.value)\n # Adapt detect threshold from middle pixel\n # At 2400 => 9, at 3200 => 5\n self.detect_threshold.value = int(-(self.camera_height/266) + 17)\n\n print(\"[Driver] Estimated parameters. Min area: {} | \" \\\n \"Detect Threshold: {} | \" \\\n \"Binarize Threshold: {}\".format(\n self.min_area.value,\n self.detect_threshold.value,\n self.binarize_threshold.value\n ))", "title": "" }, { "docid": "d2d222219cf0745cc5ce991e62812379", "score": "0.48785454", "text": "def get_obstacles(self, nx, ny):\n obstacle_field = (self.env_field == np.inf).astype(int)\n resized_obstacle_field = zoom(obstacle_field, (nx / obstacle_field.shape[0], ny / obstacle_field.shape[1]))\n return resized_obstacle_field", "title": "" }, { "docid": "255f11b48f1f2ae0c7f85099ce078ffc", "score": "0.48699915", "text": "def _format_outputs(self, x, img_h, img_w):\n\n b, c = x.shape\n h, w = 7, 7\n # B * (H * W * (num_anchors * 5 + num_classes)) --> B * H * W * (num_anchors * 5 + num_classes)\n x = x.view(b, h, w, self.num_anchors * 5 + self.num_classes)\n # Classification scores\n b_scores = x[..., -self.num_classes:].view(b, h * w, -1)\n # Repeat for anchors to keep compatibility across YOLO versions\n b_scores = b_scores.unsqueeze(2).repeat_interleave(self.num_anchors, dim=2)\n # B * H * W * (num_anchors * 5 + num_classes) --> B * H * W * num_anchors * 5\n x = x[..., :self.num_anchors * 5].view(b, h, w, self.num_anchors, 5)\n # Cell offset\n c_x = torch.arange(0, w, dtype=torch.float) * img_w / w\n c_y = torch.arange(0, h, dtype=torch.float) * img_h / h\n # Box coordinates\n b_x = (torch.sigmoid(x[..., 0]) + c_x.view(1, 1, -1, 1)).view(b, -1, self.num_anchors)\n b_y = (torch.sigmoid(x[..., 1]) + c_y.view(1, -1, 1, 1)).view(b, -1, self.num_anchors)\n # B * H * W * num_anchors * (5 + num_classes) --> B * (H * W) * num_anchors * (5 + num_classes)\n # x = x.view(b, h * w, self.num_anchors, -1)\n b_w = torch.sigmoid(x[..., 2]).view(b, -1, self.num_anchors)\n b_h = torch.sigmoid(x[..., 3]).view(b, -1, self.num_anchors)\n # B * (H * W) * num_anchors * 4\n b_coords = torch.stack((b_x, b_y, b_w, b_h), dim=3)\n # Objectness\n b_o = torch.sigmoid(x[..., 4]).view(b, -1, self.num_anchors)\n\n return b_coords, b_o, b_scores", "title": "" }, { "docid": "6d6934d01cc5c0ec697d10c1b49b6c54", "score": "0.48605704", "text": "def colorization_inference(model, img_path):\n # # cfg = model.cfg\n # # device = next(model.parameters()).device # 
model device\n #\n # # prepare data\n # data = torch.load(img, map_location='cpu')\n # x, y, res, out = data.values()\n # x_ = x.unsqueeze(0).cuda()\n #\n # # forward the model\n # with torch.no_grad():\n # results = model.forward(x_).squeeze()\n torch.cuda.empty_cache()\n orig_image = PIL.Image.open(img_path).convert('RGB')\n render_factor = 10\n render_base = 16\n render_sz = render_factor * render_base\n targ_sz = (render_sz, render_sz)\n model_image = orig_image.resize(targ_sz, resample=PIL.Image.BILINEAR).convert('LA').convert('RGB')\n x = pil2tensor(model_image, np.float32)\n x = x.cuda()\n\n x.div_(255)\n\n # imagenet的均值和方差\n mean = torch.tensor([0.4850, 0.4560, 0.4060]).cuda()\n std = torch.tensor([0.2290, 0.2240, 0.2250]).cuda()\n\n x_ = norm(x, mean, std)\n\n\n model.eval()\n with torch.no_grad():\n results = model.forward(x_.unsqueeze(0)).squeeze().cpu()\n\n # results = torch.load('/home/SENSETIME/renqin/PycharmProjects/mmediting-master/res.pt').squeeze(0)\n # results 比 results_1小,理论上应该相等\n\n results = denorm(results.detach().cpu(), mean.cpu(), std.cpu())\n\n results = results.float().clamp(min=0, max=1)\n\n out = (results.numpy()*255).astype('uint8').transpose(1, 2, 0)\n out = Image.fromarray(out)\n\n # return PIL.fromarray(out)\n\n raw_color = out.resize(orig_image.size, resample=PIL.Image.BILINEAR)\n # import numpy as np\n print(np.array(raw_color))\n final = post_process(raw_color, orig_image)\n print(np.array(final))\n\n return final", "title": "" }, { "docid": "f0aacbc42f24fdd5d51bb1619f75bc18", "score": "0.48591608", "text": "def get_grid_weight(tik_instance, grid_w, grid_h, rois_start_w, rois_start_h,\n height, width, dtype):\n x_lo_w = tik_instance.Tensor(\n dtype, [128], name=\"x_lo_w\", scope=tik.scope_ubuf)\n x_hi_w = tik_instance.Tensor(\n dtype, [128], name=\"x_hi_w\", scope=tik.scope_ubuf)\n y_lo_w = tik_instance.Tensor(\n dtype, [128], name=\"y_lo_w\", scope=tik.scope_ubuf)\n y_hi_w = tik_instance.Tensor(\n dtype, [128], name=\"_lo_w\", scope=tik.scope_ubuf)\n x_lo = tik_instance.Tensor(\n \"int32\", [128], name=\"x_lo\", scope=tik.scope_ubuf)\n x_hi = tik_instance.Tensor(\n \"int32\", [128], name=\"x_hi\", scope=tik.scope_ubuf)\n y_lo = tik_instance.Tensor(\n \"int32\", [128], name=\"y_lo\", scope=tik.scope_ubuf)\n y_hi = tik_instance.Tensor(\n \"int32\", [128], name=\"y_hi\", scope=tik.scope_ubuf)\n\n raw_x = tik_instance.Tensor(\n dtype, [128], name=\"raw_x\", scope=tik.scope_ubuf)\n raw_y = tik_instance.Tensor(\n dtype, [128], name=\"raw_y\", scope=tik.scope_ubuf)\n x_output = tik_instance.Tensor(\n dtype, [128], name=\"x_output\", scope=tik.scope_ubuf)\n y_output = tik_instance.Tensor(\n dtype, [128], name=\"y_output\", scope=tik.scope_ubuf)\n tmp_fp16 = tik_instance.Tensor(\n \"float16\", [128], name=\"tmp_fp16\", scope=tik.scope_ubuf)\n\n\n const_value_0_127 = tik_instance.Tensor(\n dtype, (128,), name=\"const_value_0_127\", scope=tik.scope_ubuf)\n if dtype == \"float32\":\n dtype_num = 1\n else:\n dtype_num = 2\n vconv_f322s32f_suppot = tbe_platform.cce_conf.intrinsic_check_support( \\\n \"Intrinsic_vconv\", \"f322s32f\")\n if vconv_f322s32f_suppot is False or dtype == \"float16\":\n const_value_0_127_int = tik_instance.Tensor(\n \"int32\", (128,), name=\"const_value_0_127_int\",\n scope=tik.scope_ubuf)\n with tik_instance.for_range(0, 128) as i:\n const_value_0_127_int[i].set_as(i)\n if dtype == \"float32\":\n const_value_0_127_float = tik_instance.Tensor(\n \"float16\", (128,),\n name=\"const_value_0_127_float\",\n scope=tik.scope_ubuf)\n 
tik_instance.vec_conv(64, \"\", const_value_0_127_float,\n const_value_0_127_int, 2, 4, 8, 1.0)\n tik_instance.vec_conv(64, \"\", const_value_0_127,\n const_value_0_127_float,\n 2, 8, 4)\n else:\n tik_instance.vec_conv(64, \"\", const_value_0_127,\n const_value_0_127_int,\n 2, 4, 8, 1.0)\n\n else:\n with tik_instance.for_range(0, 128) as i:\n const_value_0_127[i] = i\n\n\n grid_w_vector = tik_instance.Tensor(\n dtype, [128], name=\"grid_w_vector\", scope=tik.scope_ubuf)\n grid_h_vector = tik_instance.Tensor(\n dtype, [128], name=\"grid_h_vector\", scope=tik.scope_ubuf)\n\n tik_instance.vec_muls(64 * dtype_num, grid_w_vector, const_value_0_127,\n grid_w, 2 // dtype_num, 8, 8)\n tik_instance.vec_muls(64 * dtype_num, grid_h_vector, const_value_0_127,\n grid_h, 2 // dtype_num, 8, 8)\n # fp16 scalar floating-point operation is not allowed in aicore fucntion\n # fl32 scalar floating-point operation is not allowed on mini\n if vconv_f322s32f_suppot is False or dtype == \"float16\":\n point_05 = tik_instance.Scalar(dtype, init_value=0.5)\n point_05_tensor = tik_instance.Tensor(\n dtype, [1], name=\"point_05_tensor\", scope=tik.scope_ubuf)\n tik_instance.vec_dup(1, point_05_tensor, 0.5, 1, 0)\n tik_instance.vec_muls(1, point_05_tensor, point_05_tensor, grid_w,\n 1, 8, 8)\n tik_instance.vec_adds(1, point_05_tensor, point_05_tensor, rois_start_w,\n 1, 8, 8)\n point_05.set_as(point_05_tensor[0])\n tik_instance.vec_adds(64 * dtype_num, raw_x, grid_w_vector, point_05,\n 2 // dtype_num, 8, 8)\n tik_instance.vec_dup(1, point_05_tensor, 0.5, 1, 0)\n\n tik_instance.vec_muls(1, point_05_tensor, point_05_tensor, grid_h,\n 1, 8, 8)\n tik_instance.vec_adds(1, point_05_tensor, point_05_tensor, rois_start_h,\n 1, 8, 8)\n point_05.set_as(point_05_tensor[0])\n tik_instance.vec_adds(64 * dtype_num, raw_y, grid_h_vector,\n point_05, 2 // dtype_num, 8, 8)\n # fp32 besides mini\n else:\n half_grid = 0.5 * grid_w + rois_start_w\n tik_instance.vec_adds(64 * dtype_num, raw_x, grid_w_vector,\n half_grid, 2 // dtype_num, 8, 8)\n half_grid = 0.5 * grid_h + rois_start_h\n tik_instance.vec_adds(64 * dtype_num, raw_y, grid_h_vector,\n half_grid, 2 // dtype_num, 8, 8)\n\n const_zero = tik_instance.Tensor(\n dtype, [64 * dtype_num], name=\"const_zero\", scope=tik.scope_ubuf)\n\n tik_instance.vec_dup(64 * dtype_num, const_zero, 0, 1, 0)\n\n # if (y <= 0) y = 0;\n # if (x <= 0) x = 0;\n tik_instance.vec_max(64 * dtype_num, x_output, raw_x, const_zero,\n 2 // dtype_num, 8, 8, 0)\n tik_instance.vec_max(64 * dtype_num, y_output, raw_y, const_zero,\n 2 // dtype_num, 8, 8, 0)\n\n # y_low = (int)y;\n # x_low = (int)x;\n if vconv_f322s32f_suppot is False and dtype == \"float32\":\n tik_instance.vec_conv(64, \"\", tmp_fp16, x_output, 2, 4, 8)\n tik_instance.vec_conv(64, \"floor\", x_lo, tmp_fp16, 2, 8, 4)\n tik_instance.vec_conv(64, \"\", tmp_fp16, y_output, 2, 4, 8)\n tik_instance.vec_conv(64, \"floor\", y_lo, tmp_fp16, 2, 8, 4)\n else:\n tik_instance.vec_conv(64, \"floor\", x_lo, x_output, 2,\n 8, 8 // dtype_num)\n tik_instance.vec_conv(64, \"floor\", y_lo, y_output, 2,\n 8, 8 // dtype_num)\n\n # y_high = y_low + 1;\n # x_high = x_low + 1;\n const_one = tik_instance.Tensor(\n \"int32\", [64], name=\"const_one\", scope=tik.scope_ubuf)\n tik_instance.vec_dup(64, const_one, 1, 1, 0)\n # 128 int32 4B /256\n tik_instance.vec_add(64, x_hi, x_lo, const_one, 2, 8, 8, 0)\n tik_instance.vec_add(64, y_hi, y_lo, const_one, 2, 8, 8, 0)\n\n const_value_fp32 = tik_instance.Tensor(\n dtype, [64 * dtype_num], name=\"const_value_fp32\", 
scope=tik.scope_ubuf)\n const_value_int32 = tik_instance.Tensor(\n \"int32\", [64], name=\"const_value_int32\", scope=tik.scope_ubuf)\n\n tik_instance.vec_dup(64 * dtype_num, const_value_fp32, width - 1, 1, 0)\n tik_instance.vec_dup(64, const_value_int32, width - 1, 1, 0)\n tik_instance.vec_min(64, x_lo, x_lo, const_value_int32, 2, 8, 8, 0)\n tik_instance.vec_min(64, x_hi, x_hi, const_value_int32, 2, 8, 8, 0)\n tik_instance.vec_min(64 * dtype_num, x_output, x_output, const_value_fp32,\n 2 // dtype_num, 8, 8, 0)\n\n tik_instance.vec_dup(64, const_value_int32, height - 1, 1, 0)\n tik_instance.vec_dup(64 * dtype_num, const_value_fp32, height - 1, 1, 0)\n tik_instance.vec_min(64, y_lo, y_lo, const_value_int32, 2, 8, 8, 0)\n tik_instance.vec_min(64, y_hi, y_hi, const_value_int32, 2, 8, 8, 0)\n tik_instance.vec_min(64 * dtype_num, y_output, y_output, const_value_fp32,\n 2 // dtype_num, 8, 8, 0)\n\n # ly = y - y_low;\n # lx = x - x_low;\n tmp_fp32 = tik_instance.Tensor(\n dtype, [128], name=\"tmp_fp32\", scope=tik.scope_ubuf)\n\n if vconv_f322s32f_suppot is False and dtype == \"float32\":\n tik_instance.vec_conv(64, \"\", tmp_fp16, x_lo, 2, 4, 8, 1.0)\n tik_instance.vec_conv(64, \"\", tmp_fp32, tmp_fp16, 2, 8, 4)\n else:\n # float16 add 1.0 float32 can not add 1.0\n if dtype == \"float32\":\n tik_instance.vec_conv(64, \"\", tmp_fp32, x_lo, 2, 8, 8)\n else:\n tik_instance.vec_conv(64, \"\", tmp_fp32, x_lo, 2,\n 8 // dtype_num, 8, 1.0)\n\n tik_instance.vec_sub(64 * dtype_num, x_lo_w, x_output, tmp_fp32,\n 2 // dtype_num, 8, 8, 8)\n\n if vconv_f322s32f_suppot is False and dtype == \"float32\":\n tik_instance.vec_conv(64 * dtype_num, \"\", tmp_fp16, y_lo, 2,\n 4, 8, 1.0)\n tik_instance.vec_conv(64 * dtype_num, \"\", tmp_fp32, tmp_fp16, 2,\n 8, 4)\n else:\n if dtype == \"float32\":\n tik_instance.vec_conv(64, \"\", tmp_fp32, y_lo, 2, 8, 8)\n else:\n tik_instance.vec_conv(64, \"\", tmp_fp32, y_lo, 2,\n 8 // dtype_num, 8, 1.0)\n\n tik_instance.vec_sub(64 * dtype_num, y_lo_w, y_output, tmp_fp32,\n 2 // dtype_num, 8, 8, 8)\n\n # hy = 1. - ly;\n # hx = 1. 
- lx;\n tik_instance.vec_dup(64 * dtype_num, const_value_fp32, 1.0, 1, 0)\n tik_instance.vec_sub(64 * dtype_num, x_hi_w, const_value_fp32, x_lo_w,\n 2 // dtype_num, 8, 0, 8)\n tik_instance.vec_sub(64 * dtype_num, y_hi_w, const_value_fp32, y_lo_w,\n 2 // dtype_num, 8, 0, 8)\n\n return x_lo_w, x_hi_w, y_lo_w, y_hi_w, x_lo, x_hi, y_lo, y_hi, raw_x, raw_y", "title": "" }, { "docid": "e7a534b69bf13d874888c022c9be0c6e", "score": "0.48559472", "text": "def aspect_mode(self):\n if self.vlc_aspect_mode == 'stretch':\n window_ratio=self.vlc_window_width/self.vlc_window_height\n self.vlcdriver.sendline('ratio '+ str(self.vlc_window_width)+':'+str(self.vlc_window_height))\n return 'normal',''\n \n elif self.vlc_aspect_mode == 'fill':\n self.vlcdriver.sendline('crop '+ str(self.vlc_window_width)+':'+str(self.vlc_window_height))\n return 'normal',''\n \n elif self.vlc_aspect_mode == 'letterbox':\n # default vlc behavior\n return 'normal',''\n \n elif self.vlc_aspect_mode == 'vlc':\n if self.vlc_aspect_ratio != '' or self.vlc_crop!= '':\n if self.vlc_crop!= '':\n self.vlcdriver.sendline('crop '+ self.vlc_crop)\n \n if self.vlc_aspect_ratio != '':\n self.vlcdriver.sendline('ratio '+ self.vlc_aspect_ratio)\n return 'normal',''\n else:\n return 'error', 'crop or aspect mode not specified for vlc option'\n else:\n return 'error','Aspect Mode cannot be blank '+ self.vlc_aspect_mode", "title": "" }, { "docid": "eaf6b4a0fda42d35c490f066d46770c2", "score": "0.48543155", "text": "def _update_dims(self):\n\n self.w = self.right - self.left\n self.h = self.bottom - self.top", "title": "" }, { "docid": "a9a8ac15dd303b1a619c87b9ce4dd7ac", "score": "0.4850025", "text": "def create_grid_infos(self, x, y, z, **kwargs):\n self.grid_lower_bound = torch.Tensor([cfg[0] for cfg in [x, y, z]])\n self.grid_interval = torch.Tensor([cfg[2] for cfg in [x, y, z]])\n self.grid_size = torch.Tensor([(cfg[1] - cfg[0]) / cfg[2]\n for cfg in [x, y, z]])", "title": "" }, { "docid": "9117489300976e9a673df4cc08f256ed", "score": "0.4848271", "text": "def test(model):\n digit = cv2.imread('../assets/temp.png', 0)\n digit = digit.reshape((1, 28, 28)).astype('float32')\n digit = digit / 255\n\n # Predict with model:\n input_data = np.array([digit])\n prediction = model.predict(input_data)\n print(prediction)\n\n return", "title": "" }, { "docid": "e2d03b46007d854ad85bc6e4f351b377", "score": "0.48462823", "text": "def _eval_predictions(self):\n\n if \"segm\" in self._tasks and \"mesh\" in self._tasks:\n results = evaluate_for_pix3d(\n self._predictions,\n self._coco_api,\n self._metadata,\n self._filter_iou,\n mesh_models=self._mesh_models,\n device=self._device,\n )\n\n # print results\n self._logger.info(\"Box AP %.5f\" % (results[\"box_ap@%.1f\" % 0.5]))\n self._logger.info(\"Mask AP %.5f\" % (results[\"mask_ap@%.1f\" % 0.5]))\n self._logger.info(\"Mesh AP %.5f\" % (results[\"mesh_ap@%.1f\" % 0.5]))\n self._results[\"shape\"] = results", "title": "" }, { "docid": "3b041929685b5f2138c9f2b5b68d2142", "score": "0.48302644", "text": "def test_artifacts():\n mlt_grid = np.arange(-12, 12, 24/360)[None, :] * np.ones((60, 1))\n mlat_grid = np.arange(30, 90)[:, None] * np.ones((1, 360))\n corr = rbf_inversion.get_artifacts(np.ones(1) * 180, '9', mlt_grid=mlt_grid, mlat_grid=mlat_grid)[0]\n artifacts = np.load(config.artifact_file)\n assert np.allclose(np.roll(artifacts['9'], 180, axis=1), corr)\n\n corr = rbf_inversion.get_artifacts(np.ones(1) * 179, '7')[0]\n assert np.allclose(np.roll(artifacts['7'], 180, axis=1)[:, ::2], corr)", "title": "" }, { 
"docid": "d02230390d88d91523d965f49f944aa8", "score": "0.48290104", "text": "def alt_get_magnetic_field(model, efield):\n grid = efield.grid\n\n # Carry out the curl (^ corresponds to differentiation axis):\n # H_x = (E_z^1 - E_y^2)\n e3d_hx = (np.diff(efield.fz, axis=1)/efield.grid.h[1][None, :, None] -\n np.diff(efield.fy, axis=2)/efield.grid.h[2][None, None, :])\n e3d_hx[0, :, :] = e3d_hx[-1, :, :] = 0\n\n # H_y = (E_x^2 - E_z^0)\n e3d_hy = (np.diff(efield.fx, axis=2)/efield.grid.h[2][None, None, :] -\n np.diff(efield.fz, axis=0)/efield.grid.h[0][:, None, None])\n e3d_hy[:, 0, :] = e3d_hy[:, -1, :] = 0\n\n # H_z = (E_y^0 - E_x^1)\n e3d_hz = (np.diff(efield.fy, axis=0)/efield.grid.h[0][:, None, None] -\n np.diff(efield.fx, axis=1)/efield.grid.h[1][None, :, None])\n e3d_hz[:, :, 0] = e3d_hz[:, :, -1] = 0\n\n # Divide by averaged relative magnetic permeability, if not not None.\n if model.mu_r is not None:\n\n # Get volume-averaged values.\n vmodel = emg3d.models.VolumeModel(model, efield)\n\n # Plus and minus indices.\n ixm = np.r_[0, np.arange(grid.shape_cells[0])]\n ixp = np.r_[np.arange(grid.shape_cells[0]), grid.shape_cells[0]-1]\n iym = np.r_[0, np.arange(grid.shape_cells[1])]\n iyp = np.r_[np.arange(grid.shape_cells[1]), grid.shape_cells[1]-1]\n izm = np.r_[0, np.arange(grid.shape_cells[2])]\n izp = np.r_[np.arange(grid.shape_cells[2]), grid.shape_cells[2]-1]\n\n # Average mu_r for dual-grid.\n zeta_x = (vmodel.zeta[ixm, :, :] + vmodel.zeta[ixp, :, :])/2.\n zeta_y = (vmodel.zeta[:, iym, :] + vmodel.zeta[:, iyp, :])/2.\n zeta_z = (vmodel.zeta[:, :, izm] + vmodel.zeta[:, :, izp])/2.\n\n hvx = grid.h[0][:, None, None]\n hvy = grid.h[1][None, :, None]\n hvz = grid.h[2][None, None, :]\n\n # Define the widths of the dual grid.\n dx = (np.r_[0., grid.h[0]] + np.r_[grid.h[0], 0.])/2.\n dy = (np.r_[0., grid.h[1]] + np.r_[grid.h[1], 0.])/2.\n dz = (np.r_[0., grid.h[2]] + np.r_[grid.h[2], 0.])/2.\n\n # Multiply fields by mu_r.\n e3d_hx *= zeta_x/(dx[:, None, None]*hvy*hvz)\n e3d_hy *= zeta_y/(hvx*dy[None, :, None]*hvz)\n e3d_hz *= zeta_z/(hvx*hvy*dz[None, None, :])\n\n new = np.r_[e3d_hx.ravel('F'), e3d_hy.ravel('F'), e3d_hz.ravel('F')]\n hfield = emg3d.Field(\n efield.grid, data=new, frequency=efield._frequency, electric=False)\n hfield.field /= efield.smu0\n\n # Return.\n return hfield", "title": "" }, { "docid": "7bf5ab2b9f4beed08a432a43b00fd42b", "score": "0.48284787", "text": "def test_grid_resolution_steps_examples():\n assert np.allclose(kde.grid_resolution_steps(81, 1),\n np.array([81, 27, 9, 3, 1]))\n assert np.allclose(kde.grid_resolution_steps(2, 1),\n np.array([2, 1]))\n assert np.allclose(kde.grid_resolution_steps(1.0001, 1),\n np.array([1.0001, 1]))\n assert np.allclose(kde.grid_resolution_steps(4050, 50),\n np.array([4050, 1350, 450, 150, 50]))", "title": "" }, { "docid": "628e9c0ab5850827d152043390aacb39", "score": "0.48275435", "text": "def __init__(self, env):\n\t\tgym.ObservationWrapper.__init__(self, env)\n\t\tself.width = 84\n\t\tself.height = 84\n\t\tself.observation_space = spaces.Box(low=0, high=255, shape=(self.height, self.width, 1))", "title": "" }, { "docid": "adadb9180fed0c3ab62be936ed85c334", "score": "0.48259848", "text": "def _read_config_params(self):\n self._median_half_width = config.CONFIG['Surface']['median_width_rel_iqr']*self._data.iqr\n self._n_grid_pts = config.CONFIG['Surface']['number_grid_points']\n self._alpha = config.CONFIG['Surface']['alpha']\n self._x_color = config.CONFIG['Surface']['x_color']\n self._y_color = 
config.CONFIG['Surface']['y_color']\n self._z_color = config.CONFIG['Surface']['z_color']", "title": "" }, { "docid": "09be88dd15781c9dc0c53b387d68c5af", "score": "0.48256388", "text": "def get_observations(self):\n\n color_array = self.get_image()\n print(color_array.shape)\n depth_array = self.get_depth()\n print(depth_array.shape)\n if torch.cuda.is_available():\n img_var = Variable(depth_array).cuda()\n else:\n img_var = Variable(depth_array)\n\n \n \n \n out = self.net(img_var)\n \n \n width = 0.08-self.fingerLPos.getValue()-self.fingerRPos.getValue()\n \n obs = np.append((out, width))\n\n return obs", "title": "" }, { "docid": "5a22f958639400327f3492249d93b142", "score": "0.48198488", "text": "def __get_cfgs(self):\n if self.args.model_type.startswith('vgg_16') and self.args.dataset == 'cifar10':\n # default = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512] out channels\n cfg_0 = [3, 64, 'M', 64, 128, 'M', 128, 256, 256, 'M', 256, 512, 512, 'M', 512, 512, 512]\n cfg_same = [int(c * self.args.target_ratio) if c != 'M' else c for c in cfg_0]\n cfg_A = [3, 32, 'M', 64, 128, 'M', 128, 256, 256, 'M', 256, 256, 256, 'M', 256, 256, 256]\n cfg_B = [3, 26, 'M', 52, 103, 'M', 103, 205, 205, 'M', 205, 205, 256, 'M', 205, 205, 205]\n cfg_C = [3, 26, 'M', 32, 64, 'M', 64, 128, 128, 'M', 128, 128, 128, 'M', 205, 205, 205]\n return cfg_same\n\n elif self.args.model_type.startswith('vgg_19') and self.args.dataset == 'ilsvrc_12':\n # defaul [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n cfg_0 = [3, 64, 'M', 64, 128, 'M', 128, 256, 256, 256, 'M', 256, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']\n cfg_same = [int(c * self.args.target_ratio) if c != 'M' else c for c in cfg_0]\n return cfg_same\n\n elif self.args.model_type.startswith('vgg_16') and self.args.dataset == 'ilsvrc_12':\n cfg_0 = [3, 64, 'M', 64, 128, 'M', 128, 256, 256, 'M', 256, 512, 512, 'M', 512, 512, 512]\n cfg_same = [int(c * self.args.target_ratio) if c != 'M' else c for c in cfg_0]\n cfg_2x = [3, 35, 'M', 35, 70, 'M', 70, 140, 140, 'M', 140, 422, 422, 'M', 422, 512, 512]\n cfg_4x = [3, 24, 'M', 26, 41, 'M', 58, 108, 108, 'M', 128, 184, 276, 'M', 276, 512, 512]\n cfg_5x = [3, 24, 'M', 22, 41, 'M', 51, 108, 89, 'M', 111, 184, 276, 'M', 228, 512, 512]\n return cfg_4x\n\n elif self.args.model_type.startswith('resnet_56') and self.args.dataset == 'cifar10':\n skip = [0, 16, 20, 38, 54]\n # target_ratios = [0.25, 0.5, 0.7]\n target_ratios = [self.args.target_ratio] * 3\n layer_id = 0\n cfg_skip = []\n for m in self.model.modules():\n if isinstance(m, nn.Conv2d):\n in_channels = m.in_channels\n if layer_id % 2 == 0 and layer_id not in skip:\n if layer_id <= 18:\n stage = 0\n elif layer_id <= 36:\n stage = 1\n else:\n stage = 2\n cfg_skip.append(int(target_ratios[stage]*in_channels)+1)\n layer_id += 1\n continue\n else:\n cfg_skip.append(in_channels)\n layer_id += 1\n return cfg_skip\n\n elif self.args.model_type.startswith('resnet_34') and self.args.dataset == 'ilsvrc_12':\n skip = [0, 6, 12, 14, 24]\n target_ratios = [self.args.target_ratio]*3 + [1.]\n layer_id = 0\n cfg_skip = []\n for m in self.model.modules():\n if isinstance(m, nn.Conv2d):\n\n if m.kernel_size == (1,1):\n continue\n\n in_channels = m.in_channels\n if layer_id % 2 == 0 and layer_id not in skip:\n if layer_id <= 6:\n stage = 0\n elif layer_id <= 12:\n stage = 1\n elif layer_id <= 24:\n stage = 2\n else:\n stage = 3\n num_channels_left = int(target_ratios[stage]*in_channels)\n 
cfg_skip.append(num_channels_left)\n print(\"%d channels left for module\" % num_channels_left, m)\n layer_id += 1\n else:\n cfg_skip.append(in_channels)\n layer_id += 1\n return cfg_skip\n else:\n raise NotImplementedError", "title": "" }, { "docid": "b51bcc92830b93ba385088fdd56e134b", "score": "0.48180428", "text": "def main():\n jura_data = np.genfromtxt('data.txt', names=True)\n ns = nscore.transform(jura_data['Pb'])\n y = ns.direct(jura_data['Pb'])\n\n nx = 60\n ny = 60\n xmin = 1.6\n xmax = 4.2\n ymin = 0.8\n ymax = 3.4\n\n xi, yi = np.meshgrid( np.linspace(xmin, xmax, nx), np.linspace(ymin, ymax, ny) )\n v_est, v_var = kriging.ordinary_mesh(jura_data['X'], jura_data['Y'], y, xi.flatten(), yi.flatten(), model_function)\n\n nsim = 400\n v_sim = np.zeros((nx*ny, nsim))\n for i in np.arange(nsim):\n v_sim[:,i] = sgs.conditional(jura_data['X'], jura_data['Y'], y, xi.flatten(), yi.flatten(), covmodel, 15)\n v_mean = np.mean(v_sim, axis=1)\n v_var_sim = np.var(v_sim, axis=1)\n\n plt.figure(1)\n\n plt.subplot(221)\n grid = v_est.reshape((nx,ny))\n plt.imshow(grid, extent=(xi.min(),xi.max(),yi.min(),yi.max()), interpolation='nearest')\n plt.title('Kriging')\n plt.colorbar()\n \n plt.subplot(222)\n grid = v_mean.reshape((nx,ny))\n plt.imshow(grid, extent=(xi.min(),xi.max(),yi.min(),yi.max()), interpolation='nearest')\n plt.title('Simulation mean')\n plt.colorbar()\n\n plt.subplot(223)\n grid = v_var.reshape((nx,ny))\n plt.imshow(grid, extent=(xi.min(),xi.max(),yi.min(),yi.max()), interpolation='nearest')\n plt.title('Kriging')\n plt.colorbar()\n \n plt.subplot(224)\n grid = v_var_sim.reshape((nx,ny))\n plt.imshow(grid, extent=(xi.min(),xi.max(),yi.min(),yi.max()), interpolation='nearest')\n plt.title('Simulation variance')\n plt.colorbar()\n\n plt.show()", "title": "" } ]
49c5861372d25ec22218cc91fe7a198f
Job directory names default to "run00001, run00002..."
[ { "docid": "b642aa1b7be602aae0a5da10195b24ec", "score": "0.5937661", "text": "def runCreateJobsDirs(jobj, jobEditorList, scriptEditor):\n \n global u\n\n # Set basic script editor for all jobs\n jobj.setScriptEditor(scriptEditor)\n\n #\n # Loop over input file editor objects created above\n # Each input file object represents a new job because the editor\n # will be used to create a new input file in each new run directory\n #\n for ijob, jobInputEditor in enumerate(jobEditorList):\n \n # Construct next job setup object from template\n j=copy.copy(jobj)\n \n # Default run directory name\n jobNumStr = u.getTagForSorting(ijob+1)\n runDir=\"run-\" + jobNumStr\n fullRunDir = os.path.join(topRunDir, runDir)\n\n # Set script edit so run directory is inserted\n scriptEditor.setDictValue(\"EDIT_rundir=\"+runDir)\n\n j.setRunDir(fullRunDir) # Set run directory pathname\n j.setInputEditor(jobInputEditor) # Set job object input file editor\n j.createJobRun() # Create run space with file from repo\n j.changeInputFile() # Use file editor object on inputfile\n j.changeScriptFile() # Use file editor object on scriptfile", "title": "" } ]
[ { "docid": "e3dfac1458829ce7e315ecb8bcf34f06", "score": "0.69982713", "text": "def job_directory(uuid):\n d = os.path.join(app.config['JOB_FOLDER'], str(uuid))\n return d", "title": "" }, { "docid": "ab57418d59989585fe271ae6ceedfa0c", "score": "0.6934569", "text": "def work_dir(self):\n return settings.JOBS_DIR / self.job_number", "title": "" }, { "docid": "41534685cea132c69b1773ec15b4fea2", "score": "0.6614931", "text": "def _get_job_dir(output_path: str, job_name: str) -> str:\n\n return os.path.join(output_path, job_name)", "title": "" }, { "docid": "13f150880f31663b98be2d77c765be6b", "score": "0.64321613", "text": "def new_run_log_dir():\n log_dir = \"./runs\"\n run_id = len([name for name in os.listdir(log_dir)])\n run_log_dir = os.path.join(log_dir, str(run_id))\n return run_log_dir", "title": "" }, { "docid": "8407f8203a79d924f2b33406d0d63b44", "score": "0.6406988", "text": "def fixie_jobs_dir():\n fjd = os.path.join(ENV.get('FIXIE_DATA_DIR'), 'jobs')\n os.makedirs(fjd, exist_ok=True)\n return fjd", "title": "" }, { "docid": "3929cb224381393526048818cd5599ff", "score": "0.6283515", "text": "def _task_directory(self, job_id, task_id, task_attempt):\n dir_name = 'task' if task_id is None else str(task_id)\n if task_attempt:\n dir_name = '%s.%s' % (dir_name, task_attempt)\n return self._provider_root() + '/' + job_id + '/' + dir_name", "title": "" }, { "docid": "f3fb667e5a6e76f8eed038d6669666ed", "score": "0.6282766", "text": "def calculate_experiment_folder_for_new_run ():\n run_number = 1\n while True:\n result = 'run-%03d/' % (run_number)\n if not os.path.isdir (result) and not os.path.exists (result):\n return result\n run_number += 1", "title": "" }, { "docid": "fe782295768435f3d6889a9d35b0f84b", "score": "0.62659824", "text": "def run_dir(self):\n return Path(self.name_list['run_dir'])", "title": "" }, { "docid": "f0ef59c5c7ed379cf76cc79b2e0bc79c", "score": "0.6203404", "text": "def get_jobs():\n jobs = [os.path.join(JOBS_DIR, job)\n for job in os.listdir(JOBS_DIR)]\n return jobs", "title": "" }, { "docid": "aad6dac3863eccdd001336e65c1939b1", "score": "0.6175089", "text": "def _get_jobfile_path(self, request_id, directory):\n return os.path.join(directory, \"sheepdog_{:03}.py\".format(request_id))", "title": "" }, { "docid": "86dc3b225a367a78b218a9674ec01d4f", "score": "0.6104069", "text": "def _dqdir(self, jobdir):\n if jobdir:\n dqdir = join(jobdir, 'requests.queue')\n if not exists(dqdir):\n os.makedirs(dqdir)\n return dqdir", "title": "" }, { "docid": "eae170247885177926762431a3b0d43a", "score": "0.60546017", "text": "def make_job(dir):\r\n raise NotImplementedError", "title": "" }, { "docid": "4e09cf961c0be5950fc02ba2873fb741", "score": "0.6036355", "text": "def test_simple_job_runner_set_log_dir_multiple_times(self):\n # Create a temporary log directory\n self.log_dir = self.make_tmp_dir()\n # Create a runner and execute the echo command\n runner = SimpleJobRunner()\n # Reset the log directory\n runner.set_log_dir(self.log_dir)\n jobid1 = self.run_job(runner,'test1',self.working_dir,'echo',('this is a test',))\n # Rest the log directory again and run second job\n runner.set_log_dir(self.working_dir)\n jobid2 = self.run_job(runner,'test2',self.working_dir,'echo',('this is a test',))\n # Rest the log directory again and run 3rd job\n runner.set_log_dir(self.log_dir)\n jobid3 = self.run_job(runner,'test3',self.working_dir,'echo',('this is a test',))\n # Wait for jobs to finish\n self.wait_for_jobs(runner,jobid1,jobid2,jobid3)\n # Check outputs\n 
self.assertEqual(runner.name(jobid1),'test1')\n self.assertTrue(os.path.isfile(runner.logFile(jobid1)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid1)))\n self.assertEqual(runner.name(jobid2),'test2')\n self.assertTrue(os.path.isfile(runner.logFile(jobid2)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid2)))\n self.assertEqual(runner.name(jobid3),'test3')\n self.assertTrue(os.path.isfile(runner.logFile(jobid3)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid3)))\n # Check log files are in the correct directories\n self.assertEqual(os.path.dirname(runner.logFile(jobid1)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid1)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.logFile(jobid2)),self.working_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid2)),self.working_dir)\n self.assertEqual(os.path.dirname(runner.logFile(jobid3)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid3)),self.log_dir)", "title": "" }, { "docid": "b368ac8a0a92d64bf9d10b0b78c8e2bc", "score": "0.5980029", "text": "def get_desired_jobs():\n jobs = {}\n test_dir = os.path.join(\n os.path.dirname(os.path.abspath(__file__)) + TEST_CONFIG_DIR\n )\n\n for filename in os.listdir(test_dir):\n jobs[filename.split(\".\")[0]] = read_job_spec(filename)\n return jobs", "title": "" }, { "docid": "bfde9f4df9451cfff59cb8279d9af087", "score": "0.5918263", "text": "def test_ge_job_runner_set_log_dir_multiple_times(self):\n # Create a temporary log directory\n self.log_dir = self.make_tmp_dir()\n # Create a runner and execute the echo command\n runner = GEJobRunner(ge_extra_args=self.ge_extra_args)\n # Reset the log directory\n runner.set_log_dir(self.log_dir)\n jobid1 = self.run_job(runner,'test1',self.working_dir,'echo',('this is a test',))\n # Rest the log directory again and run second job\n runner.set_log_dir(self.working_dir)\n jobid2 = self.run_job(runner,'test2',self.working_dir,'echo',('this is a test',))\n # Rest the log directory again and run 3rd job\n runner.set_log_dir(self.log_dir)\n jobid3 = self.run_job(runner,'test3',self.working_dir,'echo',('this is a test',))\n self.wait_for_jobs(runner,jobid1,jobid2,jobid3)\n # Check outputs\n self.assertEqual(runner.name(jobid1),'test1')\n self.assertTrue(os.path.isfile(runner.logFile(jobid1)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid1)))\n self.assertEqual(runner.name(jobid2),'test2')\n self.assertTrue(os.path.isfile(runner.logFile(jobid2)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid2)))\n self.assertEqual(runner.name(jobid3),'test3')\n self.assertTrue(os.path.isfile(runner.logFile(jobid3)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid3)))\n # Check log files are in the correct directories\n self.assertEqual(os.path.dirname(runner.logFile(jobid1)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid1)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.logFile(jobid2)),self.working_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid2)),self.working_dir)\n self.assertEqual(os.path.dirname(runner.logFile(jobid3)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid3)),self.log_dir)", "title": "" }, { "docid": "b02f2a8770fe84deb8e2aeefd1e5c122", "score": "0.59112513", "text": "def test_runningDirectoryPath(self):\n e = executers.ExecutionOptions(label=None)\n e.setRunDirFromCaseTitle(caseTitle=\"test\")\n self.assertEqual(os.path.basename(e.runDir), \"508bc04f-0\")\n\n e = executers.ExecutionOptions(label=\"label\")\n 
e.setRunDirFromCaseTitle(caseTitle=\"test\")\n self.assertEqual(os.path.basename(e.runDir), \"b07da087-0\")\n\n e = executers.ExecutionOptions(label=\"label2\")\n e.setRunDirFromCaseTitle(caseTitle=\"test\")\n self.assertEqual(os.path.basename(e.runDir), \"9c1c83cb-0\")", "title": "" }, { "docid": "8ebbaaa69b3285d7e9fe362142202266", "score": "0.5894594", "text": "def job_file(uuid, *args):\n return os.path.join(job_directory(uuid), *args)", "title": "" }, { "docid": "166c4c018c2f19b56589130a9c610b77", "score": "0.5857865", "text": "def build_dir_name(self):\n return os.path.join(CLUSTERFUZZ_BUILDS_DIR,\n str(self.testcase_id) + '_build')", "title": "" }, { "docid": "4993f9c3eb34488d05846121b936c23d", "score": "0.58551764", "text": "def get_jobdir(slug, dirtype='work'):\n Slug.validate(slug)\n return os.path.join(jobdirs.get(dirtype), str(slug))", "title": "" }, { "docid": "5a13009b654f8982439aed12a669b7fc", "score": "0.5795115", "text": "def bench_dir(self):\n return f\"/apps/{self.sub_path()}\"", "title": "" }, { "docid": "6712da60c843eb909f1bfe657c76c075", "score": "0.5775189", "text": "def setup_job_dir(self, data_files):\n\n # Download the job execution input files\n self.retrieve_input_data_files(data_files)", "title": "" }, { "docid": "0f41a66c7afa45a7dbaeb28af604e90d", "score": "0.5761182", "text": "def test_job_dir(self):\r\n self.assertEqual(self.tko_job.dir, self.pb_job.dir)", "title": "" }, { "docid": "c79fbc98368e46533f0cc69e315cacbc", "score": "0.5734653", "text": "def make_jobs(commands, job_prefix, queue, jobs_dir=\"jobs/\",\n walltime=\"06:00:00\", nodes=1, ncpus=16, mem=16, keep_output=\"oe\"):\n\n filenames=[]\n create_dir(jobs_dir)\n job_list_name = get_tmp_filename(tmp_dir=jobs_dir, prefix=job_prefix+\"_joblist_\", suffix = \".txt\")\n \n job_log_name = get_tmp_filename(tmp_dir=jobs_dir, prefix=job_prefix+\"_prallel_job_log\", suffix = \".txt\")\n \n out_fh_list = open(job_list_name,\"w\")\n \n for command in commands[0:len(commands)-1]:\n out_fh_list.write(command+\"\\n\")\n\n out_fh_list.close()\n\n job_name = get_tmp_filename(tmp_dir=jobs_dir, prefix=job_prefix+\"_\", suffix = \".txt\")\n\n out_fh = open(job_name,\"w\")\n\n #num_nodes = int(math.ceil((len(commands)-1)/8.0))\n # If you use the lab queue, then change the num_nodes and ncpus as:\n num_nodes = 1\n ncpus = len(commands) - 1\n \n out_fh.write(QSUB_TEXT % (walltime, num_nodes, ncpus, mem, queue, job_prefix, keep_output, job_list_name, len(commands)-1, job_log_name, commands[-1]))\n\n out_fh.close()\n\n filenames.append(job_name)\n\n return filenames", "title": "" }, { "docid": "98451119476ecc5b8280a9bb26dfe2de", "score": "0.5719749", "text": "def save_dir(self):\r\n return self.run_name", "title": "" }, { "docid": "2f4e6fb71a6ad44e439ae93d23c23dc4", "score": "0.56993693", "text": "def test_list_namespaced_jobs(self):\n pass", "title": "" }, { "docid": "377c0a84a436708e2fdf417b19f79bfb", "score": "0.5686704", "text": "def make_results_dirs(run_name, base_path='saves'):\n base_dir = os.path.join('saves/', run_name)\n i = 0\n while os.path.exists(base_dir + f\"_{i}\"):\n i += 1\n base_dir += f\"_{i}\"\n checkpoint_dir = os.path.join(base_dir, 'checkpoints')\n os.makedirs(checkpoint_dir)\n log_dir = os.path.join(base_dir, 'logs')\n os.makedirs(log_dir)\n return log_dir, checkpoint_dir", "title": "" }, { "docid": "89a689a2735e5ff9e388ec45555ad5ca", "score": "0.56802416", "text": "def parse_run_id(run_id):\n if not run_id:\n raise ValueError('run_id should be a non empty string')\n dir_list = run_id.split(':')\n 
return path.join(*dir_list)", "title": "" }, { "docid": "f120301a572710d16fcc5bc6e6b4e53c", "score": "0.56511885", "text": "def _running_dir(self) -> Path:\n return self._data_dir / \"running\"", "title": "" }, { "docid": "0b83a65cfc465377a3969f6701ae0492", "score": "0.5632627", "text": "def get_output_folder(parent_dir, run_name):\n\n if not os.path.isdir(parent_dir):\n os.mkdir(parent_dir)\n\n experiment_id = 0\n for folder_name in os.listdir(parent_dir):\n if (not os.path.isdir(joinpath(parent_dir, folder_name))) or (run_name not in folder_name):\n continue\n try:\n folder_name = int(folder_name.split('-run')[-1])\n if folder_name > experiment_id:\n experiment_id = folder_name\n except:\n pass\n experiment_id += 1\n\n parent_dir = joinpath(parent_dir, run_name)\n parent_dir = parent_dir + '-run{}'.format(experiment_id)\n return parent_dir", "title": "" }, { "docid": "4c1ab59cabeb4fb29718d53cfd3fc079", "score": "0.5631187", "text": "def test_simple_job_runner_set_log_dir(self):\n # Create a temporary log directory\n self.log_dir = self.make_tmp_dir()\n # Create a runner and execute the echo command\n runner = SimpleJobRunner()\n # Reset the log directory\n runner.set_log_dir(self.log_dir)\n jobid = self.run_job(runner,'test',self.working_dir,'echo',('this is a test',))\n self.wait_for_jobs(runner,jobid)\n # Check outputs\n self.assertEqual(runner.name(jobid),'test')\n self.assertTrue(os.path.isfile(runner.logFile(jobid)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid)))\n # Check log files are the log directory, not the working directory\n self.assertEqual(os.path.dirname(runner.logFile(jobid)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid)),self.log_dir)", "title": "" }, { "docid": "e33f2e18b5bcd37a5c279cf06ef03e65", "score": "0.56172526", "text": "def get_log_directory(job):\n log_path = get_log_path()\n if log_path is None:\n return None\n return log_path / job.identifier", "title": "" }, { "docid": "dd86e6ce669d78a6e198352151fe8388", "score": "0.5585985", "text": "def execution_path(self):\r\n return os.path.join(self.job.tag(), self.execution_subdir)", "title": "" }, { "docid": "6a27dadbe7b8a26575e9a05c50543089", "score": "0.55743057", "text": "def get_run_dir(sample_dir, k, run=\"\"):\n if not run:\n run = \"\"\n run = str(run)\n k_dir = get_k_dir(sample_dir, k)\n if run.strip() == \"\":\n run_dirs = [d for d in os.listdir(k_dir) if d.startswith(\"run_\")]\n if len(run_dirs) == 1:\n run_dir = os.path.join(k_dir, run_dirs[0])\n else:\n raise ValueError(\"Run must be specified if there are >1 runs.\")\n elif run == \"best\":\n run_dir = get_best_run_dir(k_dir)\n else:\n run_dir = os.path.join(k_dir, f\"run_{run}\")\n if not os.path.isdir(run_dir):\n run_dir += \"-best\"\n if not os.path.isdir(run_dir):\n raise ValueError(f\"Directory {run_dir} does not exist.\")\n return run_dir", "title": "" }, { "docid": "9d0ccaddf7b522495ec74f9adb69fede", "score": "0.55513704", "text": "def runname(self):\n if not hasattr(self,'_runname'):\n self._runname='%s_%s' % \\\n (datetime.now().strftime('%Y%m%d%H%M%S'),os.path.basename(tempfile.mkstemp()[1]))\n return self._runname", "title": "" }, { "docid": "a23b8fa67df430218a03525c8a15b9e1", "score": "0.5534198", "text": "def find_available_run_name(me_dir):\n\n name = 'run_%02d'\n data = [int(s[4:j]) for s in os.listdir(pjoin(me_dir,'Events')) for \n j in range(4,len(s)+1) if \\\n s.startswith('run_') and s[4:j].isdigit()]\n return name % (max(data+[0])+1)", "title": "" }, { "docid": "87c0794b7c8c3e0be4ef35c9e48b23f3", 
"score": "0.5531217", "text": "def fixie_jobid_file():\n fjf = os.path.join(ENV.get('FIXIE_JOBS_DIR'), 'id')\n fjf = expand_file_and_mkdirs(fjf)\n return fjf", "title": "" }, { "docid": "e099ceccd38b34967602a7e9f70c6eb3", "score": "0.551551", "text": "def test_ge_job_runner_set_log_dir(self):\n # Create a temporary log directory\n self.log_dir = self.make_tmp_dir()\n # Create a runner and execute the echo command\n runner = GEJobRunner(ge_extra_args=self.ge_extra_args)\n # Reset the log directory\n runner.set_log_dir(self.log_dir)\n jobid = self.run_job(runner,'test',self.working_dir,'echo','this is a test')\n self.wait_for_jobs(runner,jobid)\n # Check outputs\n self.assertEqual(runner.name(jobid),'test')\n self.assertTrue(os.path.isfile(runner.logFile(jobid)))\n self.assertTrue(os.path.isfile(runner.errFile(jobid)))\n # Check log files are the log directory, not the working directory\n self.assertEqual(os.path.dirname(runner.logFile(jobid)),self.log_dir)\n self.assertEqual(os.path.dirname(runner.errFile(jobid)),self.log_dir)", "title": "" }, { "docid": "f55783a4426a26a3222a9c2bf3ad6e3c", "score": "0.5510345", "text": "def dir_submission_results(submission_root):\n return os.path.join(submission_root, 'results')", "title": "" }, { "docid": "aca94204496bd01263f293dfdb7ffb15", "score": "0.5495262", "text": "def postprocess(self):\n fs = os.listdir(\".\")\n if os.path.exists(self.output_file) and self.suffix != \"\":\n os.mkdir(f\"run{self.suffix}\")\n for f in fs:\n if \"json\" in f:\n continue\n if not os.path.isdir(f):\n if self.final:\n shutil.move(f, f\"run{self.suffix}/{f}\")\n else:\n shutil.copy(f, f\"run{self.suffix}/{f}\")\n\n # Remove continuation so if a subsequent job is run in\n # the same directory, will not restart this job.\n if os.path.exists(\"continue.json\"):\n os.remove(\"continue.json\")", "title": "" }, { "docid": "72ce9fb969d2e9205ab98b2c8535b2c4", "score": "0.5492262", "text": "def make_jobdir(self, dirtype):\n if dirtype not in ('error', 'results', 'work'):\n raise ValueError('no such dirtype')\n os.mkdir(get_jobdir(self.slug, dirtype), 0750)", "title": "" }, { "docid": "376c1ba16f0bb29adea47977f5234e8d", "score": "0.54666024", "text": "def get_output_folder(parent_dir, env_name):\n if not os.path.exists(parent_dir):\n os.makedirs(parent_dir)\n experiment_id = 0\n for folder_name in os.listdir(parent_dir):\n if not os.path.isdir(os.path.join(parent_dir, folder_name)):\n continue\n try:\n folder_name = int(folder_name.split('-run')[-1])\n if folder_name > experiment_id:\n experiment_id = folder_name\n except:\n pass\n experiment_id += 1\n\n parent_dir = os.path.join(parent_dir, env_name)\n parent_dir = parent_dir + '-run{}'.format(experiment_id)\n return parent_dir", "title": "" }, { "docid": "42d17d029cd3d551d0cb89b9e5886a47", "score": "0.5459057", "text": "def build_job_scripts(directory, jobs):\n for job in jobs:\n scriptPath = os.path.join(directory, \"jobs\", job.name)\n scriptFile = file(scriptPath, \"w\")\n scriptFile.write(\"#!/bin/bash\\n\")\n scriptFile.write(\"#$ -S /bin/bash\\n\")\n scriptFile.write(job.script + \"\\n\")\n scriptFile.close()\n os.chmod(scriptPath, 0755)\n job.scriptPath = scriptPath", "title": "" }, { "docid": "9f2645b1a4add1bacea1cea3a408d13c", "score": "0.54477876", "text": "def get_jobs(self):\n run_ids = self.run_ids\n input_json = construct_copy_outputs_input(run_ids)\n\n mapping_file_content, pairing_file_content, data_clinical_content = generate_sample_pairing_and_mapping_files(\n run_ids\n )\n\n input_json[\"meta\"] = [\n 
{\"class\": \"File\", \"basename\": \"sample_mapping.txt\", \"contents\": mapping_file_content},\n {\"class\": \"File\", \"basename\": \"sample_pairing.txt\", \"contents\": pairing_file_content},\n {\"class\": \"File\", \"basename\": \"sample_data_clinical.txt\", \"contents\": data_clinical_content},\n ]\n\n number_of_runs = len(run_ids)\n name = \"ARGOS COPY OUTPUTS %s runs [%s,..] \" % (number_of_runs, run_ids[0])\n\n app = self.get_pipeline_id()\n pipeline = Pipeline.objects.get(id=app)\n pipeline_version = pipeline.version\n project_prefix = input_json[\"project_prefix\"]\n\n tags = {\"run_ids\": run_ids}\n\n copy_outputs_job_data = {\"app\": app, \"inputs\": input_json, \"name\": name, \"tags\": tags}\n\n \"\"\"\n If project_prefix and job_group_id, write output to a directory\n that uses both\n \"\"\"\n output_directory = None\n if project_prefix:\n tags[\"project_prefix\"] = project_prefix\n if self.job_group_id:\n jg = JobGroup.objects.get(id=self.job_group_id)\n jg_created_date = jg.created_date.strftime(\"%Y%m%d_%H_%M_%f\")\n output_directory = os.path.join(\n pipeline.output_directory, \"argos\", project_prefix, pipeline_version, jg_created_date\n )\n copy_outputs_job_data[\"output_directory\"] = output_directory\n copy_outputs_job = [RunCreator(**copy_outputs_job_data)]\n\n return copy_outputs_job", "title": "" }, { "docid": "97d919db0fb4acdf7a0b466c3c38f478", "score": "0.5406646", "text": "def setRunDir(self):\n\t\t### get the path to input stack\n\t\tstackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False)\n\t\t### good idea to set absolute path,\n\t\tstackpath = os.path.abspath(stackdata['path']['path'])\n\t\t### same thing in one step\n\t\trundir = os.path.join(stackpath, \"../../align\", self.params['runname'])\n\t\t### good idea to set absolute path,\n\t\t### cleans up 'path/stack/stack1/../../example/ex1' -> 'path/example/ex1'\n\t\tself.params['rundir'] = os.path.abspath(rundir)", "title": "" }, { "docid": "5b1cb79a534aa4d4a6cecb1767db1707", "score": "0.5397449", "text": "def jobfolder(self):\n from . import load\n from .. import is_interactive\n if self._jobfolder is None:\n if self._rootpath is None: \n if is_interactive:\n from .. 
import interactive\n if interactive.jobfolder is None:\n print \"No current job-dictionary.\"\n return\n return interactive.jobfolder.root\n else: raise RuntimeError('No job-folder.')\n else: self._jobfolder = load(self.rootpath, timeout=30)\n return self._jobfolder.root", "title": "" }, { "docid": "0c50b872776cef23a5951fb908ec73b6", "score": "0.5394799", "text": "def main():\n\n cgroup_base_path = '/sys/fs/cgroup/cpuset/slurm_%s' % os.uname()[1]\n\n excluded_uids = []\n if len(sys.argv) >= 2:\n excluded_uids = [int(uid_s) for uid_s in sys.argv[1].split(',')]\n\n user_jobs = {}\n\n if os.path.isdir(cgroup_base_path):\n slurm_cgroup_items = os.listdir(cgroup_base_path)\n else:\n slurm_cgroup_items = []\n\n for slurm_cgroup in slurm_cgroup_items:\n if not slurm_cgroup.startswith('uid_'):\n continue\n\n uid = slurm_cgroup.split('_')[1]\n if uid <= 1000 or uid in excluded_uids:\n continue\n\n user_cgroup_path = \"%s/%s\" % (cgroup_base_path, slurm_cgroup)\n user_cgroup_items = os.listdir(user_cgroup_path)\n for user_cgroup in user_cgroup_items:\n if not user_cgroup.startswith('job_'):\n continue\n\n jobid = user_cgroup.split('_')[1]\n job_cgroup_path = \"%s/%s\" % (user_cgroup_path, user_cgroup)\n job_cgroup_items = os.listdir(job_cgroup_path)\n step_list = []\n for job_cgroup in job_cgroup_items:\n if not job_cgroup.startswith('step_'):\n continue\n step_list.append(job_cgroup)\n if uid not in user_jobs:\n user_jobs[uid] = []\n user_jobs[uid].append((jobid, ','.join(step_list)))\n\n print json.dumps(user_jobs)", "title": "" }, { "docid": "8075be17cdbed132917ef0ac55d87d07", "score": "0.539019", "text": "def generate_log_file_name():\n return \"\".join( [ \n JOB_NAME,\n '_',\n datetime.now().strftime('%Y%m%d%H%M%S'),\n '.log'\n ] )", "title": "" }, { "docid": "e31616cea5ca571de5dc86f6d2a8acdf", "score": "0.5371177", "text": "def main():\n # adding command line argument\n parser = get_parser()\n args = vars(parser.parse_args())\n\n # Set the variable work_dir with the first argument passed\n work_dir = args['work_dir'][0]\n\n batch_rename(work_dir)", "title": "" }, { "docid": "2215300e90f7c607abc72d6793240245", "score": "0.5362197", "text": "def build_job_scripts(root_dir: Path, jobs: List) -> None:\n # Loop over the job list, creating each job script in turn, and then adding\n # scriptPath to the Job object\n for job in jobs:\n scriptpath = root_dir / \"jobs\" / job.name\n with open(scriptpath, \"w\") as scriptfile:\n scriptfile.write(f\"#!/bin/sh\\n#$ -S /bin/bash\\n{job.script}\\n\")\n job.scriptpath = scriptpath", "title": "" }, { "docid": "4505b038d7aef146b446292e8fad4619", "score": "0.53507584", "text": "def lab_dir():\n return config.junos_dir", "title": "" }, { "docid": "ce9bc6ced08293e8d7241d5ad0606146", "score": "0.5349961", "text": "def create_run_directory(prefix, default_directory_number=None):\n dir_number = default_directory_number\n if (dir_number is None):\n dir_ids = []\n for file_name in os.listdir(os.getcwd()):\n if (os.path.isdir(file_name)) and (file_name.startswith(prefix)):\n dir_id = file_name.split(\"_\")[-1]\n if (dir_id.isdigit()):\n dir_ids.append(int(dir_id))\n if (len(dir_ids) > 0):\n dir_number = max(max(dir_ids) + 1, 1)\n else :\n dir_number = 1\n dir_name = prefix + \"_\" + str(dir_number)\n if (os.path.isdir(dir_name)):\n raise OSError(\"The directory %s already exists.\"%os.path.abspath(dir_name))\n else :\n os.makedirs(dir_name)\n return os.path.abspath(dir_name)", "title": "" }, { "docid": "d4ecc8d19829718caf2ddce89c1b0336", "score": "0.5342676", "text": 
"def setRunDir(self):\n\t\tsubtomorunq = appiondata.ApSubTomogramRunData()\n\t\tsubtomorundata = subtomorunq.direct_query(self.params['subtomoId'])\n\t\tsubtomoq = appiondata.ApTomogramData(subtomorun=subtomorundata)\n\t\tresults = subtomoq.query(results=1)\n\t\tif results:\n\t\t\tsubtomodir = results[0]['path']['path']\n\t\t\ttiltdirs = subtomodir.split('/tiltseries')\n\t\t\tself.params['rundir'] = os.path.join(tiltdirs[0],\"average\",self.params['runname'])", "title": "" }, { "docid": "cbaffed537477ca9bd05438718b227f5", "score": "0.53357834", "text": "def _job_name(self, appname) -> str:\n return \"juju_{}_{}_{}_prometheus_scrape\".format(\n self.model.name, self.model.uuid[:7], appname\n )", "title": "" }, { "docid": "4174073aaf32def8a0ecfc0c2ae5a452", "score": "0.5314784", "text": "def build_submission(directory, jobs, nodes=1): \n build_directories(directory)\n build_job_scripts(directory, jobs)\n submit_jobs(directory, jobs, nodes)", "title": "" }, { "docid": "81dff956894389f9df781b7f7c35bd02", "score": "0.53141534", "text": "def sbatchInDir(dirname):\n tmp = os.getcwd()\n os.chdir(dirname)\n # this breaks backwards compatibility, python3.7 and higher only!\n finished = subprocess.run([\"sbatch\", \"job.sfincsScan\"], capture_output=True, text=True)\n stdout = finished.stdout\n search = \"Submitted batch job \"\n i1 = stdout.find(search) + len(search)\n #print(\"stdout\")\n #print(stdout)\n #print(stdout[i1:])\n jobid = stdout[i1:].split(None,1)[0]\n #jobid=\"1\"\n os.chdir(tmp) \n return Job(dirname,jobid)", "title": "" }, { "docid": "3e08bda27382e5bb548cf528a21dd31d", "score": "0.53059775", "text": "def _kaggle_dir_name(competition_or_dataset: str) -> str:\n return competition_or_dataset.replace('/', '_')", "title": "" }, { "docid": "63a88bcf9d34955e7e5693feffcb3ee9", "score": "0.5294526", "text": "def get_all_experiment_runs():\n dirs = [item for item in os.listdir(RESULTS_DIRECTORY)]\n timed_dirs = []\n for directory in dirs:\n try:\n dt = strptime(directory, \"%Y-%m-%d_%H-%M-%S\")\n timed_dirs.append((directory, dt))\n except ValueError:\n pass\n timed_dirs.sort(key=lambda pair: pair[1], reverse=True)\n dirs = [pair[0] for pair in timed_dirs]\n return dirs", "title": "" }, { "docid": "e22ebc7e35060593508789270df113b6", "score": "0.52898467", "text": "def get_current_job_file_logs(self):\n ...", "title": "" }, { "docid": "dd61c3faf7c48b0383b9b5d5d05102db", "score": "0.5288489", "text": "def generate_job_name(cls):\n # first check if it is a Stalker Project\n from anima.dcc import mayaEnv\n\n m = mayaEnv.Maya()\n v = m.get_current_version()\n if v is not None:\n from stalker import Version\n\n assert isinstance(v, Version)\n return \"%s:%s_v%03i%s\" % (\n v.task.project.code,\n v.nice_name,\n v.version_number,\n v.extension,\n )\n\n return os.path.basename(pm.sceneName())", "title": "" }, { "docid": "9d53d1730ec33cd6d366a1f9b1b05080", "score": "0.52880806", "text": "def genRunDir(testObj,runIdx):\n runDir = testObj.runDir # TODO: expand to full path -- os.path.abspath()?\n i = 0\n while True:\n newDir = \"%s/RUN.%s.%s.%d\" %(runDir,testObj.bin,testObj.testName,i)\n if not os.path.isdir(newDir):\n break\n i += 1\n print \" + Create dir: %s\" %newDir\n os.makedirs(newDir)\n oneTest.actualRunDir = newDir\n\n walkDir = self.e_cwd + '/' + runDir\n walkOff = len(walkDir)\n print \" + walkDir: %s\" %walkDir\n for dirName, subdirList, fileList in os.walk(walkDir):\n # modify subdirList to skip unwanted subdirectories\n # Note: os.walk flattens the structure\n #\n if len(dirName) > 
walkOff:\n continue\n else:\n l_dirName = ''\n\n if 'CVS' in subdirList:\n subdirList.remove('CVS') # TODO: add other names to be skipped programatically\n l_RUNDir = []\n for aDir in subdirList:\n if aDir.find('RUN.') == 0:\n l_RUNDir.append(aDir)\n for aDir in l_RUNDir:\n subdirList.remove(aDir)\n\n # Create symbolic links\n for fname in fileList:\n os.symlink( '../' + fname, newDir + '/' + fname) #os.symlink( walkDir + '/' + fname, newDir + '/' + fname)\n for aDir in subdirList:\n os.symlink( '../' + aDir, newDir + '/' + aDir) #os.symlink( walkDir + '/' + aDir, newDir + '/' + aDir)\n\n return newDir", "title": "" }, { "docid": "9f0911d11e5e6af74f4e0d90ec699dff", "score": "0.52875775", "text": "def find_toplevel_job_dir(start_dir):\r\n job_dir = start_dir\r\n while not os.path.exists(os.path.join(job_dir, \".autoserv_execute\")):\r\n if job_dir == \"/\":\r\n return None\r\n job_dir = os.path.dirname(job_dir)\r\n return job_dir", "title": "" }, { "docid": "e95e1f23a5e0c154d6ae06bb69c027ee", "score": "0.528432", "text": "def _jobname(self):\n if 1 == self.max_cmds:\n assert 1 == len(self._pool), \\\n \"If there's a single-command limit on job submission, jobname \" \\\n \"must be determined with exactly one sample in the pool, but \" \\\n \"there is/are {}.\".format(len(self._pool))\n sample, _ = self._pool[0]\n name = sample.name\n else:\n # Note the order in which the increment of submission count and\n # the call to this function can influence naming. Make the jobname\n # generation call (this method) before incrementing the\n # submission counter, but add 1 to the index so that we get a\n # name concordant with 1-based, not 0-based indexing.\n name = \"lump{}\".format(self._num_total_job_submissions + 1)\n return \"{}_{}\".format(self.pl_key, name)", "title": "" }, { "docid": "17942c34c233f3e96f9902c998ee5b1e", "score": "0.5269418", "text": "def _jobs(self):\n return []", "title": "" }, { "docid": "ed0d767a0ba5843deedfd317962ed9cf", "score": "0.5258065", "text": "def submit_jobs(self):\n short_names = self.ffastq.keys()\n sampinfo = self.sampleinfo\n for sample in short_names:\n sconffile = sampinfo[sample]['sample_config']\n job_shell_file = os.path.join(self.param['outdir'], 'job_sample_logs/'+sample+'.job')\n\n if 'dscigend' in socket.gethostname():\n PE='OpenMP'\n else:\n PE='node'\n\n with open(job_shell_file, 'w') as jsh:\n jsh.write('#!/bin/sh\\n')\n jsh.write('#$ -S /bin/bash\\n')\n jsh.write('#$ -j y\\n')\n jsh.write('#$ -cwd\\n')\n #jsh.write('#$ -V\\n')\n jsh.write('#$ -m a\\n')\n jsh.write('#$ -M dipen.sangurdekar@biogen.com\\n')\n #jsh.write('#$ -l s_rt=48:00:00\\n')\n jsh.write('#$ -N {0};SampleID={1}\\n'.format(self.conf['Series_Info']['job_name'], sample))\n jsh.write('#$ -pe {1} {0}\\n'.format(self.conf['Global_Parameters']['NUMTHREADS'], PE))\n jsh.write('#$ -R y\\n')\n jsh.write('#$ -o {0}\\n'.format(sampinfo[sample]['sample_sge_err']))\n jsh.write('source /etc/profile.d/set_modules.sh\\n')\n jsh.write('module use -a {0}\\n'.format(self.conf['Global_Parameters']['MODULEPATH']))\n jsh.write('module load synapse\\n')\n jsh.write('echo \"** Job Execution on Host: `hostname`\"\\n')\n cmd = '{0} --pipeline {1} --config {2} --globalconfig NULL\\n' \\\n .format(SFPATH, self.conf['Pipeline_Info']['workflow'], sconffile)\n jsh.write(cmd)\n\n Job(jobfile=job_shell_file, config=sconffile, pidlogfile=self.param['pidlog'], testmode=self.args.test)\n pass", "title": "" }, { "docid": "f3a28ab9466604d0fd8e0db70623060a", "score": "0.5251271", "text": "def create_log_dir(job_id):\n 
base_dir = os.path.join(settings.LOG_DIR, '{}'.format(datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")))\n if _setup_dir(base_dir):\n sub_dir = os.path.join(base_dir, job_id)\n if _setup_dir(sub_dir):\n return sub_dir\n else:\n return None\n else:\n return None", "title": "" }, { "docid": "8952b3b3b5e730ad3872c7508491507a", "score": "0.5246735", "text": "def dir_queue(self):\n return self.abspath('queue')", "title": "" }, { "docid": "f53132153d9f42805d267108f00f18db", "score": "0.52424836", "text": "def _init_paths(self):\n # Where the script is has the config defaults.\n if __name__ != '__main__':\n self.script_dir = os.path.dirname(__file__)\n else:\n self.script_dir = os.path.abspath(sys.path[0])\n # Where we run the job.\n self.job_dir = os.getcwd()", "title": "" }, { "docid": "fc5fd2bb50e7cc40659774be93986672", "score": "0.5235665", "text": "def run(ctx):\n multiple_runs([{}], ctx.obj['runs'], ctx.obj['cpus'], ctx.obj['output_dir'])", "title": "" }, { "docid": "c123b4bab76cd30273d9678dd75c85ee", "score": "0.5234785", "text": "def test_40_jobs(self):\r\n pass", "title": "" }, { "docid": "c123b4bab76cd30273d9678dd75c85ee", "score": "0.5234785", "text": "def test_40_jobs(self):\r\n pass", "title": "" }, { "docid": "4cad3570bb5ba81cc27692204bdc0453", "score": "0.52275336", "text": "def submit_jobs(self, jobnames, folder=None):\n\n import time\n from pyemto.utilities.utils import run_emto\n\n sleeptime = 10\n\n if folder is None:\n folder = self.folder\n\n job_ids = []\n for i in range(len(jobnames)):\n job_ids.append(run_emto(jobnames[i], folder=self.folder))\n\n # Flatten job_ids list and convert the integers into strings\n job_ids = [item for sublist in job_ids for item in sublist]\n for i in range(len(job_ids)):\n job_ids[i] = str(job_ids[i])\n\n # Give SLURM some time to register the jobs.\n # If continued straight away the self.wait_for_jobs\n # script will likely think all the jobs have finished\n # since it cannot find them yet.\n time.sleep(sleeptime)\n\n return job_ids", "title": "" }, { "docid": "8d217b1202fe613d66db0dc1d4615814", "score": "0.5225632", "text": "def evaluation_log_directory(self) -> str:\n return os.path.join(self.evaluation_directory, \"log\")", "title": "" }, { "docid": "ef70dd3377dcba00dc5f362e526fdc8f", "score": "0.52254915", "text": "def project_folders(self):\n return [\"output_dir\", \"results_subdir\", \"submission_subdir\"]", "title": "" }, { "docid": "fdbe155a89c88c932aa7f4c85df86422", "score": "0.5213555", "text": "def runner_name(runner: RunnerModule) -> str:\n return module_basename(runner).replace(\"_\", \"-\")", "title": "" }, { "docid": "0e538a54605cee095e99478ef30b9776", "score": "0.52067655", "text": "def format_running_jobs(self, jobs):\n\n if len(jobs) == 0:\n return \"There is no running jobs!\"\n\n return '\\n'.join(['%s - %s (%s) - %s' % (str(job['number']),\n job['name'],\n job['url'],\n job['executor']) for job in jobs]).strip()", "title": "" }, { "docid": "bede4f5b4dc6d91b053be456e31d7f6a", "score": "0.5200722", "text": "def abqInputNames(job):\n jobname = os.path.basename(job)\n if jobname.endswith('.inp'):\n jobname = jobname[:-4]\n filename = os.path.abspath(job)\n if not filename.endswith('.inp'):\n filename += '.inp'\n return jobname,filename", "title": "" }, { "docid": "10ed22c946835a828c6b7df4dd27c91c", "score": "0.5196548", "text": "def directory_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"directory_name\")", "title": "" }, { "docid": "bf14d741d557f9df4db8abeed0a2a706", "score": "0.51842064", "text": "def 
run_neb_subfolders(self):\n myname=self.keywords['name']\n subdirs = dirutil.walkdirs(myname,1,1)\n imct = 0\n numim = int(self.keywords['program_keys']['mast_neb_settings']['images'])\n for subdir in subdirs:\n if imct == 0 or imct > numim:\n pass\n else:\n newname = os.path.join(myname, subdir)\n self.keywords['name']=newname\n BaseIngredient.run(self,\"serial\")\n imct = imct + 1\n self.keywords['name']=myname\n return", "title": "" }, { "docid": "a899ea8bf8e01960dfc0e60caeaf633e", "score": "0.51755965", "text": "def get_summary_writer_log_dir(args: argparse.Namespace) -> str:\n tb_log_dir_prefix = (\n f\"CNN_bn_\"\n f\"mode={args.mode}_\"\n f\"decay={args.weight_decay}_\"\n f\"run_\"\n )\n i = 0\n while i < 1000:\n tb_log_dir = args.log_dir / (tb_log_dir_prefix + str(i))\n if not tb_log_dir.exists():\n return str(tb_log_dir)\n i += 1\n return str(tb_log_dir)", "title": "" }, { "docid": "5277b430bd70fcc1ed608128d5f22591", "score": "0.51718915", "text": "def __scores_directory__(self, protocol):\n return os.path.join(self.m_configuration.user_directory, self.m_args.score_sub_directory, protocol, self.m_args.score_directory)", "title": "" }, { "docid": "a263a33bb98f93ca3b139ceb384bd5ea", "score": "0.5164971", "text": "def work_dir(self):\n return self.parser['DEFAULT']['work_dir']", "title": "" }, { "docid": "975e8f3341cd423f7fc8fc1f7624dfce", "score": "0.51593935", "text": "def genJob(self,tempprefix,finalprefix,cifname):\n #DEFINE RELEVANT DIRECTORIES\n finaldir = self.main_dir+cifname+'/'+finalprefix+'/'\n tempdir = self.temp_dir+tempprefix+'/'\n if not os.path.exists(finaldir):\n os.makedirs(finaldir)\n \n #GENERATE INPUTS IN THE FINAL DIRECTORY\n Inputs = inputs(cifname)\n Inputs.INCAR(tempdir,finaldir,'INCAR')\n Inputs.POSCAR_POTCAR(self.cif_dir,finaldir)\n Inputs.KPOINTS(self.main_dir,finaldir)\n Inputs.QUEUE(tempdir,finaldir,finalprefix,'marcc.job')\n \n #MOVE BACK TO MAIN DIRECTORY\n os.chdir(self.main_dir)\n return", "title": "" }, { "docid": "b000fa062f12ce8098939f8b6515ac28", "score": "0.5158647", "text": "def logdir(self) -> str:\n pass", "title": "" }, { "docid": "b5f3ef6830085a1c1399ef849c245cc7", "score": "0.5146445", "text": "def main():\n\n settings = get_project_settings()\n configure_logging(settings)\n\n args, remainder = _parse_args()\n LOGGER.info(args)\n LOGGER.info(remainder)\n\n base_dir = Path(settings[\"BASE_DIR\"]).resolve()\n cache_dir = base_dir / \".scrapy\" / \"httpcache\"\n feeds_dir = Path(args.feeds_dir) if args.feeds_dir else base_dir / \"feeds\"\n feeds_dir = feeds_dir.resolve()\n feeds_dir_scraper = (\n feeds_dir / args.feeds_subdir if args.feeds_subdir else feeds_dir / args.spider\n )\n file_tag = normalize_space(args.file_tag)\n out_file = feeds_dir_scraper / \"%(class)s\" / f\"%(time)s{file_tag}.jl\"\n\n LOGGER.info(\"Output file will be <%s>\", out_file)\n\n from_settings = job_dir_from_settings(settings)\n job_dir = (\n Path(args.job_dir)\n if args.job_dir\n else Path(from_settings)\n if from_settings\n else base_dir / \"jobs\" / args.spider\n )\n job_dir = job_dir.resolve()\n\n cache_dir.mkdir(parents=True, exist_ok=True)\n feeds_dir_scraper.mkdir(parents=True, exist_ok=True)\n job_dir.mkdir(parents=True, exist_ok=True)\n\n dont_run_before_file = job_dir / \".dont_run_before\"\n dont_run_before = parse_date(\n args.dont_run_before, tzinfo=timezone.utc\n ) or date_from_file(dont_run_before_file, tzinfo=timezone.utc)\n\n if dont_run_before:\n LOGGER.info(\"Don't run before %s\", dont_run_before.isoformat())\n sleep_seconds = 
dont_run_before.timestamp() - now().timestamp()\n if sleep_seconds > 0:\n LOGGER.info(\"Going to sleep for %.1f seconds\", sleep_seconds)\n sleep(sleep_seconds)\n\n states = _find_states(\n job_dir, state_file=settings.get(\"STATE_TAG_FILE\") or \".state\"\n )\n\n running = sorted(sub_dir for sub_dir, state in states.items() if state == \"running\")\n\n if len(running) > 1:\n LOGGER.warning(\n \"Found %d running jobs %s, please check and fix!\", len(running), running\n )\n return\n\n if running:\n LOGGER.info(\"Found a running job <%s>, skipping...\", running[0])\n return\n\n resumable = sorted(\n sub_dir for sub_dir, state in states.items() if state in RESUMABLE_STATES\n )\n\n if len(resumable) > 1:\n LOGGER.warning(\n \"Found %d resumable jobs %s, please check and fix!\",\n len(resumable),\n resumable,\n )\n return\n\n if resumable:\n LOGGER.info(\"Resuming previous job <%s>\", resumable[0])\n\n job_tag = resumable[0] if resumable else now().strftime(DATE_FORMAT)\n curr_job = job_dir / job_tag\n\n command = [\n \"scrapy\",\n \"crawl\",\n args.spider,\n \"--output\",\n str(out_file),\n \"--set\",\n f\"JOBDIR={curr_job}\",\n \"--set\",\n f\"DONT_RUN_BEFORE_FILE={dont_run_before_file}\",\n ] + remainder\n\n LOGGER.info(\"Executing command %r\", command)\n\n try:\n execute(argv=command)\n finally:\n garbage_collect()", "title": "" }, { "docid": "38507e985e4851847142eda876c9f651", "score": "0.51451474", "text": "def plural_jobs_per_submission(self,dryrun=False,limit_seconds=-1,\n template_script=None):\n self.make_mpi_command_dict()\n\n tmpdir = _clmgr_dir+'/tmpdir'\n os.system('mkdir -p '+tmpdir)\n\n cwd = os.getcwd()\n pid = os.getpid()\n njobs = 0\n jobs = []\n #...Num procs per node\n npn = self.machine.nprocs_per_node\n #...Num nodes per submission\n limit_nodes = self.machine.qattr['num_nodes']\n limit_sec = self.machine.qattr['limit_sec']\n if limit_seconds > 0:\n limit_sec = min(limit_sec,limit_seconds)\n logger.info('Limit second is modified from machine default value'\n +' to {0:d}.'.format(limit_sec))\n max_ctime = 0.0\n #...Initialize job_info\n job_info = {}\n #...Initial submission-ID\n isub = 1 \n job_info['WORKDIR'] = tmpdir\n job_info['QUEUE'] = queue\n #...Use full procs per node\n job_info['NPROCS_NODE'] = npn\n dirs = []\n sum_nodes = 0\n commands = \"\"\n for i,d in enumerate(self.dirs_to_work):\n os.chdir(d)\n calc = self.Calculator(d)\n nnodes,npn1,npara = calc.estimate_nprocs(max_npn=npn)\n if sum_nodes+nnodes > limit_nodes:\n #...Register job_info and dirs to jobs\n isub = len(jobs)+1\n job_info['NNODES'] = sum_nodes\n job_info['NPROCS'] = npn *sum_nodes\n job_info['JOB_NAME'] = 'clmgr{0:d}_{1:d}'.format(pid,isub)\n job_info['COMMANDS'] = commands\n # hours = int(max_ctime /3600 +1)\n hours,minutes,seconds = sec2hms(max_ctime)\n job_info['WALLTIME'] = '{0:d}:{1:02d}:{2:02d}'.format(hours,\n minutes,\n seconds)\n job = {}\n job['info'] = copy.copy(job_info)\n job['dirs'] = copy.copy(dirs)\n jobs.append(job)\n #...Initialize some\n sum_nodes = 0\n commands = \"\"\n dirs = []\n max_ctime = 0.0\n make_rankfile(sum_nodes,nnodes,npn1,d)\n sum_nodes += nnodes\n dirs.append(d)\n ctime = calc.estimate_calctime(nprocs=npara)\n max_ctime = min(max(max_ctime,ctime),limit_sec)\n #...max_ctime = max(max_ctime,3600)\n self.mpi_command_dict['npara'] = npara\n self.mpi_command_dict['rankfile'] = './rankfile'\n commands += \"cd {0:s}\\n\".format(d) \\\n +self.machine.get_mpi_command(**self.mpi_command_dict) \\\n +\" &\\n\"\n if dirs:\n # Register rest of works to jobs\n isub = 
len(jobs) +1\n job_info['JOB_NAME'] = 'clmgr{0:d}_{1:d}'.format(pid,isub)\n job_info['NNODES'] = sum_nodes\n job_info['NPROCS'] = npn *sum_nodes\n job_info['COMMANDS'] = commands\n # hours = max(int(max_ctime /3600),1)\n hours,minutes,seconds = sec2hms(max_ctime)\n # job_info['WALLTIME'] = '{0:d}:00:00'.format(hours)\n job_info['WALLTIME'] = '{0:d}:{1:02d}:{2:02d}'.format(hours,\n minutes,\n seconds)\n job = {}\n job['info'] = copy.copy(job_info)\n job['dirs'] = copy.copy(dirs)\n jobs.append(job)\n\n #...Submit jobs to the scheduler\n # print('len(jobs) = ',len(jobs))\n os.chdir(tmpdir)\n jobs2 = []\n for i, job in enumerate(jobs):\n jobnum = i+1\n job_info = job['info']\n dirs = job['dirs']\n script = self.sched.script_plural(job_info,template_script)\n batch_fname = self._batch_fname.format(batch_id=\n '{0:d}_{1:d}'.format(pid,jobnum))\n with open(batch_fname,\"w\") as f:\n f.write(script)\n\n if dryrun:\n logger.info(self.sched.get_command('submit')\n +' '+batch_fname+' at '+os.getcwd())\n jobid = 0\n else:\n try:\n jobid = self.sched.submit(batch_fname)\n njobs += 1\n except Exception as e:\n logger.warn('There is an error occurred, e = ',e)\n pass\n for d in dirs:\n jobs2.append((d,jobid))\n with open(d+'/'+self._stat_file,'w') as f:\n f.write('{0:d}\\n'.format(jobid))\n os.chdir(cwd)\n return jobs2", "title": "" }, { "docid": "bfd3aadffe83adadeced6364d940ddad", "score": "0.5144348", "text": "def create_logdir(model_name):\n time = datetime.datetime.now().strftime(\"%b%d_%H-%M-%S\")\n logdir = os.path.abspath(os.path.join(\"runs\", f\"{time}_{model_name}\"))\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n return logdir", "title": "" }, { "docid": "c83f9254ed90da1fb712c67518c7c2a4", "score": "0.5142161", "text": "def directory_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"directory_name\")", "title": "" }, { "docid": "0ebfa2f90d187ad77e1538d5cfc762e5", "score": "0.5136042", "text": "def get_results_dir_list(pid, core_dir_basename):\r\n pid_dir_dict = {}\r\n for debugdir_file in glob.glob(\"/tmp/autotest_results_dir.*\"):\r\n a_pid = os.path.splitext(debugdir_file)[1]\r\n results_dir = open(debugdir_file).read().strip()\r\n pid_dir_dict[a_pid] = os.path.join(results_dir, core_dir_basename)\r\n\r\n results_dir_list = []\r\n # If a bug occurs and we can't grab the PID for the process that died, just\r\n # return all directories available and write to all of them.\r\n if pid is not None:\r\n while pid > 1:\r\n if pid in pid_dir_dict:\r\n results_dir_list.append(pid_dir_dict[pid])\r\n pid = get_parent_pid(pid)\r\n else:\r\n results_dir_list = pid_dir_dict.values()\r\n\r\n return (results_dir_list or\r\n pid_dir_dict.values() or\r\n [os.path.join(\"/tmp\", core_dir_basename)])", "title": "" }, { "docid": "465ca2e6624fe1c8aa872ea68533ec1f", "score": "0.51317674", "text": "def init_rundir(path_to_run):\n os.mkdir(path_to_run)\n\n job_output_dir = os.path.join(path_to_run, \"batch_output\")\n os.mkdir(job_output_dir)\n\n networks_dir = os.path.join(path_to_run, \"networks\")\n os.mkdir(networks_dir)\n\n code_dir = os.path.join(path_to_run, \"code\")\n os.mkdir(code_dir)\n\n loss_dir = os.path.join(path_to_run, \"loss\")\n os.mkdir(loss_dir)\n\n checkpoints_dir = os.path.join(path_to_run, \"checkpoints\")\n os.mkdir(checkpoints_dir)\n\n data_dir = os.path.join(path_to_run, \"training_data\")\n os.mkdir(data_dir)\n\n features_dir = os.path.join(data_dir, \"features\")\n os.mkdir(features_dir)\n labels_dir = os.path.join(data_dir, \"labels\")\n os.mkdir(labels_dir)\n\n 
return", "title": "" }, { "docid": "136c8537bd7e02ff8880dd89de137770", "score": "0.5128066", "text": "def prepare_work_dir(config: Union[Config, ConfigDict]) -> str:\n base_work_dir = config.work_dir\n checkpoint_dirs = glob.glob(os.path.join(base_work_dir, \"checkpoints_round_*\"))\n train_round_checkpoint_dir = os.path.join(base_work_dir, f\"checkpoints_round_{len(checkpoint_dirs)}\")\n os.makedirs(train_round_checkpoint_dir)\n config.work_dir = train_round_checkpoint_dir\n if \"meta\" not in config.runner:\n config.runner.meta = ConfigDict()\n config.runner.meta.exp_name = f\"train_round_{len(checkpoint_dirs)}\"\n return train_round_checkpoint_dir", "title": "" }, { "docid": "91045887585dae1351192deba1695a68", "score": "0.5123673", "text": "def _make_default_output_name(input_dir, input_type):\n input_dir = input_dir.rstrip(os.path.sep)\n return os.path.join(os.path.dirname(input_dir), \"processed_%s.json\" % input_type)", "title": "" }, { "docid": "f0d5ae0d786203afde0d76ae3cfd284b", "score": "0.5122013", "text": "def _get_results_path(environment, job_name, folder_name = 'stored_results'):\n dirname = job_name\n results_path = _os.path.join(_tempfile.gettempdir(), dirname, folder_name)\n if isinstance(environment, _environment.EC2):\n\n # get credentials\n if not hasattr(environment, 'aws_access_key'):\n # no access key in environment, try to get from get_credentials\n try:\n access_key, secret_key = _gl.aws.get_credentials()\n except KeyError:\n # not in get_credentials and not in environment, fail validation\n raise RuntimeError('Validation Failed: Unable to retrieve AWS credentials for S3, needed for writing results from this execution. Use graphlab.aws.set_credentials() or set the credentials in the EC2 Environment object.')\n else:\n access_key = environment.aws_access_key\n secret_key = environment.aws_secret_key\n\n path = _os.path.join(environment.s3_bucket, environment.s3_log_folder_path, dirname, folder_name)\n results_path = \"s3://%s:%s:%s\" % (access_key, secret_key, path)\n elif isinstance(environment, _environment.Hadoop):\n # get the root path in hdfs land, to support actually launching in HDFS\n if environment.get_config_dir() is not None:\n root = _subprocess.check_output(\n [\"hdfs\", \"--config\", _os.path.expanduser(environment.get_config_dir()),\n \"getconf\", \"-confKey\", \"fs.defaultFS\"]).rstrip()\n else:\n root = _subprocess.check_output([\"hdfs\", \"getconf\", \"-confKey\", \"fs.defaultFS\"]).rstrip()\n\n # get the username for running the Hadoop job.\n user = _pwd.getpwuid(_os.getuid()).pw_name\n if 'HADOOP_USER_NAME' in _os.environ:\n user = _os.environ['HADOOP_USER_NAME']\n dirname = dirname + '-' + folder_name\n results_path = _os.path.join(root, 'user', user, dirname)\n\n return results_path", "title": "" }, { "docid": "2a13101f28b18a709b797c779852e4b9", "score": "0.5118983", "text": "def scanInDir(dirname):\n tmp = os.getcwd()\n os.chdir(dirname)\n # this breaks backwards compatibility, python3.7 and higher only!\n finished = subprocess.run(\"yes | sfincsScan\",shell=True, capture_output=True, text=True)\n os.chdir(tmp)\n stdout = finished.stdout\n search = \"Here are the directories that will be created:\\n[\"\n i1 = stdout.find(search)\n i2 = i1 + stdout[i1:].find(\"]\")\n subdirnames = [x.strip()[1:-1] for x in stdout[i1+len(search):i2].split(',') ] \n dirnames = [dirname + \"/\" + sdn for sdn in subdirnames if len(sdn) > 0]\n jobids = []\n search = \"Submitted batch job \"\n lenS = len(search)\n for i in range(len(dirnames)):\n i2 = i2 + 
stdout[i2:].find(search) + lenS\n jobids.append(stdout[i2:].split(None,1)[0])\n\n stype = scanType(dirname)\n subdirs = [os.path.join(dirname, f) for f in os.listdir(dirname) if os.path.isdir(os.path.join(dirname, f))]\n # TODO: Check if this works\n olddirs = [(d,Job.getLatestJobID(d)) for d in subdirs if d not in (dirnames + ['prelim', 'Erscan'])]\n print(\"These old unfinished simulations were automatically added to the list of simulations:\")\n print(olddirs)\n # add jobs that were previously run but ran out of time\n # or OOM\n # in the former case, nothing will actually be done\n # except that the list of unfinished simulations will be non-empty\n # so that the program waits\n for (x,y) in olddirs:\n s = Job(x,y).status\n if s == \"OOM\" or s == \"TIME\":\n dirnames.append(x)\n jobids.append(y)\n\n if stype == 1:\n scan = Scan1([Job(x,y) for (x,y) in zip(dirnames,jobids)])\n elif stype == 2:\n scan = Scan2([Job(x,y) for (x,y) in zip(dirnames,jobids)])\n else:\n scan = Scan([Job(x,y) for (x,y) in zip(dirnames,jobids)])\n scan.dirname = dirname\n return scan", "title": "" }, { "docid": "79aba3c1831f4266a8b32fab508bee2f", "score": "0.5116407", "text": "def create_input_file_dir(config):\n [job] = OqJob.objects.filter(id=config[\"jobid\"])\n job.path = os.path.join(job.oq_params.upload.path, str(job.id))\n os.mkdir(job.path)\n os.chmod(job.path, 0777)\n job.status = \"running\"\n job.save()\n return job", "title": "" }, { "docid": "4ce6de4c0d3c02b31f4f7ed9da016d48", "score": "0.51140285", "text": "def BuildListOfProcessedRunsOnDisk(self):\n rundirs_on_disk = []\n if self.REVISION == \"mc\":\n # the monitoring ROOT files are all stored in one directory\n monitoring_files = [ f for f in listdir(join(self.INPUT_DIRECTORY,self.ROOTFILE_DIR)) \n if (isfile(join(self.INPUT_DIRECTORY,self.ROOTFILE_DIR,f))and(f[-5:]=='.root')) ]\n for fname in monitoring_files:\n tokens = fname[:-5].split('_')\n if len(tokens) < 3:\n continue\n try:\n runnum = int(tokens[2])\n except ValueError:\n logging.error(\"skipping file \" + fname + \" ...\")\n if tokens[2] not in rundirs_on_disk:\n rundirs_on_disk.append(tokens[2])\n \n else:\n # The monitoring ROOT files are stored in one directory per run\n dirs_on_disk = [ d for d in listdir(join(self.INPUT_DIRECTORY,self.REVISION,self.ROOTFILE_DIR)) \n if os.path.isdir(join(self.INPUT_DIRECTORY,self.REVISION,self.ROOTFILE_DIR,d)) ]\n \n for dirname in sorted(dirs_on_disk):\n try:\n runnum = int(dirname)\n except ValueError:\n logging.error(\"skipping directory \" + dirname + \" ...\")\n rundirs_on_disk.append(dirname)\n\n return sorted(rundirs_on_disk)", "title": "" }, { "docid": "1c62ae8a0ef7382052c87f181b541875", "score": "0.51071495", "text": "def generate_run_name(pipeline_name: str):\n return \"{}_run-{}\".format(pipeline_name, utils.random_string(5))", "title": "" }, { "docid": "25c7d4cebecf959f73147495caf88d4c", "score": "0.5105669", "text": "def _get_experiment_directory_path(group_results, pi_experiment_name, start_date):\n experiment_directory_name = (\n group_results\n and _get_most_recent_experiment_directory_name(pi_experiment_name)\n or _generate_experiment_directory_name(start_date, pi_experiment_name)\n )\n return os.path.join(get_base_output_path(), experiment_directory_name)", "title": "" }, { "docid": "e047ff27364619f75a568c2b44be4d74", "score": "0.5105065", "text": "def get_new_dir_name_from_args(args: List) -> str:\n if not 1 < len(args) < 3:\n raise ValueError('Incorrect number of arguments. 
Usage: \"start <project directory>\"')\n\n return args[1]", "title": "" }, { "docid": "2c0df43c519775dec00aec89910b20d7", "score": "0.5100202", "text": "def get_job_name(self):\n # jobid is an overloaded term in the pbsmrtpipe world, so we use job_name here.\n return self.mjob.job.jobid", "title": "" }, { "docid": "6f650f5c54c7eb41a1e2dc930ae9e592", "score": "0.5099983", "text": "def _generate_config_dir(self, uuid, container):\n return 'hadoop_' + str(uuid) + '_' + str(container['data_ip'])", "title": "" } ]
72fe97dcf6b04db82bce33b9f3fc4a0f
Return a Django form field appropriate for a date property. This defaults to a DateField instance, except if auto_now or auto_now_add is set, in which case None is returned, as such 'auto' fields should not be rendered as part of the form.
[ { "docid": "d14063d7089b49052f3395734f3e4c68", "score": "0.8639604", "text": "def get_form_field(self, **kwargs):\r\n if self.auto_now or self.auto_now_add:\r\n return None\r\n defaults = {'form_class': forms.DateField}\r\n defaults.update(kwargs)\r\n return super(DateProperty, self).get_form_field(**defaults)", "title": "" } ]
[ { "docid": "00950dd3fa209a44c2a5f1d3e907a1b5", "score": "0.77816665", "text": "def get_form_field(self, **kwargs):\r\n if self.auto_now or self.auto_now_add:\r\n return None\r\n defaults = {'form_class': forms.DateTimeField}\r\n defaults.update(kwargs)\r\n return super(DateTimeProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "4b269ec71214c2a3c46d1fc91f795499", "score": "0.76691645", "text": "def convert_DateProperty(model, prop, kwargs):\r\n if prop.auto_now or prop.auto_now_add:\r\n return None\r\n\r\n return f.DateField(format='%Y-%m-%d', **kwargs)", "title": "" }, { "docid": "ee2f0c100591f9eb286486569394322a", "score": "0.69408727", "text": "def create_generic_date_field(name, attr_getter, dt_format, today_by_default=True):\r\n \r\n def value(model):\r\n \"\"\" Returns the model date or today's date \"\"\"\r\n default_date = datetime.date.today() if today_by_default else None\r\n dt = model and attr_getter(model) or default_date\r\n return dt and formatting.format_date(dt, dt_format)\r\n \r\n return Field(name=name, value=value)", "title": "" }, { "docid": "788644ec43b6da36d93c470fb792e869", "score": "0.67961437", "text": "def get_form_field(self, **kwargs):\r\n if self.auto_now or self.auto_now_add:\r\n return None\r\n defaults = {'form_class': forms.TimeField}\r\n defaults.update(kwargs)\r\n return super(TimeProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "f0ee98c28794269fe01a653b6d602b47", "score": "0.67168206", "text": "def _date_field(self):\n for field in self.model._meta.fields:\n if isinstance(field, (DateTimeField, DateField)):\n return field.name", "title": "" }, { "docid": "cf217c076733cef6388afc390ef16e78", "score": "0.6399484", "text": "def create_date_field(name, model_date_attribute, dt_format, today_by_default=True):\r\n return create_generic_date_field(name, operator.attrgetter(model_date_attribute), dt_format, today_by_default)", "title": "" }, { "docid": "cbc5f8f73403a4e62342f6dc3ca4f5b7", "score": "0.6111343", "text": "def get_form_field(self, **kwargs):\r\n defaults = {}\r\n if hasattr(forms, 'FloatField'):\r\n defaults['form_class'] = forms.FloatField\r\n defaults.update(kwargs)\r\n return super(FloatProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "968903afae040a1ed90311eef2806db5", "score": "0.6089097", "text": "def convert_DateTimeProperty(model, prop, kwargs):\r\n if prop.auto_now or prop.auto_now_add:\r\n return None\r\n\r\n return f.DateTimeField(format='%Y-%m-%d %H-%M-%S', **kwargs)", "title": "" }, { "docid": "58a5d39348d8863e24f49432d7e637a1", "score": "0.59988314", "text": "def fieldfunt(field):\n return forms.DateTimeField(label=field.name, initial=field, help_text=\"Time stamp\")", "title": "" }, { "docid": "d465ea278da7b28c81458b3b41be656c", "score": "0.59116423", "text": "def get_end_date_field(self):\n if self.end_date_field is None:\n raise ImproperlyConfigured(\"%s.end_date_field is required.\" % self.__class__.__name__)\n return self.end_date_field", "title": "" }, { "docid": "ad644fa19582173ed123e9fd248cf3af", "score": "0.58976954", "text": "def get_carddate_field( self, date ):\n\n if type(date) == datetime.date:\n return date.strftime('%m%Y')\n\n raise ValueError( 'date value must be a datetime.date instance' )", "title": "" }, { "docid": "9d7f7c0b6e9a4f2489eb50674c013eba", "score": "0.5890687", "text": "def Date(request):\n schema = schemaish.Structure()\n schema.add('myDateField', schemaish.Date())\n form = formish.Form(schema, 'form')\n return form", "title": "" }, { 
"docid": "c3f0d1778e2ff93fbb3fbecd78b40611", "score": "0.5839333", "text": "def value(model):\r\n default_date = datetime.date.today() if today_by_default else None\r\n dt = model and attr_getter(model) or default_date\r\n return dt and formatting.format_date(dt, dt_format)", "title": "" }, { "docid": "c42fa4a89963fe130c682b63635528f0", "score": "0.5730355", "text": "def _get_date(self, value, as_date=False):\n if value is None:\n return None\n\n if isinstance(value, Date):\n return value._datetime if not as_date else value\n\n if isinstance(value, date):\n return value if not as_date else Date.instance(value)\n\n raise ValueError('Invalid date \"{}\"'.format(value))", "title": "" }, { "docid": "237e902a76a983b3a907db14d7b73772", "score": "0.57200694", "text": "def get_field(self, model_field):\r\n return Field()", "title": "" }, { "docid": "7f2c71fad1e22fd670ea4237aa56ab2b", "score": "0.57104397", "text": "def decide_date(self):\n return self._decide_date", "title": "" }, { "docid": "7744164c1dad8dcd9881a0fa5fa33f08", "score": "0.5662445", "text": "def mdc_dateinput(context):\n\n props, children = [], []\n props.append(\n cp(\"name\", context['widget']['name']))\n if context[\"widget\"][\"value\"] is not None:\n props.append(\n cp(\"value\", context[\"widget\"][\"value\"]))\n\n for attr, value in context['widget']['attrs'].items():\n props.append(cp(attr, value))\n\n return mdc.DateField(props, children, context)", "title": "" }, { "docid": "29e2b3ef7c2aba282178a0feeab4e602", "score": "0.5645978", "text": "def get_form_field(self, **kwargs):\r\n return None", "title": "" }, { "docid": "fc320942194314f8d1d96eb2bf6b6e76", "score": "0.5618907", "text": "def get_date_form(param, default=None, is_mandatory=False):\n try:\n value = request.form[param]\n except KeyError:\n if is_mandatory:\n raise errors.Invalid(\"'{}' parameter is required\".format(param))\n return default\n\n return schemas.validate(schemas.Timestamp(), value, name=param)", "title": "" }, { "docid": "37601399db579b39da3b0b9a8b5a4b55", "score": "0.5598217", "text": "def get_date_object(date=None):\r\n if date:\r\n date_obj = datetime.datetime.strptime(date, '%Y-%m-%d').date()\r\n else:\r\n date_obj = datetime.date.today()\r\n return date_obj", "title": "" }, { "docid": "6beb273a9eed00573030647daa03ecfe", "score": "0.555527", "text": "def __init__(self, field, request, params, model, model_admin, field_path):\n super().__init__(field, request, params, model, model_admin, field_path)\n\n now = timezone.now()\n if isinstance(field, models.DateTimeField):\n today = now.replace(hour=0, minute=0, second=0, microsecond=0)\n else: # field is a models.DateField\n today = now.date()\n tomorrow = today + datetime.timedelta(days=1)\n\n self.lookup_kwarg_since = \"%s__gte\" % field_path\n self.lookup_kwarg_until = \"%s__lt\" % field_path\n self.links = (\n (_(\"Any date\"), {}),\n (\n _(\"Today\"),\n {\n self.lookup_kwarg_since: str(today),\n self.lookup_kwarg_until: str(tomorrow),\n },\n ),\n (\n _(\"Last 7 days\"),\n {\n self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),\n self.lookup_kwarg_until: str(tomorrow),\n },\n ),\n (\n _(\"Last month\"),\n {\n self.lookup_kwarg_since: str(today - relativedelta(months=1)),\n self.lookup_kwarg_until: str(tomorrow),\n },\n ),\n (\n _(\"Last 6 months\"),\n {\n self.lookup_kwarg_since: str(today - relativedelta(months=6)),\n self.lookup_kwarg_until: str(tomorrow),\n },\n ),\n (\n _(\"Last year\"),\n {\n self.lookup_kwarg_since: str(today - relativedelta(years=1)),\n 
self.lookup_kwarg_until: str(tomorrow),\n },\n ),\n )\n if field.null:\n self.lookup_kwarg_isnull = \"%s__isnull\" % field_path\n self.links += (\n (_(\"No date\"), {self.field_generic + \"isnull\": \"True\"}),\n (_(\"Has date\"), {self.field_generic + \"isnull\": \"False\"}),\n )", "title": "" }, { "docid": "a5cfe86d054957e8366cd592e35d0443", "score": "0.5539572", "text": "def get_date(self):\n return as_py_date(self.widget.GetValue())", "title": "" }, { "docid": "039c4d11947fc883bf2241c14352e29b", "score": "0.5505086", "text": "def doctest_LazyDateField(): # noqa: E501 line too long", "title": "" }, { "docid": "d3f23f3a44fbd6acf01e23cc3766adbf", "score": "0.54958254", "text": "def convert(self, model, prop, field_args):\r\n kwargs = {\r\n 'label': prop.name.replace('_', ' ').title(),\r\n 'default': prop.default_value(),\r\n 'validators': [],\r\n }\r\n if field_args:\r\n kwargs.update(field_args)\r\n\r\n if prop.required:\r\n kwargs['validators'].append(validators.required())\r\n\r\n if prop.choices:\r\n # Use choices in a select field.\r\n kwargs['choices'] = [(v, v) for v in prop.choices]\r\n return f.SelectField(**kwargs)\r\n else:\r\n converter = self.converters.get(type(prop).__name__, None)\r\n if converter is not None:\r\n return converter(model, prop, kwargs)", "title": "" }, { "docid": "3d2985d276bcfaaa42c4dea6ac4387dd", "score": "0.5453636", "text": "def getFieldValue(self, form, doc=None, editmode_obsolete=False,\n creation=False, request=None):\n fieldValue = BaseField.getFieldValue(\n self, form, doc, editmode_obsolete, creation, request)\n\n mode = self.context.field_mode\n if (mode == \"EDITABLE\" and\n request and\n ((doc is None and not(creation)) or\n 'Plomino_datagrid_rowdata' in request)):\n fieldname = self.context.id\n fieldValue = request.get(fieldname, fieldValue)\n\n if fieldValue and isinstance(fieldValue, basestring):\n fmt = self.format\n if not fmt:\n fmt = form.getParentDatabase().datetime_format\n fieldValue = StringToDate(fieldValue, fmt)\n return fieldValue", "title": "" }, { "docid": "844aa67a2be02ba9ea38c95567d8e0a8", "score": "0.54128104", "text": "def get_date(self):\n return self.date", "title": "" }, { "docid": "6edb325f4cd3f174eee7ccc11396d735", "score": "0.54102534", "text": "def get_user_defined_parameter_select_date(self):\n return self.get_text_from_element(self.user_defined_parameter_select_date_locator, is_a_input_field=True)", "title": "" }, { "docid": "093dda339fec8c574c47a87da02f7a31", "score": "0.5383661", "text": "def get_form_field(self, **kwargs):\r\n defaults = {'form_class': forms.BooleanField}\r\n defaults.update(kwargs)\r\n return super(BooleanProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "9297d3d154b907382ceb04dbd1b8366e", "score": "0.53678477", "text": "def get_form_field(self, **kwargs):\r\n defaults = {'form_class': ModelChoiceField,\r\n 'reference_class': self.reference_class}\r\n defaults.update(kwargs)\r\n return super(ReferenceProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "a93c2f0a8117df13fe8a1cfc31c3f7f0", "score": "0.53560776", "text": "def get_field(self, field, default=None):\n raise NotImplementedError()", "title": "" }, { "docid": "a19a3ea3aff244ad0617e3a89c5e6fe5", "score": "0.53498256", "text": "def get_date(self):\n return as_py_date(self.widget.GetDate())", "title": "" }, { "docid": "a701d6159b757040269a65a0b88cccb1", "score": "0.53265834", "text": "def date_created(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"date_created\")", "title": "" }, { 
"docid": "f4c4f78d719326581615224cd9bd9460", "score": "0.52754986", "text": "def get_form_field(self, **kwargs):\r\n defaults = {'form_class': forms.IntegerField}\r\n defaults.update(kwargs)\r\n return super(IntegerProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "a0955330d614b3d65bd5d9608c94283e", "score": "0.5250243", "text": "def clean(self, value):\r\n super(DateField, self).clean(value)\r\n if value in EMPTY_VALUES:\r\n return None\r\n if isinstance(value, datetime.datetime):\r\n return value.date()\r\n if isinstance(value, datetime.date):\r\n return value\r\n for format in self.input_formats:\r\n try:\r\n return datetime.date(*time.strptime(value, format)[:3])\r\n except ValueError:\r\n continue\r\n raise ValidationError(self.error_messages['invalid'])", "title": "" }, { "docid": "1df4c1b17b40dc8bfa66817330b96362", "score": "0.52480656", "text": "def BookingDateFieldWidget(field, request):\n return FieldWidget(field, BookingDateWidget(request))", "title": "" }, { "docid": "a2c62943380d7ff597eb9581bb993e2e", "score": "0.52240914", "text": "def get_field(opts, field_name):\n if not USE_LEGACY_FIELD_API:\n field = opts.get_field(field_name)\n direct = not field.auto_created\n else:\n field, _, direct, _ = opts.get_field_by_name(field_name)\n return field, direct", "title": "" }, { "docid": "9c8a62f5d485058b9c17c4b93a3cdaae", "score": "0.52240103", "text": "def get_form_field(self, **kwargs):\r\n if not hasattr(forms, 'FileField'):\r\n return None\r\n defaults = {'form_class': forms.FileField}\r\n defaults.update(kwargs)\r\n return super(BlobProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "dd870a376d5b844a81249f7f3b69c2ac", "score": "0.52018297", "text": "def format_date_field(dt: datetime.datetime, field: str, args: List[str]) -> str:\n fields = field.split(\".\")\n if len(fields) == 1:\n # no subfield, just return the formatted date str\n return dt.date().isoformat()\n if len(fields) > 2:\n raise ValueError(f\"Unhandled template value: {field}\")\n subfield = fields[1]\n if subfield == \"strftime\":\n if not args:\n return None\n try:\n return dt.strftime(args[0])\n except:\n raise ValueError(f\"Invalid strftime template: '{args}'\")\n else:\n try:\n return getattr(DateTimeFormatter(dt), subfield)\n except AttributeError as e:\n raise ValueError(f\"Unhandled template value: {field}\") from e", "title": "" }, { "docid": "98dcc0667e72189ee82ac50d1841d552", "score": "0.5195535", "text": "def get_create_date(self):\n\n return self.create_date.value", "title": "" }, { "docid": "98dcc0667e72189ee82ac50d1841d552", "score": "0.5195535", "text": "def get_create_date(self):\n\n return self.create_date.value", "title": "" }, { "docid": "5bb5346caa0f772bb29072d50d3abbda", "score": "0.5184367", "text": "def is_date(self, objecttype, fieldname):\n field = self.get_field(objecttype, fieldname)\n if not field:\n return False\n\n if field.get(\"input_type\") in [\"datetimepicker\", \"datepicker\"]:\n return True\n\n return False", "title": "" }, { "docid": "6f2674e8da5b2ac22e384aaae8eae1a3", "score": "0.5181086", "text": "def get_date(self):\n return self.return_value(self.value.date())", "title": "" }, { "docid": "ffcf82c1b6b75036938ddb9c3e11146f", "score": "0.5174721", "text": "def clean(self, value):\r\n super(DateField, self).clean(value)\r\n if value in EMPTY_VALUES:\r\n return None\r\n if isinstance(value, datetime.datetime):\r\n return value.date()\r\n if isinstance(value, datetime.date):\r\n return value\r\n for format in self.input_formats:\r\n 
try:\r\n return datetime.date(*time.strptime(value, format)[:3])\r\n except ValueError:\r\n continue\r\n raise ValidationError(gettext(u'Enter a valid date.'))", "title": "" }, { "docid": "b39fd456debc10f5ca49d99c41d44a1b", "score": "0.51601213", "text": "def _date(self) -> datetime:\n return self.__date", "title": "" }, { "docid": "5e0ae17e445867b96f5a5e3a6cd9836c", "score": "0.51217663", "text": "def get_cost_policy_formula_select_date(self):\n return self.get_text_from_element(self.cost_policy_formula_select_date_locator, is_a_input_field=True)", "title": "" }, { "docid": "5558ef7e212282a6698198d28e9e4aad", "score": "0.51178914", "text": "def reformat_date_or_auto(date):\n if date is None:\n return date\n elif date.lower() == \"auto\":\n return \"auto\"\n else:\n return timestamp.reformat_date(date)", "title": "" }, { "docid": "6d1d96849096aed45059356c4ef5c18a", "score": "0.51085657", "text": "def get_form_field(self, form_class=forms.CharField, **kwargs):\r\n defaults = {'required': self.required}\r\n if self.verbose_name:\r\n defaults['label'] = self.verbose_name.capitalize().replace('_', ' ')\r\n if self.choices:\r\n choices = []\r\n if not self.required or (self.default is None and\r\n 'initial' not in kwargs):\r\n choices.append(('', '---------'))\r\n for choice in self.choices:\r\n choices.append((str(choice), unicode(choice)))\r\n defaults['widget'] = forms.Select(choices=choices)\r\n if self.default is not None:\r\n defaults['initial'] = self.default\r\n defaults.update(kwargs)\r\n return form_class(**defaults)", "title": "" }, { "docid": "f3a952edc9e647fb7ba97d80b060b980", "score": "0.51064956", "text": "def field_dict(self, model):\r\n if self.field_names is None:\r\n return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField)])\r\n else:\r\n return dict([(f.name, f) for f in model._meta.fields if isinstance(f, models.DateField) and f.name in self.field_names])", "title": "" }, { "docid": "bb302058582cbc55f39c2ddc75a22499", "score": "0.5105611", "text": "def _make_single_date_lookup(self, date):\n date_field = self.get_date_field()\n if self.uses_datetime_field:\n since = self._make_date_lookup_arg(date)\n until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))\n return {\n '%s__gte' % date_field: since,\n '%s__lt' % date_field: until,\n }\n else:\n # Skip self._make_date_lookup_arg, it's a no-op in this branch.\n return {date_field: date}", "title": "" }, { "docid": "c7706a72b74782bd6d9afdccb66f9a9a", "score": "0.5082652", "text": "def created_date(self) -> Optional[str]:\n return pulumi.get(self, \"created_date\")", "title": "" }, { "docid": "d378d017eeb6c099a5c77afc0f3d7dd9", "score": "0.5076566", "text": "def _make_date_lookup_arg(self, value):\n if self.uses_datetime_field:\n value = datetime.datetime.combine(value, datetime.time.min)\n if settings.USE_TZ:\n value = timezone.make_aware(value, timezone.get_current_timezone())\n return value", "title": "" }, { "docid": "322bb813499aa38227de60d925f64669", "score": "0.5058185", "text": "def convert_TimeProperty(model, prop, kwargs):\r\n if prop.auto_now or prop.auto_now_add:\r\n return None\r\n\r\n return f.DateTimeField(format='%H-%M-%S', **kwargs)", "title": "" }, { "docid": "adc33bcf71b69548de7deed3d53ea347", "score": "0.5051118", "text": "def to_date_field(formatter):\n class DateConverter(object):\n\n def __init__(self, formatter):\n self.formatter = formatter\n\n def __call__(self, value):\n if isinstance(value, string_types):\n value = datetime.strptime(value, 
self.formatter).date()\n\n if isinstance(value, datetime):\n value = value.date()\n\n return value\n\n return DateConverter(formatter)", "title": "" }, { "docid": "1942a03da9d60ed0d9107cdf58bf599b", "score": "0.50415564", "text": "def get_initial_for_field(self, field, field_name):\n\n value = self.initial.get(field_name, field.initial)\n if callable(value) and not getattr(value, \"do_not_call_in_templates\", False):\n value = value()\n return value", "title": "" }, { "docid": "0a592cba8e7b2c5a9447587837cf17dc", "score": "0.5030985", "text": "def get_date(self):\n\t\treturn self._idate", "title": "" }, { "docid": "2559cffae04bd29a0d53cbf501b1e2ee", "score": "0.5015771", "text": "def doctest_LazyDateField_empty():", "title": "" }, { "docid": "290e02e6d8b78845e2af2a38cdb78139", "score": "0.50037515", "text": "def get_date(self) -> Optional[HistoricDateTime]:\n if self.date:\n return self.date\n elif not self._state.adding and self.containers.exists():\n try:\n containment: SourceContainment = (\n self.source_containments.first().select_related('container')\n )\n return containment.container.date\n except (ObjectDoesNotExist, AttributeError):\n pass\n return None", "title": "" }, { "docid": "6f298cce3251d441af43eafbdf10c082", "score": "0.49986902", "text": "def _date_format(self):\n return getattr(self, 'date_format', None)", "title": "" }, { "docid": "3510090695836cf562e83738bc1dad45", "score": "0.4979512", "text": "def formfield_for_dbfield(self, db_field, **kwargs):\n request = kwargs.pop(\"request\", None)\n\n # If the field specifies choices, we don't need to look for special\n # admin widgets - we just need to use a select widget of some kind.\n if db_field.choices is not None:\n return self.formfield_for_choice_field(db_field, request, **kwargs)\n\n if isinstance(db_field, ListField) and isinstance(db_field.field, ReferenceField):\n return self.formfield_for_manytomany(db_field, request, **kwargs)\n\n # handle RelatedFields\n if isinstance(db_field, ReferenceField):\n # For non-raw_id fields, wrap the widget with a wrapper that adds\n # extra HTML -- the \"add other\" interface -- to the end of the\n # rendered output. formfield can be None if it came from a\n # OneToOneField with parent_link=True or a M2M intermediary.\n form_field = db_field.formfield(**kwargs)\n if db_field.name not in self.raw_id_fields:\n related_modeladmin = self.admin_site._registry.get(db_field.document_type)\n can_add_related = bool(related_modeladmin and\n related_modeladmin.has_add_permission(request))\n form_field.widget = widgets.RelatedFieldWidgetWrapper(\n form_field.widget, RelationWrapper(db_field.document_type), self.admin_site,\n can_add_related=can_add_related)\n return form_field\n\n if isinstance(db_field, StringField):\n if db_field.max_length is None:\n kwargs = dict({'widget': widgets.AdminTextareaWidget}, **kwargs)\n else:\n kwargs = dict({'widget': widgets.AdminTextInputWidget}, **kwargs)\n return db_field.formfield(**kwargs)\n\n # If we've got overrides for the formfield defined, use 'em. 
**kwargs\n # passed to formfield_for_dbfield override the defaults.\n for klass in db_field.__class__.mro():\n if klass in self.formfield_overrides:\n kwargs = dict(self.formfield_overrides[klass], **kwargs)\n return db_field.formfield(**kwargs)\n\n # For any other type of field, just call its formfield() method.\n return db_field.formfield(**kwargs)", "title": "" }, { "docid": "03e798d7e2f6284c339a2f6abc2c826a", "score": "0.49671984", "text": "def date(self, title, value=None, *args, **kwargs):\n return self.datePicker(title, value, *args, **kwargs)", "title": "" }, { "docid": "14cd0f028753a02f337a922372b03d42", "score": "0.496116", "text": "def get_date(self):\n try:\n if len(self.info[9]) == 6:\n # Format date to YYYY/MM/DD (Compatible with pyephem)\n return format_date(self.info[9])\n\n else:\n return None\n\n except IndexError:\n return None", "title": "" }, { "docid": "14cd0f028753a02f337a922372b03d42", "score": "0.496116", "text": "def get_date(self):\n try:\n if len(self.info[9]) == 6:\n # Format date to YYYY/MM/DD (Compatible with pyephem)\n return format_date(self.info[9])\n\n else:\n return None\n\n except IndexError:\n return None", "title": "" }, { "docid": "53da1d2fb9986c5889047bf3043c6d56", "score": "0.4960476", "text": "def set_new_start_date(self, date):\n if date != \"\":\n self.set_value_into_input_field(self.new_start_date_locator, date)\n else:\n self.set_value_into_input_field(self.new_start_date_locator, str(self.get_current_date()))", "title": "" }, { "docid": "04d6d922899c6f2fc5e34753b885ca7d", "score": "0.4956903", "text": "def _BuildProperty(field):\n\n # pylint: disable=W0212\n def Get(self):\n if field.name not in self._fields:\n self._fields[field.name] = field.GetDefaultValue()\n return self._fields[field.name]\n\n # pylint: disable=W0212\n def Set(self, value):\n self._fields[field.name] = field.field_type.Convert(value)\n\n return property(Get, Set)", "title": "" }, { "docid": "25e62773e64624ce4eca4029c56479a0", "score": "0.4956541", "text": "def date_fields(self):\n return [f.name for f in self.fields if f.type == DATE_FIELD]", "title": "" }, { "docid": "b3a5247aa94f350e0380c0a84cf3a361", "score": "0.49463847", "text": "def field(self):\n if self.hex_view:\n return self.hex_field\n elif self.formatted_view:\n return self.formatted_field\n else:\n return self.text_field", "title": "" }, { "docid": "ece2a676a61f6b3100012a5a60a72534", "score": "0.4942176", "text": "def field_get(self, field, default=None):\n\n\t\t# If the field doesn't exist\n\t\tif field not in self._dRecord:\n\t\t\treturn default\n\n\t\t# Return the field\n\t\treturn self._dRecord[field]", "title": "" }, { "docid": "5e657eb30ca6a4f64d292fc02be5e247", "score": "0.4922155", "text": "def validate_date(date: datetime) -> str:\n return str(date.date()) if date else NA_FIELD", "title": "" }, { "docid": "1d48db693ff436df764a0bf0d02242c7", "score": "0.49103355", "text": "def filed_date(self, filed_date):\n\n self._filed_date = filed_date", "title": "" }, { "docid": "70b23f2a28b7ab5298b882b5805a7e33", "score": "0.48986894", "text": "def get_field(self, *args, **kwargs):\n field_kw = self.get_field_extra_args(self, *args, **kwargs)\n field_type = self.get_field_type()\n field = field_type(self.description, field_kw)\n return self.name, field", "title": "" }, { "docid": "7852da16c1ae9d79d10ccc0d021aa9a6", "score": "0.48946092", "text": "def set_date_input_formats(fields):\n from django import forms\n for f in fields:\n if isinstance(fields[f], forms.DateField):\n # Use custom date field so that 
finnish date format is allowed when saving.\n fields[f].widget.format = '%d.%m.%Y'\n fields[f].input_formats = ['%d.%m.%Y']", "title": "" }, { "docid": "3180a6ac4a444f149895e9f755086ad3", "score": "0.4892571", "text": "def marshall_date(self, fieldname, value):\n try:\n value = parsedate.date_from_string(value)\n except ValueError, e:\n raise self._err(\"Value supplied to field %r must be a \"\n \"valid date: was %r: error is '%s'\" %\n (fieldname, value, str(e)))\n return marshall.date_to_string(value)", "title": "" }, { "docid": "75ab2c5c499daeb0ce6843a30e1dfa16", "score": "0.48896804", "text": "def get_input_field(\n field: Union[Field, ForeignObjectRel], registry: Registry\n) -> Union[Scalar, Structure]:\n return convert_django_field_with_choices(field, registry)", "title": "" }, { "docid": "bd31db84555010cc2183f28504a21d39", "score": "0.48813018", "text": "def formfield( self, **kwargs ):\n\t\tdefaults = {\n\t\t\t\"form_class\" : DynamicFormField,\n\t\t\t\"form\": self.form\n\t\t}\n\t\tdefaults.update( kwargs )\n\n\t\treturn super( DynamicModelField, self ).formfield( **defaults )", "title": "" }, { "docid": "356a9576b24e12c7fc0095a7e57f93eb", "score": "0.48792815", "text": "def get_prepopulated_value(field, instance):\n if hasattr(field.populate_from, '__call__'):\n # AutoSlugField(populate_from=lambda instance: ...)\n return field.populate_from(instance)\n else:\n # AutoSlugField(populate_from='foo')\n attr = getattr(instance, field.populate_from)\n return callable(attr) and attr() or attr", "title": "" }, { "docid": "71e84fef42540585f7d449e6359cde9f", "score": "0.48667878", "text": "def formfield_for_foreignkey(self, db_field, request=None, **kwargs):\r\n if db_field.name in self.raw_id_fields:\r\n kwargs['widget'] = widgets.ForeignKeyRawIdWidget(db_field.rel)\r\n elif db_field.name in self.radio_fields:\r\n kwargs['widget'] = widgets.AdminRadioSelect(attrs={\r\n 'class': get_ul_class(self.radio_fields[db_field.name]),\r\n })\r\n kwargs['empty_label'] = db_field.blank and _('None') or None\r\n\r\n return db_field.formfield(**kwargs)", "title": "" }, { "docid": "b4002e5a144bb3fe8897fd5e61d60c6f", "score": "0.486144", "text": "def getStartDate(self):\n return self.getOrDefault(self.start_date)", "title": "" }, { "docid": "104b606891adb6ff5b601deab02cfb7f", "score": "0.48581582", "text": "def formfield_for_dbfield(self, db_field, **kwargs):\r\n request = kwargs.pop(\"request\", None)\r\n\r\n # If the field specifies choices, we don't need to look for special\r\n # admin widgets - we just need to use a select widget of some kind.\r\n if db_field.choices:\r\n return self.formfield_for_choice_field(db_field, request, **kwargs)\r\n\r\n # ForeignKey or ManyToManyFields\r\n if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):\r\n # Combine the field kwargs with any options for formfield_overrides.\r\n # Make sure the passed in **kwargs override anything in\r\n # formfield_overrides because **kwargs is more specific, and should\r\n # always win.\r\n if db_field.__class__ in self.formfield_overrides:\r\n kwargs = dict(self.formfield_overrides[db_field.__class__], **kwargs)\r\n\r\n # Get the correct formfield.\r\n if isinstance(db_field, models.ForeignKey):\r\n formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)\r\n elif isinstance(db_field, models.ManyToManyField):\r\n formfield = self.formfield_for_manytomany(db_field, request, **kwargs)\r\n\r\n # For non-raw_id fields, wrap the widget with a wrapper that adds\r\n # extra HTML -- the \"add other\" 
interface -- to the end of the\r\n # rendered output. formfield can be None if it came from a\r\n # OneToOneField with parent_link=True or a M2M intermediary.\r\n if formfield and db_field.name not in self.raw_id_fields:\r\n formfield.widget = widgets.RelatedFieldWidgetWrapper(formfield.widget, db_field.rel, self.admin_site)\r\n\r\n return formfield\r\n\r\n # If we've got overrides for the formfield defined, use 'em. **kwargs\r\n # passed to formfield_for_dbfield override the defaults.\r\n for klass in db_field.__class__.mro():\r\n if klass in self.formfield_overrides:\r\n kwargs = dict(self.formfield_overrides[klass], **kwargs)\r\n return db_field.formfield(**kwargs)\r\n\r\n # For any other type of field, just call its formfield() method.\r\n return db_field.formfield(**kwargs)", "title": "" }, { "docid": "876af4453a3591c137973a87a9c5298c", "score": "0.48572418", "text": "def create_date(self):\n if self.json_object['create_date'] is None:\n return None\n else:\n return dateutil.parser.parse(\n self.json_object['create_date']\n )", "title": "" }, { "docid": "21a74a4947c2bc7dca864701386bfeec", "score": "0.48447663", "text": "def _get_field_default(field: dataclasses.Field):\n # Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed\n default_factory = field.default_factory # type: ignore\n if default_factory is not dataclasses.MISSING:\n return default_factory\n elif field.default is dataclasses.MISSING:\n return marshmallow.missing\n return field.default", "title": "" }, { "docid": "1b2873e827ba04595ec3cd9e8603fc5a", "score": "0.4831724", "text": "def _convert_date(date_field):\n\n date = datetime.datetime.strptime(str(date_field), \"%m/%d/%Y\").date()\n return date", "title": "" }, { "docid": "6b0252eb6ecbcaf6987b4d3d00988bf9", "score": "0.4826108", "text": "def enable_custom_date(ael_input):\n custom_date = ael_variables.get('start_date')\n if ael_input.value == 'Custom Date':\n custom_date.enabled = True\n elif ael_input.value == 'Last Business Day':\n custom_date.enabled = False\n custom_date.value = LAST_BUSINESS_DAY\n else:\n custom_date.enabled = False\n custom_date.value = DATE_TODAY", "title": "" }, { "docid": "474ca96fb6b8c66e8a1bf6c8c7eae734", "score": "0.48146042", "text": "def form_field(field):\n\treturn {'field': field}", "title": "" }, { "docid": "0008efa73e14727025f5c712ce2ce3e6", "score": "0.48081267", "text": "def get_initial_for_field(self, field, field_name):\n\n value = self.initial.get(field_name, field.initial)\n if callable(value) and not issubclass(value, CriterionEvaluator):\n value = value()\n return value", "title": "" }, { "docid": "977084dbc5e9195924adfcc8c345dcbc", "score": "0.48076323", "text": "def get_pynagField(self, field_name, css_tag=\"\", required=None):\r\n # Lets figure out what type of field this is, default to charfield\r\n object_type = self.pynag_object['object_type']\r\n definitions = object_definitions.get(object_type) or {}\r\n options = definitions.get(field_name) or {}\r\n\r\n # Find out what type of field to create from the field_name.\r\n # Lets assume charfield in the beginning\r\n field = forms.CharField()\r\n\r\n if False is True:\r\n pass\r\n elif field_name in ('contact_groups', 'contactgroups', 'contactgroup_members'):\r\n all_groups = Model.Contactgroup.objects.filter(\r\n contactgroup_name__contains=\"\")\r\n choices = sorted(\r\n map(lambda x: (x.contactgroup_name, x.contactgroup_name), all_groups))\r\n field = PynagChoiceField(\r\n choices=choices, inline_help_text=_(\"No %(field_name)s selected\") % 
{'field_name': field_name})\r\n elif field_name == 'use':\r\n all_objects = self.pynag_object.objects.filter(name__contains='')\r\n choices = map(lambda x: (x.name, x.name), all_objects)\r\n field = PynagChoiceField(\r\n choices=sorted(choices), inline_help_text=_(\"No %s selected\") % {'field_name': field_name})\r\n elif field_name in ('servicegroups', 'servicegroup_members'):\r\n all_groups = Model.Servicegroup.objects.filter(\r\n servicegroup_name__contains='')\r\n choices = map(\r\n lambda x: (x.servicegroup_name, x.servicegroup_name), all_groups)\r\n field = PynagChoiceField(\r\n choices=sorted(choices), inline_help_text=_(\"No %(field_name)s selected\") % {'field_name': field_name})\r\n elif field_name in ('hostgroups', 'hostgroup_members', 'hostgroup_name') and object_type != 'hostgroup':\r\n all_groups = Model.Hostgroup.objects.filter(\r\n hostgroup_name__contains='')\r\n choices = map(\r\n lambda x: (x.hostgroup_name, x.hostgroup_name), all_groups)\r\n field = PynagChoiceField(\r\n choices=sorted(choices), inline_help_text=_(\"No %(field_name)s selected\") % {'field_name': field_name})\r\n elif field_name == 'members' and object_type == 'hostgroup':\r\n all_groups = Model.Host.objects.filter(host_name__contains='')\r\n choices = map(lambda x: (x.host_name, x.host_name), all_groups)\r\n field = PynagChoiceField(\r\n choices=sorted(choices), inline_help_text=_(\"No %(field_name)s selected\") % {'field_name': field_name})\r\n elif field_name == 'host_name' and object_type == 'service':\r\n all_groups = Model.Host.objects.filter(host_name__contains='')\r\n choices = map(lambda x: (x.host_name, x.host_name), all_groups)\r\n field = PynagChoiceField(\r\n choices=sorted(choices), inline_help_text=_(\"No %(field_name)s selected\") % {'field_name': field_name})\r\n elif field_name in ('contacts', 'members'):\r\n all_objects = Model.Contact.objects.filter(\r\n contact_name__contains='')\r\n choices = map(\r\n lambda x: (x.contact_name, x.contact_name), all_objects)\r\n field = PynagChoiceField(\r\n choices=sorted(choices), inline_help_text=_(\"No %s selected\") % {'field_name': field_name})\r\n elif field_name.endswith('_period'):\r\n all_objects = Model.Timeperiod.objects.filter(\r\n timeperiod_name__contains='')\r\n choices = [('', '')] + map(\r\n lambda x: (x.timeperiod_name, x.timeperiod_name), all_objects)\r\n field = forms.ChoiceField(choices=sorted(choices))\r\n elif field_name.endswith('notification_commands'):\r\n all_objects = Model.Command.objects.filter(\r\n command_name__contains='')\r\n choices = [('', '')] + map(\r\n lambda x: (x.command_name, x.command_name), all_objects)\r\n field = PynagChoiceField(choices=sorted(choices))\r\n # elif field_name == 'check_command':\r\n # all_objects = Model.Command.objects.all\r\n # choices = [('','')] + map(lambda x: (x.command_name, x.command_name), all_objects)\r\n # field = forms.ChoiceField(choices=sorted(choices))\r\n elif field_name.endswith('notification_options') and self.pynag_object.object_type == 'host':\r\n field = PynagChoiceField(\r\n choices=HOST_NOTIFICATION_OPTIONS, inline_help_text=_(\"No %(field_name)s selected\") % {'field_name': field_name})\r\n elif field_name.endswith('notification_options') and self.pynag_object.object_type == 'service':\r\n field = PynagChoiceField(\r\n choices=SERVICE_NOTIFICATION_OPTIONS, inline_help_text=_(\"No %(field_name)s selected\") % {'field_name': field_name})\r\n elif options.get('value') == '[0/1]':\r\n field = forms.CharField(widget=PynagRadioWidget)\r\n\r\n # Lets see if there is any 
help text available for our field\r\n if field_name in object_definitions[object_type]:\r\n help_text = object_definitions[object_type][field_name].get(\r\n 'help_text', _(\"No help available for this item\"))\r\n field.help_text = help_text\r\n\r\n # No prettyprint for macros\r\n if field_name.startswith('_'):\r\n field.label = field_name\r\n\r\n # If any CSS tag was given, add it to the widget\r\n self.add_css_tag(field=field, css_tag=css_tag)\r\n\r\n if 'required' in options:\r\n self.add_css_tag(field=field, css_tag=options['required'])\r\n field.required = options['required'] == 'required'\r\n else:\r\n field.required = False\r\n\r\n # At the moment, our database of required objects is incorrect\r\n # So if caller did not specify if field is required, we will not\r\n # make it required\r\n if required is None:\r\n field.required = False\r\n else:\r\n field.required = required\r\n\r\n # Put inherited value in the placeholder\r\n inherited_value = self.pynag_object._inherited_attributes.get(\r\n field_name)\r\n if inherited_value is not None:\r\n self.add_placeholder(\r\n field, _('%(inherited_value)s (inherited from template)') % {'inherited_value': inherited_value})\r\n\r\n if field_name in MULTICHOICE_FIELDS:\r\n self.add_css_tag(field=field, css_tag=\"multichoice\")\r\n\r\n return field", "title": "" }, { "docid": "af76f3da75c3d721f5ecb0ba905bb601", "score": "0.48006123", "text": "def date(self) -> datetime.date:\n return self.identifier.date", "title": "" }, { "docid": "48cd11d94f67622241a86f2768250bc3", "score": "0.47968692", "text": "def get_start_date(self):\n return self.get(\"start_date\")", "title": "" }, { "docid": "8f4682f3906bff0a038d42385fbb21fe", "score": "0.479625", "text": "def daytime(self):\n try:\n return self.property1\n except TypeError:\n return None", "title": "" }, { "docid": "b675aa7c917221b04f27cf87c43ed9fb", "score": "0.47880003", "text": "def serialize_date(self):\r\n try:\r\n return self.birthdate.strftime(\"%d-%m-%Y\")\r\n except:\r\n return None", "title": "" }, { "docid": "f4f214ff2e041dc1a69dfcb1c5de2cf0", "score": "0.47832242", "text": "def _get_model_date(model: Path) -> datetime.datetime:\n return datetime.datetime.strptime(model.name, \"%Y%m%d\")", "title": "" }, { "docid": "67fe21e48b071ef8ad4fe125a07186b6", "score": "0.4768644", "text": "def to_internal_value(self, data: str) -> datetime:\n data = super(DateTimeBooleanField, self).to_internal_value(data) \\\n if not isinstance(data, bool) else data\n\n # Only allow boolean value\n if not isinstance(data, bool):\n raise ValidationError('Should be a boolean value')\n\n return timezone.now() if data else None", "title": "" }, { "docid": "d3c02404c5f2a7682719a681c8dd7336", "score": "0.47661346", "text": "def date_obj(date):\n if type(date) is dt:\n return date\n if type(date) is datetime.date:\n return dt(date.year, date.month, date.day)\n return dt.strptime(date, DATE_FORMAT)", "title": "" }, { "docid": "2d5db88a1548ee3797861b1e5754199d", "score": "0.47483695", "text": "def get_value_for_form(self, instance):\r\n return None", "title": "" }, { "docid": "70be7b2bcd2306dc993ccb1ce8285ec1", "score": "0.47483477", "text": "def field(self) -> Optional[str]:\n return pulumi.get(self, \"field\")", "title": "" }, { "docid": "70be7b2bcd2306dc993ccb1ce8285ec1", "score": "0.47483477", "text": "def field(self) -> Optional[str]:\n return pulumi.get(self, \"field\")", "title": "" }, { "docid": "70be7b2bcd2306dc993ccb1ce8285ec1", "score": "0.47483477", "text": "def field(self) -> Optional[str]:\n return 
pulumi.get(self, \"field\")", "title": "" }, { "docid": "70be7b2bcd2306dc993ccb1ce8285ec1", "score": "0.47483477", "text": "def field(self) -> Optional[str]:\n return pulumi.get(self, \"field\")", "title": "" }, { "docid": "c67788d6edc101cff993e61d7ffe3e02", "score": "0.47411618", "text": "def get_meta_field_value(self, field):\n\n if not self.has_meta():\n return None\n\n if field not in self.meta:\n return None\n\n return self.meta[field]", "title": "" }, { "docid": "4bcf50c48c25c0c5086de31c2bb1ec27", "score": "0.47381553", "text": "def get_form_field(self, **kwargs):\r\n defaults = {'form_class': forms.URLField}\r\n defaults.update(kwargs)\r\n return super(LinkProperty, self).get_form_field(**defaults)", "title": "" }, { "docid": "47ce7ccb01ec374ac917f96cc227dee4", "score": "0.4732683", "text": "def pretty_date(self, typeof='created'):\n if typeof == 'created':\n return utils.pretty_date(self.created_on)\n elif typeof == 'updated':\n return utils.pretty_date(self.updated_on)", "title": "" } ]
35b6ba6294b09c77fcf4be6cc931faec
To update the requirements for MudTelnet, edit the requirements.txt file.
[ { "docid": "1134af41f3fedd4a5977df6c9d8b6488", "score": "0.0", "text": "def get_requirements():\n with open(\"requirements.txt\", \"r\") as f:\n req_lines = f.readlines()\n reqs = []\n for line in req_lines:\n # Avoid adding comments.\n line = line.split(\"#\")[0].strip()\n if line:\n reqs.append(line)\n return reqs", "title": "" } ]
[ { "docid": "7d535ffadbd06d4155dcb12434db0959", "score": "0.5219412", "text": "def SetupLMNotification(self):\n self.Install('python3')\n self.RemoteCommand('pip install requests')\n self.PushDataFile(\n self._LM_NOTICE_SCRIPT, f'{self.temp_dir}\\\\{self._LM_NOTICE_SCRIPT}'\n )", "title": "" }, { "docid": "7e38f3f702e18aa212cb747f7ef6e493", "score": "0.48356634", "text": "def iot_set():\n mod = [\n ModuleInstall(\n 'phue', 'pip', purpose=\"A Philips Hue Python library\", usage=\"IoT\"),\n ModuleInstall('ledcontroller', 'pip',\n purpose=\"Controller library for limitlessled/easybulb/milight Wi-Fi LEDs\", usage=\"IoT\"),\n ModuleInstall('enum-compat', 'pip',\n purpose=\"enum/enum34 compatibility package\") if sys.version_info[0] == 2 else None,\n ModuleInstall('netifaces', 'pip',\n purpose=\"Portable network interface information.\"),\n ModuleInstall('ifaddr', 'pip', purpose=\"for zero conf\"),\n ModuleInstall('zeroconf', 'pip',\n purpose=\"Multicast DNS Service Discovery for Python, originally by Paul Scott-Murphy.\"),\n ModuleInstall('casttube', 'pip', purpose=\"for pychromecast\"),\n ModuleInstall('pychromecast', 'pip',\n purpose=\"Library for Python 2 and 3 to communicate with the Google Chromecast.\"),\n ModuleInstall('python-xlib', 'pip', mname=\"Xlib\",\n purpose=\"Python X Library\"),\n ModuleInstall('pyHook', 'wheel',\n purpose=\"Python wrapper for global input hooks in Windows. The package provides callbacks \"\n + \"for mouse and keyboard events;\"\n + \" events can be monitored and filtered.\"),\n ModuleInstall('pyuserinput', 'pip', mname=\"pykeyboard\",\n purpose=\"A simple, cross-platform module for mouse and keyboard control\"),\n ModuleInstall('tellcore-py', 'pip', mname=\"tellcore\", usage=\"IoT\",\n purpose=\"Python wrapper for Telldus' home automation library\"),\n ModuleInstall('python-nmap', 'pip', mname=\"nmap\",\n purpose=\"This is a python class to use nmap and access scan results from python3\"),\n ModuleInstall('python-magic', 'pip',\n purpose=\"File type identification using libmagic\"),\n ModuleInstall('websocket-client', 'pip', mname=\"websocket\",\n purpose=\"WebSocket client for python. 
hybi13 is supported.\"),\n ModuleInstall('pushbullet.py', 'pip', usage=\"IoT\",\n purpose=\"A simple python client for pushbullet.com\"),\n ModuleInstall('sseclient-py', 'pip', mname=\"sseclient\",\n purpose=\"A Python client for SSE event sources that seamlessly \"\n + \"integrates with urllib3 and requests.\"),\n ModuleInstall('python-nest', 'pip', usage=\"IoT\",\n purpose=\"Python API and command line tool for talking to the Nest™ Thermostat\"),\n ModuleInstall('VarEvents', 'pip', usage=\"IoT\",\n purpose=\"Python module to create variables that can raise custom events.\"),\n ModuleInstall('PyISY', 'pip', usage=\"IoT\",\n purpose=\"Python module to talk to ISY994 from UDI.\"),\n ModuleInstall('python-pushover', 'pip', usage=\"IoT\",\n purpose=\"Comprehensive bindings and command line utility for the Pushover notification service\"),\n ModuleInstall('transmissionrpc', 'pip', usage=\"IoT\",\n purpose=\"Python module that implements the Transmission bittorent client RPC protocol.\"),\n ModuleInstall('pyowm', 'pip', usage=\"DATA\",\n purpose=\"A Python wrapper around the OpenWeatherMap web API\"),\n ModuleInstall('sleekxmpp', 'pip', usage=\"IoT\",\n purpose=\"SleekXMPP is an elegant Python library for XMPP (aka Jabber, Google Talk, etc).\"),\n ModuleInstall('dnspython3', 'pip', usage=\"IoT\",\n purpose=\"A DNS toolkit for Python 3.x\"),\n ModuleInstall('blockchain', 'pip', usage=\"IoT\",\n purpose=\"Blockchain API library (v1)\"),\n ModuleInstall('python-mpd2', 'pip', usage=\"IoT\", mname=\"mdp2\",\n purpose=\"A Python MPD client library\"),\n ModuleInstall('hikvision', 'pip', usage=\"IoT\",\n purpose=\"Provides a python interface to interact with a hikvision camera\"),\n ModuleInstall('jsonrpc-requests', 'pip', mname=\"jsonrpc_requests\",\n purpose=\"A JSON-RPC client library, backed by requests\"),\n ModuleInstall(\n 'cookies', 'pip', purpose=\"Friendlier RFC 6265-compliant cookie parser/renderer\"),\n ModuleInstall('responses', 'pip',\n purpose=\"A utility library for mocking out the `requests` Python library.\"),\n ModuleInstall('python-forecastio', 'pip', usage=\"IoT\",\n purpose=\"A thin Python Wrapper for the Forecast.io weather API\"),\n ModuleInstall('pyserial', 'pip', mname=\"serial\",\n usage=\"IoT\", purpose=\"Python Serial Port Extension\"),\n ModuleInstall('PyMata', 'pip', usage=\"IoT\",\n purpose=\"A Python Protocol Abstraction Library For Arduino Firmata\"),\n ModuleInstall('pyRFXtrx', 'pip', mname=\"RFXtrx\",\n purpose=\"A Python library to communicate with the RFXtrx family of devices\"),\n # ModuleInstall('pymysensors', 'github', 'theolind', mname=\"mysensors\",\n # purpose=\"Python API for talking to a MySensors gateway\"),\n ModuleInstall('pynetgear', 'pip', usage=\"IoT\",\n purpose=\"Access Netgear routers using their SOAP API\"),\n ModuleInstall('netdisco', 'pip', usage=\"IoT\",\n purpose=\"Discover devices on your local network\"),\n ModuleInstall('pywemo', 'pip', usage=\"IoT\",\n purpose=\"Access WeMo switches using their SOAP API\"),\n ModuleInstall('python-wink', 'pip', mname=\"pywink\",\n purpose=\"Python implementation of the Wink API\"),\n ModuleInstall('slacker', 'pip', purpose=\"Slack API client\"),\n ModuleInstall(\n 'vincenty', 'pip', purpose=\"Calculate the geographical distance between 2 points with extreme accuracy.\"),\n ModuleInstall('pyusb', 'pip', usage=\"IoT\",\n purpose=\"Python USB access module\"),\n ModuleInstall('temperusb', 'pip', usage=\"IoT\",\n purpose=\"Reads temperature from TEMPerV1 devices (USB 0c45:7401)\"),\n ModuleInstall('pyedimax', 
'github', 'rkabadi', usage=\"IoT\",\n purpose=\"Pyedimax is a python library for interfacing with the Edimax Smart Plug switches SP-1101W and SP-2101W\"),\n\n # Uncomment for Raspberry Pi\n # ModuleInstall('RPi.GPIO', 'pip'),\n # uncomment on a Raspberry Pi / Beaglebone\n # ModuleInstall('Adafruit_Python_DHT', 'github', 'mala-zaba', purpose=\"Adafruit temperature/humidity sensor\", usage=\"IoT\"),\n\n ModuleInstall('paho-mqtt', 'pip', mname=\"paho.mqtt\", usage=\"IoT\",\n purpose=\"MQTT version 3.1/3.1.1 client class\"),\n # ModuleInstall('pymodbus', 'github', 'bashwork', usage='IoT'),\n ModuleInstall('python-verisure', 'github', 'persandstrom', mname=\"verisure\", usage='IoT',\n purpose=\"A python module for reading and changing status of verisure devices through mypages.\"),\n ModuleInstall('voluptuous', 'pip',\n purpose=\"Voluptuous is a Python data validation library\"),\n ModuleInstall('astral', 'pip',\n purpose=\"Calculations for the position of the sun and moon.\"),\n ModuleInstall('voluptuous_serialize', 'pip',\n purpose=\"for homeassistant\"),\n\n ModuleInstall('homeassistant', 'pip',\n purpose=\"Home Assistant is a home automation platform running on Python 3. \"\n + \"The goal of Home Assistant is to be able to track and control all devices at \"\n + \"home and offer a platform for automating control.\",\n web=\"https://github.com/balloob/home-assistant/\",\n usage=\"IoT\"),\n ]\n return [_ for _ in mod if _ is not None]", "title": "" }, { "docid": "10f6c4b08aeeae8d33dad3b1fcbc4a87", "score": "0.4784656", "text": "def install_munin_node(add_to_master=True):\n\n # install munin-node\n sudo('apt-get -yq install munin-node')\n\n # add allow IP to munin-node.conf -> allow IP must be escaped REGEX-style\n ip = '%(hq)s' % env\n ip.replace('.', '\\\\\\.')\n sed('/etc/munin/munin-node.conf', '127\\\\\\.0\\\\\\.0\\\\\\.1', '%s' % ip, use_sudo=True)\n sudo('service munin-node restart')\n\n # add node to munin-master on Headquarters server so\n # system information is actually collected\n if add_to_master:\n with settings(host_string='%(hq)s:22' % env):\n path = '/etc/munin/munin.conf'\n append(path, '[%(hostname)s]' % env, use_sudo=True)\n append(path, ' address %(server_ip)s' % env, use_sudo=True)\n append(path, ' ', use_sudo=True)", "title": "" }, { "docid": "b7e7c931b7fef537d3291a19dd2f0c53", "score": "0.47829765", "text": "def install(ctx):\n ctx.run(\"pip install -r requirements.txt -U\")\n #ctx.run(\"pip install -e . 
-U\")", "title": "" }, { "docid": "22e2dd2975f3ec20b25fac4487894cb2", "score": "0.47822088", "text": "def setMME(controller, node):\n #This UPI function stops the MME\n controller.blocking(False).node(node).net.MME_deactivation()\n gevent.sleep(5)\n #This UPI function starts the MME\n controller.delay(3).node(node).net.MME_activation()\n gevent.sleep(5)", "title": "" }, { "docid": "17c049fe458d616e58ae3ba568cae495", "score": "0.47781417", "text": "def requirements():\n\n run('pip install -r {req}'.format(req=REQUIREMENTS))", "title": "" }, { "docid": "4356ff18326feccfa613ed2b3db76f82", "score": "0.47715598", "text": "def main():\n\n proxies = {\n \"http\": \"http://192.168.1.15:8080\",\n }\n\n requests.get(\"http://[fec0::4]:61616/rl\", proxies=proxies)", "title": "" }, { "docid": "e1e0d3716eb807b5a130a2b23409ed9e", "score": "0.47593388", "text": "def deps():\n\n run('jspm install')", "title": "" }, { "docid": "e37f9b61fd1483f102164ca9564986d2", "score": "0.4747332", "text": "def test_nessus_rhel_07_021710(host):\n\n assert not host.package('telnet-server').is_installed", "title": "" }, { "docid": "0869e97bfde4d9ced4560464513a98d5", "score": "0.4741507", "text": "def depends_mecab_version(cls) -> str:\n return \"0.996\"", "title": "" }, { "docid": "750d248668b54f58ad5db392387f4c36", "score": "0.47370616", "text": "def dependencies(default_env='dev'):\n pip_install(default_env)\n npm_install()\n bower_install()\n nltk_init()", "title": "" }, { "docid": "2e8db9f204c26c92b41dace04fdb0472", "score": "0.47181875", "text": "def update_requirements(reqs_type='production'):\n require('hosts')\n require('code_dir')\n\n with cd(env.code_dir), prefix('. venv/bin/activate'):\n sudo('pip install -r requirements/%s.txt' % reqs_type)", "title": "" }, { "docid": "b41e8d3bea1a519946dd240d455741b3", "score": "0.46836364", "text": "def install_flask_dependencies(number):\n if exists(remote_app_directory) is False:\n\tsudo('mkdir %s' % remote_app_directory)\n\t#sudo('mkdir' + remote_app_directory)\n if exists(remote_twilix_directory) is False:\n\tsudo('mkdir %s' % remote_twilix_directory)\n\t#sudo('mkdir' + remote_twilix_directory)\n with lcd(local_app_directory):\n\twith cd(remote_app_directory):\n\t sudo('virtualenv venv')\n sudo('source venv/bin/activate')\n sudo('pip install Flask==0.10.1')\n sudo('pip install twilio==3.6.15')\n\twith cd(remote_twilix_directory):\n\t put('*', './', use_sudo=True)\n sudo(\"echo 'export USER_NUMBER=%s' >> /root/.bash_profile\" % number)\n sudo(\"source /root/.bash_profile\")", "title": "" }, { "docid": "547cfe1dd0bd3f9587daa27eee86470b", "score": "0.4630108", "text": "async def requirements(self):\n\t\tawait self.bot.say(\"The latest version of New John City requires the following objects:\\n\\n**Chicago DLC:**\\nhttps://store.steampowered.com/app/361290/OMSI_2_Addon_Chicago_Downtown/\\n\\n**Willshire Objects**\\nhttp://www.vtransitcenter.com/index.php?action=downloads;sa=view;down=56\\n\\n**Simple Streets:**\\nhttp://www.omnibussimulator.de/backup/index.php?page=Thread&threadID=2500\\n\\n**New Flyer Powertrain Mod:**\\nhttp://www.vtransitcenter.com/index.php/topic,8.0.html\")", "title": "" }, { "docid": "92e9f108932b657349c74e9046b28aea", "score": "0.46139124", "text": "def update_requirements():\n require('environment')\n project_run(u'HOME=%(home)s %(virtualenv)s/bin/pip install --use-mirrors -r %(requirements)s' % {\n 'virtualenv': env.virtualenv_root,\n 'requirements': os.path.join(env.code_root, 'requirements', 'production.txt'),\n 'home': env.home,\n })", "title": "" 
}, { "docid": "f3f619a36fc17be7772d2b0631290b26", "score": "0.4582937", "text": "def mobilenetv2(**kwargs):\n return MobileNetV2(**kwargs)", "title": "" }, { "docid": "35d3867ca8465b93e96e335f4d010346", "score": "0.45617425", "text": "def upgrade_requirements():\n\n with virtualenv(env.virtualenv_path):\n with cd(env.app_root):\n fab_install_requirements(\"requirements.txt\", upgrade=True, use_sudo=True)", "title": "" }, { "docid": "700b391a13e048882409c0c30f32f178", "score": "0.4560301", "text": "def install_requirements():\n print blue(\"Installing requirements\")\n with fabtools.python.virtualenv(env.virtualenv):\n run(\"pip install -r %s\" % os.path.join(env.new_release_path, 'requirements.txt'))", "title": "" }, { "docid": "71819fb4d8772073a1e18c4e6b31f4f4", "score": "0.45327824", "text": "def connectMPD():\n client = MPDClient() # create client object\n client.timeout = 10 # network timeout in seconds (floats allowed), default: None\n client.idletimeout = None # timeout for fetching the result of the idle command is handled seperately, default: None\n try:\n client.connect(\"localhost\", 6600) # connect to localhost:6600\n except Exception :\n print \"Can Connect to MPD...\"", "title": "" }, { "docid": "2a0c85ac27f11033a437fbf45bfaab30", "score": "0.4494146", "text": "def update_requirements():\n with cd(env.root_dir):\n with _virtualenv():\n run('pip install -r requirements.txt')", "title": "" }, { "docid": "43505de53221b5f47b86d49461f3bd31", "score": "0.44914833", "text": "def test_update_device_token(self):\n pass", "title": "" }, { "docid": "a74af0d6ffd925909c519d56071c9606", "score": "0.44695562", "text": "def test_add_device(self):\n pass", "title": "" }, { "docid": "101dee201bb66b89ec3504044e8921e9", "score": "0.44637555", "text": "def try_import_munkres():\n msg = \"munkres is required, you can install by `pip install munkres --user`. \"\n return try_import('munkres', msg)", "title": "" }, { "docid": "196e9e297c32eed82bad5970a17ee70d", "score": "0.44518122", "text": "def _upgrade_messaging() -> None:\n # Major update message for 1.3\n print(\n Style.BRIGHT\n + \"Yark 1.3 is out now! 
Go to https://github.com/Owez/yark to download\"\n + Style.DIM\n + \" (pip is no longer supported)\"\n + Style.NORMAL\n )\n\n # Give a warning if it's been over a year since release\n if datetime.datetime.utcnow().year >= 2024:\n print(\n Fore.YELLOW\n + \"You're currently on an outdated version of Yark\"\n + Fore.RESET,\n file=sys.stderr,\n )", "title": "" }, { "docid": "87f0519342e9b70d8d6ab82a080c76b3", "score": "0.44493747", "text": "def install_riot():\n\n packages = ['make', 'docker.io']\n packages += ['python3-serial', 'python3-pexpect']\n packages += ['python3-cryptography', 'python3-pyasn1', 'python3-ecdsa',\n 'python3-crypto']\n install(' '.join(packages))\n disable_dns_mask_for_docker()\n run('pip3 install pytest pytest-html pyserial scapy')", "title": "" }, { "docid": "a86c4fb12993631792b78fc3b700b1c2", "score": "0.44432557", "text": "def testMODRDN(self):\n proto = LDIFDeltaDriver()\n\n with self.assertRaises(NotImplementedError):\n proto.dataReceived(\n b\"\"\"version: 1\ndn: cn=foo,dc=example,dc=com\nchangetype: modrdn\n\n\"\"\"\n )", "title": "" }, { "docid": "6134b5f1d2820706a71b44bed4814d11", "score": "0.44083765", "text": "def tutorials_todo_update():\n global LIBTUTO_DBUS_HANDLER\n if LIBTUTO_DBUS_HANDLER is None:\n LIBTUTO_DBUS_HANDLER = LibtutoDbusHandler()\n\n LIBTUTO_DBUS_HANDLER.update_tuto_num()", "title": "" }, { "docid": "51cf1dc2a2df084f469636d205cfc6e1", "score": "0.43973684", "text": "def _Install(vm):\n vm.InstallPackages('numactl')", "title": "" }, { "docid": "4ccad8fb6597f04d097585cabebd9359", "score": "0.43925914", "text": "def setModelTypeAsMTCNN(self):\r\n self.__modelType = \"MTCCN\"", "title": "" }, { "docid": "a241845a31c3195905cb5aec014cceb4", "score": "0.4389078", "text": "def main():\n net = Mininet(controller=RemoteController)\n\n # ryu-manager --ofp-listen-host '127.0.0.2' ryu.app.simple_switch_13\n net.addController('c1', ip='127.0.0.2')\n\n # ryu-manager --ofp-listen-host '127.0.0.3' ryu.app.simple_switch_13\n net.addController('c2', ip='127.0.0.3')\n\n s1 = net.addSwitch('s1', protocols='OpenFlow13')\n s2 = net.addSwitch('s2', protocols='OpenFlow13')\n\n h1 = net.addHost('h1')\n h2 = net.addHost('h2')\n\n net.addLink(s1, h1)\n net.addLink(s2, h2)\n\n net.addLink(s1, s2)\n\n net.start()\n CLI(net)\n net.stop()", "title": "" }, { "docid": "1f646678564001e11c10f9fc6bc271a4", "score": "0.43797964", "text": "def __init__(self):\n self._mobility_stub = MobilityServiceStub(get_rpc_channel(\"mobilityd\"))", "title": "" }, { "docid": "92296f19d397e15a8a104e7f8945554f", "score": "0.43787974", "text": "def install(self):", "title": "" }, { "docid": "c2810e279e98ca0f2e724dd157942054", "score": "0.43771297", "text": "def main():\n if config['local']: # do not send requests to adafruit or MQTT when on local PC\n return\n\n client = Client(username, api_key) # basic client\n mqtt_client = MQTTClient(username, api_key) # mqtt client\n\n # Reset feeds\n for feed_id, feed_name in config['adafruit']['feeds']['ids'].items():\n client.send(feed_id, config['adafruit']['feeds']['defaults'][feed_name])\n pb.info(\"-> [server] Adafruit feeds reset\")\n\n # MQTT setup\n mqtt_client.on_connect = connected\n mqtt_client.on_message = message\n mqtt_client.connect()\n mqtt_client.loop_blocking()", "title": "" }, { "docid": "b6e3097d07a088d418c2f45e341d5c1f", "score": "0.43722942", "text": "def setup_mobile_io(m: 'MobileIO'):\n\n # MobileIO Button Config\n reset_pose_btn = 1\n joined_flipper_btn = 6\n quit_btn = 8\n\n slider_flip1 = 3\n slider_flip2 = 4\n slider_flip3 = 
5\n slider_flip4 = 6\n\n joy_fwd = 2\n joy_rot = 1\n\n arm_enable = 2\n arm_lock = 4\n gripper_close = 5\n\n # set mobileIO control config\n m.set_led_color(\"blue\")\n m.set_snap(slider_flip1, 0)\n m.set_snap(slider_flip2, 0)\n m.set_snap(slider_flip3, 0)\n m.set_snap(slider_flip4, 0)\n\n m.set_button_mode(joined_flipper_btn, 1)\n m.set_button_mode(arm_enable, 1)\n m.set_button_mode(arm_lock, 1)\n m.set_button_mode(gripper_close, 1)\n\n m.set_button_output(reset_pose_btn, 1)\n m.set_button_output(quit_btn, 1)\n\n m.set_button_output(arm_enable, 1)\n m.set_button_output(arm_lock, 1)\n\n def parse_mobile_io_feedback(m: 'MobileIO'):\n should_exit = m.get_button_state(quit_btn)\n should_reset = m.get_button_state(reset_pose_btn)\n # Chassis Control\n aligned_flipper_mode = m.get_button_state(joined_flipper_btn)\n joy_vel_fwd = m.get_axis_state(joy_fwd)\n joy_vel_rot = m.get_axis_state(joy_rot)\n\n # Flipper Control\n flip1 = m.get_axis_state(slider_flip1)\n flip2 = m.get_axis_state(slider_flip2)\n flip3 = m.get_axis_state(slider_flip3)\n flip4 = m.get_axis_state(slider_flip4)\n\n tready_inputs = TreadyInputs(\n should_reset,\n ChassisVelocity(joy_vel_fwd, joy_vel_rot),\n [flip1, flip2, flip3, flip4],\n aligned_flipper_mode)\n\n try:\n # reorder quaternion components\n wxyz = m.orientation\n xyzw = [*wxyz[1:4], wxyz[0]]\n rotation = R.from_quat(xyzw).as_matrix()\n except ValueError as e:\n print(f'Error getting orientation as matrix: {e}\\n{m.orientation}')\n rotation = np.eye(3)\n\n arm_inputs = ArmMobileIOInputs(\n np.copy(m.position),\n rotation,\n m.get_button_state(arm_lock),\n m.get_button_state(arm_enable),\n m.get_button_state(gripper_close))\n\n #return DemoInputs(should_exit, should_reset, tready_inputs, arm_inputs)\n return tready_inputs, arm_inputs\n\n return parse_mobile_io_feedback", "title": "" }, { "docid": "2f1d65c33622a586a51918a735b7f073", "score": "0.43674096", "text": "def install_requires():\n return reqs('default.txt')", "title": "" }, { "docid": "e49270ccef937e1ab8c0d93c72a49463", "score": "0.436715", "text": "def downgrade():", "title": "" }, { "docid": "e49270ccef937e1ab8c0d93c72a49463", "score": "0.436715", "text": "def downgrade():", "title": "" }, { "docid": "237ae61831d6e53c810718a28c6fbdea", "score": "0.43599042", "text": "def __virtual__():\n if HAS_REQUESTS is False:\n return (\n False,\n \"The requests python package is required for the mandrill execution module\",\n )\n return __virtualname__", "title": "" }, { "docid": "e15b259c97591ff0ba3f3ff2444cc2dc", "score": "0.43435192", "text": "def patch_sdk():", "title": "" }, { "docid": "e15b259c97591ff0ba3f3ff2444cc2dc", "score": "0.43435192", "text": "def patch_sdk():", "title": "" }, { "docid": "0efd8fe18dfbb87e13688f5ade636775", "score": "0.43428117", "text": "def main():\n\n # Add your app token here\n YOUR_APP_TOKEN = \"\" # Leave out the \"Bearer \"\n\n print(\"Connecting to the Cloud...\")\n client = Client(token=YOUR_APP_TOKEN)\n user_id = client.api.get_oauth2_user_info()['id']\n\n print(\"Gathering All User Devices...\")\n devices_json = client.api.get_user_devices(user_id)\n devices = [resources.Device(id=d['id'], client=client) for d in devices_json]\n print(\"{} devices found.\".format(len(devices)))\n\n # Use if you want extended data, such as name, description, etc.\n print(\"Gathering Device Data...\")\n for device in devices:\n device.get_info()\n\n print(\"Starting MQTT Stream...\")\n mqtt = dc.MqttStream(callback=None, # A callback of \"None\" will queue messages as they are recieved.\n 
devices=devices)\n mqtt.start()\n\n print(\"Started.\")\n while True:\n time.sleep(1)\n for msg in mqtt.get_messages():\n print(get_device_name(devices, msg.payload), resources_simplifier(msg.payload))", "title": "" }, { "docid": "64c736afc21f27d32e2d8b55f8808334", "score": "0.43383586", "text": "def main():\n logging.basicConfig(level=logging.WARNING)\n\n loop = asyncio.get_event_loop()\n asyncio.ensure_future(TG_BOT.loop())\n\n app = web.Application(loop=loop)\n app.router.add_route('GET', '/rooms/{room_alias}', matrix_room)\n app.router.add_route('PUT', '/transactions/{transaction}',\n matrix_transaction)\n web.run_app(app, port=5000)", "title": "" }, { "docid": "48a7663971696f30ce09436b7dbe665c", "score": "0.4336373", "text": "def install_dependencies():\n local('pip install --upgrade setuptools pip')", "title": "" }, { "docid": "e7834dac9742f3bb3441346bcf607d99", "score": "0.4329496", "text": "def install_iotlab():\n packages = ['libssl-dev']\n install(' '.join(packages))\n\n run('pip3 install iotlabcli iotlabsshcli')", "title": "" }, { "docid": "9e03b41d9d87bccbcfbbe5844af420d9", "score": "0.43243715", "text": "def downgrade():\n pass", "title": "" }, { "docid": "d337bc527d4fd8cad35d383762bb7c03", "score": "0.43130335", "text": "def setModelTypeAsMTCNN(self):\r\n self.__modelType = \"mtcnn\"", "title": "" }, { "docid": "7681a77d1e7b33a274168e23d2097f80", "score": "0.43095696", "text": "def test_update_device(self):\n pass", "title": "" }, { "docid": "92ed993d8f81165dd6d49d786401c98b", "score": "0.43053025", "text": "def test_install_helper_port(self):\n self.port_install_test('cdrtools')", "title": "" }, { "docid": "74dab2a0c0e91f7fd22356de0b8356c6", "score": "0.43011105", "text": "def maya_useNewAPI():\r\n pass", "title": "" }, { "docid": "aa235f08d2325559e42b0e47029d7b92", "score": "0.4300388", "text": "async def test_protocol_factory_udp_multicast_wo():\n test_url1: str = \"udp+wo://239.2.3.1\"\n config: dict = {\"COT_URL\": test_url1}\n reader, writer = await pytak.protocol_factory(config)\n assert reader == None\n assert isinstance(writer, pytak.asyncio_dgram.aio.DatagramClient)", "title": "" }, { "docid": "86d3fa447603bcbe3e941195ecf37f74", "score": "0.4298087", "text": "def test_reactant2_network(self):\n raise NotImplementedError", "title": "" }, { "docid": "1f41fe5f6aa99cd7960c4f5809457aeb", "score": "0.42945382", "text": "def testMODDN(self):\n proto = LDIFDeltaDriver()\n\n with self.assertRaises(NotImplementedError):\n proto.dataReceived(\n b\"\"\"version: 1\ndn: cn=foo,dc=example,dc=com\nchangetype: moddn\n\n\"\"\"\n )", "title": "" }, { "docid": "a3a071808be785f7c7c2a8e97cdb7f66", "score": "0.42942286", "text": "def setup_optilux_cinemacontent():\n \n # Load the ZCML configuration for the optilux.policy package.\n # This includes the other products below as well.\n \n fiveconfigure.debug_mode = True\n import optilux.cinemacontent\n zcml.load_config('configure.zcml', optilux.cinemacontent)\n fiveconfigure.debug_mode = False\n \n # We need to tell the testing framework that these products\n # should be available. 
This can't happen until after we have loaded\n # the ZCML.\n \n ztc.installPackage('optilux.cinemacontent')", "title": "" }, { "docid": "f272f0294b15a25094e7fa49938dc16e", "score": "0.4284533", "text": "def connect_telnet(ip):\r\n \r\n # Try to connect with Telnet\r\n try:\r\n device = telnetlib.Telnet(ip)\r\n # The device doesn't support either SSH or Telnet \r\n except:\r\n return False\r\n \r\n device.read_until(\"Username: \")\r\n device.write(USER + \"\\n\")\r\n device.read_until(\"Password: \")\r\n device.write(PASS + \"\\n\")\r\n\r\n # Telnet sucks\r\n time.sleep(1.5)\r\n \r\n \r\n try:\r\n msg = device.read_until(\"Login invalid\", timeout = 1)\r\n if \"Login invalid\" in msg:\r\n print \"Wrong credentials with Telnet\"\r\n return False\r\n except:\r\n pass\r\n \r\n\t# Commands to configure in the device\r\n print \"Connected with Telnet\"\r\n device.write(\"config t\" + \"\\n\")\r\n device.read_until(\"#\")\r\n device.write(\"\" + \"\\n\")\r\n device.read_until(\"#\")\r\n device.write(\"\" + \"\\n\")\r\n device.read_until(\"#\")\r\n device.write(\"\" + \"\\n\")\r\n device.read_until(\"#\")\r\n \r\n print \"Configuration commited succefully with Telnet at IP: {}\\n\".format(ip)\r\n\r\n # Report on Telnet devices\r\n with open(r\"Telnet_devices.txt\", 'a') as log:\r\n log.write(ip+'\\n')\r\n return True", "title": "" }, { "docid": "e9bd6c29efb01ac287bc0c6675e19f10", "score": "0.42815045", "text": "def bootstrap():\n # upload requirement.txt\n put('requirement.txt', os.path.join(TMP, 'requirement.txt'))\n with cd(TMP):\n run('pip install -r requirement.txt')\n # cleaning\n run('rm %s' % os.path.join(TMP, 'requirement.txt'))", "title": "" }, { "docid": "b93e4e9d05f7bfa0d8db44c6be05bf24", "score": "0.42787886", "text": "def mm(c):\n c.run(\"python manage.py makemigrations\", pty=True)\n c.run(\"python manage.py migrate\", pty=True)", "title": "" }, { "docid": "f0b97d6b4040650da4df39617aa2c5d5", "score": "0.42773715", "text": "def __init__(self, temboo_session):\n super(SendMessage, self).__init__(temboo_session, '/Library/Nexmo/SMS/SendMessage')", "title": "" }, { "docid": "ffa3eff0c1394d13bebe415066e73dcf", "score": "0.4275133", "text": "def telnet_main(ip_address,login,passw):\r\n ## ALLOWS ME TO SWITCH OFF RoUTERS and switches for nOW\r\n #ip = input(\"Enter IP Address: \")\r\n ip = \"192.168.0.118\"\r\n #ip = ip_address\r\n #password = passw\r\n username = login\r\n password= \"cisco\"\r\n #password = getpass()\r\n \r\n\r\n tmp_device2 = NetworkDevice(ip, username, password)\r\n\r\n tmp_device2.remote_conn = telnet.establish_connection(ip, username, password)\r\n t = telnet.disable_paging(tmp_device2.remote_conn)\r\n print (\"disable paging = \"+t) \r\n #print(test_device2.show_version)\r\n## remote_conn.write(b\"\\n\")\r\n## remote_conn.write(b\"show version\\n\")\r\n##\r\n## time.sleep(1)\r\n## test_device2.show_version = remote_conn.read_very_eager().decode()\r\n return tmp_device2", "title": "" }, { "docid": "51d1fde121dc6fbfe54b21e1e57e2097", "score": "0.42747447", "text": "async def test_setting_attribute_via_mqtt_json_message(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_setting_attribute_via_mqtt_json_message(\n hass, mqtt_mock_entry, update.DOMAIN, DEFAULT_CONFIG\n )", "title": "" }, { "docid": "2f92783534a5707604c75bee751e5f6e", "score": "0.4266604", "text": "def install_manager_packages():\n pkg_list = [\"mysql-server\", \"python-mysqldb\", \"openssh-server\", \"make\",\n \"git\", \"cron\", \"unzip\"]\n 
install_apt_packages(pkg_list)", "title": "" }, { "docid": "733ad81950ae3a7d730ba4135658069e", "score": "0.42612216", "text": "def test_post_pmt_installs(self):\n pass", "title": "" }, { "docid": "21f4a69f96528f20b582ee686137d022", "score": "0.4248311", "text": "def maya_useNewAPI():\n pass", "title": "" }, { "docid": "21f4a69f96528f20b582ee686137d022", "score": "0.4248311", "text": "def maya_useNewAPI():\n pass", "title": "" }, { "docid": "21f4a69f96528f20b582ee686137d022", "score": "0.4248311", "text": "def maya_useNewAPI():\n pass", "title": "" }, { "docid": "fadec7ecd249652fcbf91bc4d0bcdd75", "score": "0.42193985", "text": "def update_package_lists():\n run('apt-get update')", "title": "" }, { "docid": "bcea76ee9debe499e383de53af6f2ecd", "score": "0.42156148", "text": "def install_requirements():\n update_package_lists()\n install_apache()\n install_mod_wsgi()\n install_pip()\n install_virtualenv()", "title": "" }, { "docid": "0b0c7bbcd0e88f8c5b777b532deb2a93", "score": "0.42098084", "text": "def test_reactant1_network(self):\n raise NotImplementedError", "title": "" }, { "docid": "a513d86abc75234a0c357bf7d5c846f0", "score": "0.4207544", "text": "def AddMtuArg(parser):\n parser.add_argument(\n '--mtu',\n type=int,\n help=\"\"\"Maximum transmission unit(MTU) is the size of the largest frame\n that can be transmitted on this network. Default value is\n 1460 bytes, the maximum is 1500 bytes. The MTU advertised\n via DHCP to all instances attached to this network.\"\"\")", "title": "" }, { "docid": "768a32b29ad8aa81c8d5169a7640ffc9", "score": "0.42062753", "text": "def installBuildDepends():\n\n\terror_code = call(\"apt-get install --assume-yes graphviz libxml2-utils libopenexr-dev libjasper-dev libenchant-dev \\\n\t\t\t\t\tlibavahi-common-dev libaspell-dev libasound2-dev libldap2-dev \\\n\t\t\t\t\tlibsmbclient-dev libxkbfile-dev libxklavier-dev libxdamage-dev \\\n\t\t\t\t\tlibbluetooth-dev libusb-dev network-manager-dev \\\n\t\t\t\t\tlibsensors4-dev libnm-util-dev libcfitsio3-dev libnova-dev libeigen2-dev \\\n\t\t\t\t\tlibfacile-ocaml-dev libboost-python-dev libsvn-dev libsvncpp-dev \\\n\t\t\t\t\tlibcommoncpp2-dev libidn11-dev libpci-dev libxss-dev libxft-dev \\\n\t\t\t\t\tlibpolkit-agent-1-dev libpolkit-backend-1-dev libpolkit-gobject-1-dev libspectre-dev\", shell=True)", "title": "" }, { "docid": "0fe7be47e6f1da33933c30e66cdfef28", "score": "0.41963962", "text": "def setup_platform(\n hass: HomeAssistant,\n config: ConfigType,\n add_entities: AddEntitiesCallback,\n discovery_info: DiscoveryInfoType | None = None,\n) -> None:\n vehicle = config.get(CONF_NAME)\n email = config.get(CONF_EMAIL)\n sensors: dict[int, TorqueSensor] = {}\n\n hass.http.register_view(\n TorqueReceiveDataView(email, vehicle, sensors, add_entities)\n )", "title": "" }, { "docid": "c284028cea6edfd71fae16895bad5e57", "score": "0.41937548", "text": "def maya_useNewAPI():\n\tpass", "title": "" }, { "docid": "8a7e505750540086c401f306100e8720", "score": "0.41934416", "text": "def webmin():\n print(\"\\n\\t INSTALLING webmin\")\n call_cmd(\"sudo apt-get install perl libnet-ssleay-perl openssl libauthen-pam-perl \"\n \"libpam-runtime libio-pty-perl apt-show-versions python -y\")\n call_cmd(\"wget http://prdownloads.sourceforge.net/webadmin/webmin_1.740_all.deb\")\n call_cmd(\"sudo dpkg -i webmin_1.740_all.deb\")", "title": "" }, { "docid": "196aedf07a14fdfc9751251e77f889af", "score": "0.41852295", "text": "def mitmf():\n\ttargets = \"\"\n\tfor v in victims:\n\t\ttargets += v + ',' \n\ttargets = targets.rstrip(',')\n\tcmd = 
\"\"\"\n\tcd MITMf ; python2.7 mitmf.py -i eth0 --spoof --arp --target {} --gateway {} & \n\t\"\"\".format(targets, GATEWAY_ADDRESS)\n\tprint(cmd)\n\tos.system(cmd)", "title": "" }, { "docid": "b2b3c27dab36caf8dc34d225db2b3da4", "score": "0.41672876", "text": "def test_nessus_rhel_07_040700(host):\n\n assert not host.package('tftp-server').is_installed", "title": "" }, { "docid": "c7568d34562fbcaec7dac90df5df3ca1", "score": "0.41659197", "text": "def install_bacula_client():\n\n # Official repos only have version 5.0.1, we need 5.0.3\n sudo('add-apt-repository ppa:mario-sitz/ppa')\n sudo('apt-get update')\n sudo('apt-get -yq install bacula-fd')\n\n configure_bacula_client()", "title": "" }, { "docid": "c915d675199147cb16969bf578583599", "score": "0.41588885", "text": "def install(self) -> None:\n ...", "title": "" }, { "docid": "c915d675199147cb16969bf578583599", "score": "0.41588885", "text": "def install(self) -> None:\n ...", "title": "" }, { "docid": "314f817244fc63cfbdc4f1d29e286e49", "score": "0.41562563", "text": "def skype_call_recorder():\n r1 = env.run(\"sudo wget http://atdot.ch/scr/files/0.10/skype-call-recorder-ubuntu_0.10_amd64.deb\")\n r2 = env.run(\"sudo dpkg --install skype-call-recorder-ubuntu_0.10_amd64.deb\")\n # clean up, delete downloaded file.\n r3 = env.run(\"sudo rm skype-call-recorder-ubuntu_0.10_amd64.deb\")\n return r1, r2, r3", "title": "" }, { "docid": "c8ea36ddd6cd09f3997f1b7fe87835da", "score": "0.41554573", "text": "def main():\n session = session_factory()\n reader = SimpleMFRC522()\n board = Arduino('/dev/ttyUSB0')\n\n mainApp = App(session, reader, board)\n asyncio.run(mainApp.main())", "title": "" }, { "docid": "d8c1e63c86dad27273a74d0de2af8405", "score": "0.4152637", "text": "def main():\n\n # read default value from TelnetClient class (module_utils/telnet_util.py)\n DEFAULT_BECOME = TelnetClient.DEFAULT_BECOME # False\n DEFAULT_CONNECT_TIMEOUT = TelnetClient.DEFAULT_CONNECT_TIMEOUT # 10\n DEFAULT_LOGIN_TIMEOUT = TelnetClient.DEFAULT_LOGIN_TIMEOUT # 5\n DEFAULT_COMMAND_TIMEOUT = TelnetClient.DEFAULT_COMMAND_TIMEOUT # 5\n DEFAULT_PAUSE = TelnetClient.DEFAULT_PAUSE # 1\n DEFAULT_CONSOLE = TelnetClient.DEFAULT_CONSOLE # False\n\n argument_spec = dict(\n commands=dict(type='list', required=True),\n network_os=dict(default='ios', type='str'),\n host=dict(type='str', required=True),\n port=dict(default=23, type='int'),\n user=dict(default=\"\", type='str'),\n password=dict(default=\"\", type='str'),\n become=dict(default=DEFAULT_BECOME, type='bool'),\n become_pass=dict(default=\"\", type='str'),\n connect_timeout=dict(default=DEFAULT_CONNECT_TIMEOUT, type='int'),\n login_timeout=dict(default=DEFAULT_LOGIN_TIMEOUT, type='int'),\n command_timeout=dict(default=DEFAULT_COMMAND_TIMEOUT, type='int'),\n pause=dict(default=DEFAULT_PAUSE, type='int'),\n console=dict(default=DEFAULT_CONSOLE, type='bool'),\n log=dict(default=False, type='bool'),\n debug=dict(default=False, type='bool')\n )\n\n # generate module instance\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n\n tc = TelnetClient(module.params)\n result = tc.process_command()\n\n if result.get('failed'):\n module.fail_json(**result)\n\n module.exit_json(**result)", "title": "" }, { "docid": "6d9e543ff894faceb1119a5a016d54c2", "score": "0.41524464", "text": "def teleopInit(self):\n pass", "title": "" }, { "docid": "6d9e543ff894faceb1119a5a016d54c2", "score": "0.41524464", "text": "def teleopInit(self):\n pass", "title": "" }, { "docid": "6d9e543ff894faceb1119a5a016d54c2", 
"score": "0.41524464", "text": "def teleopInit(self):\n pass", "title": "" }, { "docid": "9b03a179f4b2b0bf013af448e9553328", "score": "0.4146953", "text": "async def test_protocol_factory_udp_multicast():\n test_url1: str = \"udp://239.2.3.1\"\n config: dict = {\"COT_URL\": test_url1}\n reader, writer = await pytak.protocol_factory(config)\n assert isinstance(reader, pytak.asyncio_dgram.aio.DatagramServer)\n assert isinstance(writer, pytak.asyncio_dgram.aio.DatagramClient)", "title": "" }, { "docid": "32e063b6c38779c524a97f94be463fee", "score": "0.41370493", "text": "def test_valid_form(self):\n data = {'from_number': '1112223333', 'content': 'hi there'}\n form = TelerivetForm(data, backend_name='telerivet-backend')\n self.assertTrue(form.is_valid())", "title": "" }, { "docid": "deb0c5bbd637859b9f9bad18a8c79203", "score": "0.41362053", "text": "def _update(self, msg):\n\t\ttry:\n\t\t\ttn = telnetlib.Telnet(self.host, self.port)\n\t\texcept Exception as e:\n\t\t\tprint(\"Error connecting to \" + self.host + \":\" + str(self.port) + \"\\n\\t\" + str(e))\n\t\t\texit()\n\t\ttn.write(('%s\\n' % msg).encode('ascii'))\n\t\tresponse = tn.read_some().decode('ascii').strip()\n\t\ttn.write('q\\n'.encode('ascii'))\n\t\treturn response", "title": "" }, { "docid": "50a046f9f48b3720e0182b274abbb66e", "score": "0.413463", "text": "def main():\n bot = TahmaTassuBot(TOKEN)\n bot.run()", "title": "" }, { "docid": "97ccac82e7e3b986992a95685fd842b5", "score": "0.41305137", "text": "def sms_reply():\n # Start our TwiML response\n resp = MessagingResponse()\n\n # Add a message\n resp.message(request.form[\"Body\"])\n\n \n connection=dbFunctions.create_server_connection(\"localhost\", \"rskuma\", \"password\")\n\n\n return str(resp)", "title": "" }, { "docid": "a86f43278a2d0db67c62a45434b2d45e", "score": "0.41288075", "text": "def mobilenet_v2(pretrained=False, progress=True, **kwargs):\n model = MobileNetV2(**kwargs)\n return model", "title": "" }, { "docid": "765f94cc0acd1f67f8414df77baf610a", "score": "0.41264802", "text": "def onRequires(self, product, version):\n pass", "title": "" }, { "docid": "12e92284b8eea5d8cf13c0ac942496c0", "score": "0.41213682", "text": "def test_correct_format(self, req, includes):\n MQTTRequest(Mock(), req, includes)", "title": "" }, { "docid": "9744a0c707fbcccdb14b50254ac5a35d", "score": "0.41199893", "text": "def install_requirements():\n run('cd %(path)s; %(path)s/bin/pip install uwsgi' % env)\n run('cd %(path)s; %(path)s/bin/pip install -r ./releases/%(release)s/requirements.txt' % env)", "title": "" }, { "docid": "b414c8a55c72c5269eab6551cb17ec20", "score": "0.4112873", "text": "def do_setup():\n setup(\n name='luft',\n version=version,\n description='Luft is an interactive client (cli)'\n 'that help you with common BI tasks (loading, historization, etc.).',\n long_description=long_description,\n long_description_content_type='text/markdown',\n license='',\n\n author='BI Team @ LMC, s.r.o.',\n author_email='info@lmc.eu',\n maintainer='Radek Tomšej',\n maintainer_email='radek.tomsej@lmc.eu',\n\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python',\n 'Topic :: Software Development :: Libraries',\n ],\n keywords=['cli', 
'client', 'bi', 'generator',\n 'yaml', 'airflow', 'luft', 'lmc'],\n\n packages=find_packages(exclude=['tests*', 'docs*']),\n python_requires='>=3.6, <4',\n include_package_data=True,\n\n entry_points={\n 'console_scripts': [\n 'luft=cli.luft:luft',\n ]\n },\n\n install_requires=install_requires,\n extras_require=extras_require,\n )", "title": "" }, { "docid": "abfea9a036ae4d41adcf5559fe64f10f", "score": "0.4111561", "text": "def test_postdevice_types(self):\n pass", "title": "" }, { "docid": "09b9975259cd02de7d9ca19de84c0195", "score": "0.41095284", "text": "def main():\n robot = robo.Snatch3r()\n mqtt_client = com.MqttClient(robot)\n mqtt_client.connect_to_pc()\n robot.loop_forever()", "title": "" }, { "docid": "ae2c282e036525727351db5a679b670a", "score": "0.4097402", "text": "def update_req():\n # check whether in virtualenv\n if not os.environ.get(\"VIRTUAL_ENV\"):\n _warn(\"You are not in an Virtualenv, please activate it first\")\n return\n local(\"pip freeze|grep -v distribute > %s/pip_requirements.txt\" % CURRENT_PATH)", "title": "" }, { "docid": "3cf2890b67f09019da216023a3ea9908", "score": "0.40819976", "text": "def madison(bot, ievent):\n package = str(ievent.args[0])\n if len(ievent.args) > 1:\n suites = \",\".join(ievent.args[1:])\n else:\n suites = \"\"\n\n data = urlencode({\n 'package': package,\n 's': suites,\n 'text': 'on',\n })\n\n f = urlopen('http://qa.debian.org/madison.php?%s' % data)\n for l in f.readlines():\n bot.msg(choose_dest(ievent), \"%s: %s\" % (ievent.nick, l.rstrip().encode(\"utf-8\")))", "title": "" }, { "docid": "1db19d1119b7a45b931c148e4c31b9a8", "score": "0.40819606", "text": "def update_package_manager_package_lists():\n sudo(\"apt-get update\")", "title": "" }, { "docid": "d86221e7bdf68884862260a7f25e7647", "score": "0.40759444", "text": "def setup_mqtt_client(mqtt_conf, mqtt_client):\n\n if mqtt_conf['TLS']['enable']:\n logger.info(\"TLS Setup for Broker\")\n logger.info(\"checking TLS_Version\")\n tls = mqtt_conf['TLS']['tls_version']\n if tls == 'tlsv1.2':\n tlsVersion = ssl.PROTOCOL_TLSv1_2\n elif tls == \"tlsv1.1\":\n tlsVersion = ssl.PROTOCOL_TLSv1_1\n elif tls == \"tlsv1\":\n tlsVersion = ssl.PROTOCOL_TLSv1\n else:\n logger.info(\"Unknown TLS version - ignoring\")\n tlsVersion = None\n if not mqtt_conf['TLS']['insecure']:\n\n logger.info(\"Searching for Certificates in certdir\")\n CERTS_DIR = mqtt_conf['TLS']['certs']['certdir']\n if os.path.isdir(CERTS_DIR):\n logger.info(\"certdir exists\")\n CA_CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['cafile'])\n CERT_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['certfile'])\n KEY_FILE = os.path.join(CERTS_DIR, mqtt_conf['TLS']['certs']['keyfile'])\n\n mqtt_client.tls_set(ca_certs=CA_CERT_FILE, certfile=CERT_FILE, keyfile=KEY_FILE, cert_reqs=ssl.CERT_REQUIRED, tls_version=tlsVersion)\n else:\n logger.error(\"certdir does not exist.. 
check path\")\n sys.exit()\n else:\n mqtt_client.tls_set(ca_certs=None, certfile=None, keyfile=None, cert_reqs=ssl.CERT_NONE, tls_version=tlsVersion)\n mqtt_client.tls_insecure_set(True)\n \n if mqtt_conf['username'] and mqtt_conf['password']:\n logger.info(\"setting username and password for Broker\")\n mqtt_client.username_pw_set(mqtt_conf['username'], mqtt_conf['password'])\n \n return mqtt_client", "title": "" }, { "docid": "cb785bf7b74cf1db77b012d66ed3dcd2", "score": "0.40753925", "text": "def demoserver():\n install_base()\n download_kolibri()\n configure_nginx()\n configure_kolibri()\n restart_kolibri(post_restart_sleep=30) # wait for DB migration to happen...\n provisiondevice()\n import_channels()\n restart_kolibri()\n puts(green('Kolibri demo server setup complete.'))", "title": "" }, { "docid": "93bcd9cf9b848792e17c5642e52ff329", "score": "0.40697452", "text": "def install_all_packages():\n install_common()\n install_iotlab()\n install_riot()", "title": "" } ]
d8c6bfa24395eb5f91b31736e769a1ba
reply thread by user that requires approval
[ { "docid": "221aa17e2f2d39a5a31dbe00bfb3663b", "score": "0.66562307", "text": "def test_user_moderation_queue(self):\n self.override_acl({'require_replies_approval': 1})\n\n response = self.client.post(\n self.api_link, data={\n 'post': \"Lorem ipsum dolor met!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n thread = Thread.objects.get(pk=self.thread.pk)\n self.assertFalse(thread.is_unapproved)\n self.assertTrue(thread.has_unapproved_posts)\n self.assertEqual(thread.replies, self.thread.replies)\n\n post = self.user.post_set.all()[:1][0]\n self.assertTrue(post.is_unapproved)\n\n category = Category.objects.get(slug='first-category')\n self.assertEqual(category.threads, self.category.threads)\n self.assertEqual(category.posts, self.category.posts)", "title": "" } ]
[ { "docid": "dae93e3ca0fd0a90b7397e1dbd1f795e", "score": "0.6544899", "text": "def test_user_moderation_queue_bypass(self):\n override_acl(self.user, {'can_approve_content': 1})\n\n self.override_acl({'require_replies_approval': 1})\n\n response = self.client.post(\n self.api_link, data={\n 'post': \"Lorem ipsum dolor met!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n thread = Thread.objects.get(pk=self.thread.pk)\n self.assertFalse(thread.is_unapproved)\n self.assertFalse(thread.has_unapproved_posts)\n self.assertEqual(thread.replies, self.thread.replies + 1)\n\n post = self.user.post_set.all()[:1][0]\n self.assertFalse(post.is_unapproved)\n\n category = Category.objects.get(slug='first-category')\n self.assertEqual(category.threads, self.category.threads)\n self.assertEqual(category.posts, self.category.posts + 1)", "title": "" }, { "docid": "b5c2da69ad700386f4e9b5e39439b352", "score": "0.6392418", "text": "def reply(self):\n sub = reddit.get_submission(self.comment.permalink)\n author = self.comment.author\n try:\n # First message will be a reply in a thread\n # afterwards are PM in the same thread\n if (sub.id not in self.subId):\n self.comment.reply(self._replyMessage.format(\n self._replyDate))\n else:\n reddit.send_message(author, 'RemindMeBot Reminder!', self._replyMessage.format(\n self._replyDate))\n except (HTTPError, ConnectionError, Timeout, timeout) as err:\n print err\n # PM instead if the banned from the subreddit\n if str(err) == \"403 Client Error: Forbidden\":\n reddit.send_message(author, 'RemindMeBot Reminder!', self._replyMessage.format(\n self._replyDate))\n except RateLimitExceeded as err:\n print err\n # PM when I message too much\n reddit.send_message(author, 'RemindMeBot Reminder!', self._replyMessage.format(\n self._replyDate))\n time.sleep(10)\n except APIException as err: # Catch any less specific API errors\n print err\n else:\n # only message the thread once\n self.subId.append(sub.id)\n print self._replyMessage.format(\n self._replyDate)", "title": "" }, { "docid": "847eb783af569b05b1b9f5b7e0d96518", "score": "0.60916436", "text": "def button_to_approve(self):\r\n lines = self.approving_matrix_line_ids.filtered(lambda r: r.approved == False)\r\n for line in lines:\r\n if line.employee_ids and len(line.employee_ids) > 0:\r\n user_ids = line.employee_ids.mapped('user_id')\r\n \"\"\" create notification in discussion panel \"\"\"\r\n if user_ids:\r\n self.send_mail_pr_approval_process(user_ids)\r\n break\r\n\r\n if lines:\r\n return super(purchase_request, self).button_to_approve()\r\n else:\r\n return super(purchase_request, self).button_approved()", "title": "" }, { "docid": "dccdfd09c610156af91ccc5474c79ecd", "score": "0.6060785", "text": "def on_pending_moderation(self, user): # P\n self.console.write('%s:%s is waiting for broadcast approval.' %\n (user.nick, user.handle), Color.B_YELLOW)", "title": "" }, { "docid": "24224ef7c6d484820946213ecb73f852", "score": "0.5905701", "text": "async def reply(ctx, request_id: str = None, *, reply: str = None):\n if (request_id == None) or (reply == None):\n await ctx.send(\"Sorry, but I'll need the request ID and reply to process this command.\")\n return\n if len(reply) > 1500:\n await message.channel.send(f\"**Sorry, but your reply is too long. Please keep it under 1500 characters.**\")\n return\n asker = await resolve_accepted_asker(request_id)\n if asker == None:\n await ctx.send(\"Sorry. 
Doesn't look like that request ID exists.\")\n else:\n asker_user = bot.get_user(int(asker))\n await asker_user.send(f\"**You have received a reply from {ctx.message.author.name}:**\\n\\n{reply}\\n\\nhttps://discord.com/channels/{ctx.message.channel.guild.id}/{ctx.message.channel.id}/{ctx.message.id}\")\n await ctx.message.add_reaction(\"\\u2705\")", "title": "" }, { "docid": "832bec2a07351304460bf66059d5d373", "score": "0.5887017", "text": "def replies():", "title": "" }, { "docid": "b1f57e795438b0f92636743b6bb89321", "score": "0.5861306", "text": "def reply(request, thread):\n if not request.user.is_authenticated():\n return HttpResponseServerError()\n t = get_object_or_404(Thread, pk=thread)\n if t.closed:\n return HttpResponseServerError()\n body = request.POST.get('body', False)\n p = Post(\n thread=t, \n author=request.user,\n body=body,\n time=datetime.now(),\n )\n p.save()\n\n sub = Subscription.objects.filter(thread=t, author=request.user)\n if request.POST.get('subscribe',False):\n if not sub:\n s = Subscription(\n author=request.user,\n thread=t\n )\n s.save()\n else:\n if sub:\n sub.delete()\n\n # Subscriptions are updated now send mail to all the authors subscribed in\n # this thread.\n mail_subject = ''\n try:\n mail_subject = settings.FORUM_MAIL_PREFIX \n except AttributeError:\n mail_subject = '[Forum]'\n\n mail_from = ''\n try:\n mail_from = settings.FORUM_MAIL_FROM\n except AttributeError:\n mail_from = settings.DEFAULT_FROM_EMAIL\n\n mail_tpl = loader.get_template('forum/notify.txt')\n c = Context({\n 'body': wordwrap(striptags(body), 72),\n 'site' : Site.objects.get_current(),\n 'thread': t,\n })\n\n #email = EmailMessage('Hello', 'Body goes here', 'from@example.com',\n # ['to1@example.com', 'to2@example.com'], ['bcc@example.com'],\n # headers = {'Reply-To': 'another@example.com'})\n email = EmailMessage(\n subject=mail_subject+' '+striptags(t.title),\n body= mail_tpl.render(c),\n from_email=mail_from,\n to=[mail_from],\n bcc=[s.author.email for s in t.subscription_set.all()],)\n email.send(fail_silently=True)\n\n return HttpResponseRedirect(p.get_absolute_url())", "title": "" }, { "docid": "f80ab51a6007a26ce90c1935a164dc82", "score": "0.5845358", "text": "def test_cant_reply_thread(self):\n self.override_acl({'can_reply_threads': 0})\n\n response = self.client.post(self.api_link)\n self.assertContains(\n response, \"You can't reply to threads in this category.\", status_code=403\n )", "title": "" }, { "docid": "d573d6b48ded2301131981ae925a98ab", "score": "0.58110386", "text": "def update_gmail_replies_by_user(self):\n logging.info('update_gmail_replies_by_user')\n self.meta.change_view('json')\n userinfo_id = self.request.get('userinfo_id')\n email = self.request.get('email')\n user_info = None\n if userinfo_id:\n user_info = UserInfo.get_by_id(int(userinfo_id))\n elif email:\n user_info = UserInfo.find_by_properties(email=email)\n if not user_info:\n logging.error('NoUserInfo')\n return\n \n logging.info('update_gmail_replies_by_user %s' % user_info.email)\n client = GoogleMessageClient(user_info=user_info)\n result = client.search_messages(subject='+re:', tos=['me'], limit=1000, after=datetime.utcnow() - timedelta(days=2))\n logging.info(result)\n if not result or not result.has_key('messages'):\n logging.info('NoMessage')\n return\n thread_ids = [m['threadId'] for m in result['messages']]\n logging.info(thread_ids)\n #TODO: check signal query index\n signals = Signal.query(Signal.thread_id.IN(thread_ids), Signal.sender == user_info.email, Signal.replied == 
None).fetch()\n if not signals:\n logging.info('NoSignal')\n return\n tokens = []\n for s in signals:\n logging.info('signal_token %s' % s.token)\n tokens.append(s.token)\n taskqueue.add(url='/update_gmail_reply_by_mail',\n params={\n 'signal_id': s.key.id(),\n })\n self.context['data'] = dict(signal_tokens=tokens)", "title": "" }, { "docid": "b213bd0ca9795566060076b55b122787", "score": "0.57584876", "text": "def replies(self):", "title": "" }, { "docid": "80e6b5d7242e8f21ebaef6397899cd8e", "score": "0.5746617", "text": "def test_restricted_hide_reply(self):\n f = ForumFactory(restrict_posting=True)\n t = ThreadFactory(forum=f)\n u = UserFactory()\n self.client.login(username=u.username, password=\"testpass\")\n response = get(self.client, \"forums.posts\", args=[f.slug, t.pk])\n self.assertNotContains(response, \"thread-reply\")", "title": "" }, { "docid": "6420fcc9de167f886b35c28d1c788a44", "score": "0.5730291", "text": "def awaiting_moderation(user):\n return IncomingMail.objects.filter(state='moderate').filter(mailing_list__moderators__username=user.username)", "title": "" }, { "docid": "b1f1c326a6636673732adf11389a2631", "score": "0.57289517", "text": "async def accept(ctx, request_id: str = None):\n if (str(ctx.message.author.id) not in config['admins']) and (str(ctx.message.author.id) not in config['moderators']):\n await ctx.send('Sorry, but you can\\'t do that!')\n else:\n if request_id == None:\n await ctx.send(\"Sorry, but I'll need the request ID to process this command.\")\n return\n asker = await resolve_pending_asker(request_id)\n if asker == None:\n await ctx.send(\"Sorry. Doesn't look like that request ID exists.\")\n else:\n requests['accepted'][asker] = requests['pending'][asker]\n requests['pending'].pop(asker)\n save_requests(requests)\n public_channel = bot.get_channel(config['public_channel'])\n await public_channel.send(f\"**Anonymous question with reply ID ``{requests['accepted'][asker][0]}``:**\\n\\n{requests['accepted'][asker][1]}\")\n asker_user = bot.get_user(int(asker))\n await asker_user.send(f\"**Your request has been approved! Check the <#{config['public_channel']}> channel for further replies. 
\" + \n \"You may receive notifications for replies through this bot, and you may provide \" +\n \"additional info anonymously by messaging this bot.\\n\\nTo close this request, use `!close` in this DM.**\")\n await ctx.send(f\"Request `{request_id}` successfully accepted!\")", "title": "" }, { "docid": "c7053407cf07ed829e6c8293a97c5263", "score": "0.570316", "text": "def test_can_reply_thread(self):\n self.override_acl()\n response = self.client.post(\n self.api_link, data={\n 'post': \"This is test response!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n thread = Thread.objects.get(pk=self.thread.pk)\n\n self.override_acl()\n response = self.client.get(self.thread.get_absolute_url())\n self.assertContains(response, \"<p>This is test response!</p>\")\n\n # api increased user's posts counts\n self.reload_user()\n self.assertEqual(self.user.threads, 0)\n self.assertEqual(self.user.posts, 1)\n\n post = self.user.post_set.all()[:1][0]\n self.assertEqual(post.category_id, self.category.pk)\n self.assertEqual(post.original, \"This is test response!\")\n self.assertEqual(post.poster_id, self.user.id)\n self.assertEqual(post.poster_name, self.user.username)\n\n self.assertEqual(thread.last_post_id, post.id)\n self.assertEqual(thread.last_poster_id, self.user.id)\n self.assertEqual(thread.last_poster_name, self.user.username)\n self.assertEqual(thread.last_poster_slug, self.user.slug)\n\n category = Category.objects.get(pk=self.category.pk)\n self.assertEqual(category.last_thread_id, thread.id)\n self.assertEqual(category.last_thread_title, thread.title)\n self.assertEqual(category.last_thread_slug, thread.slug)\n\n self.assertEqual(category.last_poster_id, self.user.id)\n self.assertEqual(category.last_poster_name, self.user.username)\n self.assertEqual(category.last_poster_slug, self.user.slug)", "title": "" }, { "docid": "b5712cfced3372ef93de90604a0c05fd", "score": "0.56875193", "text": "def direct_answer(self, thread_id: int, text: str) -> DirectMessage:\n assert self.user_id, \"Login required\"\n return self.direct_send(text, [], [int(thread_id)])", "title": "" }, { "docid": "e5b22e826580a12727377a056748e378", "score": "0.56694084", "text": "def notification_message(bot, update, user_data):\n\n\n user_data['notif_message']= update.message.text\n keyboard = [['Yes'], ['No']]\n reply_markup = ReplyKeyboardMarkup(keyboard)\n messageContent = \"Targeting {} users. Requesting confirmation...\".format(len(get_user_list()))\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent, reply_markup=reply_markup)\n return NOTIF_CONFIRM", "title": "" }, { "docid": "0970ea393b1d4ebdab2d757acd0c318e", "score": "0.56631863", "text": "def send_request_notification_mail(user, aReq):\n aUser = User.objects.get(id=user)\n p = aUser.get_profile()\n title = \"RAMCO request participation\"\n #TODO : redirect confirm on URLs\n content = \"Hi \"+aUser.username+\", \"+aReq.user.username+\" has requested your participation to a request \\\"\" + aReq.title + \"\\\". 
Here is the link to the request: \" \\\n \"http://localhost:8000/requestView/\" + str(aReq.id)\n if aReq.key:\n content = content+\"\\nTo access this request you need this key: \"+aReq.key\n send_mail(title, content, 'ramco.confirm@gmail.com', [aUser.email], fail_silently=False)", "title": "" }, { "docid": "17678977689e79d8a8e6d75f1648e828", "score": "0.5631521", "text": "def continueThread(self, msg, msgdata, threadReference):\n subscriber = Subscriber(self.mlist)\n message = Message(self.mlist)\n senderID = subscriber.getSubscriber_id(msg, msgdata, safe=1, loose=1)\n msgdata['message_id'] = message.createMessage(msg, msgdata)\n\n pref=True\n\n # email selected people\n #Execute a SELECT statement, to find the list of matching subscribers.\n command = \"lists = [(subscriber.mailman_key.encode('utf-8')) for subscriber in result_continue_sql]\\n\"\n if DEBUG_MODE:\n syslog('info', 'DlistUtils:(continueThread) executing query:\\n%s', command)\n\n # these people get the email by default\n result = self.store.find(Subscriber,And(Subscriber.deleted == False, Subscriber.suppress == 0, Subscriber.preference == 1))\n email_keys = set(subscriber.mailman_key.encode('utf-8') for subscriber in result)\n\n # these people get the mail, because they overrode their preference for this thread\n result = self.store.find((Subscriber,Override),And(Override.subscriber_id == Subscriber.subscriber_id, Subscriber.suppress == 0, Override.thread_id == msgdata['thread_id'],Override.preference == 1))\n yes_email_keys = set(subscriber.mailman_key.encode('utf-8') for (subscriber,override) in result)\n\n # these people don't get the mail, due to override\n result = self.store.find((Subscriber,Override),And(Override.subscriber_id == Subscriber.subscriber_id, Override.thread_id == msgdata['thread_id'],Override.preference == 0))\n no_email_keys = set(subscriber.mailman_key.encode('utf-8') for (subscriber,override) in result)\n\n email_keys.update(yes_email_keys)\n email_keys.difference_update(no_email_keys)\n\n self.email_recipients(msg, msgdata, email_keys, pref)\n\n # Make original message go to nobody (but be archived)\n msgdata['recips'] = []", "title": "" }, { "docid": "532dc72b8e9214e8955c1fe4ab6994e8", "score": "0.56312233", "text": "async def staffreq(self, ctx: GuildContext, *, msg_request: str = \"\"):\n author = ctx.author\n await ctx.message.delete()\n msg = f\"❗️ **Assistance requested**: {ctx.channel.mention} by {author.mention} | {self.bot.escape_text(author)} @here\"\n if msg_request != \"\":\n embed = discord.Embed(color=discord.Color.gold())\n embed.description = msg_request\n else:\n embed = None\n await self.bot.channels['mods'].send(msg, embed=embed, allowed_mentions=discord.AllowedMentions(everyone=True)) # type: ignore\n try:\n await author.send(f\"✅ Online staff have been notified of your request in {ctx.channel.mention}.\", embed=embed) # type: ignore\n except discord.errors.Forbidden:\n pass", "title": "" }, { "docid": "680a5a84c521b5f173c4c67310ba5e23", "score": "0.5628153", "text": "def send_auto_reply(self, d):\n\t\tsignature = self.email_settings.support_signature\n\n\t\tresponse = self.email_settings.support_autoreply or (\"\"\"\nA new Ticket has been raised for your query. 
If you have any additional information, please\nreply back to this mail.\n\t\t\nWe will get back to you as soon as possible\n\n[This is an automatic response]\n\n\t\t\"\"\" + (signature or ''))\n\n\t\tfrom webnotes.utils.email_lib import sendmail\n\t\t\n\t\tsendmail(\\\n\t\t\trecipients = [d.raised_by], \\\n\t\t\tsender = self.email_settings.support_email, \\\n\t\t\tsubject = '['+d.name+'] ' + str(d.subject or ''), \\\n\t\t\tmsg = response)", "title": "" }, { "docid": "9163c75de2ed4332ed6cd3dea294b3bd", "score": "0.5625022", "text": "def in_reply_to_user_id(self):\n\t\treturn self.safe_get_int_column(\"in_reply_to_user_id\")", "title": "" }, { "docid": "091efecd35c7de5a070d602bbd8dfeb7", "score": "0.55918014", "text": "def reply_f(reply, comment_obj, pekofy_msg=None):\r\n replies[\"pekofy\"][\"message\"] = pekofy_msg\r\n if pekofy_msg and is_triggering(pekofy_msg, \"nothing changed\"):\r\n reply = \"nothing changed\"\r\n\r\n reply_content = replies[reply]\r\n if not random.randint(0, 100) <= (replies[reply]['chance'] if 'chance' in reply_content else 100) or \\\r\n already_replied_to(comment_obj, reply):\r\n return\r\n\r\n message = random.choice(reply_content[\"messages\"]) if \"messages\" in replies[reply] else reply_content[\"message\"]\r\n try:\r\n comment_obj.reply(message)\r\n global comments_replied\r\n comments_replied += 1\r\n except Exception as e:\r\n print(f\"Couldn't reply: {e}\")\r\n notify_author(e, comment_obj, message)\r\n print(f\"{reply}: https://www.reddit.com{comment_obj.permalink}\")\r\n print(f\"Reply: {message}\")\r\n print(\"------------------------\")", "title": "" }, { "docid": "70a35c664c3538e26dd7d755f0eea5f5", "score": "0.55775565", "text": "def reply_to_submission(submission: Submission) -> bool:\n for reply in submission.comments.list():\n if reply.author.name == args.username:\n log.info(f'Already replied to submission {submission.id}')\n return False\n\n if args.dry_run:\n log.info(f'Dry-run, skipping submission: {submission.shortlink}')\n else:\n replies_per_thread = cache.get(f'replies_per_submission:{submission.id}') or 0\n cache.set(f'replies_per_submission:{submission.id}', replies_per_thread + 1)\n\n log.info(f'Replying to submission: {submission.shortlink}')\n\n submission.reply(COMMENT.format(signature=submission.id))\n submission.upvote()\n return True\n\n return False", "title": "" }, { "docid": "5dcf4c92f3ce0b2c73c15d7d57b07cf5", "score": "0.5524766", "text": "def reply(request, thread):\n if not request.user.is_authenticated():\n return HttpResponseServerError()\n t = get_object_or_404(Thread, pk=thread)\n if t.closed:\n return HttpResponseServerError()\n if not Forum.objects.has_access(t.forum, request.user.groups.all()):\n return HttpResponseForbidden()\n\n if request.method == \"POST\":\n form = ReplyForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n body = form.cleaned_data['body']\n p = Post(\n thread=t,\n author=request.user,\n body=body,\n time=datetime.now(),\n )\n p.save()\n\n sub = Subscription.objects.filter(thread=t, author=request.user)\n if form.cleaned_data.get('subscribe',False):\n if not sub:\n s = Subscription(\n author=request.user,\n thread=t\n )\n s.save()\n else:\n if sub:\n sub.delete()\n\n # Subscriptions are updated now send mail to all the authors subscribed in\n # this thread.\n mail_subject = ''\n try:\n mail_subject = settings.FORUM_MAIL_PREFIX\n except AttributeError:\n mail_subject = '[Forum]'\n\n mail_from = ''\n try:\n mail_from = settings.FORUM_MAIL_FROM\n except AttributeError:\n mail_from = 
settings.DEFAULT_FROM_EMAIL\n\n mail_tpl = loader.get_template('forum/notify.txt')\n c = Context({\n 'body': wordwrap(striptags(body), 72),\n 'site' : Site.objects.get_current(),\n 'thread': t,\n })\n\n email = EmailMessage(\n subject=mail_subject+' '+striptags(t.title),\n body= mail_tpl.render(c),\n from_email=mail_from,\n to=[mail_from],\n bcc=[s.author.email for s in t.subscription_set.all()],)\n email.send(fail_silently=True)\n\n## data={'title': t.title,\n## 'summary': t.title,\n## 'attached_timestamp':datetime.now()\n## }\n## attachment_form=AttachmentForm(data=data,files=form.files)\n## a=attachment_form.errors\n## content_type =ContentType.objects.get_for_model(Post)\n## object_id = p.id\n## ffs=form.files\n## debug()\n## if attachment_form.is_valid():\n## attachment = attachment_form.save(commit=False)\n## attachment.content_type = content_type\n## attachment.object_id = object_id\n## attachment.attached_by = request.user\n## attachment.save()\n\n# for attachedfilefield in form.files:\n# #file_path = '%s%s' % (settings.MEDIA_ROOT, form.files[attachedfilefield])\n# attachment_file = form.files[attachedfilefield]\n# file_path =os.path.join(ATTACHMENT_DIR, randomfilename(attachment_file.name))\n# (mimetype, encoding) = mimetypes.guess_type(file_path)\n#\n# try:\n# mime_type = mimetype\n# except:\n# mime_type = 'text/plain'\n#\n# attach=Attachment(\n# content_type =ContentType.objects.get_for_model(Post),\n# object_id = p.id,\n# title = attachment_file.name,\n# summary = t.title,\n# attached_by = request.user,\n# )\n# attach.save_uploaded_file(attachment_file)\n# attach.save()\n\n\n for attachedfilefield in form.files:\n #file_path = '%s%s' % (settings.MEDIA_ROOT, form.files[attachedfilefield])\n attachment_file = form.files[attachedfilefield]\n attach=Attachment()\n attach.handle_uploaded_attachment(p,\n attachment_file,\n request.user,\n attachment_file.name,\n t.title\n )\n\n return HttpResponseRedirect(p.get_absolute_url())\n else:\n form = ReplyForm()\n\n return render_to_response('forum/reply.html',\n RequestContext(request, {\n 'form': form,\n 'forum': t.forum,\n 'thread': t,\n }))", "title": "" }, { "docid": "3ee1076ee0ec5c01e3bc40eb2760d803", "score": "0.5523966", "text": "def new_reply(cls, thread, user, content, arquivo):\n msg = cls.objects.create(thread=thread, sender=user, content=content, file_upload=arquivo)\n thread.userthread_set.exclude(user=user).update(unread=True)\n thread.userthread_set.filter(user=user).update(unread=False)\n message_sent.send(sender=cls, message=msg, thread=thread, reply=True)\n return msg", "title": "" }, { "docid": "2dd5835dbc2cc79150e3b9a50cfedc1b", "score": "0.55041254", "text": "def request_reply(self, school_id, form_data):\n if not super().validate_data(form_data):\n return False\n\n # check if content exists that is being replied to\n content_id = form_data[\"content_id\"]\n if content_id is None or not super().content_exists(content_id):\n return False\n\n # get new request_id\n request_id = self.name_provider.generate_request_id(super().user_email)\n if request_id is None:\n return False\n\n # store data\n return super().request_content(request_id, {\n \"sid\": school_id,\n \"content_id\": content_id,\n \"text\": form_data[\"text\"],\n \"uid\": super().display_name,\n \"email\": super().user_email\n })", "title": "" }, { "docid": "123de930206595e63b1dcbfa09245a79", "score": "0.5498849", "text": "def new_reply(self, permalink, message, origin_date, author): 
\n\t\t\"\"\"\n\t\tprint(self._replyMessage.format(\n\t\t\t\tmessage,\n\t\t\t\tpermalink\n\t\t\t)\n\t\t\"\"\"\n\t\tprint(\"---------------\")\n\t\tprint(author)\n\t\tprint(permalink)\n\n\t\torigin_date_text = \"\"\n\t\t# Before feature was implemented, there are no origin dates stored\n\t\tif origin_date is not None:\n\t\t\torigin_date_text = (\"\\n\\nYou requested this reminder on: \" \n\t\t\t\t\t\t\t\t\"[**\" + str(origin_date) + \" UTC**](http://www.wolframalpha.com/input/?i=\"\n\t\t\t\t\t\t\t\t+ str(origin_date) + \" UTC To Local Time)\")\n\n\t\ttry:\n\t\t\treddit.redditor(str(author)).message(\n\t\t\t\tsubject='Hello, ' + str(author) + ' RemindMeBot Here!',\n\t\t\t\tmessage=self._replyMessage.format(\n\t\t\t\t\tmessage=str(message),\n\t\t\t\t\toriginal=str(permalink),\n\t\t\t\t\tparent= self.parent_comment(permalink),\n\t\t\t\t\torigin_date_text = origin_date_text\n\t\t\t\t))\n\t\t\tprint(\"Did It\")\n\t\t\treturn True\n\t\texcept APIException as err:\n\t\t\tprint(\"APIException\", err)\n\t\t\tif \"USER_DOESNT_EXIST\" in str(err):\n\t\t\t\tprint(\"User doesn't exist, skipping\")\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\texcept IndexError as err:\n\t\t\tprint(\"IndexError\", err)\n\t\t\treturn False\n\t\texcept (HTTPError, ConnectionError, Timeout, timeout, ServerError) as err:\n\t\t\tprint(\"HTTPError\", err)\n\t\t\ttime.sleep(10)\n\t\t\treturn False", "title": "" }, { "docid": "8ea1e6ecc6382f797f61c5cfcca20e96", "score": "0.5494967", "text": "def _get_reply(self, view, user_projector):\n if view.get('deleted', False): # if it has the field deleted in it, it means it was deleted\n return None\n reply = dict()\n reply['text'] = self._clean_text(view['message'])\n reply['user'] = user_projector.get(view['user_id'], -1) # -1 here means the user is no longer in the class???\n reply['posted_at'] = view['created_at']\n reply['replies'] = self._get_replies(view.get('replies', []), user_projector)\n return reply", "title": "" }, { "docid": "30f0b2363b949a879a63ed589bdb7be9", "score": "0.5474875", "text": "def pass_thread_control(psid, metadata, target_app_id=None):\n if target_app_id is None:\n target_app_id = os.getenv(\"ASHA_ID\")\n\n param = {\"access_token\": os.getenv(\"PAGE_TOKEN\")}\n\n header = {\"Content-Type\": \"application/json\"}\n\n post_data = json.dumps({\n \"recipient\":{\"id\":psid},\n \"target_app_id\":int(target_app_id),\n \"metadata\":metadata\n })\n\n return requests.post(BASE_URL, params=param, headers=header, data=post_data).status_code", "title": "" }, { "docid": "ddff56f72d7314c684a0aea6a7428ca9", "score": "0.5472028", "text": "def send_approval_request():\n\n msg = Message(\n \"Altmetrics: New user registered\",\n sender=current_app.config.get('MAIL_DEFAULT_SENDER'),\n recipients=[current_app.config.get('TECH_EMAIL')]\n )\n context = {'user_name': current_app.config.get('TECH_NAME')}\n configure_mail_body(msg, 'new_user_registered', context)\n\n mail.send(msg)", "title": "" }, { "docid": "ff718259dda8e79e602c17ab01ec9ee9", "score": "0.5471805", "text": "def test_cant_reply_thread_as_guest(self):\n self.logout_user()\n\n response = self.client.post(self.api_link)\n self.assertEqual(response.status_code, 403)", "title": "" }, { "docid": "b62d4f6415c41140c8ded2c33ae6a3b9", "score": "0.5452193", "text": "def newThread(self, msg, msgdata, threadBase=None):\n id, name = self.createThread(msg, msgdata, threadBase)\n msgdata['thread_id'] = id\n msgdata['thread_name'] = name\n\n # Delete any other 'To' headings\n del msg['To']\n msg['To'] = '%s+%s@%s' % 
(self.mlist.internal_name(),\n \t name,\n \t self.mlist.host_name) \n for i in (1, 2):\n # different footers for different prefs, so we need to queue separately\n if(i==1):\n #For condition where preference = True\n pref=True\n if DEBUG_MODE:\n syslog('info', 'DlistUtils:(newThread)executing query:\\nfor pref = true\\n\\n')\n if(i==2):\n #For condition where preference = False\n pref=False\n if DEBUG_MODE:\n syslog('info', 'DlistUtils:(newThread)executing query:\\nfor pref = false\\n\\n')\n\n #Execute a SELECT statement, to find the list of matching subscribers.\n result_new_sql = self.store.find(Subscriber,And(Subscriber.preference == pref,Subscriber.deleted == False,Subscriber.suppress == 0))\n lists = [(subscriber.mailman_key.encode('utf-8')) for subscriber in result_new_sql]\n if DEBUG_MODE:\n syslog('info','value of lists: %s\\n', lists)\n self.email_recipients(msg, msgdata, lists, pref)\n\n # Make original message go to nobody (but be archived)\n msgdata['recips'] = []", "title": "" }, { "docid": "b293678461e958166f84e8a50844135f", "score": "0.53831846", "text": "def reply_to_comment(comment: Comment) -> bool:\n # Don't reply to comments we've already replied to\n for reply in comment.replies.list():\n if reply.author.name == args.username:\n log.info(f'Already replied to comment {comment.id}')\n return False\n\n # Mark the comment as replied to in cache\n replies_per_thread = cache.get(f'replies_per_submission:{comment.submission.id}') or 0\n if replies_per_thread >= MAX_COMMENT_REPLIES_PER_SUBMISSION:\n log.info(f'Already replied to thread {comment.submission.id}; skipping')\n return False\n\n if not args.dry_run:\n cache.set(f'replies_per_submission:{comment.submission.id}', replies_per_thread + 1)\n\n log.info(f'Commenting on comment: {comment.permalink}')\n\n comment.reply(COMMENT.format(signature=comment.submission.id))\n comment.upvote()\n\n return True\n\n else:\n log.info(f'Dry-run, skipping comment: {comment.permalink}')\n\n return False", "title": "" }, { "docid": "586c13516539245188135693c24f9932", "score": "0.537787", "text": "def confirm_allowed(self, user):", "title": "" }, { "docid": "3561586acffa5239fa1bfa56442f8918", "score": "0.53558064", "text": "def reply(request, document_slug, thread_id):\n doc = get_document(document_slug, request)\n\n form = ReplyForm(request.POST)\n post_preview = None\n if form.is_valid():\n thread = get_object_or_404(Thread, pk=thread_id, document=doc)\n\n if not thread.is_locked:\n reply_ = form.save(commit=False)\n reply_.thread = thread\n reply_.creator = request.user\n if \"preview\" in request.POST:\n post_preview = reply_\n elif not _is_ratelimited(request):\n reply_.save()\n\n # Subscribe the user to the thread.\n if Setting.get_for_user(request.user, \"kbforums_watch_after_reply\"):\n NewPostEvent.notify(request.user, thread)\n\n # Send notifications to thread/forum watchers.\n NewPostEvent(reply_).fire(exclude=[reply_.creator])\n\n return HttpResponseRedirect(reply_.get_absolute_url())\n\n return posts(request, document_slug, thread_id, form, post_preview)", "title": "" }, { "docid": "90d4a632904e1a72d0b45bf5edb2f479", "score": "0.5352638", "text": "async def Accept(ctx, userid):\n guild = bot.get_guild(int(SERVERID))\n submit_channel = discord.utils.get(guild.text_channels, name = \"officer-applications\")\n welcome_channel = discord.utils.get(guild.text_channels, name = \"guild-general\")\n\n # We get a member reference here for the applicant instead of a user reference. 
This allows us to do more guild-specific stuff like roles\n member = await guild.fetch_member(userid)\n \n # DEBUG\n print(f'A command from {ctx.author} has been received to accept the application with ID {userid}')\n\n # Check if the member was found, and exit the function if not\n if member is not None:\n print(f'User found! {member.name}')\n else:\n await submit_channel.send(f'User with ID {userid} not found! Maybe they left the server? Or')\n return\n\n # Check if the approver has the manage permissions role, and exit the funtion if not\n if ctx.author.guild_permissions.manage_roles:\n dm_channel = await member.create_dm()\n else:\n await submit_channel.send(f'Command invoker does not have required permissions to approve the applicant. GTFO NOOB')\n return\n\n # Finds the role to promote to user to (member in our case), and promotes the user\n # TODO - Permissions check loop for the bot to make sure the bot can manage roles\n role = discord.utils.get(guild.roles, name='Member')\n await member.add_roles(role)\n\n # Checks passed. Send a nice embedded message to the applicant with the introduction and To-Do list\n await member.dm_channel.send(\"https://memegenerator.net/img/instances/78053019.jpg\")\n\n embed = discord.Embed(title=f\"Your application has been successful, welcome to Kaikoura! Please read the below instructions for your next steps:\", color=0xFF5733, timestamp=datetime.now())\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.set_footer(text=f\"Application approved by {ctx.author.name} at {datetime.now()}\")\n embed.add_field(name=\"Get invited to the guild in-game\", value=\"Please reach out to an officer who may be online, or use the in-game guild recruitment tool and search for **Kaikoura**.\", inline=False)\n embed.add_field(name=\"Check & Change your Discord Nickname to your in-game characters name\", value=\"This is server specific, and for us to know who you are easily! You can do this by right-clicking your name on the server list on the right hand side and selecting 'nickname'. It won't alter your discords main nickname or names on any of your other servers.\", inline=False)\n embed.add_field(name=\"In-Game Calendar for signups\", value=\"We use the in-game calendar to manage signups for events. PLEASE check it now and make it a habbit to update this with your availability. If you want to come and know you can make it, select 'Signup'. If you want to come but don't know if you can make it or might be late, sign up as 'Tentative'. If you can't make it / don't want to come, DON'T SIGN UP! It helps the officer team out a lot to know who's coming to what.\", inline=False)\n embed.add_field(name=\"Check out the guild bible\", value=\"there is a specific text channel in the members section called 'guild-bible'. This contains a link to a google slides doc that is the central place for all of our guild info. Please take a look at it over the next week or so and let me know if you have any questions.\", inline=False)\n embed.add_field(name=\"Mute the 'guild bot commands' channel\", value=\"We have a channel specifically for giving commands to the guild bots (including me!). We suggest you mute this channel to avoid unecessary spam. 
If you decide to mute the whole server, please leave @mentions unmuted as we use this to notify members for events starting etc.\", inline=False)\n \n await member.dm_channel.send(embed=embed)\n \n # Generic welcome message\n await welcome_channel.send(f'Good news everybody, <@{userid}> has made it through the gruelling application process and is here to get carried. Give him a Whaaalecum and good yo-momma joke')\n \n # Confirmation Message\n await submit_channel.send(f'If the code made it this far, the {member.name} has been accepted, given a role and the welcome/introduction message by {ctx.author.name}! Hurrah!')\n\n #Debug\n print(f'If the code made it this far, the {member.name} has been accepted, given a role and the welcome message by {ctx.author.name}! Hurrah!')", "title": "" }, { "docid": "574e53a9861efcf1fdf7ad0a1f534846", "score": "0.534606", "text": "def test_category_moderation_queue_bypass(self):\n override_acl(self.user, {'can_approve_content': 1})\n\n self.category.require_replies_approval = True\n self.category.save()\n\n response = self.client.post(\n self.api_link, data={\n 'post': \"Lorem ipsum dolor met!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n thread = Thread.objects.get(pk=self.thread.pk)\n self.assertFalse(thread.is_unapproved)\n self.assertFalse(thread.has_unapproved_posts)\n self.assertEqual(thread.replies, self.thread.replies + 1)\n\n post = self.user.post_set.all()[:1][0]\n self.assertFalse(post.is_unapproved)\n\n category = Category.objects.get(slug='first-category')\n self.assertEqual(category.threads, self.category.threads)\n self.assertEqual(category.posts, self.category.posts + 1)", "title": "" }, { "docid": "a66eead6e2390591db46da60d3bd2070", "score": "0.53387165", "text": "def test_show_reply_fields(self):\n t = ThreadFactory()\n u = UserFactory()\n\n self.client.login(username=u.username, password=\"testpass\")\n response = get(self.client, \"forums.posts\", args=[t.forum.slug, t.pk])\n self.assertContains(response, \"thread-reply\")", "title": "" }, { "docid": "1077583dffd5b7d5877053dcb42bf219", "score": "0.5329512", "text": "def RequestUserAttention(self, flags=USER_ATTENTION_INFO):", "title": "" }, { "docid": "4dca94b9189c3cf9093ba9a3399b63c6", "score": "0.5328529", "text": "def approve_to_contract(self,\n user,\n is_approved=False,\n is_denied=False,\n denial_reason=None,\n sender_pk=None):\n if user.role == User.FOO:\n # sender_pk is None here. FOO can only get feedback notification.\n # Notify county manager for my county\n field_officer = get_object_or_404(FieldOfficer, user=user)\n my_county = field_officer.area.county\n my_county_manager = CountyManager.objects.get(\n area__county=my_county)\n # CM that will receive this request.\n request_recipient = get_object_or_404(\n User, county_manager=my_county_manager)\n\n self.is_approved_by_field_officer_contract = True\n self.approve_activate = True # Indicates activation approval request has been sent\n is_approved = \"false\"\n if self.is_active:\n is_approved = \"true\"\n notify.send(\n # sender of notification\n field_officer,\n # recipient of notification\n recipient=request_recipient,\n # Statement describing this action\n verb=\n f\"Please approve {self.first_name} {self.last_name}. 
Request from {field_officer.user.first_name} {field_officer.user.last_name}\",\n # Notification metadata\n agent=self.id,\n agent_name=f\"{self.first_name} {self.last_name}\",\n field_officer=field_officer.id,\n is_approved=is_approved,\n )\n\n logger.critical( f\"Approval request for the Agent {self.first_name} {self.last_name} successfully sent to the County Manager {request_recipient.first_name} {request_recipient.last_name} by {field_officer.user.first_name} {field_officer.user.last_name}\")\n\n if user.role == User.CM:\n # sender_pk needed so that CM can send feedback/reply notification.\n try:\n # Ensure agent is already approved by his/her field officer\n assert self.is_approved_by_field_officer_contract, \"Approval needed by Field Outreach Officer first!\"\n # Notify field officer who sent approval request\n # Current user handling FOOs request - a CM\n county_manager = get_object_or_404(CountyManager, user=user)\n # Field officer who sent agent approval request to this CM\n field_officer = get_object_or_404(FieldOfficer, pk=sender_pk)\n feedback_recipient = get_object_or_404(\n User, field_officer=field_officer)\n\n verb = \"\"\n if is_approved:\n self.is_approved_by_county_manager_contract = True\n # reset these fields to nothing\n self.delete_reason = None\n self.contract_denied_reason = None\n self.activate()\n verb = f\"{self.first_name} {self.last_name} has been approved\"\n\n logger.critical( f\"Approval request for the Agent {self.first_name} {self.last_name} approved by the County Manager {county_manager.user.first_name} {county_manager.user.last_name}. Request sent by the Field Officer: {field_officer.user.first_name} {field_officer.user.last_name}\")\n if is_denied:\n self.contract_denied_reason = denial_reason\n verb = f\"{self.first_name} {self.last_name} has been denied approval\"\n logger.critical( f\"Approval request for the Agent {self.first_name} {self.last_name} denied by the County Manager {county_manager.user.first_name} {county_manager.user.last_name}. Denial Reason: {denial_reason}. Request sent by the Field Officer: {field_officer.user.first_name} {field_officer.user.last_name}\")\n\n self.approve_activate = False # To show request has been handled. 
Activated by fresh approval approval\n # format to json parseable values\n approved = \"false\"\n denied = \"false\"\n if is_approved:\n approved = \"true\"\n if is_denied:\n denied = \"true\"\n\n notify.send(\n county_manager,\n recipient=feedback_recipient,\n verb=verb,\n agent=self.id,\n denial_reason=denial_reason,\n # format to valid json values\n is_approved=approved,\n is_denied=denied,\n )\n\n except (AssertionError, Exception) as e:\n raise", "title": "" }, { "docid": "88bc2d1551df0fd0f675cdc5e20c64fe", "score": "0.5322964", "text": "def flag_abuse_for_thread(request, course_id, thread_id):\n course_key = CourseKey.from_string(course_id)\n user = cc.User.from_django_user(request.user)\n thread = cc.Thread.find(thread_id)\n thread.flagAbuse(user, thread)\n\n return JsonResponse(prepare_content(thread.to_dict(), course_key))", "title": "" }, { "docid": "60cfd358259ea2d4e9518c33a978071c", "score": "0.5317412", "text": "def reply(self, reply=None, failure=None, log_failure=True):", "title": "" }, { "docid": "2803316654a623f8a3b7eba5f8fb3458", "score": "0.53157556", "text": "def inform_user(self, user: str, channel: str, thread_ts: str):\n message = new_message(channel, user, thread_ts=thread_ts, blocks=[consts.AUTOREPLY_BLOCK])\n return self.slack_client.chat_postMessage(**message)", "title": "" }, { "docid": "85b208bce3a23c919ad3f493e32f978d", "score": "0.53052574", "text": "async def deny(self, ctx: commands.Context, target: discord.Member):\n bot = self.bot\n guild = ctx.guild\n author = ctx.author\n applicant = get(guild.roles, name=\"Staff Applicant\")\n if applicant in target.roles:\n await ctx.send(\"Would you like to specify a reason? (yes/no)\")\n pred = MessagePredicate.yes_or_no(ctx)\n try:\n await bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result is True:\n await ctx.send(\"Please, specify your reason now.\")\n\n def check(m):\n return m.author == author\n\n try:\n reason = await bot.wait_for(\"message\", timeout=120, check=check)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n await target.send(\n \"Your application in {0} has been denied.\\n*Reason:* {1}\".format(\n guild.name, reason.content\n )\n )\n else:\n await target.send(\n \"Your application in {0} has been denied.\".format(guild.name)\n )\n await target.remove_roles(applicant)\n await ctx.send(\"Denied {0}'s application.\".format(target.mention))\n else:\n await ctx.send(\n \"Uh oh. 
Looks like {0} hasn't applied for anything.\".format(\n target.mention\n )\n )", "title": "" }, { "docid": "98ca83c31080dbf27478de53354d0331", "score": "0.5293936", "text": "def send_approval_mail(ev, adm, path):\n prez = ev.association.associationuser_set.filter(role=2)\n if not prez:\n return False\n prez = prez[0].user.email\n\n context = {'title': 'Requete d\\'approbation: ' + ev.title,\n 'link': path,\n 'event': ev\n }\n\n obj = '[APPROBATION][' + ev.association.name + '] Requete d\\'approbation: ' + ev.title\n text_bd = render_to_string(\"emails/email-approval-template.txt\", context)\n html_bd = render_to_string(\"emails/email-approval-template.html\", context)\n\n targets = []\n if prez == ev.manager.email:\n targets = [adm.email]\n elif adm.email == ev.manager.email:\n targets = [prez]\n else:\n targets = [adm.email, prez, ev.manager.email]\n\n return send_mail(obj, text_bd, html_bd, targets)", "title": "" }, { "docid": "e1279376c6c759e98a6cebbac185d66d", "score": "0.5291902", "text": "def checkReply(self) -> None:\n\t\tif self.reply_parent_msg_id:\n\t\t\tself.is_reply = True", "title": "" }, { "docid": "84617e795a068a2d17f2830f7d1058d1", "score": "0.52897495", "text": "async def handle_survey(self, ctx, msg, guild): #updated\n\n react = await self.ask_yn(msg,\n \"Are you sure you want to submit this feedback anonymously?\\n\"\n \"You must add a reaction for feedback to be submitted!\",\n timeout=120)\n\n #===== if user says yes\n if react:\n header = \"```css\\nAnonymous User Feedback\\n```\"\n\n #===== Time out handing\n elif react == None:\n await msg.channel.send(\"You took too long respond. Cancelling action, feedback **not** sent.\")\n return\n\n #===== if user says no\n else:\n header = \"```css\\nUser Feedback\\n```User: \" + msg.author.mention + \"\\nMessage:\"\n\n msg_content = msg.content.strip()[(len(ctx.prefix) + len(ctx.invoked_with)):]\n msg_attach = []\n feedback_channel = discord.utils.get(guild.channels, id=self.config.channels['feedback_id'])\n\n\n async with feedback_channel.typing():\n #=== if msg has an attachment\n if msg.attachments:\n for attach in msg.attachments:\n fb = await attach.read()\n filename = attach.filename\n\n msg_attach.append(discord.File(BytesIO(fb), filename=filename, spoiler=False))\n\n #=== if feedback cannot be sent as one message\n if len(msg_content) > ((2000 - len(header))):\n m = await feedback_channel.send(header)\n await feedback_channel.send(msg_content)\n\n if msg_attach is not None:\n await feedback_channel.send(files=msg_attach)\n\n else:\n m = await feedback_channel.send(f\"{header} {msg_content}\", files=msg_attach)\n\n #===== Log info to database\n await self.db.execute(pgCmds.ADD_DM_FEEDBACK, msg.author.id, msg.channel.id, m.id, m.channel.id, m.guild.id, m.created_at)\n\n #===== Tell the user their feedback is sent\n await msg.channel.send(f\"Your feedback has been submitted.\\nThank you for helping make {guild.name} a better place.\")\n\n return", "title": "" }, { "docid": "7e8b527d63c7484ad57071477b101fd7", "score": "0.52748656", "text": "def vote():\n sqlsession = Session()\n threads = (sqlsession.query(models.Thread)\n .options(\n joinedload(models.Thread.task_assignations)\n .joinedload(models.TaskAssignation.task),\n joinedload(models.Thread.messages))\n .order_by(models.Thread.last_msg.desc())).all()\n return render_template('vote.html', threads=threads)", "title": "" }, { "docid": "853647f510f9ea9e7144483d18168c7e", "score": "0.52743614", "text": "def reply_webapi(self, text, attachments=None, blocks=None, 
as_user=True, in_thread=None):\n if in_thread is None:\n in_thread = 'thread_ts' in self.body\n\n if in_thread:\n self.send_webapi(text, attachments=attachments, blocks=blocks, as_user=as_user, thread_ts=self.thread_ts)\n else:\n text = self.gen_reply(text)\n self.send_webapi(text, attachments=attachments, blocks=blocks, as_user=as_user)", "title": "" }, { "docid": "dfe208fa83d90bf8403b9b4b2d531bf6", "score": "0.52717394", "text": "def test_post_user_reply(self):\n with self.client() as c:\n with c.session_transaction() as sess:\n sess[ 'user_id' ] = 1\n sess[ 'token' ] = self.token\n c.set_cookie('localhost', 'MYCOOKIE', 'cookie_value')\n res = c.post('/reply', json=self.reply)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data[ 'success' ], True)\n self.assertEqual(data[ 'message' ], 'reply created successfully')", "title": "" }, { "docid": "cf8d0ea3c44907f1b03c0a884bd18a17", "score": "0.52692974", "text": "def new_reply(self, permalink, message, author): \n \"\"\"\n print self._replyMessage.format(\n message,\n permalink\n )\n \"\"\"\n print \"---------------\"\n print author\n print permalink \n try:\n reddit.send_message(\n recipient=str(author), \n subject='Hello, ' + str(author) + ' RemindMeBot Here!', \n message=self._replyMessage.format(\n message=message,\n original=permalink,\n parent= self.parent_comment(permalink)\n ))\n print \"Did It\"\n return True \n except InvalidUser as err:\n print \"InvalidUser\", err\n return True\n except APIException as err:\n print \"APIException\", err\n return False\n except IndexError as err:\n print \"IndexError\", err\n return False\n except (HTTPError, ConnectionError, Timeout, timeout) as err:\n print \"HTTPError\", err\n time.sleep(10)\n return False\n except RateLimitExceeded as err:\n print \"RateLimitExceeded\", err\n time.sleep(10)\n return False\n except praw.errors.HTTPException as err:\n print\"praw.errors.HTTPException\"\n time.sleep(10)\n return False", "title": "" }, { "docid": "9455429787ec0911e802a6cc01b24b05", "score": "0.5251416", "text": "def get_request(thread, user):\n if thread is None:\n raise ValueError('Invalid parameter: thread')\n\n if user is None:\n raise ValueError('Invalid parameter: user')\n\n threaduser = thread.get_threaduser(user.id)\n\n return 'chat/request/' + threaduser.id if threaduser is not None else None", "title": "" }, { "docid": "ebe5a1ed79d162cc498792d00d41f7d4", "score": "0.5222939", "text": "def reply(self):\n data = self.get_selected_item()\n\n if data['type'] == 'Submission':\n body = data['text']\n description = 'submission'\n reply = data['object'].add_comment\n elif data['type'] in ('Comment', 'InboxComment'):\n body = data['body']\n description = 'comment'\n reply = data['object'].reply\n elif data['type'] == 'Message':\n body = data['body']\n description = 'private message'\n reply = data['object'].reply\n else:\n self.term.flash()\n return\n\n # Construct the text that will be displayed in the editor file.\n # The post body will be commented out and added for reference\n lines = [' |' + line for line in body.split('\\n')]\n content = '\\n'.join(lines)\n comment_info = docs.REPLY_FILE.format(\n author=data['author'],\n type=description,\n content=content)\n\n with self.term.open_editor(comment_info) as comment:\n if not comment:\n self.term.show_notification('Canceled')\n return\n\n with self.term.loader('Posting {}'.format(description), delay=0):\n reply(comment)\n # Give reddit time to process the submission\n time.sleep(2.0)\n\n if 
self.term.loader.exception is None:\n self.reload_page()\n else:\n raise TemporaryFileError()", "title": "" }, { "docid": "f54f3577fbaf35ae628ed4a2e215b9fa", "score": "0.5213145", "text": "def reply():\n if request.method == \"GET\":\n qid = request.args.get(\"qid\")\n return render_template(\"teacherform.html\", qid=qid)\n else:\n # Insert answer\n rows = db.execute(\"INSERT INTO Question (AnswerText) VALUES(:answertext) WHERE QuestionId = :qid\",\n answertext=request.form.get(\"AnswerText\"), qid=request.form.get(\"QuestionID\"))", "title": "" }, { "docid": "e7db85153e7de77d5c52a4584dfed9f5", "score": "0.5209699", "text": "def test_thread_visibility(self):\n self.override_acl({'can_see': 0})\n response = self.client.post(self.api_link)\n self.assertEqual(response.status_code, 404)\n\n self.override_acl({'can_browse': 0})\n response = self.client.post(self.api_link)\n self.assertEqual(response.status_code, 404)\n\n self.override_acl({'can_see_all_threads': 0})\n response = self.client.post(self.api_link)\n self.assertEqual(response.status_code, 404)", "title": "" }, { "docid": "60007da1aec77746f8af34ac66e19f4b", "score": "0.5203198", "text": "async def reply(self, msg, t=1):\n return await self.channel.sendChat(\n msg,\n json.dumps(\n {\n \"attach_only\": False,\n \"attach_type\": t,\n \"mentions\": [],\n \"src_linkId\": self.channel.li,\n \"src_logId\": self.logId,\n \"src_mentions\": [],\n \"src_message\": self.message,\n \"src_type\": self.type,\n \"src_userId\": self.author,\n }\n ),\n 26,\n )", "title": "" }, { "docid": "5ce61e934565561312a3a4923cbe7db9", "score": "0.5198554", "text": "def reply_function(comment_id_input, api_input, reddit_object_input):\n reply_handling(comment_id_input, api_input, reddit_object_input)\n return 'No command found'", "title": "" }, { "docid": "65b0005105d899f5f0004131b3ddb28d", "score": "0.5197287", "text": "def test_omit_other_moderation_queues(self):\n self.category.require_threads_approval = True\n self.category.require_edits_approval = True\n self.category.save()\n\n self.override_acl({\n 'require_threads_approval': 1,\n 'require_edits_approval': 1,\n })\n\n response = self.client.post(\n self.api_link, data={\n 'post': \"Lorem ipsum dolor met!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n thread = Thread.objects.get(pk=self.thread.pk)\n self.assertFalse(thread.is_unapproved)\n self.assertFalse(thread.has_unapproved_posts)\n self.assertEqual(thread.replies, self.thread.replies + 1)\n\n post = self.user.post_set.all()[:1][0]\n self.assertFalse(post.is_unapproved)\n\n category = Category.objects.get(slug='first-category')\n self.assertEqual(category.threads, self.category.threads)\n self.assertEqual(category.posts, self.category.posts + 1)", "title": "" }, { "docid": "8310b6a90aa0f05f63d0f84a06a36df9", "score": "0.51951075", "text": "def create_reply(self, request):\n # get new reply_id\n school_id = request[\"sid\"]\n user_email = request[\"email\"]\n reply_id = self.name_provider.generate_reply_id(request[\"email\"], request[\"content_id\"])\n\n return super().make_content(reply_id, {\n \"sid\": school_id,\n \"content_id\": request[\"content_id\"],\n \"text\": request[\"text\"],\n \"uid\": request[\"uid\"],\n \"email\": user_email,\n \"time\": request[\"time\"]\n })", "title": "" }, { "docid": "51ab773fec6d7e41db44b47ed9a893dc", "score": "0.5194453", "text": "def test_review_reply_email(self):\n review_request = self.create_review_request(\n summary='My test review request')\n review_request.publish(review_request.submitter)\n\n 
base_review = self.create_review(review_request=review_request)\n base_review.publish()\n\n # Clear the outbox.\n mail.outbox = []\n\n reply = self.create_reply(base_review)\n reply.publish()\n\n from_email = build_email_address_for_user(reply.user)\n\n self.assertEqual(len(mail.outbox), 1)\n self.assertEqual(mail.outbox[0].from_email, self.sender)\n self.assertEqual(mail.outbox[0].extra_headers['From'], from_email)\n self.assertEqual(mail.outbox[0].subject,\n 'Re: Review Request %s: My test review request'\n % review_request.pk)\n self.assertValidRecipients([\n review_request.submitter.username,\n base_review.user.username,\n reply.user.username,\n ])\n\n message = mail.outbox[0].message()\n self.assertEqual(message['Sender'], self._get_sender(reply.user))", "title": "" }, { "docid": "b7b3160e956903119209fd3ee3f2823d", "score": "0.51919675", "text": "def reply_handling(comment_id, api_key, reddit_object):\n comment_id.mark_read()\n # If you don't mark read before, it'll reply twice.\n comment_id_string = fullname_creator(comment_id)\n unread_comment = reddit_object.comment(id=comment_id_string)\n result = command_processor(comment_id.body, api_key)\n unread_comment.reply(result)\n log_to_cloud_watch_output(comment_id, result)\n comment_id.save()\n return None", "title": "" }, { "docid": "fefb821e9cd299507d88211bd877d99c", "score": "0.5187359", "text": "def get_user_approval():\n response = input(\"\\nIs this info correct? (y/N): \").lower()\n if response not in [\"y\", \"yes\"]:\n logging.info(\"Program cancelled by the user.\")\n print(\"\\nCancelling...\")\n exit()", "title": "" }, { "docid": "08cf7adf59b48fb33a4ed06c65ae9137", "score": "0.5186599", "text": "def test_gmail_users_threads_modify(self):\n pass", "title": "" }, { "docid": "f9c17276e4896135a87a6f0491dccbfb", "score": "0.5183106", "text": "def can_approve(user, approval):\n\n return approval.can_be_approved_by(user)", "title": "" }, { "docid": "ae533e744b88f97cbda05c481cffce36", "score": "0.5175433", "text": "def vote_for_thread(request, course_id, thread_id, value):\n thread = cc.Thread.find(thread_id)\n result = _vote_or_unvote(request, course_id, thread, value)\n return result", "title": "" }, { "docid": "622bdb6b4bca7f5ccf948a4b000da985", "score": "0.51740474", "text": "def accept_submission(request):\n if request.method == 'GET':\n if request.session.keys():\n handle = request.session.get(\"handle\")\n user_type = request.session.get(\"user_type\")\n if viewhelper.is_moderator(user_type):\n task_id = request.GET.get(\"task_id\")\n user_handle = request.GET.get(\"handle\")\n if task_id is not None and user_handle is not None:\n task = models.get_task(task_id)\n user_task_map = models.get_user_task_map(task_id=task_id, handle=user_handle)\n if task is not None and task.owner == handle and user_task_map is not None:\n models.accept_submission(task_id=task_id, handle=user_handle)\n return redirect('/janta/task-status/?task_id='+task_id)\n return redirect(\"/janta/index/\")\n return redirect(\"/janta/login/\")", "title": "" }, { "docid": "0a7f2319ff1ebd6b0594272cf3505eee", "score": "0.51734227", "text": "def thread(thread_id):\n sqlsession = Session()\n thread = (sqlsession.query(models.Thread)\n .options(\n joinedload(models.Thread.messages)\n .joinedload(models.Message.poster_email)\n .joinedload(models.PosterEmail.poster),\n joinedload(models.Thread.task_assignations))\n .filter(models.Thread.id == thread_id)).one()\n tasks = (sqlsession.query(models.Task)).all()\n posters = set(msg.poster_email.poster\n for msg in 
thread.messages\n if msg.poster_email is not None)\n # The email addresses participating in this thread but not yet associated\n # to a Poster\n registerable_senders = [msg.from_\n for msg in thread.messages\n if not msg.poster_email]\n return render_template('thread.html',\n thread=thread, tasks=tasks, posters=posters,\n registerable_senders=registerable_senders)", "title": "" }, { "docid": "69ed19955847d8ea0979e0d3bec28620", "score": "0.5173326", "text": "def handle(self, *args, **options):\n self.stdout.write(\"Sending review notifications...\")\n\n # (author) -> (locale, project) -> (approved, rejected)\n data = defaultdict(lambda: defaultdict(lambda: (list(), list())))\n start = timezone.now() - timedelta(days=1)\n for suggestion in Translation.objects.filter(\n (Q(approved_date__gt=start) | Q(rejected_date__gt=start))\n & Q(user__profile__review_notifications=True)\n ):\n author = suggestion.user\n locale = suggestion.locale\n project = suggestion.entity.resource.project\n\n if suggestion.approved and suggestion.approved_user != author:\n data[author][(locale, project)][0].append(suggestion.entity.pk)\n elif suggestion.rejected and suggestion.rejected_user != author:\n data[author][(locale, project)][1].append(suggestion.entity.pk)\n\n for author, notifyData in data.items():\n desc = self.get_description(notifyData)\n notify.send(\n sender=author,\n recipient=author,\n verb=\"has reviewed suggestions\",\n description=desc,\n )\n\n self.stdout.write(f\"Sent {len(data)} review notifications.\")", "title": "" }, { "docid": "6b900bf22049ce667f4f495ebd6d89f2", "score": "0.51663184", "text": "def message_get_reply_to(self, cr, uid, ids, context=None):\n return [task.project_id.message_get_reply_to()[0] if task.project_id else False\n for task in self.browse(cr, uid, ids, context=context)]", "title": "" }, { "docid": "44c9e3bf4b834bcf5b9d61dce3bac550", "score": "0.5165667", "text": "def setCompletedForUser():", "title": "" }, { "docid": "d3afb4f6ab9e019f9ee46205642ed307", "score": "0.5150133", "text": "def test_category_moderation_queue(self):\n self.category.require_replies_approval = True\n self.category.save()\n\n response = self.client.post(\n self.api_link, data={\n 'post': \"Lorem ipsum dolor met!\",\n }\n )\n self.assertEqual(response.status_code, 200)\n\n thread = Thread.objects.get(pk=self.thread.pk)\n self.assertFalse(thread.is_unapproved)\n self.assertTrue(thread.has_unapproved_posts)\n self.assertEqual(thread.replies, self.thread.replies)\n\n post = self.user.post_set.all()[:1][0]\n self.assertTrue(post.is_unapproved)\n\n category = Category.objects.get(slug='first-category')\n self.assertEqual(category.threads, self.category.threads)\n self.assertEqual(category.posts, self.category.posts)", "title": "" }, { "docid": "15219a8e17c78b59959c0eb72f373754", "score": "0.51477", "text": "async def reply(self, cls:\"TwitchClient\", reply:str) -> None:\n\t\treturn await cls.sendMessage(self.room_name, reply)", "title": "" }, { "docid": "86ad0e25bfec88af52ff6fe5a0d4392d", "score": "0.51399666", "text": "def auto_reply(msg):\n if chat_type_.is_all or msg.sender.nick_name in chat_type_.filter:\n _msg_reply = self._replier.do_reply(msg)\n self._log(msg_=msg, msg_reply_=_msg_reply)", "title": "" }, { "docid": "dc3b7f73373a8eaa67ff6196acaee280", "score": "0.51290417", "text": "def reply(self, *args, **kwargs):\n\treply_comment = self.__class__(*args, **kwargs)\n\treply_comment.parent = self\n reply_comment.root = self.root\n\treturn(reply_comment)", "title": "" }, { "docid": 
"380ffd18c94ba47f390ed012e2358e95", "score": "0.51267886", "text": "def _(self, user, perm, subject):\n return False", "title": "" }, { "docid": "5fd54b5bd112dd3be6bc7b96bcf150c3", "score": "0.51249397", "text": "def bot_replies(comment, reply_link):\n\ttry:\n\t\t#this is just for my own edification\n\t\tauthor = str(comment.author.name)\n\t\tauthor_id = str(comment.author.id)\n\t\tcomment_id = str(comment.id)\n\t\ttext = comment.text\n\t\tprint comment_id + \": \" + text + \" \" + author + \" \" + author_id + \"\\n\"\n\t\t\n\t\t#this actually posts the reply\n\t\tcomment.reply(reply_link)\n\t\tmake_pic(author)\n\t\ttime.sleep(Reply_delay)\n\t\t# yolo_amount += 1\n\texcept UnicodeEncodeError:\n\t\tprint \"There was an unicode error.\"\n\t\t# continue\n\texcept AttributeError:\n\t\tprint \"AttributeError. Skipping it.\"\n\t\t# continue\t", "title": "" }, { "docid": "7e0bc2642282ea341bfa3d645b19e10c", "score": "0.51174825", "text": "def getRepliesByThreadId(self,channel_id,thread_id):\n\n threadMessages = self.sc.api_call(\n \"conversations.replies\",\n channel=channel_id,\n ts=thread_id\n\n #inclusive=true\n ) \n\n\n \n messages = threadMessages[\"messages\"]\n threadSpecificMessages = {}\n profiles = {}\n for message in messages:\n if \"client_msg_id\" in message:\n message[\"id\"] = message['client_msg_id'] \n if \"bot_id\" in message:\n message[\"id\"] = message['bot_id']\n if not \"reply_count\" in message:\n threadSpecificMessages.update({message['id']:message})\n if \"user\" in message:\n if message['user'] in profiles:\n message['profile'] = profiles[message['user']]\n else:\n profiles[message['user']] = self.getUserById(message['user'])\n message[\"profile\"] = profiles[message['user']]\n return threadSpecificMessages", "title": "" }, { "docid": "f14e3fd7b0adc4cfd1243ca8b853b906", "score": "0.51111287", "text": "def send_reply(self, context, reply):\n raise NotImplementedError", "title": "" }, { "docid": "771d2d09035994557c6dcd64d9a5812e", "score": "0.5106316", "text": "def _request_speaker_permission(client, channel_name, user_id):\n if not channel_speaker_permission:\n client.audience_reply(channel_name, True, False)\n _wait_func = _wait_speaker_permission(\n client, channel_name, user_id)\n print(\n \"[/] You've raised your hand. 
Wait for the moderator to give you the permission.\")", "title": "" }, { "docid": "c9f08579725ff3a7c4d462dc5dfb98d5", "score": "0.5106127", "text": "async def repost_thread(self, ctx):\n self.last_post_id = \"something else\"\n await ctx.message.add_reaction('\\u2705')", "title": "" }, { "docid": "7e38224d1fc87dd561f2ca417d8330fe", "score": "0.5101742", "text": "def action_waiting_approval(self):\n self.approval_user_id = self.env.user.id\n self.state = 'sale'", "title": "" }, { "docid": "549845303ea0892b2c9583217acaa1e9", "score": "0.5098259", "text": "def checkCompletedFor(user_id):", "title": "" }, { "docid": "c71f89bf633819ee8774cffddc203a6b", "score": "0.50951505", "text": "async def create_interview_room(self, guild: discord.Guild, app: List[str]) -> None:\n\n applicant = discord.utils.get(guild.members, id=app[1])\n\n interview_info = self.interview_info.get(app[2])\n\n # Create Private Thread for the user\n app_parent = self.client.get_channel(interview_info['parent'])\n\n #delete this later\n message = None\n # message = await app_parent.send('Uncomment this in your development environment')\n\n txt_channel = await app_parent.create_thread(name=f\"{applicant.display_name}'s-interview\", message=message, reason=f\"{app[2].title()} Interview Room\")\n\n # Add permissions for the user in the interview room\n parent_channel = self.client.get_channel(interview_info['parent'])\n interview_vc = self.client.get_channel(interview_info['interview'])\n\n # Updates the applicant's application in the database, adding the channels ids\n await self.update_application(applicant.id, txt_channel.id, interview_vc.id, app[2])\n\n # Set channel perms for the user.\n await parent_channel.set_permissions(applicant, read_messages=True, send_messages=False, view_channel=True)\n await interview_vc.set_permissions(applicant, speak=True, connect=True, view_channel=True)\n\n app_embed = discord.Embed(\n title=f\"{applicant.name}'s Interview\",\n description=f\"\"\"\n Hello, {applicant.mention}, we have received and reviewed your `{app[2].title().replace('_', ' ')}` application. 
In order to explain how our system works we have to schedule a voice conversation with you.\n When would be the best time to talk to one of our staff?\"\"\",\n color=applicant.color)\n\n formatted_pings = await self.format_application_pings(guild, interview_info['pings'])\n await txt_channel.send(content=f\"{formatted_pings}, {applicant.mention}\", embed=app_embed)", "title": "" }, { "docid": "0c6cfbb2c40c3d44358603dbfb719fca", "score": "0.50939536", "text": "def new_thread(request, mid, pid=None):\n context = get_mentorship_context(request.user, mid)\n template = 'mentor/new_thread.html'\n mentorship = context['mentorship']\n\n # Selected writing prompt\n prompt = None\n if pid is not None:\n prompt = get_object_or_404(CurriculumPrompt, pk=pid)\n context['prompt'] = prompt\n\n if request.method == 'POST':\n if 'subject' not in request.POST:\n raise Http404\n if 'text' not in request.POST:\n raise Http404\n subject = request.POST['subject']\n text = request.POST['text']\n \n error = False\n if not len(subject):\n request.user.message_set.create(message=\"Please provide a subject for the new thread.\")\n error = True\n if not len(text):\n request.user.message_set.create(message=\"Please write some text in the message body.\")\n error = True\n if not error:\n thread = mentorship.messagethread_set.create(subject=subject, prompt=prompt)\n thread.message_set.create(sender=request.user, text=text)\n return redirect(\"view-thread\", mid=mid, tid=thread.id)\n\n return render_to_response(template, context, RequestContext(request))", "title": "" }, { "docid": "e369f54f3115914136902445d3c673a8", "score": "0.5086928", "text": "def record_reply_received(self, email):\n queue = Queue.objects.get(pk=1)\n actions = self.action.filter(queue=queue)\n resolution = 'Reply received from %s on %s' % (email.fromParticipant, email.date)\n for action in actions:\n follow_up = FollowUp (\n ticket=action,\n date=datetime.now(),\n comment=resolution,\n title=\"Reply received\",\n public=True)\n follow_up.save()\n \n action.status = Ticket.CLOSED_STATUS\n action.resolution = follow_up.comment\n action.save()", "title": "" }, { "docid": "58adc39c34968615f32299aab8dbf024", "score": "0.50842065", "text": "def __str__(self):\n return str(self.user_id) + \"'s pto request for \" + str(self.date) + \" in approval: \"", "title": "" }, { "docid": "35e3104ac6f02d48b80f6633ce197902", "score": "0.50817585", "text": "def _(self, user, perm, subject):\n return True", "title": "" }, { "docid": "81cec3dff14c9ad0fa0893174fa6b8c2", "score": "0.5079312", "text": "def pending_task():\r\n row = db(db.a_owner.owner_email == get_email()).select().first()\r\n q = ((db.task.shared_email == row.owner_email) & (db.task.pending == True))\r\n\r\n db.task.author.readable=True\r\n db.task.author.label='Sent From'\r\n db.task.author.represent = lambda id, row: id.owner_email\r\n grid = SQLFORM.grid(q,\r\n fields=[db.task.title, db.task.author],\r\n csv=False, create=False, editable=False, deletable=False,\r\n links=[lambda row: A('Accept',_class=\"btn\",_href=URL(\"default\",\"accept_task\",args=[row.id])),\r\n lambda row: A('Reject',_class=\"btn\",_href=URL(\"default\",\"reject_task\",args=[row.id]))]\r\n )\r\n return dict(grid=grid)", "title": "" }, { "docid": "f18670397df3f47cffb5f6803f70836c", "score": "0.507773", "text": "def messageReply(self, user, msg, pm):\n cmd = msg.split(' ')[0] #grab the command from the start [\"!cmd\",\"message\"]\n if cmd in self.COMMANDS.keys(): #is the command valid\n if 
self.COMMANDS[cmd].cmd_action(self,msg,user): #exicute the action and if true reply\n self.COMMANDS[cmd].reply(self,pm,user) #reply to user\n \n output = ''\n respond = False\n \n #should be user-op commands here probably\n if user != \"SalesBot\" and not pm:\n if 'SalesBot' in msg:\n if 'who made you' in msg.lower():\n output = (\"Gafail started making me on the 30/10/2014\") \n respond = True \n \n elif 'help' in msg.lower():\n saleshelp(self,user)\n respond = False \n\n elif 'night' in msg.lower():\n output = (\"Good night\") \n respond = True \n \n else:\n output = (\"hello\") \n respond = False\n \n elif 'anyone' in msg.lower():\n if 'sell' in msg.lower():\n output = (\"Please type !forsale to see what is being sold on CantHub or add a buy request with !want <your item here>\") \n respond = True \n elif 'buy' in msg.lower():\n output = (\"Trying to sell something? Why not list it on SalesBot with !sell 'your item here'\") \n respond = True \n \n elif 'anybody ' in msg.lower():\n if 'sell' in msg.lower():\n output = (\"Please type !forsale to see what is being sold on CantHub or add a buy request with !want <your item here>\") \n respond = True \n elif 'buy' in msg.lower():\n output = (\"Trying to sell something? Why not list it on SalesBot with !sell <your item here>\") \n respond = True \n else:\n respond = False \n else:\n respond = False \n \n if respond:\n self.say(output)", "title": "" }, { "docid": "71563106dad8eb04189cae5d2f7ff866", "score": "0.50668275", "text": "def buildReply(self, body=None):\n\t\tmsg = Message(to=self.getFrom(), frm=self.getTo(), body=body)\n\t\tthr = self.getThread()\n\t\tif thr:\n\t\t\tmsg.setThread(thr)\n\t\treturn msg", "title": "" }, { "docid": "6de32d360d1b57df99fb4b160fcdf83e", "score": "0.5063836", "text": "def send_email(userid):", "title": "" }, { "docid": "2a182bb394c8908a922bf35423ec198d", "score": "0.50632006", "text": "def user_is_in_thread(self, user_id, thread):\n if user_id == SYSTEM_USER:\n return True\n for user in thread.users:\n if user_id == user.id:\n return True\n return False", "title": "" }, { "docid": "89b5bf9379abd6ac108b5e317fb44c50", "score": "0.5058523", "text": "def _wait_speaker_permission(client, channel_name, user_id):\n # Get some random users from the channel.\n _channel_info = client.get_channel(channel_name)\n if _channel_info['success']:\n for _user in _channel_info['users']:\n if _user['user_id'] != user_id:\n user_id = _user['user_id']\n break\n # Check if the moderator allowed your request.\n res_inv = client.accept_speaker_invite(channel_name, user_id)\n if res_inv['success']:\n print(\"[-] Now you have a speaker permission.\")\n print(\" Please re-join this channel to activate a permission.\")\n return False\n return True", "title": "" }, { "docid": "82f91faa5f654dc9c8db11fbd70260e1", "score": "0.5053003", "text": "def post_approve_approval_request(\n self, response: accessapproval.ApprovalRequest\n ) -> accessapproval.ApprovalRequest:\n return response", "title": "" }, { "docid": "a4b47c878c6e7098c8b25e6a68a3e529", "score": "0.50466216", "text": "def action(self, user, channel, msg):\n pass", "title": "" }, { "docid": "e916550d2b535dc869d31a589913996f", "score": "0.5046468", "text": "def approve_user(user_id, awaiting_role_id, new_role_id, mail_context):\n\n user = User.query.get(user_id)\n user.roles.remove(\n Role.query.get(awaiting_role_id)\n )\n user.roles.append(\n Role.query.get(new_role_id)\n )\n user.approved = True\n db.session.commit()\n\n msg = Message(\n \"Altmetrics: Your account has been 
approved\",\n sender=current_app.config.get('MAIL_DEFAULT_SENDER'),\n recipients=[user.email]\n )\n mail_context.update(user_name=user.first_name)\n configure_mail_body(msg, 'account_approved', mail_context)\n\n mail.send(msg)", "title": "" }, { "docid": "6c0c1407f0980d047fc9e527e374c6f9", "score": "0.50457805", "text": "def gmail_send_reply(self, reply):\n # print(\"Simulating sending a reply to gmail:\", reply.split(\"|\", 1)[0])\n self.gmail.gmail_send_reply(self.gmail.gmail, reply)\n return", "title": "" }, { "docid": "2e45fad8a08ae46ba783f5d088018502", "score": "0.50348073", "text": "async def take_match_as_commentator(self, ctx, *args):\n await self.take_or_drop_match(ctx, args, True, tosurnament.UserDetails.get_as_commentator(self.bot, ctx.author))", "title": "" }, { "docid": "10b64f36b7e68db38402f47c9a9a199c", "score": "0.50305253", "text": "def _accept_member(self):\n search_SQL = t4.search(\"USER_ID\", self.id)\n db.controller.execute(search_SQL)\n results = db.controller.get()\n if len(results) == 0:\n return -1\n else:\n for result in results:\n user_id, status, timestamp = result[1], result[2], result[3]\n if status == \"PENDING\":\n update_SQL = t4.update_row(\"STATUS\", \"OK\", \"USER_ID\", user_id)\n db.controller.execute(update_SQL)\n return 0", "title": "" }, { "docid": "bc128f4b691c76117998b69ee120d871", "score": "0.5029028", "text": "def onMessage(self, mid, author_id, message, message_object, thread_id, thread_type, **kwargs):\n\n print(thread_id)\n\n self.markAsDelivered(author_id, thread_id)\n self.markAsRead(author_id)\n\n # if message_object.text == 'Talk with strangers!':\n # self.action = 2\n\n if author_id == self.uid:\n if message_object.text == 'Stop!':\n self.action = 0\n message_relay = 'Bot: Đã tiếp nhận yêu cầu!!'\n relay = Message(text = message_relay)\n self.send(relay, thread_id=thread_id, thread_type=thread_type)\n if message_object.text == 'Start!':\n self.action = 1\n message_relay = 'Bot: Đã tiếp nhận yêu cầu!!'\n relay = Message(text = message_relay)\n self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n if self.action == 0:\n return\n else:\n log.info(\"{} from {} in {}\".format(message_object, thread_id, thread_type.name))\n \n # If you're not the author, echo\n if author_id != self.uid:\n user = self.fetchUserInfo(author_id)[author_id]\n # Đánh dấu “Yêu thích” tin nhắn người gửi\n self.reactToMessage(mid, MessageReaction.LOVE)\n\n # message_relay = 'Vui lòng chờ trong giây lát!'\n # relay = Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n # message_relay = 'Thông tin người gửi: {}\\n'.format(user.name)\n # relay = Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n # message_relay = 'Mã người gửi: {}\\n'.format(user.uid)\n # relay = Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n # message_relay = 'Hình ảnh người gửi: {}'.format(user.photo)\n # relay = Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n # message_relay = 'Có phải bạn của chủ nhân: {}\\n'.format(user.is_friend)\n # relay = Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n # message_relay = 'Giới tính: {}\\n'.format(user.gender)\n # relay = Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n # message_relay = 'Địa chỉ profile: {}\\n'.format(user.url)\n # relay = 
Message(text = message_relay)\n # self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n message_relay = 'Bot: Đã nhận được tin nhắn từ \"{}\" với nội dung là: \"{}\"'.format(self.fetchThreadInfo(thread_id)[thread_id].name, message)\n relay = Message(text = message_relay)\n self.send(relay, thread_id=thread_id, thread_type=thread_type)\n\n message_relay = 'Bot: Đang tiến hành gửi mail!'\n relay = Message(text = message_relay)\n self.send(relay, thread_id=thread_id, thread_type=thread_type)\n # Gửi mail\n send_mail = SendMail\n email_from = 'dinhtruong018@gmail.com'\n password = 'XXX'\n email_to = 'XXX@gmail.com'\n try:\n send_mail.Send(email_from, password, email_to, user.name, message)\n message_relay = \"\"\"\n Bot: Thư đã được gửi đến Peter.\n Đừng đi đâu hết!\n Peter sẽ trả lời bạn ngay ~~\n \"\"\"\n relay = Message(text = message_relay)\n self.send(relay, thread_id=thread_id, thread_type=thread_type)\n except:\n message_relay = \"\"\"\n Bot: Có lỗi khi gửi mail!\n Xin lỗi!\n \"\"\"\n relay = Message(text = message_relay)\n self.send(relay, thread_id=thread_id, thread_type=thread_type)", "title": "" } ]
ab03f1747aff3494dfab4385cd45f172
getStateSet(LODRef self) -> StateSet getStateSet(LODRef self) -> StateSet
[ { "docid": "16883e913ce1cddc5b304449a08d6bf2", "score": "0.76371", "text": "def getStateSet(self, *args):\n return _osg.LODRef_getStateSet(self, *args)", "title": "" } ]
[ { "docid": "aa7e5bd964b2b6cf791ceef094fe12ab", "score": "0.728946", "text": "def getStateSet(self, *args):\n return _osg.PagedLODRef_getStateSet(self, *args)", "title": "" }, { "docid": "80b2eb5fffe4ea6649af0c73500b6410", "score": "0.7240245", "text": "def getOrCreateStateSet(self):\n return _osg.LODRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "db6694f4e6770eca58f36cebdfffd0b6", "score": "0.7008767", "text": "def getStateSet(self, *args):\n return _osg.TransformRef_getStateSet(self, *args)", "title": "" }, { "docid": "b48638de9cd6694281b89305a79169de", "score": "0.70003647", "text": "def getStateSet(self, *args):\n return _osg.GeodeRef_getStateSet(self, *args)", "title": "" }, { "docid": "8c2734d4ea9415ab41148d6a5931740a", "score": "0.6935852", "text": "def getStateSet(self, *args):\n return _osg.AutoTransformRef_getStateSet(self, *args)", "title": "" }, { "docid": "da6a6db2fd422fb63b0621e098f9b95f", "score": "0.6920046", "text": "def getStateSet(self, *args):\n return _osg.SwitchRef_getStateSet(self, *args)", "title": "" }, { "docid": "2ec7edc47148b0648cf85619d4452a34", "score": "0.69025815", "text": "def getOrCreateStateSet(self):\n return _osg.PagedLODRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "dd74e7e7cae8b39d134fa042ad68d714", "score": "0.6876072", "text": "def getStateSet(self, *args):\n return _osg.BillboardRef_getStateSet(self, *args)", "title": "" }, { "docid": "7f50100578e2b22bce481ac7d2e74c68", "score": "0.6868588", "text": "def getStateSet(self, *args):\n return _osg.GroupRef_getStateSet(self, *args)", "title": "" }, { "docid": "9147bf20f1e3b5ef1e26dd8735b98a2f", "score": "0.6790786", "text": "def getStateSet(self, *args):\n return _osg.ProjectionRef_getStateSet(self, *args)", "title": "" }, { "docid": "8978f81eee30444d0ba39d45950a9fce", "score": "0.67904186", "text": "def getStateSet(self, *args):\n return _osg.MatrixTransformRef_getStateSet(self, *args)", "title": "" }, { "docid": "84d9f43f54f38dffdaa920be843c7dcb", "score": "0.67845875", "text": "def getStateSet(self, *args):\n return _osg.Node_getStateSet(self, *args)", "title": "" }, { "docid": "986fa36a99643973dbe20cdc497457e1", "score": "0.67650485", "text": "def getStateSet(self, *args):\n return _osg.NodeRef_getStateSet(self, *args)", "title": "" }, { "docid": "7ed782d0c859162851f80caa82e713d1", "score": "0.67460066", "text": "def setStateSet(self, *args):\n return _osg.LODRef_setStateSet(self, *args)", "title": "" }, { "docid": "2414a23f9b5e5c9947ab739ad9c06355", "score": "0.6740409", "text": "def getStateSet(self, *args):\n return _osg.ProxyNodeRef_getStateSet(self, *args)", "title": "" }, { "docid": "382fbe9cefdde8c546a1791d0f1618be", "score": "0.66764235", "text": "def getStateSet(self, *args):\n return _osg.Drawable_getStateSet(self, *args)", "title": "" }, { "docid": "93c088fc66ed2c9403e5f63b84212179", "score": "0.66050786", "text": "def getOrCreateStateSet(self):\n return _osg.GeodeRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "f29b4222faa927f386a81a86e8929809", "score": "0.65530837", "text": "def setStateSet(self, *args):\n return _osg.PagedLODRef_setStateSet(self, *args)", "title": "" }, { "docid": "e245452717dd1a2e325ba801d5484973", "score": "0.6464153", "text": "def getOrCreateStateSet(self):\n return _osg.TransformRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "9a3e6f75415ca314bffb21e956644df1", "score": "0.6459196", "text": "def getStateSet(self, *args):\n return _osg.CameraRef_getStateSet(self, *args)", "title": "" }, { "docid": 
"9cd67781f5ccc7be49b9c6dd445d012a", "score": "0.6425586", "text": "def getOrCreateStateSet(self):\n return _osg.ProjectionRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "a6a3a966cd747bc548545fd09ab203c6", "score": "0.6404033", "text": "def getOrCreateStateSet(self):\n return _osg.AutoTransformRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "9fafa0590414153d292489e32214c204", "score": "0.63546985", "text": "def getOrCreateStateSet(self):\n return _osg.MatrixTransformRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "018c7695fd6429b3bcb2a9e54d235794", "score": "0.63445044", "text": "def getStateSet(self, *args):\n return _osg.CameraViewRef_getStateSet(self, *args)", "title": "" }, { "docid": "0f77f0b21b88cd43265d68ffe2c95d81", "score": "0.6343231", "text": "def getOrCreateStateSet(self):\n return _osg.Drawable_getOrCreateStateSet(self)", "title": "" }, { "docid": "57162b984af195551eb63d1c7f383d7f", "score": "0.6299877", "text": "def setStateSet(self, *args):\n return _osg.GeodeRef_setStateSet(self, *args)", "title": "" }, { "docid": "75973a05b15fd31c3afe54d8c69fc8a9", "score": "0.6291951", "text": "def getOrCreateStateSet(self):\n return _osg.GroupRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "b0a745f87c1a6581bc51aa32c628e380", "score": "0.6289322", "text": "def getOrCreateStateSet(self):\n return _osg.BillboardRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "2307a9a1c1ae9249d9356fd386c2ccad", "score": "0.62553734", "text": "def getOrCreateStateSet(self):\n return _osg.Node_getOrCreateStateSet(self)", "title": "" }, { "docid": "271fbb91d2c12ac1d4f06be196daf2aa", "score": "0.6250708", "text": "def getOrCreateStateSet(self):\n return _osg.SwitchRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "70471a7d0cacd7f7cea50b465f4f604f", "score": "0.62456757", "text": "def getOrCreateStateSet(self):\n return _osg.NodeRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "1310f3ad5366cbea40051fc025cc63a2", "score": "0.6238457", "text": "def _calculate_state_set(self, start, end):\n start_states = list()\n end_states = list()\n\n \"\"\" Recursively get all the context start states \"\"\"\n s = start\n while s is not None:\n start_states.insert(0, s)\n context = s.context\n if context and not isinstance(context, Statechart):\n s = context\n else:\n s = None\n\n \"\"\" Recursively get all the context end states \"\"\"\n e = end\n while e is not None:\n end_states.insert(0, e)\n context = e.context\n if context and not isinstance(context, Statechart):\n e = context\n else:\n e = None\n\n \"\"\" Get the Least Common Ancestor (LCA) of the start and end states \"\"\"\n min_state_count = min(len(start_states), len(end_states))\n lca = min_state_count - 1\n\n if start is not end:\n lca = 0\n while lca < min_state_count:\n if start_states[lca] is not end_states[lca]:\n break\n lca += 1\n\n \"\"\" Starting from the LCA get the states that will be deactivated \"\"\"\n i = lca\n while i < len(start_states):\n self.deactivate.insert(0, start_states[i])\n i += 1\n\n \"\"\" Starting from the LCA get the states that will be activated \"\"\"\n i = lca\n while i < len(end_states):\n self.activate.append(end_states[i])\n i += 1", "title": "" }, { "docid": "180969a50aa042be9f499c82b34a9626", "score": "0.62186146", "text": "def getOrCreateStateSet(self):\n return _osg.ProxyNodeRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "59ebb0f94c57e693d62e2a670c14534b", "score": "0.6134655", "text": "def __lt__(self, *args):\n return _osg.StateSet___lt__(self, 
*args)", "title": "" }, { "docid": "a6aa4d14429f554d787c919e7f04d255", "score": "0.6049963", "text": "def __getitem__(self, *args):\n return _osg.vectorStateSet___getitem__(self, *args)", "title": "" }, { "docid": "5276af0d415e18e8e8ff19d5f452cc8c", "score": "0.60298854", "text": "def get_states(self):\n abstract", "title": "" }, { "docid": "0bd0f9c5fbff60fe2c82ae7dbe87e12c", "score": "0.60153866", "text": "def setStateSet(self, *args):\n return _osg.ProjectionRef_setStateSet(self, *args)", "title": "" }, { "docid": "3983b5ae8ddfc7d2b21a18c3f75451f4", "score": "0.5863571", "text": "def setStateSet(self, *args):\n return _osg.GroupRef_setStateSet(self, *args)", "title": "" }, { "docid": "04939c814348d6e4da991c4b5329daa8", "score": "0.5845063", "text": "def setStateSet(self, *args):\n return _osg.NodeRef_setStateSet(self, *args)", "title": "" }, { "docid": "f2efef6c2bb17061b590b5adbf0057bd", "score": "0.584505", "text": "def __getstate__(self):\n\n state = self.__dict__.copy()\n for k in ['M', 'pool']:\n if k in state:\n del state[k]\n return state", "title": "" }, { "docid": "f8b4fea628f77f9083534479dbf63ab7", "score": "0.58442265", "text": "def setStateSet(self, *args):\n return _osg.TransformRef_setStateSet(self, *args)", "title": "" }, { "docid": "a8821d0f5b23bf721658e1d37eb6e4ca", "score": "0.58399713", "text": "def __getstate__(self):\n state = self.__dict__.copy() # copy the objects state\n # Remove unpicklable entries (those which are lazily loaded\n del state[\"rays_per_sweep\"]\n del state[\"gate_x\"]\n del state[\"gate_y\"]\n del state[\"gate_z\"]\n del state[\"gate_longitude\"]\n del state[\"gate_latitude\"]\n del state[\"gate_altitude\"]\n return state", "title": "" }, { "docid": "433f4007c4716216ddcc931254e947bb", "score": "0.5828916", "text": "def setStateSet(self, *args):\n return _osg.Node_setStateSet(self, *args)", "title": "" }, { "docid": "ead3cecf818f127261e1c4dba9f8d1ae", "score": "0.5827797", "text": "def getOrCreateStateSet(self):\n return _osg.CameraRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "38fcb0a842ddcaecff61d69b3eb52dbf", "score": "0.58226717", "text": "def __deref__(self):\n return _osg.refStateAttribute___deref__(self)", "title": "" }, { "docid": "fac2565c79d4b6da42189e9715a3298c", "score": "0.581707", "text": "def setStateSet(self, *args):\n return _osg.SwitchRef_setStateSet(self, *args)", "title": "" }, { "docid": "34fc4a8e266b3485f39d0a215af9ec79", "score": "0.5807538", "text": "def merge(self, *args):\n return _osg.StateSet_merge(self, *args)", "title": "" }, { "docid": "0171409a81b81b0a28a6f92c8bd61fbc", "score": "0.57960683", "text": "def getOrCreateStateSet(self):\n return _osg.CameraViewRef_getOrCreateStateSet(self)", "title": "" }, { "docid": "ec5760a47a3ee2dae6683718a4f87b7d", "score": "0.57678056", "text": "def setStateSet(self, *args):\n return _osg.ProxyNodeRef_setStateSet(self, *args)", "title": "" }, { "docid": "c2dd8a71e664808afb6f19e496fb8a75", "score": "0.5761508", "text": "def getStates(self):\n return NotImplemented", "title": "" }, { "docid": "b7001af68cfbc958d37b5589fe1f8ccd", "score": "0.57576734", "text": "def setStateSet(self, *args):\n return _osg.Drawable_setStateSet(self, *args)", "title": "" }, { "docid": "d8d05eafb5acf2e72093864825630dd7", "score": "0.5748045", "text": "def setStateSet(self, *args):\n return _osg.AutoTransformRef_setStateSet(self, *args)", "title": "" }, { "docid": "e949c1e87537cad5b517be053fb151d4", "score": "0.5712362", "text": "def setStateSet(self, *args):\n return 
_osg.MatrixTransformRef_setStateSet(self, *args)", "title": "" }, { "docid": "088c75e79d388d4f8ea64a85d276b7f1", "score": "0.57112753", "text": "def compare(self, *args):\n return _osg.StateSet_compare(self, *args)", "title": "" }, { "docid": "5bb1581a05b59d2373fb7355f29eae86", "score": "0.57091624", "text": "def __getstate__(self):\n state = self.__dict__.copy()\n return state", "title": "" }, { "docid": "5bb1581a05b59d2373fb7355f29eae86", "score": "0.57091624", "text": "def __getstate__(self):\n state = self.__dict__.copy()\n return state", "title": "" }, { "docid": "1047aa4f80fbfdce22a04f89e383c617", "score": "0.56869143", "text": "def get_states(self):\n raise NotImplementedError", "title": "" }, { "docid": "706891e2ba5f1f7dd447a2fb0a739cf9", "score": "0.5655846", "text": "def setStateSet(self, *args):\n return _osg.BillboardRef_setStateSet(self, *args)", "title": "" }, { "docid": "12efcb2ad8dffc2078303078df09fbd5", "score": "0.5627381", "text": "def __eq__(self, *args):\n return _osg.StateSet___eq__(self, *args)", "title": "" }, { "docid": "850c0a923a216d08461eb38d5dcb6b33", "score": "0.5610645", "text": "def __getState__(self): \n\t\todict = {\"lights\":self.lights}\n\t\treturn odict", "title": "" }, { "docid": "07091bd5cc8e4f4ba8ddcca4597e7dca", "score": "0.5610037", "text": "def _state(self) -> dict:", "title": "" }, { "docid": "3decc461f9f728133eabd817138e151d", "score": "0.56056863", "text": "def state(self) -> 'States':\n raise NotImplementedError()", "title": "" }, { "docid": "25e2ea0a9df9940d1cb319d2b316647d", "score": "0.5604889", "text": "def pushStateSet(self, *args):\n return _osg.State_pushStateSet(self, *args)", "title": "" }, { "docid": "f7eebee0203877c75a1c90ad17b9685a", "score": "0.55829334", "text": "def __getstate__(self):\n state = copy.copy(self.__dict__)\n state[\"token2idx\"] = dict(state[\"token2idx\"])\n state[\"idx2token\"] = dict(state[\"idx2token\"])\n return state", "title": "" }, { "docid": "12914e37d4a328e2258b78f8f7fee72f", "score": "0.5556125", "text": "def __getstate__(self):\n state = self.__dict__.copy()\n return state", "title": "" }, { "docid": "217ba42fa24742bca9a9dac77e48fce7", "score": "0.55236036", "text": "def test():\n\ttestpath = [(frozenset([1, 10]), frozenset(['light', 2, 5]), 5), # state 1\n\t\t\t\t(5, 2, '->'), # action 1\n\t\t\t\t(frozenset([10, 5]), frozenset([1, 2, 'light']), 2), # state 2\n\t\t\t\t(2, 1, '->'), # action 2\n\t\t\t\t(frozenset([1, 2, 10]), frozenset(['light', 5]), 5),\n\t\t\t\t(5, 5, '->'), \n\t\t\t\t(frozenset([1, 2]), frozenset(['light', 10, 5]), 10),\n\t\t\t\t(5, 10, '->'), \n\t\t\t\t(frozenset([1, 10, 5]), frozenset(['light', 2]), 2),\n\t\t\t\t(2, 2, '->'), \n\t\t\t\t(frozenset([2, 5]), frozenset([1, 10, 'light']), 10),\n\t\t\t\t(10, 1, '->'), \n\t\t\t\t(frozenset([1, 2, 5]), frozenset(['light', 10]), 10),\n\t\t\t\t(10, 10, '->'), \n\t\t\t\t(frozenset([1, 5]), frozenset(['light', 2, 10]), 10),\n\t\t\t\t(10, 2, '->'), \n\t\t\t\t(frozenset([2, 10]), frozenset([1, 5, 'light']), 5),\n\t\t\t\t(5, 1, '->'), \n\t\t\t\t(frozenset([2, 10, 5]), frozenset([1, 'light']), 1),\n\t\t\t\t(1, 1, '->')]\n\tassert path_states(testpath) == [(frozenset([1, 10]), frozenset(['light', 2, 5]), 5), # state 1\n\t\t(frozenset([10, 5]), frozenset([1, 2, 'light']), 2), # state 2\n\t\t(frozenset([1, 2, 10]), frozenset(['light', 5]), 5),\n\t\t(frozenset([1, 2]), frozenset(['light', 10, 5]), 10),\n\t\t(frozenset([1, 10, 5]), frozenset(['light', 2]), 2),\n\t\t(frozenset([2, 5]), frozenset([1, 10, 'light']), 10),\n\t\t(frozenset([1, 2, 5]), 
frozenset(['light', 10]), 10),\n\t\t(frozenset([1, 5]), frozenset(['light', 2, 10]), 10),\n\t\t(frozenset([2, 10]), frozenset([1, 5, 'light']), 5),\n\t\t(frozenset([2, 10, 5]), frozenset([1, 'light']), 1)]\n\tassert path_actions(testpath) == [(5, 2, '->'), # action 1\n\t\t(2, 1, '->'), # action 2\n\t\t(5, 5, '->'), \n\t\t(5, 10, '->'), \n\t\t(2, 2, '->'), \n\t\t(10, 1, '->'), \n\t\t(10, 10, '->'), \n\t\t(10, 2, '->'), \n\t\t(5, 1, '->'), \n\t\t(1, 1, '->')]\n\tprint bridge_problem(frozenset((1, 2),))[-1][-1]\n\tassert bridge_problem(frozenset((1, 2),))[-1][-1] == 2 # the [-1][-1] grabs the total elapsed time\n\tassert bridge_problem(frozenset((1, 2, 5, 10),))[-1][-1] == 17\n\treturn 'tests pass'", "title": "" }, { "docid": "c0c017a8fab9bd2e8e90cab8c0ede9d8", "score": "0.5499696", "text": "def __getstate__(self): \n\t\todict = VisualizationModule.__getstate__(self)\n\t\todict.update({\"planeWidgetX\":self.getVTKState(self.planeWidgetX)})\n\t\todict.update({\"planeWidgetY\":self.getVTKState(self.planeWidgetY)})\n\t\todict.update({\"planeWidgetZ\":self.getVTKState(self.planeWidgetZ)})\n\t\todict.update({\"prop1\":self.getVTKState(self.prop1)})\n\t\todict.update({\"prop2\":self.getVTKState(self.prop2)})\n\t\todict.update({\"prop3\":self.getVTKState(self.prop3)}) \n\t\todict.update({\"renderer\":self.getVTKState(self.renderer)})\n\t\todict.update({\"camera\":self.getVTKState(self.renderer.GetActiveCamera())})\n\t\todict.update({\"x\":self.x})\n\t\todict.update({\"z\":self.z})\n\t\todict.update({\"y\":self.y})\n\t\treturn odict", "title": "" }, { "docid": "e1c2e6bf8b6d0a54280ce09bc6fc081d", "score": "0.5496155", "text": "def __getstate__(self):\n d = self.__dict__.copy()\n return d", "title": "" }, { "docid": "4ad8d245a017c50410a457d16b47a6a2", "score": "0.5473576", "text": "def __getstate__(self):\r\n d = copy.copy(self.__dict__)\r\n del d['store']\r\n return d", "title": "" }, { "docid": "4ad8d245a017c50410a457d16b47a6a2", "score": "0.5473576", "text": "def __getstate__(self):\r\n d = copy.copy(self.__dict__)\r\n del d['store']\r\n return d", "title": "" }, { "docid": "4ad8d245a017c50410a457d16b47a6a2", "score": "0.5473576", "text": "def __getstate__(self):\r\n d = copy.copy(self.__dict__)\r\n del d['store']\r\n return d", "title": "" }, { "docid": "0f5ead2553c30a9722a1092cf759ca89", "score": "0.5470129", "text": "def __ref__(self):\n return _osg.refStateAttribute___ref__(self)", "title": "" }, { "docid": "72386e63a7340eed4000318b5cb87c1a", "score": "0.5425783", "text": "def _seperate(\n state_sets: Iterable[StateMap[str]],\n) -> Tuple[MutableStateMap[str], MutableStateMap[Set[str]]]:\n state_set_iterator = iter(state_sets)\n unconflicted_state = dict(next(state_set_iterator))\n conflicted_state: MutableStateMap[Set[str]] = {}\n\n for state_set in state_set_iterator:\n for key, value in state_set.items():\n # Check if there is an unconflicted entry for the state key.\n unconflicted_value = unconflicted_state.get(key)\n if unconflicted_value is None:\n # There isn't an unconflicted entry so check if there is a\n # conflicted entry.\n ls = conflicted_state.get(key)\n if ls is None:\n # There wasn't a conflicted entry so haven't seen this key before.\n # Therefore it isn't conflicted yet.\n unconflicted_state[key] = value\n else:\n # This key is already conflicted, add our value to the conflict set.\n ls.add(value)\n elif unconflicted_value != value:\n # If the unconflicted value is not the same as our value then we\n # have a new conflict. 
So move the key from the unconflicted_state\n # to the conflicted state.\n conflicted_state[key] = {value, unconflicted_value}\n unconflicted_state.pop(key, None)\n\n return unconflicted_state, conflicted_state", "title": "" }, { "docid": "7a755ff672e22801557f50ab01df751e", "score": "0.5421615", "text": "def getState(self):\n pass", "title": "" }, { "docid": "2df4d58ec98e13bda4d1668c22fdad73", "score": "0.5419801", "text": "def __getstate__(self):\n return self.__dict__.copy()", "title": "" }, { "docid": "14db040905aec6d01264ee2a35baf504", "score": "0.5403518", "text": "def __getstate__(self):\n return self.items()", "title": "" }, { "docid": "dfa31d2c8b258ac6c6f4b7d497bde9bd", "score": "0.5381906", "text": "def operator(state):\n set_of_states = []\n for row_index in range(8):\n for col_index in range(8):\n if (row_index, col_index) not in state:\n new_state = state.copy()\n new_state.add((row_index, col_index))\n set_of_states.append(new_state)\n return set_of_states", "title": "" }, { "docid": "495fb3ec66477794ca113cf4151812f5", "score": "0.5376463", "text": "def getState(self):\r\n return self.A.state", "title": "" }, { "docid": "892adaff32b83a14bcd735a934968364", "score": "0.53719854", "text": "def followes(state):\n # Create a new set, with state as its only member\n states = set()\n states.add(state)\n # Check id state has arrows labelled e from it\n if state.label is None:\n # Check if edge1 is a state\n if state.edge1 is not None:\n # if theres an edge1, follow it\n states |= followes(state.edge1)\n # Check if edge2 is a state \n if state.edge2 is not None:\n # If there's an edge2, follow it\n states |= followes(state.edge2)\n # Return the set of states\n return states", "title": "" }, { "docid": "d3d78b731351596dee79caa39cf40d1b", "score": "0.5352358", "text": "def __getstate__(self):\n d=copy.copy(self.__dict__)\n\n def convert_spectrum(spectrum):\n if isinstance(spectrum,IsotropicSpectrum) or isinstance(spectrum,DiffuseFunction):\n return (type(spectrum),spectrum.name())\n elif isinstance(spectrum,IsotropicPowerLaw):\n return (type(spectrum),spectrum.flux(),spectrum.index())\n elif isinstance(spectrum,IsotropicConstant):\n return (type(spectrum),spectrum.constant())\n else:\n # unrecognized type\n return spectrum\n\n d['dmodel'] = map(convert_spectrum,d['dmodel'])\n return d", "title": "" }, { "docid": "a53cf6042f0f228d509fc4d513e4af07", "score": "0.5336635", "text": "def update_states():\n\n def visit(d):\n if not isinstance(d, Live2D):\n return\n\n if d.name is None:\n return\n\n state = states[d.name]\n\n if state.mark:\n return\n\n state.mark = True\n\n if state.new is d:\n return\n\n # Shouldn't happen, but stop thrashing if it does.\n if state.old is d:\n return\n\n if state.cycle_new:\n state.old = state.new\n state.old_base_time = state.new_base_time\n else:\n state.old = None\n state.old_base_time = None\n state.expressions = [ ]\n state.old_expressions = [ ]\n\n state.new = d\n\n if d.sustain:\n state.new_base_time = state.old_base_time\n else:\n state.new_base_time = None\n\n state.cycle_new = True\n\n sls = renpy.display.core.scene_lists()\n\n for d in sls.get_all_displayables(current=True):\n if d is not None:\n d.visit_all(visit)\n\n for s in states.values():\n if not s.mark:\n s.cycle_new = False\n\n s.mark = False", "title": "" }, { "docid": "e89aaed7ca52f6e7de71c662aa371978", "score": "0.5332873", "text": "def __getstate__(self): \n\t\todict = VisualizationModule.__getstate__(self)\n\t\t#print \"Saving Slice =\" ,self.parameters[\"Slice\"]\n\t\t#print 
\"Returning\",odict\n\t\todict[\"parameters\"] = self.parameters\n\t\treturn odict", "title": "" }, { "docid": "8eee03577b85409fba918ff325b4da95", "score": "0.53290164", "text": "def get_state(self, name):", "title": "" }, { "docid": "8eee03577b85409fba918ff325b4da95", "score": "0.53290164", "text": "def get_state(self, name):", "title": "" }, { "docid": "bd200b86bbfe97bf196d02f8fe5bca95", "score": "0.5328982", "text": "def test_reference_cycle(self):\n\n class A:\n pass\n\n class B:\n pass\n\n a = A()\n b = B()\n a.a = b\n b.b = a\n state = state_pickler.get_state(a)\n z = A()\n z.a = B()\n z.a.b = z\n state_pickler.set_state(z, state)", "title": "" }, { "docid": "f9a56b0364c374947270601fb3947ce9", "score": "0.5328287", "text": "def __ne__(self, *args):\n return _osg.StateSet___ne__(self, *args)", "title": "" }, { "docid": "7e21456658b1f71627810644181b9bb1", "score": "0.5327539", "text": "def updateObjectSets(self): \n self.objectSetsRaw = search.returnObjectSets()\n \n self.refPrefixes = self.objectSetsRaw['referenced'].keys()\n self.refSetsDict = self.objectSetsRaw['referenced'] or {}\n self.setTypesDict = self.objectSetsRaw['cgmTypes'] or {}\n self.setGroups = self.objectSetsRaw['objectSetGroups'] or []\n #Set Group stuff\n self.setGroupName = False\n \n for s in self.setGroups:\n\tif s in self.refSetsDict.get('From Scene'):\n\t self.setGroupName = s\n\t self.setsGroup = SetFactory(s)\n\t break\n \n self.mayaSets = self.objectSetsRaw['maya'] or []\n self.qssSets = self.objectSetsRaw['qss'] or []\n\t\n self.sortedSets = []\n self.objectSets = []\n self.activeSets = []\n \n #Sort sets we want to actually load\n self.sortedSets = []\n \n #Sort for activeRefs\n tmpActiveRefSets = []\n if self.ActiveRefsOptionVar.value:\n\tfor r in self.ActiveRefsOptionVar.value:\n\t #If value, let's add or subtract based on if our set refs are found\n\t if self.refSetsDict.get(r):\n\t\ttmpActiveRefSets.extend(self.refSetsDict.get(r))\n\t\t\t \n #Sort for active types \n tmpActiveTypeSets = []\n if self.setTypesDict.keys() and self.ActiveTypesOptionVar.value:\n\tfor t in self.setTypesDict.keys():\n\t if t in self.ActiveTypesOptionVar.value and self.setTypesDict.get(t):\t \n\t\ttmpActiveTypeSets.extend(self.setTypesDict.get(t))\n\n if tmpActiveTypeSets and tmpActiveRefSets:\n\tself.sortedSets = lists.returnMatchList(tmpActiveTypeSets,tmpActiveRefSets)\n elif tmpActiveTypeSets:\n\tself.sortedSets = tmpActiveTypeSets\n else:\n\tself.sortedSets = tmpActiveRefSets\n\t\n\t\n #Next step, hiding. 
First get our cull lists\n if self.sortedSets:\n self.objectSets = self.sortedSets\n else:\n self.objectSets = self.objectSetsRaw['all']\n\t\n # Start pulling out stuff by making a list we can iterate through as culling from a list you're iterating through doesn't work\n bufferList = copy.copy(self.objectSets)\n \n # Hide Set groups\n if mc.optionVar(q='cgmVar_HideSetGroups'):\n for s in self.setGroups:\n\t try:self.objectSets.remove(s)\n\t except:pass\n\t \n \n # Hide animLayer Sets\n if mc.optionVar(q='cgmVar_HideAnimLayerSets'):\n for s in bufferList:\n if search.returnObjectType(s) == 'animLayer':\n\t\ttry:self.objectSets.remove(s)\n\t\texcept:pass\n\n \n # Hide Maya Sets\n if mc.optionVar(q='cgmVar_HideMayaSets'):\n\tfor s in self.mayaSets:\n\t try:self.objectSets.remove(s)\n\t except:pass\n\t \n\t \n # Hide non qss Sets\n #print self.qssSets\n #print self.objectSets\n if mc.optionVar(q='cgmVar_HideNonQss'):\n\t#print \"sorting for qss\"\n\tfor s in bufferList:\n\t if s not in self.qssSets and s not in self.setGroups:\n\t\ttry:self.objectSets.remove(s)\n\t\texcept:pass \n \n \n #Refresh our active lists \n if self.ActiveObjectSetsOptionVar.value:\n\tfor o in self.objectSets:\n\t if o in self.ActiveObjectSetsOptionVar.value:\n\t\tself.activeSets.append(o) \n \n self.setInstances = {}\n self.setInstancesFastIndex = {}\n\t\n #If we have object sets to load, we're gonna initialize em\n if self.objectSets:\n\t#Counter build\n\tmayaMainProgressBar = guiFactory.doStartMayaProgressBar(len(self.objectSetsRaw),\"Getting Set info\")\n\t\n for i,o in enumerate(self.objectSets):\n\t if mc.progressBar(mayaMainProgressBar, query=True, isCancelled=True ) :\n\t\tbreak\n\t mc.progressBar(mayaMainProgressBar, edit=True, status = (\"On set %s\"%(o)), step=1) \t\n\t \n\t self.setInstances[i] = SetFactory(o) #Store the instance so we only have to do it once\n\t sInstance = self.setInstances[i] #Simple link to keep the code cleaner\n\t \n\t self.setInstancesFastIndex[o] = i #Store name to index for fast lookup of index on the fly\n\t \t \n\tguiFactory.doEndMayaProgressBar(mayaMainProgressBar)\n\n # Set Group creation if they don't have em\n if mc.optionVar( q='cgmVar_MaintainLocalSetGroup' ) and not self.setGroupName:\n initializeSetGroup(self)\n \n if mc.optionVar( q='cgmVar_MaintainLocalSetGroup' ):\n doGroupLocal(self)", "title": "" }, { "docid": "1f57a161f825047075fa26e39cd768cb", "score": "0.5324889", "text": "def __getstate__(self):\n state = copy.copy(self.__dict__)\n state[\"_token_to_index\"] = dict(state[\"_token_to_index\"])\n state[\"_index_to_token\"] = dict(state[\"_index_to_token\"])\n\n if \"_retained_counter\" in state:\n state[\"_retained_counter\"] = {\n key: dict(value) for key, value in state[\"_retained_counter\"].items()\n }\n\n return state", "title": "" }, { "docid": "dd6d90a91c39880ba1f62c105c9196f4", "score": "0.531493", "text": "def test_get_set_state(self):\n # Create a second target classifier\n clf_new = self._create_clf(\n self.n_features, self.n_classes, self.batch_size)\n self._test_get_set_state(self.clf, clf_new, self.ts)", "title": "" }, { "docid": "f8d9091f55ddd86780f56f93ab78efc9", "score": "0.5314017", "text": "def get_state(self) -> Mapping[str, Any]:", "title": "" }, { "docid": "2f2d9ce145d0e1f8619c7fe55ee836b0", "score": "0.5312846", "text": "def singleState_set(Nmax):\n\tStateset=[]\n\tfor N in range(Nmax+1):\n\t\tfor L in range(N%2,N+1,2):\n\t\t\tjset=so3.couple_so3(L,.5)\n\t\t\tjset.sort()\n\t\t\tfor J in 
jset:\n\t\t\t\tStateset.append(SingleState(N,L,J))\n\treturn Stateset", "title": "" }, { "docid": "510421819455c552e8e2a79f7ceaaed6", "score": "0.5298213", "text": "def __setitem__(self, *args):\n return _osg.vectorStateSet___setitem__(self, *args)", "title": "" }, { "docid": "121e73b6213c9a931bcb45b22516ac19", "score": "0.5290404", "text": "def __lt__(self, *args):\n return _osg.StateAttribute___lt__(self, *args)", "title": "" }, { "docid": "47aac5131353515e289282e78d8c7f8f", "score": "0.5289945", "text": "def __getstate__(self):\n state = self.__dict__\n show_pickle_warning = not (\n \"show_pickle_warnings_\" in state and not state[\"show_pickle_warnings_\"]\n )\n if \"raw_julia_state_\" in state and show_pickle_warning:\n warnings.warn(\n \"raw_julia_state_ cannot be pickled and will be removed from the \"\n \"serialized instance. This will prevent a `warm_start` fit of any \"\n \"model that is deserialized via `pickle.load()`.\"\n )\n state_keys_containing_lambdas = [\"extra_sympy_mappings\", \"extra_torch_mappings\"]\n for state_key in state_keys_containing_lambdas:\n if state[state_key] is not None and show_pickle_warning:\n warnings.warn(\n f\"`{state_key}` cannot be pickled and will be removed from the \"\n \"serialized instance. When loading the model, please redefine \"\n f\"`{state_key}` at runtime.\"\n )\n state_keys_to_clear = [\"raw_julia_state_\"] + state_keys_containing_lambdas\n pickled_state = {\n key: (None if key in state_keys_to_clear else value)\n for key, value in state.items()\n }\n if (\"equations_\" in pickled_state) and (\n pickled_state[\"equations_\"] is not None\n ):\n pickled_state[\"output_torch_format\"] = False\n pickled_state[\"output_jax_format\"] = False\n if self.nout_ == 1:\n pickled_columns = ~pickled_state[\"equations_\"].columns.isin(\n [\"jax_format\", \"torch_format\"]\n )\n pickled_state[\"equations_\"] = (\n pickled_state[\"equations_\"].loc[:, pickled_columns].copy()\n )\n else:\n pickled_columns = [\n ~dataframe.columns.isin([\"jax_format\", \"torch_format\"])\n for dataframe in pickled_state[\"equations_\"]\n ]\n pickled_state[\"equations_\"] = [\n dataframe.loc[:, signle_pickled_columns]\n for dataframe, signle_pickled_columns in zip(\n pickled_state[\"equations_\"], pickled_columns\n )\n ]\n return pickled_state", "title": "" }, { "docid": "3a6974f8ffc39352c882887c23cfdc20", "score": "0.5288807", "text": "def computeStates(self):\n self.states = set()\n queue = []\n self.states.add(self.startState())\n queue.append(self.startState())\n while len(queue) > 0:\n state = queue.pop()\n for action in self.actions(state):\n for newState, prob, reward in self.succAndProbReward(state, action):\n if newState not in self.states:\n self.states.add(newState)\n queue.append(newState)\n # print(self.states)", "title": "" }, { "docid": "222939cc8d5212e6016c26560a693e77", "score": "0.5283661", "text": "def __setstate__(self, state):\n if 'diff' in state:\n diff = state['diff']\n if not isinstance(diff, AttrDict):\n diff = AttrDict(diff)\n state['diff'] = diff\n for attr, val in diff.items():\n if not isinstance(val, Diff):\n diff[attr] = Diff(**val)\n self.state = state['state']\n self.diff = state['diff']", "title": "" }, { "docid": "bf2f32f4092e266848d9e5d1f964f8a9", "score": "0.528329", "text": "def _get_set_(self):\n return self.__set_", "title": "" }, { "docid": "71a37d2641b0a64feaa8a97d9ea06cd2", "score": "0.5280834", "text": "def get_state(self):\n return self[:]", "title": "" }, { "docid": "19d19fe5aaab838f684202a4f297274a", "score": "0.5279621", 
"text": "def __deref__(self):\n return _osg.LODRef___deref__(self)", "title": "" }, { "docid": "52addce909397de1d7d0dada82148732", "score": "0.52761924", "text": "def get(self):\n return self.state", "title": "" } ]
cca8c52535af0f8ee21bead37ab93bef
Testcase8 in NAT Functionality
[ { "docid": "822b6acbfd248a6794dd1f9ebbdb2990", "score": "0.6763254", "text": "def test_nat_func_8(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_8 starts ****\")\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(30)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy(delete=1):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n if not self.steps.testApplyUpdatePrsToPtg('external',\n PRS_ICMP_TCP\n ):\n return 0\n sleep(20) #Above update takes time to take effect on the ACI side\n #Verifying DNATed Traffic from both VMs\n LOG.info(\n \"\\n DNATed Traffic from ExtRTR to VMs after ExtPol is re-created\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0", "title": "" } ]
[ { "docid": "952b0702aa0a844ea1ddd3dae56a140c", "score": "0.7165074", "text": "def test_twice_nat_interface_addr(self):\n flags = self.config_flags.NAT_IS_TWICE_NAT\n self.vapi.nat44_add_del_interface_addr(\n sw_if_index=self.pg11.sw_if_index, flags=flags, is_add=1\n )\n\n # no address in NAT pool\n adresses = self.vapi.nat44_address_dump()\n self.assertEqual(0, len(adresses))\n\n # configure interface address and check NAT address pool\n self.pg11.config_ip4()\n adresses = self.vapi.nat44_address_dump()\n self.assertEqual(1, len(adresses))\n self.assertEqual(str(adresses[0].ip_address), self.pg11.local_ip4)\n self.assertEqual(adresses[0].flags, flags)\n\n # remove interface address and check NAT address pool\n self.pg11.unconfig_ip4()\n adresses = self.vapi.nat44_address_dump()\n self.assertEqual(0, len(adresses))", "title": "" }, { "docid": "eb3b854d71c47d91385e5eaf86d13be1", "score": "0.711561", "text": "def test_next_src_nat(self):\n\n twice_nat_addr = \"10.0.1.3\"\n external_port = 80\n local_port = 8080\n post_twice_nat_port = 0\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n self.nat_add_address(twice_nat_addr, twice_nat=1)\n flags = (\n self.config_flags.NAT_IS_OUT2IN_ONLY\n | self.config_flags.NAT_IS_SELF_TWICE_NAT\n )\n self.nat_add_static_mapping(\n self.pg6.remote_ip4,\n self.pg1.remote_ip4,\n local_port,\n external_port,\n proto=IP_PROTOS.tcp,\n vrf_id=1,\n flags=flags,\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg6.sw_if_index, is_add=1\n )\n\n p = (\n Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac)\n / IP(src=self.pg6.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=12345, dport=external_port)\n )\n self.pg6.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, twice_nat_addr)\n self.assertNotEqual(tcp.sport, 12345)\n post_twice_nat_port = tcp.sport\n self.assertEqual(ip.dst, self.pg6.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n p = (\n Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac)\n / IP(src=self.pg6.remote_ip4, dst=twice_nat_addr)\n / TCP(sport=local_port, dport=post_twice_nat_port)\n )\n self.pg6.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg1.remote_ip4)\n self.assertEqual(tcp.sport, external_port)\n self.assertEqual(ip.dst, self.pg6.remote_ip4)\n self.assertEqual(tcp.dport, 12345)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "866a5b8ae8030051cef9cb4eb4841316", "score": "0.6788748", "text": "def test_nat_func_7(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_7 starts ****\")\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if 
not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testCreateNsp():\n return 0\n if not self.steps.testApplyRemoveNSpFromPtg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testApplyRemoveNSpFromPtg(nspuuid=None):\n return 0\n sleep(5)\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(30)\n #Verifying DNATed Traffic from both VMs\n LOG.info(\n \"\\n DNATed Traffic from ExtRTR to VMs after NSP is removed from PTG\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0", "title": "" }, { "docid": "b1863f554ab3d9782835465ffd03b889", "score": "0.6765557", "text": "def test_nat_func_2(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_2 starts ****\")\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0", "title": "" }, { "docid": "5a78ae76a1ae9f6f03aa391bacbfcd31", "score": "0.67219484", "text": "def test_ipsec_nat_tun(self):\n scapy_tun_sa = SecurityAssociation(ESP, spi=self.scapy_tun_spi,\n crypt_algo=self.crypt_algo,\n crypt_key=self.crypt_key,\n auth_algo=self.auth_algo,\n auth_key=self.auth_key,\n tunnel_header=IP(\n src=self.pg1.remote_ip4,\n dst=self.tun_if.remote_ip4),\n nat_t_header=UDP(\n sport=4500,\n dport=4500))\n # in2out - from private network to public\n pkts = self.create_stream_plain(\n self.pg1.remote_mac, self.pg1.local_mac,\n self.pg1.remote_ip4, self.tun_if.remote_ip4)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.tun_if.get_capture(len(pkts))\n self.verify_capture_encrypted(capture, scapy_tun_sa)\n\n vpp_tun_sa = SecurityAssociation(ESP,\n spi=self.vpp_tun_spi,\n crypt_algo=self.crypt_algo,\n crypt_key=self.crypt_key,\n auth_algo=self.auth_algo,\n auth_key=self.auth_key,\n 
tunnel_header=IP(\n src=self.tun_if.remote_ip4,\n dst=self.pg1.remote_ip4),\n nat_t_header=UDP(\n sport=4500,\n dport=4500))\n\n # out2in - from public network to private\n pkts = self.create_stream_encrypted(\n self.tun_if.remote_mac, self.tun_if.local_mac,\n self.tun_if.remote_ip4, self.pg1.remote_ip4, vpp_tun_sa)\n self.logger.info(ppc(\"Sending packets:\", pkts))\n self.tun_if.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_plain(capture)", "title": "" }, { "docid": "b33a38b4ac3443362f4ab4306ff14a74", "score": "0.67164546", "text": "def test_nat_func_4(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_4 starts ****\")\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if self.steps.testAssociateFipToVMs(): #Negative Check\n\t LOG.error(\n \"\\n Expected FIP Association To Fail,\"\n \" since L3P is NOT yet associated to ExtSeg\")\n return 0\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n if not self.steps.testVerifyCfgdObjects():\n return 0\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0", "title": "" }, { "docid": "54be5301b38f4212fe44ff753c38a706", "score": "0.6690755", "text": "def test_nat_func_3(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_3 starts ****\")\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0", "title": "" }, { "docid": 
"d861e2399d1046aca4075a4dd4d59281", "score": "0.66860706", "text": "def test_outside_address_distribution(self):\n\n x = 100\n nat_addresses = []\n\n for i in range(1, x):\n a = \"10.0.0.%d\" % i\n nat_addresses.append(a)\n\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n self.vapi.nat44_add_del_address_range(\n first_ip_address=nat_addresses[0],\n last_ip_address=nat_addresses[-1],\n vrf_id=0xFFFFFFFF,\n is_add=1,\n flags=0,\n )\n\n self.pg0.generate_remote_hosts(x)\n\n pkts = []\n for i in range(x):\n info = self.create_packet_info(self.pg0, self.pg1)\n payload = self.info_to_payload(info)\n p = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_hosts[i].ip4, dst=self.pg1.remote_ip4)\n / UDP(sport=7000 + i, dport=8000 + i)\n / Raw(payload)\n )\n info.data = p\n pkts.append(p)\n\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n recvd = self.pg1.get_capture(len(pkts))\n for p_recvd in recvd:\n payload_info = self.payload_to_info(p_recvd[Raw])\n packet_index = payload_info.index\n info = self._packet_infos[packet_index]\n self.assertTrue(info is not None)\n self.assertEqual(packet_index, info.index)\n p_sent = info.data\n packed = socket.inet_aton(p_sent[IP].src)\n numeric = struct.unpack(\"!L\", packed)[0]\n numeric = socket.htonl(numeric)\n a = nat_addresses[(numeric - 1) % len(nat_addresses)]\n self.assertEqual(\n a,\n p_recvd[IP].src,\n \"Invalid packet (src IP %s translated to %s, but expected %s)\"\n % (p_sent[IP].src, p_recvd[IP].src, a),\n )", "title": "" }, { "docid": "2ee5115c23ba851d0303ee7c4a0362cc", "score": "0.66806847", "text": "def test_nat_func_6(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_6 starts ****\")\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n #Intentionally launching VMs in same avail-zone/comp-node\n if not self.steps.testLaunchVmsForEachPt():\n return 0\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(30)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0\n self.steps.testDeleteNatPool()\n if not self.steps.testCreateNatPoolAssociateExtSeg(\n poolname=self.natpoolname,\n natpool=self.fipsubnet2\n ):\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(30)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet2,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\n \"\\n DNATed Traffic from ExtRTR to VMs with FIPs from New NAT-Pool\")\n if not 
self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n\n if not self.steps.testDisassociateFipFromVMs():\n return 0", "title": "" }, { "docid": "9669217a472bb95c4ab178f4b6862b3c", "score": "0.66555214", "text": "def test_nat_func_1(self):\n LOG.info(\n \"\\n **** Execution of Testcase TEST_NAT_FUNC_1 starts ****\")\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n #NOTE:The below flag check cannot be applied in test-workflows\n #where ExtSeg is created after the L3Policies, else L3Ps will not\n #have ExtSeg association.\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testLaunchVmsForEachPt():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs(release_fip=False):\n return 0\n sleep(10)\n #Inter-change of FIPs\n if not self.steps.testAssociateFipToVMs(ic=True):\n return 0\n sleep(10)\n LOG.info(\n \"\\n DNATed Traffic from ExtRTR to VMs after Inter-Change of FIPs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0", "title": "" }, { "docid": "3edd40eebd39d986de3e09dbc888c541", "score": "0.65366495", "text": "def test_nat_func_5(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_5 starts ****\")\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0\n DcExtsegid = self.steps.testCreateExtSegWithDefault(EXTSEG_SEC)\n if not DcExtsegid:\n return 0\n print 'DcExtSegID ==',DcExtsegid\n if not 
self.steps.testAssociateExtSegToBothL3ps(extsegid=DcExtsegid):\n return 0\n if not self.steps.testUpdateNatPoolAssociateExtSeg(DcExtsegid):\n return 0\n if not self.steps.testCreateUpdateExternalPolicy(extseg=DcExtsegid,\n extpol=''):\n return 0\n if not self.steps.testApplyUpdatePrsToPtg(\n 'external',\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testAssociateFipToVMs(ExtSegName=EXTSEG_SEC):\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip2_extrtr,\n action='update'\n )\n #Verifying DNATed Traffic from both VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n if not self.steps.testDisassociateFipFromVMs():\n return 0", "title": "" }, { "docid": "998823c73f33f51845e2ed536cde2de5", "score": "0.6250086", "text": "def test_static_lb_2(self):\n external_addr = self.nat_addr\n external_port = 80\n local_port = 8080\n server1 = self.pg0.remote_hosts[0]\n server2 = self.pg0.remote_hosts[1]\n\n locals = [\n {\"addr\": server1.ip4, \"port\": local_port, \"probability\": 70, \"vrf_id\": 0},\n {\"addr\": server2.ip4, \"port\": local_port, \"probability\": 30, \"vrf_id\": 0},\n ]\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.vapi.nat44_add_del_lb_static_mapping(\n is_add=1,\n flags=flags,\n external_addr=external_addr,\n external_port=external_port,\n protocol=IP_PROTOS.tcp,\n local_num=len(locals),\n locals=locals,\n )\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, flags=flags, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n # from client to service\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=12345, dport=external_port)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n server = None\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertIn(ip.dst, [server1.ip4, server2.ip4])\n if ip.dst == server1.ip4:\n server = server1\n else:\n server = server2\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from service back to client\n p = (\n Ether(src=server.mac, dst=self.pg0.local_mac)\n / IP(src=server.ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=local_port, dport=12345)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.nat_addr)\n self.assertEqual(tcp.sport, external_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from client to server (no translation)\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=server1.ip4)\n / TCP(sport=12346, dport=local_port)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n server = None\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, server1.ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n 
self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from service back to client (no translation)\n p = (\n Ether(src=server1.mac, dst=self.pg0.local_mac)\n / IP(src=server1.ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=local_port, dport=12346)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, server1.ip4)\n self.assertEqual(tcp.sport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "2d30aabd285f0fddbd753acbdc00e94c", "score": "0.61547345", "text": "def test_parasitic_tcp_traceroute(self):\n sport = self.get_sport()\n dport = self.dport\n ipid = int(RandShort())\n\n ip_layer = IP(dst=self.dst_ip,\n id=ipid, ttl=self.max_ttl)\n\n syn = ip_layer/TCP(sport=sport, dport=dport, flags=\"S\", seq=0)\n\n log.msg(\"Sending...\")\n syn.show2()\n\n synack = yield self.sr1(syn)\n\n log.msg(\"Got response...\")\n synack.show2()\n\n if not synack:\n log.err(\"Got no response. Try increasing max_ttl\")\n return\n\n if synack[TCP].flags == 11:\n log.msg(\"Got back a FIN ACK. The destination port is closed\")\n return\n\n elif synack[TCP].flags == 18:\n log.msg(\"Got a SYN ACK. All is well.\")\n else:\n log.err(\"Got an unexpected result\")\n return\n\n ack = ip_layer/TCP(sport=synack.dport,\n dport=dport, flags=\"A\",\n seq=synack.ack, ack=synack.seq + 1)\n\n yield self.send(ack)\n\n self.report['hops'] = []\n # For the time being we make the assumption that we are NATted and\n # that the NAT will forward the packet to the destination even if the TTL has \n for ttl in range(1, self.max_ttl):\n log.msg(\"Sending packet with ttl of %s\" % ttl)\n ip_layer.ttl = ttl\n empty_tcp_packet = ip_layer/TCP(sport=synack.dport,\n dport=dport, flags=\"A\",\n seq=synack.ack, ack=synack.seq + 1)\n\n answer = yield self.sr1(empty_tcp_packet)\n if not answer:\n log.err(\"Got no response for ttl %s\" % ttl)\n continue\n\n try:\n icmp = answer[ICMP]\n report = {'ttl': empty_tcp_packet.ttl,\n 'address': answer.src,\n 'rtt': answer.time - empty_tcp_packet.time\n }\n log.msg(\"%s: %s\" % (dport, report))\n self.report['hops'].append(report)\n\n except IndexError:\n if answer.src == self.dst_ip:\n answer.show()\n log.msg(\"Reached the destination. 
We have finished the traceroute\")\n return", "title": "" }, { "docid": "0657ce5b891b2db3f38f48be86a3515d", "score": "0.61486334", "text": "def test_snat_func_9(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_9 starts ****\")\n \n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n self.steps.AddSShContract(self.apicip) ## Adding SSH contract\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testLaunchVmsForEachPt():\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testVerifyCfgdObjects(nat_type='snat'):\n return 0\n sleep(15)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n SNATPOOL,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying SNATed Traffic from both VMs\n LOG.info(\"\\n SNATed Traffic from VMs to ExtRTR\")\n if not self.steps.testTrafficFromVMsToExtRtr(self.targetiplist):\n return 0", "title": "" }, { "docid": "aa1617082385348e0a41b84e9b70b694", "score": "0.61098427", "text": "def test_router_dns_guestipquery(self):\n\n self.logger.debug(\"Starting test_router_dns_guestipquery...\")\n public_ip = self.test_router_common()[0]\n\n self.logger.debug(\"Creating Firewall rule for VM ID: %s\" % self.vm.id)\n FireWallRule.create(\n self.apiclient,\n ipaddressid=public_ip.id,\n protocol=self.services[\"natrule1\"][\"protocol\"],\n cidrlist=['0.0.0.0/0'],\n startport=self.services[\"natrule1\"][\"publicport\"],\n endport=self.services[\"natrule1\"][\"publicport\"]\n )\n\n self.logger.debug(\"Creating NAT rule for VM ID: %s\" % self.vm.id)\n nat_rule1 = NATRule.create(\n self.apiclient,\n self.vm,\n self.services[\"natrule1\"],\n public_ip.id\n )\n nat_rules = list_nat_rules(\n self.apiclient,\n id=nat_rule1.id\n )\n self.assertEqual(\n isinstance(nat_rules, list),\n True,\n \"Check for list NAT rules response return valid data\"\n )\n self.assertTrue(\n len(nat_rules) >= 1,\n \"Check for list NAT rules to have at least one rule\"\n )\n self.assertEqual(\n nat_rules[0].state,\n 'Active',\n \"Check list port forwarding rules\"\n )\n\n result = None\n try:\n self.logger.debug(\"SSH into guest VM with IP: %s\" % nat_rule1.ipaddress)\n ssh = self.vm.get_ssh_client(ipaddress=nat_rule1.ipaddress, port=self.services['natrule1'][\"publicport\"], retries=15)\n result = str(ssh.execute(\"nslookup google.com\"))\n except Exception as e:\n self.fail(\"Failed to SSH into VM - %s due to exception: %s\" % (nat_rule1.ipaddress, e))\n\n if not result:\n self.fail(\"Did not to receive any response from the guest VM, failing.\")\n\n self.assertTrue(\"google.com\" in result and \"10.1.1.1\" in result,\n \"VR DNS should serve requests from guest network, unable to get valid nslookup result from guest VM.\")", "title": "" }, { "docid": "15b832168da9e094911ac68d416f44d2", "score": "0.6095368", "text": "def test_snat_func_10(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_10 starts ****\")\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not 
self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testVerifyCfgdObjects(nat_type='snat'):\n return 0\n self.steps.AddSShContract(self.apicip) ## Adding SSH contract\n if not self.steps.testLaunchVmsForEachPt():\n return 0\n sleep(15)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n SNATPOOL,\n self.gwip1_extrtr,\n action='update'\n )\n #Verifying SNATed Traffic from both VMs\n LOG.info(\"\\n SNATed Traffic from VMs to ExtRTR\")\n if not self.steps.testTrafficFromVMsToExtRtr(self.targetiplist):\n return 0\n #Verifying DNATed Traffic from ExtRtr to VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(30)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n\t#Verifying Traffic to be SNATed on DisAsso FIPs\n LOG.info(\n \"\\n SNATed Traffic from VMs to ExtRTR post FIPs disassociated\")\n if not self.steps.testDisassociateFipFromVMs():\n return 0\n if not self.steps.testTrafficFromVMsToExtRtr(self.targetiplist):\n return 0", "title": "" }, { "docid": "e0d034c797ed883fe462c3773ce731cf", "score": "0.6094481", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[0], \" (3000:10::10 -> 3000:20::20)\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n\n # Make this single switchport a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n self.add_vlan_member(device, vlan20, eth1)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv6_ipaddr('3000:10:0:0:0:0:0:1', 120)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv6_ipaddr('3000:20:0:0:0:0:0:1', 120)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv6_ipaddr('3000:20::20', 128)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcpv6_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ipv6_dst='3000:20:0:0:0:0:0:20',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=10,\n ipv6_hlim=64)\n exp_pkt = simple_tcpv6_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ipv6_dst='3000:20:0:0:0:0:0:20',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=20,\n ipv6_hlim=63)\n\n try:\n 
send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[0]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "c600c37dc3ddaa1a0c4f950ca587dd08", "score": "0.60710204", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (3000:10::10 -> 3000:20::20)\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make switchport a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n\n eth2 = self.cfg_l2intf_on_port(device, port2, mode='trunk')\n self.add_vlan_member(device, vlan20, eth2)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv6_ipaddr('3000:10:0:0:0:0:0:1', 120)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv6_ipaddr('3000:20:0:0:0:0:0:1', 120)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv6_ipaddr('3000:20::20', 128)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcpv6_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ipv6_dst='3000:20:0:0:0:0:0:20',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=10,\n ipv6_hlim=64)\n exp_pkt = simple_tcpv6_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ipv6_dst='3000:20:0:0:0:0:0:20',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=20,\n ipv6_hlim=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "1c5b6aacecd5017be8417f60d7cf3c01", "score": "0.6038735", "text": "def test_output_feature_and_service3(self):\n external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n self.nat_add_address(self.nat_addr)\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg1.remote_ip4,\n external_addr,\n local_port,\n external_port,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg0)\n self.vapi.nat44_ed_add_del_output_interface(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=external_addr)\n / TCP(sport=12345, dport=external_port)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg0.remote_ip4)\n self.assertEqual(tcp.sport, 12345)\n self.assertEqual(ip.dst, self.pg1.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n p = (\n 
Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4)\n / TCP(sport=local_port, dport=12345)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, external_addr)\n self.assertEqual(tcp.sport, external_port)\n self.assertEqual(ip.dst, self.pg0.remote_ip4)\n self.assertEqual(tcp.dport, 12345)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "39919e81ec5ef20633fe048ebd003622", "score": "0.60157025", "text": "def test_static_lb(self):\n external_addr_n = self.nat_addr\n external_port = 80\n local_port = 8080\n server1 = self.pg0.remote_hosts[0]\n server2 = self.pg0.remote_hosts[1]\n\n locals = [\n {\"addr\": server1.ip4, \"port\": local_port, \"probability\": 70, \"vrf_id\": 0},\n {\"addr\": server2.ip4, \"port\": local_port, \"probability\": 30, \"vrf_id\": 0},\n ]\n\n self.nat_add_address(self.nat_addr)\n self.vapi.nat44_add_del_lb_static_mapping(\n is_add=1,\n external_addr=external_addr_n,\n external_port=external_port,\n protocol=IP_PROTOS.tcp,\n local_num=len(locals),\n locals=locals,\n )\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, flags=flags, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n # from client to service\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=12345, dport=external_port)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n server = None\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertIn(ip.dst, [server1.ip4, server2.ip4])\n if ip.dst == server1.ip4:\n server = server1\n else:\n server = server2\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from service back to client\n p = (\n Ether(src=server.mac, dst=self.pg0.local_mac)\n / IP(src=server.ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=local_port, dport=12345)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.nat_addr)\n self.assertEqual(tcp.sport, external_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n sessions = self.vapi.nat44_user_session_dump(server.ip4, 0)\n self.assertEqual(len(sessions), 1)\n self.assertTrue(sessions[0].flags & self.config_flags.NAT_IS_EXT_HOST_VALID)\n self.vapi.nat44_del_session(\n address=sessions[0].inside_ip_address,\n port=sessions[0].inside_port,\n protocol=sessions[0].protocol,\n flags=(\n self.config_flags.NAT_IS_INSIDE\n | self.config_flags.NAT_IS_EXT_HOST_VALID\n ),\n ext_host_address=sessions[0].ext_host_address,\n ext_host_port=sessions[0].ext_host_port,\n )\n sessions = self.vapi.nat44_user_session_dump(server.ip4, 0)\n self.assertEqual(len(sessions), 0)", "title": "" }, { "docid": "cb96ce72fd7733b4013317d7ca0c19ef", "score": "0.6006661", "text": "def test_output_feature_and_service(self):\n 
external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n self.nat_add_address(self.nat_addr)\n flags = self.config_flags.NAT_IS_ADDR_ONLY\n self.vapi.nat44_add_del_identity_mapping(\n ip_address=self.pg1.remote_ip4,\n sw_if_index=0xFFFFFFFF,\n flags=flags,\n is_add=1,\n )\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg0.remote_ip4,\n external_addr,\n local_port,\n external_port,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg0)\n self.vapi.nat44_ed_add_del_output_interface(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n # from client to service\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=external_addr)\n / TCP(sport=12345, dport=external_port)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg0.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from service back to client\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=local_port, dport=12345)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, external_addr)\n self.assertEqual(tcp.sport, external_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from local network host to external network\n pkts = self.create_stream_in(self.pg0, self.pg1)\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_out(capture, ignore_port=True)\n pkts = self.create_stream_in(self.pg0, self.pg1)\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_out(capture, ignore_port=True)\n\n # from external network back to local network host\n pkts = self.create_stream_out(self.pg1)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n self.verify_capture_in(capture, self.pg0)", "title": "" }, { "docid": "98389accbbf3e340e4eb44a045cbde37", "score": "0.59847635", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (3000:10::10 -> 3000:20::20)\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10\n vlan10 = self.add_vlan(device, 10)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make 1 port a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n\n # Add SVI IP address to vlan 10\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv6_ipaddr('3000:10::1', 120)\n self.cfg_ip_address(device, intf_vl10, 
vrf, ipaddr1)\n\n # Make other port a sub-interface port\n eth2_20 = self.add_logical_l3intf(device, vrf, rmac)\n ethcfg2 = self.cfg_subintf_on_port(device, port2, eth2_20, vlan_id=20)\n ipaddr2 = self.make_ipv6_ipaddr('3000:20::1', 120)\n self.cfg_ip_address(device, eth2_20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv6_ipaddr('3000:20::20', 128)\n nhop1 = self.add_nhop(device, eth2_20)\n self.add_neighbor_l3intf(device, eth2_20, nhop1, '00:20:20:20:20:25',\n ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcpv6_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ipv6_dst='3000:20:0:0:0:0:0:20',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=10,\n ipv6_hlim=64)\n exp_pkt = simple_tcpv6_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ipv6_dst='3000:20:0:0:0:0:0:20',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=20,\n ipv6_hlim=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "d3be21d2ba141164f2d2366863e84759", "score": "0.59674877", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[0], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n\n # Make this single switchport a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n self.add_vlan_member(device, vlan20, eth1)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n dl_vlan_enable=True,\n vlan_vid=10,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n dl_vlan_enable=True,\n vlan_vid=20,\n ip_id=105,\n ip_ttl=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[0]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "fd496975bd5a4591cdc3c13f3c88a6cb", "score": "0.5965338", "text": "def test_01_nat_usage(self):\n # Validate the following\n # 1. Acquire a IP for this account\n # 2. Create a PF rule on the IP associated with this account.\n # NET.RULEADD event is registered for this account in\n # cloud.usage_event table\n # 3. Delete the created PF rule from the account. 
NET.RULEDelete event\n # is registered for this account in cloud.usage_event table\n # 4. Delete this account.\n\n self.debug(\"Creating NAT rule with public IP: %s\" %\n self.public_ip_1.ipaddress.id)\n #Create NAT rule\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine,\n self.services[\"natrule\"],\n self.public_ip_1.ipaddress.id\n )\n\n # Delete NAT Rule\n self.debug(\"Deleting NAT rule: %s\" % nat_rule.id)\n nat_rule.delete(self.apiclient)\n\n # Fetch project account ID from project UUID\n self.debug(\n \"select project_account_id from projects where uuid = '%s';\" \\\n % self.project.id)\n\n qresultset = self.dbclient.execute(\n \"select project_account_id from projects where uuid = '%s';\" \\\n % self.project.id\n )\n self.assertEqual(\n isinstance(qresultset, list),\n True,\n \"Check DB query result set for valid data\"\n )\n\n self.assertNotEqual(\n len(qresultset),\n 0,\n \"Check DB Query result set\"\n )\n qresult = qresultset[0]\n\n account_id = qresult[0]\n self.debug(\"select type from usage_event where account_id = '%s';\" \\\n % account_id)\n\n qresultset = self.dbclient.execute(\n \"select type from usage_event where account_id = '%s';\" \\\n % account_id\n )\n\n self.assertEqual(\n isinstance(qresultset, list),\n True,\n \"Check DB query result set for valid data\"\n )\n self.assertNotEqual(\n len(qresultset),\n 0,\n \"Check DB Query result set\"\n )\n\n qresult = str(qresultset)\n self.debug(\"Query result: %s\" % qresult)\n\n # Check for NET.RULEADD, NET.RULEDELETE in cloud.usage_event table\n self.assertEqual(\n qresult.count('NET.RULEADD'),\n 1,\n \"Check NET.RULEADD event in events table\"\n )\n\n self.assertEqual(\n qresult.count('NET.RULEDELETE'),\n 1,\n \"Check NET.RULEDELETE in events table\"\n )\n return", "title": "" }, { "docid": "1073ae523435ffeaffdd93799d4ebe75", "score": "0.59622437", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make switchport a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n\n eth2 = self.cfg_l2intf_on_port(device, port2, mode='trunk')\n self.add_vlan_member(device, vlan20, eth2)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n dl_vlan_enable=True,\n vlan_vid=10,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(\n 
eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n dl_vlan_enable=True,\n vlan_vid=20,\n ip_id=105,\n ip_ttl=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "2bbf04ded840d5f60e8f6ab684fc6220", "score": "0.5960858", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10\n vlan10 = self.add_vlan(device, 10)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make 1 port a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n\n # Add SVI IP address to vlan 10\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n # Make other port a sub-interface port\n eth2_20 = self.add_logical_l3intf(device, vrf, rmac)\n ethcfg2 = self.cfg_subintf_on_port(device, port2, eth2_20, vlan_id=20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, eth2_20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, eth2_20)\n self.add_neighbor_l3intf(device, eth2_20, nhop1, '00:20:20:20:20:25',\n ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n dl_vlan_enable=True,\n vlan_vid=10,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n dl_vlan_enable=True,\n vlan_vid=20,\n ip_id=105,\n ip_ttl=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "93b906dbc6e27b542628d315d4bcaf78", "score": "0.59592575", "text": "def test_hairpinning_unknown_proto(self):\n host = self.pg0.remote_hosts[0]\n server = self.pg0.remote_hosts[1]\n host_in_port = 1234\n server_out_port = 8765\n server_nat_ip = \"10.0.0.11\"\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n # add static mapping for server\n self.nat_add_static_mapping(server.ip4, server_nat_ip)\n\n # host to server\n p = (\n Ether(src=host.mac, dst=self.pg0.local_mac)\n / IP(src=host.ip4, dst=server_nat_ip)\n / TCP(sport=host_in_port, dport=server_out_port)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n self.pg0.get_capture(1)\n\n p = (\n Ether(dst=self.pg0.local_mac, src=host.mac)\n / IP(src=host.ip4, dst=server_nat_ip)\n / GRE()\n / IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4)\n / TCP(sport=1234, dport=1234)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n p = self.pg0.get_capture(1)\n packet = p[0]\n try:\n self.assertEqual(packet[IP].src, self.nat_addr)\n self.assertEqual(packet[IP].dst, server.ip4)\n self.assertEqual(packet.haslayer(GRE), 1)\n 
self.assert_packet_checksums_valid(packet)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", packet))\n raise\n\n # server to host\n p = (\n Ether(dst=self.pg0.local_mac, src=server.mac)\n / IP(src=server.ip4, dst=self.nat_addr)\n / GRE()\n / IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4)\n / TCP(sport=1234, dport=1234)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n p = self.pg0.get_capture(1)\n packet = p[0]\n try:\n self.assertEqual(packet[IP].src, server_nat_ip)\n self.assertEqual(packet[IP].dst, host.ip4)\n self.assertEqual(packet.haslayer(GRE), 1)\n self.assert_packet_checksums_valid(packet)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", packet))\n raise", "title": "" }, { "docid": "8dbfc8d7fbdd7ecec29f93945801ad3c", "score": "0.5950301", "text": "def test_snat_func_12(self):\n LOG.info(\n \"\\n**** Execution of Testcase TEST_NAT_FUNC_12 starts ****\")\n #NOTE: For this TC, want to add host_pool_cidr to the L3out\n #while remove it from L3out1(this was already added by\n #test_runner func).Ensure to list this TC as the last TC to run\n\tif not self.plugin:\n self.steps.addhostpoolcidr(delete=True,flag=self.flag)\n self.steps.addhostpoolcidr(l3out=EXTSEG_SEC)\n if not self.steps.testCreateExtSegWithDefault(EXTSEG_PRI):\n return 0\n if not self.steps.testCreatePtgDefaultL3p():\n return 0\n if not self.steps.testCreateNonDefaultL3pAndL2p():\n return 0\n if not self.steps.testCreatePtgWithNonDefaultL3p():\n return 0\n if not self.steps.testCreatePolicyTargetForEachPtg():\n return 0\n if not self.steps.testCreateUpdateExternalPolicy():\n return 0\n for ptgtype in ['internal','external']:\n if not self.steps.testApplyUpdatePrsToPtg(\n ptgtype,\n PRS_ICMP_TCP\n ):\n return 0\n self.steps.AddSShContract(self.apicip) ## Adding SSH contract\n if not self.steps.testLaunchVmsForEachPt(az2=self.avail_zone):\n return 0\n if self.flag != 'default_external_segment_name':\n if not self.steps.testAssociateExtSegToBothL3ps():\n return 0\n if not self.steps.testCreateNatPoolAssociateExtSeg():\n return 0\n if not self.steps.testVerifyCfgdObjects():\n return 0\n if not self.steps.testAssociateFipToVMs():\n return 0\n sleep(10)\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n self.fipsubnet1,\n self.gwip1_extrtr,\n action='update'\n )\n #Verify DNATed traffic from ExtRtr to all VMs\n LOG.info(\"\\n DNATed Traffic from ExtRTR to VMs\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr):\n return 0\n DcExtsegid = self.steps.testCreateExtSegWithDefault(EXTSEG_SEC)\n if not DcExtsegid:\n return 0\n print 'DcExtSegID ==',DcExtsegid\n if not self.steps.testCreateUpdateExternalPolicy(extseg=DcExtsegid,\n extpol='nondefault'):\n return 0\n if not self.steps.testApplyUpdatePrsToPtg(\n 'external',\n PRS_ICMP_TCP\n ):\n return 0\n if not self.steps.testDisassociateFipFromVMs(vmname=True,\n release_fip=False):\n return 0\n if not self.steps.testAssociateExtSegToBothL3ps(extsegid=DcExtsegid,\n both=False):\n return 0\n #Verify DNATed traffic from ExtRtr to 1 VM on L3out1\n LOG.info(\n \"\\n DNATed Traffic from ExtRTR to the ONLY VM with FIP\")\n if not self.steps.testTrafficFromExtRtrToVmFip(self.extrtr,fip=True):\n return 0\n #Verify SNATed traffic from ExtRtr to 1 VM on the L3out2\n LOG.info(\n \"\\n SNATed Traffic from the ONLY VM without FIP to ExtRtr\")\n self.forextrtr.add_route_in_extrtr(\n self.extrtr,\n SNATPOOL,\n self.gwip2_extrtr,\n action='update'\n )\n if not 
self.steps.testTrafficFromVMsToExtRtr(self.targetiplist):\n return 0", "title": "" }, { "docid": "41a0529d9c1e2b079fc37007026f47e7", "score": "0.59394413", "text": "def ipv6():\n pass", "title": "" }, { "docid": "18a272524234bcff1ffaebc94484a064", "score": "0.5930842", "text": "def test_unknown_proto(self):\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n # in2out\n p = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=self.tcp_port_in, dport=20)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n p = self.pg1.get_capture(1)\n\n p = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / GRE()\n / IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4)\n / TCP(sport=1234, dport=1234)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n p = self.pg1.get_capture(1)\n packet = p[0]\n try:\n self.assertEqual(packet[IP].src, self.nat_addr)\n self.assertEqual(packet[IP].dst, self.pg1.remote_ip4)\n self.assertEqual(packet.haslayer(GRE), 1)\n self.assert_packet_checksums_valid(packet)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", packet))\n raise\n\n # out2in\n p = (\n Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / GRE()\n / IP(src=self.pg2.remote_ip4, dst=self.pg2.remote_ip4)\n / TCP(sport=1234, dport=1234)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n p = self.pg0.get_capture(1)\n packet = p[0]\n try:\n self.assertEqual(packet[IP].src, self.pg1.remote_ip4)\n self.assertEqual(packet[IP].dst, self.pg0.remote_ip4)\n self.assertEqual(packet.haslayer(GRE), 1)\n self.assert_packet_checksums_valid(packet)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", packet))\n raise", "title": "" }, { "docid": "c7580576596f15590a9d53ef7552a0cf", "score": "0.5930737", "text": "def test_multiple_vrf_2(self):\n\n external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n port = 0\n\n self.nat_add_address(self.nat_addr)\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_ed_add_del_output_interface(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1, flags=flags\n )\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg5.remote_ip4,\n external_addr,\n local_port,\n external_port,\n vrf_id=1,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n p = (\n Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac)\n / IP(src=self.pg5.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=2345, dport=22)\n )\n self.pg5.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.nat_addr)\n self.assert_packet_checksums_valid(p)\n port = tcp.sport\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=22, dport=port)\n )\n self.pg1.add_stream(p)\n 
self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg5.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg5.remote_ip4)\n self.assertEqual(tcp.dport, 2345)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "8f11eec91f4312e3e6156ffafa448c25", "score": "0.591673", "text": "def test_0411_spoof_test(self):\n self.run_negative_conn_test(AF_INET, 1)", "title": "" }, { "docid": "c0590a2af13abea3a80a1fdcb3b68408", "score": "0.59117246", "text": "def test_dynamic(self):\n pkt_count = 1500\n tcp_port_offset = 20\n udp_port_offset = 20\n icmp_id_offset = 20\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n # in2out\n tc1 = self.statistics[\"/nat44-ed/in2out/slowpath/tcp\"]\n uc1 = self.statistics[\"/nat44-ed/in2out/slowpath/udp\"]\n ic1 = self.statistics[\"/nat44-ed/in2out/slowpath/icmp\"]\n dc1 = self.statistics[\"/nat44-ed/in2out/slowpath/drops\"]\n\n i2o_pkts = [[] for x in range(0, self.vpp_worker_count)]\n\n for i in range(pkt_count):\n p = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=tcp_port_offset + i, dport=20)\n )\n i2o_pkts[p[TCP].sport % self.vpp_worker_count].append(p)\n\n p = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / UDP(sport=udp_port_offset + i, dport=20)\n )\n i2o_pkts[p[UDP].sport % self.vpp_worker_count].append(p)\n\n p = (\n Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / ICMP(id=icmp_id_offset + i, type=\"echo-request\")\n )\n i2o_pkts[p[ICMP].id % self.vpp_worker_count].append(p)\n\n for i in range(0, self.vpp_worker_count):\n if len(i2o_pkts[i]) > 0:\n self.pg0.add_stream(i2o_pkts[i], worker=i)\n\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(pkt_count * 3, timeout=5)\n\n if_idx = self.pg0.sw_if_index\n tc2 = self.statistics[\"/nat44-ed/in2out/slowpath/tcp\"]\n uc2 = self.statistics[\"/nat44-ed/in2out/slowpath/udp\"]\n ic2 = self.statistics[\"/nat44-ed/in2out/slowpath/icmp\"]\n dc2 = self.statistics[\"/nat44-ed/in2out/slowpath/drops\"]\n\n self.assertEqual(tc2[:, if_idx].sum() - tc1[:, if_idx].sum(), pkt_count)\n self.assertEqual(uc2[:, if_idx].sum() - uc1[:, if_idx].sum(), pkt_count)\n self.assertEqual(ic2[:, if_idx].sum() - ic1[:, if_idx].sum(), pkt_count)\n self.assertEqual(dc2[:, if_idx].sum() - dc1[:, if_idx].sum(), 0)\n\n self.logger.info(self.vapi.cli(\"show trace\"))\n\n # out2in\n tc1 = self.statistics[\"/nat44-ed/out2in/fastpath/tcp\"]\n uc1 = self.statistics[\"/nat44-ed/out2in/fastpath/udp\"]\n ic1 = self.statistics[\"/nat44-ed/out2in/fastpath/icmp\"]\n dc1 = self.statistics[\"/nat44-ed/out2in/fastpath/drops\"]\n\n recvd_tcp_ports = set()\n recvd_udp_ports = set()\n recvd_icmp_ids = set()\n\n for p in capture:\n if TCP in p:\n recvd_tcp_ports.add(p[TCP].sport)\n if UDP in p:\n recvd_udp_ports.add(p[UDP].sport)\n if ICMP in p:\n recvd_icmp_ids.add(p[ICMP].id)\n\n recvd_tcp_ports = list(recvd_tcp_ports)\n recvd_udp_ports = list(recvd_udp_ports)\n recvd_icmp_ids = list(recvd_icmp_ids)\n\n o2i_pkts = [[] for x in range(0, self.vpp_worker_count)]\n for i in range(pkt_count):\n p = (\n Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n / 
IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(dport=choice(recvd_tcp_ports), sport=20)\n )\n o2i_pkts[p[TCP].dport % self.vpp_worker_count].append(p)\n\n p = (\n Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / UDP(dport=choice(recvd_udp_ports), sport=20)\n )\n o2i_pkts[p[UDP].dport % self.vpp_worker_count].append(p)\n\n p = (\n Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / ICMP(id=choice(recvd_icmp_ids), type=\"echo-reply\")\n )\n o2i_pkts[p[ICMP].id % self.vpp_worker_count].append(p)\n\n for i in range(0, self.vpp_worker_count):\n if len(o2i_pkts[i]) > 0:\n self.pg1.add_stream(o2i_pkts[i], worker=i)\n\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(pkt_count * 3)\n for packet in capture:\n try:\n self.assert_packet_checksums_valid(packet)\n self.assertEqual(packet[IP].dst, self.pg0.remote_ip4)\n if packet.haslayer(TCP):\n self.assert_in_range(\n packet[TCP].dport,\n tcp_port_offset,\n tcp_port_offset + pkt_count,\n \"dst TCP port\",\n )\n elif packet.haslayer(UDP):\n self.assert_in_range(\n packet[UDP].dport,\n udp_port_offset,\n udp_port_offset + pkt_count,\n \"dst UDP port\",\n )\n else:\n self.assert_in_range(\n packet[ICMP].id,\n icmp_id_offset,\n icmp_id_offset + pkt_count,\n \"ICMP id\",\n )\n except:\n self.logger.error(\n ppp(\"Unexpected or invalid packet (inside network):\", packet)\n )\n raise\n\n if_idx = self.pg1.sw_if_index\n tc2 = self.statistics[\"/nat44-ed/out2in/fastpath/tcp\"]\n uc2 = self.statistics[\"/nat44-ed/out2in/fastpath/udp\"]\n ic2 = self.statistics[\"/nat44-ed/out2in/fastpath/icmp\"]\n dc2 = self.statistics[\"/nat44-ed/out2in/fastpath/drops\"]\n\n self.assertEqual(tc2[:, if_idx].sum() - tc1[:, if_idx].sum(), pkt_count)\n self.assertEqual(uc2[:, if_idx].sum() - uc1[:, if_idx].sum(), pkt_count)\n self.assertEqual(ic2[:, if_idx].sum() - ic1[:, if_idx].sum(), pkt_count)\n self.assertEqual(dc2[:, if_idx].sum() - dc1[:, if_idx].sum(), 0)\n\n sc = self.statistics[\"/nat44-ed/total-sessions\"]\n self.assertEqual(\n sc[:, 0].sum(),\n len(recvd_tcp_ports) + len(recvd_udp_ports) + len(recvd_icmp_ids),\n )", "title": "" }, { "docid": "52a8ccc1740995102a0bae528f03236b", "score": "0.5910309", "text": "def test_0410_spoof_test(self):\n self.run_negative_conn_test(AF_INET, 0)", "title": "" }, { "docid": "322abd0deef9fa783e235f0eb04d4f03", "score": "0.58874875", "text": "def test_v4ok(self):\n addr = IPAddr('127.127.127.127')\n self.assertEqual(addr.family, socket.AF_INET)\n self.assertEqual(addr.addr, socket.inet_aton('127.127.127.127'))", "title": "" }, { "docid": "34c0d125f17fd250ca42265f272ad9f6", "score": "0.5861034", "text": "def test_01_create_lb_rule_src_nat(self):\n\n # Validate the Following:\n #1. listLoadBalancerRules should return the added rule\n #2. attempt to ssh twice on the load balanced IP\n #3. 
verify using the UNAME of the VM\n # that round robin is indeed happening as expected\n src_nat_ip_addrs = PublicIPAddress.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.assertEqual(\n isinstance(src_nat_ip_addrs, list),\n True,\n \"Check list response returns a valid list\"\n )\n src_nat_ip_addr = src_nat_ip_addrs[0]\n\n # Check if VM is in Running state before creating LB rule\n vm_response = VirtualMachine.list(\n self.apiclient,\n account=self.account.name,\n domainid=self.account.domainid\n )\n\n self.assertEqual(\n isinstance(vm_response, list),\n True,\n \"Check list VM returns a valid list\"\n )\n\n self.assertNotEqual(\n len(vm_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n for vm in vm_response:\n self.assertEqual(\n vm.state,\n 'Running',\n \"VM state should be Running before creating a NAT rule.\"\n )\n\n #Create Load Balancer rule and assign VMs to rule\n lb_rule = LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n src_nat_ip_addr.id,\n accountid=self.account.name\n )\n self.cleanup.append(lb_rule)\n lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])\n lb_rules = list_lb_rules(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n #verify listLoadBalancerRules lists the added load balancing rule\n self.assertNotEqual(\n len(lb_rules),\n 0,\n \"Check Load Balancer Rule in its List\"\n )\n self.assertEqual(\n lb_rules[0].id,\n lb_rule.id,\n \"Check List Load Balancer Rules returns valid Rule\"\n )\n\n # listLoadBalancerRuleInstances should list all\n # instances associated with that LB rule\n lb_instance_rules = list_lb_instances(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_instance_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(lb_instance_rules),\n 0,\n \"Check Load Balancer instances Rule in its List\"\n )\n self.debug(\"lb_instance_rules Ids: %s, %s\" % (\n lb_instance_rules[0].id,\n lb_instance_rules[1].id\n ))\n self.debug(\"VM ids: %s, %s\" % (self.vm_1.id, self.vm_2.id))\n\n self.assertIn(\n lb_instance_rules[0].id,\n [self.vm_1.id, self.vm_2.id],\n \"Check List Load Balancer instances Rules returns valid VM ID\"\n )\n\n self.assertIn(\n lb_instance_rules[1].id,\n [self.vm_1.id, self.vm_2.id],\n \"Check List Load Balancer instances Rules returns valid VM ID\"\n )\n\n\n unameResults = []\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults, True)\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults)\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults)\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults)\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults)\n\n self.debug(\"UNAME: %s\" % str(unameResults))\n self.assertIn(\n \"Linux\",\n unameResults,\n \"Check if ssh succeeded for server1\"\n )\n self.assertIn(\n \"Linux\",\n unameResults,\n \"Check if ssh succeeded for server2\"\n )\n\n #SSH should pass till there is a last VM associated with LB rule\n lb_rule.remove(self.apiclient, [self.vm_2])\n\n # making unameResultss list empty\n unameResults[:] = []\n\n try:\n self.debug(\"SSHing into IP address: %s after removing VM (ID: %s)\" %\n (\n src_nat_ip_addr.ipaddress,\n self.vm_2.id\n ))\n\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults)\n self.assertIn(\n \"Linux\",\n unameResults,\n \"Check if ssh succeeded for server1\"\n )\n except Exception as e:\n self.fail(\"%s: SSH failed 
for VM with IP Address: %s\" %\n (e, src_nat_ip_addr.ipaddress))\n\n lb_rule.remove(self.apiclient, [self.vm_1])\n\n with self.assertRaises(Exception):\n self.debug(\"Removed all VMs, trying to SSH\")\n self.try_ssh(src_nat_ip_addr.ipaddress, unameResults)\n return", "title": "" }, { "docid": "08fcb5788d72d3966198c391d091e911", "score": "0.5854702", "text": "def test_icmp_error(self):\n\n payload = \"H\" * 10\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n # in2out (initiate connection)\n p1 = [\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / UDP(sport=21, dport=20)\n / payload,\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=21, dport=20, flags=\"S\")\n / payload,\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / ICMP(type=\"echo-request\", id=7777)\n / payload,\n ]\n\n capture = self.send_and_expect(self.pg0, p1, self.pg1)\n\n # out2in (send error message)\n p2 = [\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / ICMP(type=\"dest-unreach\", code=\"port-unreachable\")\n / c[IP:]\n for c in capture\n ]\n\n capture = self.send_and_expect(self.pg1, p2, self.pg0)\n\n for c in capture:\n try:\n assert c[IP].dst == self.pg0.remote_ip4\n assert c[IPerror].src == self.pg0.remote_ip4\n except AssertionError as a:\n raise AssertionError(f\"Packet {pr(c)} not translated properly\") from a", "title": "" }, { "docid": "842d986522a8a95dc65929c6990a0b17", "score": "0.5845603", "text": "def runTest(self):\n\n print(\"\\nStarting Everflow Neighbor Discovery test\")\n\n self.check_mirrored_packet()\n self.check_mirrored_packet(ipv6=True)", "title": "" }, { "docid": "ff5f5b3c175fee09e26858261b092832", "score": "0.5840701", "text": "def test_lb_affinity(self):\n external_addr = self.nat_addr\n external_port = 80\n local_port = 8080\n server1 = self.pg0.remote_hosts[0]\n server2 = self.pg0.remote_hosts[1]\n\n locals = [\n {\"addr\": server1.ip4, \"port\": local_port, \"probability\": 50, \"vrf_id\": 0},\n {\"addr\": server2.ip4, \"port\": local_port, \"probability\": 50, \"vrf_id\": 0},\n ]\n\n self.nat_add_address(self.nat_addr)\n self.vapi.nat44_add_del_lb_static_mapping(\n is_add=1,\n external_addr=external_addr,\n external_port=external_port,\n protocol=IP_PROTOS.tcp,\n affinity=10800,\n local_num=len(locals),\n locals=locals,\n )\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, flags=flags, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n p = (\n Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=1025, dport=external_port)\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n backend = capture[0][IP].dst\n\n sessions = self.vapi.nat44_user_session_dump(backend, 0)\n self.assertEqual(len(sessions), 1)\n self.assertTrue(sessions[0].flags & self.config_flags.NAT_IS_EXT_HOST_VALID)\n self.vapi.nat44_del_session(\n address=sessions[0].inside_ip_address,\n port=sessions[0].inside_port,\n protocol=sessions[0].protocol,\n flags=(\n self.config_flags.NAT_IS_INSIDE\n | 
self.config_flags.NAT_IS_EXT_HOST_VALID\n ),\n ext_host_address=sessions[0].ext_host_address,\n ext_host_port=sessions[0].ext_host_port,\n )\n\n pkts = []\n for port in range(1030, 1100):\n p = (\n Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=port, dport=external_port)\n )\n pkts.append(p)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n for p in capture:\n self.assertEqual(p[IP].dst, backend)", "title": "" }, { "docid": "9ea8bb283c3fc8179b1ca065e446808a", "score": "0.58350813", "text": "def test_0011_basic_conn_test(self):\n self.run_basic_conn_test(AF_INET6, 0)", "title": "" }, { "docid": "f4a5c638e070ab956d8a2d4b8e38b1e9", "score": "0.58343613", "text": "def test_multiple_vrf_1(self):\n\n external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n port = 0\n\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1, flags=flags\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg6.sw_if_index, is_add=1\n )\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg5.remote_ip4,\n external_addr,\n local_port,\n external_port,\n vrf_id=1,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n p = (\n Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac)\n / IP(src=self.pg6.remote_ip4, dst=external_addr)\n / TCP(sport=12345, dport=external_port)\n )\n self.pg6.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg5.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg5.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n p = (\n Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac)\n / IP(src=self.pg5.remote_ip4, dst=self.pg6.remote_ip4)\n / TCP(sport=local_port, dport=12345)\n )\n self.pg5.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, external_addr)\n self.assertEqual(tcp.sport, external_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "1a6ade9c12120b0039ca6bc90eadeacf", "score": "0.57995003", "text": "def show_nat(self, *a):\n self.checkFirewall()\n masq = self.config.Shorewall.get('masq', [])\n natRules = []\n for k,mas in masq.items():\n for v in mas:\n if type(v) == list:\n l = [str(Console.renameIfaces(k))]\n l.extend([str(Console.renameIfaces(i.replace('-', 'Any'))) for i in v])\n natRules.append(l)\n else:\n natRules.append([\n Console.renameIfaces(k), \n 'Any', Console.renameIfaces(v), 'Any', 'Any', 'Any', 'Any'\n ])\n if len(natRules) < 1:\n print \"No nat rules found\"\n return\n else:\n print \" RN| Dest Int | Dest Network | Src Int | Src Network | Nat IP | Protocol | Port |\"\n print \" |----------+---------------------+----------+---------------------+-----------------+----------+-------|\"\n ruleIndex = 0\n for natRule in natRules:\n print \"% 4d| % 8s | % 19s | % 8s | % 19s | % 15s | % 8s | % 5s |\" % (\n ruleIndex, natRule[0], natRule[1], 
natRule[2], \n natRule[3], natRule[4], natRule[5], natRule[6],\n ) #Not ideal\n ruleIndex += 1\n \n print \" +----------+---------------------+----------+---------------------+-----------------+----------+-------+\"", "title": "" }, { "docid": "1b4456a4ae8d318712201e0870b19856", "score": "0.578539", "text": "def test_icmp_error_fwd_outbound(self):\n\n # Ensure that an outbound ICMP error message is properly associated\n # with the inbound forward bypass session it is related to.\n payload = \"H\" * 10\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n # enable forwarding and initiate connection out2in\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n p1 = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.pg0.remote_ip4)\n / UDP(sport=21, dport=20)\n / payload\n )\n\n self.pg1.add_stream(p1)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)[0]\n\n self.logger.info(self.vapi.cli(\"show nat44 sessions\"))\n\n # reply with ICMP error message in2out\n # We cannot reliably retrieve forward bypass sessions via the API.\n # session dumps for a user will only look on the worker that the\n # user is supposed to be mapped to in2out. The forward bypass session\n # is not necessarily created on that worker.\n p2 = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / ICMP(type=\"dest-unreach\", code=\"port-unreachable\")\n / capture[IP:]\n )\n\n self.pg0.add_stream(p2)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(1)[0]\n\n self.logger.info(self.vapi.cli(\"show nat44 sessions\"))\n\n self.logger.info(ppp(\"p1 packet:\", p1))\n self.logger.info(ppp(\"p2 packet:\", p2))\n self.logger.info(ppp(\"capture packet:\", capture))", "title": "" }, { "docid": "e0fde30be8b67e5bae92ee04a9fc0cf2", "score": "0.578467", "text": "def test_tcp_close(self):\n config = self.vapi.nat44_show_running_config()\n old_timeouts = config.timeouts\n new_transitory = 2\n self.vapi.nat_set_timeouts(\n udp=old_timeouts.udp,\n tcp_established=old_timeouts.tcp_established,\n icmp=old_timeouts.icmp,\n tcp_transitory=new_transitory,\n )\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n self.nat_add_address(self.pg1.local_ip4)\n twice_nat_addr = \"10.0.1.3\"\n service_ip = \"192.168.16.150\"\n self.nat_add_address(twice_nat_addr, twice_nat=1)\n\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, flags=flags, is_add=1\n )\n self.vapi.nat44_ed_add_del_output_interface(\n is_add=1, sw_if_index=self.pg1.sw_if_index\n )\n\n flags = (\n self.config_flags.NAT_IS_OUT2IN_ONLY | self.config_flags.NAT_IS_TWICE_NAT\n )\n self.nat_add_static_mapping(\n self.pg0.remote_ip4, service_ip, 80, 80, proto=IP_PROTOS.tcp, flags=flags\n )\n sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)\n start_sessnum = len(sessions)\n\n # SYN packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=service_ip)\n / TCP(sport=33898, dport=80, flags=\"S\")\n )\n capture = self.send_and_expect(self.pg1, p, self.pg0, n_rx=1)\n p = capture[0]\n tcp_port = p[TCP].sport\n\n # SYN + ACK packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, 
dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=twice_nat_addr)\n / TCP(sport=80, dport=tcp_port, flags=\"SA\")\n )\n self.send_and_expect(self.pg0, p, self.pg1, n_rx=1)\n\n # ACK packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=service_ip)\n / TCP(sport=33898, dport=80, flags=\"A\")\n )\n self.send_and_expect(self.pg1, p, self.pg0, n_rx=1)\n\n # FIN packet in -> out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=twice_nat_addr)\n / TCP(sport=80, dport=tcp_port, flags=\"FA\", seq=100, ack=300)\n )\n self.send_and_expect(self.pg0, p, self.pg1, n_rx=1)\n\n # FIN+ACK packet out -> in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=service_ip)\n / TCP(sport=33898, dport=80, flags=\"FA\", seq=300, ack=101)\n )\n self.send_and_expect(self.pg1, p, self.pg0, n_rx=1)\n\n # ACK packet in -> out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=twice_nat_addr)\n / TCP(sport=80, dport=tcp_port, flags=\"A\", seq=101, ack=301)\n )\n self.send_and_expect(self.pg0, p, self.pg1, n_rx=1)\n\n # session now in transitory timeout, but traffic still flows\n # try FIN packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=service_ip)\n / TCP(sport=33898, dport=80, flags=\"F\")\n )\n self.pg1.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n\n self.virtual_sleep(new_transitory, \"wait for transitory timeout\")\n self.pg0.get_capture(1)\n\n # session should still exist\n sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)\n self.assertEqual(len(sessions) - start_sessnum, 1)\n\n # send FIN+ACK packet out -> in - will cause session to be wiped\n # but won't create a new session\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=service_ip)\n / TCP(sport=33898, dport=80, flags=\"FA\", seq=300, ack=101)\n )\n self.send_and_assert_no_replies(self.pg1, p)\n sessions = self.vapi.nat44_user_session_dump(self.pg0.remote_ip4, 0)\n self.assertEqual(len(sessions) - start_sessnum, 0)", "title": "" }, { "docid": "1550d80c9cea4dc12d6f21352a471ed6", "score": "0.5768169", "text": "def test_self_twice_nat_lb_positive(self):\n self.twice_nat_common(lb=True, self_twice_nat=True, same_pg=True, client_id=1)", "title": "" }, { "docid": "d7854050e101d1a9d0a036e6f842ff7f", "score": "0.57635325", "text": "def portForward(port):\n try:\n import miniupnpc\n u = miniupnpc.UPnP(None, None, 200, port)\n #Begin Debug info\n safeprint('inital(default) values :')\n safeprint(' discoverdelay' + str(u.discoverdelay))\n safeprint(' lanaddr' + str(u.lanaddr))\n safeprint(' multicastif' + str(u.multicastif))\n safeprint(' minissdpdsocket' + str(u.minissdpdsocket))\n safeprint('Discovering... 
delay=%ums' % u.discoverdelay)\n safeprint(str(u.discover()) + 'device(s) detected')\n #End Debug info\n u.selectigd()\n global ext_ip\n ext_ip = u.externalipaddress()\n safeprint(\"external ip is: \" + str(ext_ip))\n for i in range(0,20):\n try:\n safeprint(\"Port forward try: \" + str(i),verbosity=1)\n if u.addportmapping(port+i, 'TCP', get_lan_ip(), port, 'Bounty Net', ''):\n global ext_port\n ext_port = port + i\n safeprint(\"External port is \" + str(ext_port))\n return True\n except Exception as error:\n safeprint(\"Failed: \" + str(type(error)))\n safeprint(error)\n except Exception as error:\n safeprint(\"Failed: \" + str(type(error)))\n safeprint(error)\n return False", "title": "" }, { "docid": "4e5aa8a6b69196cb59419c2495f047f8", "score": "0.5752728", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (3000:10::10 -> 3000:30::10)\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n # ingress\n port1 = self.select_port(device, swports[0])\n\n # egress\n port2 = self.select_port(device, swports[1])\n port3 = self.select_port(device, swports[2])\n\n # make ingress port a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n\n # Add SVI IP addresses to vlan 10\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv6_ipaddr('3000:10::1', 120)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n # make egress ports 'sub-interface' ports\n eth2_20 = self.add_logical_l3intf(device, vrf, rmac)\n ethcfg2 = self.cfg_subintf_on_port(device, port2, eth2_20, vlan_id=20)\n ipaddr2 = self.make_ipv6_ipaddr('3000:20::1', 120)\n self.cfg_ip_address(device, eth2_20, vrf, ipaddr2)\n\n eth3_20 = self.add_logical_l3intf(device, vrf, rmac)\n ethcfg3 = self.cfg_subintf_on_port(device, port3, eth3_20, vlan_id=20)\n ipaddr3 = self.make_ipv6_ipaddr('3000:21::1', 120)\n self.cfg_ip_address(device, eth3_20, vrf, ipaddr3)\n\n # neighbor ip\n ipaddr4 = self.make_ipv6_ipaddr('3000:30::10', 128)\n\n nhop1 = self.add_nhop(device, eth2_20)\n self.add_neighbor_l3intf(device, eth2_20, nhop1, '00:11:22:33:44:55',\n ipaddr4)\n\n nhop2 = self.add_nhop(device, eth3_20)\n self.add_neighbor_l3intf(device, eth3_20, nhop2, '00:11:22:33:44:56',\n ipaddr4)\n\n # ecmp\n ecmp = self.add_ecmp(device)\n self.add_ecmp_member(device, ecmp, 2, [nhop1, nhop2])\n self.add_static_route(device, vrf, ipaddr4, ecmp)\n\n pkt = simple_tcpv6_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ipv6_dst='3000:30:0:0:0:0:0:10',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=10,\n ipv6_hlim=64)\n exp_pkt1 = simple_tcpv6_packet(\n eth_dst='00:11:22:33:44:55',\n eth_src='00:77:66:55:44:33',\n ipv6_dst='3000:30:0:0:0:0:0:10',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=20,\n ipv6_hlim=63)\n\n exp_pkt2 = simple_tcpv6_packet(\n eth_dst='00:11:22:33:44:56',\n eth_src='00:77:66:55:44:33',\n ipv6_dst='3000:30:0:0:0:0:0:10',\n ipv6_src='3000:10:0:0:0:0:0:10',\n dl_vlan_enable=True,\n vlan_vid=20,\n ipv6_hlim=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_any_packet_any_port(self, [exp_pkt1, exp_pkt2],\n [swports[1], swports[2]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": 
"e4d8ed3262e234a17fedb9d417586879", "score": "0.5749266", "text": "def test_output_feature_and_service2(self):\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n self.nat_add_address(self.nat_addr)\n\n self.vapi.nat44_ed_add_del_output_interface(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n # session initiated from service host - translate\n pkts = self.create_stream_in(self.pg0, self.pg1)\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_out(capture, ignore_port=True)\n\n pkts = self.create_stream_out(self.pg1)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n self.verify_capture_in(capture, self.pg0)\n\n # session initiated from remote host - do not translate\n tcp_port_in = self.tcp_port_in\n udp_port_in = self.udp_port_in\n icmp_id_in = self.icmp_id_in\n\n self.tcp_port_in = 60303\n self.udp_port_in = 60304\n self.icmp_id_in = 60305\n\n try:\n pkts = self.create_stream_out(\n self.pg1, self.pg0.remote_ip4, use_inside_ports=True\n )\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n self.verify_capture_in(capture, self.pg0)\n\n pkts = self.create_stream_in(self.pg0, self.pg1)\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4, same_port=True)\n finally:\n self.tcp_port_in = tcp_port_in\n self.udp_port_in = udp_port_in\n self.icmp_id_in = icmp_id_in", "title": "" }, { "docid": "3648acedcd94258ba2b7f6cf15954047", "score": "0.5743709", "text": "def test_map_t_ip6_psid(self):\n\n #\n # Add a domain that maps from pg0 to pg1\n #\n map_dst = \"2001:db8::/32\"\n map_src = \"1234:5678:90ab:cdef::/64\"\n ip4_pfx = \"192.168.0.0/24\"\n tag = \"MAP-T Test Domain\"\n\n self.vapi.map_add_domain(\n ip6_prefix=map_dst,\n ip4_prefix=ip4_pfx,\n ip6_src=map_src,\n ea_bits_len=16,\n psid_offset=6,\n psid_length=4,\n mtu=1500,\n tag=tag,\n )\n\n # Enable MAP-T on interfaces.\n self.vapi.map_if_enable_disable(\n is_enable=1, sw_if_index=self.pg0.sw_if_index, is_translation=1\n )\n self.vapi.map_if_enable_disable(\n is_enable=1, sw_if_index=self.pg1.sw_if_index, is_translation=1\n )\n\n map_route = VppIpRoute(\n self,\n \"2001:db8::\",\n 32,\n [\n VppRoutePath(\n self.pg1.remote_ip6,\n self.pg1.sw_if_index,\n proto=DpoProto.DPO_PROTO_IP6,\n )\n ],\n )\n map_route.add_vpp_config()\n\n p_ether6 = Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac)\n p_ip6 = IPv6(\n src=\"2001:db8:1f0::c0a8:1:f\", dst=\"1234:5678:90ab:cdef:ac:1001:200:0\"\n )\n\n # Send good IPv6 source port, ensure translated IPv4 received\n payload = TCP(sport=0xABCD, dport=80)\n p6 = p_ether6 / p_ip6 / payload\n p4_translated = IP(src=\"192.168.0.1\", dst=self.pg0.remote_ip4) / payload\n p4_translated.id = 0\n p4_translated.ttl -= 1\n rx = self.send_and_expect(self.pg1, p6 * 1, self.pg0)\n for p in rx:\n self.validate(p[1], p4_translated)\n\n # Send bad IPv6 source port, ensure translated IPv4 not received\n payload = TCP(sport=0xDCBA, dport=80)\n p6 = p_ether6 / p_ip6 / payload\n self.send_and_assert_no_replies(self.pg1, p6 * 1)", "title": "" }, { "docid": "a6012c571a37a699456a0129ed96300b", "score": "0.57225955", "text": "def neighsol(addr, src, iface, timeout=1, chainCC=0):\r\n\r\n nsma = 
in6_getnsma(inet_pton(socket.AF_INET6, addr))\r\n d = inet_ntop(socket.AF_INET6, nsma)\r\n dm = in6_getnsmac(nsma)\r\n p = Ether(dst=dm)/IPv6(dst=d, src=src, hlim=255)\r\n p /= ICMPv6ND_NS(tgt=addr)\r\n p /= ICMPv6NDOptSrcLLAddr(lladdr=get_if_hwaddr(iface))\r\n res = srp1(p,type=ETH_P_IPV6, iface=iface, timeout=1, verbose=0, \r\n chainCC=chainCC) \r\n\r\n return res", "title": "" }, { "docid": "2f4ce8708a0c23512513a4f2651d9abe", "score": "0.5722262", "text": "def test_source_ip_success(self):\n self.do_test_connect(socket.AF_INET, \"127.0.0.1\", source_ip=\"127.0.0.1\")", "title": "" }, { "docid": "5d5665c990b6ddce65a658e9bbd44562", "score": "0.5711513", "text": "def runTest(self):\n print \"\"\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.30.5 [id = 105])\")\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n # ingress\n port1 = self.select_port(device, swports[0])\n\n # egress\n port2 = self.select_port(device, swports[1])\n port3 = self.select_port(device, swports[2])\n\n # make ingress port a 'trunk' port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='trunk')\n self.add_vlan_member(device, vlan10, eth1)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n\n # make egress ports 'sub-interface' ports\n eth2_20 = self.add_logical_l3intf(device, vrf, rmac)\n ethcfg2 = self.cfg_subintf_on_port(device, port2, eth2_20, vlan_id=20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, eth2_20, vrf, ipaddr2)\n\n eth3_20 = self.add_logical_l3intf(device, vrf, rmac)\n ethcfg3 = self.cfg_subintf_on_port(device, port3, eth3_20, vlan_id=20)\n ipaddr3 = self.make_ipv4_ipaddr('172.16.21.1', 24)\n self.cfg_ip_address(device, eth3_20, vrf, ipaddr3)\n\n # neighbor ip\n ipaddr4 = self.make_ipv4_ipaddr('172.16.30.5', 32)\n\n nhop1 = self.add_nhop(device, eth2_20)\n self.add_neighbor_l3intf(device, eth2_20, nhop1, '00:11:22:33:44:55',\n ipaddr4)\n\n nhop2 = self.add_nhop(device, eth3_20)\n self.add_neighbor_l3intf(device, eth3_20, nhop2, '00:11:22:33:44:56',\n ipaddr4)\n\n # ecmp\n ecmp = self.add_ecmp(device)\n self.add_ecmp_member(device, ecmp, 2, [nhop1, nhop2])\n self.add_static_route(device, vrf, ipaddr4, ecmp)\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.30.5',\n ip_src='172.16.10.5',\n ip_id=105,\n dl_vlan_enable=True,\n vlan_vid=10,\n ip_ttl=64)\n exp_pkt1 = simple_tcp_packet(\n eth_dst='00:11:22:33:44:55',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.30.5',\n ip_src='172.16.10.5',\n dl_vlan_enable=True,\n vlan_vid=20,\n ip_id=105,\n ip_ttl=63)\n exp_pkt2 = simple_tcp_packet(\n eth_dst='00:11:22:33:44:56',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.30.5',\n ip_src='172.16.10.5',\n dl_vlan_enable=True,\n vlan_vid=20,\n ip_id=105,\n ip_ttl=63)\n\n try:\n send_packet(self, swports[0], str(pkt))\n verify_any_packet_any_port(self, [exp_pkt1, exp_pkt2],\n [swports[1], swports[2]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "df7e6d964627a039f14d1a4ac6014cd2", "score": "0.5711228", "text": "def test_tor(self):\n # If tor is not to be tested, then nothing is done.\n if 
self.TOR is not None:\n # Create a connexion manager.\n cm = ConnectionManager(str_vpn=self.VPN, str_tor_pwd=self.TOR)\n\n # Get the router public IP (might be from the VPN).\n ip_public = cm.request('https://api.ipify.org', bl_clear=True)\n # Get the IP at the TOR exit node. Requests a new identity and ask for the new IP.\n ip_tor_1 = cm.request('https://api.ipify.org')\n cm.new_identity()\n ip_tor_2 = cm.request('https://api.ipify.org')\n\n # Comapring each IPs. they should all be different.\n self.assertNotEqual(ip_public, ip_tor_1)\n self.assertNotEqual(ip_public, ip_tor_2)\n self.assertNotEqual(ip_tor_1, ip_tor_2)", "title": "" }, { "docid": "584c5d685fe02310317a106ad941555c", "score": "0.57059014", "text": "def test_multiple_vrf_5(self):\n\n external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n port = 0\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1, flags=flags\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1, flags=flags\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg6.sw_if_index, is_add=1\n )\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg5.remote_ip4,\n external_addr,\n local_port,\n external_port,\n vrf_id=1,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n self.nat_add_static_mapping(\n self.pg0.remote_ip4,\n external_sw_if_index=self.pg0.sw_if_index,\n local_port=local_port,\n vrf_id=0,\n external_port=external_port,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n # from client to server (both VRF1, no translation)\n p = (\n Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac)\n / IP(src=self.pg6.remote_ip4, dst=self.pg5.remote_ip4)\n / TCP(sport=12348, dport=local_port)\n )\n self.pg6.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg5.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg5.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from server back to client (both VRF1, no translation)\n p = (\n Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac)\n / IP(src=self.pg5.remote_ip4, dst=self.pg6.remote_ip4)\n / TCP(sport=local_port, dport=12348)\n )\n self.pg5.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg5.remote_ip4)\n self.assertEqual(tcp.sport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from client VRF1 to server VRF0 (no translation)\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg6.remote_ip4)\n / TCP(sport=local_port, dport=12349)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg0.remote_ip4)\n self.assertEqual(tcp.sport, local_port)\n 
self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from server VRF0 back to client VRF1 (no translation)\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg6.remote_ip4)\n / TCP(sport=local_port, dport=12349)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg0.remote_ip4)\n self.assertEqual(tcp.sport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from client VRF0 to server VRF1 (no translation)\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg5.remote_ip4)\n / TCP(sport=12344, dport=local_port)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg5.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg5.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from server VRF1 back to client VRF0 (no translation)\n p = (\n Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac)\n / IP(src=self.pg5.remote_ip4, dst=self.pg0.remote_ip4)\n / TCP(sport=local_port, dport=12344)\n )\n self.pg5.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg5.remote_ip4)\n self.assertEqual(tcp.sport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "c88a16727b29c8c72626f04d38dccbf4", "score": "0.5696959", "text": "def test_0111_basic_conn_test(self):\n self.run_negative_conn_test(AF_INET, 1)", "title": "" }, { "docid": "2b099be6532f58e67111ab68eaacdc83", "score": "0.5687459", "text": "def __init__(__self__, *,\n nat_ip_allocate_option: pulumi.Input[str],\n router: pulumi.Input[str],\n source_subnetwork_ip_ranges_to_nat: pulumi.Input[str],\n drain_nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n enable_dynamic_port_allocation: Optional[pulumi.Input[bool]] = None,\n enable_endpoint_independent_mapping: Optional[pulumi.Input[bool]] = None,\n icmp_idle_timeout_sec: Optional[pulumi.Input[int]] = None,\n log_config: Optional[pulumi.Input['RouterNatLogConfigArgs']] = None,\n max_ports_per_vm: Optional[pulumi.Input[int]] = None,\n min_ports_per_vm: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]] = None,\n subnetworks: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkArgs']]]] = None,\n tcp_established_idle_timeout_sec: Optional[pulumi.Input[int]] = None,\n tcp_time_wait_timeout_sec: Optional[pulumi.Input[int]] = None,\n tcp_transitory_idle_timeout_sec: Optional[pulumi.Input[int]] = None,\n udp_idle_timeout_sec: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"nat_ip_allocate_option\", 
nat_ip_allocate_option)\n pulumi.set(__self__, \"router\", router)\n pulumi.set(__self__, \"source_subnetwork_ip_ranges_to_nat\", source_subnetwork_ip_ranges_to_nat)\n if drain_nat_ips is not None:\n pulumi.set(__self__, \"drain_nat_ips\", drain_nat_ips)\n if enable_dynamic_port_allocation is not None:\n pulumi.set(__self__, \"enable_dynamic_port_allocation\", enable_dynamic_port_allocation)\n if enable_endpoint_independent_mapping is not None:\n pulumi.set(__self__, \"enable_endpoint_independent_mapping\", enable_endpoint_independent_mapping)\n if icmp_idle_timeout_sec is not None:\n pulumi.set(__self__, \"icmp_idle_timeout_sec\", icmp_idle_timeout_sec)\n if log_config is not None:\n pulumi.set(__self__, \"log_config\", log_config)\n if max_ports_per_vm is not None:\n pulumi.set(__self__, \"max_ports_per_vm\", max_ports_per_vm)\n if min_ports_per_vm is not None:\n pulumi.set(__self__, \"min_ports_per_vm\", min_ports_per_vm)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if nat_ips is not None:\n pulumi.set(__self__, \"nat_ips\", nat_ips)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if rules is not None:\n pulumi.set(__self__, \"rules\", rules)\n if subnetworks is not None:\n pulumi.set(__self__, \"subnetworks\", subnetworks)\n if tcp_established_idle_timeout_sec is not None:\n pulumi.set(__self__, \"tcp_established_idle_timeout_sec\", tcp_established_idle_timeout_sec)\n if tcp_time_wait_timeout_sec is not None:\n pulumi.set(__self__, \"tcp_time_wait_timeout_sec\", tcp_time_wait_timeout_sec)\n if tcp_transitory_idle_timeout_sec is not None:\n pulumi.set(__self__, \"tcp_transitory_idle_timeout_sec\", tcp_transitory_idle_timeout_sec)\n if udp_idle_timeout_sec is not None:\n pulumi.set(__self__, \"udp_idle_timeout_sec\", udp_idle_timeout_sec)", "title": "" }, { "docid": "f1ed9aa7212bc26933ef3fee9e6a9493", "score": "0.5682614", "text": "def test_permit_ip_address(self):\n self.uut.permit(\"127.0.0.1\")\n self.assertEqual(self.uut.evaluate(\"127.0.0.1\"), True)", "title": "" }, { "docid": "94e9e3ce7d3d11fd885b75b652de88ff", "score": "0.5681334", "text": "def test_permit_ip_network(self):\n self.uut.permit(\"192.168.0.0/16\")\n self.assertEqual(self.uut.evaluate(\"192.168.1.1\"), True)", "title": "" }, { "docid": "a74bfabeee86231efa5388d818a812d7", "score": "0.56654155", "text": "def test_v6ok(self):\n addr = IPAddr('2001:4f8::1')\n self.assertEqual(addr.family, socket.AF_INET6)\n self.assertEqual(addr.addr, socket.inet_pton(socket.AF_INET6, '2001:4f8::1'))", "title": "" }, { "docid": "919c3b48019b956033270116ed622def", "score": "0.5659944", "text": "def ip():\n ipV4 = get('https://api.ipify.org').text\n ipV6 = get('https://api64.ipify.org').text\n print(\"\\nPUBLICK IP VERSION 4 : \" + ipV4)\n if ipV4 != ipV6:\n print(\"\\nPUBLIC IP VERSION 6 : \" + ipV6)\n localIP = socket.gethostbyname(socket.gethostname())\n print(\"\\nLOCAL IP : \" + localIP)", "title": "" }, { "docid": "5c0fe3fe0d694afc85fbe99eb6eb5d31", "score": "0.56508434", "text": "def setupTunnel( main, srcIp, srcPort, dstIp, dstPort ):\n main.step( \"Set up tunnel from Mininet node \" +\n str( srcIp ) + \":\" + str( srcPort ) + \" to ONOS node \"\n + str(dstIp) + \":\" + str(dstPort) )\n forwarding = '%s:%s:%s:%s' % ( srcIp, srcPort, dstIp, dstPort )\n command = 'ssh -nNT -o \"PasswordAuthentication no\" \\\n -o \"StrictHostKeyChecking no\" -l sdn -L %s %s & ' % ( forwarding, dstIp )\n\n\n tunnelResult = main.TRUE\n 
tunnelResult = main.Mininet.node( \"root\", command )\n utilities.assert_equals( expect = True,\n actual = ( \"PasswordAuthentication\" in tunnelResult ),\n onpass = \"Created tunnel succeeded\",\n onfail = \"Create tunnel failed\" )\n if ( \"PasswordAuthentication\" not in tunnelResult ) :\n main.cleanup()\n main.exit()", "title": "" }, { "docid": "a0393078911c5681284de3def00d7780", "score": "0.56424713", "text": "def test_tor_port_onions(node_factory):\n # please define your values\n torip = '127.0.0.1'\n torips = '127.0.0.1:9051'\n torport = 9050\n torserviceport = 9051\n\n if not check_socket(torip, torserviceport):\n return\n\n if not check_socket(torip, torport):\n return\n\n portA, portB = reserve(), reserve()\n\n l1 = node_factory.get_node(may_fail=True, options={'bind-addr': '127.0.0.1:{}'.format(portA), 'addr': ['statictor:{}/torport=45321'.format(torips)]})\n l2 = node_factory.get_node(may_fail=True, options={'bind-addr': '127.0.0.1:{}'.format(portB), 'addr': ['statictor:{}/torport=45321/torblob=11234567890123456789012345678901'.format(torips)]})\n\n assert l1.daemon.is_in_log('45321,127.0.0.1:{}'.format(l1.port))\n assert l2.daemon.is_in_log('x2y4zvh4fn5q3eouuh7nxnc7zeawrqoutljrup2xjtiyxgx3emgkemad.onion:45321,127.0.0.1:{}'.format(l2.port))", "title": "" }, { "docid": "fc1cffc92aec8990996d397d8dc6ffd8", "score": "0.5635593", "text": "def test_reverse_lookup(self):\n ipaddr = '8.8.8.8'\n expect = 'google-public-dns-a.google.com'\n address = ExternalAddress(False, 5).lookup_reverse_dns(ipaddr)\n self.assertEqual(\n expect, address,\n \"invalid reverse lookup for ip %s ('%s' != '%s')\" %\n (ipaddr, expect, address))", "title": "" }, { "docid": "bbc157ade6628541590744af2cf3df3b", "score": "0.5629702", "text": "def test_tcp_session_open_retransmit1(self):\n\n in_port = self.tcp_port_in\n ext_port = self.tcp_external_port\n payload = \"H\" * 10\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n self.vapi.nat_set_timeouts(\n udp=300, tcp_established=7440, tcp_transitory=5, icmp=60\n )\n # SYN packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"S\")\n )\n p = self.send_and_expect(self.pg0, p, self.pg1)[0]\n out_port = p[TCP].sport\n\n # SYN + ACK packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=ext_port, dport=out_port, flags=\"SA\")\n )\n self.send_and_expect(self.pg1, p, self.pg0)\n\n # ACK in->out does not arrive\n\n # resent SYN + ACK packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=ext_port, dport=out_port, flags=\"SA\")\n )\n self.send_and_expect(self.pg1, p, self.pg0)\n\n # ACK packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"A\")\n )\n self.send_and_expect(self.pg0, p, self.pg1)\n\n # Verify that the data can be transmitted after the transitory time\n self.virtual_sleep(6)\n\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"PA\")\n / Raw(payload)\n )\n self.send_and_expect(self.pg0, p, self.pg1)", "title": "" }, { "docid": 
"312c3deb0e2c5f6105917a72a520b5bd", "score": "0.5621329", "text": "def test_multiple_vrf_3(self):\n\n external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n port = 0\n\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1, flags=flags\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg6.sw_if_index, is_add=1\n )\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg0.remote_ip4,\n external_sw_if_index=self.pg0.sw_if_index,\n local_port=local_port,\n vrf_id=0,\n external_port=external_port,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n # from client VRF1 to service VRF0\n p = (\n Ether(src=self.pg6.remote_mac, dst=self.pg6.local_mac)\n / IP(src=self.pg6.remote_ip4, dst=self.pg0.local_ip4)\n / TCP(sport=12346, dport=external_port)\n )\n self.pg6.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg0.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from service VRF0 back to client VRF1\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg6.remote_ip4)\n / TCP(sport=local_port, dport=12346)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg6.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.src, self.pg0.local_ip4)\n self.assertEqual(tcp.sport, external_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "eb0543de82846a083b0aa5dc8a9450bd", "score": "0.5574406", "text": "def test_tcp_session_open_retransmit2(self):\n\n in_port = self.tcp_port_in\n ext_port = self.tcp_external_port\n payload = \"H\" * 10\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n self.vapi.nat_set_timeouts(\n udp=300, tcp_established=7440, tcp_transitory=5, icmp=60\n )\n # SYN packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"S\")\n )\n p = self.send_and_expect(self.pg0, p, self.pg1)[0]\n out_port = p[TCP].sport\n\n # SYN + ACK packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=ext_port, dport=out_port, flags=\"SA\")\n )\n self.send_and_expect(self.pg1, p, self.pg0)\n\n # ACK packet in->out -- not received by the server\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"A\")\n )\n self.send_and_expect(self.pg0, p, self.pg1)\n\n # PUSH + ACK packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"PA\")\n / Raw(payload)\n )\n self.send_and_expect(self.pg0, p, self.pg1)\n\n # resent SYN + ACK packet out->in\n p = (\n 
Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=ext_port, dport=out_port, flags=\"SA\")\n )\n self.send_and_expect(self.pg1, p, self.pg0)\n\n # resent ACK packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"A\")\n )\n self.send_and_expect(self.pg0, p, self.pg1)\n\n # resent PUSH + ACK packet in->out\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"PA\")\n / Raw(payload)\n )\n self.send_and_expect(self.pg0, p, self.pg1)\n\n # ACK packet out->in\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=self.pg1.remote_ip4, dst=self.nat_addr)\n / TCP(sport=ext_port, dport=out_port, flags=\"A\")\n )\n self.send_and_expect(self.pg1, p, self.pg0)\n\n # Verify that the data can be transmitted after the transitory time\n self.virtual_sleep(6)\n\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)\n / TCP(sport=in_port, dport=ext_port, flags=\"PA\")\n / Raw(payload)\n )\n self.send_and_expect(self.pg0, p, self.pg1)", "title": "" }, { "docid": "5fb0b61676dfc04c8e419161b3494dfc", "score": "0.5561785", "text": "def test_deny_after_permitting_other_address(self):\n self.uut.permit(\"127.0.0.1\")\n self.assertEqual(self.uut.evaluate(\"192.168.0.1\"), False)", "title": "" }, { "docid": "f8c25fd73d771bc3bf62197346beec4c", "score": "0.55371857", "text": "def test_0110_basic_conn_test(self):\n self.run_negative_conn_test(AF_INET, 0)", "title": "" }, { "docid": "7c25b2447310a7220259455b492af2ab", "score": "0.5531884", "text": "def test_static_lb_multi_clients(self):\n\n external_addr = self.nat_addr\n external_port = 80\n local_port = 8080\n server1 = self.pg0.remote_hosts[0]\n server2 = self.pg0.remote_hosts[1]\n server3 = self.pg0.remote_hosts[2]\n\n locals = [\n {\"addr\": server1.ip4, \"port\": local_port, \"probability\": 90, \"vrf_id\": 0},\n {\"addr\": server2.ip4, \"port\": local_port, \"probability\": 10, \"vrf_id\": 0},\n ]\n\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, flags=flags, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg1.sw_if_index, is_add=1\n )\n\n self.nat_add_address(self.nat_addr)\n self.vapi.nat44_add_del_lb_static_mapping(\n is_add=1,\n external_addr=external_addr,\n external_port=external_port,\n protocol=IP_PROTOS.tcp,\n local_num=len(locals),\n locals=locals,\n )\n\n server1_n = 0\n server2_n = 0\n clients = ip4_range(self.pg1.remote_ip4, 10, 50)\n pkts = []\n for client in clients:\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=client, dst=self.nat_addr)\n / TCP(sport=12345, dport=external_port)\n )\n pkts.append(p)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n for p in capture:\n if p[IP].dst == server1.ip4:\n server1_n += 1\n else:\n server2_n += 1\n self.assertGreaterEqual(server1_n, server2_n)\n\n local = {\n \"addr\": server3.ip4,\n \"port\": local_port,\n \"probability\": 20,\n \"vrf_id\": 0,\n }\n\n # add new back-end\n self.vapi.nat44_lb_static_mapping_add_del_local(\n is_add=1,\n external_addr=external_addr,\n external_port=external_port,\n 
local=local,\n protocol=IP_PROTOS.tcp,\n )\n server1_n = 0\n server2_n = 0\n server3_n = 0\n clients = ip4_range(self.pg1.remote_ip4, 60, 110)\n pkts = []\n for client in clients:\n p = (\n Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac)\n / IP(src=client, dst=self.nat_addr)\n / TCP(sport=12346, dport=external_port)\n )\n pkts.append(p)\n self.assertGreater(len(pkts), 0)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n for p in capture:\n if p[IP].dst == server1.ip4:\n server1_n += 1\n elif p[IP].dst == server2.ip4:\n server2_n += 1\n else:\n server3_n += 1\n self.assertGreater(server1_n, 0)\n self.assertGreater(server2_n, 0)\n self.assertGreater(server3_n, 0)\n\n local = {\n \"addr\": server2.ip4,\n \"port\": local_port,\n \"probability\": 10,\n \"vrf_id\": 0,\n }\n\n # remove one back-end\n self.vapi.nat44_lb_static_mapping_add_del_local(\n is_add=0,\n external_addr=external_addr,\n external_port=external_port,\n local=local,\n protocol=IP_PROTOS.tcp,\n )\n server1_n = 0\n server2_n = 0\n server3_n = 0\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n for p in capture:\n if p[IP].dst == server1.ip4:\n server1_n += 1\n elif p[IP].dst == server2.ip4:\n server2_n += 1\n else:\n server3_n += 1\n self.assertGreater(server1_n, 0)\n self.assertEqual(server2_n, 0)\n self.assertGreater(server3_n, 0)", "title": "" }, { "docid": "a2ac715502662acf3ec1e25e8b6379c3", "score": "0.55289704", "text": "def add_tunnel(self, _bss_, _apn_):\n self.bss = _bss_\n self.apn = _apn_\n self.tid_out = get_tid()\n self.tid_in = get_tid()\n\n ##Attempt to find path between the two nodes\n ##If no path is found, tunnel is added to INACTIVE_TUNNELS and is attempted to recreate next time\n ##when new link between forwarders is up\n try:\n self.path_out = topo.get_tunnel(self.bss.name, self.apn.name)\n self.path_in = topo.get_tunnel(self.apn.name, self.bss.name)\n except nx.NetworkXNoPath:\n LOG.warning(\"Warning: Couldn't find path, network might not be converged yet. 
Retrying when next forwarder joins network.\")\n INACTIVE_TUNNELS.append(tunnel(self.bss,self.apn, self.tid_out, self.tid_in))\n return\n\n ##Set forwarding rules for all but last forwarder on the way OUT\n ##On first forwarder on the way OUT these rules are placed into table MAC_TUNNEL_TABLE while on 'dumb' forwarders it goes to 0\n dp = dpset.get(self.path_out[0].dpid)\n parser = dp.ofproto_parser\n match = parser.OFPMatch(eth_dst=self.tid_out)\n actions = [parser.OFPActionOutput(self.path_out[0].port_out)]\n self.add_flow(dp, 300, match, actions, MAC_TUNNEL_TABLE)\n\n ##Rules for all 'dumb' forwardes on the way OUT\n for node in self.path_out[1:-1]:\n dp = dpset.get(node.dpid)\n parser = dp.ofproto_parser\n match = parser.OFPMatch(eth_dst=self.tid_out)\n actions = [parser.OFPActionOutput(node.port_out)]\n self.add_flow(dp, 300, match, actions, 0)\n\n ##Last forwarder on the way OUT needs to set eth_dst to eth_addr of APN otherwise it won't be processed\n dp = dpset.get(self.path_out[-1].dpid)\n parser = dp.ofproto_parser\n match = parser.OFPMatch(eth_dst=self.tid_out)\n actions = [ parser.OFPActionSetField(eth_dst=self.apn.eth_addr), parser.OFPActionOutput(self.path_out[-1].port_out)]\n self.add_flow(dp, 300, match, actions, 0)\n\n ##Here comes tunnel for way IN\n ##On first forwarder on the way IN these rules are placed into table MAC_TUNNEL_TABLE while on 'dumb' forwarders it goes to 0\n dp = dpset.get(self.path_in[0].dpid)\n parser = dp.ofproto_parser\n match = parser.OFPMatch(eth_dst=self.tid_in)\n actions = [parser.OFPActionOutput(self.path_in[0].port_out)]\n self.add_flow(dp, 300, match, actions, MAC_TUNNEL_TABLE)\n\n ##Rules for 'dumb' forwarders\n for node in self.path_in[1:-1]:\n dp = dpset.get(node.dpid)\n parser = dp.ofproto_parser\n match = parser.OFPMatch(eth_dst=self.tid_in)\n actions = [parser.OFPActionOutput(node.port_out)]\n self.add_flow(dp, 300, match, actions, 0)\n\n ##Last forwarder on the way IN sends packet to table #4 where it's matched based on active PDP CNTs\n dp = dpset.get(self.path_in[-1].dpid)\n parser = dp.ofproto_parser\n match = parser.OFPMatch(eth_dst=self.tid_in)\n inst = [ parser.OFPInstructionGotoTable(OF_GPRS_TABLE_IN) ]\n req = parser.OFPFlowMod(datapath=dp, priority=500, match=match, instructions=inst, table_id=0)\n dp.send_msg(req)\n\n ACTIVE_TUNNELS.append(tunnel(self.bss,self.apn, self.tid_out, self.tid_in, self.path_out, self.path_in))\n LOG.debug('Tunnel between '+str(self.bss.name)+' and '+str(self.apn.name) + ' was set up.')", "title": "" }, { "docid": "ef6ce61c0949e52a4788c9230b184e2e", "score": "0.55235493", "text": "def test_08_create_network_without_sourceNAT(self):\n\n self.debug(\"Creating a VPC offering without LB service\")\n self.services[\"vpc_offering\"][\n \"supportedservices\"] = 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat'\n\n vpc_off = VpcOffering.create(\n self.apiclient,\n self.services[\"vpc_offering\"]\n )\n self.cleanup.append(vpc_off)\n self.validate_vpc_offering(vpc_off)\n\n self.debug(\"Enabling the VPC offering created\")\n vpc_off.update(self.apiclient, state='Enabled')\n\n self.debug(\"creating a VPC network in the account: %s\" %\n self.account.name)\n self.services[\"vpc\"][\"cidr\"] = '10.1.1.1/16'\n vpc = VPC.create(\n self.apiclient,\n self.services[\"vpc\"],\n vpcofferingid=vpc_off.id,\n zoneid=self.zone.id,\n account=self.account.name,\n domainid=self.account.domainid\n )\n self.cleanup.append(vpc)\n self.validate_vpc_network(vpc)\n\n self.debug(\"Creating network offering without SourceNAT 
service\")\n self.services[\"network_offering\"][\n \"supportedservices\"] = 'Dhcp,Dns,PortForwarding,Lb,UserData,StaticNat,NetworkACL'\n self.services[\"network_offering\"][\"serviceProviderList\"] = {\n \"Dhcp\": 'VpcVirtualRouter',\n \"Dns\": 'VpcVirtualRouter',\n \"PortForwarding\": 'VpcVirtualRouter',\n \"Lb\": 'VpcVirtualRouter',\n \"UserData\": 'VpcVirtualRouter',\n \"StaticNat\": 'VpcVirtualRouter',\n \"NetworkACL\": 'VpcVirtualRouter'\n }\n\n self.debug(\"Creating network offering without SourceNAT\")\n with self.assertRaises(Exception):\n nw = NetworkOffering.create(\n self.apiclient,\n self.services[\"network_offering\"],\n conservemode=False\n )\n self.cleanup.append(nw)\n self.debug(\"Network creation failed as VPC doesn't have LB service\")\n return", "title": "" }, { "docid": "5e2fc6933815af10a737006e242e42fc", "score": "0.55170244", "text": "def test_router_dns_externalipquery(self):\n\n self.logger.debug(\"Starting test_router_dns_externalips...\")\n\n public_ip = self.test_router_common()[0]\n\n self.logger.debug(\"Querying VR DNS IP: \" + public_ip.ipaddress)\n resolver = dns.resolver.Resolver()\n resolver.namerservers = [public_ip.ipaddress]\n try:\n resolver.query('google.com', 'A')\n self.fail(\"Non-guest network IPs are able to access VR DNS, failing.\")\n except:\n self.logger.debug(\"VR DNS query failed from non-guest network IP as expected\")", "title": "" }, { "docid": "8cc5d567e0e3771b28fa9f45dccadea5", "score": "0.55045336", "text": "def dns():\n pass", "title": "" }, { "docid": "de7639c2c419308ead4110da7c0bf117", "score": "0.5490384", "text": "def test_self_twice_nat_positive(self):\n self.twice_nat_common(self_twice_nat=True, same_pg=True)", "title": "" }, { "docid": "78d84b84911d87e89c4750dabc62c01d", "score": "0.547807", "text": "def test_routing_ip_install(self):\n self._common_install(\n \"some_id\", routing_ip_prefix.create,\n {\n 'prefix': {\n \"dlr_id\": \"dlr_id\",\n \"name\": \"name\",\n \"ipAddress\": \"ipAddress\"\n }\n }\n )", "title": "" }, { "docid": "8abce90a5f924f6099aef94cda24d9dd", "score": "0.54736966", "text": "def test_get_network(self):\n pass", "title": "" }, { "docid": "4144680933573c65a5780b9b4ae0d2a7", "score": "0.54707897", "text": "def test_ip_address(self):\n command = \"ip a show primary dev %s\" % self.test_network[\"iface\"]\n expected = tuple(self.test_network[\"service\"])\n self.output_check(command, expected)", "title": "" }, { "docid": "e322579e3edba45404efdda05bba3f50", "score": "0.5470773", "text": "def est_IPv4 ( trame ) :\n if \"0800\" == ''.join(trame[12:14]) :\n return True\n return False", "title": "" }, { "docid": "33e62304bf46d9fed10c48e035115a44", "score": "0.5464999", "text": "def runTest(self):\n print \"\"\n print \"Sending L3 packet from port %d -> port %d\" % (swports[0],\n swports[1])\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make switchports a vlan member port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='access')\n self.add_vlan_member(device, vlan10, eth1)\n\n eth2 = self.cfg_l2intf_on_port(device, port2, mode='access')\n self.add_vlan_member(device, vlan20, eth2)\n\n # Create mac address table entries\n self.add_mac_address_table_entry(device, vlan10, '00:10:10:10:10:15',\n 2, eth1)\n 
self.add_mac_address_table_entry(device, vlan20, '00:20:20:20:20:25',\n 2, eth2)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n ip_ttl=63)\n\n try:\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n print \"Delete SVI interface\"\n # remove neighbor before remove SVI \n self.no_neighbor(device, neighbor1)\n self.no_static_route(device, vrf, ipaddr3, nhop1)\n self.no_nhop(device, nhop1)\n # remove SVI\n self.no_ip_address(device, intf_vl20, vrf, ipaddr2)\n self.no_logical_l3vlan(device, intf_vl20)\n\n print \"Re-add SVI interface\"\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # re-add neighbor\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n\n print (\"Re-sending packet port %d\" % swports[0], \" -> port %d\"\n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "2c6ee6d42f45cbd1ecbdeec5aab48d2e", "score": "0.5464999", "text": "def runTest(self):\n print \"\"\n print \"Sending L3 packet from port %d -> port %d\" % (swports[0],\n swports[1])\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make switchports a vlan member port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='access')\n self.add_vlan_member(device, vlan10, eth1)\n\n eth2 = self.cfg_l2intf_on_port(device, port2, mode='access')\n self.add_vlan_member(device, vlan20, eth2)\n\n # Create mac address table entries\n self.add_mac_address_table_entry(device, vlan10, '00:10:10:10:10:15',\n 2, eth1)\n self.add_mac_address_table_entry(device, vlan20, '00:20:20:20:20:25',\n 2, eth2)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, 
vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Create ip addr and use it as host\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n ip_ttl=63)\n\n try:\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n print \"Delete IP address\"\n self.no_neighbor(device, neighbor1)\n self.no_static_route(device, vrf, ipaddr3, nhop1)\n self.no_nhop(device, nhop1)\n\n print \"Re-add IP address\"\n ipaddr3 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr3)\n self.add_static_route(device, vrf, ipaddr3, nhop1)\n\n print (\"Re-sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "9bbe0084a16ed66df9ee6724a2334498", "score": "0.54584634", "text": "def test_multiple_vrf_4(self):\n\n external_addr = \"1.2.3.4\"\n external_port = 80\n local_port = 8080\n port = 0\n\n flags = self.config_flags.NAT_IS_INSIDE\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg0.sw_if_index, is_add=1, flags=flags\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1\n )\n self.vapi.nat44_interface_add_del_feature(\n sw_if_index=self.pg5.sw_if_index, is_add=1, flags=flags\n )\n flags = self.config_flags.NAT_IS_OUT2IN_ONLY\n self.nat_add_static_mapping(\n self.pg5.remote_ip4,\n external_addr,\n local_port,\n external_port,\n vrf_id=1,\n proto=IP_PROTOS.tcp,\n flags=flags,\n )\n\n # from client VRF0 to service VRF1\n p = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IP(src=self.pg0.remote_ip4, dst=external_addr)\n / TCP(sport=12347, dport=external_port)\n )\n self.pg0.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg5.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n self.assertEqual(ip.dst, self.pg5.remote_ip4)\n self.assertEqual(tcp.dport, local_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise\n\n # from service VRF1 back to client VRF0\n p = (\n Ether(src=self.pg5.remote_mac, dst=self.pg5.local_mac)\n / IP(src=self.pg5.remote_ip4, dst=self.pg0.remote_ip4)\n / TCP(sport=local_port, dport=12347)\n )\n self.pg5.add_stream(p)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(1)\n p = capture[0]\n try:\n ip = p[IP]\n tcp = p[TCP]\n 
self.assertEqual(ip.src, external_addr)\n self.assertEqual(tcp.sport, external_port)\n self.assert_packet_checksums_valid(p)\n except:\n self.logger.error(ppp(\"Unexpected or invalid packet:\", p))\n raise", "title": "" }, { "docid": "4abc8c8a3c7d6bd752d8e6462b242cf6", "score": "0.54576737", "text": "def runTest(self):\n print \"\"\n print \"Sending L3 packet from port %d -> port %d\" % (swports[0],\n swports[1])\n\n vrf = self.add_vrf(device, 2)\n\n # Create vlan 10 & 20\n vlan10 = self.add_vlan(device, 10)\n vlan20 = self.add_vlan(device, 20)\n\n # Add router MAC\n rmac = self.add_rmac(device)\n self.add_router_mac(device, rmac,'00:77:66:55:44:33')\n\n port1 = self.select_port(device, swports[0])\n port2 = self.select_port(device, swports[1])\n\n # Make switchports a vlan member port\n eth1 = self.cfg_l2intf_on_port(device, port1, mode='access')\n self.add_vlan_member(device, vlan10, eth1)\n\n eth2 = self.cfg_l2intf_on_port(device, port2, mode='access')\n self.add_vlan_member(device, vlan20, eth2)\n\n # Create mac address table entries\n self.add_mac_address_table_entry(device, vlan10, '00:10:10:10:10:15',\n 2, eth1)\n self.add_mac_address_table_entry(device, vlan20, '00:20:20:20:20:25',\n 2, eth2)\n\n # Add SVI IP addresses to vlan 10 & 20\n intf_vl10 = self.add_logical_l3vlan(device, vrf, rmac, 10)\n ipaddr1 = self.make_ipv4_ipaddr('172.16.10.1', 24)\n self.cfg_ip_address(device, intf_vl10, vrf, ipaddr1)\n\n intf_vl20 = self.add_logical_l3vlan(device, vrf, rmac, 20)\n ipaddr2 = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20, vrf, ipaddr2)\n\n # Duplicate SVI address. This will fail to add\n intf_vl20_dup = self.add_logical_l3vlan(device, vrf, rmac, 20,\n store=False)\n ipaddr2_dup = self.make_ipv4_ipaddr('172.16.20.1', 24)\n self.cfg_ip_address(device, intf_vl20_dup, vrf, ipaddr2_dup,\n store=False)\n\n # Create ip addr and use it as host\n ipaddr4 = self.make_ipv4_ipaddr('172.16.20.5', 32)\n nhop1 = self.add_nhop(device, intf_vl20)\n neighbor1 = self.add_neighbor_l3intf(device, intf_vl20, nhop1,\n '00:20:20:20:20:25', ipaddr4)\n # currently status doesn't return correct error code\n status = self.add_static_route(device, vrf, ipaddr4, nhop1)\n\n\n pkt = simple_tcp_packet(\n eth_dst='00:77:66:55:44:33',\n eth_src='00:10:10:10:10:15',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n ip_ttl=64)\n exp_pkt = simple_tcp_packet(\n eth_dst='00:20:20:20:20:25',\n eth_src='00:77:66:55:44:33',\n ip_dst='172.16.20.5',\n ip_src='172.16.10.5',\n ip_id=105,\n ip_ttl=63)\n\n try:\n print (\"Sending packet port %d\" % swports[0], \" -> port %d\" \n % swports[1], \" (172.16.10.5 -> 172.16.20.5 [id = 105])\")\n send_packet(self, swports[0], str(pkt))\n verify_packets(self, exp_pkt, [swports[1]])\n\n finally:\n self.cleanup()", "title": "" }, { "docid": "dc214a82cf7997803ad07c82bae4fc63", "score": "0.5433812", "text": "def test_self_twice_nat_lb_negative(self):\n self.twice_nat_common(lb=True, self_twice_nat=True, same_pg=True, client_id=2)", "title": "" }, { "docid": "7ab2d715459351e19debbc92d63aa4ce", "score": "0.54337525", "text": "def _build_tunnel_packet(outer_src_ip, outer_dst_ip):\n exp_pkt = testutils.simple_ip_packet(\n ip_src=outer_src_ip,\n ip_dst=outer_dst_ip,\n pktlen=20\n )\n exp_pkt = mask.Mask(exp_pkt)\n exp_pkt.set_do_not_care_scapy(Ether, \"dst\")\n exp_pkt.set_do_not_care_scapy(Ether, \"src\")\n exp_pkt.set_do_not_care_scapy(IP, \"ihl\")\n exp_pkt.set_do_not_care_scapy(IP, \"tos\")\n exp_pkt.set_do_not_care_scapy(IP, \"len\")\n 
exp_pkt.set_do_not_care_scapy(IP, \"id\")\n exp_pkt.set_do_not_care_scapy(IP, \"flags\")\n exp_pkt.set_do_not_care_scapy(IP, \"frag\")\n exp_pkt.set_do_not_care_scapy(IP, \"ttl\")\n exp_pkt.set_do_not_care_scapy(IP, \"proto\")\n exp_pkt.set_do_not_care_scapy(IP, \"chksum\")\n exp_pkt.set_ignore_extra_bytes()\n return exp_pkt", "title": "" }, { "docid": "05b3eb079960778869d524b23eb443d4", "score": "0.5433437", "text": "def test_register_network(self):\n pass", "title": "" }, { "docid": "37bf27d3b9ff753dd46d45e7287ec979", "score": "0.54289734", "text": "def test_01_acquire_public_ips_in_isolated_network_with_single_vr(self):\n\n # Create new domain1\n self.domain1 = Domain.create(\n self.apiclient,\n services=self.services[\"acl\"][\"domain1\"],\n parentdomainid=self.domain.id)\n # Create account1\n self.account1 = Account.create(\n self.apiclient,\n self.services[\"acl\"][\"accountD1\"],\n domainid=self.domain1.id\n )\n self.cleanup.append(self.account1)\n self.cleanup.append(self.domain1)\n\n # 1. Create network offering with single VR, and enable it\n self.network_offering = NetworkOffering.create(\n self.apiclient,\n self.services[\"isolated_network_offering\"],\n )\n self.network_offering.update(self.apiclient, state='Enabled')\n self.cleanup.append(self.network_offering)\n\n # 2. create isolated network with the network offering\n self.services[\"network\"][\"zoneid\"] = self.zone.id\n self.services[\"network\"][\"networkoffering\"] = self.network_offering.id\n self.network1 = Network.create(\n self.apiclient,\n self.services[\"network\"],\n self.account1.name,\n self.account1.domainid\n )\n\n # 3. create a vm in the network.\n try:\n self.virtual_machine1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"virtual_machine\"],\n accountid=self.account1.name,\n domainid=self.account1.domainid,\n serviceofferingid=self.service_offering.id,\n templateid=self.template.id,\n zoneid=self.zone.id,\n networkids=self.network1.id\n )\n except Exception as e:\n self.fail(\"Exception while deploying virtual machine: %s\" % e)\n\n # verify the available nics in VR should be \"eth0,eth1,eth2\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_router_publicnic_state(router, host, \"eth2\")\n\n # 4. get a free public ip, assign to network, and create port forwarding rules (ssh) to the vm\n ipaddress = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n )\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine1,\n self.services[\"natrule\"],\n ipaddressid=ipaddress.ipaddress.id,\n openfirewall=True\n )\n # verify the available nics in VR should be \"eth0,eth1,eth2\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP/new ip\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress.ipaddress.ipaddress, \"eth2\", True)\n self.verify_router_publicnic_state(router, host, \"eth2\")\n\n # 5. release the new ip\n ipaddress.delete(self.apiclient)\n\n # verify the available nics in VR should be \"eth0,eth1,eth2\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress.ipaddress.ipaddress, \"eth2\", False)\n self.verify_router_publicnic_state(router, host, \"eth2\")\n\n # 6. create new public ip range 1\n self.services[\"publiciprange\"][\"zoneid\"] = self.zone.id\n self.services[\"publiciprange\"][\"forvirtualnetwork\"] = \"true\"\n random_subnet_number = random.randrange(10,50)\n self.services[\"publiciprange\"][\"vlan\"] = get_free_vlan(\n self.apiclient,\n self.zone.id)[1]\n self.services[\"publiciprange\"][\"gateway\"] = \"172.16.\" + str(random_subnet_number) + \".1\"\n self.services[\"publiciprange\"][\"startip\"] = \"172.16.\" + str(random_subnet_number) + \".2\"\n self.services[\"publiciprange\"][\"endip\"] = \"172.16.\" + str(random_subnet_number) + \".10\"\n self.services[\"publiciprange\"][\"netmask\"] = \"255.255.255.0\"\n self.public_ip_range1 = PublicIpRange.create(\n self.apiclient,\n self.services[\"publiciprange\"]\n )\n self.cleanup.append(self.public_ip_range1)\n\n # 7. get a free ip 4 in new ip range 2, assign to network, and enable static nat to vm\n ip_address_1 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)\n ipaddress_1 = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n ipaddress=ip_address_1\n )\n\n StaticNATRule.enable(\n self.apiclient,\n virtualmachineid=self.virtual_machine1.id,\n ipaddressid=ipaddress_1.ipaddress.id,\n networkid=self.network1.id\n )\n\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")\n\n # 8. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2,\n ip_address_2 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)\n ipaddress_2 = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n ipaddress=ip_address_2\n )\n\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine1,\n self.services[\"natrule\"],\n ipaddressid=ipaddress_2.ipaddress.id,\n openfirewall=True\n )\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")\n\n # 9. get a free ip in new ip range, assign to network, and create port forwarding rules (ssh) to the vm\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 2, new ip 3\n ip_address_3 = self.get_free_ipaddress(self.public_ip_range1.vlan.id)\n ipaddress_3 = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n ipaddress=ip_address_3\n )\n\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine1,\n self.services[\"natrule\"],\n ipaddressid=ipaddress_3.ipaddress.id,\n openfirewall=True\n )\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")\n\n # 10. release new ip 2\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 1, new ip 3\n ipaddress_2.delete(self.apiclient)\n\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")\n\n # 11. release new ip 1\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3\n ipaddress_1.delete(self.apiclient)\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_1.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_2.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")\n\n # 12. 
create new public ip range 2\n self.services[\"publiciprange\"][\"zoneid\"] = self.zone.id\n self.services[\"publiciprange\"][\"forvirtualnetwork\"] = \"true\"\n self.services[\"publiciprange\"][\"vlan\"] = get_free_vlan(\n self.apiclient,\n self.zone.id)[1]\n self.services[\"publiciprange\"][\"gateway\"] = \"172.16.\" + str(random_subnet_number + 1) + \".1\"\n self.services[\"publiciprange\"][\"startip\"] = \"172.16.\" + str(random_subnet_number + 1) + \".2\"\n self.services[\"publiciprange\"][\"endip\"] = \"172.16.\" + str(random_subnet_number + 1) + \".10\"\n self.services[\"publiciprange\"][\"netmask\"] = \"255.255.255.0\"\n self.public_ip_range2 = PublicIpRange.create(\n self.apiclient,\n self.services[\"publiciprange\"]\n )\n self.cleanup.append(self.public_ip_range2)\n\n # 13. get a free ip 4 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,eth4,\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4\n\n ip_address_4 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)\n ipaddress_4 = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n ipaddress=ip_address_4\n )\n\n StaticNATRule.enable(\n self.apiclient,\n virtualmachineid=self.virtual_machine1.id,\n ipaddressid=ipaddress_4.ipaddress.id,\n networkid=self.network1.id\n )\n\n\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3|eth4\")\n\n # 14. get a free ip 5 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,eth4,\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5\n ip_address_5 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)\n ipaddress_5 = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n ipaddress=ip_address_5\n )\n\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine1,\n self.services[\"natrule\"],\n ipaddressid=ipaddress_5.ipaddress.id,\n openfirewall=True\n )\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3|eth4\")\n\n # 15. get a free ip 6 in new ip range 2, assign to network, and create port forwarding rules (ssh) to the vm\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,eth4,\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/5/6\n ip_address_6 = self.get_free_ipaddress(self.public_ip_range2.vlan.id)\n ipaddress_6 = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.zone.id,\n networkid=self.network1.id,\n ipaddress=ip_address_6\n )\n\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine1,\n self.services[\"natrule\"],\n ipaddressid=ipaddress_6.ipaddress.id,\n openfirewall=True\n )\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth4\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3|eth4\")\n\n # 16. release new ip 5\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,eth4,\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 4/6\n ipaddress_5.delete(self.apiclient)\n\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3|eth4\")\n\n # 17. release new ip 4\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,eth4,\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 3, eth4 -> new ip 6\n ipaddress_4.delete(self.apiclient)\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_3.ipaddress.ipaddress, \"eth3\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3|eth4\")\n\n # 18. release new ip 3\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth4,\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6\n ipaddress_3.delete(self.apiclient)\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth4\")\n\n # 19. restart network\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth4,\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP, eth4 -> new ip 6\n self.network1.restart(self.apiclient)\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth4,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth4\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth4\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth4\")\n\n # 20. reboot router\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6\n if len(routers) > 0:\n router = routers[0]\n cmd = rebootRouter.rebootRouterCmd()\n cmd.id = router.id\n self.apiclient.rebootRouter(cmd)\n router = self.get_router(router.id)\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")\n\n # 21. restart network with cleanup\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,\"\n # verify the IPs in VR. eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6\n self.network1.restart(self.apiclient, cleanup=True)\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth3\", True)\n\n # 22. restart network with cleanup, makeredundant=true\n # verify the available nics in VR should be \"eth0,eth1,eth2,eth3,\"\n # verify the IPs in VR. 
eth0 -> guest nic, eth2 -> source nat IP, eth3 -> new ip 6\n self.network1.restart(self.apiclient, cleanup=True, makeredundant=True)\n routers = self.get_routers(self.network1.id)\n for router in routers:\n host = self.get_router_host(router)\n self.verify_network_interfaces_in_router(router, host, \"eth0,eth1,eth2,eth3,\")\n guestIp, controlIp, sourcenatIp = self.get_router_ips(router)\n self.verify_ip_address_in_router(router, host, guestIp, \"eth0\", True)\n self.verify_ip_address_in_router(router, host, controlIp, \"eth1\", True)\n self.verify_ip_address_in_router(router, host, sourcenatIp, \"eth2\", True)\n self.verify_ip_address_in_router(router, host, ipaddress_4.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_5.ipaddress.ipaddress, \"eth3\", False)\n self.verify_ip_address_in_router(router, host, ipaddress_6.ipaddress.ipaddress, \"eth3\", True)\n self.verify_router_publicnic_state(router, host, \"eth2|eth3\")", "title": "" }, { "docid": "cdbf9546078957ba8c934af4fcb4e557", "score": "0.54213345", "text": "def test_deny_after_permitting_other_network(self):\n self.uut.permit(\"192.168.0.0/16\")\n self.assertEqual(self.uut.evaluate(\"10.0.0.1\"), False)", "title": "" }, { "docid": "93a13fe34b29740d552055a295a4b7ac", "score": "0.54179287", "text": "def test_connect_robot_with_ipaddr_correct(self):\n controllers = com.discover_controllers_on_network()\n self.controller, _, connected = com.connect_robot_with_ipaddr(controllers, '127.0.0.1')\n self.assertTrue(connected)", "title": "" }, { "docid": "06987966a9d33a593eadd0af9ceeac46", "score": "0.5417532", "text": "def test_users_dump(self):\n\n self.nat_add_address(self.nat_addr)\n self.nat_add_inside_interface(self.pg0)\n self.nat_add_outside_interface(self.pg1)\n\n self.vapi.nat44_forwarding_enable_disable(enable=1)\n\n local_ip = self.pg0.remote_ip4\n external_ip = self.nat_addr\n self.nat_add_static_mapping(local_ip, external_ip)\n\n users = self.vapi.nat44_user_dump()\n self.assertEqual(len(users), 0)\n\n # in2out - static mapping match\n\n pkts = self.create_stream_out(self.pg1)\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n self.verify_capture_in(capture, self.pg0)\n\n pkts = self.create_stream_in(self.pg0, self.pg1)\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_out(capture, same_port=True)\n\n users = self.vapi.nat44_user_dump()\n self.assertEqual(len(users), 1)\n static_user = users[0]\n self.assertEqual(static_user.nstaticsessions, 3)\n self.assertEqual(static_user.nsessions, 0)\n\n # in2out - no static mapping match (forwarding test)\n\n host0 = self.pg0.remote_hosts[0]\n self.pg0.remote_hosts[0] = self.pg0.remote_hosts[1]\n try:\n pkts = self.create_stream_out(\n self.pg1, dst_ip=self.pg0.remote_ip4, use_inside_ports=True\n )\n self.pg1.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg0.get_capture(len(pkts))\n self.verify_capture_in(capture, self.pg0)\n\n pkts = self.create_stream_in(self.pg0, self.pg1)\n self.pg0.add_stream(pkts)\n self.pg_enable_capture(self.pg_interfaces)\n self.pg_start()\n capture = self.pg1.get_capture(len(pkts))\n self.verify_capture_out(capture, nat_ip=self.pg0.remote_ip4, same_port=True)\n finally:\n self.pg0.remote_hosts[0] = host0\n\n users = self.vapi.nat44_user_dump()\n self.assertEqual(len(users), 2)\n 
if str(users[0].ip_address) == self.pg0.remote_hosts[0].ip4:\n non_static_user = users[1]\n static_user = users[0]\n else:\n non_static_user = users[0]\n static_user = users[1]\n self.assertEqual(static_user.nstaticsessions, 3)\n self.assertEqual(static_user.nsessions, 0)\n self.assertEqual(non_static_user.nstaticsessions, 0)\n self.assertEqual(non_static_user.nsessions, 3)\n\n users = self.vapi.nat44_user_dump()\n self.assertEqual(len(users), 2)\n if str(users[0].ip_address) == self.pg0.remote_hosts[0].ip4:\n non_static_user = users[1]\n static_user = users[0]\n else:\n non_static_user = users[0]\n static_user = users[1]\n self.assertEqual(static_user.nstaticsessions, 3)\n self.assertEqual(static_user.nsessions, 0)\n self.assertEqual(non_static_user.nstaticsessions, 0)\n self.assertEqual(non_static_user.nsessions, 3)", "title": "" }, { "docid": "8d4d1c61831aa89ecc1b0c9f4ba9acf7", "score": "0.5383938", "text": "def test_evaluate_ip_address(self):\n self.assertEqual(self.uut.evaluate(\"127.0.0.1\"), False)", "title": "" }, { "docid": "3dc917cb3b12e84787d201fe881f569b", "score": "0.5383363", "text": "def __init__(__self__, *,\n drain_nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n enable_dynamic_port_allocation: Optional[pulumi.Input[bool]] = None,\n enable_endpoint_independent_mapping: Optional[pulumi.Input[bool]] = None,\n icmp_idle_timeout_sec: Optional[pulumi.Input[int]] = None,\n log_config: Optional[pulumi.Input['RouterNatLogConfigArgs']] = None,\n max_ports_per_vm: Optional[pulumi.Input[int]] = None,\n min_ports_per_vm: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n nat_ip_allocate_option: Optional[pulumi.Input[str]] = None,\n nat_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n router: Optional[pulumi.Input[str]] = None,\n rules: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatRuleArgs']]]] = None,\n source_subnetwork_ip_ranges_to_nat: Optional[pulumi.Input[str]] = None,\n subnetworks: Optional[pulumi.Input[Sequence[pulumi.Input['RouterNatSubnetworkArgs']]]] = None,\n tcp_established_idle_timeout_sec: Optional[pulumi.Input[int]] = None,\n tcp_time_wait_timeout_sec: Optional[pulumi.Input[int]] = None,\n tcp_transitory_idle_timeout_sec: Optional[pulumi.Input[int]] = None,\n udp_idle_timeout_sec: Optional[pulumi.Input[int]] = None):\n if drain_nat_ips is not None:\n pulumi.set(__self__, \"drain_nat_ips\", drain_nat_ips)\n if enable_dynamic_port_allocation is not None:\n pulumi.set(__self__, \"enable_dynamic_port_allocation\", enable_dynamic_port_allocation)\n if enable_endpoint_independent_mapping is not None:\n pulumi.set(__self__, \"enable_endpoint_independent_mapping\", enable_endpoint_independent_mapping)\n if icmp_idle_timeout_sec is not None:\n pulumi.set(__self__, \"icmp_idle_timeout_sec\", icmp_idle_timeout_sec)\n if log_config is not None:\n pulumi.set(__self__, \"log_config\", log_config)\n if max_ports_per_vm is not None:\n pulumi.set(__self__, \"max_ports_per_vm\", max_ports_per_vm)\n if min_ports_per_vm is not None:\n pulumi.set(__self__, \"min_ports_per_vm\", min_ports_per_vm)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if nat_ip_allocate_option is not None:\n pulumi.set(__self__, \"nat_ip_allocate_option\", nat_ip_allocate_option)\n if nat_ips is not None:\n pulumi.set(__self__, \"nat_ips\", nat_ips)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if 
region is not None:\n pulumi.set(__self__, \"region\", region)\n if router is not None:\n pulumi.set(__self__, \"router\", router)\n if rules is not None:\n pulumi.set(__self__, \"rules\", rules)\n if source_subnetwork_ip_ranges_to_nat is not None:\n pulumi.set(__self__, \"source_subnetwork_ip_ranges_to_nat\", source_subnetwork_ip_ranges_to_nat)\n if subnetworks is not None:\n pulumi.set(__self__, \"subnetworks\", subnetworks)\n if tcp_established_idle_timeout_sec is not None:\n pulumi.set(__self__, \"tcp_established_idle_timeout_sec\", tcp_established_idle_timeout_sec)\n if tcp_time_wait_timeout_sec is not None:\n pulumi.set(__self__, \"tcp_time_wait_timeout_sec\", tcp_time_wait_timeout_sec)\n if tcp_transitory_idle_timeout_sec is not None:\n pulumi.set(__self__, \"tcp_transitory_idle_timeout_sec\", tcp_transitory_idle_timeout_sec)\n if udp_idle_timeout_sec is not None:\n pulumi.set(__self__, \"udp_idle_timeout_sec\", udp_idle_timeout_sec)", "title": "" }, { "docid": "eda518134b7d7755490a548a2b26a1a8", "score": "0.5382174", "text": "def test_create_port_neg(self):\n # create l2domain on VSD\n vsd_l2domain_template = self.create_vsd_l2domain_template(\n ip_type=\"DUALSTACK\",\n dhcp_managed=True,\n cidr4=self.cidr4,\n cidr6=self.cidr6)\n vsd_l2domain = self.create_vsd_l2domain(vsd_l2domain_template['ID'])\n\n # create Openstack IPv4 subnet on Openstack based on VSD l2domain\n net_name = data_utils.rand_name('network-')\n network = self.create_network(network_name=net_name)\n ipv4_subnet = self.create_subnet(\n network,\n gateway=self.gateway4,\n cidr=self.cidr4,\n enable_dhcp=True,\n mask_bits=self.mask_bits,\n nuagenet=vsd_l2domain['ID'],\n net_partition=CONF.nuage.nuage_default_netpartition)\n\n ipv6_subnet = self.create_subnet(\n network,\n ip_version=6,\n gateway=self.gateway6,\n cidr=self.cidr6,\n mask_bits=self.cidr6._prefixlen,\n enable_dhcp=False,\n nuagenet=vsd_l2domain['ID'],\n net_partition=CONF.nuage.nuage_default_netpartition)\n\n # noinspection PyPep8\n invalid_ipv6 = [\n ('::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # # Loopback\n ('FE80::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # # Link local address\n (\"FF00:5f74:c4a5:b82e:ffff:ffff:ffff:ffff\", MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # multicast\n ('FF00::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # multicast address\n ('::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # not specified address\n ('::', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # empty address\n (\"2001:ffff:ffff:ffff:ffff:ffff:ffff:ffff\", MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # valid address, not in subnet\n\n ('', MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # empty string\n (\"2001:5f74:c4a5:b82e:ffff:ffff:ffff:ffff:ffff\", MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid address, too much segments\n (\"2001:5f74:c4a5:b82e:ffff:ffff:ffff\", MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid address, seven segments\n (\"2001;5f74.c4a5.b82e:ffff:ffff:ffff\", MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid address, wrong characters\n (\"2001:5f74:c4a5:b82e:100.12.13.1\", MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid fornmat: must have :: between hex and decimal part.\n ]\n\n for ipv6, msg in invalid_ipv6:\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'], 'ip_address': IPAddress(self.cidr4.first + 40)},\n {'subnet_id': ipv6_subnet['id'], 'ip_address': ipv6}]}\n self.assertRaisesRegexp(tempest_exceptions.BadRequest, msg % ipv6, self.create_port, network, **port_args)\n\n pass", "title": "" }, { "docid": "3b03708bdd2aaafe13478b4da053ac89", "score": "0.53807896", "text": "def 
startNAT( root, inetIntf='eth0', subnet='10.0/8' ):\n\n # Identify the interface connecting to the mininet network\n localIntf = root.defaultIntf()\n\n # Flush any currently active rules\n root.cmd( 'iptables -F' )\n root.cmd( 'iptables -t nat -F' )\n\n # Create default entries for unmatched traffic\n root.cmd( 'iptables -P INPUT ACCEPT' )\n root.cmd( 'iptables -P OUTPUT ACCEPT' )\n root.cmd( 'iptables -P FORWARD DROP' )\n\n # Configure NAT\n root.cmd( 'iptables -I FORWARD -i', localIntf, '-d', subnet, '-j DROP' )\n root.cmd( 'iptables -A FORWARD -i', localIntf, '-s', subnet, '-j ACCEPT' )\n root.cmd( 'iptables -A FORWARD -i', inetIntf, '-d', subnet, '-j ACCEPT' )\n root.cmd( 'iptables -t nat -A POSTROUTING -o ', inetIntf, '-j MASQUERADE' )\n\n # Instruct the kernel to perform forwarding\n root.cmd( 'sysctl net.ipv4.ip_forward=1' )", "title": "" }, { "docid": "3c8a9b115f2f8793aededd3a4f23b210", "score": "0.5377308", "text": "def test_in_network_svc_mirroring(self):\n return self.verify_svc_mirroring(svc_mode='in-network')", "title": "" }, { "docid": "2bd6d094b20573c4f3cf599cbbf0afeb", "score": "0.5369563", "text": "def test_map_uni(self):\n\n logger = mock.MagicMock()\n self.tested_instance._create_crossconnection = mock.MagicMock()\n\n src_port = [\"address\", \"1-1-1\"]\n dst_port = [\"address\", \"1-1-2\"]\n self.tested_instance.map_uni(src_port, dst_port, logger)\n\n self.tested_instance._create_crossconnection.assert_called_once_with(src_port=src_port,\n dst_port=dst_port,\n conn_type=\"1WAY\")", "title": "" }, { "docid": "edb188833db26abac3acc7ea5214532c", "score": "0.5366094", "text": "def test_static_tor_onions(node_factory):\n # please define your values\n torip = '127.0.0.1'\n torips = '127.0.0.1:9051'\n torport = 9050\n torserviceport = 9051\n portA, portB = reserve(), reserve()\n\n if not check_socket(format(torip), torserviceport):\n return\n\n if not check_socket(format(torip), torport):\n return\n\n l1 = node_factory.get_node(may_fail=True, options={\n 'bind-addr': '127.0.0.1:{}'.format(portA),\n 'addr': ['statictor:{}'.format(torips)]\n })\n l2 = node_factory.get_node(may_fail=True, options={\n 'bind-addr': '127.0.0.1:{}'.format(portB),\n 'addr': ['statictor:{}/torblob=11234567890123456789012345678901/torport={}'.format(torips, 9736)]\n })\n\n assert l1.daemon.is_in_log('127.0.0.1:{}'.format(l1.port))\n # Did not specify torport, so it's the default.\n assert l1.daemon.is_in_log('.onion:{}'.format(default_ln_port(l1.info[\"network\"])))\n assert l2.daemon.is_in_log('x2y4zvh4fn5q3eouuh7nxnc7zeawrqoutljrup2xjtiyxgx3emgkemad.onion:{},127.0.0.1:{}'.format(9736, l2.port))", "title": "" }, { "docid": "be8f835cb65f457f57cd43b93f748998", "score": "0.53624237", "text": "def test_0050_loopback_prepare_test(self):\n self.create_loopback_interfaces(2)\n for i in range(2):\n intf = self.lo_interfaces[i]\n intf.admin_up()\n intf.local_ip4 = self.pg_interfaces[i].remote_ip4\n intf.local_ip4_prefix_len = 32\n intf.config_ip4()\n intf.local_ip6 = self.pg_interfaces[i].remote_ip6\n intf.local_ip6_prefix_len = 128\n intf.config_ip6()", "title": "" }, { "docid": "a5de64105831f3da3007718eb6f1d804", "score": "0.5358184", "text": "def CASE32( self, main ):\n main.case( \"Configure onos-app-fwd and check if configuration successful. 
\" )\n main.step( \"Install reactive forwarding app.\" )\n installResults = main.ONOScli1.activateApp( \"org.onosproject.fwd\" )\n utilities.assert_equals( expect=main.TRUE, actual=installResults,\n onpass = \"Configure fwd successful\", onfail=\"Configure fwd failed\" )\n main.step( \"Run pingall to check connectivity. \" )\n pingResult = main.FALSE\n passMsg = \"Reactive Pingall test passed\"\n pingResult = main.Mininet1.pingall()\n if not pingResult:\n main.log.warn( \"First pingall failed. Trying again...\" )\n pingResult = main.Mininet1.pingall()\n passMsg += \"on the second try\"\n utilities.assert_equals( expect=main.TRUE, actual=pingResult, onpass=passMsg, onfail= \"Reactive Pingall failed, \" + \"one or more ping pairs failed.\" )", "title": "" }, { "docid": "07f52c5a066f875de364f0295ae82f6e", "score": "0.5348731", "text": "def test_symmetric_vote(self):\n for i in range(2):\n address = (\"140.0.0.2\", i + 1)\n candidate = self._community.create_candidate(address, False, address, address, u\"unknown\")\n self._dispersy.wan_address_vote((\"1.0.0.1\", i + 1), candidate)\n self.assertEqual(self._dispersy.connection_type, u\"symmetric-NAT\")\n\n # because we CANDIDATE didn't send any messages to COMMUNITY, the CANDIDATE timestamps have never been set. In\n # the current code this results in the CANDIDATE to remain 'obsolete'.\n self.assertIsNone(candidate.get_category(time()))\n self.assertEqual(self._community.cleanup_candidates(), 2)\n\n for i in range(2):\n address = (\"140.0.0.3\", i + 1)\n candidate = self._community.create_candidate(address, False, address, address, u\"unknown\")\n self._dispersy.wan_address_vote((\"1.0.0.1\", 1), candidate)\n self.assertEqual(self._dispersy.connection_type, u\"unknown\")", "title": "" }, { "docid": "836d4d9a74d27bc384d1e5e5331ebbe6", "score": "0.5345261", "text": "def test_deny_address_just_off_edge_of_permitted_network(self):\n self.uut.permit(\"172.16.0.0/12\")\n self.assertEqual(self.uut.evaluate(\"172.32.0.0\"), False)", "title": "" } ]
80ee60f6a1ed3ed14d64af8a5c4cf192
Main helper that moves the aliens. Determines when to move aliens based on the time that has passed. At the start, and each time the aliens move, _time is reset to 0. The setting of _time to 0 is taken care of in update(). Then, add the number of seconds that have passed to _time, and do not move the aliens. When _time is bigger than ALIEN_SPEED, move the aliens again. If the aliens move beyond the border of the game's frame, reset position and move them down and then to the left.
[ { "docid": "ea6b3cbe3660c8a85a1d6cbd10106bd4", "score": "0.68448657", "text": "def _move_Aliens_Main(self, key_input, dt):\n assert isinstance(key_input, GInput)\n assert isinstance(dt, int) or isinstance(dt, float)\n \n self._time += dt\n \n if key_input.is_key_down('lctrl') or key_input.is_key_down('rctrl'):\n if self._time > self._alienSpeed/2 and self._direction == 'right':\n self._check_Alien_Fire()\n #self._move_Aliens_Down_Main()\n self._move_Aliens_Right()\n self._time = 0\n \n if self._time > self._alienSpeed/2 and self._direction == 'left':\n self._check_Alien_Fire()\n #self._move_Aliens_Down_Main()\n self._move_Aliens_Left()\n self._time = 0\n \n else:\n if self._time > self._alienSpeed and self._direction == 'right':\n self._check_Alien_Fire()\n #self._move_Aliens_Down_Main()\n self._move_Aliens_Right()\n self._time = 0\n \n if self._time > self._alienSpeed and self._direction == 'left':\n self._check_Alien_Fire()\n #self._move_Aliens_Down_Main()\n self._move_Aliens_Left()\n self._time = 0\n \n #print(str(self._direction))\n \n self._move_Aliens_Down_Main()", "title": "" } ]
[ { "docid": "7bf63b7d7114febe5dcb3a3f30558c6f", "score": "0.71475893", "text": "def movealiens(self):\n if self._emptyaliens == False:\n if self._ismovingright == True:\n if self._time >= self._alienspeed:\n self.moveright()\n self._alienstep = self._alienstep + 1\n elif self._ismovingright == False:\n if self._time >= self._alienspeed:\n self.moveleft()\n self._alienstep = self._alienstep + 1", "title": "" }, { "docid": "6a77ca1905a3a7227862ebba5747dc78", "score": "0.7084952", "text": "def _animateAliens(self, dt):\n if self._time * self._wavenumber > ALIEN_SPEED:\n if self._direction == 'right':\n self._animatealienright()\n if self._direction == 'left':\n self._animateAlienleft()\n self._time = 0\n else:\n self._time += dt", "title": "" }, { "docid": "d2c4ef04d36007703ceac254b5568ef5", "score": "0.67561626", "text": "def _moveAliens(self,dt,alienSpeed):\n assert type(dt) in [int,float]\n assert type(alienSpeed) in [int,float]\n self._time += dt\n\n if self._time > alienSpeed: #if time to walk, then walk\n self._walks += 1\n for row in self._aliens:\n for alien in row:\n if alien is not None:\n alienX = alien.getX()\n alienY = alien.getY()\n if self._alienDir == 'right':\n alien.setX(alienX + ALIEN_H_WALK)\n elif self._alienDir == 'left':\n alien.setX(alienX - ALIEN_H_WALK)\n elif self._alienDir == 'right-down':\n alien.setY(alienY - ALIEN_V_WALK)\n elif self._alienDir == 'left-down':\n alien.setY(alienY - ALIEN_V_WALK)\n self._switchAlienDir() #Switch the alien direction", "title": "" }, { "docid": "817ec7a2a3369a5735c02094999870a9", "score": "0.6638007", "text": "def _update_aliens(self, dt):\n # Call helper to see if all aliens have been destroyed\n self._check_win()\n\n # Check to see if the aliens should be moved\n if self._time >= self._alienspeed:\n self._move_aliens() # Call helper to move them if necessary\n self._time = 0 # reset time\n else:\n self._time += dt # add time if alins have not moved\n\n # Call helper to check if any aliens have been killed\n self._kill_aliens()", "title": "" }, { "docid": "653adacb86a2c91634d9ef6ac72e4009", "score": "0.6384242", "text": "def moveAlien(self):\r\n for y in range(ALIENS_IN_ROW):\r\n if self.isEmptyColumn(y) == False:\r\n for x in range(len(self._aliens)):\r\n if not self._aliens[x][y] is None:\r\n wizz = self._aliens[x][y]\r\n for y in range(ALIENS_IN_ROW):\r\n h = ALIENS_IN_ROW - 1 - y\r\n if self.isEmptyColumn(h) == False:\r\n for x in range(len(self._aliens)):\r\n if not self._aliens[x][h] is None:\r\n queburt = self._aliens[x][h]\r\n if (self._time > ALIEN_SPEED):\r\n if (wizz.getX() > (GAME_WIDTH - ALIEN_H_SEP - ALIEN_WIDTH/2)) \\\r\n and (self.getDn() == 0):\r\n self.downAlien()\r\n elif (queburt.getX() < (ALIEN_H_SEP + ALIEN_WIDTH/2)) \\\r\n and (self.getDn() == 2):\r\n self.downAlien()\r\n elif self.getRt() == True:\r\n self.rightAlien()\r\n elif self.getRt() == False:\r\n self.leftAlien()\r\n self._time = 0\r\n self.alienSteps += 1", "title": "" }, { "docid": "da852958762f05f1b63bdd72191fbf7b", "score": "0.63293236", "text": "def _move_aliens(self):\n self._asteps += 1 # Keep track of number of steps taken to see when to fire\n alien_xs = []\n # Check if aliens should move down and change direction\n for row in self._aliens:\n for alien in row:\n if not alien is None:\n alien_xs.append(alien.x)\n if len(alien_xs) == 0:\n self._check_win()\n else:\n furthest_left = max(alien_xs)\n furthest_right = min(alien_xs)\n # Move down and march left\n if furthest_left > GAME_WIDTH - ALIEN_H_SEP and self._adirect == \"right\":\n 
self._move_aliens_down()\n self._adirect = \"left\"\n # Move down and march right\n if furthest_right < ALIEN_H_SEP and self._adirect == \"left\":\n self._move_aliens_down()\n self._adirect = \"right\"\n # Call helpers to move left or right\n if self._adirect == \"left\":\n self._move_aliens_left()\n if self._adirect == \"right\":\n self._move_aliens_right()", "title": "" }, { "docid": "c0ec5f39603efd9f02b8eff529390c73", "score": "0.60435295", "text": "def update(self, delta_time):\n\n # Only move and do things if the game is running.\n if self.current_state == GAME_RUNNING:\n\n self.player_sprite.change_x = 0\n self.player_sprite.change_y = 0\n\n if self.up_pressed and not self.down_pressed:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif self.down_pressed and not self.up_pressed:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n if self.left_pressed and not self.right_pressed:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif self.right_pressed and not self.left_pressed:\n self.player_sprite.change_x = MOVEMENT_SPEED\n\n # Call update to move the player\n self.player_sprite_list.update()\n # Call update on the treats and catchers\n self.treat_sprite_list.update()\n self.catcher_sprite_list.update()\n\n # Generate a list of all treats that collided with the player.\n hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.treat_sprite_list)\n\n # Loop through each colliding sprite, remove it, and add to the score.\n for treat in hit_list:\n treat.kill()\n\n self.score += 1\n\n # Check for collision between player and dogcatcher\n catcher_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.catcher_sprite_list)\n\n # If player hits a catcher, add score to high scores and change states\n # to game over\n if catcher_hit_list:\n self.add_score('manger_hi_scores.py')\n self.current_state = GAME_OVER\n\n # See if we should go to level 2\n if len(self.treat_sprite_list) == 0 and self.level == 1 and \\\n self.score == TREAT_COUNT_L1:\n self.catcher_sprite_list = arcade.SpriteList()\n self.level += 1\n self.current_state = BEFORE_LEVEL_2\n\n # See if we should go to level 3\n elif len(self.treat_sprite_list) == 0 and self.level == 2 and \\\n self.score == TREAT_COUNT_L1 + TREAT_COUNT_L2:\n self.catcher_sprite_list = arcade.SpriteList()\n self.level += 1\n self.current_state = BEFORE_LEVEL_3\n\n # See if player has won the game. 
If so, add score to high scores list\n # and enter won game state\n elif len(self.treat_sprite_list) == 0 and self.level == 3 and \\\n self.score == TREAT_COUNT_L1 + TREAT_COUNT_L2 + TREAT_COUNT_L3:\n self.catcher_sprite_list = arcade.SpriteList()\n self.add_score('manger_hi_scores.py')\n self.current_state = GAME_WON", "title": "" }, { "docid": "adfeb8b5bfa93a8a68e93618a8ce9a7f", "score": "0.5920735", "text": "def advance_time(self):\n self.move_ants()\n if self.food_reserves > 0:\n self.birth_ant()\n self.food_reserves -= 1", "title": "" }, { "docid": "6101cacb46a9d51af93ba0ad0124344c", "score": "0.58982784", "text": "def move(self):\n #For the second to last frame of the damaged animation, the velocity\n #should be set to zero\n if self.damage_time == 1:\n self.vel_x = 0\n self.vel_y = 0\n if self.damage_time > 0:\n self.damage_time -= 1\n if self.shielding:\n self.vel_x = 0\n\n\n self.pos_x += self.vel_x\n self.pos_y += self.vel_y\n self.rect = pygame.Rect(self.pos_x, self.pos_y, self.width, self.height)\n if(self.pos_y > 1000):\n self.pos_y = 0\n self.pos_x = 700\n self.lives -= 1\n self.weight = 1", "title": "" }, { "docid": "73a1c00e48b98b587d62acace6f82d61", "score": "0.5891082", "text": "def update(self, elapsed):\n pressed = PlayerCannon.KEYS_PRESSED\n space_pressed = pressed[key.SPACE] == 1\n if PlayerShoot.INSTANCE is None and space_pressed:\n self.parent.add(PlayerShoot(self.x, self.y + 50))\n\n movement = pressed[key.RIGHT] - pressed[key.LEFT]\n if movement != 0:\n self.move(self.speed * movement * elapsed)", "title": "" }, { "docid": "f68e19e58c6edb1032e201548370b028", "score": "0.57227916", "text": "def move_step(self, dt):\n ride_animations = ['src/ride_1.png', 'src/ride_2.png']\n rocket_x = self.rocket.pos[0]\n rocket_y = self.rocket.pos[1]\n\n platform_x = self.platform.pos[0]\n platform_y = self.platform.pos[1]\n\n currentx = self.player.pos[0]\n currenty = self.player.pos[1]\n\n bg_1_x = self.background.pos[0]\n bg_2_x = self.background2.pos[0]\n\n sun_x = self.sun.pos[0]\n\n step_size = 300 * dt\n\n\n if \"a\" in self.keysPressed:\n currentx -= step_size * 3\n if \"d\" in self.keysPressed:\n currentx += step_size * 3\n\n # Gravity (While not pressing W)\n if 'w' not in self.keysPressed:\n if collide(self.player, self.ground) is False and collide(self.player, self.platform) is False:\n currenty -= 3 # Gravity power\n self.player.pos = (currentx, currenty)\n self.player.source = random.choice(ride_animations)\n\n # ==============================================\n # Ride on Ground\n if collide(self.player, self.ground):\n self.player.pos = (currentx, currenty + self.ground.pos[1])\n if 'd' not in self.keysPressed:\n self.player.source = random.choice(ride_animations)\n else:\n self.player.source = 'player_jump.png'\n\n # Ride on platform\n elif collide(self.player, self.platform):\n self.player.pos = (currentx, self.platform.pos[1] + 20)\n if 'd' not in self.keysPressed:\n self.player.source = random.choice(ride_animations)\n else:\n self.player.source = 'player_jump.png'\n\n if collide(self.player, self.ground) and \"w\" in self.keysPressed or collide(self.player, self.platform) and \"w\" in self.keysPressed:\n if currenty < 200:\n currenty += step_size * 30\n self.player.source = 'player_jump.png'\n self.player.pos = (currentx, currenty)\n\n # ==============================================\n # If you hit a rocket.\n if collide(self.player, self.rocket):\n print('BOOM!')\n\n # point HIT\n if collide(self.player, self.enemy):\n self.enemy.pos = (random.randint(0, 500), 
random.randint(0, 500))\n\n # ==============================================\n # rocket\n if self.rocket.pos[0] > -5:\n self.rocket.pos = (rocket_x - random.randint(0, 10), rocket_y)\n else:\n self.rocket.pos = (799, random.randint(0, 100))\n # Platform\n if self.platform.pos[0] > -self.platform.size[0]:\n self.platform.pos = (platform_x - 1, platform_y)\n else:\n self.platform.pos = (799, random.randint(0, 30))\n\n # Background movement\n if bg_1_x > -800:\n self.background.pos = (bg_1_x - 0.5, self.background.pos[1])\n elif bg_1_x == -800:\n bg_1_x = 800\n self.background.pos = (bg_1_x, self.background.pos[1])\n if bg_2_x > -800:\n self.background2.pos = (bg_2_x - 0.5, self.background2.pos[1])\n elif bg_2_x == -800:\n bg_2_x = 800\n self.background2.pos = (bg_2_x, self.background2.pos[1])\n\n sun_x -= 0.2\n self.sun.pos = (sun_x,self.sun.pos[1])", "title": "" }, { "docid": "300a7c811e5464d93c3131eeae4c9b54", "score": "0.569465", "text": "def update(self, delta_time):\n self.check_keys()\n self.check_collisions()\n self.check_off_screen()\n\n for rock in self.rocks:\n rock.advance()\n\n for laser in self.lasers:\n laser.duration -= 1\n if laser.duration > 0:\n laser.advance()\n else:\n laser.alive = False\n\n # Random number generator to periodically create new large asteroid\n if self.play_game:\n if random.randint(0, 200) == 1:\n self.create_target()\n # Only update the ship if the play_game bool is true\n self.ship.advance()\n\n # Create 3 new asteroids if the list of asteroids is ever zero\n if len(self.rocks) == 0:\n for i in range(3):\n self.create_target()", "title": "" }, { "docid": "3caa63da06cfad282aabecaf7a7e1b03", "score": "0.56741685", "text": "def movement(self):\n self.mx = 0\n self.my = 0\n direction = \"idle\"\n key = pygame.key.get_pressed()\n\n # moving left\n if key[pygame.K_a] and not key[pygame.K_d]:\n self.mx = -config['player_speed']\n direction = \"left\"\n\n # moving right\n if key[pygame.K_d] and not key[pygame.K_a]:\n self.mx = config['player_speed']\n direction = \"right\"\n\n # moving up\n if key[pygame.K_w] and not key[pygame.K_s]:\n self.my = -config['player_speed']\n direction = \"up\"\n\n # moving down\n if key[pygame.K_s] and not key[pygame.K_w]:\n self.my = config['player_speed']\n direction = \"down\"\n\n # moving diagonally\n if self.mx != 0 and self.my != 0:\n self.mx *= 0.7071\n self.my *= 0.7071\n\n # shooting left\n if key[pygame.K_LEFT] and not key[pygame.K_RIGHT]:\n if self.shoot_delay == 0:\n self.shoot_delay = config['weapon_shoot_delay']\n Fireball(self.game, self.x - 35, self.y + 15, 'left')\n\n direction = \"left\"\n\n # shooting right\n elif key[pygame.K_RIGHT] and not key[pygame.K_LEFT]:\n if self.shoot_delay == 0:\n self.shoot_delay = config['weapon_shoot_delay']\n Fireball(self.game, self.x + 50, self.y + 15, 'right')\n\n direction = \"right\"\n\n # shooting up\n elif key[pygame.K_UP] and not key[pygame.K_DOWN]:\n if self.shoot_delay == 0:\n self.shoot_delay = config['weapon_shoot_delay']\n Fireball(self.game, self.x + 5, self.y - 35, 'up')\n\n direction = \"up\"\n\n # shooting down\n elif key[pygame.K_DOWN] and not key[pygame.K_UP]:\n if self.shoot_delay == 0:\n self.shoot_delay = config['weapon_shoot_delay']\n Fireball(self.game, self.x + 5, self.y + 35, 'down')\n\n direction = \"down\"\n\n if self.shoot_delay > 0:\n self.shoot_delay -= 1\n\n self.sprite_update(direction)", "title": "" }, { "docid": "0f04c262559e3f991b80e461d1f97d9c", "score": "0.5648438", "text": "def move(self, speed, elapsed_time):\n self.sprite.x += speed * 
elapsed_time", "title": "" }, { "docid": "03a86d9961b53b86f7a527c8f8e9d4e8", "score": "0.5640062", "text": "def update(self, seconds):\n # ----- kill because... ------\n if self.hitpoints <= 0:\n self.kill()\n if self.max_age is not None and self.age > self.max_age:\n self.kill()\n if self.max_distance is not None and self.distance_traveled > self.max_distance:\n self.kill()\n # ---- movement with/without boss ----\n if self.bossnumber is not None:\n if self.kill_with_boss:\n if self.bossnumber not in VectorSprite.numbers:\n self.kill()\n if self.sticky_with_boss and self.bossnumber in VectorSprite.numbers:\n boss = VectorSprite.numbers[self.bossnumber]\n self.pos = pygame.math.Vector2(boss.pos.x, boss.pos.y)\n self.set_angle(boss.angle)\n self.pos += self.move * seconds\n self.move *= self.friction \n if self.gravity is not None:\n self.move += self.gravity\n self.distance_traveled += self.move.length() * seconds\n self.age += seconds\n self.wallbounce()\n self.rect.center = ( round(self.pos.x, 0), -round(self.pos.y, 0) )", "title": "" }, { "docid": "198f5c9dde933844c0b8a79528197035", "score": "0.56142735", "text": "def update(self, delta_time):\n if not self.pause and self.current_state == GAME_RUNNING:\n self.check_keys()\n self.check_collisions()\n self.check_off_screen()\n self.ship.advance()\n self.player_two.advance()\n self.create_asteroids()\n self.check_death()\n\n if random.randint(1, ENEMY_SHIP_SPAWN_TIMER) == 1:\n enemy = AggressiveEnemyShip()\n enemy.center.new_location(self.ship, self.player_two)\n self.enemies.append(enemy)\n\n if random.randint(1, ENEMY_FREIGHTER_SPAWN_TIMER) == 1:\n enemy = EnemyFreighter()\n enemy.center.new_location(self.ship, self.player_two)\n self.enemies.append(enemy)\n\n for asteroid in self.asteroids:\n asteroid.advance()\n\n for bullet in self.bullets:\n bullet.advance()\n\n # Power ups will disappear after a while\n for power in self.power_ups:\n if power.timer > 0:\n power.timer -= 1\n power.advance()\n else:\n power.alive = False\n\n for enemy in self.enemies:\n if enemy.health > 0:\n if enemy.type == 1:\n self.enemy_attack(enemy)\n enemy.advance()\n else:\n enemy.alive = False\n\n if self.ship.health > SHIP_BASE_HEALTH:\n self.ship.health = SHIP_BASE_HEALTH\n\n if self.player_two.health > SHIP_BASE_HEALTH:\n self.player_two.health = SHIP_BASE_HEALTH\n\n if self.power_up:\n if self.power_up_timer > 1:\n self.shoot(self.ship)\n self.power_up_timer -= 1\n else:\n self.power_up = False\n self.power_up_timer = POWER_UP_TIMER\n\n if self.power_up_2:\n if self.power_up_timer_2 > 1:\n self.shoot(self.player_two)\n self.power_up_timer_2 -= 1\n else:\n self.power_up_2 = False\n self.power_up_timer_2 = POWER_UP_TIMER", "title": "" }, { "docid": "d2dd9f1c27bac9c1b5db6a03de8cd379", "score": "0.5587741", "text": "def update(self, screen, speed):\r\n #move towards the left\r\n self.rect.centerx -= self.dx\r\n if self.rect.right < 0:\r\n self.resetSec += 1\r\n if self.resetSec == self.i:\r\n self.resetSec = 0\r\n self.reset(screen, speed)\r\n\r\n #shoots forward if monster is of type 2\r\n if self.canShoot == 2:\r\n self.shootSec += 1\r\n if self.shootSec == 60:\r\n self.shootSec = 0\r\n self.shoot = True\r\n #shoots downwards if monster is of type 3\r\n elif self.canShoot == 3:\r\n self.shootSec += 1\r\n if self.shootSec == 60:\r\n self.shootSec = 0\r\n self.bomb = True", "title": "" }, { "docid": "3413478fb9e1dd990be8378906065c53", "score": "0.556675", "text": "def update(self, dt):\n # Update direction\n angle = Ship.rot_vel if self.left else 0\n angle = 
angle - Ship.rot_vel if self.right else angle\n self.dir.rotate_ip(angle * dt)\n\n # Update velocity\n self.vel += self.acc * self.dir * dt\n\n # Limit top speed\n if self.vel.magnitude() > Ship.vel_lim:\n self.vel.scale_to_length(Ship.vel_lim)\n\n # Update position\n self.pos += self.vel * dt\n\n # Wrap screen\n if not self.reenter and self.pos.x + Ship.size < 0:\n self.reenter = True\n self.pos.x = SCREEN_WIDTH + Ship.size\n elif not self.reenter and self.pos.x - Ship.size > SCREEN_WIDTH:\n self.reenter = True\n self.pos.x = 0 - Ship.size\n\n if not self.reenter and self.pos.y + Ship.size < 0:\n self.reenter = True\n self.pos.y = SCREEN_HEIGHT + Ship.size\n elif not self.reenter and self.pos.y - Ship.size > SCREEN_HEIGHT:\n self.reenter = True\n self.pos.y = 0 - Ship.size\n\n self.rect.center = self.pos\n\n if self.reenter:\n self.reenter = \\\n self.pos.x + Ship.size >= 0 and \\\n self.pos.x - Ship.size <= SCREEN_WIDTH and \\\n self.pos.y + Ship.size >= 0 and \\\n self.pos.y - Ship.size <= SCREEN_HEIGHT\n\n # Spawn bullets\n if self.shooting:\n cur_time = time.time()\n since_last = cur_time - self.last_shot_time if self.last_shot_time != -1 else 0\n if self.last_shot_time == -1 or since_last >= 1 / Ship.shots_per_sec:\n nose = self.pos + self.dir * (Ship.size / 2)\n if self.spawnBullet != None:\n self.spawnBullet(nose, self.dir)\n self.last_shot_time = cur_time", "title": "" }, { "docid": "079756b4203af2669fc83431406eac6d", "score": "0.5541177", "text": "def move(self):\n\n if self.falling:\n self.time += 1\n\n d = self.vel * (self.time / 60) + G / 2 * (self.time / 60) ** 2\n if not self.alive:\n d = G / 2 * (self.time / 60) ** 2\n\n # maximum speed:\n if abs(d) >= MAX_D:\n d = (d / abs(d)) * MAX_D\n\n if d < 0:\n d -= 4\n\n if self.falling:\n self.y += d\n\n if self.y < 0:\n self.y = 0\n\n if self.y > 541:\n self.y = 541\n self.angle = self.get_angle()\n return False", "title": "" }, { "docid": "688dd8ec72ff54f1b64964c0cb46e8b1", "score": "0.5534986", "text": "def update(self, delta_time: float) -> None:\n global explode, explode_x, explode_y, fps, position_y_1, position_y_2\n global level, prompt, prompt_time, boss_hp, boss_hp_current\n global up_pressed, down_pressed, left_pressed, right_pressed\n global laser_bomb, laser_effect, laser_fps, laser_counter, laser_counter_update\n global boss_create_fps, boss_sound_on, game_sound_on, game_sound_1, game_sound_2, game_sound_3, game_sound_4\n global boss_sound_1, boss_sound_2, boss_sound_3, game_sound, boss_sound_4, boss_sound_5\n global enemy_track, mode, moves, mm, move_list\n\n if self.current_state != GAME_RUNNING and self.frame_count % 3480 == 0:\n try:\n pass\n #arcade.play_sound(background_sound)\n except Exception as e:\n print(\"Error playing sound.\", e)\n\n if self.current_state == GAME_RUNNING:\n try:\n pass\n #background_sound.pause()\n except Exception as e:\n print(\"Error pausing sound.\", e)\n\n if level == 5:\n self.current_state = WIN\n return\n if self.current_state == GAME_RUNNING:\n '''\n if self.boss and boss_sound_on == 0:\n boss_sound_on = 1\n\n try:\n if level == 0:\n game_sound.pause()\n arcade.play_sound(boss_sound_1)\n if level == 1:\n game_sound_1.pause()\n arcade.play_sound(boss_sound_2)\n if level == 2:\n game_sound_2.pause()\n arcade.play_sound(boss_sound_3)\n if level == 3:\n game_sound_3.pause()\n arcade.play_sound(boss_sound_4)\n if level == 4:\n game_sound_4.pause()\n arcade.play_sound(boss_sound_5)\n except Exception as e:\n print(\"Error pausing sound.\", e)\n \n if not self.boss:\n try:\n if level == 
0:\n boss_sound_1.pause()\n if level == 1:\n boss_sound_2.pause()\n if level == 2:\n boss_sound_3.pause()\n if level == 3:\n boss_sound_4.pause()\n if level == 4:\n boss_sound_5.pause()\n\n except Exception as e:\n print(\"Error pausing sound.\", e)\n\n boss_sound_on = 0\n # if (self.frame_count - fps) == 180 and fps != 0:\n # game_sound_on = 0\n\n if game_sound_on == 0:\n try:\n if level == 0:\n arcade.play_sound(game_sound)\n if level == 1:\n arcade.play_sound(game_sound_1)\n if level == 2:\n arcade.play_sound(game_sound_2)\n if level == 3:\n arcade.play_sound(game_sound_3)\n if level == 4:\n arcade.play_sound(game_sound_4)\n\n except Exception as e:\n print(\"Error playing sound.\", e)\n game_sound_on = 1\n '''\n # update remaining laser based on current score\n laser_counter = Score // 2000 + 1\n if laser_counter + laser_counter_update == 1:\n #arcade.play_sound(missile_sound_1)\n self.laser_player += 1\n laser_counter_update -= 1\n\n if self.hp <= 0:\n '''\n game_sound_on = 10\n try:\n game_sound.pause()\n game_sound_1.pause()\n game_sound_2.pause()\n game_sound_3.pause()\n game_sound_4.pause()\n\n boss_sound_1.pause()\n boss_sound_2.pause()\n boss_sound_3.pause()\n boss_sound_4.pause()\n boss_sound_5.pause()\n\n except Exception as e:\n print(\"Error pausing sound.\", e)\n '''\n self.dead()\n\n else:\n # drop hp bonus every 10s\n if self.frame_count % 600 == 599:\n bonus_hp = arcade.Sprite(\"images/hp_bonus.png\", 0.45)\n bonus_hp.center_x = random.randrange(0, SCREEN_WIDTH)\n bonus_hp.center_y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT * 1.25)\n self.bonus.append(bonus_hp)\n # else:\n # if self.frame_count % 3600 == 3599:\n # bonus_hp = arcade.Sprite(\"images/hp_bonus.png\", 0.45)\n # bonus_hp.center_x = random.randrange(0, SCREEN_WIDTH)\n # bonus_hp.center_y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT * 1.25)\n # self.bonus.append(bonus_hp)\n\n if self.frame_count % 120 == 0 and not self.boss and not 1 <= explode <= 4:\n#TODO\n for _ in range(1 + level):\n # randomly generate enemy planes of different levels\n ranNum = random.randint(0, 1000)\n if ranNum < 300:\n enemy = Enemy(\"images/plane_small.png\", 0.85, level+2, 10, 4)\n elif ranNum < 550:\n enemy = Enemy(\"images/enemy_2.png\", 0.7, level+4, 15, 4)\n elif ranNum < 750:\n enemy = Enemy(\"images/enemy_1.png\", 0.6, level+6, 50, 3)\n elif ranNum < 900:\n enemy = Enemy(\"images/boss0.png\", 0.35, level+8, 100, 2)\n else:\n enemy = Enemy(\"images/enemy_3.png\", 0.7, level+16, 200, 2)\n\n enemy.center_x = random.randrange(0, SCREEN_WIDTH)\n enemy.center_y = random.randrange(SCREEN_HEIGHT, SCREEN_HEIGHT * 1.25)\n enemy.angle = 180\n self.enemy_list.append(enemy)\n\n # create a boss and ensure no small enemies appear during the boss battle\n elif self.frame_count - fps == (1799 * (level + 1)) and not self.boss and not 1 <= explode <= 4:\n # boss prompt\n boss_create_fps = self.frame_count\n prompt = True\n prompt_time = self.frame_count\n\n # update boss image based on game level\n enemy = Boss(\"images/boss_\"+str(level)+\".png\",\n 0.8,\n (level+1)*50,\n (level**2+1)*500,\n min(level+1.5, 3))\n\n enemy.center_x = random.randrange(0, SCREEN_WIDTH)\n enemy.center_y = SCREEN_HEIGHT * 2\n enemy.angle = 180\n self.enemy_list.append(enemy)\n self.boss = True\n boss_hp = enemy.ehp\n\n # update player's hp based on different damage levels from boss\n for boss in self.enemy_list:\n if 1 <= laser_effect <= 6:\n # realize the disappearance of self bullet when it hits boss\n for e in self.bullet_self_list:\n if boss.center_x - 20 <= 
e.center_x <= boss.center_x + 20:\n e.kill()\n # calculate different damage levels of laser from boss\n if level == 0:\n if self.player.center_x - 36 < boss.center_x < self.player.center_x + 36:\n self.hp = max(0, self.hp - 0.8)\n if level == 1:\n if self.player.center_x - 36 < boss.center_x < self.player.center_x + 36:\n self.hp = max(0, self.hp - 0.9)\n if level == 2:\n if self.player.center_x - 36 < boss.center_x - 45 < self.player.center_x + 36 \\\n or self.player.center_x - 36 < boss.center_x + 15 < self.player.center_x + 36:\n self.hp = max(0, self.hp - 1)\n if level == 3:\n if self.player.center_x - 36 < boss.center_x - 45 < self.player.center_x + 36 \\\n or self.player.center_x - 36 < boss.center_x < self.player.center_x + 36 \\\n or self.player.center_x - 36 < boss.center_x + 15 < self.player.center_x + 36:\n self.hp = max(0, self.hp - 1.1)\n if level == 4:\n if self.player.center_x - 36 < boss.center_x - 45 < self.player.center_x + 36 \\\n or self.player.center_x - 36 < boss.center_x < self.player.center_x + 36 \\\n or self.player.center_x - 36 < boss.center_x + 15 < self.player.center_x + 36:\n self.hp = max(0, self.hp - 1.5)\n\n # update the background position (speed is 1)\n position_y_1 -= 1\n position_y_2 -= 1\n\n if position_y_1 == -300:\n position_y_1 = 900\n if position_y_2 == -300:\n position_y_2 = 900\n\n # collision with bullet\n bullet_collide_list = arcade.check_for_collision_with_list(self.player, self.bullet_list)\n for collide_bullet in bullet_collide_list:\n collide_bullet.kill()\n self.hp = max(0, self.hp - 5)\n\n # collision with enemy\n enemy_collide_list = arcade.check_for_collision_with_list(self.player, self.enemy_list)\n for collide_enemy in enemy_collide_list:\n collide_enemy.kill()\n if self.boss:\n self.hp = 0\n self.hp = max(0, self.hp - 30)\n\n# TODO\n # calculate different damage of player's bullet or bomb makes on enemy or boss\n for e in self.enemy_list:\n if type(e).__name__ == \"Boss\":\n boss_hp_current = e.ehp\n bullet_hit_list = arcade.check_for_collision_with_list(e, self.bullet_self_list)\n bullet_hit_list_2 = arcade.check_for_collision_with_list(e, self.bullet_pet_list)\n bullet_hit_list_3 = arcade.check_for_collision_with_list(e, self.assist)\n for bullet_hit in bullet_hit_list:\n bullet_hit.kill()\n if level == 0:\n boss_hit = e.hitted(2)\n elif level < 2:\n boss_hit = e.hitted(1)\n elif level == 2:\n boss_hit = e.hitted(3)\n else:\n boss_hit = e.hitted(5)\n if boss_hit[0] == 1:\n self.boss = False\n explode = 1\n explode_x = boss_hit[1]\n explode_y = boss_hit[2]\n fps = self.frame_count\n for bullet_hit in bullet_hit_list_2:\n bullet_hit.kill()\n boss_hit = e.hitted(4)\n if boss_hit[0] == 1:\n self.boss = False\n explode = 1\n explode_x = boss_hit[1]\n explode_y = boss_hit[2]\n fps = self.frame_count\n for bullet_hit in bullet_hit_list_3:\n boss_hit = e.hitted(0.4)\n if boss_hit[0] == 1:\n self.boss = False\n explode = 1\n explode_x = boss_hit[1]\n explode_y = boss_hit[2]\n fps = self.frame_count\n\n for bomb in self.assist:\n bullet_hit_list = arcade.check_for_collision_with_list(bomb, self.bullet_list)\n\n for b in bullet_hit_list:\n b.kill()\n\n # boss explode animation\n if explode == 1 and self.frame_count - fps == 20:\n explode += 1\n elif explode == 2 and self.frame_count - fps == 40:\n explode += 1\n elif explode == 3 and self.frame_count - fps == 60:\n explode += 1\n elif explode == 4 and self.frame_count - fps == 180:\n explode += 1\n level += 1\n #bomb_sound.pause()\n #game_sound_on = 0\n\n # use loop to make all enemies 
facing to the player\n for enemy in self.enemy_list:\n\n start_x = enemy.center_x\n start_y = enemy.center_y\n\n dest_x = self.player.center_x\n dest_y = self.player.center_y\n\n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n\n # use if statement to exclude the boss angle\n if type(enemy).__name__ == \"Boss\":\n enemy.angle = 0\n else:\n enemy.angle = math.degrees(angle) - 270\n# TODO A.I???\n # determine the shooting characteristics of enemy / boss planes\n if type(enemy).__name__ == \"Boss\" and self.frame_count % ((120 - 20 * level) // 2) == 0:\n\n def add_bullet(n: int) -> None:\n \"\"\" Use recursion to create boss bullet's trajectory\n\n Args:\n n: number of times it creates bullet\n\n Returns:\n None\n \"\"\"\n\n if n != 4:\n bullet_1 = arcade.Sprite(\"images/boss_bullet.png\", 0.5)\n bullet_2 = arcade.Sprite(\"images/boss_bullet.png\", 0.5)\n\n dx = math.sin(n/2) * BULLET_SPEED * 1.5\n dy = math.cos(n/2+math.pi) * BULLET_SPEED * 1.5\n\n bullet_1.center_x = start_x\n bullet_1.center_y = start_y\n bullet_1.angle = 0\n bullet_1.change_x = dx\n bullet_1.change_y = dy\n self.bullet_list.append(bullet_1)\n\n bullet_2.center_x = start_x\n bullet_2.center_y = start_y\n bullet_2.angle = 0\n bullet_2.change_x = -dx\n bullet_2.change_y = dy\n self.bullet_list.append(bullet_2)\n\n # recursive step\n add_bullet(n+1)\n\n add_bullet(0)\n\n elif self.frame_count % (120 - 20 * level) == 0:\n bullet = arcade.Sprite(\"images/enemy_bullet.png\", 0.5)\n bullet.center_x = start_x\n bullet.center_y = start_y\n bullet.angle = math.degrees(angle)\n bullet.change_x = math.cos(angle) * BULLET_SPEED * 1.5\n bullet.change_y = math.sin(angle) * BULLET_SPEED * 1.5\n self.bullet_list.append(bullet)\n\n\n# TODO youhua\n # determine the shooting frequency of the player airplane\n if self.frame_count % (15 - 2 * level) == 0:\n\n if level == 0:\n self.create_bullet(\"Bomb2\", 0.85, self.player.center_x, self.player.center_y + 10, 0,\n BULLET_SPEED * 3)\n\n if level == 1:\n self.create_bullet(\"Bomb2\", 0.85, self.player.center_x - 15, self.player.center_y, 0,\n BULLET_SPEED * 3)\n self.create_bullet(\"Bomb2\", 0.85, self.player.center_x + 15, self.player.center_y, 0,\n BULLET_SPEED * 3)\n\n if level == 2:\n self.create_bullet(\"Bomb3\", 0.55, self.player.center_x, self.player.center_y + 10, 0,\n BULLET_SPEED * 3)\n if level > 2:\n self.create_bullet(\"Bomb5\", 0.55, self.player.center_x, self.player.center_y, 0,\n BULLET_SPEED * 4)\n\n # determine the shooting frequency of the pet airplane based on the level\n if self.frame_count % (60 - 2 * level) == 0:\n self.create_bullet(\"pet_bullet_3\", 0.25, self.pet.center_x, self.pet.center_y, 0,\n BULLET_SPEED * min(2 + level, 3))\n self.create_bullet(\"pet_bullet_3\", 0.25, self.pet2.center_x, self.pet2.center_y, 0,\n BULLET_SPEED * min(2 + level, 3))\n\n # store the enemy number to a list\n sorted_enemy = MyGame.sort_enemy(self.enemy_list)\n current_enemy = []\n for enemy in sorted_enemy:\n current_enemy.append(enemy.number)\n\n if self.frame_count % 50 == 0:\n bullet_1 = arcade.Sprite(\"images/pet_bullet.png\", 0.5)\n bullet_1.center_x = self.player.center_x - 20\n bullet_1.center_y = self.player.center_y\n bullet_1.angle = 0\n\n bullet_2 = arcade.Sprite(\"images/pet_bullet.png\", 0.5)\n bullet_2.center_x = self.player.center_x + 20\n bullet_2.center_y = self.player.center_y\n bullet_2.angle = 0\n\n if sorted_enemy:\n # select two targets that have biggest hp\n target_1 = sorted_enemy[0]\n target_2 = 
sorted_enemy[min(len(sorted_enemy)-1, 1)]\n # determine the enemy, ensure no missile on the A.I level\n if level != 0:\n self.bullet_pet_list.append(bullet_1)\n self.bullet_pet_list.append(bullet_2)\n # make a tracking list, with key being the number, the value being the bullet\n if target_1.number not in enemy_track:\n enemy_track[target_1.number] = []\n enemy_track[target_1.number].append(bullet_1)\n\n if target_2.number not in enemy_track:\n enemy_track[target_2.number] = []\n enemy_track[target_2.number].append(bullet_2)\n\n if sorted_enemy:\n for number, bullets in enemy_track.items():\n # determine if the enemy exists in the current fps\n if number in current_enemy:\n # make the bullet track the enemy\n for bullet in bullets:\n # set the position of the bullet\n start_x = bullet.center_x\n start_y = bullet.center_y\n\n dest_x = Enemy.enemies[number].center_x\n dest_y = Enemy.enemies[number].center_y\n\n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n bullet.change_x = math.cos(angle) * BULLET_SPEED * (level // 3 + 3)\n bullet.change_y = math.sin(angle) * BULLET_SPEED * (level // 3 + 3)\n\n bullet.angle = math.degrees(angle) - 90\n\n # use loops to remove the bullet when it flies off-screen\n for bullet in self.bullet_self_list:\n if bullet.bottom > 600 or bullet.top < 0 or bullet.center_x > 800 or bullet.center_x < 0:\n bullet.kill()\n\n for bullet in self.bullet_pet_list:\n if bullet.bottom > 600 or bullet.top < 0 or bullet.center_x > 800 or bullet.center_x < 0:\n bullet.kill()\n\n for bullet in self.assist:\n if bullet.bottom > 600:\n bullet.kill()\n\n for bullet in self.bullet_list:\n if bullet.top < 0:\n bullet.kill()\n\n # use loops to control the dropping of hp_bonus\n for hp_bonus in self.bonus:\n hp_bonus.center_y -= 5\n # update player's hp when it catches hp_bonus\n if arcade.check_for_collision(self.player, hp_bonus):\n self.hp = min(100, self.hp + 30)\n #arcade.play_sound(hp_bonus_sound)\n hp_bonus.kill()\n # remove hp_bonus when it gets out of windows\n elif hp_bonus.top < 0:\n hp_bonus.kill()\n\n # move pet with self plane\n self.pet.center_x = self.player.center_x + 80\n self.pet.center_y = self.player.center_y\n self.pet2.center_x = self.player.center_x - 80\n self.pet2.center_y = self.player.center_y\n\n # trigger the missile\n if laser_bomb and self.laser_player > 0 and len(self.assist) <= 1:\n assist_bomb = arcade.Sprite(\"images/assisent1_1.png\", 1)\n assist_bomb.center_x = self.player.center_x - 25\n assist_bomb.center_y = self.player.center_y\n assist_bomb.angle = 0\n assist_bomb.change_x = 0\n assist_bomb.change_y = 10\n self.assist.append(assist_bomb)\n\n assist_bomb = arcade.Sprite(\"images/assisent1_1.png\", 1)\n assist_bomb.center_x = self.player.center_x + 25\n assist_bomb.center_y = self.player.center_y\n assist_bomb.angle = 0\n assist_bomb.change_x = 0\n assist_bomb.change_y = 10\n self.assist.append(assist_bomb)\n\n self.laser_player -= 1\n\n # use if statement to set the laser shooting period to be 8s\n if self.boss and (self.frame_count - boss_create_fps) % 480 == 0 and (\n self.frame_count - boss_create_fps) != 0:\n laser_effect = 1\n laser_fps = self.frame_count\n\n # use if statement to animate laser\n if laser_effect == 1 and self.frame_count - laser_fps == 20:\n laser_effect += 1\n elif laser_effect == 2 and self.frame_count - laser_fps == 40:\n laser_effect += 1\n elif laser_effect == 3 and self.frame_count - laser_fps == 60:\n laser_effect += 1\n elif laser_effect == 4 and self.frame_count - 
laser_fps == 80:\n laser_effect += 1\n elif laser_effect == 5 and self.frame_count - laser_fps == 100:\n laser_effect += 1\n elif laser_effect == 6 and self.frame_count - laser_fps == 120:\n laser_effect += 1\n\n # set time for boss prompt to be 3s\n if self.frame_count - prompt_time == 180 and prompt:\n prompt = False\n\n # realize the dropping of boss and enemy planes\n for e in self.enemy_list:\n e.drop()\n\n if level == 5:\n self.current_state = WIN\n self.set_mouse_visible(True)\n\n self.bullet_list.update()\n self.bullet_self_list.update()\n self.bullet_pet_list.update()\n self.assist.update()\n\n# TODO youhua\n if self.current_state == GAME_RUNNING and mode == 0 and self.frame_count % 5 == 0:\n\n if Communication.get_x() != '' and Communication.get_y() != '':\n try:\n self.player.center_x = min(800-int(Communication.get_x()), 764)\n self.player.center_y = min(300-int(Communication.get_y()), 552)\n # if len(move_list) == 2:\n # if -5 <= (move_list[1][0] - move_list[0][0]) <= 5:\n # self.player.center_x = move_list[0][0]\n # else:\n # self.player.center_x = move_list[1][0]\n # if -5 <= (move_list[1][1] - move_list[0][1]) <= 5:\n # self.player.center_y = move_list[0][1]\n # else:\n # self.player.center_y = move_list[1][1]\n\n except:\n self.player.center_x = 400\n self.player.center_y = 300\n # print(self.player.center_x, self.player.center_y)\n else:\n print(\"face not detected\")\n\n\n\n if level >= 2:\n mode = 2\n # AutoPilot Mode\n if mode == 1 and level < 2:\n # Decide the next move within three frame\n if self.frame_count % 3 == 0:\n def make_moves(x: int, y: int, n: int) -> Tuple:\n \"\"\" Make the player's plane move\n\n Args:\n x: horizontal position\n y: vertical position\n n: number of moves\n\n Returns:\n a \"decision\"\n \"\"\"\n # consider 4 moves\n if n == 4:\n return [], 0\n\n # should only consider moves within the screen\n valid_move = []\n for m in directions:\n if 36 <= x + m[0] <= 764 and 48 <= y + m[1] <= 552:\n valid_move.append(m)\n\n # choose best \"child\" move, do a recursion on state tree\n decision = []\n new_moves = []\n for m in valid_move:\n result = make_moves(x + m[0], y + m[1], n + 1)\n new_moves.append(result[0])\n decision.append(result[1])\n new_i = decision.index(min(decision))\n\n # calculate the heuristic of a particular state\n # wants to hit enemy\n e = None\n for target in self.enemy_list:\n e = target\n break\n if e:\n tot_dist = MyGame.get_distance(e.center_x, e.center_y - e.speed*4 - 400, x, y)\n else:\n tot_dist = MyGame.get_distance(SCREEN_WIDTH//2, 200, x, y)\n\n # hp bonus is more important!\n e = None\n for h in self.bonus:\n e = h\n break\n if e:\n tot_dist = MyGame.get_distance(e.center_x, e.center_y - 20, x, y)\n\n return [valid_move[new_i]] + new_moves[new_i], decision[new_i] + tot_dist\n\n moves = make_moves(self.player.center_x, self.player.center_y, 0)[0]\n mm = 0\n\n print(moves)\n moves.pop(3)\n print(\"done\")\n # avoid 抽搐\n # for i in range(1, 4):\n # if moves[i][0]+moves[i-1][0] == 0 and moves[i][1]+moves[i-1][1] == 0:\n # moves[i] = S\n\n\n #print(self.player.center_x, self.player.center_y)\n\n self.player.center_x += moves[mm][0]\n self.player.center_y += moves[mm][1]\n\n mm += 1\n if mm == 3:\n mm = 0\n\n # keyboard control the movement of the player\n if up_pressed:\n self.player.center_y = min(552, self.player.center_y + 5)\n if down_pressed:\n self.player.center_y = max(48, self.player.center_y - 5)\n if left_pressed:\n self.player.center_x = max(36, self.player.center_x - 5)\n if right_pressed:\n 
self.player.center_x = min(764, self.player.center_x + 5)\n\n # update the frame_count\n self.frame_count += 1\n # if self.frame_count % 60 == 0:\n # print(Communication.get_x(), Communication.get_y())", "title": "" }, { "docid": "357227e322cdef7b1982707dae401ae7", "score": "0.5534425", "text": "def update(self,input,dt):\n if self._ship != None:\n self._ship.moveShip(input)\n self.makeShipBolt(input)\n self.moveShipBolts()\n self.moveAlienBolts()\n self.isEachPlayerBolt()\n if self._time >= self._alienSpeed:\n self.moveAliensH()\n if self._musicSounds != None:\n self._musicNote.play()\n self._musicNotePos = (self._musicNotePos + 1)%4\n if self._alienBoltTime >= self._randBoltRate * self._alienSpeed:\n self.makeAlienBolt()\n if self._bgTime >= BACKGROUND_SPEED:\n self.animateBground()\n else:\n self._time = self._time + dt\n self._bgTime = self._bgTime + dt\n self._alienBoltTime += dt\n self.alienCollision()\n self.dLineCollision()\n self.shipCollision()\n self.noAliens()\n self.makeLives()\n self.mute(input)\n self.unmute(input)\n self.updateScore()\n self.updateMusicNote()", "title": "" }, { "docid": "f6683b275b8b789632d021e04df85b2a", "score": "0.5526928", "text": "def update(self, delta_time):\n self.frame_count += 1\n\n if not self.current_state == GAME_RUNNING:\n return\n\n if len(self.message_queue) > 0:\n return\n\n start_time = timeit.default_timer()\n\n # Move player\n self.physics_engine.update()\n\n # Move creatures\n for level in self.room_list:\n for enemy in level.enemy_list:\n enemy.update()\n\n for enemy in self.current_level.enemy_list:\n if enemy.tag == \"imp\" and random.randrange(100) == 0:\n javelin = arcade.Sprite(\"images/javelin.png\", 1)\n javelin.tag = \"javelin\"\n\n # Position the bullet at the player's current location\n start_x = enemy.center_x\n start_y = enemy.center_y\n javelin.center_x = start_x\n javelin.center_y = start_y\n\n # Get from mouse destination for impact\n dest_x = self.player_sprite.center_x\n dest_y = self.player_sprite.center_y\n\n # Trajectory for javelin throw\n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n javelin.angle = math.degrees(angle)\n\n # velocity\n javelin.change_x = math.cos(angle) * JAVELIN_SPEED\n javelin.change_y = math.sin(angle) * JAVELIN_SPEED\n\n self.current_level.javelin_list.append(javelin)\n\n # Generate a list of all item sprites that player picked up.\n objects_hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.current_level.objects_list)\n for my_object in objects_hit_list:\n self.current_level.objects_list.remove(my_object)\n self.player_sprite.inventory.append(my_object)\n\n # Move javelins\n self.current_level.javelin_list.update()\n for javelin in self.current_level.javelin_list:\n sprites_hit = arcade.check_for_collision_with_list(javelin, self.current_level.wall_list)\n if len(sprites_hit) > 0:\n javelin.kill()\n\n # Did javelin hit player?\n sprites_hit = arcade.check_for_collision_with_list(self.player_sprite, self.current_room.javelin_list)\n if (len(sprites_hit) > 25) or (self.player.hp < 1):\n self.current_state = GAME_OVER\n\n nearest_sprite, distance = get_closest_sprite(self.player_sprite, self.current_level.enemy_list)\n\n if distance < TEAM_SPRITE_SIZE * 2 and nearest_sprite.tag == \"cliff\":\n self.message_queue.append(\"Oh look a cliff... HUH WAIT WHAT??? 
AHHHH!!!\")\n self.current_state = GAME_OVER\n\n if not self.player_sprite.respawning:\n enemy = arcade.check_for_collision_with_list(self.player_sprite, self.enemy_list)\n if len(enemy) > 0:\n if self.lives > 0:\n self.lives -= 1\n self.player_sprite.respawn()\n enemy[0].kill()\n self.party_life_list.pop().kill()\n else:\n self.current_state = GAME_OVER\n\n # --- Manage Scrolling ---\n self.scroll()\n\n # Save the time it took to do this.\n self.processing_time = timeit.default_timer() - start_time", "title": "" }, { "docid": "c1f1dfd67da2ee4c6dd060ff07d31625", "score": "0.55188614", "text": "def advance(self):\n dist_x = self.center.x - self.outside_ship.center.x\n dist_y = self.center.y - self.outside_ship.center.y\n actual_dist = math.sqrt((dist_x ** 2) + (dist_y ** 2))\n\n self.set_speed(actual_dist)\n if actual_dist > SPAWN_DISTANCE:\n self.velocity.dx = math.cos(math.radians(self.angle)) * self.enemy_speed\n self.velocity.dy = math.sin(math.radians(self.angle)) * self.enemy_speed\n elif abs(dist_x) < SPAWN_DISTANCE:\n self.velocity.dx = 0\n elif abs(dist_y) < SPAWN_DISTANCE:\n self.velocity.dy = 0\n super().advance()", "title": "" }, { "docid": "a2aa96020ec4f520fe4968c3b55ffc53", "score": "0.55162054", "text": "def animate(self):\n now = pygame.time.get_ticks()\n\n if self.state == 'WALKING':\n if now - self.anim_update > 200:\n self.anim_update = now\n self.current_frame = (self.current_frame + 1) % len(\n self.walk_frames[LEFT])\n \n self.image = self.walk_frames[(self.lastdir.x, \n self.lastdir.y)][self.current_frame]\n \n elif self.state == 'IDLE':\n cfg.PLAYER_FRICTION = 0.5\n self.image = self.idle_frames[(self.lastdir.x, self.lastdir.y)][0]\n\n \n elif self.state == 'HITSTUN':\n self.image = self.idle_frames[(self.lastdir.x, self.lastdir.y)][0]\n # flicker to indicate damage\n try:\n alpha = next(self.damage_alpha)\n # self.image = self.lastimage.copy()\n # self.image.fill((255, 255, 255, alpha), \n # special_flags=pygame.BLEND_RGBA_MULT)\n except:\n self.state = 'IDLE'", "title": "" }, { "docid": "1207de719dd8cc4977fc1bfaee3ab69f", "score": "0.5495518", "text": "def increaseSpeed(self):\n\t\t#self.shipSpeed *= self.speedUp\n\t\t#self.bulletSpeed *= self.speedUp\n\t\tif self.alienSpeed <= 1.5:\n\t\t\tself.alienSpeed *= self.speedUp\n\t\t\tself.fleetDropSpeed *= self.speedUp\n\t\tself.alienPoints = int(self.alienPoints * self.scoreSpeedUp)", "title": "" }, { "docid": "1207de719dd8cc4977fc1bfaee3ab69f", "score": "0.5495518", "text": "def increaseSpeed(self):\n\t\t#self.shipSpeed *= self.speedUp\n\t\t#self.bulletSpeed *= self.speedUp\n\t\tif self.alienSpeed <= 1.5:\n\t\t\tself.alienSpeed *= self.speedUp\n\t\t\tself.fleetDropSpeed *= self.speedUp\n\t\tself.alienPoints = int(self.alienPoints * self.scoreSpeedUp)", "title": "" }, { "docid": "e6ce78614d611b0799ab7028669f4144", "score": "0.5487583", "text": "def _update(self, dt):\n # walking\n \n if self.flying:\n speed = FLYING_SPEED\n current_speed = FLYING_SPEED\n if self.running:\n speed = current_speed * 2\n elif self.sneaking and not self.flying and not self.running:\n speed = SNEAK_SPEED\n elif self.running and not self.sneaking:\n speed = WALKING_SPEED\n current_speed = WALKING_SPEED\n speed = current_speed * 1.5\n else:\n speed = WALKING_SPEED\n current_speed = WALKING_SPEED\n d = dt * speed # distance covered this tick.s\n dx, dy, dz = self.get_motion_vector()\n # New position in space, before accounting for gravity.\n #dx, dy, dz = dx * d, dy * d, dz * d\n dx, dy, dz = dx * d, dy * d, dz * d\n # gravity\n if not 
self.flying:\n # Update your vertical speed: if you are falling, speed up until you\n # hit terminal velocity; if you are jumping, slow down until you\n # start falling.\n self.dy -= dt * GRAVITY\n self.dy = max(self.dy, -TERMINAL_VELOCITY)\n dy += self.dy * dt\n # collisions\n x, y, z = self.position\n x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)\n self.position = (x, y, z)", "title": "" }, { "docid": "dbb3cdd72de4e58274e9eafac5fd52c3", "score": "0.5476235", "text": "def moveAliensV(self):\n for row in self._aliens:\n for alien in row:\n if alien != None:\n alien.setY(alien.getY() - ALIEN_V_WALK)\n alien.frame = (alien.frame+1)%2\n self._time = 0", "title": "" }, { "docid": "373189d9397312083717a9fd2ed0d825", "score": "0.547033", "text": "def update(self, delta_time):\n if(self.state == GameStates.START):\n self.moveGround()\n self.startAnimation(self.i)\n self.i+=1\n pass\n if(self.state == GameStates.END):\n self.endAnimation()\n pass\n if(self.state == GameStates.RUNNING):\n \n bird = self.list_birds[0]\n if(bird.dead):\n self.startEndAnimation()\n pass\n self.updateScore()\n \n bird.move(self.list_pipes, 0)\n # manage pipes\n self.moveGround()\n self.updatePipes()\n \n pass", "title": "" }, { "docid": "b0b0cadf64dd83ff2693f7e96a77e240", "score": "0.5456008", "text": "def on_update(delta_time):\n global up_pressed, down_pressed, right_pressed, left_pressed, player_x, player_y\n\n # Left right movement\n if right_pressed:\n player_x += 5\n if left_pressed:\n player_x -= 5\n\n # Calling all functions\n player_start()\n jumped()\n level_switch()\n camera()", "title": "" }, { "docid": "989d6c0628fa37aa6e2ad2751e194c3a", "score": "0.5454818", "text": "def update(self):\n # update actor clock\n now = self.world.get_time()\n self.timediff = now - self.last_update\n self.last_update = now\n \n if self.feel_magic:\n windfield = self.WindField.value(self.pos)\n timefield = self.TimeField.value(self.pos)\n lifefield = self.LifeField.value(self.pos)\n \n # update movement\n if self.const_speed or self.const_accel:\n # normal movement\n self.speed += self.timediff * self.accel\n self.yspeed += self.timediff * self.yaccel\n # magical movement\n if self.feel_magic:\n magic_speed = windfield * 10.0\n magic_mult = timefield\n if magic_mult > 0:\n magic_mult *= 5.0\n else:\n magic_mult *= 1.0\n magic_mult += 1.0\n else:\n magic_speed = 0.0\n magic_mult = 1.0\n # update position\n self.pos += magic_mult * self.timediff * (self.speed + magic_speed)\n self.ypos += magic_mult * self.timediff * self.yspeed\n if self.animate:\n self.movement_sound()\n\n # update hp\n if self.initial_hp and self.feel_magic:\n time_damage = abs(timefield) * 10.0\n wind_damage = abs(windfield) * 10.0\n life_damage = max(lifefield, 0) * 25.0\n self.hp -= self.timediff * (time_damage + wind_damage + life_damage)\n if self.hp < self.initial_hp:\n magic_regen = max(-lifefield, 0) * 12.5\n self.hp += self.timediff * (self.regeneration + magic_regen)\n if self.hp > self.initial_hp:\n self.hp = self.initial_hp\n # death\n if self.hp <= 0 and self.initial_hp:\n self.dead = True\n self.death_sound()\n self.destroy()\n\n # set magic energy\n if self.initial_energy and self.feel_magic:\n magic_mult = lifefield / 2.0 + 1.0\n self.magic_energy = magic_mult * self.initial_energy\n \n # controlled actors most likely want to do something\n if self.controller:\n if self.last_control + self.controller.control_interval < self.world.get_time():\n self.controller.update()\n self.last_control = self.world.get_time()", "title": "" 
}, { "docid": "12f0bbc4549482eb1c0a8ddde8a96b71", "score": "0.54433805", "text": "def update(self, elapsed_time=0):\n # Check player holding left ONLY\n if self.key_handler[Player.left_key] and \\\n not self.key_handler[Player.right_key]:\n self.move(-Player.speed, elapsed_time)\n # Alternatively holding right ONLY\n elif self.key_handler[Player.right_key] and \\\n not self.key_handler[Player.left_key]:\n self.move(Player.speed, elapsed_time)", "title": "" }, { "docid": "639ac7423c137e9df907ad79c96c4fff", "score": "0.54376453", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y+=ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "2fb9c418fff82253a2278684860e1d04", "score": "0.5420517", "text": "def on_update(self, delta_time):\n\n # Animate all the snowflakes falling\n for snowflake in self.snowflake_list:\n snowflake.y -= snowflake.speed * delta_time\n\n # Check if snowflake has fallen below screen\n if snowflake.y < 0:\n snowflake.reset_pos()\n\n # Some math to make the snowflakes move side to side\n snowflake.x += snowflake.speed * math.cos(snowflake.angle) * delta_time\n snowflake.angle += 1 * delta_time", "title": "" }, { "docid": "6c1c59619203794ac0536608d643e122", "score": "0.5416831", "text": "def move(self, times: int):\n self._move_count = 0\n self._wanted_moves = times\n # Start moving\n self.start()\n while self.is_moving:\n # Wait for the count\n time.sleep(0.01)\n # Stop moving\n self.stop()\n self._move_count = 0", "title": "" }, { "docid": "5487103d651adf40a644c96b268d1efd", "score": "0.5408817", "text": "def update(self, delta_time):\n self.check_keys()\n self.check_collision()\n\n # TODO: Tell everything to advance or move forward one step in time\n for asteroids in self.asteroids:\n if asteroids.alive == True:\n asteroids.advance()\n self.wrap(asteroids)\n\n for bullets in self.bullets:\n if bullets.alive == True:\n bullets.advance()\n self.wrap(bullets)\n bullets.age += 1\n\n if self.ship.alive == True:\n self.ship.advance()\n self.wrap(self.ship)\n\n # TODO: Check for collisions", "title": "" }, { "docid": "f1e7923b1dcd60632154a8a9aa5890ef", "score": "0.54088014", "text": "def animate(self):\n\t\t#Move the player object to the new position:\n\t\tif self.x + self.dx >= 0 and self.x + self.dx <= MAIN_WINDOW_SIZE - GAME_ELEMENT_PLAYER_SIZE:\n\t\t\tself.x = self.x + self.dx\n\t\tif self.y + self.dy >= 0 and self.y + self.dy <= MAIN_WINDOW_SIZE - GAME_ELEMENT_PLAYER_SIZE:\n\t\t\tself.y = self.y + self.dy\n\t\t\n\t\t#Pick up the right sprite to be showned:\n\t\tif self.dx > 0:\n\t\t\tself.SpriteKey = \"right\"\n\t\telif self.dx < 0:\n\t\t\tself.SpriteKey = \"left\"\n\t\telif self.dx == 0:\n\t\t\tself.SpriteKey = \"idle\"\n\n\t\t#Move the fireshots if any :\n\t\tfor shot_id, shot in self.ListofFireShot.items():\n\t\t\tshot.y = shot.y - 4", "title": "" }, { "docid": "947442b46118eacc8f8ca6db97ecec35", "score": "0.54067504", "text": "def change_fleet_directions(ai_settings, aliens):\n for alien in aliens:\n alien.rect.y += ai_settings.drop_speed_factor\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "656d9f3d180615ccdec9c87ef22b5043", "score": "0.5403471", "text": "def on_update(self, delta_time):\n self.player_sprite.change_x = 0\n self.player_sprite.change_y = 0\n\n if self.up_pressed and not self.down_pressed:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif self.down_pressed and not self.up_pressed:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n if 
self.left_pressed and not self.right_pressed:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif self.right_pressed and not self.left_pressed:\n self.player_sprite.change_x = MOVEMENT_SPEED\n\n self.player_list.update()\n\n for enemy in self.enemy_list:\n enemy.follow_sprite(self.player_sprite)\n \n for enemy2 in self.enemy_list:\n enemy2.follow_sprite(self.player_sprite)\n \n for enemy3 in self.enemy_list:\n enemy3.follow_sprite(self.player_sprite)\n\n # update all sprites\n self.bullet_list.update()\n\n if len(self.enemy_list) == 0 and self.window.level > self.updated_level:\n self.window.level += 1\n self.good = True\n self.levels()\n self.amount_of_enemies += 2\n #self.enemy_health += 1\n self.speed += .20\n arcade.play_sound(self.newLevel_sound)\n\n\n for enemy in self.enemy_list:\n\n player_hit = arcade.check_for_collision_with_list(enemy, self.player_list)\n\n if len(player_hit) > 0:\n enemy.remove_from_sprite_lists()\n\n for player in player_hit:\n # Make sure this is the right sprite\n if not isinstance(player, PLAYER):\n raise TypeError(\"List contents must be all ints\")\n\n # Remove one health point\n player.player_cur_health -= 1\n\n # Check health\n if player.player_cur_health <= 0:\n arcade.play_sound(self.gameOver_sound)\n game_over = GameOverView()\n self.window.show_view(game_over)\n arcade.run()\n # enemy dead\n player.remove_from_sprite_lists()\n else:\n # Not dead\n arcade.play_sound(self.playerDeath_sound)\n\n \n\n # Loop through each bullet\n for bullet in self.bullet_list:\n \n # Check this bullet to see if it hit a enemy\n hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list)\n \n # If it did, get rid of the bullet\n if len(hit_list) > 0:\n bullet.remove_from_sprite_lists()\n\n # For every enemy we hit, process\n for enemy in hit_list:\n # Make sure this is the right sprite\n if not isinstance(enemy, ENEMY):\n raise TypeError(\"List contents must be all ints\")\n\n # Remove one health point\n enemy.enemy_cur_health -= 1\n\n # Check health\n if enemy.enemy_cur_health <= 0:\n # enemy dead\n enemy.remove_from_sprite_lists()\n arcade.play_sound(self.death_sound)\n else:\n # Not dead\n arcade.play_sound(self.hit_sound)\n\n \n # If the bullet flies off-screen, remove it.\n if bullet.bottom > self.width or bullet.top < 0 or bullet.right < 0 or bullet.left > self.width:\n bullet.remove_from_sprite_lists()", "title": "" }, { "docid": "16b8fbae52fcfc5865526504530aaf0a", "score": "0.5400978", "text": "def move(self):\n self.times_moved += 1", "title": "" }, { "docid": "743415473e57ad62724ad14b1c7058ff", "score": "0.53957546", "text": "def move(self, dt):\n if(self.direc == 1):\n self.xpos -= self.speed[self.which_speed] * dt * (self.play_field_width/100)\n elif(self.direc == 2):\n self.xpos += self.speed[self.which_speed] * dt * (self.play_field_width/100)\n elif(self.direc == 3):\n self.ypos -= self.speed[self.which_speed] * dt * (self.play_field_height/100)\n elif(self.direc == 4):\n self.ypos += self.speed[self.which_speed] * dt * (self.play_field_height/100)", "title": "" }, { "docid": "14bd3bd9ec81da5f14919081e8b8ef52", "score": "0.53881747", "text": "def _animatealienright(self):\n lastalien = self._findLastAlien()\n firstalien = self._findFirstAlien()\n if lastalien != None:\n if lastalien.x < GAME_WIDTH - ALIEN_H_SEP:\n self._steps += 1\n for row in self._aliens:\n for item in row:\n if item != None:\n item.x += ALIEN_H_WALK\n if lastalien.x >= GAME_WIDTH - ALIEN_H_SEP:\n self._steps += 1\n for row in self._aliens:\n for item in row:\n if item 
!= None:\n item.y -= ALIEN_V_WALK\n self._direction = 'left'", "title": "" }, { "docid": "6d46a260a114c0e4d63387b878bef8f1", "score": "0.53818125", "text": "def update(self,uInput,dt,alienSpeed):\n assert isinstance(uInput,GInput)\n assert type(dt) in [int,float]\n assert type(alienSpeed) in [int,float]\n if self._ship is not None:\n self._updateShip(uInput)\n if not self._aliensDead():\n self._moveAliens(dt,alienSpeed)\n if self._walks == self._alienFireRate:\n self._createAlienBolts()\n self._walks = 0\n if uInput.is_key_down('up'):\n self._createPlayerBolt()\n self._removeBolts()\n self._updateBolts()\n aliensDead = True\n self.setGameOverStatus(None)\n for bolt in self._bolts:\n self.collisionWithBarrier(bolt)\n self.collisionWithAliens(bolt)\n self.collisionWithShip(bolt)\n for row in self._aliens:\n for alien in row:\n if alien is not None and\\\n alien.getY() - ALIEN_HEIGHT/2 < DEFENSE_LINE:\n self.setGameOverStatus('lose')\n if self._aliensDead():\n self.setGameOverStatus('win')", "title": "" }, { "docid": "20370a42072187a7f419714151f5d985", "score": "0.53717643", "text": "def update(self):\r\n self.timer-=1\r\n if self.timer<0:\r\n c=random.choice([0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\r\n self.timer=random.randint(25, 150)\r\n if c==0:\r\n e=enemy()\r\n games.screen.add(e)\r\n elif c==1:\r\n t=turret()\r\n games.screen.add(t)", "title": "" }, { "docid": "87f8eda35d5361bf10a4c4fdd27f8c10", "score": "0.5368013", "text": "def _animateAlienleft(self):\n lastalien = self._findLastAlien()\n firstalien = self._findFirstAlien()\n if firstalien != None:\n if firstalien.x <= ALIEN_H_SEP + ALIEN_WIDTH // 2:\n self._steps += 1\n for row in self._aliens:\n for item in row:\n if item != None:\n item.y -= ALIEN_V_WALK\n self._direction = 'right'\n print(self._direction)\n if firstalien.x > ALIEN_H_SEP + ALIEN_WIDTH // 2:\n self._steps += 1\n for row in self._aliens:\n for item in row:\n if item != None:\n item.x -= ALIEN_H_WALK", "title": "" }, { "docid": "7eb739289d5994aa58985cf9195c0bbd", "score": "0.5361569", "text": "def turn_around():\n turn_left()\n turn_left()", "title": "" }, { "docid": "9acd4d56752c82ba6010ad732650fa5e", "score": "0.5352242", "text": "def on_update(self, delta_time: float):\n\n # Process left/right and jumping\n if self.space_pressed and self.physics_engine.is_on_ground(self.player_sprite):\n # Change texture when loading jump DOES NOT WORK\n self.player_sprite.texture = self.player_sprite.pre_jump_texture_pair[\n self.player_sprite.character_face_direction]\n if self.player_jump_impulse < MAX_JUMP_IMPULSE:\n self.player_jump_impulse += 60\n else:\n self.space_pressed = False\n self.player_sprite.change_x = 0\n elif self.player_jump_impulse > 320 and self.physics_engine.is_on_ground(self.player_sprite):\n impulse = (0, self.player_jump_impulse)\n self.physics_engine.apply_impulse(self.player_sprite, impulse)\n arcade.play_sound(self.jump_sound, volume=0.1)\n if self.right_pressed and not self.left_pressed:\n force = (PLAYER_MOVE_FORCE_IN_AIR, 0)\n self.physics_engine.apply_force(self.player_sprite, force)\n elif self.left_pressed and not self.right_pressed:\n force = (-PLAYER_MOVE_FORCE_IN_AIR, 0)\n self.physics_engine.apply_force(self.player_sprite, force)\n elif self.right_pressed and not self.left_pressed and self.physics_engine.is_on_ground(self.player_sprite):\n if self.jump_timer == 0:\n force = (PLAYER_MOVE_FORCE_ON_GROUND, 0)\n self.physics_engine.apply_force(self.player_sprite, force)\n elif self.left_pressed and not self.right_pressed and 
self.physics_engine.is_on_ground(self.player_sprite):\n if self.jump_timer == 0:\n force = (-PLAYER_MOVE_FORCE_ON_GROUND, 0)\n self.physics_engine.apply_force(self.player_sprite, force)\n elif self.physics_engine.is_on_ground(self.player_sprite):\n self.player_sprite.change_x = 0\n\n if self.physics_engine.is_on_ground(self.player_sprite):\n self.jump_timer = 0\n else:\n self.player_jump_impulse = 0\n self.jump_timer += 1\n\n # Move the player with the physics engine\n self.physics_engine.step()\n\n # See if we hit any coins, doors or princesses\n coin_hit_list = arcade.check_for_collision_with_list(self.player_sprite,\n self.coin_list)\n princess_hit = arcade.check_for_collision_with_list(self.player_sprite,\n self.princess_list)\n doors_hit = arcade.check_for_collision_with_list(self.player_sprite,\n self.door_list)\n if princess_hit or doors_hit:\n if not self.is_tutorial:\n f = open(\"record.txt\", \"r\")\n new_time = float(f\"{self.time_elapsed:7.1f}\")\n old_record = float(f.read())\n if new_time < old_record:\n f = open(\"record.txt\", \"w\")\n f.write(str(new_time))\n win_view = WinView()\n self.window.show_view(win_view)\n\n # Loop through each coin we hit (if any) and remove it\n for coin in coin_hit_list:\n # Remove the coin\n coin.remove_from_sprite_lists()\n # Play sound\n arcade.play_sound(self.collect_coin_sound, volume=0.3)\n # Add one to the score\n self.score += 1\n\n # --- Manage Scrolling ---\n\n # Track if we need to change the viewport\n\n changed = False\n\n # Scroll up\n top_boundary = self.view_bottom + SCREEN_HEIGHT - TOP_VIEWPORT_MARGIN\n if self.player_sprite.top > top_boundary:\n self.view_bottom += self.player_sprite.top - top_boundary\n changed = True\n\n # Scroll down\n bottom_boundary = self.view_bottom + BOTTOM_VIEWPORT_MARGIN\n if self.player_sprite.bottom < bottom_boundary:\n self.view_bottom -= bottom_boundary - self.player_sprite.bottom\n changed = True\n\n if changed:\n # Only scroll to integers. 
Otherwise we end up with pixels that\n # don't line up on the screen\n self.view_bottom = int(self.view_bottom)\n self.view_left = int(self.view_left)\n\n # Do the scrolling\n arcade.set_viewport(self.view_left,\n SCREEN_WIDTH + self.view_left,\n self.view_bottom,\n SCREEN_HEIGHT + self.view_bottom)\n # Managing speedrun timer\n self.time_elapsed += delta_time", "title": "" }, { "docid": "5c96a686c73c1263ffc33d9accd05a9d", "score": "0.53476673", "text": "def Move(self, screen, p, ini_pos, new_pos):\n if ini_pos[0] > new_pos[0]:\n diff = (-screen.tileSize, 0)\n animation = self.sprite['walking_left']\n static = self.sprite['static_left']\n self.direction = 1\n elif ini_pos[0] < new_pos[0]:\n diff = (screen.tileSize, 0)\n animation = self.sprite['walking_right']\n static = self.sprite['static_right']\n self.direction = 3\n elif ini_pos[1] > new_pos[1]:\n diff = (0, -screen.tileSize)\n animation = self.sprite['walking_up']\n static = self.sprite['static_up']\n self.direction = 0\n elif ini_pos[1] < new_pos[1]:\n diff = (0, screen.tileSize)\n animation = self.sprite['walking_down']\n static = self.sprite['static_down']\n self.direction = 2\n else :\n return\n self.cara['PM'] -= p\n pix_pos = self.pos['px']\n ini_bar1 = self.lifebar[0].pixel\n ini_bar2 = self.lifebar[1].pixel\n screen.objects[self.index[0]][0] = animation\n screen.objects[self.index[0]][2] = 'character'\n n = screen.frameNumber\n for i in range(n+1):\n temp_pos = int(diff[0]*i/n), int(diff[1]*i/n)\n full_temp_pos = pix_pos[0]+temp_pos[0], pix_pos[1]+temp_pos[1]\n self.lifebar[0].pixel = (ini_bar1[0] + temp_pos[0], ini_bar1[1] + temp_pos[1])\n self.lifebar[1].pixel = (ini_bar2[0] + temp_pos[0], ini_bar2[1] + temp_pos[1])\n self.UpdatePos(screen.tileSize, posPixel = full_temp_pos)\n screen.objects[self.index[0]][1] = self.pos['px']\n screen.objects[self.index[1]][1] = self.lifebar[0].pixel\n screen.objects[self.index[2]][1] = self.lifebar[1].pixel\n screen.MoveCircle(pos = self.pos['px'])\n screen.refresh()\n screen.objects[self.index[0]][0] = static\n screen.objects[self.index[0]][2] = 'sprite'\n screen.UpdateStatus(self)", "title": "" }, { "docid": "529b505f921823ce7e26d1ba21268223", "score": "0.5335729", "text": "def move(self, game):\n\n self.timer -= game.time_passed_seconds\n self.pos += self.dir * c.BULLET_SPEED\n\n self.rect = self.rect.move(self.pos.x, self.pos.y)\n\n if self.timer <= 0:\n self.on_hit()\n \n if self.rect.x > (c.SCREEN_X + c.GAME_SCALE * 2) or self.rect.y > (c.SCREEN_Y + c.GAME_SCALE * 2):\n self.on_hit()\n elif self.rect.x < (0 - c.GAME_SCALE * 2) or self.rect.y < (0 - c.GAME_SCALE * 2):\n self.on_hit()", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, 
aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "c4aba3dee6f6ed1f27f857438a137c1c", "score": "0.5335226", "text": "def change_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "d3ab4a20939142f8110fd6778f32c0d1", "score": "0.53312665", "text": "def update(self, delta_time):\r\n\r\n # Move the ball forward one element in time\r\n self.ball.advance()\r\n\r\n # Check to see if keys are being held, and then\r\n # take appropriate action\r\n self.check_keys()\r\n\r\n # check for ball at important places\r\n self.check_miss()\r\n self.check_hit_paddle()\r\n self.check_hit_brick() \r\n self.check_bounce()\r\n self.create_brick()\r\n self.cleanup_zombies()", "title": "" }, { "docid": "f2103951ae6b37657e0e0f6c4e5511a8", "score": "0.5328809", "text": "def update_animation(self, delta_time: float = 1/60):\n\n # Figure out if we need to flip face left or right\n if self.change_x < 0 and self.character_face_direction == constants.RIGHT_FACING:\n self.character_face_direction = constants.LEFT_FACING\n elif self.change_x > 0 and self.character_face_direction == constants.LEFT_FACING:\n self.character_face_direction = constants.RIGHT_FACING\n\n # Idle animation\n if self.change_x == 0 and self.change_y == 0:\n self.texture = self.idle_texture_pair[self.character_face_direction]\n return\n else:# Walking animation\n self.cur_texture += 1\n if self.cur_texture > 7 * constants.UPDATES_PER_FRAME:\n self.cur_texture = 0\n frame = self.cur_texture // constants.UPDATES_PER_FRAME\n direction = self.character_face_direction\n self.texture = self.walk_textures[frame][direction]", "title": "" }, { "docid": "2be4dbb0cff584cd718ded6990bdc1f3", "score": "0.5321877", "text": "def update(self, iinput, dt):\n if self._ship != None:\n if iinput.is_key_down('right'):\n if self._ship.getx() < GAME_WIDTH-(SHIP_WIDTH/2):\n self._ship.setx(self._ship.getx() + SHIP_MOVEMENT)\n elif iinput.is_key_down('left'):\n if self._ship.getx() > SHIP_WIDTH/2:\n self._ship.setx(self._ship.getx() - SHIP_MOVEMENT)\n \n if iinput.is_key_down('w') and self._ship != None:\n self.firebolt()\n \n if self._alienstep >= self._alienshotrate:\n self.alienshotsequence()\n\n self.movebolts()\n self.checksshipcollision()\n self.removealiens()\n self._time = self._time + dt\n self.checkifempty()\n self.movealiens()", "title": "" }, { "docid": "1086929f6cbc679e193a83aae550bd0e", "score": "0.5316143", "text": "def change_fleet_direction(ai_settings,aliens):\n for alien in aliens.sprites(): #Calls each alien in the \"sprites list\", aliens\n alien.rect.y += ai_settings.fleet_drop_speed #Moves down the amount defined in fleet drop speed\n ai_settings.fleet_direction *= -1 #After it drops, it makes it go the opposite direction (re-stores the value)", "title": "" }, { "docid": "0129b2d695899d42fcc5335ea1c40e21", "score": 
"0.53080815", "text": "def update(self,input,dt):\n self._animateShip(input)\n self._animateAliens(dt)\n self._animateBolt(input)\n self._shootfromAliens()\n self._handleShipBolts()\n self._handleAlienBolts()\n self._playerWins()\n self._alienWins()", "title": "" }, { "docid": "357fae07126c41c2093a1e6f93d17006", "score": "0.53040653", "text": "def update(self):\r\n self.x += (self.ai_settings.enemy_speed_factor *\r\n self.ai_settings.fleet_direction)\r\n self.rect.x = self.x", "title": "" }, { "docid": "dac25d1e8979f437ce2cc9cab89d5006", "score": "0.529858", "text": "def enemy_move():\n global gravity1\n global enemy_speed\n for enemy in enemies:\n # enemy.move_speed()\n already_touched = False\n for obstacle in noninteractives:\n if (enemy.left_touches(obstacle) or enemy.right_touches(obstacle)) and already_touched == False:\n already_touched = True\n # if not enemy.bottom_touches(obstacle): #Originally changed enemy direction if it touched obstacle at all. I changed this so that it only switches direction if it touches the side of an obstacle\n enemy.speedx *= -1\n enemy.move_speed()\n\n for enemy in enemies:\n camera.draw(enemy)\n for enemy in enemies:\n for image in walker:\n if image.bottom_touches(enemy):\n if enemy.top_touches(image):\n enemies.remove(enemy)\n break\n for enemy in enemies: # Gravity\n for obstacle in noninteractives:\n enemy.move_to_stop_overlapping(obstacle)\n if enemy.bottom_touches(obstacle):\n gravity1.append(True)\n else:\n gravity1.append(False)\n if True not in gravity1:\n enemy.speedy += 3\n enemy.move_speed()\n gravity1 = []", "title": "" }, { "docid": "3fd24aa52002e9313f02e410f68953b3", "score": "0.5284566", "text": "def change_fleet_direction(ai_settings, aliens):\r\n for alien in aliens.sprites():\r\n alien.rect.y += ai_settings.fleet_drop_speed\r\n ai_settings.fleet_direction *= -1", "title": "" }, { "docid": "3df261445f010ce9cf8f1187da951fbe", "score": "0.52838653", "text": "def move(self):\r\n\r\n\t\tif(self.xcord + self.xvel > self.xbound or self.xcord + self.xvel < 0):\r\n\t\t\tself.xvel = -self.xvel\r\n\r\n\t\t\tif(self.incr >= 3):\r\n\t\t\t\tself.direction = random.random() * math.pi * 2\r\n\t\t\t\tself.move()\r\n\t\t\t\treturn\r\n\r\n\t\tif(self.ycord + self.yvel > self.ybound or self.ycord + self.yvel < 0):\r\n\t\t\tself.yvel = -self.yvel\r\n\r\n\t\t\tif(self.incr >= 3):\r\n\t\t\t\tself.direction = random.random() * math.pi * 2\r\n\t\t\t\tself.move()\r\n\t\t\t\treturn\r\n\r\n\t\tself.xcord += self.xvel\r\n\t\tself.ycord += self.yvel", "title": "" }, { "docid": "65e837a3cccbf0d5ac0c067c818ad6b2", "score": "0.5282985", "text": "def update(self):\n self.y += (self.settings.alien_speed * self.settings.alien_direction)\n self.rect.y = self.y", "title": "" }, { "docid": "a75e126e50c65d21f36dc3c00947d898", "score": "0.52739567", "text": "def update(self):\n self.y += self.speed\n self.rect.y = self.y\n if pygame.time.get_ticks() % 20 == 0:\n self.switch_image()", "title": "" }, { "docid": "228fd8a58cd0e88bfd01f75a82c0707e", "score": "0.52734315", "text": "def update(self):\n self.x += self.settings.alien_speed * self.settings.fleet_direction\n self.rect.x = self.x", "title": "" }, { "docid": "0b03d853325501ac078d3da79147e098", "score": "0.52733", "text": "def move(self, events):\n for event in events:\n if event.type == KEYDOWN:\n if event.key == UP:\n self.upPressed = True\n # self.direction = Direction.UP\n self.speedY = -self.movementSpeed\n elif event.key == DOWN:\n self.downPressed = True\n # self.direction = Direction.DOWN\n self.speedY = 
self.movementSpeed\n elif event.key == LEFT:\n self.leftPressed = True\n # self.direction = Direction.LEFT\n self.speedX = -self.movementSpeed\n elif event.key == RIGHT:\n self.rightPressed = True\n # self.direction = Direction.RIGHT\n self.speedX = self.movementSpeed\n elif event.key == K_SPACE:\n # Create sprite and spawn in world.\n # Check for collisions with flies\n # Remove flies from game\n self.attack = True\n self.net.spawnWeapon(self.direction, self.pos)\n elif event.key == pygame.K_ESCAPE:\n pygame.event.post(pygame.event.Event(UserEvents.RESUMEGAME))\n else:\n pygame.event.post(event)\n elif event.type == KEYUP:\n if event.key == UP:\n self.upPressed = False\n if not self.downPressed:\n # self.direction = Direction.IDLE_UP\n self.speedY = 0\n else:\n self.speedY = self.movementSpeed\n elif event.key == DOWN:\n self.downPressed = False\n if not self.upPressed:\n # self.direction = Direction.IDLE_DOWN\n self.speedY = 0\n else:\n self.speedY = -self.movementSpeed\n elif event.key == LEFT:\n self.leftPressed = False\n if not self.rightPressed:\n # self.direction = Direction.IDLE_LEFT\n self.speedX = 0\n else:\n self.speedX = self.movementSpeed\n elif event.key == RIGHT:\n self.rightPressed = False\n if not self.leftPressed:\n # self.direction = Direction.IDLE_RIGHT\n self.speedX = 0\n else:\n self.speedX = -self.movementSpeed\n elif event.key == K_SPACE:\n self.attack = False\n self.net.despawn()\n else:\n pygame.event.post(event)\n\n else:\n pygame.event.post(event)\n\n self.getPlayerDirection()", "title": "" }, { "docid": "21b61acff66b560c02d00a6c5f60ed4a", "score": "0.52673703", "text": "def update_aliens(ai_settings,screen,stats,sb,ship,aliens,bullets):\n check_fleet_edges(ai_settings,aliens) #Checks if it hit an edge. If so, drops and changes direction\n aliens.update() #moves each alien in the group right or left\n\n #Looking for alien-ship collisions\n if pygame.sprite.spritecollideany(ship,aliens):\n ship_hit(ai_settings,screen,stats,sb,ship,aliens,bullets)\n\n #Checking if ship hit bottom of screen\n check_aliens_bottom(ai_settings,screen,stats,sb,ship,aliens,bullets)", "title": "" }, { "docid": "6f9905d8bbf07d7489baca20910dea7e", "score": "0.5266056", "text": "def update(self, delta_time):\n self.check_keys()\n\n # TODO: Tell everything to advance or move forward one step in time\n self.ship.advance(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n for rock in self.rocks:\n rock.advance(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n for bullet in self.bullets:\n bullet.advance(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n # TODO: Check for collisions\n self.check_collision()\n self.clear_zombies()", "title": "" }, { "docid": "a57230c1eaa88d750673321c9546d1eb", "score": "0.52604353", "text": "def increase_speed(self):\n self.ship_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.alien_speed_factor *= self.speedup_scale\n self.alien_bullet_chance += 0.2\n self.bonus_speed *= self.speedup_scale\n self.bonus_time_limit -= 2\n \n self.alien_points = int(self.alien_points * self.score_scale)\n \n self.level += 1", "title": "" }, { "docid": "9874385ad43c303fe7c3ff92728c8889", "score": "0.5258329", "text": "def shake(self):\n self.move(5,5)\n self.move_down()\n for i in range(10):\n self.move(5,5)\n self.move(4,5)", "title": "" }, { "docid": "06f8cd310e98fe9b1bd529e34a70e62d", "score": "0.52561027", "text": "def update(self, dt):\n\n self.game.player.move()\n for enemy in self.game.enemies:\n enemy.move()", "title": "" }, { "docid": "60e95ee4a6260de47cb226fc8a317306", "score": 
"0.52523094", "text": "def update(self, setting, screen, ship, aliens, eBullets):\n self.ship = ship\n self.aliens = aliens\n self.eBullets = eBullets\n self.x += (self.setting.alienSpeed * self.setting.fleetDir)\n self.rect.x = self.x\n self.shoot(setting, screen, self.ship, self.aliens, self.eBullets)", "title": "" }, { "docid": "80d074c1e124d04ea532592722fe1d65", "score": "0.52251554", "text": "def moveAliensH(self):\n alien_positions = []\n for row in self._aliens:\n for alien in row:\n if alien != None:\n alien_positions.append(alien.getX())\n if alien_positions != []:\n rightmost = max(alien_positions)\n leftmost = min(alien_positions)\n\n if (self._direction == 'right' and rightmost >= GAME_WIDTH -\n ALIEN_H_SEP - ALIEN_WIDTH/2):\n self.moveAliensV()\n self._direction = 'left'\n elif self._direction == 'left' and leftmost <= ALIEN_H_SEP + ALIEN_WIDTH/2:\n self.moveAliensV()\n self._direction = 'right'\n else:\n for row in self._aliens:\n for alien in row:\n if alien != None and self._direction == 'right':\n alien.setX(alien.getX() + ALIEN_H_WALK)\n alien.frame = (alien.frame+1)%2\n self._time = 0\n if alien != None and self._direction == 'left':\n alien.setX(alien.getX() - ALIEN_H_WALK)\n alien.frame = (alien.frame+1)%2\n self._time = 0", "title": "" }, { "docid": "52d832ae4236b1b7cf3d1051a9da33b3", "score": "0.5215398", "text": "def HCR_animacion(P):\n global x, y, left, right, vel\n global Side_A, Side_B\n\n clock = pygame.time.Clock()\n run = True\n move = 0\n while run:\n clock.tick(27)\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n left = True\n right = False\n if move < len(P):\n direction, p1, p2 = get_characters(P[move], P[move + 1], P[move + 2])\n Embark_characters(Side_B, p1, p2)\n for step in range(65):\n x -= vel\n redrawGameWindow(direction, p1, p2)\n pygame.time.delay(70)\n move += 3\n Disembark_characters(Side_A, p1, p2)\n\n elif keys[pygame.K_RIGHT]:\n right = True\n left = False\n if move < len(P):\n direction, p1, p2 = get_characters(P[move], P[move + 1], P[move + 2])\n Embark_characters(Side_A, p1, p2)\n for step in range(65):\n x += vel\n redrawGameWindow(direction, p1, p2)\n pygame.time.delay(70)\n move += 3\n Disembark_characters(Side_B, p1, p2)\n else:\n redrawGameWindow ('Standby','Unknown', 'Unknown')\n \n pygame.quit()", "title": "" }, { "docid": "79b64bf889b4c9284c2e13670e5a3266", "score": "0.5212312", "text": "def update(self):\r\n self.time-=1\r\n if self.time<0:\r\n self.shoot()\r\n self.time=50\r\n if self.left>games.screen.width or self.right<0 or self.top>games.screen.width or self.bottom<0:\r\n self.destroy()", "title": "" }, { "docid": "f1ede89f14351606d46375bcb901e7da", "score": "0.52089334", "text": "def _update(self, dt):\n # walking\n \n speed = 5\n d = dt * speed # distance covered this tick.s\n dx, dy, dz = self.get_motion_vector()\n # New position in space, before accounting for gravity.\n dx, dy, dz = dx * d, dy * d, dz * d\n # gravity\n if not self.flying:\n # Update your vertical speed: if you are falling, speed up until you\n # hit terminal velocity; if you are jumping, slow down until you\n # start falling.\n self.dy -= dt * GRAVITY\n self.dy = max(self.dy, -TERMINAL_VELOCITY)\n dy += self.dy * dt\n # collisions\n x, y, z = self.position\n x, y, z = self.collide((x + dx, y + dy, z + dz), PLAYER_HEIGHT)\n self.position = (x, y, z)", "title": "" }, { "docid": "586050dc20792b78ae92416c0a0617ba", "score": "0.5207719", "text": "def 
on_update(self, delta_time):\n\n # Calculate speed based on the keys pressed\n self.player_sprite.change_x = 0\n self.player_sprite.change_y = 0\n\n if self.up_pressed and not self.down_pressed:\n self.player_sprite.change_y = PLAYER_MOVEMENT_SPEED\n elif self.down_pressed and not self.up_pressed:\n self.player_sprite.change_y = -PLAYER_MOVEMENT_SPEED\n if self.left_pressed and not self.right_pressed:\n self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED\n elif self.right_pressed and not self.left_pressed:\n self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED\n\n # Call update on all sprites (The sprites don't do much in this\n # example though.)\n self.physics_engine.update()\n\n # Scroll the screen to the player\n self.scroll_to_player()", "title": "" }, { "docid": "371cf0aad2a79e3042f2fd415576d3e9", "score": "0.5195686", "text": "def playAction(self, dt:int) -> str:\n if self.enemyTarget:\n \n if self.playerNextTo(self.enemyTarget):\n self.attack(self.enemyTarget)\n \n if not self.currentTarget or self.getActionPoint() > 0:\n print(\"move after atk\")\n if self.friendInMemory():\n self.goTowardFriend()\n else:\n self.escapeFrom(self.enemyTarget)\n self.enemyTarget = None\n\n if self.currentTarget:\n \n self.state = 'run'\n\n if not self.positions:\n self.positions = self.positionsIter()\n\n try:\n self.rect.midbottom = next(self.positions)\n\n except StopIteration:\n self.positions = None\n self.nextTarget()\n else:\n self.state = 'idle'\n\n return self.state", "title": "" }, { "docid": "5f615200682dc63f5b9a2e4ea0191419", "score": "0.51926196", "text": "def move(self, check, delta_x, delta_y, arg):\n # Check for moving multiple times.\n if arg.isdigit():\n times = int(arg)\n else:\n times = 1\n for movement in range(times):\n # Check for a valid move\n if check not in self.current:\n self.ow()\n break\n # Update the player's postion.\n self.x += delta_x\n self.y += delta_y\n self.current = self.map[self.y][self.x]\n print('moving...')\n # Check for solving the maze.\n if (self.x, self.y) == self.end:\n print('You made it out of the maze!')\n return True\n # Show the next location if not solved.\n self.show_directions()", "title": "" }, { "docid": "d4742ee94f52b1f6d7d2533bf9d5d396", "score": "0.5182295", "text": "def moveleft(self):\n firstalien = self.findfirstcolalien()\n if firstalien != None:\n if firstalien.getx() > ALIEN_H_SEP + (ALIEN_WIDTH/2):\n for rows in self._aliens:\n for alien in rows:\n if alien != None:\n alien.setx(alien.getx() - ALIEN_H_WALK)\n else:\n self._ismovingright = True\n for rows in self._aliens:\n for alien in rows:\n if alien != None:\n alien.sety(alien.gety() - ALIEN_V_WALK)\n if alien.gety() <= DEFENSE_LINE + (ALIEN_HEIGHT/2):\n self._lost = True\n self._time = 0", "title": "" }, { "docid": "82e1f3901fb9f9690ef75d552f3a1de5", "score": "0.5181334", "text": "def gametime_incrementer(game_state, MAX_IDLE_TIME=60 * 60 * 2):\n while True:\n time.sleep(1)\n\n # this is incremented since we only get gametime about every 5-20 seconds\n game_state.time += 1\n\n # this should only run the first time global game state is set\n # otherwise (as it is now) we check this way more than necessary...\n # funky hack to make sure we aren't incrementing from zero still\n # it's acceptable to use a known global game time since it increments forever but still sloppy\n if game_state.time > 100000 and game_state.time_last_command == 0:\n game_state.time_last_command = game_state.time\n\n # quit if idle too long\n time_since_last_command = game_state.time - 
game_state.time_last_command\n if time_since_last_command >= MAX_IDLE_TIME:\n game_state.command_queue.put(b\"quit\")", "title": "" }, { "docid": "5f91adf3e7ee104bc6d40cea18aa065f", "score": "0.51720417", "text": "def update(self):\r\n self.guntime-=1\r\n self.bombtime-=1\r\n if games.keyboard.is_pressed(games.K_SPACE) and self.guntime<=0:\r\n self.shoot()\r\n self.guntime=25\r\n if games.keyboard.is_pressed(games.K_LEFT):\r\n self.x-=2\r\n if games.keyboard.is_pressed(games.K_RIGHT):\r\n self.x+=2\r\n if games.keyboard.is_pressed(games.K_UP):\r\n self.y-=2\r\n if games.keyboard.is_pressed(games.K_DOWN):\r\n self.y+=2\r\n if games.keyboard.is_pressed(games.K_RALT) and self.bombtime<=0:\r\n self.bombs()\r\n self.bombtime=50\r\n if games.keyboard.is_pressed(games.K_LALT) and self.bombtime<=0:\r\n self.bombs()\r\n self.bombtime=50\r\n if self.bottom>games.screen.height:\r\n self.bottom=games.screen.height\r\n if self.top<0:\r\n self.top=0\r\n if self.left<0:\r\n self.left=0\r\n if self.right>games.screen.width:\r\n self.right=games.screen.width\r\n self.sights.x=self.x\r\n self.sights.y=self.y-230", "title": "" }, { "docid": "f76bc9d4b6d8d2c7382be59ae840f4e3", "score": "0.5165607", "text": "def change_fleet_direction(game_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += game_settings.fleet_drop_speed\n game_settings.fleet_direction *= -1", "title": "" }, { "docid": "974fd336e4c5a49e83053cd2deb1bfd4", "score": "0.5165333", "text": "def _move_Aliens_Down_Main(self):\n \n if self._rightmost_Alien_pos() >= GAME_WIDTH:\n \n self._move_Aliens_Left()\n self._move_Aliens_Down_Helper()\n self._direction = 'left'\n \n if self._leftmost_Alien_pos() <= 0:\n \n self._move_Aliens_Right()\n self._move_Aliens_Down_Helper()\n self._direction = 'right'\n \n else:\n pass", "title": "" }, { "docid": "0fb37fd81aff40be83f75b21c0504ce3", "score": "0.51624024", "text": "def update(self, grid):\n\t#if it is the first or last image in the list then it reverses the animation order\n\tif self.__image_num == 0 and not self.__dying:\n\t self.__scroll = 1\n\telif self.__image_num == 2:\n\t self.__scroll = -1\n\t \n\t#if the animation is not paused and the player is not dying it goes to the\n\t#next frame\n\tif self.__animation_pause == 0 and not self.__dying:\n\t if self.__moving == True:\n\t\tself.__image_num += self.__scroll\n\t\tself.image = self.__current_list[self.__image_num]\n\t\tself.__animation_pause = 5\n\telif not self.__dying:\n\t self.__animation_pause -= 1\n\t#if the player is invincible it takes one frame off of the invinciblility\n\tif self.__invincible > 0:\n\t self.__invincible -= 1\n\t \n\t#movement control\n self.rect.x += self.__dx\n self.rect.y += self.__dy\n\tif (str(self.rect.left - 6) + str(self.rect.top)) in grid and self.__current_dir == (0,0) and not self.__dying:\n\t self.__dx = 0\n\t self.__dy = 0\n\t self.__moving = False\n\t self.__image_num = 1\n\t self.image = self.__current_list[self.__image_num]\n\telif (str(self.rect.left - 6) + str(self.rect.top)) in grid and\\\n\t self.__dx > 0:\n\t if grid[(str(self.rect.left - 6 + 32) + str(self.rect.top))] != 'empty' or self.__current_dir != (1,0):\n\t\tself.__dx = 0\n\t\tself.__dy = 0\n\t\tself.__moving = False\n\t\tself.__image_num = 1\n\t\tself.image = self.__current_list[self.__image_num]\n\t\tself.__current_dir = (0,0)\n\telif (str(self.rect.left - 6) + str(self.rect.top)) in grid and\\\n\t self.__dx < 0:\n\t if grid[(str(self.rect.left - 6 - 32) + str(self.rect.top))] != 'empty' or self.__current_dir != 
(-1,0):\n\t\tself.__dx = 0\n\t\tself.__dy = 0\n\t\tself.__moving = False\n\t\tself.__image_num = 1\n\t\tself.image = self.__current_list[self.__image_num]\n\t\tself.__current_dir = (0,0)\n\telif (str(self.rect.left - 6) + str(self.rect.top)) in grid and\\\n\t self.__dy < 0:\n\t if grid[(str(self.rect.left - 6) + str(self.rect.top - 32))] != 'empty' or self.__current_dir != (0,1):\n\t\tself.__dx = 0\n\t\tself.__dy = 0\n\t\tself.__moving = False\n\t\tself.__image_num = 1\n\t\tself.image = self.__current_list[self.__image_num]\n\t\tself.__current_dir = (0,0)\n\telif (str(self.rect.left - 6) + str(self.rect.top)) in grid and\\\n\t self.__dy > 0:\n\t if grid[(str(self.rect.left - 6) + str(self.rect.top + 32))] != 'empty' or self.__current_dir != (0,-1):\n\t\tself.__dx = 0\n\t\tself.__dy = 0\n\t\tself.__moving = False\n\t\tself.__image_num = 1\n\t\tself.image = self.__current_list[self.__image_num]\n\t\tself.__current_dir = (0,0)\n\n\t#makes the bombs available\n\tfor num in range(4):\n\t if self.__bomb_available[num] > 0:\n\t\tself.__bomb_available[num] -= 1\n\t\t\n\t#scrolls through the death animations\n\tif self.__dying:\n\t self.__dx = 0\n\t self.__dy = 0\n\t self.__dying -= 1\n\t if self.__dying == 0:\n\t\tself.kill()\n\t elif self.__dying % 10 == 9:\n\t\tself.__image_num += 1\n\t\tself.image = self.__current_list[self.__image_num]", "title": "" }, { "docid": "3facc0220d67799b2007bf4b37c015f1", "score": "0.5157613", "text": "def update(self):\n self.move()\n self.eat_food()", "title": "" }, { "docid": "5fdb83564e68a6681183892bebbd0404", "score": "0.51567763", "text": "def change_fleet_direction(ai_settings,aliens):\r\n\tfor alien in aliens.sprites():\r\n\t\talien.rect.y += ai_settings.fleet_drop_speed\r\n\tai_settings.fleet_direction *=-1", "title": "" }, { "docid": "fcb56c83ca69c18db0740295d64d701b", "score": "0.5154021", "text": "def setAlienSpeed(self, speed):\n # To speed up aliens, factor should be < 1\n # To slow down aliens, factor should be > 1\n self._alienspeed = speed", "title": "" }, { "docid": "bbbfc0424e0ef91985d15f4491e8c840", "score": "0.514322", "text": "def __init__(self, ai_settings, screen): \n ship = Ship(screen, ai_settings) \n super(Alien, self).__init__() \n self.screen = screen \n self.ai_settings = ai_settings\n # Load the alien image and set its rect attribute.\n self.image = pygame.image.load('enemy1.1.png')\n self.image = pygame.transform.scale(self.image, (75, 75))\n self.rect = self.image.get_rect()\n # Start each new alien near the top left of the screen.\n self.rect.x = 625\n self.rect.y = 300\n self.hp = 4\n self.move_m = [self.upload_img(\"enemy1.2.png\"), self.upload_img(\"enemy1.2.png\"), self.upload_img(\"enemy1.3.png\")]\n self.i = 0\n self.fps = 7\n # Store the alien's exact position.\n self.x = float(self.rect.x)", "title": "" }, { "docid": "f2d09cbfc0bc7b74988657c34fd6f3a3", "score": "0.51374024", "text": "def change_fleet_direction(ai_settings, aliens):\n\tfor alien in aliens.sprites():\n\t\talien.rect.y += ai_settings.fleet_drop_speed\n\tai_settings.fleet_direction *= -1", "title": "" }, { "docid": "528072a4d8f4ac0e13514ad3a1637710", "score": "0.5133847", "text": "def playerTwoUpdate(self):\n if 106 < self.frameCount < 147 or 170 < self.frameCount < 211 or 232 < self.frameCount:\n self.swing()\n if 5 < self.swingFrameCount < 15 or 23 < self.swingFrameCount < 35 or 44 < self.swingFrameCount < 54 or\\\n 62 < self.swingFrameCount < 73:\n if self.frameCount % 8 < 4:\n self.image = self.animationFrames[2]\n else:\n self.image = 
self.animationFrames[3]\n if 23 < self.swingFrameCount < 35 and not self.clockwise:\n self.image = pg.transform.flip(self.image, False, True)\n elif 14 < self.swingFrameCount < 24 or 53 < self.swingFrameCount < 63:\n if self.frameCount % 8 < 4:\n self.image = self.animationFrames[4]\n else:\n self.image = self.animationFrames[5]\n\n if self.swingFrameCount == 6:\n if self.clockwise:\n DemoPlayerSprite.facingDirection = c.Directions.DOWN\n else:\n DemoPlayerSprite.facingDirection = c.Directions.UP\n elif self.swingFrameCount == 24 or self.swingFrameCount == 45:\n DemoPlayerSprite.facingDirection = c.Directions.LEFT\n elif self.swingFrameCount == 63:\n DemoPlayerSprite.facingDirection = c.Directions.RIGHT\n\n if self.frameCount == 112 or self.frameCount == 176:\n self.coordinates = (self.coordinates[0], self.coordinates[1] - 4)\n elif self.frameCount == 129 or self.frameCount == 193:\n self.coordinates = (self.coordinates[0], self.coordinates[1] + 4)\n elif self.frameCount == 147:\n DemoPlayerSprite.swingValue = (0, 0)\n self.swingFrameCount = 39\n DemoPlayerSprite.paused = False\n self.clockwise = False\n elif self.frameCount == 211:\n DemoPlayerSprite.swingValue = (0, 0)\n self.swingFrameCount = 0\n DemoPlayerSprite.paused = False", "title": "" }, { "docid": "4ff587906d211b51ded3ee0c2934d6d6", "score": "0.5132217", "text": "def n_move(self, agents, dt):\n center = np.zeros(2)\n counter = 0\n for a in agents:\n if a != self:\n d = np.linalg.norm(a.pos-self.pos)\n if (d < 1.0):\n center += a.pos\n counter += 1\n if counter > 0:\n center /= counter\n speed = self.speed*self.direction + self.alpha*(self.pos-center)\n else:\n speed = self.speed*self.direction\n self.pos += speed * dt", "title": "" }, { "docid": "e8ed7956ee8a7aafe6904ba56c976bbc", "score": "0.51263905", "text": "def timeStep(self, t=1, text=True):\n\n for i in range(t):\n move_place = random.choice(list(range(self.spot)))\n\n if self.ecosys[move_place] is None:\n print ('No movement')\n pass\n\n else:\n new_spot = self.ecosys[move_place].movement()\n if new_spot < 0 or new_spot > len(self.ecosys) -1:\n self.ecosys[move_place] = None\n elif isinstance(self.ecosys[move_place], bear):\n if isinstance(self.ecosys[new_spot], bear):\n self.add_random(bear(0))\n elif isinstance(self.ecosys[new_spot], fish):\n self.ecosys[new_spot] = bear(new_spot)\n self.ecosys[move_place] = None\n else:\n self.ecosys[new_spot] = bear(new_spot)\n self.ecosys[move_place] = None\n\n elif isinstance(self.ecosys[move_place], fish):\n if isinstance(self.ecosys[new_spot], fish):\n self.add_random(fish(0))\n elif isinstance(self.ecosys[new_spot], bear):\n self.ecosys[move_place] = None\n else:\n self.ecosys[new_spot] = fish(new_spot)\n self.ecosys[move_place] = None\n else:\n raise ValueError(\"Undefined Creature\")\n # else:\n # orgin = self.ecosys[move_place]\n # destinationIndex = new_spot\n # finalDestination = self.ecosys[destinationIndex]\n # animalCreationSpot = self.ecosys[new_spot]\n # animalDeletionSpot = self.ecosys[move_place]\n\n # self.manageAnimalInteraction(orgin, destinationIndex, finalDestination, animalCreationSpot, animalDeletionSpot)\n\n if text:\n self.display()\n\n\n bearCount = len([i for i, x in enumerate(self.ecosys) if isinstance(x,bear)])\n\n fishCount = len([i for i, x in enumerate(self.ecosys) if isinstance(x,fish)])\n\n print('Total number of bears in the ecosystem: ' , bearCount)\n print('Total number of fish in the ecosystem: ', fishCount)\n \n self.gdata[self.itterationCount] = self.parsecosys()\n\n if self.fullList != '':\n 
pass\n elif bearCount == len(self.ecosys):\n self.fullList = 'The list became full of bears at ' + str(self.itterationCount) + ' timestep. '\n\n elif fishCount == len(self.ecosys):\n self.fullList = 'The list became full of fish at ' + str(self.itterationCount) + ' timestep. '\n\n\n self.itterationCount += 1\n\n print (self.fullList)", "title": "" }, { "docid": "291289b8daa109c5065aaf6f0485ea52", "score": "0.5125915", "text": "def showWelcomeAnimation():\n\n # index of player to blit on screen\n playerIndex = 0\n playerIndexGen = cycle([0, 1, 2, 1])\n # iterator used to change playerIndex after every 5th iteration\n loopIter = 0\n\n playerx = int(nScrWidth // 3 ) if nScrWidth > 440 else int(nScrWidth // 5 )\n playery = int((nScrHeight - fImages['player'][0].get_height()) / 2)\n\n messagex = int((nScrWidth - fImages['message'].get_width()) / 2)\n messagey = int(nScrHeight * 0.12)\n\n furymodex = int((nScrWidth - fImages['furymode'].get_width()) / 2)\n furymodey = int(nScrHeight * 0.90)\n\n basex = 0\n # amount by which base can maximum shift to left\n baseShift = fImages['base'].get_width() - fImages['background'].get_width()\n\n # player shm for up-down motion on welcome screen\n playerShmVals = {'val': 0, 'dir': 1}\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\n # make first flap sound and return values for mainGame\n fSounds['wing'].play()\n return {\n 'playery': playery + playerShmVals['val'],\n 'basex': basex,\n 'playerIndexGen': playerIndexGen,\n }\n\n # adjust playery, playerIndex, basex\n if (loopIter + 1) % 5 == 0:\n playerIndex = next(playerIndexGen)\n loopIter = (loopIter + 1) % 30\n basex = -((-basex + 4) % baseShift)\n playerShm(playerShmVals)\n\n # draw sprites\n oScreen.blit(fImages['background'], (0,0))\n oScreen.blit(fImages['player'][playerIndex],\n (playerx, playery + playerShmVals['val']))\n oScreen.blit(fImages['message'], (messagex, messagey))\n oScreen.blit(fImages['base'], (basex, nBaseY))\n oScreen.blit(fImages['furymode'], (furymodex, furymodey))\n\n pygame.display.update()\n oFPSClock.tick(nFPS)", "title": "" }, { "docid": "8c052c96772d1256135231375eb110c2", "score": "0.51229423", "text": "def change_fleet_direction(ai_settings,aliens):\n\tfor alien in aliens.sprites():\n\t\talien.rect.y += ai_settings.fleet_drop_speed\n\tai_settings.fleet_direction *= -1", "title": "" }, { "docid": "4ce2444be7da280665afd5ef0d1758c2", "score": "0.5121901", "text": "def move_student(keys):\n global part\n global gravity\n if pygame.K_RIGHT in keys:\n if pygame.K_UP in keys:\n camera.draw(walker[8])\n for image in walker:\n image.x += 10\n for obstacle in noninteractives:\n if image.bottom_touches(obstacle) and obstacle.top_touches(image):\n image.speedy = -17\n gravity = []\n\n image.move_speed()\n else:\n camera.draw(walker[int(part) % 7])\n part += 1\n for image in walker:\n image.x += 10\n for image in walker:\n camera.center = [image.center[0], 300]\n elif pygame.K_LEFT in keys:\n if pygame.K_UP in keys:\n camera.draw(walker[8])\n for image in walker:\n image.x -= 10\n for each in noninteractives:\n if image.bottom_touches(each):\n image.speedy = -17\n gravity = []\n break\n image.move_speed()\n else:\n camera.draw(walker[int(part) % 7])\n part -= 1\n for image in walker:\n image.x -= 10\n for image in walker:\n camera.center = [image.center[0], 300]\n elif pygame.K_UP in keys:\n 
camera.draw(walker[8])\n for image in walker:\n for each in noninteractives:\n if image.bottom_touches(each):\n image.speedy = -17\n image.move_speed()\n elif pygame.K_UP not in keys and (pygame.K_LEFT not in keys) and pygame.K_RIGHT not in keys:\n camera.draw(walker[7])\n for image in walker: # Gravity\n for each in noninteractives:\n if image.bottom_touches(each):\n gravity.append(True)\n else:\n gravity.append(False)\n if True not in gravity:\n image.speedy += 3\n image.move_speed()\n gravity = []\n\n for frame in walker:\n for item in noninteractives:\n frame.move_to_stop_overlapping(item)\n # Background movement\n for images in walker:\n background.x = (images.x*0.9) + 500", "title": "" }, { "docid": "720b118b6dc7e7d88db5a8a82b3b03b0", "score": "0.5121038", "text": "def playerOneUpdate(self):\n if DemoPlayerSprite.paused:\n if self.frameCount == 1:\n self.coordinates = (self.coordinates[0] + 18, self.coordinates[1])\n if self.frameCount < 6:\n self.image = self.animationFrames[2]\n elif self.frameCount < 8:\n self.image = self.animationFrames[3]\n elif self.frameCount < 11:\n self.image = self.animationFrames[4]\n elif self.frameCount == 11:\n DemoPlayerSprite.paused = False\n DemoPlayerSprite.facingDirection = c.Directions.LEFT\n self.coordinates = (self.coordinates[0] - 16, self.coordinates[1])", "title": "" } ]
9e8cffa87aa18190b39d065b730b3025
Iterates over all text nodes and merges all text nodes that are close to each other. This is useful for text extraction.
[ { "docid": "3aa34874c7a425fadb84b8a6805e4bb7", "score": "0.7767277", "text": "def merge_text_nodes(self):\n ...", "title": "" } ]
[ { "docid": "39590391492dc3733a65e910084f4590", "score": "0.57207865", "text": "def normalizeDocument(self): # TODO - test\n for each in self.childNodes:\n if each.nodeType == Node.TEXT_NODE:\n if each.nodeValue.strip() == '':\n each.parentNode.removeChild(each)\n else:\n each.normalize()\n else:\n each.normalize()\n return", "title": "" }, { "docid": "137abb171d1b61430bd5a9021addb393", "score": "0.56507224", "text": "def split_text_nodes(self) -> None:\n new_child_nodes = []\n\n for node_id in self.child_nodes:\n node = in_scope(node_id)\n\n if isinstance(node, TextNode):\n list_ids = node.split_by_paragraph()\n for ids in list_ids:\n new_child_nodes.append(ids)\n else:\n new_child_nodes.append(node.id)\n\n out_scope(node)\n\n self.child_nodes = new_child_nodes", "title": "" }, { "docid": "d5120860c6e233cdb2cf47419f0fe26a", "score": "0.5505909", "text": "def flatten_links(text):\n\n while True:\n text, sub_count = link_inside_link_regex.subn(\n r\"\\g<outer_link>\\g<inner_content>\", text)\n if sub_count == 0:\n return text", "title": "" }, { "docid": "708d267d2459c1a1e95d462d5ca9ffac", "score": "0.5442562", "text": "def flatten(self):\n flattened_text = [self.text or '']\n for child in list(self):\n flattened_text.append(child.flatten())\n flattened_text.append(child.tail or '')\n self.remove(child)\n return ''.join(flattened_text)", "title": "" }, { "docid": "b8ead417ca0e861e4524204fe94db4a8", "score": "0.5433667", "text": "def _combine(text_elements):\n full_text = ' '.join(text_elements)\n return full_text.replace(\"\\n\", \" \").replace(\"\\r\", \" \")", "title": "" }, { "docid": "247391a1751875c72206dcb6aec4eb06", "score": "0.533118", "text": "def text_children(self, element, trailing_space, text_root=False):\n children = []\n space = '{http://www.w3.org/XML/1998/namespace}space'\n preserve = self.get(space) == 'preserve'\n self._etree_node.text = self.process_whitespace(\n element.etree_element.text, preserve)\n if trailing_space and not preserve:\n self._etree_node.text = self.text.lstrip(' ')\n\n original_rotate = [\n float(i) for i in\n normalize(self.get('rotate')).strip().split(' ') if i]\n rotate = original_rotate.copy()\n if original_rotate:\n self.pop_rotation(original_rotate, rotate)\n if self.text:\n trailing_space = self.text.endswith(' ')\n for child_element in element.iter_children():\n child = child_element.etree_element\n if child.tag in ('{http://www.w3.org/2000/svg}tref', 'tref'):\n child_node = Node(child_element, self._style)\n child_node._etree_node.tag = 'tspan'\n # Retrieve the referenced node and get its flattened text\n # and remove the node children.\n child = child_node._etree_node\n child._etree_node.text = child.flatten()\n child_element = ElementWrapper.from_xml_root(child)\n else:\n child_node = Node(child_element, self._style)\n child_preserve = child_node.get(space) == 'preserve'\n child_node._etree_node.text = self.process_whitespace(\n child.text, child_preserve)\n child_node.children, trailing_space = child_node.text_children(\n child_element, trailing_space)\n trailing_space = child_node.text.endswith(' ')\n if original_rotate and 'rotate' not in child_node:\n child_node.pop_rotation(original_rotate, rotate)\n children.append(child_node)\n if child.tail:\n anonymous_etree = ElementTree.Element(\n '{http://www.w3.org/2000/svg}tspan')\n anonymous = Node(\n ElementWrapper.from_xml_root(anonymous_etree), self._style)\n anonymous._etree_node.text = self.process_whitespace(\n child.tail, preserve)\n if original_rotate:\n 
anonymous.pop_rotation(original_rotate, rotate)\n if trailing_space and not preserve:\n anonymous._etree_node.text = anonymous.text.lstrip(' ')\n if anonymous.text:\n trailing_space = anonymous.text.endswith(' ')\n children.append(anonymous)\n\n if text_root and not children and not preserve:\n self._etree_node.text = self.text.rstrip(' ')\n\n return children, trailing_space", "title": "" }, { "docid": "af6ebe9eadda487732f3149059dfb891", "score": "0.52973646", "text": "def find_nodes(self):\n for word in self.input:\n self.additional_nodes += [\n new_word\n for new_word, *_ in self.model.most_similar(word, n=self.topn)\n if new_word not in self.additional_nodes + self.input\n ]\n self.additional_nodes = list(set(self.additional_nodes))", "title": "" }, { "docid": "0997f0f956b8c49b6039c349714ce34e", "score": "0.5239275", "text": "def optimizedChildren(self):\n\n all = []\n texts = []\n\n def flush():\n if texts:\n all.append(\n self.literalFactory(self, xml=unicodeJoin(texts))\n )\n texts[:]=[]\n\n for child in self.children:\n t = child.staticText\n if t is None:\n flush()\n all.append(child)\n else:\n texts.append(t)\n\n flush()\n return all", "title": "" }, { "docid": "a067c50bc97fcb37f9748c592369cfed", "score": "0.52309406", "text": "def _merge_sentences(text):\n\n return [word for sentence in text for word in sentence]", "title": "" }, { "docid": "fe8c7eaf6e7bdb9284ce00fa74c512d3", "score": "0.5224719", "text": "def filter_unrelated_lines(text_blocks, _config):\n new_text_blocks=[]\n for tb in text_blocks:\n new_tb=copy.copy(tb)\n new_tb['blockText']=[]\n next_top=tb['blockTop']\n for t in tb['blockText']:\n if t['top'] < next_top + t['height'] / 2:\n next_top=t['top'] + t['height']\n new_tb['blockText'].append(t)\n if new_tb['blockText']:\n new_text_blocks.append(new_tb)\n return new_text_blocks", "title": "" }, { "docid": "7c1d8104adff6d4750916a6de4f21db4", "score": "0.52213496", "text": "def extractAll(text, startText, endText):\n result = []\n start = 0\n pos = text.find(startText, start)\n while pos != -1:\n start = pos + startText.__len__()\n end = text.find(endText, start)\n result.append(text[start:end].replace('\\n', '').replace('\\t', '').lstrip())\n pos = text.find(startText, end)\n return result", "title": "" }, { "docid": "a55efc10e15493f1edc9e36a51e51b56", "score": "0.517794", "text": "def _merge_entities_with_whitespace_between(\n self,\n text: str,\n analyzer_results: List[RecognizerResult]\n ) -> List[RecognizerResult]:\n merged_results = []\n prev_result = None\n for result in analyzer_results:\n if prev_result is not None:\n if prev_result.entity_type == result.entity_type:\n if re.search(r'^( )+$', text[prev_result.end:result.start]):\n merged_results.remove(prev_result)\n result.start = prev_result.start\n merged_results.append(result)\n prev_result = result\n return merged_results", "title": "" }, { "docid": "5ade9a0dca586253ccf45a47103bb1d3", "score": "0.51553494", "text": "def end_nodes(self):\n\n ret_set = set(\n (n for n in self if cast_text(n.Class()) not in (\"Viewer\",)),\n ) # type: Set[nuke.Node]\n other = list(n for n in self if n not in ret_set)\n\n for n in list(ret_set):\n dep = n.dependencies(nuke.INPUTS)\n if set(self).intersection(dep):\n ret_set.difference_update(dep)\n ret = sorted(\n ret_set, key=lambda x: len(node_deep_dependencies(x)), reverse=True\n )\n ret.extend(other)\n return ret", "title": "" }, { "docid": "1212a1b805731bb43175d00627654762", "score": "0.51374435", "text": "def _test_process(self, text):\n token_lists = 
self.__sentencize(text)\n tags, trees, node_dicts, _ = self.__process_tokens(token_lists, True)\n stuples = [self._extract_stuples(node_dict.values()) for node_dict in node_dicts]\n return token_lists[0], tags[0], trees[0], node_dicts[0], stuples[0]", "title": "" }, { "docid": "a4825b285477371ac9ece37d040723d3", "score": "0.51054436", "text": "def mergeCells(cells):\n current = 0\n while len(cells) > 1:\n count = 0\n while count < len(cells):\n if cells[current].merge(cells[count]):\n if current > count:\n current -= 1\n cells.pop(count)\n else:\n count += 1\n current += 1\n if current >= len(cells):\n current = 0\n return cells[0].text", "title": "" }, { "docid": "2436c23d3a4b82d47497594040f855ae", "score": "0.5082904", "text": "def links_to_text(self):\n self.parser.stripTags(self.get_top_node(), 'a')", "title": "" }, { "docid": "f0c64a80609021ec1e0e053252842f89", "score": "0.5003436", "text": "def iterate_over_text_elements(self, sections=False):\n running_offset = 0\n if self.title:\n yield self.title, 0\n running_offset += len(self.title) + 1\n if self.abstract:\n yield self.abstract, running_offset\n running_offset += len(self.abstract) + 1\n\n if sections and self.sections:\n for sec in self.sections:\n yield sec.title, running_offset\n running_offset += len(sec.title) + 1\n yield sec.text, running_offset\n running_offset += len(sec.text) + 1", "title": "" }, { "docid": "9636a9e4993d14d093ac408ed392ed19", "score": "0.5001456", "text": "def __create_graph_all_words(self):\n # for each of the parts of speach\n # connections are supported only for nouns & verbs\n for synset in wn.all_synsets():\n parent = synset\n children = parent.hyponyms()\n # self.__recurse_down_tree(parent, children)\n self.__add_to_graph(parent, children)", "title": "" }, { "docid": "0cbf5b7db2aeb2b91deadaeaa22ae8ce", "score": "0.49970114", "text": "def getAnnotationTree(self):\n root = self.document().rootFrame()\n stringDocumenttext = unicode(self.document().toPlainText())\n tree = []\n for frame in root.childFrames():\n if frame.__class__.__name__ == \"QTextTable\":\n utteranceCell = frame.cellAt(0, 0)\n strUtteranceId = unicode(utteranceCell.format().anchorNames()[0])\n strUtteranceId = re.sub(r\"^utterance-\", \"\", strUtteranceId)\n utteranceCell = frame.cellAt(0, 1)\n iStart = int(utteranceCell.firstCursorPosition().position())\n iEnd = int(utteranceCell.lastCursorPosition().position())\n strUtterance = stringDocumenttext[iStart:iEnd].strip()\n arrWords = []\n for i in range((frame.columns()-1)/2):\n strWordId = unicode(frame.cellAt(1, i*2+1).format().anchorNames()[0])\n strWordId = re.sub(r\"^word-\", \"\", strWordId)\n wordCell = frame.cellAt(1, i*2+2)\n iStart = int(wordCell.firstCursorPosition().position())\n iEnd = int(wordCell.lastCursorPosition().position())\n strWord = stringDocumenttext[iStart:iEnd]\n morphemesCell = frame.cellAt(2, i*2+2)\n iStart = int(morphemesCell.firstCursorPosition().position())\n iEnd = int(morphemesCell.lastCursorPosition().position())\n strMorphemes = stringDocumenttext[iStart:iEnd]\n glossesCell = frame.cellAt(3, i*2+2)\n iStart = int(glossesCell.firstCursorPosition().position())\n iEnd = int(glossesCell.lastCursorPosition().position())\n strGlosses = stringDocumenttext[iStart:iEnd]\n strInterlinear = \"%s %s %s\" % (strWord, strMorphemes, strGlosses)\n strInterlinear =strInterlinear.strip()\n arrWords.append([strWordId, strInterlinear])\n tree.append([strUtteranceId, strUtterance, arrWords])\n return tree", "title": "" }, { "docid": "df7c04893cc8bbc295ed4519ff161dbc", 
"score": "0.49935487", "text": "def _get_GraphNodeText(self, doc, domNdPage, ndBlock, ctxt=None): \n lNdText = ndBlock.xpath(self.sxpTextual, namespaces=self.dNS)\n if len(lNdText) != 1:\n if len(lNdText) > 1: raise ValueError(\"More than 1 textual content for this node: %s\"%etree.tostring(ndBlock))\n \n #let's try to get th etext of the words, and concatenate...\n # traceln(\"Warning: no text in node %s => looking at words!\"%ndBlock.prop(\"id\")) \n # lsText = [ntext.content.decode('utf-8').strip() for ntext in ctxt.xpathEval('.//pc:Word/pc:TextEquiv//text()')] #if we have both PlainText and UnicodeText in XML, :-/\n lsText = [_nd.text.strip() for _nd in ctxt.xpathEval('.//pc:Word/pc:TextEquiv')] #if we have both PlainText and UnicodeText in XML, :-/\n return \" \".join(lsText)\n \n return PageXml.makeText(lNdText[0])", "title": "" }, { "docid": "b377d73813ba74089f6d30fe30d3aa70", "score": "0.49913737", "text": "def merge_nc_xml(xml1, xml2):\n\n xml1_list = xml1.split(\"</data>\")[0].split(\"\\n\")\n xml2_list = xml2.split(\"<data>\")[1].split(\"\\n\")\n\n while True:\n xml1_ele1 = get_xml_line(xml1_list, -1)\n xml1_ele2 = get_xml_line(xml1_list, -2)\n xml2_ele1 = get_xml_line(xml2_list, 0)\n xml2_ele2 = get_xml_line(xml2_list, 1)\n if not xml1_ele1 or not xml1_ele2 or not xml2_ele1 or not xml2_ele2:\n return xml1\n\n if \"xmlns\" in xml2_ele1:\n xml2_ele1 = xml2_ele1.lstrip().split(\" \")[0] + \">\"\n if \"xmlns\" in xml2_ele2:\n xml2_ele2 = xml2_ele2.lstrip().split(\" \")[0] + \">\"\n if xml1_ele1.replace(\" \", \"\").replace(\"/\", \"\") == xml2_ele1.replace(\" \", \"\").replace(\"/\", \"\"):\n if xml1_ele2.replace(\" \", \"\").replace(\"/\", \"\") == xml2_ele2.replace(\" \", \"\").replace(\"/\", \"\"):\n xml1_list.pop()\n xml2_list.pop(0)\n else:\n break\n else:\n break\n\n return \"\\n\".join(xml1_list + xml2_list)", "title": "" }, { "docid": "a363f27b460c87aec8599ddd913289f3", "score": "0.49708256", "text": "def concatenate_all_text(data_list):\n all_text = []\n for index, value in enumerate(data_list):\n all_text.extend(value['text'])\n \n return all_text", "title": "" }, { "docid": "860f40dd1f9a59d8de5ae2d547775738", "score": "0.49547598", "text": "def tag_text(text):\n return sum(tag_sentences(text), [])", "title": "" }, { "docid": "11e44a94d03d9fa79378c6c4538e4d3e", "score": "0.4949794", "text": "def cleanup(target_node):\n node = add_siblings(target_node)\n for e in node.getchildren():\n if e.tag != 'p':\n if is_high_link_density(e) \\\n or is_table_tag_and_no_paragraphs_exist(e) \\\n or not node_score_threshold_met(node, e):\n parser.remove(e)\n return node", "title": "" }, { "docid": "cbc20f0aa3a2fc7ed88686ff42a05fd6", "score": "0.4946932", "text": "def merge_x2p(self, to_merge):\n merge_tree = ET.parse(to_merge)\n # input_root = merge_tree.getroot()\n paths = set(self._file_paths(self.tree))\n for elem in XPathCommand._files(merge_tree):\n path = elem.attrib[\"path\"]\n if path not in paths:\n self.root.append(elem)\n input_props = dict(XPathCommand._properties(merge_tree))\n for prop in self._props(self.tree):\n name = prop.attrib[\"name\"]\n if name in input_props:\n XPathCommand._merge_text(prop, input_props[name])", "title": "" }, { "docid": "0ceae388046ec6c2c708dfd1c84b61b8", "score": "0.49249494", "text": "def merge(self, tree):\n pass", "title": "" }, { "docid": "5c2b387192ed2456ba6a97b2a8a4d948", "score": "0.4910533", "text": "def simplify_labels(self):\r\n self.sort()\r\n for axis in self.world:\r\n t1 = self.text[axis][0]\r\n for i in range(1, 
len(self.world[axis])):\r\n t2 = self.text[axis][i]\r\n if len(t1) != len(t2):\r\n t1 = self.text[axis][i]\r\n continue\r\n start = 0\r\n for j in range(len(t1)):\r\n if t1[j] != t2[j]:\r\n break\r\n if t1[j] not in '-0123456789.':\r\n start = j + 1\r\n if start == 0:\r\n t1 = self.text[axis][i]\r\n else:\r\n self.text[axis][i] = self.text[axis][i][start:]", "title": "" }, { "docid": "50c0921df4db97a037d78d8ccfcbf800", "score": "0.49004057", "text": "def collapse_all(self):\n for node in self.nodes:\n node.collapse_all()", "title": "" }, { "docid": "b42a7125c6893564459dc81ac612fe3a", "score": "0.4895957", "text": "def test_hydrate_nodes(add_text, add_doc, add_citation):\n\n t1 = add_text(title='title1', authors=['author1', 'author2'])\n t2 = add_text(title='title2', authors=['author3', 'author4'])\n\n doc = add_doc()\n\n add_citation(document=doc, text=t1)\n add_citation(document=doc, text=t2)\n\n g = Text_Graph()\n\n g.add_edges()\n g.hydrate_nodes()\n\n assert g.graph.node[t1.id]['authors'] == t1.pretty('authors')\n assert g.graph.node[t2.id]['authors'] == t2.pretty('authors')\n\n assert g.graph.node[t1.id]['title'] == t1.pretty('title')\n assert g.graph.node[t2.id]['title'] == t2.pretty('title')", "title": "" }, { "docid": "18b01a54673e10e9b7440d69aba635ab", "score": "0.48803574", "text": "def merge_tag_text(self, o):\n r = \"\"\n if isinstance(o, list):\n for i in o:\n r = r+self.merge_tag_text(i)\n elif isinstance(o, str):\n return o\n elif isinstance(o, (int, float)):\n return str(o)\n elif isinstance(o, dict):\n if o.get(\"#alldata\") is None:\n for v in o.values():\n r = r+self.merge_tag_text(v)\n else:\n r = r+self.merge_tag_text(o[\"#alldata\"])\n return r", "title": "" }, { "docid": "a6689c177d091ee72e3b753461f35713", "score": "0.48699176", "text": "def doc_to_text(doc):\n\n text_parts = []\n\n for tok in doc:\n if tok.tag_ == 'NNP':\n new_part = 'someone' + tok.whitespace_\n text_parts.extend(new_part)\n elif tok.tag_ == 'NNPS':\n new_part = 'they' + tok.whitespace_\n text_parts.extend(new_part)\n elif tok.tag_ == 'PRP':\n new_part = 'they' + tok.whitespace_\n text_parts.extend(new_part)\n elif tok.tag_ == 'PRP$':\n new_part = 'their' + tok.whitespace_\n text_parts.extend(new_part)\n else:\n new_part = tok.text_with_ws \n text_parts.extend(new_part)\n\n anon_text = ''.join(text_parts)\n \n split_words = anon_text.split(' ')\n no_consec_duplicates = [i[0] for i in itertools.groupby(split_words)] \n output_text = ' '.join(no_consec_duplicates)\n\n return(output_text)", "title": "" }, { "docid": "3d6e375b4d9a3017f3d3c06777ad1b04", "score": "0.4860601", "text": "def merge_all_adjacent(self):\r\n for i in range(data.windows.__len__()):\r\n while self.mergeable(i,i+1):\r\n self.merge(i,i+1,False,False)\r\n self.reload()", "title": "" }, { "docid": "adf83da0a3e52f623f897f667d2b8e14", "score": "0.4858169", "text": "def _move_words_to_links(self):\n\n visited = {self.initial_node.id}\n\n def visit_link(link):\n \"\"\"A function that is called recursively to move a word from the\n link end node to the link.\n \"\"\"\n end_node = link.end_node\n if hasattr(end_node, 'word'):\n if link.word is None:\n link.word = end_node.word\n else:\n raise InputError(\"SLF lattice contains words both in nodes \"\n \"and links.\")\n if end_node.id not in visited:\n visited.add(end_node.id)\n for next_link in end_node.out_links:\n visit_link(next_link)\n\n for link in self.initial_node.out_links:\n visit_link(link)\n\n for node in self.nodes:\n if hasattr(node, 'word'):\n del node.word", "title": "" }, { 
"docid": "88768b873278a742a28d4b93d117827d", "score": "0.48562792", "text": "def replace_texts(docs, subdocs):\n for i, doc in enumerate(docs):\n doc.text = '\\n' + subdocs[i] #newline after opening tag", "title": "" }, { "docid": "a7555e51ea309bc648b4ae5319c5e938", "score": "0.48513833", "text": "def do_analysis(node1, node2):\r\n result = list()\r\n\r\n for child1 in node1.children:\r\n for child2 in node2.children:\r\n if child1.xpath == child2.xpath and child1.md5 != child2.md5:\r\n if are_different(child1, child2):\r\n result.append(child1.xpath)\r\n else:\r\n result += do_analysis(child1, child2)\r\n\r\n return result", "title": "" }, { "docid": "9794ce6ff6c3b8ac3c9548f4c2c9f9a3", "score": "0.48396114", "text": "def get_text(el, buf):\n for child in el:\n if child.text:\n buf.append(child.text)\n get_text(child, buf)\n if child.tail:\n buf.append(child.tail)", "title": "" }, { "docid": "bc9d85dbdc5ee8892ffe795e4765c51b", "score": "0.48371637", "text": "def traverse(self, include_text: bool = False) -> Iterator[\"Node\"]:\n ...", "title": "" }, { "docid": "b8f746544b007c2446652cbb947d87b7", "score": "0.48344222", "text": "def _deleteNodesForWordEndingWith(self, terminalNode):\n\n lastNode = terminalNode\n character = lastNode.value\n while lastNode.isLeaf():\n lastNode = lastNode.parentNode\n del lastNode.children[character]\n character = lastNode.value\n if lastNode.isEnd or lastNode.parentNode is None:\n break", "title": "" }, { "docid": "c7ad4ebb561905d781f3e12ecf4ac9e3", "score": "0.4823729", "text": "def _merge_text(elem, text, sep=\" \"):\n new_text = text.strip()\n if not new_text:\n # incoming (stripped) text it blank, nothing to do\n return\n elem_text = (elem.text or \"\").strip()\n if elem_text:\n # existing text exists (and stripped, is not blank), merge new text, preserving order\n elem_words = elem_text.split(sep)\n new_words = new_text.strip().split(sep)\n to_add = [val for val in new_words if val not in elem_words]\n newtext = sep.join(elem_words + to_add)\n else:\n # existing text is blank, just use incoming text\n newtext = new_text\n # no matter what, the element's new text is stripped\n elem.text = newtext.strip()", "title": "" }, { "docid": "b5f53a0babb1b9707d2a65c815adf138", "score": "0.47902432", "text": "def group_lines(\n texts: List[List[Any]],\n iou_threshold: float = 0.4\n) -> List[List[Any]]:\n grouped = []\n texts = sorted(texts, key=lambda x: (x[-1][1] + x[-1][3]) / 2)\n current_line = []\n for text in texts:\n if not current_line:\n current_line.append(text)\n continue\n y0s = [t[-1][1] for t in current_line]\n y1s = [t[-1][3] for t in current_line]\n inter = np.minimum(y1s, text[-1][3]) - np.maximum(y0s, text[-1][1])\n inter = np.maximum(inter, 0)\n union = np.maximum(y1s, text[-1][3]) - np.minimum(y0s, text[-1][1])\n iou = inter / union\n if iou.mean() > iou_threshold:\n current_line.append(text)\n else:\n current_line = sorted(current_line, key=lambda x: (x[-1][0] + x[-1][2]) / 2)\n current_line.append(''.join([w[0] for w in current_line]))\n grouped.append(current_line)\n current_line = [text]\n current_line = sorted(current_line, key=lambda x: (x[-1][0] + x[-1][2]) / 2)\n current_line.append(''.join([w[0] for w in current_line]))\n grouped.append(current_line)\n return grouped", "title": "" }, { "docid": "833bac3ecc69cd4030692d621c9589b5", "score": "0.47881782", "text": "def process(self, text):\r\n filtered_text = self.__filter_text(text)\r\n simplified_data = self.__break_up_data(filtered_text)\r\n rotated_text = []\r\n for sent in 
simplified_data:\r\n rotated_text.append(self.__rotate_sent(sent))\r\n return self.__sort_list(rotated_text)", "title": "" }, { "docid": "7ee6927b230ecac9ccfdf82c162d4e33", "score": "0.47878805", "text": "def transform(self,w1,w2):\n n1, n2 = self.word_to_node[w1.upper()], self.word_to_node[w2.upper()]\n \n output = []\n for word_index in bidirectional_search(self.g,n1,n2):\n output.extend(list(self.g[word_index].data))\n output.extend(list(\" -> \"))\n \n if len(output) > 0:\n print(\"\".join(output[:-4])) # Exclude the last \" -> \".\n else:\n print(\"Transformation is impossible.\")", "title": "" }, { "docid": "a35a8a494b387a5cd072ff3c8f651520", "score": "0.4767881", "text": "def _combine_neighbouring_extents(extents, distance_px=10):\n left, right = extents[0]\n for x in extents[1:]:\n if x[0] <= (right + distance_px):\n right = x[1]\n else:\n yield (left, right)\n left, right = x\n yield (left, right)", "title": "" }, { "docid": "46c5a229c4769f3e056c5f9006f389fe", "score": "0.47652975", "text": "def stripWhitespace(node):\n # Process element child nodes.\n textnodes = []\n for ch in node.childNodes:\n if ch.nodeType==ch.ELEMENT_NODE:\n stripWhitespace(ch)\n elif ch.nodeType==ch.TEXT_NODE:\n textnodes.append(ch)\n \n if textnodes:\n # Reuse first text node to absorbed merged and whitespace-stripped text, and delete any other text nodes.\n text = ''.join([ch.data for ch in textnodes]).strip()\n if text:\n textnodes[0].data = text\n textnodes = textnodes[1:]\n for ch in textnodes: node.removeChild(ch)", "title": "" }, { "docid": "24ca40e8599547fb7293f0e0c6658f9e", "score": "0.4765079", "text": "def text_space_separated(self):\r\n return \" \".join([child.text for child in self.children])", "title": "" }, { "docid": "c7088ab0586b123b26e60de6602cc0be", "score": "0.4760724", "text": "def iter(self, include_text: bool = False) -> Iterator[\"Node\"]:\n ...", "title": "" }, { "docid": "ee4eb6e8b57f8f340833db218a77ad11", "score": "0.47302854", "text": "def remove_tags(tree):\n return \" \".join(tree.itertext())", "title": "" }, { "docid": "dc99ace633d22b86f425d68235d9a48b", "score": "0.47282338", "text": "def merge(self, other):\n if self._word != other.word():\n print self.word(), other.word()\n raise RuntimeError(\"Word is not a match for merge.\")\n\n for pos in other.pos():\n self.add_pos(pos)", "title": "" }, { "docid": "973510f6d9982a8fbceb4ce38d3cf0f7", "score": "0.47265708", "text": "def mergeNodes(G,nodeList,new_node):\r\n \r\n if len(nodeList) > 1:\r\n\r\n G.add_node(new_node) # Add the 'merged' node\r\n\r\n addEdgeList = []\r\n\r\n combinedEdgeList = []\r\n\r\n for node in nodeList:\r\n successor_list = G.successors(node)\r\n predecessor_list = G.predecessors(node)\r\n for successor_node in successor_list:\r\n weight_data = G.get_edge_data(new_node,successor_node)['weight']\r\n addEdgeList.append([node,successor_node,weight_data])\r\n for predecessor_node in predecessor_list:\r\n weight_data = G.get_edge_data(predecessor_node,node)['weight']\r\n addEdgeList.append([predecessor_node,new_node,weight_data])\r\n\r\n for n1,n2,data in addEdgeList:\r\n matching = [elem for elem in combinedEdgeList if n1 in elem and n2 in elem]\r\n if len(matching) == 1:\r\n matching_index = combinedEdgeList.index(matching[0])\r\n combinedEdgeList[matching_index][2] = combinedEdgeList[matching_index][2] + 1\r\n elif len(matching) == 0:\r\n combinedEdgeList.append([n1,n2,data])\r\n else: \r\n print(\"Error state occured in combinedEdgeList!\")\r\n\r\n for n1,n2,weightValue in combinedEdgeList:\r\n 
G.add_edge(n1,n2,weight=weightValue)\r\n \r\n for n in nodeList: # remove the merged nodes\r\n G.remove_node(n)", "title": "" }, { "docid": "40a70e64d1de5692c79f6e490b0bd32d", "score": "0.47197935", "text": "def propagate_leaves(self):\n for i in range(self.size-1, 0, -1):\n left = self.tree[i << 1]\n right = self.tree[i << 1 | 1]\n self.tree[i] = left.union(right)", "title": "" }, { "docid": "93b6f9968c4a4edc66bba80684132592", "score": "0.47076473", "text": "def extract(self):\r\n if self.parent is not None:\r\n del self.parent.contents[self.parent.index(self)]\r\n\r\n #Find the two elements that would be next to each other if\r\n #this element (and any children) hadn't been parsed. Connect\r\n #the two.\r\n last_child = self._last_descendant()\r\n next_element = last_child.next_element\r\n\r\n if self.previous_element is not None:\r\n self.previous_element.next_element = next_element\r\n if next_element is not None:\r\n next_element.previous_element = self.previous_element\r\n self.previous_element = None\r\n last_child.next_element = None\r\n\r\n self.parent = None\r\n if self.previous_sibling is not None:\r\n self.previous_sibling.next_sibling = self.next_sibling\r\n if self.next_sibling is not None:\r\n self.next_sibling.previous_sibling = self.previous_sibling\r\n self.previous_sibling = self.next_sibling = None\r\n return self", "title": "" }, { "docid": "b0f225e53e60498b0c85646fabbedec9", "score": "0.4705273", "text": "def getedutext(self):\n edunodelist = getedunode(self.tree)\n texts = []\n for node in edunodelist:\n texts.append(node.text)\n return texts", "title": "" }, { "docid": "473bac020b0a8ca4d85690c5bfc24c93", "score": "0.47048306", "text": "def _flattenAllLevels(self, source):\n\n for root, directories, files in os.walk(source):\n for file in files:\n filePath = os.path.join(root, file)\n if os.path.basename(os.path.dirname(filePath)) == \"Text\":\n continue\n destination = os.path.join(source, file)\n if filePath != destination:\n shutil.move(filePath, destination)\n\n for directory in os.listdir(source):\n if os.path.isdir(os.path.join(source, directory)) and directory != \"Text\":\n shutil.rmtree(os.path.join(source,directory))", "title": "" }, { "docid": "aae5e9aa4f524496f8da7ee53b8ed60a", "score": "0.46960604", "text": "def get_all_text_as_one (root, element_name_or_path, sep=os.linesep):\n \n text_list = get_all_text_as_list(root, element_name_or_path)\n \n return sep.join(text_list)", "title": "" }, { "docid": "891a04c412361b80b89fd52fefda8a1c", "score": "0.469353", "text": "def __processElementText(self, node, subnode, isText=True):\r\n if isText:\r\n text = subnode.text\r\n subnode.text = None\r\n else:\r\n text = subnode.tail\r\n subnode.tail = None\r\n\r\n childResult = self.__processPlaceholders(text, subnode)\r\n\r\n if not isText and node is not subnode:\r\n pos = node.getchildren().index(subnode)\r\n node.remove(subnode)\r\n else:\r\n pos = 0\r\n\r\n childResult.reverse()\r\n for newChild in childResult:\r\n node.insert(pos, newChild)", "title": "" }, { "docid": "a2eedef9125c91d3a2166e3907b5a50a", "score": "0.46901572", "text": "def get_unique_nodes(node_list):\n good_nodes = []\n nodes_as_txt = []\n for node in node_list:\n txt = etree.tostring(node).strip()\n if txt in nodes_as_txt:\n # Duplicate. 
Move on.\n pass\n else:\n nodes_as_txt.append(txt)\n good_nodes.append(node)\n return good_nodes", "title": "" }, { "docid": "8996b1ed4fc08da06eab4c9da1fc8fd9", "score": "0.4687706", "text": "def add_text(self, text, bos=True):\n for sent in nltk.sent_tokenize(text):\n tokens = nltk.word_tokenize(sent)\n if bos:\n tokens.insert(0, self.bos_sentinel)\n trigrams = sorted(list(zip(tokens, tokens[1:], tokens[2:]))) # all dimensions get sorted\n for k, g in groupby(trigrams, lambda x: (x[0], x[1])):\n for k2, g2 in groupby(g, lambda x: x[2]):\n self.counts[k][k2] += len(list(g2))\n return self", "title": "" }, { "docid": "3bf2dd8bd4c944e74793be707cc614d8", "score": "0.4685891", "text": "def merge(self, other):\n\n self_lines = self.text.split(\"\\n\")\n other_lines = other.text.split(\"\\n\")\n\n if self.mergeableDirection(other) == \"RIGHT\":\n for i in range(len(self_lines)):\n self_lines[i] = self_lines[i] + other_lines[i][1::]\n self.text = \"\\n\".join(self_lines)\n self.column_count += other.column_count\n return True\n elif self.mergeableDirection(other) == \"TOP\":\n self_lines.pop(0)\n other_lines.extend(self_lines)\n self.text = \"\\n\".join(other_lines)\n self.row_count += other.row_count\n self.row = other.row\n self.column = other.column\n return True\n elif self.mergeableDirection(other) == \"BOTTOM\":\n other_lines.pop(0)\n self_lines.extend(other_lines)\n self.text = \"\\n\".join(self_lines)\n self.row_count += other.row_count\n return True\n elif self.mergeableDirection(other) == \"LEFT\":\n for i in range(len(self_lines)):\n self_lines[i] = other_lines[i] + self_lines[i][1::]\n self.text = \"\\n\".join(self_lines)\n self.column_count += other.column_count\n self.row = other.row\n self.column = other.column\n return True\n else:\n return False", "title": "" }, { "docid": "3a1008404254fd6e9012fba43e07655d", "score": "0.4682746", "text": "def align(self, toks, sent):\n toks_ind = 0\n sent_ind = 0\n ret = []\n while sent_ind < len(sent):\n #logging.debug(\"sent_ind = {}, toks_ind = {}\".format(sent_ind, toks_ind))\n cur_tok = str(toks[toks_ind])\n cur_word = sent[sent_ind][1]\n #logging.debug(\"{} vs. {}\".format(cur_tok, cur_word))\n #logging.debug(\"flag = {}\".format(cur_word.endswith(cur_tok)))\n if cur_tok == \".\" and cur_word == \". . 
.\":\n with toks.retokenize() as retokenizer:\n retokenizer.merge(toks[toks_ind: toks_ind + 3])\n continue\n\n if cur_tok.isspace() and cur_word.isspace():\n toks_ind += 1\n sent_ind += 1\n continue\n\n if (cur_tok == cur_word) or \\\n (cur_word.endswith(cur_tok) and \\\n (toks_ind >= (len(toks) - 1) or ((cur_tok + str(toks[toks_ind + 1])) not in cur_word))):\n toks_ind += 1\n sent_ind += 1\n\n elif cur_tok in cur_word:\n print(\"merging: {}\".format(toks[toks_ind : toks_ind + 2]))\n with toks.retokenize() as retokenizer:\n retokenizer.merge(toks[toks_ind: toks_ind + 2])\n print(toks)\n else:\n print(toks)\n raise Exception(\"Unknown case: {}\".format((toks,\n cur_tok,\n cur_word)))\n assert (toks_ind == len(toks))\n return ret", "title": "" }, { "docid": "1bf459c996ffc021fad092f7e60f6587", "score": "0.46826598", "text": "def node_text_without_xref(node):\n if node is None:\n return\n\n node = deepcopy(node)\n\n for xref in node.findall(\".//xref\"):\n if xref.tail:\n _next = xref.getnext()\n if _next is None or _next.tag != \"xref\":\n e = etree.Element(\"EMPTYTAGTOKEEPXREFTAIL\")\n xref.addnext(e)\n for xref in node.findall(\".//xref\"):\n parent = xref.getparent()\n parent.remove(xref)\n etree.strip_tags(node, \"EMPTYTAGTOKEEPXREFTAIL\")\n return node_text(node)", "title": "" }, { "docid": "a7d750debf33a03df4393a8e349a997c", "score": "0.46779794", "text": "def expand(self):\r\n old_expanded = [[]]\r\n for sub in self._tree:\r\n sub_expanded = sub.expand()\r\n new_expanded = []\r\n while len(old_expanded) > 0:\r\n sentence = old_expanded.pop()\r\n for new in sub_expanded:\r\n new_expanded.append(sentence + new)\r\n old_expanded = new_expanded\r\n return old_expanded", "title": "" }, { "docid": "a328c401369617c5eb6fe928752c25f0", "score": "0.4674231", "text": "def _remove_non_text_nodes(self, t):\n return re.sub(r'(?u)\\((CODE|ID|CODING|META)[^)]*\\)', '', t)", "title": "" }, { "docid": "020e058f634c94e773b67c14830143b2", "score": "0.467347", "text": "def combine_element(self, one, other):\n # Create a mapping from tag name to element, as that's what we are fltering with\n mapping = {el.tag: el for el in one}\n for el in other:\n if len(el) == 0:\n # Not nested\n try:\n # Update the text\n mapping[el.tag].text = el.text\n except KeyError:\n # An element with this name is not in the mapping\n mapping[el.tag] = el\n # Add it\n one.append(el)\n else:\n try:\n # Recursively process the element, and update it in the same way\n self.combine_element(mapping[el.tag], el)\n except KeyError:\n # Not in the mapping\n mapping[el.tag] = el\n # Just add it\n one.append(el)", "title": "" }, { "docid": "45af661d3ce0c522144e4a8290eee425", "score": "0.46694967", "text": "def processLayoutText(self, layoutText):\n maxY = self.height - 1\n for y in range(self.height):\n for x in range(self.width):\n layoutChar = layoutText[maxY - y][x]\n self.processLayoutChar(x, y, layoutChar)\n self.agentPositions.sort()\n self.agentPositions = [ ( i == 0, pos) for i, pos in self.agentPositions]", "title": "" }, { "docid": "45af661d3ce0c522144e4a8290eee425", "score": "0.46694967", "text": "def processLayoutText(self, layoutText):\n maxY = self.height - 1\n for y in range(self.height):\n for x in range(self.width):\n layoutChar = layoutText[maxY - y][x]\n self.processLayoutChar(x, y, layoutChar)\n self.agentPositions.sort()\n self.agentPositions = [ ( i == 0, pos) for i, pos in self.agentPositions]", "title": "" }, { "docid": "84bf6432279d28aef216740adb5461d6", "score": "0.4666063", "text": "def expand_nodes(filtered_nodes, 
wordnet_tree):\n pending = set(filtered_nodes)\n final_set = set(filtered_nodes)\n\n while len(pending) > 0:\n node = pending.pop()\n predecessors = [p for p in wordnet_tree.predecessors(node) if p not in pending and p not in final_set]\n\n if len(predecessors) > 0:\n pending.update(predecessors)\n final_set.update(predecessors)\n\n return final_set, wordnet_tree.subgraph(final_set)", "title": "" }, { "docid": "a9dbb7aa656320fbc2482d3308effec3", "score": "0.46633235", "text": "def init_pairs(self):\n for text in self.corpus:\n for i in range(len(text)-1):\n yield (text[i], text[i+1])", "title": "" }, { "docid": "f2fdfa1921f7c2305fc7bb3215cb1bbe", "score": "0.46540692", "text": "def textToHtml(node, text):\n\n\tfor (i, part) in enumerate(text.split(\"\\n\")):\n\t\tif i > 0:\n\t\t\tnode.appendChild(html5.Br())\n\n\t\tnode.appendChild(html5.TextNode(part))", "title": "" }, { "docid": "1159d540c1a57eaf84a2d932acbbb1a9", "score": "0.46502742", "text": "def _merge_node(self, x, i):\n y = x.children[i]\n z = x.children.pop(i + 1)\n y.keys.append(x.keys.pop(i))\n y.keys.extend(z.keys)\n y.children.extend(z.children)", "title": "" }, { "docid": "a2163b31d648eab3c5578b19557c7267", "score": "0.46454042", "text": "def get_ne(self, txt):\n tree = ne_chunk( pos_tag( word_tokenize( txt ) ) )\n return [n[0] if type(n)==tuple else ' '.join(e[0] for e in n) for n in tree]", "title": "" }, { "docid": "e91ee93e3a884c2a93906a7f0ed84adb", "score": "0.4640729", "text": "def merge_children(self):\n self.active = []\n for ch in self.children:\n self.active += ch.active", "title": "" }, { "docid": "4a97172fe6ad6d1dcbe910a644e68438", "score": "0.46340513", "text": "def merge_multilinestrings(network): \n edges = network.edges.copy()\n edges['geometry']= edges.geometry.apply(lambda x: merge_multilinestring(x))\n return Network(edges=edges,\n \tnodes=network.nodes)", "title": "" }, { "docid": "a14d425f4b122a2798170074f4ced8c1", "score": "0.4630092", "text": "def expand_all(self):\n for node in self.nodes:\n node.expand_all()", "title": "" }, { "docid": "e5e353dddba4085add28042f9d9eaa4c", "score": "0.46241546", "text": "def combine_word_documents(files, entstring):\n merged_document = Document()\n\n for index, file in enumerate(files):\n sub_doc = Document(file)\n\n # Don't add a page break if you've reached the last file.\n if index < len(files) - 1:\n sub_doc.add_page_break()\n\n for element in sub_doc.element.body:\n merged_document.element.body.append(element)\n\n merged_document.save(finalmerge('.docx', entstring))", "title": "" }, { "docid": "2e73ac7f4f9dcd2a91960fd2b541ac64", "score": "0.4616007", "text": "def merge_text_fields(A,B):\n B['text'] = re.sub(\" +\",\" \",(A['text'] +' ' +B['text']))\n B['height'] = max(bottom(A),bottom(B)) - min(top(A),top(B))\n B['width'] = max(right(A),right(B)) - min(left(A),left(B))\n B['top'] = min(top(A),top(B))\n B['left'] = min(left(B),left(A))", "title": "" }, { "docid": "225b234eeb133d5ede0ff992962f1bed", "score": "0.46150392", "text": "def _flush_text(self):\n if self._text:\n if not self._devnull:\n self._nodestack[-1].append_text(''.join(self._text))\n self._text = []", "title": "" }, { "docid": "9f6deda7a9c3ec219059ed79079715e4", "score": "0.46110156", "text": "def whitespace_append(self, tk, start = \"StartElement\", stop = \"EndElement\", direct = False):\n\t\t\tfor itk in self.sequence(tk, start):\n\t\t\t\tif (itk.empty or (itk.name == stop and itk.descendant_mixed is False) or (itk.name == start and abs(tk - itk) == 1)):\n\t\t\t\t\tbreak\n\t\t\t\telif 
(itk.not_empty or (itk.name == start and itk.descendant_mixed)):\n\t\t\t\t\tself.insert_empty(itk, direct)\n\t\t\t\t\tbreak", "title": "" }, { "docid": "fc2a1f3e87c1c7e27acb9c2341a77733", "score": "0.4610386", "text": "def fasttextise():\n for classification, class_dir in [('initial', INITIAL_BILLS_DIR), ('amending', AMENDING_BILLS_DIR)]:\n label = '__label__' + classification\n for subdir in ['', '_' + TEN_PERCENT, '_' + TEN_LINES, '_' + ONE_LINE]:\n path = class_dir + subdir\n entries_tra = []\n entries_tes = []\n entries_val = []\n for name, path_2 in generate_names_and_paths(path):\n bill = file_content(os.path.join(path_2, name))\n bill = re.sub(r'\\n', ' ', bill) # substitute line breaks with spaces\n bill = re.sub(r' +', ' ', bill) # at most one space in row\n if name.startswith('tes'):\n entries_tes.append(label + '\\t' + bill)\n elif name.startswith('tra'):\n entries_tra.append(label + '\\t' + bill)\n elif name.startswith('val'):\n entries_val.append(label + '\\t' + bill)\n with open(os.path.join(path, 'fasttext_tra.csv'), 'w+', encoding=\"utf8\") as fasttext:\n fasttext.write('\\n'.join(entries_tra))\n with open(os.path.join(path, 'fasttext_tes.csv'), 'w+', encoding=\"utf8\") as fasttext:\n fasttext.write('\\n'.join(entries_tes))\n with open(os.path.join(path, 'fasttext_val.csv'), 'w+', encoding=\"utf8\") as fasttext:\n fasttext.write('\\n'.join(entries_val))", "title": "" }, { "docid": "1abd056bbb7608af255f48fb918c541c", "score": "0.46026397", "text": "def traverse(self):\n buff = []\n for c in self._traverse(self.root.eq, self.leaf):\n buff += c\n return buff", "title": "" }, { "docid": "92488534202bd0509811c3054470768a", "score": "0.45961088", "text": "def do_alpino_ds(self, xmlnode, container):\n for elem in xmlnode.childNodes:\n if elem.nodeType == elem.TEXT_NODE:\n continue\n self.parse(elem, container)", "title": "" }, { "docid": "2267e26e1db92ce9bb0433a82a420c81", "score": "0.4595696", "text": "def find_root_and_text(self, path):\n fetched = None\n while not fetched:\n fetched = self.fetch_path(path)\n self.html = fetched.text\n self.tree = lxml.etree.parse(io.StringIO(self.html), self.parser)\n \n # Clean\n before_removing = len(list(self.tree.iter()))\n self.remove_undesirable_elements()\n after_removing = len(list(self.tree.iter()))\n self.debug_print('\\nRemoved {} elements'.\n format(before_removing-after_removing))", "title": "" }, { "docid": "56bba75296db6b4ed4fe3491f8b05d62", "score": "0.45929474", "text": "def get_text_and_links(wikitext):\n parsed = wtp.parse(wikitext)\n basic_info = parsed.sections[0]\n saved_links = {}\n\n num_links = len(basic_info.wikilinks)\n for i in range(num_links):\n index = num_links - i - 1\n link = basic_info.wikilinks[index]\n original_span = link.span\n start = original_span[0]\n end = original_span[1]\n target = link.target\n text = link.text\n if not target.startswith('w:'):\n basic_info[start:end] = \"\"\n move_to_left = end - start\n else:\n basic_info[original_span[0]:original_span[1]] = text\n move_to_left = end - start - len(text)\n saved_links = shift_all(saved_links, move_to_left)\n if target.startswith('w:'):\n new_end = end - move_to_left\n saved_links[tuple([start, new_end])] = target\n\n return basic_info, saved_links", "title": "" }, { "docid": "30d45051a3feadf7b6e4f624c9d25771", "score": "0.4589696", "text": "def apply(self):\n text = \"\"\n for b in self.blocks:\n text += b.getText(opts={})\n return text", "title": "" }, { "docid": "aa6e75d2639883436f9ef2545b40c060", "score": "0.45877042", "text": "def 
extract_entities_results_html(text, normalize):\n try:\n result = rester.get_ner_tags(\n text, concatenate=True, normalize=normalize\n )\n except MatScholarRestError:\n rester_error_txt = RESTER_ERROR_TEXT\n return common_rester_error_html(rester_error_txt)\n tagged_doc = result[\"tags\"]\n relevance = result[\"relevance\"]\n highlighted = highlight_entities_html(tagged_doc)\n\n # Add the warning\n if not relevance:\n warning_header_txt = \"Warning! Abstract not relevant.\"\n warning_body_txt = (\n \"Our classifier has flagged this document as not relevant to \"\n \"inorganic materials science. Expect lower than optimum \"\n \"performance.\"\n )\n warning = common_warning_html(\n warning_header_txt, warning_body_txt, \"is-fullwidth\"\n )\n else:\n warning = html.Div(\"\")\n\n # Update download link\n doc = {\"sentences\": []}\n for sent in tagged_doc:\n new_sent = []\n for token, tag in sent:\n new_sent.append({\"token\": token, \"tag\": tag})\n doc[\"sentences\"].append(new_sent)\n json_string = json.dumps(doc)\n json_string = \"data:text/csv;charset=utf-8,\" + urllib.parse.quote(\n json_string\n )\n download_link = html.A(\n \"Download entities as json\",\n id=\"entity-download-link\",\n href=json_string,\n download=\"tagged_docs.json\",\n target=\"_blank\",\n )\n download_container = html.Div(\n download_link, className=\"has-text-size-4 has-margin-top 10\"\n )\n\n label = html.Label(\"Extracted Entity Tags:\")\n label_container = html.Div(label, className=\"is-size-4 has-margin-top-30\")\n\n highlighted_container = html.Div(highlighted)\n\n label_label = html.Label(\"Labels:\")\n label_label_container = html.Div(\n label_label, className=\"is-size-4 has-margin-top-30\"\n )\n\n entity_colormap_key = copy.deepcopy(entity_color_map_extended)\n entities_keys = []\n for e, color in entity_colormap_key.items():\n # don't need the \"other\" label\n if e == \"other\":\n continue\n entity_key = html.Div(\n e, className=f\"is-size-4 msweb-is-{color}-txt has-text-weight-bold\"\n )\n entity_key_container = html.Div(\n entity_key, className=\"flex-column is-narrow has-margin-5 box\"\n )\n entities_keys.append(entity_key_container)\n\n entity_key_container = html.Div(\n entities_keys, className=\"columns is-multiline has-margin-5\"\n )\n\n results = html.Div(\n [\n warning,\n label_container,\n highlighted_container,\n label_label_container,\n entity_key_container,\n download_container,\n ]\n )\n return results", "title": "" }, { "docid": "f8051ab34a9cde142f8065437f9b0fd3", "score": "0.45858335", "text": "def test_no_headlines(self):\n # text should have trailing \"\\n\" character, like most textfiles\n org_text1 = textwrap.dedent(\"\"\"\\\n\n Some non-headline text...\n Another line of it.\n \"\"\")\n org_text2 = \"\" # empty file\n\n for org_text in [org_text1, org_text2]:\n tasktree = m.parse_text_to_tree(org_text)\n # a dummy headline will be added to contain the initial text\n self.assertEqual(str(tasktree), \"* \\n\" + org_text)", "title": "" }, { "docid": "af7be3e9d1a690d5b3ae5e6a7fc3fba6", "score": "0.45849717", "text": "def extract_raw_text(text):\n clean_text = strip_all_tags(text)\n extracted_links = extract_links(clean_text)\n extracted_hashtags = extract_hashtags(clean_text)\n return remove_words_from_text(\n clean_text, extracted_links + extracted_hashtags)", "title": "" }, { "docid": "6724423684460078a9c34a718b0e9fff", "score": "0.4579801", "text": "def collapse_timex_nodes(nodes):\n return_nodes = []\n for node in nodes:\n if node.isTimex():\n for tok in node:\n 
return_nodes.append(tok)\n else:\n return_nodes.append(node)\n return return_nodes", "title": "" }, { "docid": "65295146026678f286aa3077f6c00527", "score": "0.45737746", "text": "def remove_empty_tags(self):\n all_nodes = self.parser.getElementsByTags(\n self.get_top_node(), ['*'])\n all_nodes.reverse()\n for el in all_nodes:\n tag = self.parser.getTag(el)\n text = self.parser.getText(el)\n if (tag != 'br' or text != '\\\\r') \\\n and not text \\\n and len(self.parser.getElementsByTag(\n el, tag='object')) == 0 \\\n and len(self.parser.getElementsByTag(\n el, tag='embed')) == 0:\n self.parser.remove(el)", "title": "" }, { "docid": "b27eb070916ac0bcb0aa17efa62b1a2f", "score": "0.45714396", "text": "def merge_nodes(self, other_vertex):\n self._node_list += other_vertex.get_nodes()\n new_edge_list = []\n for merge_edge in other_vertex.get_edges() + self._edge_list:\n if merge_edge not in self._node_list:\n new_edge_list.append(merge_edge)\n self._edge_list = new_edge_list", "title": "" }, { "docid": "9fcd30eef03d19e488191b76a8e2d03f", "score": "0.45529914", "text": "def _process(self, mos, text, wiki_elements,element_store, environ):\r\n frags = []\r\n end = 0\r\n for mo in mos:\r\n if end != mo.start():\r\n # call again for leading text and extend the result list \r\n frags.extend(fragmentize(text[end:mo.start()],wiki_elements[1:],\r\n element_store, environ))\r\n # append the found wiki element to the result list\r\n built = self._build(mo,element_store, environ)\r\n if built is not None:\r\n frags.append(built)\r\n # make the source output easier to read\r\n if self.append_newline:\r\n frags.append('\\n')\r\n end = mo.end()\r\n # call again for trailing text and extend the result list\r\n if end < len(text):\r\n if not isinstance(wiki_elements[0],(list,tuple)):\r\n wiki_elements = wiki_elements[1:]\r\n frags.extend(fragmentize(text[end:],wiki_elements,\r\n element_store, environ))\r\n\r\n return frags", "title": "" }, { "docid": "9fcd30eef03d19e488191b76a8e2d03f", "score": "0.45529914", "text": "def _process(self, mos, text, wiki_elements,element_store, environ):\r\n frags = []\r\n end = 0\r\n for mo in mos:\r\n if end != mo.start():\r\n # call again for leading text and extend the result list \r\n frags.extend(fragmentize(text[end:mo.start()],wiki_elements[1:],\r\n element_store, environ))\r\n # append the found wiki element to the result list\r\n built = self._build(mo,element_store, environ)\r\n if built is not None:\r\n frags.append(built)\r\n # make the source output easier to read\r\n if self.append_newline:\r\n frags.append('\\n')\r\n end = mo.end()\r\n # call again for trailing text and extend the result list\r\n if end < len(text):\r\n if not isinstance(wiki_elements[0],(list,tuple)):\r\n wiki_elements = wiki_elements[1:]\r\n frags.extend(fragmentize(text[end:],wiki_elements,\r\n element_store, environ))\r\n\r\n return frags", "title": "" }, { "docid": "36408d2002ad98aedfc76f3e021885a8", "score": "0.45456243", "text": "def condense(context, nodelist, max_newlines=3):\n text = nodelist.render(context).strip()\n text = re.sub(r'\\n{%d,}' % (max_newlines + 1), '\\n' * max_newlines, text)\n return text", "title": "" }, { "docid": "842d3f5e57a000e5b79c0abe76cc3836", "score": "0.45427123", "text": "def itertext(elem):\r\n if elem.text:\r\n yield elem.text\r\n for e in elem:\r\n for s in itertext(e):\r\n yield s\r\n if e.tail:\r\n yield e.tail", "title": "" }, { "docid": "bf55508eb45150c6a925d4847339effd", "score": "0.45412877", "text": "def _merge_entries(path, tree1, tree2):\r\n 
entries1 = _tree_entries(path, tree1)\r\n entries2 = _tree_entries(path, tree2)\r\n i1 = i2 = 0\r\n len1 = len(entries1)\r\n len2 = len(entries2)\r\n\r\n result = []\r\n while i1 < len1 and i2 < len2:\r\n entry1 = entries1[i1]\r\n entry2 = entries2[i2]\r\n if entry1.path < entry2.path:\r\n result.append((entry1, _NULL_ENTRY))\r\n i1 += 1\r\n elif entry1.path > entry2.path:\r\n result.append((_NULL_ENTRY, entry2))\r\n i2 += 1\r\n else:\r\n result.append((entry1, entry2))\r\n i1 += 1\r\n i2 += 1\r\n for i in xrange(i1, len1):\r\n result.append((entries1[i], _NULL_ENTRY))\r\n for i in xrange(i2, len2):\r\n result.append((_NULL_ENTRY, entries2[i]))\r\n return result", "title": "" }, { "docid": "c59b34a37c53c5820f7b4106c97e88ef", "score": "0.45408732", "text": "def doc_normalize(self, doc):\r\n return [\r\n self.lemmatize(token,tag).lower()\r\n for paragraph in doc\r\n for sent in paragraph\r\n for (token,tag) in sent\r\n if not self.is_punct(token) and not self.is_stopword(token)\r\n ]", "title": "" }, { "docid": "87ba144b3ad30fc33efaffc3ea08b8ff", "score": "0.45403177", "text": "def get_tagged_tree(text: str):\n return ne_chunk(pos_tag(word_tokenize(text)))", "title": "" }, { "docid": "2791fc01417d86016e71124f240b274f", "score": "0.45337158", "text": "def normalizeTotally(dom):\n removeWsNodes(dom)\n dom.normalize()", "title": "" }, { "docid": "b9cf184742f3b70b053e91a3fa32d98e", "score": "0.45302013", "text": "def process(self, text: Text, **kwargs: Any) -> List:\n # match regex entities\n extracted = []\n extracted += self.match_regex(text)\n extracted = self.remove_overlap(extracted)\n\n # extract start/end date\n start_end = extract_dates_from_to(text=text, entities=extracted, today=self.today)\n for key in start_end.keys():\n entity = {\n \"start\": -1,\n \"end\": -1,\n \"value\": start_end.get(key),\n \"confidence\": 1.0,\n \"entity\": key,\n }\n extracted.append(entity)\n return extracted", "title": "" }, { "docid": "d3323cd7d2f897dfca8624f3a0ce0534", "score": "0.45292792", "text": "def get_block_texts(xmls, poem_block_ids):\n\n poems = []\n nonpoems = []\n\n for xml in xmls:\n text_blocks = block_xpath(xml)\n\n for block in text_blocks:\n text = parse_text_lines(list(block))\n text = text.replace('w', 'v').replace('W', 'V')\n\n if block.get('ID') in poem_block_ids:\n poems.append(text)\n else:\n nonpoems.append(text)\n\n return poems, nonpoems", "title": "" } ]
65246afaff55f26d0806cad0ac86c7db
Decorator to cause a method to cache its results in self for each combination of inputs and return the cached result on subsequent calls. Does not support named arguments or arg values that are not hashable.
[ { "docid": "9f8ae71fe1c2e3f7a8edb01c79c208de", "score": "0.0", "text": "def memoizedproperty(func):\n inner_attname = '__%s' % func.__name__\n\n def new_fget(self):\n if not hasattr(self, '_cache_'):\n self._cache_ = dict()\n cache = self._cache_\n if inner_attname not in cache:\n cache[inner_attname] = func(self)\n return cache[inner_attname]\n\n return property(new_fget)", "title": "" } ]
[ { "docid": "539f3b61f9b3e2132bdcc815438fc149", "score": "0.73366696", "text": "def wrapper(*args, **kwargs):\n if (args, kwargs) not in cache:\n cache[(args, kwargs)] = func(*args, **kwargs)\n #### regardless of seen before or not, return computed result\n return cache[(args, kwargs)] # this is the computed function result, added to the dict", "title": "" }, { "docid": "6336900f266bd9ed37ce7d1fe4f21415", "score": "0.71946645", "text": "def memoize(func, cache, num_args):\n @functools.wraps(func)\n def wrapper(*args):\n mem_args = args[:num_args]\n if mem_args in cache:\n return cache[mem_args]\n result = func(*args)\n cache[mem_args] = result\n return result\n return wrapper", "title": "" }, { "docid": "89276521c4470f5049a21f5094361806", "score": "0.71922797", "text": "def memoize(func, cache, num_args):\r\n @functools.wraps(func)\r\n def wrapper(*args):\r\n mem_args = args[:num_args]\r\n if mem_args in cache:\r\n return cache[mem_args]\r\n result = func(*args)\r\n cache[mem_args] = result\r\n return result\r\n return wrapper", "title": "" }, { "docid": "89276521c4470f5049a21f5094361806", "score": "0.71922797", "text": "def memoize(func, cache, num_args):\r\n @functools.wraps(func)\r\n def wrapper(*args):\r\n mem_args = args[:num_args]\r\n if mem_args in cache:\r\n return cache[mem_args]\r\n result = func(*args)\r\n cache[mem_args] = result\r\n return result\r\n return wrapper", "title": "" }, { "docid": "198106222a83b1fc55d9229dca4ca397", "score": "0.71571916", "text": "def cached(func):\n @functools.wraps(func)\n def caching_wrapper(*args, **kwargs):\n try:\n if (args, tuple(kwargs.items())) in cache:\n return cache[args, tuple(kwargs.items())]\n else:\n res = func(*args, **kwargs)\n cache[args, tuple(kwargs.items())] = res\n return res\n except TypeError:\n warnings.warn(\n \"arguments cannot be hashed so caching is impossible.\",\n RuntimeWarning)\n return func(*args, **kwargs)\n cache = {}\n return caching_wrapper", "title": "" }, { "docid": "34057d6d131e116aca90a8ba74a02e19", "score": "0.71060795", "text": "def __call__(self, *args):\n return self.cache_get(self.memoized, args, lambda: self.func(*args))", "title": "" }, { "docid": "a5fb68fe59b431cdabe1be3eb563684d", "score": "0.70057404", "text": "def cache(self, *args, **kwargs):\r\n cache = [None]\r\n key = \" \".join(str(x) for x in args)\r\n \r\n def decorate(func):\r\n namespace = util.func_namespace(func)\r\n def cached(*args):\r\n if not cache[0]:\r\n cache[0] = self.get_cache(namespace, **kwargs)\r\n cache_key = key + \" \" + \" \".join(str(x) for x in args)\r\n def go():\r\n return func(*args)\r\n return cache[0].get_value(cache_key, createfunc=go)\r\n cached._arg_namespace = namespace\r\n return cached\r\n return decorate", "title": "" }, { "docid": "d139f2f09ceb418276729fe0da504389", "score": "0.69754165", "text": "def cache(self, *args, **kwargs):\n cache = [None]\n key = \" \".join(str(x) for x in args)\n \n def decorate(func):\n namespace = util.func_namespace(func)\n def cached(*args):\n if not cache[0]:\n cache[0] = self.get_cache(namespace, **kwargs)\n cache_key = key + \" \" + \" \".join(str(x) for x in args)\n def go():\n return func(*args)\n return cache[0].get_value(cache_key, createfunc=go)\n cached._arg_namespace = namespace\n return cached\n return decorate", "title": "" }, { "docid": "8d1d6a7ca1e3d385362d05a65e708189", "score": "0.6900945", "text": "def memoize(func):\n cache = {}\n def wrapper(*args, **kwargs):\n if(args, kwargs) not in cache:\n cache[(args, kwargs)] = func(*args, **kwargs)\n return 
wrapper", "title": "" }, { "docid": "499d61a92f3ee90a24b0e33dd517eb08", "score": "0.6892416", "text": "def memoize(func):\n @functools.wraps(func)\n def wrapper_memoize(*args, **kwargs):\n cache_key = args + tuple(kwargs.items())\n if cache_key not in wrapper_memoize.cache:\n wrapper_memoize.cache[cache_key] = func(*args, **kwargs)\n return wrapper_memoize.cache[cache_key]\n wrapper_memoize.cache = dict()\n return wrapper_memoize", "title": "" }, { "docid": "6ae3187913633968c9156be956bc8290", "score": "0.67603076", "text": "def memoized_with_single_argument(cache):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(arg):\n if arg in cache:\n return cache[arg]\n res = func(arg)\n cache[arg] = res\n return res\n return wrapper\n return decorator", "title": "" }, { "docid": "b7248556d61e880558ae4f16c7cf5807", "score": "0.675281", "text": "def memoize(func):\n cache = {}\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs): # pylint: disable=missing-docstring\n key = \"%r%r\" % (args, kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n return wrapper", "title": "" }, { "docid": "3ffa4ddb761d6af4d8872480ab649a73", "score": "0.671054", "text": "def memoize_generator(func):\n cache = Cache()\n\n def wrapped_func(*args, **kwargs):\n# params = (args, kwargs)\n params = (args, tuple(sorted(kwargs.items())))\n\n # Look if cached\n try:\n cached = cache[params]\n\n # Not cached, exec and store it\n except KeyError:\n cached = []\n\n for item in func(*args, **kwargs):\n cached.append(item)\n yield item\n\n cache[params] = cached\n\n # Cached, yield its items\n else:\n for item in cached:\n yield item\n\n return wrapped_func", "title": "" }, { "docid": "1534bb0192e6d63018d87df7421ab4c2", "score": "0.6701436", "text": "def cache(func):\n _cache = {}\n def inner(*args, **kwargs):\n key = (args, frozenset(kwargs.items()))\n if key in _cache:\n return _cache[key]\n _cache[key] = out = func(*args, **kwargs)\n return out\n return functools.wraps(func)(inner)", "title": "" }, { "docid": "a85a5a0b9342c6b9e9a7f47f6dca1925", "score": "0.6680324", "text": "def memoized(f):\n\n\tcache = f.cache = {}\n\n\t@wraps(f)\n\tdef wrapper(*args, **kwargs):\n\t\tkey = (args, frozenset(kwargs.items()))\n\n\t\tif key in cache:\n\t\t\treturn cache[key]\n\n\t\tresult = f(*args, **kwargs)\n\t\tcache[key] = result\n\n\t\treturn result\n\n\treturn wrapper", "title": "" }, { "docid": "8452aa3a0179591d4ab5a800982b440e", "score": "0.664469", "text": "def memoised(func, *args, **kw):\n if not hasattr(func, '_cache'):\n func._cache = dict()\n key = args\n if kw: # This is an unpleasant hack.\n key = (args, tuple(list(kw.items())))\n d = func._cache\n if key in d:\n return d[key]\n result = func(*args, **kw)\n d[key] = result\n return result", "title": "" }, { "docid": "ee981ad4dbd7c961fa674b89028f2a47", "score": "0.66319996", "text": "def cache(func):\n @functools.wraps(func)\n def wrapper_cache(*args, **kwargs):\n cache_key = args + tuple(kwargs.items())\n if cache_key not in wrapper_cache.cache:\n wrapper_cache.cache[cache_key] = func(*args, **kwargs)\n return wrapper_cache.cache[cache_key]\n wrapper_cache.cache = dict()\n return wrapper_cache", "title": "" }, { "docid": "ee981ad4dbd7c961fa674b89028f2a47", "score": "0.66319996", "text": "def cache(func):\n @functools.wraps(func)\n def wrapper_cache(*args, **kwargs):\n cache_key = args + tuple(kwargs.items())\n if cache_key not in wrapper_cache.cache:\n wrapper_cache.cache[cache_key] = func(*args, **kwargs)\n return 
wrapper_cache.cache[cache_key]\n wrapper_cache.cache = dict()\n return wrapper_cache", "title": "" }, { "docid": "ee981ad4dbd7c961fa674b89028f2a47", "score": "0.66319996", "text": "def cache(func):\n @functools.wraps(func)\n def wrapper_cache(*args, **kwargs):\n cache_key = args + tuple(kwargs.items())\n if cache_key not in wrapper_cache.cache:\n wrapper_cache.cache[cache_key] = func(*args, **kwargs)\n return wrapper_cache.cache[cache_key]\n wrapper_cache.cache = dict()\n return wrapper_cache", "title": "" }, { "docid": "0f7f2745488bc359d9e6e08cc60384dd", "score": "0.6628002", "text": "def memoize(func):\n cache = {}\n @functools.wraps(func)\n def memoizer(*args, **kwargs):\n key = str(args) + str(kwargs)\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n memoizer.cache = cache\n return memoizer", "title": "" }, { "docid": "a55b513a9b03c5ff665d2817a2cccba7", "score": "0.65665054", "text": "def cached(func):\n cache = {}\n\n @wraps(func)\n def wrapper(*args):\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n\n return wrapper", "title": "" }, { "docid": "de0fb7f260fb1176abf433da341930bc", "score": "0.6552604", "text": "def memoized(func):\n\n cache = {}\n\n @functools.wraps(func)\n def memoizer(*args):\n if not isinstance(args, collections.Hashable):\n return func(*args)\n if args not in cache:\n cache[args] = func(*args)\n return cache[args]\n return memoizer", "title": "" }, { "docid": "972f19280476d9af4a9cbe985416dfbb", "score": "0.6544149", "text": "def memoize(func):\n cache = {}\n\n @functools.wraps(func)\n def memo_wrapper(*args, **kwargs):\n nonlocal cache\n # Create unique identifier from args and kwargs passed to the function\n # Probably overkill for the use cases in which this is applied ... 
but hey ...\n # it's cool!!\n key = hash(args + tuple(kwargs.items()))\n if key not in cache:\n cache[key] = func(*args, **kwargs)\n return cache[key]\n\n return memo_wrapper", "title": "" }, { "docid": "faeb62ad0883ec943989689613d4780e", "score": "0.6538905", "text": "def __call__(self, *args, **kw):\n #=======================================================================\n # !WARNING CACHE OFFSWITCH!\n # return self.operation(*args, **kw)\n #=======================================================================\n\n # 1: Check whether we have forced recompute arguments:\n if len(self.force_kwargs) != 0:\n for k in self.force_kwargs:\n if k in kw and kw[k] is not None:\n return self.operation(*args, **kw)\n\n # 2: prepare_cache_id and get the unique self.id string for this call\n inputs = self.combine_inputs(args, kw, self.ignore_args)\n cache_id = self.prepare_cache_id(inputs)\n # 2: if anything is not cachable, we will just return the operation, without caching\n if reduce(lambda a, b: a or (not (isinstance(b, Observable) or b is None or isinstance(b,int))), inputs, False):\n# print 'WARNING: '+self.operation.__name__ + ' not cacheable!'\n# print [not (isinstance(b, Observable)) for b in inputs]\n return self.operation(*args, **kw)\n # 3&4: check whether this cache_id has been cached, then has it changed?\n try:\n if(self.inputs_changed[cache_id]):\n # 4: This happens, when elements have changed for this cache self.id\n self.inputs_changed[cache_id] = False\n self.cached_outputs[cache_id] = self.operation(*args, **kw)\n except KeyError:\n # 3: This is when we never saw this chache_id:\n self.ensure_cache_length(cache_id)\n self.add_to_cache(cache_id, inputs, self.operation(*args, **kw))\n except:\n self.reset()\n raise\n # 5: We have seen this cache_id and it is cached:\n return self.cached_outputs[cache_id]", "title": "" }, { "docid": "a3f5d03dbb32cf1235d62fb78d779da3", "score": "0.65253526", "text": "def memoize(func):\n func._result_cache = {} # pylint: disable-msg=W0212\n\n @wraps(func)\n def _memoized_func(*args, **kwargs):\n key = (args, tuple(sorted(kwargs.items())))\n if key in func._result_cache: # pylint: disable-msg=W0212\n return func._result_cache[key] # pylint: disable-msg=W0212\n else:\n result = func(*args, **kwargs)\n if isinstance(result, GeneratorType) or not isinstance(result, Hashable):\n raise TypeError(\"Can't memoize a generator or non-hashable object!\")\n func._result_cache[key] = result # pylint: disable-msg=W0212\n return result\n\n return _memoized_func", "title": "" }, { "docid": "27e28443747bd147bd8452ace0c74adc", "score": "0.6524906", "text": "def memoizeWrapper(*args, **kwargs):\n\n\t\t\tif kwargs:\n\t\t\t\tkey = args, frozenset(kwargs.iteritems())\n\t\t\telse:\n\t\t\t\tkey = args\n\n\t\t\tif key not in cache:\n\t\t\t\tcache[key] = object(*args, **kwargs)\n\n\t\t\treturn cache[key]", "title": "" }, { "docid": "773955790a579a8524d45d0847d9407b", "score": "0.6524903", "text": "def cached(func):\n\n func.cache = {}\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if kwargs:\n key = args, frozenset(kwargs.items())\n else:\n key = args\n if key not in func.cache:\n func.cache[key] = func(*args, **kwargs)\n return func.cache[key]\n\n return wrapper", "title": "" }, { "docid": "0b669b39d9dca80ca39695a3d21758ae", "score": "0.6476411", "text": "def cacheit(f):\n\n cache_dict = {}\n @wraps(f)\n def _cacheit(*args, **kwargs):\n if args:\n key = ''.join(map(str, args))\n if kwargs:\n key += ''.join(map(str, d.items()))\n if cache_dict.get(key):\n 
return cache_dict[key]\n else:\n cache_dict[key] = f(*args, **kwargs)\n return cache_dict[key]\n else:\n return f(*args, **kwargs)\n\n return _cacheit", "title": "" }, { "docid": "f551851d5a7ebfa05d92460f58b59f08", "score": "0.6474881", "text": "def memoizer(self, *args, **kwargs):\r\n if args not in self.mem:\r\n self.mem[args] = self.func(*args, **kwargs)\r\n return self.mem[args]", "title": "" }, { "docid": "585bde17be988c731c027316c4d7cd88", "score": "0.6469559", "text": "def memoize(f):\n\n cache = {}\n\n @functools.wraps(f)\n def memf(*args, **kwargs):\n fkwargs = frozenset(kwargs.iteritems())\n if (args, fkwargs) not in cache:\n cache[args, fkwargs] = f(*args, **kwargs)\n return cache[args, fkwargs]\n\n return memf", "title": "" }, { "docid": "911b57680ddad486ab5323acdc0cfb22", "score": "0.6462174", "text": "def auto_memoize(func):\n\n @wraps(func)\n def wrapper(*args):\n inst = args[0]\n inst._memoized_values = getattr(inst, '_memoized_values', {})\n key = (func, args[1:])\n if key not in inst._memoized_values:\n inst._memoized_values[key] = func(*args)\n return inst._memoized_values[key]\n return wrapper", "title": "" }, { "docid": "d31504ff3423e8fef357bf01239211bf", "score": "0.64500695", "text": "def memo(f):\n f.cache = {}\n @wraps(f)\n def wrapper(*args):\n try:\n return f.cache[args]\n except KeyError:\n f.cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return wrapper", "title": "" }, { "docid": "36dae54b1e9cb11a50bc8b70c12020cb", "score": "0.6434075", "text": "def memoize(f):\n cache = {}\n def decorated(*args):\n key = marshal.dumps(args)\n if key in cache:\n return cache[key]\n r = f(*args)\n cache[key] = r\n return r\n return decorated", "title": "" }, { "docid": "c02471b0ac55a26ccb5f6a381f675409", "score": "0.6412828", "text": "def memoize(fn):\n cache = {}\n def memoized(*args):\n if args in cache:\n return cache[args]\n result = fn(*args)\n cache[args] = result\n return result\n return memoized", "title": "" }, { "docid": "c02471b0ac55a26ccb5f6a381f675409", "score": "0.6412828", "text": "def memoize(fn):\n cache = {}\n def memoized(*args):\n if args in cache:\n return cache[args]\n result = fn(*args)\n cache[args] = result\n return result\n return memoized", "title": "" }, { "docid": "11b1d2193624d9ab60bde33cc1dd0037", "score": "0.6376908", "text": "def cache(func):\n func._cache = {}\n\n def wrapper(value):\n if value not in func._cache:\n result = func(value)\n try:\n func._cache[value] = result\n except TypeError: # Unhashable type\n return result\n return func._cache[value]\n return wrapper", "title": "" }, { "docid": "58660754b5d9e2226b573636dbd5d851", "score": "0.6360546", "text": "def memo(f):\n cache = {}\n def new_f(*args, **kwargs):\n ids = tuple(map(id, args)) + tuple(map(id, kwargs))\n if ids not in cache:\n cache[ids] = f(*args, **kwargs)\n return cache[ids]\n return new_f", "title": "" }, { "docid": "8457333c1985c1cd396fb1f1bea6ab13", "score": "0.6359405", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n result = f(*args)\n cache[args] = result\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(*args)\n _f.cache = cache\n return _f", "title": "" }, { "docid": "28ba955404574e0483ca082892c9b7f5", "score": "0.635339", "text": "def _memoize(key: CacheKeyType, fn: Any, *args: Any, **kwargs: Any) -> ArrayType:\n cache = get_sharing_cache()\n if key in cache:\n return cache[key]\n result 
= fn(*args, **kwargs)\n cache[key] = result\n return result", "title": "" }, { "docid": "4913976e55d2e5edf3f027825426135b", "score": "0.63241535", "text": "def _memoizeAux(func, cache, args, kwargs, limit):\n try:\n return cache[args, tuple(kwargs.items())]\n except KeyError:\n value = func(*args, **kwargs)\n if type(value) == str:\n value = intern(value)\n cache[args, tuple(kwargs.items())] = value\n if limit and len(cache) > limit:\n cache.popitem()\n return value", "title": "" }, { "docid": "51c4227eea8383f10516fe1ab5c991b3", "score": "0.62991333", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return (cache[args])\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "2b00b1ac558ad06d8d2aefb1888f8385", "score": "0.62741417", "text": "def wrapper(self,\n *args,\n **kwargs):\n\n self.__dict__[CLASS_CACHED_RESULTS] = {}\n\n return func(self,\n *args,\n **kwargs)", "title": "" }, { "docid": "ae16ac727df53292ab6576e365deeaa4", "score": "0.62670505", "text": "def memo_one(fn):\n cache = {}\n\n def wrapper(self, arg):\n value = cache.get(arg)\n if value is not None:\n return value\n value = fn(self, arg)\n cache[arg] = value\n return value\n return wrapper", "title": "" }, { "docid": "88248d00fe882d53ad81b05276417592", "score": "0.62646246", "text": "def memo(f):\n\tcache={}\n\tdef _f(*args):\n\t\ttry:\n\t\t\treturn cache[args]\n\t\texcept KeyError:\n\t\t\tcache[args]=result=f(*args)\n\t\t\treturn cache[args]\n\t\texcept TypeError:\n\t\t\treturn f(args)\n\treturn _f", "title": "" }, { "docid": "f41c78463f61d24283f7cc6d36f4d010", "score": "0.6256817", "text": "def __call__(self, graph, *args):\n if graph not in self.cache:\n self.cache[graph] = {}\n cache = self.cache[graph]\n if args not in cache:\n cache[args] = self.compute(graph, *args)\n return cache[args]", "title": "" }, { "docid": "c9f253118d90ce15a653c47e361a1e0b", "score": "0.62447894", "text": "def memoize(f):\n memo = {}\n def helper(*args):\n key = tuple(args)\n if key not in memo:\n memo[key] = f(*args)\n return memo[key]\n return helper", "title": "" }, { "docid": "2b03c3ef99212c194aae73787c8224ea", "score": "0.6228443", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "2b03c3ef99212c194aae73787c8224ea", "score": "0.6228443", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "2b03c3ef99212c194aae73787c8224ea", "score": "0.6228443", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "2b03c3ef99212c194aae73787c8224ea", "score": "0.6228443", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": 
"2b03c3ef99212c194aae73787c8224ea", "score": "0.6228443", "text": "def memo(f):\n cache = {}\n def _f(*args):\n try:\n return cache[args]\n except KeyError:\n cache[args] = result = f(*args)\n return result\n except TypeError:\n # some element of args can't be a dict key\n return f(args)\n return _f", "title": "" }, { "docid": "78ba9a2ebc43bcac4b2ff959398f1270", "score": "0.6216067", "text": "def memoize(unsafe=False):\n results = {}\n def wrapper(func):\n def inner_f(*n):\n if len(n) > 1 and not unsafe:\n raise Exception(\"Using memoizer with several arguments is\\\n not safe. Set `unsafe=True` if want to use anyway.\")\n else:\n _n = n[0]\n if _n not in results:\n results[_n] = func(*n)\n return results[_n]\n return inner_f\n return wrapper", "title": "" }, { "docid": "134515e09419babb7058088de30bb915", "score": "0.6203237", "text": "def memoize(function):\n memo = {}\n\n def wrapper(*args):\n if args in memo:\n return memo[args]\n else:\n rv = function(*args)\n memo[args] = rv\n return rv\n\n return wrapper", "title": "" }, { "docid": "0df7bd8024e26d2f8530591a4795cacf", "score": "0.6185709", "text": "def memoize(func) :\n\tdef check_square(*args) :\n\t\t\"\"\" Finds the square \"\"\"\n\t\tif not hasattr(func,'results') : \n\t\t\tfunc.results = {}\n\t\tif args not in func.results : \n\t\t\tfunc.results[args] = func(*args)\n\t\t\tprint('Not From Cache')\n\t\treturn func.results[args]\n\treturn check_square", "title": "" }, { "docid": "db801c29401bf27135de82c3f4d41a54", "score": "0.61559767", "text": "def memoizef(f):\n cache = {}\n def deco(*args):\n key = tuple(args)\n if key in cache:\n return cache[key]\n r = f(*args)\n cache[key] = r\n return r\n return deco", "title": "" }, { "docid": "36ce7345162949495ccf86aa0437e40a", "score": "0.6151545", "text": "def memo(f):\n cache = {} # a dictionary\n\n def _f(*args):\n try:\n return cache[args]\n except KeyError: # not computed previously\n result = f(*args)\n cache[args] = result\n return result\n except TypeError: # the args has an unexpected type such as a list (mutable is not allowed)\n return f(*args)\n _f.cache = cache # WHY ???\n print(\"id in memo %s\" %id(_f))\n return _f", "title": "" }, { "docid": "c652bc49bb79c2a2bfe3948e293e70cb", "score": "0.6149608", "text": "def memoizemethod(method):\n\n @wraps(method)\n def _wrapper(self, *args, **kwargs):\n # NOTE: a __dict__ check is performed here rather than using the\n # built-in hasattr function because hasattr will look up to an object's\n # class if the attr is not directly found in the object's dict. That's\n # bad for this if the class itself has a memoized classmethod for\n # example that has been called before the memoized instance method,\n # then the instance method will use the class's result cache, causing\n # its results to be globally stored rather than on a per instance\n # basis.\n if '_memoized_results' not in self.__dict__:\n self._memoized_results = {}\n memoized_results = self._memoized_results\n\n key = (method.__name__, args, tuple(sorted(kwargs.items())))\n if key in memoized_results:\n return memoized_results[key]\n else:\n try:\n result = method(self, *args, **kwargs)\n except KeyError as e:\n if '__wrapped__' in str(e):\n result = None # is this the right thing to do? 
happened during py3 conversion\n else:\n raise\n if isinstance(result, GeneratorType) or not isinstance(result, Hashable):\n raise TypeError(\"Can't memoize a generator or non-hashable object!\")\n return memoized_results.setdefault(key, result)\n\n return _wrapper", "title": "" }, { "docid": "02da29e0725ea2b7abda4459fed8a518", "score": "0.6147785", "text": "def memoize(f):\n cache = {}\n\n @functools.wraps(f)\n def memoizing_wrapper(*args):\n result = cache.get(args, _SENTINEL)\n if result is _SENTINEL:\n result = cache[args] = f(*args)\n return result\n\n return memoizing_wrapper", "title": "" }, { "docid": "ec042320a23f4e5a3c6b8ea7d355c3e3", "score": "0.6127601", "text": "def compute_once(func):\n\n @functools.wraps(func)\n def _compute_once(self, *args, **kwargs):\n cache_name = func.__name__ + '__cache__'\n if not hasattr(self, cache_name):\n # On first call, the cache is created and stored in self\n setattr(self, cache_name, {})\n cache = getattr(self, cache_name)\n key = _cache_key(args, kwargs)\n if key in cache:\n return cache[key]\n else:\n result = func(self, *args, **kwargs)\n cache[key] = result\n return result\n return _compute_once", "title": "" }, { "docid": "3847781159128899bc7ac838c40234ab", "score": "0.6120401", "text": "def cache(self, func, **cache_args):\n return self._mem(func)", "title": "" }, { "docid": "f6427e93119642fa92709444c980a8b9", "score": "0.6111115", "text": "def memoize(fn: Callable, key=eq) -> Callable:\n cache: List[Any] = []\n\n def memoized_fn(argument):\n cached_result = find(cache, lambda cacheItem: key(cacheItem[0], argument))\n if cached_result is not None:\n return cached_result[1]\n fn_result = fn(argument)\n cache.append((argument, fn_result))\n\n return fn_result\n\n return memoized_fn", "title": "" }, { "docid": "9e6d25c9d5af389afb6764d6a0ab5c97", "score": "0.6095858", "text": "def temporary_cache(func):\n cache = {}\n not_exist = object()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n result = cache.get(args, not_exist)\n if result is not_exist:\n result = func(*args, **kwargs)\n cache[args] = result\n return result\n\n return wrapper", "title": "" }, { "docid": "349682397263bc20b4b688913880e5bc", "score": "0.606438", "text": "def _cache_on_arguments(cache_name, expiration_time=None):\n def decorator(fn):\n key_generator = _atramhasis_key_generator(None, fn)\n\n @functools.wraps(fn)\n def wrapped(*args, **kwargs):\n self = args[0]\n key = key_generator(*args, **kwargs)\n return self.caches[cache_name].get_or_create(\n key, fn, expiration_time, _dont_cache_false, (args, kwargs)\n )\n return wrapped\n return decorator", "title": "" }, { "docid": "b018c0d23eedb6804243d6713d1f0cf8", "score": "0.6050181", "text": "def cache_result(function):\n # NOTE: We're cheating a little here, by using a mutable type (a list),\n # we're able to read and update the value from within in inline\n # wrapper method. 
If we used an immutable type, the assignment\n # would not work as we want.\n cache = []\n\n def wrapper(cls_instance):\n if not cache:\n cache.append(function(cls_instance))\n return cache[0]\n return wrapper", "title": "" }, { "docid": "01a17ffa3301478c6c2fb9cbfae81dfb", "score": "0.6045117", "text": "def cache(f):\n\n @wraps(f)\n def wrapper(self, *args, **kwargs):\n with _data_lock:\n try:\n self._cache\n except AttributeError:\n self._cache = {}\n\n # function call key adapted from http://stackoverflow.com/a/10220908/1236542\n key = (f,) + args + (_kwd_mark,) + tuple(sorted(kwargs.items()))\n if key in self._cache:\n return self._cache[key]\n else:\n from pyspark.rdd import RDD\n from pyspark.sql import DataFrame\n\n result = f(self, *args, **kwargs)\n self._cache[key] = result\n\n if isinstance(result, RDD):\n st = result.getStorageLevel()\n if not st.useDisk and not st.useMemory and not st.useOffHeap:\n raise ValueError('An RDD returned by a @cache function should be persisted with .cache() or .persist().')\n elif isinstance(result, DataFrame):\n st = result.storageLevel\n if not st.useDisk and not st.useMemory and not st.useOffHeap:\n raise ValueError('A DataFrame returned by a @cache function should be persisted with .cache() or .persist().')\n\n return result\n\n return wrapper", "title": "" }, { "docid": "22b5236223158e0ca7299d543fc16801", "score": "0.60080075", "text": "def memoize(f):\n class memodict(dict):\n \"\"\"\n Memoization decorator class using dictionary\n \"\"\"\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "title": "" }, { "docid": "71834f9ff867309a7a30e6f95afbf0a2", "score": "0.60037106", "text": "def arg_wrapper(func):\n cache_id = get_cache_id(name=func.__name__)\n\n timer = Timeout(seconds=timeout,\n start_in_an_expired_state=True) if timeout > 0 else None\n\n @functools.wraps(func)\n def wrapper(self,\n *args,\n **kwargs):\n\n \"\"\" Simple run time cache for the function result.\n\n :param self: As this is for class methods we have to add self.\n :param args: Args for the function.\n :param kwargs: Kwargs for the function. 
Additional function kwargs made available by decorator:\n refresh - setting to True will force this cached result to refresh itself.\n :return: The result of the function.\n \"\"\"\n\n # Check refresh param\n refresh = kwargs.get('refresh', False)\n\n # Ensure refresh is not still in kwargs when passed to function or cache_key generated\n if 'refresh' in kwargs:\n del kwargs['refresh']\n\n cache_key = get_cache_key(cache_id,\n *args,\n **kwargs)\n action = u'Caching'\n\n # Check whether we are using a timeout\n if timer is not None:\n # When the timer expires refresh the cache and restart the timer.\n if timer.expired:\n refresh = True\n timer.restart()\n\n if CLASS_CACHED_RESULTS not in self.__dict__:\n # Cached results dict does not exist (first cache on this instance)\n self.__dict__[CLASS_CACHED_RESULTS] = {}\n\n try:\n # Attempt to retrieve cached value\n cached_result = self.__dict__[CLASS_CACHED_RESULTS][cache_key]\n action = u'Refreshing'\n\n except KeyError:\n # Value has not been cached yet\n pass\n\n else:\n if not refresh:\n # logging.debug(u'Returning cached result for {id}'.format(id=cache_key))\n return cached_result\n\n logging.debug(u'{action} result for {id}'.format(action=action,\n id=cache_key))\n\n self.__dict__[CLASS_CACHED_RESULTS][cache_key] = func(self,\n *args,\n **kwargs)\n\n return self.__dict__[CLASS_CACHED_RESULTS][cache_key]\n\n # Return the decorated function\n return wrapper", "title": "" }, { "docid": "29efa8517f7757aecb1987dc78f1ec6d", "score": "0.5968973", "text": "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n return self[args]\n def __missing__(self, key):\n ret = self[key] = self.f(*key)\n return ret\n return memodict(f)", "title": "" }, { "docid": "bd342ee8ff752b658c55411e0990b77c", "score": "0.59676903", "text": "def memoize(self, func, key=memo_key):\n def cached_func(*args, **kwargs):\n k = (func, key(args, kwargs))\n\n result = self.get(k)\n if result is None:\n start = time.time()\n result = func(*args, **kwargs)\n end = time.time()\n\n nb = nbytes(result)\n\n self.put(k, result, cost(nb, end - start), nbytes=nb)\n return result\n return cached_func", "title": "" }, { "docid": "388ac600d7dbe12ea07115082648ffcc", "score": "0.5962657", "text": "def wrapper(self,\n *args,\n **kwargs):\n\n # Check refresh param\n refresh = kwargs.get('refresh', False)\n\n # Ensure refresh is not still in kwargs when passed to function or cache_key generated\n if 'refresh' in kwargs:\n del kwargs['refresh']\n\n cache_key = get_cache_key(cache_id,\n *args,\n **kwargs)\n action = u'Caching'\n\n # Check whether we are using a timeout\n if timer is not None:\n # When the timer expires refresh the cache and restart the timer.\n if timer.expired:\n refresh = True\n timer.restart()\n\n if CLASS_CACHED_RESULTS not in self.__dict__:\n # Cached results dict does not exist (first cache on this instance)\n self.__dict__[CLASS_CACHED_RESULTS] = {}\n\n try:\n # Attempt to retrieve cached value\n cached_result = self.__dict__[CLASS_CACHED_RESULTS][cache_key]\n action = u'Refreshing'\n\n except KeyError:\n # Value has not been cached yet\n pass\n\n else:\n if not refresh:\n # logging.debug(u'Returning cached result for {id}'.format(id=cache_key))\n return cached_result\n\n logging.debug(u'{action} result for {id}'.format(action=action,\n id=cache_key))\n\n self.__dict__[CLASS_CACHED_RESULTS][cache_key] = func(self,\n *args,\n **kwargs)\n\n return self.__dict__[CLASS_CACHED_RESULTS][cache_key]", "title": "" }, { "docid": 
"9b41ece0dfab515c9532a3f267b56f27", "score": "0.59384334", "text": "def memoize(f):\n class memodict(dict):\n def __init__(self, f):\n self.f = f\n def __call__(self, *args):\n\n key = str([str(x) for x in args ])\n if self.has_key( key ):\n return self[ key ]\n else:\n rval = self.f( *args )\n self[key] = rval\n return rval\n # return self[args] # XXX more elegant, but numpy arrays are unhashable\n# def __missing__(self, key):\n# ret = self[key] = self.f( *args )\n# return ret\n\n return memodict(f)", "title": "" }, { "docid": "2f89e1c678b6596990fcef8dc1d8f054", "score": "0.59004205", "text": "def memoDeco(func):\n\n cache = {}\n\n def mem(*args, **kwargs):\n\n # Check if we alreasy have a solution stored\n if args in cache:\n return cache[args]\n\n # If not, calculate it and add to cache\n else:\n\n # Calculate the result\n result = func(*args, **kwargs)\n\n # Store the result to cache\n cache[args] = result\n\n return result\n\n return mem", "title": "" }, { "docid": "d18a5c23a97702558773251e15c9d30a", "score": "0.5898249", "text": "def memoize(func):\n memo = {}\n\n @functools.wraps(func)\n def wrapper(key):\n if key in memo:\n return memo[key]\n else:\n memo[key] = func(key)\n return memo[key]\n return wrapper", "title": "" }, { "docid": "1a4f98c3bbaac5e945161177b271082a", "score": "0.5889989", "text": "def cache_arguments():\n yield from ()", "title": "" }, { "docid": "08b1a265c70053c02b760b6d0bc35c0e", "score": "0.588038", "text": "def cached_generator(function):\r\n cache_variable = '_cached_' + function.__name__\r\n @wraps(function)\r\n def function_wrapper(obj, *args, **kwargs):\r\n # values are cached in a list stored in the object\r\n try:\r\n for item in getattr(obj, cache_variable):\r\n yield item\r\n except AttributeError:\r\n setattr(obj, cache_variable, [])\r\n cache = getattr(obj, cache_variable)\r\n for item in function(obj, *args, **kwargs):\r\n cache.append(item)\r\n yield item\r\n return function_wrapper", "title": "" }, { "docid": "e5172a9ea3faa4e281bf96fc808d47ae", "score": "0.5875158", "text": "def apply(self, function, *args, **kwargs):\n from conx import Network\n results = []\n for (category, exp_name) in self.results:\n if exp_name in self.cache:\n results.append(function(category, self.cache[exp_name], *args, **kwargs))\n else:\n results.append(function(category, exp_name, *args, **kwargs))\n return results", "title": "" }, { "docid": "88cb10f92204a24755b55fffd990030f", "score": "0.5835588", "text": "def cached(func: Callable[..., Tensor]):\n @wraps(func)\n def cache_func(*args, **kwargs):\n rnd_state = RandContext()\n with torch.no_grad():\n reps_no_grad = func(*args, **kwargs)\n leaf_reps = reps_no_grad.detach().requires_grad_()\n\n @wraps(func)\n def forward_backward_func(cache_reps: Tensor):\n with rnd_state:\n reps = func(*args, **kwargs)\n surrogate = torch.dot(reps.flatten(), cache_reps.grad.flatten())\n surrogate.backward()\n return leaf_reps, forward_backward_func\n return cache_func", "title": "" }, { "docid": "98bb0ef672af320d21aec1d0641f54e1", "score": "0.5827723", "text": "def cachedmethod(cache, key=keys.methodkey, lock=None):\n\n def decorator(method):\n if lock is None:\n\n def wrapper(self, *args, **kwargs):\n c = cache(self)\n if c is None:\n return method(self, *args, **kwargs)\n k = key(self, *args, **kwargs)\n try:\n return c[k]\n except KeyError:\n pass # key not found\n v = method(self, *args, **kwargs)\n try:\n c[k] = v\n except ValueError:\n pass # value too large\n return v\n\n def clear(self):\n c = cache(self)\n if c is not None:\n 
c.clear()\n\n else:\n\n def wrapper(self, *args, **kwargs):\n c = cache(self)\n if c is None:\n return method(self, *args, **kwargs)\n k = key(self, *args, **kwargs)\n try:\n with lock(self):\n return c[k]\n except KeyError:\n pass # key not found\n v = method(self, *args, **kwargs)\n # in case of a race, prefer the item already in the cache\n try:\n with lock(self):\n return c.setdefault(k, v)\n except ValueError:\n return v # value too large\n\n def clear(self):\n c = cache(self)\n if c is not None:\n with lock(self):\n c.clear()\n\n wrapper.cache = cache\n wrapper.cache_key = key\n wrapper.cache_lock = lock\n wrapper.cache_clear = clear\n\n return functools.update_wrapper(wrapper, method)\n\n return decorator", "title": "" }, { "docid": "f31b19487aae2119cf21812c1b197a76", "score": "0.57930833", "text": "def __call__(self, func, instance, args, kwargs):\n if asyncio.iscoroutinefunction(func):\n raise TypeError('Function decorated with LyricalCache must not be async.')\n\n if self.func is None:\n self._initialize_decorated(func)\n\n key = self.form_key(*args, **kwargs)\n\n if key in self:\n value = self[key]\n self.move_to_end(key)\n return value\n\n value = self.func(*args, **kwargs)\n self[key] = value\n\n if len(self) > self.maxsize:\n self.popitem(last=False)\n\n return value", "title": "" }, { "docid": "df17138d3745a9b4f70e7dd6ee10df70", "score": "0.57880414", "text": "def memoizeDecorator(object):\n\n\t\t@functools.wraps(object)\n\t\tdef memoizeWrapper(*args, **kwargs):\n\t\t\t\"\"\"\n\t\t\tThis decorator is used for object memoization.\n\t\n\t\t\t:param \\*args: Arguments. ( \\* )\n\t\t\t:param \\*\\*kwargs: Keywords arguments. ( \\*\\* )\n\t\t\t:return: Object. ( Object )\n\t\t\t\"\"\"\n\n\t\t\tif kwargs:\n\t\t\t\tkey = args, frozenset(kwargs.iteritems())\n\t\t\telse:\n\t\t\t\tkey = args\n\n\t\t\tif key not in cache:\n\t\t\t\tcache[key] = object(*args, **kwargs)\n\n\t\t\treturn cache[key]\n\n\t\treturn memoizeWrapper", "title": "" }, { "docid": "5e90c958f077fb39772986a56b441a32", "score": "0.57781976", "text": "def memoize(f):\n memo = {}\n\n def helper(*x):\n if x not in memo:\n memo[x] = f(*x)\n return memo[x]\n\n return helper", "title": "" }, { "docid": "29415b8f8ed06335dac8e0d92a6987c9", "score": "0.57741416", "text": "def cache_on_instance(func):\n @wraps(func)\n def wrapper(instance, n):\n cache = getattr(instance, \"{}_cache\".format(func.__name__))\n if n not in cache:\n output = func(instance, n)\n cache[n] = output\n return output\n else:\n return func(instance, n)\n return wrapper", "title": "" }, { "docid": "f5b3f23239c4d8e7ee879495df043eb1", "score": "0.5742042", "text": "def memoize(func):\n\tclass memodict(dict):\n\t\tdef __missing__(self, key):\n\t\t\tret = self[key] = func(key)\n\t\t\treturn ret\n\treturn memodict().__getitem__", "title": "" }, { "docid": "7ddf55455999a499584e698534074d84", "score": "0.5739012", "text": "def memoize(cache=None):\n\n\tif cache is None:\n\t\tcache = {}\n\n\tdef memoizeDecorator(object):\n\t\t\"\"\"\n\t\tThis decorator is used for object memoization.\n\n\t\t:param object: Object to decorate. ( Object )\n\t\t:return: Object. ( Object )\n\t\t\"\"\"\n\n\t\t@functools.wraps(object)\n\t\tdef memoizeWrapper(*args, **kwargs):\n\t\t\t\"\"\"\n\t\t\tThis decorator is used for object memoization.\n\t\n\t\t\t:param \\*args: Arguments. ( \\* )\n\t\t\t:param \\*\\*kwargs: Keywords arguments. ( \\*\\* )\n\t\t\t:return: Object. 
( Object )\n\t\t\t\"\"\"\n\n\t\t\tif kwargs:\n\t\t\t\tkey = args, frozenset(kwargs.iteritems())\n\t\t\telse:\n\t\t\t\tkey = args\n\n\t\t\tif key not in cache:\n\t\t\t\tcache[key] = object(*args, **kwargs)\n\n\t\t\treturn cache[key]\n\n\t\treturn memoizeWrapper\n\n\treturn memoizeDecorator", "title": "" }, { "docid": "f67b153fc2cc4398112a75020477b0ee", "score": "0.57257485", "text": "def memoize(func):\n return functools.lru_cache(maxsize=None)(func)", "title": "" }, { "docid": "784fd498f7c124330f5f70c837239f48", "score": "0.5720474", "text": "def cachedproperty(func):\n values = {}\n\n @property\n @functools.wraps(func)\n def wrapper(self):\n if self not in values:\n values[self] = func(self)\n return values[self]\n\n return wrapper", "title": "" }, { "docid": "f4d7082f10d50d4b30d13701d4f51339", "score": "0.57129425", "text": "def cached(function):\r\n cache_variable = '_cached_' + function.__name__\r\n @wraps(function)\r\n def function_wrapper(obj, *args, **kwargs):\r\n # values are cached in a dict stored in the object\r\n try:\r\n cache = getattr(obj, cache_variable)\r\n except AttributeError:\r\n cache = {}\r\n setattr(obj, cache_variable, cache)\r\n args_kwargs = args + tuple(kwargs.values())\r\n try:\r\n return cache[args_kwargs]\r\n except KeyError:\r\n cache_value = function(obj, *args, **kwargs)\r\n cache[args_kwargs] = cache_value\r\n return cache_value\r\n return function_wrapper", "title": "" }, { "docid": "74e936a187ee373d8a5b81fe140f911a", "score": "0.5709387", "text": "def cache_function(length):\n def _decorated(func):\n\n def _cache(*args, **kwargs):\n\n key = sha1(str(func.__module__) + str(func.__name__)).hexdigest()\n\n # Search in cache if it exists\n if cache.has_key(key):\n\n # Get value in cache\n value = cache.get(key)\n\n # If was locked\n if value == LOCK:\n # Try until unlock\n while value == LOCK:\n time.sleep(1)\n value = cache.get(key)\n\n # Return value of cache\n return value\n\n # If not exists in cache\n else:\n # Function can be called several times before it finishes and is put into the cache,\n # then lock it to others wait it finishes.\n cache.set(key, LOCK, length)\n\n # Execute method\n result = func(*args, **kwargs)\n\n # Set in cache the result of method\n cache.set(key, result, length)\n\n return result\n\n return _cache\n return _decorated", "title": "" }, { "docid": "cd2af268224b7996e65c00ff12f7ce39", "score": "0.57045585", "text": "def memoize(h):\n\n memo = {}\n\n def helper(state, *args):\n key = (str(state), tuple(args))\n if key not in memo:\n # *args allows for heuristic functions to have a different amount of variables\n memo[key] = h(state, *args)\n return memo[key]\n\n return helper", "title": "" }, { "docid": "18c4c9db2977a22f99fde5536b218334", "score": "0.5704546", "text": "def _get_cached(self, attr: str, func: Callable[..., _T], *args, **kwargs) -> _T:\n cache = self._cache\n if attr in cache:\n return TYPE_CAST(_T, cache[attr])\n ret = cache[attr] = func(*args, **kwargs)\n return ret", "title": "" }, { "docid": "f00954a20647ad6871845039c259089b", "score": "0.5698879", "text": "def memoize(keyformat, time=0, cache_null=False):\n def decorator(fxn):\n def wrapper(self, *args, **kwargs):\n key = keygen(keyformat, *args, **kwargs)\n data = cache.get(key)\n if data is not None:\n logging.info('From memcache: %s' % key)\n return data\n logging.info('Computed value: %s' % key)\n data = fxn(self, *args, **kwargs)\n if data or cache_null:\n cache.set(key, data, time)\n return data\n wrapper.__doc__ = fxn.__doc__\n wrapper.__dict__ = 
fxn.__dict__\n return wrapper\n return decorator", "title": "" }, { "docid": "202e0985c88eb07cca855becc732ed18", "score": "0.5693016", "text": "def memoize(f):\n proxy_name = '_' + f.__name__\n @functools.wraps(f)\n def wrapper(self):\n if hasattr(self, proxy_name):\n return getattr(self, proxy_name)\n setattr(self, proxy_name, f(self))\n return getattr(self, proxy_name)\n return wrapper", "title": "" }, { "docid": "7ea947e88ddf7f1fb664b7e0730c23ef", "score": "0.56766397", "text": "def lazy(func):\n def cached(self, *args):\n name = \"_\"+func.__name__\n try:\n return getattr(self, name)\n except AttributeError:\n value = func(self, *args)\n setattr(self, name, value)\n return value\n return cached", "title": "" }, { "docid": "895593139ec0a207277ddf1cef82d004", "score": "0.5668514", "text": "def memoize(obj):\n cache = obj.cache = {}\n\n @functools.wraps(obj)\n def memoizer(*args, **kwargs):\n key = args[1]\n if key not in cache:\n cache[key] = obj(*args, **kwargs)\n return cache[key]\n\n return memoizer", "title": "" }, { "docid": "dd5dd51acd8d8393a88553ae5a8a58d2", "score": "0.566781", "text": "def call(self, cache_key: str, func: Callable, call_args: Dict, result: Any):\n raise NotImplementedError", "title": "" }, { "docid": "562f96cbae1c6a8fddb27757ab1dd642", "score": "0.56559426", "text": "def get_cached(self, call_args) -> Union[Any, NotSet]:\n if self._skip_get:\n return NOTSET\n\n self._lazy_init()\n\n for action in self._actions:\n for key in self._get_action_keys(action, call_args):\n result = action.proxy.get(key, default=NOTSET)\n if result is not NOTSET:\n log.debug(\n \"cache hit: cache=%s key=%s function=%s\", action.proxy.cache_name, key, self._func.__name__\n )\n return result\n log.debug(\n \"cache miss: cache=%s key=%s function=%s\", action.proxy.cache_name, key, self._func.__name__\n )\n return NOTSET", "title": "" }, { "docid": "db30f43b844e74d5ffe71a0bb4f8a16c", "score": "0.56379074", "text": "def memoized(fn):\n def wrapper(*args, **kwargs):\n cached = client_cache.get(fn.__name__)\n if not cached:\n client_cache[fn.__name__] = fn(*args, **kwargs)\n return client_cache[fn.__name__]\n return wrapper", "title": "" }, { "docid": "4ccc0ca1dadd49912756f9d85890baa6", "score": "0.5631318", "text": "def _cache_key(cls, *args, **kwargs):\n return tuple(args) + tuple([(k, v) for k, v in kwargs.items()])", "title": "" }, { "docid": "96213aa192b1679caa24332f5f4f695c", "score": "0.56293947", "text": "def cache(func):\n memory = {}\n\n def helper(state: List[List[int]]):\n func_key = func.__name__\n if func_key not in memory:\n memory[func_key] = {}\n\n state_key = str(state)\n if state_key not in memory:\n memory[func_key][state_key] = func(state)\n\n return memory[func_key][state_key]\n return helper", "title": "" }, { "docid": "30eedad75ea8d119c7cb22ea789d04c2", "score": "0.56173277", "text": "def cache_call(self, method, *options):\n\t\toptions_hash = self.encode(options)\n\t\tif len(options_hash) > 20:\n\t\t\toptions_hash = hashlib.new('sha1', options_hash).digest()\n\t\toptions_hash = sqlite3.Binary(options_hash)\n\n\t\twith self.cache_lock:\n\t\t\tcursor = self.cache_db.cursor()\n\t\t\tcursor.execute('SELECT return_value FROM cache WHERE method = ? 
AND options_hash = ?', (method, options_hash))\n\t\t\treturn_value = cursor.fetchone()\n\t\tif return_value:\n\t\t\treturn_value = bytes(return_value[0])\n\t\t\treturn self.decode(return_value)\n\t\treturn_value = self.call(method, *options)\n\t\tstore_return_value = sqlite3.Binary(self.encode(return_value))\n\t\twith self.cache_lock:\n\t\t\tcursor = self.cache_db.cursor()\n\t\t\tcursor.execute('INSERT INTO cache (method, options_hash, return_value) VALUES (?, ?, ?)', (method, options_hash, store_return_value))\n\t\t\tself.cache_db.commit()\n\t\treturn return_value", "title": "" }, { "docid": "11ed101c0be5a32038aaa7cc6f0ce8b9", "score": "0.5617021", "text": "def __call__(self, *args, **kwargs):\n return self.compute(args, kwargs)", "title": "" } ]
a8dbefb486e82630481dbfd5d1fefc72
takes the player's guess and adds it to the list. Takes the next guess or goes to the result page if all guesses are made.
[ { "docid": "3342969e50a1336429a4eec64af52a5c", "score": "0.0", "text": "def next(self):\n suit = Suit(self.suit_var.get())\n card = Card( self.num_var.get(),suit)\n print(\"going to next page with card: \", card.card_id())\n self.gamestate.add_answer(card)\n if self.gamestate.all_answers_done():\n self.handle_show_result_view(self.gamestate)\n return\n self.destroy()\n self.initialize()\n self.pack()", "title": "" } ]
[ { "docid": "02495b3588d0cc6606ec2b6f7f7f6269", "score": "0.6550348", "text": "def startGuessGame(self, totalNumberOfGuessGames):\n i=0\n gameList = []\n print('The Guessing Game')\n while (i<totalNumberOfGuessGames):\n choice = ''\n #generate random number\n randomNumber = randint(0, len(StringDatabase.wordsList))\n #initialize current game object\n g = Game(StringDatabase.getNextWordForGame(randomNumber), '', 0, 0, 0, '----', False)\n #skeleton logic\n while(True):\n choice = ''\n print()\n print('curent Guess : ' + g.progressWord)\n print()\n print(Guess.mainMenu)\n choice = input(\"Enter your choice: \")\n \n if(choice.lower() == 'g'):\n g.attempted = True\n if(self.checkGuess(g)):\n g.status = Guess.successStatus\n break\n elif(choice.lower() == 't'):\n g.attempted = True\n g.status = Guess.failureStatus\n self.tellAnswer(g)\n break\n elif(choice.lower() == 'l'):\n g.attempted = True\n if(self.checkLetter(g)):\n g.status = Guess.successStatus\n break\n elif(choice.lower() == 'q'):\n actualGuessWord = list(g.word)\n #scoring\n if(g.attempted):\n score = 0\n for l in [i for i, letter in enumerate(g.progressWord) if letter == '-']:\n score = score + StringDatabase.getFrequencyOfLetter(actualGuessWord[l])\n g.score = g.score - score\n g.status = Guess.failureStatus\n break\n \n gameList.append(g)\n i = i+1\n if(choice.lower() == 'q'):\n break\n\n self.calculateScore(gameList)", "title": "" }, { "docid": "d84d1ba40349dcd3bfe22429c9898406", "score": "0.6192401", "text": "def get_players_guess(self):\r\n pass", "title": "" }, { "docid": "5afd829d99f74d0d4f3f302a519459ed", "score": "0.61251336", "text": "def try_guess(self, guess_letters):\n used = []\n used.extend(self.hits)\n used.extend(self.misses)\n for guess_letter in guess_letters:\n if guess_letter in used:\n continue\n if guess_letter in self.name.lower():\n self.hits.append(guess_letter)\n used.append(guess_letter)\n for i in range(len(self.name)):\n if self.name[i].lower() == guess_letter:\n self.guessed[i] = self.name[i]\n else:\n self.misses.append(guess_letter)\n used.append(guess_letter)\n if len(self.misses) == len(hangman_art.HANGMAN_PICS) - 1:\n return", "title": "" }, { "docid": "e7b16b9cc74e4072eb8065c691ad852c", "score": "0.61174023", "text": "def guess(self, guess):\n i_am_guessing_statement = \"Okay, I'm guessing {}.\"\n long_names = self._pretty_print_pattern(guess)\n print termcolors.OKGREEN + i_am_guessing_statement.format(guess) + \\\n termcolors.ENDC\n self.say(i_am_guessing_statement.format(long_names))\n\n self.past_guesses.append(guess)\n\n if self.autoplay_solution:\n result = self._compute_red_white(guess, self.autoplay_solution)\n red = result['red']\n white = result['white']\n print \"(My guess returned {} red, {} white.)\".format(red, white)\n\n else:\n red = self._prompt_for_integer(\"How many are the correct color in \" +\n \"the correct position (red): \")\n white = self._prompt_for_integer(\"How many are the correct color but \" +\n \"in the wrong position (white): \")\n\n if red == 4:\n self.say('Yay, I am a winner!')\n print termcolors.HEADER + \\\n \"Yay, I won in {0} guesses!\".format(len(self.past_guesses))\n print self.serialize() + ' ✔' + termcolors.ENDC\n sys.exit()\n\n self.filter_possibilities(guess, red, white)\n self.select_next_guess()", "title": "" }, { "docid": "9c53148e6ca749e88053428baf738887", "score": "0.6086226", "text": "def add_guess(self, temp_guess):\n allowed_guess_input = set(\"abcdefghijklmnopqrstuvwxyz\")\n if str(temp_guess).isdigit():\n messagebox.showinfo(\"Invalid 
Guess\", \"Please only guess letters!\")\n raise InvalidGuess\n else:\n a_guess = temp_guess.lower()\n if allowed_guess_input.issuperset(a_guess):\n self.guessed_list.append(a_guess)\n else:\n raise InvalidGuess", "title": "" }, { "docid": "e8afdaae6dcffe9176c25f9ce6da65c8", "score": "0.6030484", "text": "def update_guess_lists(letter_guessed, mystery_word, correct_guesses, incorrect_guesses):\n\n if letter_guessed in mystery_word:\n correct_guesses.append(letter_guessed)\n\n else:\n incorrect_guesses.append(letter_guessed)", "title": "" }, { "docid": "9ac6498b4151785115fd8b0400830038", "score": "0.57814497", "text": "def play_hangman(hangman_list, word_to_guess, play_count, incorrect_guesses, guesses_made_so_far):\n # Based on game rules if number of incorrect guesses reaches 0. Game is Over\n if incorrect_guesses == 0:\n print(\"You have used all your guesses. You lose.\\nTime to play again\")\n initialize_hangman_game()\n else:\n user_guess = input(\n \"Please enter a guess. You have {} blanks to fill and have {} incorrect guesses left\\n\".format(\n hangman_list.count(\"_\"), incorrect_guesses))\n match_index = []\n # User input validation so that only upper case values are compared\n if user_guess != \"\" and len(user_guess.strip()) == 1:\n user_guess = user_guess.upper()\n play_count = play_count + 1\n # Check if user input is somethign the user already guessed before else proceed with game\n # Also check if user has made aduplicate guess. Purpose is not penalise them\n if user_guess in hangman_list or user_guess in guesses_made_so_far :\n print(\"This has already been guessed. Try again\")\n guesses_made_so_far.add(user_guess)\n play_hangman(hangman_list, word_to_guess, play_count, incorrect_guesses, guesses_made_so_far)\n else:\n guesses_made_so_far.add(user_guess)\n if user_guess in word_to_guess:\n for index, word in enumerate(word_to_guess):\n if user_guess == word:\n # Index to fill the words into the hangman list that is shown to the user\n match_index.append(index)\n else:\n incorrect_guesses = incorrect_guesses - 1\n print(\"This word does not match any blank. Try again\\n\")\n play_hangman(hangman_list, word_to_guess, play_count, incorrect_guesses, guesses_made_so_far)\n # This loop replaces the blanks in the hangman list with the correct user guesses\n for i in match_index:\n hangman_list[i] = user_guess\n print(\"\".join(hangman_list))\n # If there is still a blank left in hangman_list, game continues else it ends\n if \"_\" in hangman_list:\n play_hangman(hangman_list, word_to_guess, play_count, incorrect_guesses, guesses_made_so_far)\n else:\n print(\"Game Over. Youn won. It took {} counts\\nTime to play again\".format(play_count))\n initialize_hangman_game()\n else:\n print(\"User input is valid. 
Please try again\\n\")\n play_hangman(hangman_list, word_to_guess, play_count, incorrect_guesses, guesses_made_so_far)", "title": "" }, { "docid": "aee0ea1a8f8c73b169d334bc8e426870", "score": "0.5699036", "text": "def recordResults(self, parent, controller):\n x = 0\n matchList = []\n for match in tourny.matchUps:\n if match.blue == None or match.blue == \"\":\n match.setWinner(0, \"by bye\", 4)\n elif self.frameStorage[x][1].get() == \"\":\n match.setWinner(2, \"by draw\", 1)\n else:\n matchText = self.frameStorage[x][1].get()\n matchList = matchText.split(',')\n print(matchList)\n if len(matchList) == 2:\n matchList.append(\"\")\n if matchList[0] == match.red:\n if matchList[1] == \"2-0\":\n match.setWinner(0, matchList[2], 3)\n elif matchList[1] == \"2-1\":\n match.setWinner(0, matchList[2], 2)\n else:\n print(\"hey1\")\n return 0\n elif matchList[0] == match.blue:\n if matchList[1] == \"2-0\":\n match.setWinner(1, matchList[2], 3)\n elif matchList[1] == \"2-1\":\n match.setWinner(1, matchList[2], 2)\n else:\n return 0\n else:\n print(matchList[0])\n return 0\n x += 1\n \n for match in tourny.matchUps:\n if match.winner == \"\":\n redL = tourny.findPlayer(controller, match.red)\n tourny.players[redL].addScore(1)\n blueL = tourny.findPlayer(controller, match.blue)\n tourny.players[blueL].addScore(1)\n else:\n loc = tourny.findPlayer(controller, match.winner)\n tourny.players[loc].addScore(match.points)\n tourny.matches.append(match.winnerStr())\n return 1", "title": "" }, { "docid": "c1bc2337360e4a3ae9737e9ff9f67e2d", "score": "0.5593114", "text": "def next_plays(self):\n if self.game_over:\n return []\n return []", "title": "" }, { "docid": "8ad60c0217bcb6d6d976f4d8a6050d8b", "score": "0.5578534", "text": "def take_turn(self, opponent):\n\n # --------- BEGIN YOUR CODE ----------\n\n # 1.) Prompt user for a guess. Valid input would be a string like c,4\n prompt = input('Enter a valid guess: ')\n prompt = prompt.split(',')\n # If the guess is not valid ask the user to enter another guess\n if len(prompt)<2 or len(prompt)>2:\n prompt = input('Enter another guess (a-j,0-9): ')\n prompt = prompt.split(',')\n while prompt[0] not in 'a,b,c,d,e,f,g,h,i,j'.split(',') or prompt[1] not in '0,1,2,3,4,5,6,7,8,9'.split(','):\n prompt = input('Enter another guess (a-j,0-9): ')\n prompt = prompt.split(',')\n prompt[1] = int(prompt[1])\n n = 0\n for i in 'a,b,c,d,e,f,g,h,i,j'.split(','):\n if prompt[0] == i:\n prompt[0] = n\n n+=1\n row = int(prompt[0])\n col = int(prompt[1])\n # 2.) Call opponent.guess() to check whether the guess is a hit or miss\n check = opponent.guess(row, col)\n # 3.) Update my_hits, my_misses, and sunk_ships accordingly\n if check[0] == 0:\n self._my_misses.append((row,col))\n if check[0] == 1:\n self._my_hits.append((row,col))\n if check[0] == 2:\n self._my_hits.append((row, col))\n self._sunk_ships.append(check[1])\n # 4.) 
If the sunk_ships array has 5 ships in it set self.complete to True\n if len(self._sunk_ships) == 5:\n self.complete = True\n # --------- END YOUR CODE ----------", "title": "" }, { "docid": "07b0d06d03622fbfdf0a014720fe6205", "score": "0.5520644", "text": "def incorrect_guess(self, bad_guesses, guess):\n\t\tself.clear()\n\t\tprint(\"That is not a letter in the secret word\")\n\t\tself.guesses += 1\n\t\tself.bad_guesses.append(guess)\n\t\treturn self.bad_guesses", "title": "" }, { "docid": "8b2751f63aeb1a9585b8b176717cb499", "score": "0.5500813", "text": "def generate_matches_other_rounds(self):\n for i in range(self.round_number, 4):\n self.round_number += 1\n added_list = []\n joined_player_list = []\n if self.round_list == [[], [], [], []]:\n joined_player_list = self.updated_joined_list\n else:\n joined_player_list = self.joined_player_list()\n added_list = sorted(joined_player_list,\n key=lambda x: ((x[1] + x[2] + x[3] + x[4]),\n x[0]['rank']),\n reverse=True)\n match_list = self.generate_pairs_other_rounds(added_list)\n self.add_round_time()\n for match in match_list:\n self.clean_console()\n self.round_view.show_round_number(self.round_number)\n new = self.match_controller.update_score(match, i)\n match[0][i+1] = new[0]\n match[1][i+1] = new[1]\n self.round_list[i].append(match)\n self.add_round_time()\n self.convert_tuple(self.round_number)\n updated_round = self.update_instance()\n self.save_to_db(updated_round, self.tournament_id)\n i = 0\n tournament_result = self.get_tournament_result()\n self.clean_rounds()\n return tournament_result", "title": "" }, { "docid": "48855b596081390c1afdecb69bb8df09", "score": "0.5496794", "text": "def restGuess():\r\n # Give morty 5 guesses (count is now 2, counting the first guess)\r\n for guessesTaken in range(2, 6): \r\n print('*burrrp* guess again Moertyy')\r\n # print('DEBUG: Rick chose episode ' + str(rickNum))\r\n print()\r\n try:\r\n guess = int(input('Guess a number: '))\r\n print()\r\n if guess == rickNum:\r\n break \r\n elif guess >= minNum and guess <= maxNum:\r\n if guess > rickNum:\r\n print('Little too high Mortyy, just like grandpa!')\r\n elif guess < rickNum:\r\n print('Tooo looowww brooo...')\r\n elif guess > maxNum:\r\n print('I just said there\\'re only ' + str(maxNum) + ' episodes...you deserve to lose')\r\n elif guess < 0:\r\n print('I\\'m not asking you about your IQ Morty, pick a positive number...')\r\n else:\r\n print('Nice, wasting a precious guess on a goose egg, real smooth...')\r\n except ValueError:\r\n print('Yeah, that\\'s not a number you little turd...')\r\n\r\n if guess == rickNum:\r\n print('You guessed it Morty, we\\'re watching episode ' + str(rickNum) + '!')\r\n if guessesTaken == 5: # condition for last minute win\r\n print('On the last guess too, close one! I was about to verbally abuse the \\n' +\r\n 'shit out of you if you didn\\'t guess that haha')\r\n print()\r\n else:\r\n print(str(guessesTaken) + ' guesses, not bad Morty!')\r\n print()\r\n else:\r\n print('Your feeble capacity to guess an episode from a finite number set \\n' +\r\n 'is making me SOBER, Morty! 
I was thinking of episode ' + str(rickNum) + '...')\r\n print()", "title": "" }, { "docid": "5f04c93d6c62d0d34819b0b22ce7bd6e", "score": "0.54590654", "text": "def get_guesses(self):\n\n guess_collection = []\n\n for guess in self.guesses:\n # Create a new GuessForm representation of this Guess\n guessForm = GuessForm()\n guessForm.guess = guess['guess']\n guessForm.miss = guess['miss']\n guessForm.message = guess['message']\n guessForm.state = guess['state']\n\n guess_collection.append(guess)\n\n return guess_collection", "title": "" }, { "docid": "2c6b35f818a88ba6294089f6e640a001", "score": "0.54588425", "text": "def get_players_list(self):\n added_list = []\n for rounds in self.round_list:\n for match in rounds:\n for player in match:\n i = 0\n player_to_add = player\n for matches in rounds[i]:\n for player in matches:\n self.add_score_for_player(player_to_add, player)\n i += 1\n return added_list", "title": "" }, { "docid": "a19d843ec379eb24ba0ee7e7e043d847", "score": "0.54328376", "text": "def transfer_finished_players(self):\n new_players = []\n for player in self.players:\n if player.get_card_count() == 0:\n self.lobby.append(player)\n else:\n new_players.append(player)\n self.players = new_players", "title": "" }, { "docid": "1b3751e9c99d17f1dcdbd93d3aa932bd", "score": "0.5411613", "text": "def start(self):\n if (self.players):\n self.word = get_random_words()\n\n # temp is 0 if turn < players. Then use temp at this case. Else use turn mod players. Turn = realistic turn.\n guesser_index = self.turn % len(self.players)\n self.guesser = self.players[guesser_index - 1]\n\n self.turn += 1\n\n self.hints = []\n self.trys = []", "title": "" }, { "docid": "9181f9b6b499d46a9dc787c0ef5297de", "score": "0.5398353", "text": "def guessed_letter(letter):\n Guessed_letters.add(letter)", "title": "" }, { "docid": "53f6c54429cc96c90093b5ed18905a9d", "score": "0.5346756", "text": "def check_guess(self, temp_guess):\n matched_letters = 0\n allowed_guess_input = set(\"abcdefghijklmnopqrstuvwxyz\")\n if str(temp_guess).isdigit():\n messagebox.showinfo(\"Invalid Guess\", \"Please guess only letters!\")\n raise InvalidGuess\n else:\n a_guess = temp_guess.lower()\n if allowed_guess_input.issuperset(a_guess):\n for x in range(0, len(letters_for_game.listed_magic_word)):\n if a_guess == letters_for_game.listed_magic_word[x]:\n letters_for_game.blanks_list[x] = a_guess\n matched_letters += 1\n if letters_for_game.blanks_list == letters_for_game.listed_magic_word:\n messagebox.showinfo(\"You Win!\", \"CONGRATULATIONS, YOU WIN!\\nYou have correctly guessed the \"\n \"word '\" + letters_for_game.magic_word +\n \"'!\\nThe program will now close, thank you for playing.\")\n print(\"Thanks for playing!\")\n exit_button.invoke()\n exit()\n if matched_letters > 0:\n messagebox.showinfo(\"Correct!\", \"Yes! There is \" + str(matched_letters) + \" \" + a_guess)\n tk.Label(m, text=letters_for_game.guessed_list).grid(row=5, column=1)\n tk.Label(m, text=letters_for_game.blanks_list).grid(row=2, column=1)\n tk.Label(m, text=str(letters_for_game.num_of_guesses)).grid(row=6, column=1)\n else:\n messagebox.showinfo(\"Sorry!\", \"Unfortunately there are no '\" + a_guess + \"' in the word.\")\n self.num_of_guesses -= 1\n tk.Label(m, text=letters_for_game.guessed_list).grid(row=5, column=1)\n tk.Label(m, text=letters_for_game.blanks_list).grid(row=2, column=1)\n tk.Label(m, text=str(letters_for_game.num_of_guesses)).grid(row=6, column=1)\n if self.num_of_guesses == 0:\n messagebox.showinfo(\"You Lose!\",\n \"Uh oh! 
You are out of guesses and have lost!\\nThe magic word was '\"\n + letters_for_game.magic_word +\n \"'\\nThe program will now close, thank you for playing.\")\n print(\"Thanks for playing!\")\n exit_button.invoke()", "title": "" }, { "docid": "80346c2f43cc499e13698289be8a10f0", "score": "0.53114355", "text": "def get_turn(self):\n return self._guess_list", "title": "" }, { "docid": "21c7e8d1f3d5dfb523e9e81fd3587540", "score": "0.53037083", "text": "def select_next_guess(self):\n\n print(\"{0} possible solutions remain...\".format(len(self.possibilities)))\n start_time = time.time()\n\n self.say(\"Now I'm thinking\")\n\n if len(self.possibilities) == 0:\n print termcolors.FAIL + 'Oh no! No solutions found. Did ' + \\\n 'you perhaps make an error in inputting the red/white ' + \\\n 'counts?\\n' + self.serialize() + ' ❌' + termcolors.ENDC\n sys.exit()\n elif len(self.possibilities) == 1:\n print \"Only one possibility left, so...\"\n return self.guess(self.possibilities[0])\n elif len(self.possibilities) == 2:\n print \"There are only two possibilities left, so I'll just guess.\"\n return self.guess(random.choice(self.possibilities))\n\n final_scores = {}\n\n def score_guess(guess):\n if guess in self.past_guesses:\n return\n\n guess_eliminates = None\n\n for red, white in RED_WHITE_COMBOS:\n combo_eliminates = 0\n for possibility in self.possibilities:\n if not self._check_red_white(guess, possibility, red, white):\n combo_eliminates += 1\n\n if guess_eliminates is None:\n guess_eliminates = combo_eliminates\n elif combo_eliminates < guess_eliminates:\n guess_eliminates = combo_eliminates\n\n final_scores[guess] = guess_eliminates\n\n for guess in self.all_possibilities:\n score_guess(guess)\n\n final_guess, eliminates = max(final_scores.iteritems(),\n key=operator.itemgetter(1))\n\n seconds_elapsed = round(time.time() - start_time, 2)\n\n print((\"My best option is guessing {}, to eliminate at worst {}/{} \" +\n \"possibilties ({}s elapsed).\").format(final_guess, eliminates,\n len(self.possibilities), seconds_elapsed))\n\n self.guess(final_guess)", "title": "" }, { "docid": "70b0b156d3ec5fd87978a74ac4bff3b7", "score": "0.5295775", "text": "def __players(self):\n for player in self.players.players:\n log.debug(' player %s turn' % player.nume)\n while True:\n answer = ' '\n while answer != 'h' and answer != 's':\n answer = input('%s : (h)it or (s)tand? 
' %\n player.display_name()).lower()\n # hit\n if answer == 'h':\n if self.hit(player) == -1:\n break\n continue\n # stand\n if answer == 's':\n self.stand(player)\n break", "title": "" }, { "docid": "133e44c0574cf7f371872d91a24781d2", "score": "0.5280454", "text": "def findMatchUps(self, controller = None):\n self.sortPlayers()\n x=0\n y=1\n result = []\n self.matchUps = []\n while y < len(self.players):\n self.matchUps.append(Match(str(self.players[x]), str(self.players[y]), self.bout, self.currentRound))\n x += 2\n y += 2\n self.bout += 1\n if (len(self.players) % 2) == 1:\n self.matchUps.append(Match(str(self.players[-1]), \"\", self.bout, self.currentRound))\n self.bout += 1", "title": "" }, { "docid": "897d34c7d72a6eab5632bbccc0d4b472", "score": "0.5278724", "text": "def check_guess(guess, hidden_word, vis_word, inc_guesses):\n vis_word = list(vis_word)\n hid_word = list(hidden_word)\n \n if guess not in inc_guesses and guess not in vis_word:\n # has not been attempted/guessed before\n if guess in set(hid_word):\n for i, elem in enumerate(hid_word):\n if guess == elem:\n vis_word[i] = guess\n else:\n inc_guesses.append(guess)\n \n return ''.join(vis_word), inc_guesses", "title": "" }, { "docid": "f4720aff4b8c09842763722cac865432", "score": "0.52770144", "text": "def play_game(self):\n while True:\n letter = input('Guess a letter: ').lower()\n if len(letter) > 1:\n print('Please only guess one letter at a time')\n continue\n elif letter in self.guessed:\n print('You have already guessed that letter')\n continue\n elif letter not in ASCII:\n print('please guess a letter')\n continue\n elif letter not in self.word_set:\n self.graphics = next(self.graphics_gen)\n self.guessed.add(letter)\n self.game_board = self.display_game_board()\n self.print_game_board()\n self.check_end_of_game()", "title": "" }, { "docid": "16c3e24658b525d0d7ce756f1fdbac9e", "score": "0.5270503", "text": "def scrape_player_list(self):\r\n scraped_players = TCS_Scraper.scrape_players(self.url)\r\n\r\n player_list = []\r\n for battle_tag in scraped_players:\r\n player_list.append(Player(battle_tag))\r\n self.players = player_list", "title": "" }, { "docid": "407e02e80567dda8533d96589e92c928", "score": "0.52682817", "text": "def play_game(word_list):\n number_of_hands = int(input(\"Enter total number of hands: \"))\n total_score = 0\n can_substitute_hand = True\n can_replay_hand = True\n \n #Game Loop\n for i in range(number_of_hands):\n hand = deal_hand(HAND_SIZE) #Current hand\n \n if can_substitute_hand: #If hand can be substituted\n display_hand(hand)\n substitute_hand_input = input(\"Would you like to substitute a letter? \")\n if substitute_hand_input.lower() == \"yes\": #Hand has been substituted\n can_substitute_hand = False\n letter = input(\"Which letter would you like to replace: \")\n hand = substitute_hand(hand, letter)\n \n #Starts the current Round \n round_score = play_hand(hand, word_list)\n print_dashes()\n \n if can_replay_hand: #Checks if can and if the player wants to replay a round\n replay_hand_input = input(\"Would you like to replay the hand? 
\")\n if replay_hand_input.lower() == \"yes\":\n can_replay_hand = False\n replay_hand_score = play_hand(hand, word_list)\n if replay_hand_score > round_score: #Picks the biggest score out of the 2 rounds with same hand\n round_score = replay_hand_score\n \n \n total_score += round_score #Updates total score and moves to next round\n \n #\n #Game Loop has ended\n #\n print_dashes()\n print(\"Total score over all hands: {}\".format(total_score))", "title": "" }, { "docid": "9b7a7eba98d8350d78e61cc5bb9b893e", "score": "0.526002", "text": "def guess():\n allowed_guess_input = set(\"abcdefghijklmnopqrstuvwxyz\")\n letter_guessed = new_guess.get().lower()\n if allowed_guess_input.issuperset(letter_guessed):\n if len(letter_guessed) <= 0 or len(letter_guessed) > 1:\n messagebox.showinfo(\"Invalid Guess\", \"Please guess 1 letter at a time!\")\n raise InvalidGuess(\"Please only guess 1 letter!\")\n else:\n if letter_guessed not in letters_for_game.guessed_list:\n letters_for_game.add_guess(letter_guessed)\n letters_for_game.check_guess(letter_guessed)\n else:\n messagebox.showinfo(\"Sorry!\", \"This letter has already been guessed, try guessing another letter!\")\n else:\n messagebox.showinfo(\"Invalid Guess\", \"Please guess only letters!\")\n raise InvalidGuess(\"Please only guess letters!\")\n new_guess.delete(0, 1)", "title": "" }, { "docid": "c582dae0c96ff0529a72e9283382859a", "score": "0.5219919", "text": "def play_game(screen, game_data, results_list):\n\n done = False\n clock = pygame.time.Clock()\n question_index = 0\n advance_question = False\n response = \"\"\n score = 0\n score_update = 0\n question_state = 0\n\n while done == False:\n\n for event in pygame.event.get(): # User did something\n if event.type == pygame.QUIT: # If user clicked close\n return \"quit\",score\n # done = True # Flag that we are done so we exit this loop\n\n elif event.type == pygame.KEYUP:\n if event.key != pygame.K_RETURN:\n response = chr(event.key)\n elif response != \"\":\n if game_data[question_index][1].lower() == response.lower():\n score_update = 1\n advance_question = True\n\n\n if advance_question == True:\n # update the score and reset things for the next question\n score = score + score_update\n question_index = question_index + 1\n response = \"\"\n advance_question = False\n score_update = 0\n\n elif advance_question != True:\n show_question_response(screen,game_data[question_index],response)\n\n if question_index >= len(game_data):\n done = True\n\n clock.tick(20) #limit to 20 frames per second\n pygame.display.flip() #update screen with what's drawn\n else:\n return \"final\",score", "title": "" }, { "docid": "21f6c9396c8432db2e537e26ebcb721b", "score": "0.52050745", "text": "def get_players():\n p_count = 0 # control the players print list\n hinches = \"\" # will be the argument validated\n values = [] # list of players\n\n if len(sys.argv) != 2:\n print(\"Must pass height parameter. Try again\")\n exit()\n\n try:\n hinches = int(sys.argv[1])\n except ValueError:\n print(\"Error: Must be a number. 
Try again\")\n exit()\n\n res = req.get('https://mach-eight.uc.r.appspot.com/')\n if res.status_code != 200:\n print(\"Error {}\".format(res.status_code))\n exit()\n\n hinches = str(hinches)\n if res.text.count(hinches) == 0: # test fastly if matches with a player\n print(\"No matches found\")\n exit()\n\n try:\n values = res.json()[\"values\"]\n except req.exceptions.JSONDecodeError:\n print(\"Error: Can't convert response to JSON\")\n exit()\n\n print(\"> app {}\\n\".format(hinches))\n for p in values: # loop by all players\n if p[\"h_in\"] == hinches:\n st = \"{} {}\".format(p[\"first_name\"], p[\"last_name\"])\n if p_count % 2 == 0:\n print(\"- {:<15}\".format(st), end='\\t')\n else:\n print(st)\n\n p_count += 1\n if p_count == res.text.count(hinches):\n if p_count % 2 != 0:\n print()\n break # ends the loop if gets all the players", "title": "" }, { "docid": "cdd5e6f79fd6b92a3bf897b399319c2c", "score": "0.52038604", "text": "def game_loop():\n mylist = [' ','O',' '] #default values\n \n #shuffle the list first\n mixedup_list = shuffle_list(mylist)\n game_is_active = True\n\n while game_is_active: #Actual game logic\n inputed_guess = player_guess() #gets player's guess\n\n if check_guess(mixedup_list, inputed_guess): #checks, and if it's correct, it should break out of the while loop\n game_is_active = False\n else:\n pass\n #reviewing my code: I could just set the game_is_active = check_guess(mixedup_list, inputed_guess) instead", "title": "" }, { "docid": "34af82c879cce27de5f2b56ea4825ba4", "score": "0.5201297", "text": "def game(word, list_of_guessed_letters):\r\n number_of_tries = 1\r\n show_hidden_word(word, list_of_guessed_letters)\r\n while not check_win(word, list_of_guessed_letters):\r\n letter = input(\"\\nGuess a letter: \")\r\n if try_update_letter_guessed(letter, list_of_guessed_letters):\r\n if (number_of_tries < MAX_TRIES) and letter.lower() not in word:\r\n number_of_tries += 1\r\n print(\":(\")\r\n print_hangman(number_of_tries)\r\n if number_of_tries == MAX_TRIES:\r\n print(\"\\nGAME OVER - YOU LOSE\\n\")\r\n return\r\n print(\"\\nreminder of the secret word pattern:\")\r\n show_hidden_word(word, list_of_guessed_letters)\r\n print(\"\\nGAME OVER - YOU ARE A WINNER!\\n\")", "title": "" }, { "docid": "7cc00e01f623c485b29d0b95cc3da95d", "score": "0.5198512", "text": "def _get_inputs(self, guess):\n \n # get next player's move\n player = self._roster.get_current()\n self._console.write(f\"{player.get_name()}'s turn:\")\n guess = self._console.read_number(\"Guess a number bewtween 1000 - 9999: \")", "title": "" }, { "docid": "429da83a2a8dfceded356813c0f33d71", "score": "0.51633936", "text": "def play_game(word, guesses, completed_word):\n playerGuess = guess(guesses)\n if check_guess(playerGuess, word):\n for index in range(len(completed_word)):\n if word[index] == playerGuess:\n completed_word[index] = playerGuess\n check_completion(completed_word)\n else:\n print(\"Sorry \\'\" + playerGuess + \"\\' is not in the word\")\n guesses.append(playerGuess)\n print(\"Guesses: \" + str(guesses))\n print(''.join(completed_word))", "title": "" }, { "docid": "820e6c1ec5d1115ef2f1b613dd18ff4a", "score": "0.5159401", "text": "def finish(self):\r\n print \"user used \" + str(self.num_guesses) + \" guesses.\"", "title": "" }, { "docid": "83c7f5fccb2273e57842145eb528d588", "score": "0.51508003", "text": "def guess(self):\n guess = input(\"please guess a letter or word:\\n\").upper()\n if len(guess) == 1 and guess.isalpha():\n if guess in self.guessed_letters:\n print(\"you already 
guessed the letter \" + guess)\n elif guess not in self.word:\n print(guess + \" is not in the word.\")\n self.trials -= 1\n self.guessed_letters.append(guess)\n else:\n print(\"good job,\" + guess + \"is in the word\")\n self.guessed_letters.append(guess)\n word_as_list = list(self.word_completion)\n indices = [\n i for i, letter in enumerate(self.word) if letter == guess\n ]\n for index in indices:\n word_as_list[index] = guess\n self.word_completion = \"\".join(word_as_list)\n if \"_\" not in self.word_completion:\n self.guessed = True\n elif len(guess) > 0 and guess.isalpha():\n if guess in self.guessed_words:\n print(\"You already guessed the word \" + guess)\n elif guess != self.word:\n print(guess + \" is not the word.\")\n self.trials -= 1\n self.guessed_words.append(guess)\n else:\n self.guessed = True\n self.word_completion = self.word\n else:\n print(\"not a valid guess.\")\n print(DISPLAY_HANGMAN[self.trials])\n print(self.word_completion)\n print(\"\\n\")", "title": "" }, { "docid": "17dd0fd04a49d1531fba70464f92a74c", "score": "0.5147255", "text": "def buddyfinder():\n\n # Accounts for any cards that were \"left over\" that might screw up later\n db.execute(\"UPDATE matches SET currentcard = 0\")\n\n # Select randomly one non seen match and display information\n person = db.execute(\"SELECT id_matchee FROM matches WHERE id_matcher = :user AND matchstatusmatcher = 0\",\n user=session[\"user_id\"])\n other = db.execute(\"SELECT id_matcher FROM matches WHERE id_matchee = :user AND matchstatusmatchee = 0\",\n user=session[\"user_id\"])\n\n selection = 0\n other_is_matchee = False\n\n if len(person) > 0:\n selection = person[0][\"id_matchee\"]\n other_is_matchee = True\n\n elif len(other) > 0:\n selection = other[0][\"id_matcher\"]\n\n # If there are no users you can match with, you get a special version of the apology page\n else:\n return render_template(\"apology.html\", other=True, message=\"There are no other students in your class. 
Wait a few days for more users to join!\", route=\"/dashboard\")\n\n cardinfo = db.execute(\"SELECT * FROM users WHERE user_id = :selection\", selection=selection)\n\n # Again, need to account for both possibilities\n if other_is_matchee:\n db.execute(\"UPDATE matches SET currentcard = 1 WHERE id_matchee = :selection AND id_matcher = :user\",\n selection=selection, user=session[\"user_id\"])\n\n else:\n db.execute(\"UPDATE matches SET currentcard = 1 WHERE id_matcher = :selection AND id_matchee = :user\",\n selection=selection, user=session[\"user_id\"])\n\n # Render profile with the info\n return render_template(\"buddyfinder.html\", cardinfo=cardinfo[0])", "title": "" }, { "docid": "21d5327d6f99b6b94604ff23711e683f", "score": "0.513434", "text": "def guess(self, letter):\n self.guesses.add(letter.lower())\n\n if letter.lower() not in self.phrase.lower():\n self.tries += 1", "title": "" }, { "docid": "ca9bcc93b0bb9864c426f0f34ea34054", "score": "0.5109813", "text": "def players_turn(player, dealer, deck):\n # creating the list of all human player's Blackjack hands left to play:\n hand_list = [player.hand]\n # variable score_list will be used to store the list of final scores of all hands played:\n score_list = []\n # variable natural_list will be used to store the list of booleans showing if there was a Natural Blackjack for each\n # hand played:\n natural_list = []\n # variable natural_list will be used to store the list of names of corresponding wagers for each hand played:\n wager_list = []\n # variable sr_requested will be used to store the boolean showing whether or not surrender is requested\n sr_requested = False\n\n # while loop checking if there are any hands left to play:\n while len(hand_list) > 0:\n # removing a hand to be played from the list:\n hand = hand_list.pop(0)\n\n # clearing the screen:\n print('\\n' * 100)\n # showing dealer's cards one face up, one face down:\n dealer.hand.display_one_face_down('Dealer')\n # showing human player's cards face up:\n hand.display_face_up(player.name)\n\n # determining which wager corresponds to the hand being played:\n if hand.split_wager_number == 0:\n wager_name = 'Main Wager'\n else:\n wager_name = f'Split Wager {hand.split_wager_number}'\n\n # checking for the natural Blackjack:\n if hand.score == 21:\n natural = True\n print('\\nBLACKJACK!')\n press_enter_to_continue()\n else:\n natural = False\n\n # checking if surrender is possible on this hand:\n if hand.type == 'Normal' and dealer.hand.score != 21 and player.wagers[wager_name]['Amount'] > 1:\n\n # checking if player has been dealt a 14, 15, 16, or 17:\n if surrender_offered(hand, dealer.hand.cards[0].rank):\n sr_requested = surrender_requested()\n\n if sr_requested:\n player.surrender()\n break\n\n # checking if splitting is allowed for this hand:\n if hand.type == 'Normal' or hand.split_hand_number in ['1', '2']:\n\n # checking if there is a pair of same-rank cards and if human player has enough chips for a split:\n if hand.cards[0].rank == hand.cards[1].rank and player.chips['Amount'] >= \\\n player.wagers[wager_name]['Amount']:\n\n # displaying all human player's chips and wagers:\n print('\\n')\n player.display_chips('Chips')\n print('\\n')\n player.display_chips(*player.wagers.keys())\n\n # asking if human player wants to split the pair:\n if split_requested():\n\n # splitting:\n # determining the new split wager numbers:\n if hand.type == 'Normal':\n new_split_wager_numbers = (0, 1)\n elif hand.type == 'Split Hand' and hand.split_hand_number == '1':\n 
new_split_wager_numbers = (0, 2)\n elif hand.type == 'Split Hand' and hand.split_hand_number == '2':\n\n if len(player.split_hands) == 2:\n new_split_wager_numbers = (1, 2)\n elif len(player.split_hands) == 4:\n new_split_wager_numbers = (1, 3)\n\n # splitting the wager:\n player.double_wager('Split', *new_split_wager_numbers)\n # creating two new split hands:\n player.start_split_hand(NEW_SPLIT_HAND_NUMBERS[hand.split_hand_number][0],\n new_split_wager_numbers[0])\n player.start_split_hand(NEW_SPLIT_HAND_NUMBERS[hand.split_hand_number][1],\n new_split_wager_numbers[1])\n # splitting the pair:\n split_card1, split_card2 = hand.split_pair()\n\n # adding one of the split cards to each split hand:\n player.split_hands[-2].add_card_from_split(split_card1)\n player.split_hands[-1].add_card_from_split(split_card2)\n\n # adding one card from deck to each split hand:\n player.split_hands[-2].add_card_from_deck(deck)\n player.split_hands[-1].add_card_from_deck(deck)\n hand_list = [player.split_hands[-2], player.split_hands[-1]] + hand_list\n # clearing the screen:\n print('\\n' * 100)\n # displaying the updated human player's chips and wagers:\n player.display_chips('Chips')\n print('\\n')\n player.display_chips(*player.wagers.keys())\n # asking the player to press enter to continue:\n press_enter_to_continue()\n continue\n\n # checking if doubling down is possible:\n if hand.score in [10, 11] and player.chips['Amount'] >= player.wagers[wager_name]['Amount']:\n # clearing the screen:\n print('\\n' * 100)\n # showing dealer's cards one face up, one face down:\n dealer.hand.display_one_face_down('Dealer')\n # showing human player's cards face up:\n hand.display_face_up(player.name)\n print('\\n')\n # displaying all human player's chips and wagers:\n player.display_chips('Chips')\n print('\\n')\n player.display_chips(*player.wagers.keys())\n # asking if human player wants to double down:\n dd_requested = double_down_requested()\n\n # doubling down:\n if dd_requested:\n # doubling the wager:\n player.double_wager('Double Down', hand.split_wager_number)\n # clearing the screen:\n print('\\n' * 100)\n # displaying the updated human player's chips and wagers:\n player.display_chips('Chips')\n print('\\n')\n player.display_chips(*player.wagers.keys())\n # asking human player to press enter to continue:\n press_enter_to_continue()\n # clearing the screen:\n print('\\n' * 100)\n # showing dealer's cards one face up, one face down:\n dealer.hand.display_one_face_down('Dealer')\n # showing human player's cards face up:\n hand.display_face_up(player.name)\n\n # doubling down not possible:\n else:\n dd_requested = False\n\n # checking if human player has split a pair of Aces:\n if hand.type == 'Split Hand' and hand.cards[0].rank == 'A':\n # the player is only allowed to draw one card on each split Ace:\n print(\"\\nYou can't take any more cards to this hand (split Aces)\")\n hit = False\n # asking human player to press enter to continue:\n press_enter_to_continue()\n # in all other cases, player is allowed to draw at least one more card:\n else:\n hit = True\n\n # while loop checking if the hand score is still less than 21, and human player is allowed and willing to hit\n # one more card:\n while hand.score < 21 and hit:\n # asking human player to choose hit or stand:\n hit = hit_or_stand()\n # hitting:\n if hit:\n # adding one card from deck to the hand:\n hand.add_card_from_deck(deck)\n # clearing the screen:\n print('\\n' * 100)\n # showing dealer's cards one face up, one face down:\n 
dealer.hand.display_one_face_down('Dealer')\n # showing human player's cards face up:\n hand.display_face_up(player.name)\n\n # checking if there was a double down:\n if dd_requested and hand.score < 21:\n # the player is only allowed to draw one card after doubling down:\n print(\"\\nYou can't take any more cards to this hand (Double Down)\")\n hit = False\n # asking human player to press enter to continue:\n press_enter_to_continue()\n\n # checking for a bust:\n if hand.score > 21:\n print('\\nBUST!')\n # asking human player to press enter to continue:\n press_enter_to_continue()\n # checking for a 21:\n elif hand.score == 21:\n print('\\n'\n 'YOU HAVE A 21!')\n # asking human player to press enter to continue:\n press_enter_to_continue()\n\n # adding the final hand score to the score list:\n score_list.append(hand.score)\n # adding the boolean showing whether there was a natural Blackjack to the natural list:\n natural_list.append(natural)\n # adding the name of corresponding wager to the wager list:\n wager_list.append(wager_name)\n\n # after all hands have been played, return the score list, the natural list, the wager list, and the boolean\n # showing if Surrender has been requested:\n return score_list, natural_list, wager_list, sr_requested", "title": "" }, { "docid": "832f3721ffc7df722e383428a982dc43", "score": "0.5087872", "text": "def play_hand(hand, word_list):\n points = 0\n new_hand = hand.copy()\n\n while True:\n current_hand = display_hand(new_hand)\n\n print(\"Current Hand: \", current_hand)\n\n word = input(\"Enter word, or . to indicate that you are finished: \")\n\n if word == \".\":\n print(\"Total score: \", points, \" points.\")\n return\n\n if is_valid_word(word, new_hand, word_list):\n score = get_word_score(word, HAND_SIZE)\n points += score\n\n print(word, \" earned \", score, \" points. Total: \", points, \" points\")\n new_hand = update_hand(new_hand, word)\n else:\n print(\"Invalid word, Please try again.\")\n\n if new_hand == {}:\n print(\"Total score: \", points, \" points.\")\n return", "title": "" }, { "docid": "c0188e0fe3964cd03cef6ef9ff642738", "score": "0.5080543", "text": "def afterMove(lastPlayer, lastPlayerResult, nextPlayer, board, bag):\n\n if (lastPlayerResult is None):\n #player 'pass', no change\n #print(\"Player \", lastPlayer.name, \" is passing \")\n return 0\n \n if isinstance(lastPlayerResult, str):\n \n newBag = replaceLetters(lastPlayer, lastPlayerResult, bag) \n lastPlayer.update(board, newBag, lastPlayer.myScore, nextPlayer.myScore, len(bag))\n nextPlayer.update(board, nextPlayer.bag, nextPlayer.myScore, lastPlayer.myScore, len(bag))\n #the second player does not need to be updated, as the board didn't\n #change\n return 0\n\n if isinstance(lastPlayerResult,list):\n #player is placing some letters to the game\n #print(\"Player \", lastPlayer.name , \" is placing stuff .. 
\", lastPlayerResult)\n lettersToReplace = \"\"\n board_changes = 0\n for change in lastPlayerResult:\n assert(isinstance(change, list) and len(change) == 3)\n row, col, letter = change\n assert(in_board(row, col) and board[row][col] == \"\")\n board[row][col] = letter\n lettersToReplace += letter\n #print(\"Placing \", letter, \"at \", row,col)\n board_changes+=1\n new_score = lastPlayer.myScore + compute_score_gain(lastPlayerResult, board)\n #print(\"Score increased by %d\" % score_gain)\n\n\n newBag = [letter for letter in lastPlayer.bag if letter not in lettersToReplace] if len(bag) == 0 else replaceLetters(lastPlayer, lettersToReplace, bag)\n lastPlayer.update(board, newBag, new_score, nextPlayer.myScore, len(bag))\n nextPlayer.update(board, nextPlayer.bag, nextPlayer.myScore, new_score, len(bag))\n return board_changes\n\n print(\"Wrong result from player.move; exit now\")\n input()\n quit()\n return 0", "title": "" }, { "docid": "9efe22d64e3c5bee71ea764c5dcfc099", "score": "0.5080346", "text": "def guess_letter(self, letter):\n letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n letter = letter.lower()\n \n if len(letter) != 1 or letter not in letters:\n raise ValueError(\"Input must be a single letter\")\n\n if letter in self.guessed:\n print \"You already guessed the letter \\'%s\\'\" % letter\n return False\n \n else:\n self.guessed.append(letter) #add the letter to the list of guessed letters\n self.guessed.sort()\n self.attempts += 1 #increment the number of attempts made\n\n if letter in self.word: \n #with word: \"joe\", would create [(j, 0), (o, 1), (e, 2)]\n positions = [(l, p) for l, p in zip(self.word, range(len(self.word)))]\n\n #x cycles through the positions of the letters that match the word\n for x in [p for l, p in positions if l == letter]:\n #add the letter to self.progess\n if x == 0:\n self.progress = letter + self.progress[1:]\n else:\n self.progress = self.progress[:x] + letter + self.progress[x + 1:]\n\n return True\n \n else:\n self.chances -= 1\n return False", "title": "" }, { "docid": "6e4491ec8f1af8be038a07feb2228081", "score": "0.5075009", "text": "def handle_winners(self, results, participants):\n for i in results:\n if i == 1:\n participants[0].handle_win(participants[1])\n participants[0].wins += 1\n participants[1].losses += 1\n else:\n participants[1].handle_win(participants[0])\n participants[1].wins += 1\n participants[0].losses += 1\n if results.count(1) > results.count(2):\n return participants[0]\n else:\n return participants[1]", "title": "" }, { "docid": "f04bc05d763c11236455de497a4c7430", "score": "0.5071085", "text": "def input_guess(guess):\n global allowed_guesses\n\n print \"-\"\n print \"You made a guess! Your guess was %s.\" % guess\n\n allowed_guesses = allowed_guesses - 1\n print \"You have %s guesses remaining.\" % allowed_guesses\n\n if allowed_guesses == 0:\n print \"* You're out of guesses. Game over! The number was %s.\" % secret_number\n new_game()\n else:\n guess_int = int(guess)\n\n if guess_int < secret_number:\n print \"Guess higher!\"\n elif guess_int > secret_number:\n print \"Guess lower!\"\n else:\n print \"* Your guess is correct! Nice job. 
Starting new game...\"\n new_game()", "title": "" }, { "docid": "89316ea42d4c0e9cfd57b4a774925a99", "score": "0.5069358", "text": "def generate_pairs_other_rounds(self, players_list):\n match_list = [[] for i in repeat(None, 4)]\n i = 0\n for i in range(0, 4):\n j = 0\n if i == 3:\n match_list[i] = [players_list[0], players_list[1]]\n return match_list\n match_list[i] = [players_list[0], players_list[j+1]]\n while self.check_player_already_played(match_list[i]):\n j += 1\n match_list[i] = [players_list[0], players_list[j+1]]\n del players_list[0]\n del players_list[j]\n return match_list", "title": "" }, { "docid": "14b36cc51f00be2861a43750f73cf274", "score": "0.5068599", "text": "def get_tournament_result(self):\n joined_player_list = []\n joined_player_list = self.joined_player_list()\n added_list = sorted(joined_player_list,\n key=lambda x: ((x[1] + x[2] + x[3] + x[4]),\n x[0]['rank']),\n reverse=True)\n return added_list", "title": "" }, { "docid": "ff66eda8fdd3ea0e436847aa70fe6816", "score": "0.5064942", "text": "def play(self) -> Tuple[List[Player], List[Dict], List[Player]]:\n start_time = time()\n game = []\n for i, card in enumerate(self.card_list_):\n turn_winner, player_card_list = self.turn(card, i)\n game.append(\n {\n \"card\": card,\n \"player_card_list\": player_card_list,\n \"turn_winner\": str(turn_winner),\n }\n )\n winner_score = max(self.player_score_list_)\n winner_list = []\n if self.verbose_:\n print(\"Final scores:\")\n for player, score in zip(self.player_list_, self.player_score_list_):\n if score == winner_score:\n print(f\"{player}'s score: {score} | Winner\")\n else:\n print(f\"{player}'s score:\", score)\n print(f\"Game took {time() - start_time}s\")\n for player, score in zip(self.player_list_, self.player_score_list_):\n if score == winner_score:\n player.win(self.n_player_)\n winner_list.append(player)\n else:\n player.lose(self.n_player_)\n player.end_game()\n self.card_list_ = list(range(-self.n_cards, 0)) + list(range(1, 2*self.n_cards + 1)) ##\n np.random.shuffle(self.card_list_)\n self.n_game_ = self.n_game_ - 1\n self.player_score_list_ = [0 for _ in range(self.n_player_)]\n return self.player_list_, game, winner_list", "title": "" }, { "docid": "6728bf596a6aab2877fdc18b6f2696c5", "score": "0.50551605", "text": "def player_input(word):\n used_list = []\n used_list.extend(word.guessed)\n used_list.extend(word.misses)\n used_list = [x.lower() for x in used_list]\n guess_letters = \"\"\n valid_turn = False # if not a single letter was accepted, repeat the turn\n while True:\n guess_letters = raw_input(\"Your guess[? for hint]: \").lower()\n if re.match(r'^[a-zA-Z]+$', guess_letters) is None:\n if guess_letters == '?':\n break\n print \"Please enter only alphabets[or ?] 
as a guess.\"\n continue\n for guess_letter in guess_letters:\n if guess_letter in used_list:\n print \"You have already used %s.\" % guess_letter\n else:\n valid_turn = True\n if valid_turn:\n break\n return guess_letters", "title": "" }, { "docid": "f1ccf8189d473f6d45591ee82f729d08", "score": "0.5041857", "text": "def guessing() -> None:\n global guess_taken\n global MAX_GUESS\n print_guesses_taken(guess_taken, MAX_GUESS)\n\n guess = str(e1.get())\n if not guess in ALPHABET: #checking input\n m6=str(\"Enter a letter from a-z ALPHABET\")\n elif guess in letter_storage: #checking if letter has been already used\n m6=str(\"You have already guessed that letter!\")\n else:\n letter_storage.append(guess)\n if guess in SECRET_WORD:\n m6=str(\"You guessed correctly!\")\n for i in range(0, LENGTH_WORD):\n if SECRET_WORD[i] == guess:\n GUESS_WORD[i] = guess\n print_word_to_guess(GUESS_WORD)\n print_guesses_taken(guess_taken, MAX_GUESS)\n if not '-' in GUESS_WORD:\n m6=str(\"You won! Game Over!\")\n\n else:\n m6=str(\"The letter is not in the word. Try Again!\")\n guess_taken += 1\n print_guesses_taken(guess_taken, MAX_GUESS)\n if guess_taken == 5:\n m6=str(\" Sorry Mate, You lost :<! The secret word was {0}\".format(SECRET_WORD))\n exit()\n l6 = Label(root, text=m6)\n l6.grid(row=7, column=0, columnspan=2)", "title": "" }, { "docid": "9c63b23d590e9e61fba6bb6dc2d1ce41", "score": "0.5041241", "text": "def recitfy_game_history(self):\n # Overwrite the lists of played positions/tokens and empty cells to reflect the new board\n board_iter = iter(self)\n self.empty_cells = []\n self.played_positions = []\n self.played_tokens = []\n \n for cell in board_iter:\n position = (board_iter.row_id, board_iter.col_id)\n if cell is self.empty:\n self.empty_cells.append(position)\n else:\n self.played_tokens.append(cell)\n self.played_positions.append(position)", "title": "" }, { "docid": "ffb51b626926f65c345404a23126212f", "score": "0.5030276", "text": "def guess(self, char):\n if self.status == STATUS_ONGOING:\n if char in self.guesses_so_far:\n self.remaining_guesses -= 1\n elif char not in self.guesses_so_far:\n self.guesses_so_far.append(char)\n if char not in self.word:\n self.remaining_guesses -= 1\n self.status = self.get_status()\n else:\n raise ValueError(\"Game is over, no more guesses allowed\")\n return", "title": "" }, { "docid": "98d9b3108c82d84e94f64cb7750463be", "score": "0.5015186", "text": "def show_players(self):\n self.replay = get_replay(self.fn)\n if self.replay:\n # show map name in right show board\n map_info = \"%s %s\" % (self.TEXT.get(\"map_name\"), self.replay.map_name)\n self.show_result(map_info)\n # show players info in the left player board\n for i in range(len(self.replay.players)):\n p = self.replay.players[i]\n p_info = \"{:15} \".format(p.name)\n if p.is_human_player():\n # TODO, guess the faction if random\n faction = p.decode_faction()\n faction = self.TEXT.get(\"factions\", {}).get(faction)\n p_info += \"{:10} {:5}\".format(faction, p.team)\n\n self.plist.insert(i + 1, p_info)", "title": "" }, { "docid": "bd460455ff1c21fa47beee76637aad11", "score": "0.5008659", "text": "def swissPairings():\r\n # first, get a list of all the players to loop through, in order of\r\n # standings, and create a blank list for pairings\r\n players = playerStandings()\r\n pairings = []\r\n\r\n # loop through all the players one at a time and try to find them the\r\n # closest match\r\n for p in players:\r\n # make sure this player hasn't already been paired with someone else\r\n if [i for i in 
pairings if p[0] == i[0] or p[0] == i[2]]:\r\n # the player is already paired, so move on to the next\r\n continue\r\n\r\n else:\r\n # get a list of possible opponents who haven't been matched with\r\n # the player previously, and aren't currently paired with someone\r\n # else\r\n DB = connect()\r\n c = DB.cursor()\r\n c.execute(\"select id, playerName, (select sum(score) from match\\\r\n where players.id = match.playerID) as score from players\\\r\n where id not in (select playerID from match where\\\r\n opponentID = %s) and id <> %s group by id order by score\\\r\n desc;\", ((p[0],), (p[0])))\r\n opponents = c.fetchall()\r\n\r\n for o in opponents:\r\n # now check to see if the opponent in question is already in\r\n # the current pairing list\r\n if [i for i in pairings if i[0] == o[0] or i[2] == o[0]]:\r\n # the opponent has already been paired, move to the next\r\n continue\r\n else:\r\n pairings.append((p[0], p[1], o[0], o[1]))\r\n break\r\n\r\n # NOTE: Because this function returns player pairings based on rank of\r\n # players who have not played each other, once a clear winner has been\r\n # reached assuming each player is paired with another only once, it will\r\n # return a list with a length less than the total player count/2. For\r\n # this reason, it could be used in a counting function to calculate the\r\n # number of necessary rounds in a more robust application. Another\r\n # possible methodolgy for this function, which would be appropriate for\r\n # large tournaments, would be splitting the players in half by the top\r\n # ranked and bottom ranked, then pairing them up. The basics of this\r\n # function would still be used, but called from within another function\r\n # which splits the player list in half based on rankings.\r\n\r\n return pairings", "title": "" }, { "docid": "f50612b0ea8b11500ded2f41f6fde709", "score": "0.5007234", "text": "def game_ended(game):\n res = (game.result1, game.result2)\n guesses = Guess.objects.filter(game_id=game.id).all()\n points = list(map(lambda g: g.calc(res), guesses))\n penalty = max(p[0] for p in points) if len(points) > 0 else None\n only = sum(1 for p in points if p[1]) == 1\n for g in guesses:\n g.calc(res, penalty, only)\n g.save()\n\n for p in Points.objects.annotate(total_points=Sum('user__guesses__points'), total_correct=Sum(Case(When(user__guesses__points__lte=-3, then=1), default=0, output_field=models.IntegerField()))):\n p.points = p.total_points\n p.correct = p.total_correct\n p.save()", "title": "" }, { "docid": "9f4b79ec16fce345045830b1c054943f", "score": "0.5005999", "text": "def play_game(self):\n while True:\n result = self.find_winner()\n if result:\n return result\n self.deal()\n self.list_of_stacks = []\n self.create_initial_stacks()\n self.collect_discards()\n self.fix_stacks()\n self.make_deck()\n random.shuffle(self.deck)", "title": "" }, { "docid": "fc0a1b6c20cab39b2f13d0ee934c1d57", "score": "0.4992263", "text": "def update_phrase_guessed(self):\n count = len(self.char_in_phrase_list)\n for char in self.char_in_phrase_list:\n if char.char_guessed == True:\n count -= 1\n if count == 0:\n self.phrase_guessed = True", "title": "" }, { "docid": "7e96d58fae91404d4f4fe9635719d928", "score": "0.49859312", "text": "def getGuess(self):\n\n\n usrGuess = input('Enter a character that has not been guessed or * for a hint: ')\n if usrGuess not in self.lettersGuessed and usrGuess != '*':\n self.secretWord.update(usrGuess)\n if usrGuess not in self.secretWord.__str__(): # decrement allotedGuesses if letter is not in word\n 
self.allotedGuesses -= 1\n self.lettersGuessed.append(usrGuess)\n elif usrGuess == '*':\n self.allotedGuesses -= 1\n print('Hint: %s' %self.wordDic[self.secretWord.__str__()])\n else:\n self.getGuess()", "title": "" }, { "docid": "7e2e3544cfad5d57767a4ef02fca8db6", "score": "0.49843055", "text": "def user_try(self):\n\t\tprint(self.display_info(self.blank_word))\n\t\tguess = input(\"\\nGuess a letter \")\n\t\tif guess == '':\n\t\t\treturn self.nothing_guessed()\n\t\telif not guess.isalpha():\n\t\t\treturn self.not_letter()\n\t\telif guess in self.bad_guesses:\n\t\t\treturn self.same_letter_guess()\n\t\treturn [guess, self.blank_word]", "title": "" }, { "docid": "8b883c26ec32c57c12d57cfad25573b2", "score": "0.49796996", "text": "def swissPairings(tournament='blnk'):\n db = connect()\n db_cursor = db.cursor()\n getPlayers = \"SELECT id, name FROM v_standings\"\n fromTournament = \" WHERE tournament = %s\"\n if tournament == 'blnk':\n db_cursor.execute(getPlayers)\n else:\n db_cursor.execute(getPlayers + fromTournament, (tournament,))\n players = db_cursor.fetchall()\n swissPairs = []\n alreadyPlayed = []\n recordBye = []\n countOfPlayers = len(players)\n # Assign a bye week if there is an odd number of players in the round\n if countOfPlayers % 2:\n playersByLeastWins = \"\"\"\n SELECT v_standings.id AS id, v_standings.name AS name\n FROM v_standings\n ORDER BY v_standings.wins, v_standings.omw\n \"\"\"\n db_cursor.execute(playersByLeastWins)\n playersByLeastWins = db_cursor.fetchall()\n playersAlreadyBye = \"\"\"\n SELECT player_id AS id, player_name AS name\n FROM v_results\n WHERE opponent_id=0\n GROUP BY player_id, player_name\n \"\"\"\n db_cursor.execute(playersAlreadyBye)\n playersAlreadyBye = db_cursor.fetchall()\n byeCandidates = [player for player in playersByLeastWins\n if player not in playersAlreadyBye]\n playerWithBye = [byeCandidates[0],]\n players = [player for player in players if player not in playerWithBye]\n recordBye = (byeCandidates[0][0], byeCandidates[0][1], 0, 'BYE')\n # print '==> Bye week for ' + str(playerWithBye)\n # Pair players based on the stipulations in the doc string\n while countOfPlayers > 1:\n player = players[0]\n findOpponents = \"\"\"\n SELECT waldo.id, waldo.name\n FROM (\n SELECT v_standings.*, oppid.played\n FROM v_standings LEFT OUTER JOIN (\n SELECT v_results.opponent_id AS played\n FROM v_results\n WHERE v_results.player_id = %s\n GROUP BY v_results.opponent_id) AS oppid\n ON v_standings.id = oppid.played) AS waldo\n WHERE waldo.played IS NULL AND waldo.id <> %s\n \"\"\"\n withTournament = \" AND waldo.tournament =%s\"\n byeInEffect = \" AND waldo.id <> %s\"\n if tournament == 'blnk':\n if recordBye == []:\n db_cursor.execute(findOpponents,\n (str(player[0]), str(player[0])))\n else:\n db_cursor.execute(findOpponents + byeInEffect,\n (str(player[0]), str(player[0]), str(playerWithBye[0][0])))\n else:\n if recordBye == []:\n db_cursor.execute(findOpponents + withTournament,\n (str(player[0]), str(player[0]), tournament))\n else:\n db_cursor.execute(findOpponents + withTournament + byeInEffect, (str(player[0]), str(player[0]), tournament,\n str(playerWithBye[0][0])))\n\n opponentList = db_cursor.fetchall()\n opponentList = [opponent for opponent in opponentList\n if opponent not in alreadyPlayed]\n try:\n opponent = opponentList[0]\n # print '==> ' + str(player) + ' ... vs ... 
' + str(opponent)\n match = player + opponent\n swissPairs += (match,)\n alreadyPlayed += (player, opponent)\n players = [x for x in players if x not in (player, opponent)]\n countOfPlayers = len(players)\n except:\n print str(player) + ' has played all opponents in Tournament: ' + tournament\n print 'Aborting swissPairings().'\n break\n db.rollback()\n db.close()\n recordBye = [recordBye,]\n for pair in swissPairs:\n print '==> ' + str(pair)\n if recordBye == [[]]:\n return swissPairs\n else:\n print '==> ' + str(recordBye)\n return recordBye + swissPairs", "title": "" }, { "docid": "8d6cd34e17bc3d5b32720c355b8674ee", "score": "0.49764806", "text": "def display_board(self, players):\n self.console.write(\"--------------------\")\n for player in players:\n guess = player.get_guess()\n self.console.write(f\"Player {player.get_name()}: {guess.get_guess()}, {guess.get_hint()}\")\n self.console.write(\"--------------------\")", "title": "" }, { "docid": "f3e0eabd7e28858f909a8605ae72957b", "score": "0.4975844", "text": "def checkGuess(self, game):\n actualGuessWord = list(game.word)\n guessWord = input(\"Enter your guess: \")\n if(guessWord.lower() == game.word):\n print(\"Correct guess\")\n print('word - ' + game.word)\n #scoring\n score = 0\n for l in [i for i, letter in enumerate(game.progressWord) if letter == '-']:\n score = score + StringDatabase.getFrequencyOfLetter(actualGuessWord[l])\n if(game.missedLetters > 0):\n score = score / game.missedLetters\n \n game.score = game.score + score\n return True\n else:\n print('Wrong guess')\n game.badGuesses = game.badGuesses + 1\n #scoring\n score = 0\n for l in [i for i, letter in enumerate(game.progressWord) if letter == '-']:\n score = score + StringDatabase.getFrequencyOfLetter(actualGuessWord[l])\n score = score * 0.10\n game.score = game.score - score\n return False", "title": "" }, { "docid": "16832909a662ae8ebfbdc6451157166a", "score": "0.49745584", "text": "def check_fallback_guesses(next_char, guesses):\n guess_count = 0\n for guess in guesses:\n guess_count += 1\n if check_guess(next_char, guess):\n return guess, guess_count\n\n # If that failed, we're screwed!\n print('Exhaused all fallbacks! 
Failed to guess phrase.')\n sys.exit(1)", "title": "" }, { "docid": "b4ceed8223c404ae71b3d649d297600e", "score": "0.49608305", "text": "def populate_friends_list(self):\n # Grab whatever is in the main box, format it and send it out to SteamQuery\n steamid = self.SteamUsername.displayText()\n steamid = str(steamid)\n s = steam.SteamQuery(steamid)\t\n (in_game_friends,online_friends) = s.get_friend_status()\n\n # Handle errors from SteamQuery class \n if in_game_friends is None and online_friends is None:\n self.ListSteamFriends.addItem(\"Unable to retrieve friends listing.\")\n return\n elif len(in_game_friends) == 0 and len(online_friends) == 0:\n self.ListSteamFriends.addItem(\"No friends returned.\")\n return\n\n # Add in game friends\n for friend in in_game_friends:\n item = \"%s\\nIn-Game\\n%s\" % (friend['friend'],friend['game'])\n if friend['server'] is not 'No server':\n item += \"\\nServer: \"+friend['server']\n else:\n item += \"\\n\"+NOT_ON_SERVER\n self.ListSteamFriends.addItem(item)\n # Write an index for future reference\n self.serverlist[self.ListSteamFriends.count() - 1] = friend['server']\n\n # Add on-line friends\t\n for online_friend in online_friends:\n line = \"%s\\nOnline\" % (online_friend)\n self.ListSteamFriends.addItem(line)", "title": "" }, { "docid": "6919f2dadaf9c82ff680ebfe5773581b", "score": "0.49563497", "text": "def swissPairings():\n pairs = []\n num_players = countPlayers()\n db, cursor = connect()\n query = \"SELECT max(round) FROM matches;\"\n cursor.execute(query)\n curr_round = cursor.fetchone()[0]\n db.close()\n # Number of players have to be even\n if num_players % 2 != 0:\n print \"Number of players must be even\"\n standings = playerStandings()\n # This check is to avoid pairing where a round of match is not yet finished\n if len(standings) != num_players * curr_round:\n print \"Current match is not finished yet\"\n pair_count = 0\n pair = ()\n # Assign pair for players with equal or closest points\n for s in standings:\n if s[3] == curr_round:\n if len(pair)/2 < 2:\n pair += (s[0], s[1],)\n if pair_count % 2 == 1:\n pairs.append(pair)\n pair = ()\n pair_count += 1\n return pairs", "title": "" }, { "docid": "3236f144934af6c455c9121781668254", "score": "0.49538162", "text": "def onPlayerNext(self):\n if self.PlayerCurPage < self.PlayerMaxPage:\n self.PlayerCurPage += 1\n else:\n return\n self.entPlayerPage.set(self.PlayerCurPage)\n self.updatePlayerSearch()", "title": "" }, { "docid": "870826c053ef18ec884acd98c08c7d51", "score": "0.49442756", "text": "def run_single_game(words_list):\r\n new_word = hangman_helper.get_random_word(words_list)\r\n wrong_guesses = []\r\n error_count=0\r\n new_pattern = \"_\"*len(new_word)\r\n msg = hangman_helper.DEFAULT_MSG\r\n while(\"_\" in new_pattern and error_count < hangman_helper.MAX_ERRORS):\r\n #here start each round of the game, based on the player's guesses.\r\n hangman_helper.display_state\\\r\n (new_pattern,error_count,wrong_guesses,msg,ask_play=False)\r\n guess = hangman_helper.get_input()\r\n if guess[0] == hangman_helper.LETTER:\r\n if guess[1] not in string.ascii_lowercase or len(guess[1]) != 1:\r\n msg = hangman_helper.NON_VALID_MSG\r\n elif guess[1] in new_pattern or guess[1] in wrong_guesses:\r\n msg = hangman_helper.ALREADY_CHOSEN_MSG + guess[1]\r\n elif guess[1] in new_word:\r\n new_pattern = update_word_pattern\\\r\n (new_word,new_pattern,guess[1])\r\n msg = hangman_helper.DEFAULT_MSG\r\n else:\r\n error_count +=1\r\n wrong_guesses.append(guess[1])\r\n msg = hangman_helper.DEFAULT_MSG\r\n elif 
guess[0] == hangman_helper.HINT:\r\n hint_letter = choose_letter\\\r\n (filter_words_list(words_list,new_pattern,wrong_guesses),\\\r\n new_pattern)\r\n msg = hangman_helper.HINT_MSG + hint_letter\r\n if new_word == new_pattern:\r\n msg = hangman_helper.WIN_MSG\r\n else:\r\n msg = hangman_helper.LOSS_MSG + new_word\r\n hangman_helper.display_state\\\r\n (new_pattern,error_count,wrong_guesses,msg,True)", "title": "" }, { "docid": "de199ddab29fc4762ddf942326f86e5f", "score": "0.4941043", "text": "def parse_list(self, response):\r\n self.logger.debug('Got successful response from {}'.format(response.url))\r\n players_table_view = '//*[@id=\"ctl00_cplhMainContent_dgrdRoster\"]/tr'\r\n players = response.xpath(players_table_view);\r\n school_url = response.url[response.url.index('/')+2:response.url.index('.com')] #domain for school\r\n\r\n roster_year = (response.xpath(\"//div[@id='ctl00_divPageTitle']/h1/text()\")\r\n .extract_first()\r\n .split(\" \")[0])\r\n\r\n for player in players:\r\n #extracting data from table\r\n playerItem = Player()\r\n player_name = player.xpath(\".//td['@class = roster_dgrd_full_name']/a/text()\").extract() #array [fn, ln]\r\n\r\n if(len(player_name) == 0):\r\n continue #skipping header row\r\n\r\n player_first_name = player_name[0].strip()\r\n player_last_name = \" \".join(player_name[1:]).strip()\r\n\r\n player_position = player.xpath('.//td['+ self.reference_index(school_url, 'PLAYER_POSITION', roster_year) + ']/text()').extract_first().strip() #'position'\r\n\r\n player_class_year = player.xpath('.//td['+ self.reference_index(school_url, 'ACADEMIC_YEAR', roster_year) + ']/text()').extract_first().strip()\r\n\r\n player_height = player.xpath('.//td['+ self.reference_index(school_url, 'HEIGHT', roster_year) + ']/nobr/text()').extract() #array['feet-inches']\r\n\r\n if(len(player_height) == 0):\r\n player_height = 'NA'\r\n else:\r\n player_height = player_height[0].strip()\r\n\r\n player_location = player.xpath('.//td['+ self.reference_index(school_url, 'LOCATION', roster_year) + ']/text()').extract_first().strip().split(\"/\")\r\n\r\n number = player.xpath('.//td['+ self.reference_index(school_url, 'NUMBER', roster_year) + ']/text()').extract_first().strip()\r\n\r\n weight = player.xpath('.//td['+ self.reference_index(school_url, 'WEIGHT', roster_year) + ']/text()').extract_first().strip()\r\n\r\n #Item Processing\r\n playerItem['college'] = get_college_from_url(urlDomain=response.url[response.url.index('/')\r\n + 2:response.url.index('.com')+4])\r\n playerItem['previousSchool'] = 'NA'\r\n self.process_player_location(playerItem, player_location)\r\n playerItem['rosterYear'] = roster_year\r\n\r\n playerItem['collegeLeague'] = check_league(urlDomain=response.url[response.url.index('/')\r\n + 2:response.url.index('.com')+4])\r\n\r\n href = player.xpath(\".//td['@class = roster_dgrd_full_name']/a/@href\").extract_first().strip()\r\n link = response.url[0:response.url.index('.com')+4] + href\r\n\r\n playerItem['profileLink'] = link\r\n\r\n SoccerSpider.process_other_attribute(playerItem, player_first_name, 'firstName')\r\n SoccerSpider.process_other_attribute(playerItem, player_last_name, 'lastName')\r\n SoccerSpider.process_other_attribute(playerItem, player_position, 'position')\r\n SoccerSpider.process_other_attribute(playerItem, player_class_year, 'classYear')\r\n TableSpider.process_other_attribute(playerItem, player_height, 'height')\r\n SoccerSpider.process_other_attribute(playerItem, number, 'number')\r\n SoccerSpider.process_other_attribute(playerItem, 
weight, 'weight')\r\n\r\n yield playerItem\r\n\r\n pages_to_visit = response.xpath('//*[@id=\"ctl00_cplhMainContent_ddlPastRosters\"]/option/@value').extract()\r\n\r\n num_pages_to_visit = SoccerSpider.CURRENT_ROSTER_YEAR - 2006 + 1\r\n\r\n for i in range(1, num_pages_to_visit): #skipping index 0 because that is the starting page already parsed. Only parsing 10 years if applicable\r\n if(i < len(pages_to_visit)):\r\n urlToVisit = (response.url[0:response.url.index('?')+1] + \"roster=\" + pages_to_visit[i]\r\n + \"&\" + response.url[response.url.index('path'):])\r\n\r\n yield scrapy.Request(url = urlToVisit, callback=self.parse_list)", "title": "" }, { "docid": "e990bb7a340ca4b0ef981dd6a8ccc2c7", "score": "0.49406877", "text": "def generate_guess(clue: List[str], already_guessed: List[str]) -> str:\n for guess in guesser(clue):\n if not bad_clue(guess, clue):\n if guess not in already_guessed:\n return guess\n return \"I'm sorry. I don't know what else to say...\"", "title": "" }, { "docid": "bcf8537bc575ad1af933a0560ccb8c01", "score": "0.49399117", "text": "def swissPairings(): \n conn = connect()\n cursor = conn.cursor()\n\t\n\t# Get all registered players sorted by number of wins.\n cursor.execute(\"select * from players order by noOfWins desc;\")\n players = cursor.fetchall()\n pairings = []\n\n # Get a player and match with the next one closest in no. of wins.\n # Case of even number of players.\n if (len(players)%2) == 0:\n i=0\n \twhile i < len(players):\n pairings.append((players[i][0], players[i][1], players[i+1][0], players[i+1][1]))\n i=i+2\n # Create games for each match up and add to database.\n for pair in pairings:\n\t\tcursor.execute(\"insert into games (player1, player2) values (%s, %s)\",(pair[0],pair[2]))\n\t\n conn.commit()\n cursor.close()\n conn.close()\n return pairings", "title": "" }, { "docid": "8919d50ceb703f66d09761e64a9abffa", "score": "0.49391133", "text": "def forward(self, observation):\r\n self.game_step = observation[2]\r\n if self.game_step == -1:\r\n if self.current_fight_number == len(self.agents_combinations)-1:\r\n self.current_fight_number = -1\r\n self.current_fight_number += 1\r\n\r\n # Print number of the pair playing right now / total number of pairs.\r\n # print(self.current_fight_number, \"/\", len(self.agents_combinations)-1)\r\n self.current_fight = self.agents_combinations[self.current_fight_number]\r\n self.player = self.current_fight[0]\r\n self.player_one = self.current_fight[1]\r\n\r\n ## print(\"#### AGENTS PLAYING:\",self.player, self.player_one)\r\n self.current_game[0].append([self.player, self.player_one])\r\n self.current_game[1].append([self.player_one, self.player])\r\n\r\n if self.current_fight_number == 0:\r\n self.current_game_number += 1\r\n\r\n current_players_actions = [self.agents[self.player].forward([observation[0],\r\n self.game_step,\r\n self.player_one,\r\n observation[3][1]\r\n ]),\r\n self.agents[self.player_one].forward([observation[1],\r\n self.game_step,\r\n self.player,\r\n observation[3][0]\r\n ])]\r\n\r\n if self.game_step != 19:\r\n self.current_game[0].append(current_players_actions[0])\r\n self.current_game[1].append(current_players_actions[1])\r\n\r\n return current_players_actions", "title": "" }, { "docid": "3a877b7b4bb0e0e9a7920e800601fb86", "score": "0.4939105", "text": "def reportMatch(winner, loser):\n # winner = 1\n # loser = 2\n #reportMatch(id1, id2)\n #reportMatch(id3, id4)\n conn = psycopg2.connect(\"dbname=tournament\")\n cur = conn.cursor()\n\n # get the latest standings:\n cur.execute(\"SELECT 
player_id, player_name, total_wins, total_matches FROM player_list ORDER BY player_id;\")\n standings_before_match = cur.fetchall()\n\n\n winner_current_wins = standings_before_match[winner-1][2]\n #print \"winner_current_wins = \", winner_current_wins\n winner_adjusted_wins = winner_current_wins + 1\n #print \"winner_adjusted_wins = \", winner_adjusted_wins\n\n winner_current_matches = standings_before_match[winner-1][3]\n #print \"winner_current_matches = \", winner_current_matches\n winner_adjusted_matches = winner_current_matches + 1\n #print \"winner_adjusted_matches = \", winner_adjusted_matches\n\n loser_current_matches = standings_before_match[loser-1][3]\n #print \"loser_current_matches = \", loser_current_matches\n loser_adjusted_matches = loser_current_matches + 1\n #print \"loser_adjusted_matches = \", loser_adjusted_matches\n\n cur.execute(\"UPDATE player_list SET total_wins = %s WHERE player_id = %s\",(winner_adjusted_wins, winner))\n cur.execute(\"UPDATE player_list SET total_matches = %s WHERE player_id = %s\", (winner_adjusted_matches, winner))\n cur.execute(\"UPDATE player_list SET total_matches = %s WHERE player_id = %s\", (loser_adjusted_matches, loser))\n conn.commit()\n\n cur.execute(\"SELECT player_id, player_name, total_wins, total_matches FROM player_list ORDER BY player_id;\")\n standings_after_match = cur.fetchall()\n #print \"\\nstandings_after_match: \", standings_after_match\n\n cur.execute(\"SELECT player_id, player_name, total_wins, total_matches FROM player_list ORDER BY player_id;\")\n standings = cur.fetchall()\n #print \"\\ncheck standings variable: \", standings\n return standings_after_match\n\n\n cur.close()\n conn.close()", "title": "" }, { "docid": "4b0307f53c67af6f0f16f2423781a117", "score": "0.49386308", "text": "def results(self):\n dealer_hand = self.players[-1].hands[0].value\n if dealer_hand == 0:\n print('Dealer busted!')\n else:\n print('Dealer Stand with {}'.format(dealer_hand))\n\n for xx in range(len(self.players) - 1):\n player = self.players[xx]\n for hand in player.hands:\n hand_value = hand.value\n if hand_value == 0:\n continue\n else:\n if hand_value > 21:\n print('{} bust! {}'.format(player.name, hand_value))\n elif hand_value == dealer_hand:\n print('{} push! {}'.format(player.name, hand_value))\n elif hand_value < dealer_hand:\n print('{} loss! {}'.format(player.name, hand_value))\n elif hand_value > dealer_hand:\n print('{} win! {}'.format(player.name, hand_value))\n else:\n print('!!! 
Forgot something !!!')\n print('\\n')", "title": "" }, { "docid": "3c9ce3444443b50ca48bd5b75b72a99b", "score": "0.49343082", "text": "def hangman():\n\n word_list = [\"Alexander the Great\", \"Napoleon\", \"Philip of Macedon\",\n \"Henry of England\", \"Judas Iscariot\", \"The Enlightenment\",\n \"Leonardo da Vinci\", \"Adolf Hitler\", \"Winston Churchill\",\n \"Teddy Roosevelt\", \"Martin van Buren\", \"Abraham Lincoln\",\n \"George Washington\", \"Peter the Great\", \"Pope Francis\",\n \"Ludwig van Beethoven\", \"Maximillien Robespierre\", \"Leopold of Belgium\",\n \"Augustus Caesar\", \"Plato\", \"Maximilian Holy Roman Emperor\", \"Louis XVI\",\n \"Otto von Bismarck\", \"Emperor Hirohito\", \"Emperor Meiji\", \"Erwin Rommel\",\n \"Robert E Lee\", \"Catherine the Great\", \"Queen Victoria\", \"Chandragupta\",\n \"Joseph Stalin\", \"Benito Mussolini\", \"Charlemagne\", \"Charles the Bold\",\n \"Dirk Fock\", \"Pieter Mijer\", \"Alexander Graham Bell\", \"Neil Armstrong\",\n \"Woodrow Wilson\", \"Mao Zedong\", \"Cleopatra\", \"Ho Chi Minh\",\n \"William Shakespeare\"]\n\n guess_count = 7\n\n # Print the header\n print(\"Hangman Game - Seven Chances\")\n print(\"Type \\\"Hint\\\" to get a hint.\")\n\n # Set up the secret\n secret = random.choice(word_list)\n guessed_letters = \"\"\n if print_secret:\n print(\"DEV MODE: The secret is \\\"\", secret, \"\\\"\", sep=\"\")\n\n # Print the format of the string being guessed\n print(hangman_display(guessed_letters, secret))\n while not is_done_guessing(guessed_letters, secret) and guess_count > 0:\n # Get the guess\n guess = raw_input(\"\\nMake a guess for a letter: \").lower()\n # Checks that the input is only one letter\n if guess.lower() == \"hint\":\n # This will reveal one character which has not been guessed\n for secret_char in secret:\n # This means that the character has already been guessed by the\n # user\n if secret_char in guessed_letters:\n continue\n # This is under the condition that this secret_char has not been\n # guessed by the user yet, which means it can be used as a hint\n secret_char = secret_char.lower()\n guessed_letters += secret_char\n print(\"Your hint is: One of the characters is\", secret_char)\n break\n print(hangman_display(guessed_letters, secret))\n continue\n \n if guess == secret.lower():\n print(\"You guessed the word!\")\n guessed_letters += secret.lower()\n break\n \n if len(guess) == 0:\n print(\"You didn't guess one letter!\")\n continue\n \n if len(guess) != 1:\n print(\"You failed to guess the whole word.\")\n guess_count -= 1\n print(\"You have\", guess_count, \"guesses left.\")\n # Skips the current loop and goes back to the while condition\n continue\n # If the letter has already been guessed, this will print an error\n # statement.\n if guess in guessed_letters:\n print(\"You already guessed this.\")\n continue\n guessed_letters += guess\n\n # Determines if the guess is correct\n if guess.lower() not in secret.lower():\n guess_count -= 1\n print(\"You guessed incorrectly.\")\n print(\"You have\", guess_count, \"guesses left.\")\n\n print(hangman_display(guessed_letters, secret))\n if is_done_guessing(guessed_letters, secret):\n # Condition upon which the user has guessed remaining when the game ends\n # This is a win\n print(\"\\nCongratulations! You won!\")\n else:\n # This is a lose\n print(\"\\nYou lost!\")\n print(\"The word was:\", secret)\n replay_in = raw_input(\"Replay? 
(y/n) \")\n if replay_in.lower() == \"y\":\n hangman()", "title": "" }, { "docid": "2f721ac3894653cfb1c80d67297d17d7", "score": "0.49327767", "text": "def player_turn_guess(turn_number, correct_pattern):\n # print(\"DEV CHEAT!!! correct is \" + \"\".join(correct_pattern))\n str_input = raw_input(\"Turn:\" + str(turn_number) + \" / \" + str(MAX_TURNS) +\n \". Input guess (\" + str(len(correct_pattern)) +\n \" digits): \")\n num_correct_in_place = 0\n num_contained_out_of_place = 0\n already_counted = []\n\n # Pick digits that are in correct place.\n min_len = min(len(str_input), len(str_input))\n for i in range(min_len):\n if(correct_pattern[i] == str_input[i]):\n num_correct_in_place += 1\n already_counted.append(str_input[i])\n\n # only count a value as out of place iff\n # 1. It wasn't counted correctly as \"in place\"\n # 2. It wasn't counted before, that is the user inputted the same number\n # twice\n for i in range(len(str_input)):\n if((not str_input[i] in already_counted) and (str_input[i] in correct_pattern)):\n num_contained_out_of_place += 1\n already_counted.append(str_input[i])\n\n print(\"Digits in place: \" + str(num_correct_in_place))\n print(\"Digits contained but out of place: \" +\n str(num_contained_out_of_place))\n # newline for readability\n print(\"\\n\")\n if(num_correct_in_place == len(correct_pattern)):\n end_input = raw_input(\"You Win! Would you like to play again (Y/N)?\")\n if('n' in end_input.lower()):\n return STATE_QUIT\n return STATE_WIN\n elif(turn_number >= MAX_TURNS):\n end_input = raw_input(\"You Lose, out of turns! Answer was: \" +\n \"\".join(correct_pattern) +\n \". Would you like to play again (Y/N)?\")\n if('n' in end_input.lower()):\n return STATE_QUIT\n return STATE_LOSE\n else:\n return STATE_GAMEPLAY", "title": "" }, { "docid": "28b1e0d11db136eb5dccd77cc8205efb", "score": "0.49308494", "text": "def arrange_singlegame(self):\n singlegame = SingleGame(self.player_1, self.player_2)\n singlegame.perform_game()\n singlegame.show_results()", "title": "" }, { "docid": "8c6976196cf37a0342220d56dfb69ed1", "score": "0.4929615", "text": "def guess(self, user_guess):\n\n\n try:\n number = int(user_guess)\n if (number > MAX_GUESS) or (number < MIN_GUESS):\n return Result.INVALID_INPUT\n except ValueError:\n return Result.INVALID_INPUT\n\n self.attempts += 1\n\n correct_guess = number == self.target_number\n last_chance = self.attempts == MAX_ATTEMPTS\n game_already_over = self.state == GameState.GAME_OVER or (self.attempts > MAX_ATTEMPTS)\n\n # If the game is over, do not process a turn.\n if game_already_over:\n self.state = GameState.GAME_OVER\n return Result.GAME_OVER\n\n # Last turn\n if last_chance:\n self.state = GameState.GAME_OVER\n\n if not correct_guess:\n score(-1)\n return Result.YOU_LOST\n\n if correct_guess:\n score(1)\n self.state = GameState.GAME_OVER\n return Result.YOU_WON\n elif number > self.target_number:\n return Result.GUESS_LOWER\n else:\n return Result.GUESS_HIGHER", "title": "" }, { "docid": "aa30de17da2d7e31d4ed37fde78ece43", "score": "0.49280897", "text": "def user(username):\n global riddle_index, counter, final_score\n data = []\n with open(\"data/riddles.json\", \"r\") as json_data:\n data = json.load(json_data)\n \n if request.method == \"POST\" and \"check\" in request.form:\n #Click check button to see if answer is correct\n if request.form[\"answer\"].lower().strip() == data[riddle_index][\"answer\"]:\n \"\"\"\n to prevent score increasing if check button pressed multiple\n times on same question\n \"\"\"\n if 
riddle_index >= counter:\n counter += 1\n riddle_index += 1\n \n if len(data) == riddle_index:\n #move to exitgame.html page if game is finished\n #save username to user_played.txt for leaderboard\n #save score to score.txt\n riddle_index = 0\n save_user(username, \"user_played\")\n save_score(counter)\n final_score = counter\n counter = 0\n return redirect(url_for('show_exitgame'))\n \n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"])\n \n else:\n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"], correct = \"You've already answered this question correctly\")\n \n else:\n #if answer is incorrect\n if request.form[\"answer\"] == \"\":\n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"], correct = \"Enter your answer\")\n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"], correct = \"Wrong answer, try again\")\n \n elif request.method == \"POST\" and \"next\" in request.form:\n #to next riddle\n \n riddle_index += 1\n \n if len(data) == riddle_index:\n #move to exitgame.html page if game is finished\n #save username to user_played.txt for leaderboard\n #save score to score.txt\n riddle_index = 0\n save_user(username, \"user_played\")\n save_score(counter)\n final_score = counter\n counter = 0\n return redirect(url_for('show_exitgame'))\n \n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"])\n \n elif request.method == \"POST\" and \"previous\" in request.form:\n if riddle_index >= 1:\n #to previous riddle\n riddle_index -= 1\n if len(data) < 0:\n riddle_index = 0\n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"])\n else:\n riddle_index = 0\n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"], correct=\"You're only on the first question\")\n\n if request.method == \"POST\" and \"exit-game\" in request.form:\n return redirect(\"/\") \n \n if request.method == \"POST\" and \"leaderboard\" in request.form:\n return redirect('/leaderboard') \n \n return render_template(\"riddles.html\", username = username, data = data[riddle_index][\"question\"])", "title": "" }, { "docid": "8a327b46e8c4c62af42e99f8379b0f36", "score": "0.49268144", "text": "def swissPairings():\n DB, cursor = connect()\n cursor.execute('''CREATE VIEW allplayers as \n SELECT players.playerid, players.playername, matches.wins, \n matches.draws, matches.matchnum \n FROM players, matches \n WHERE players.playerid = matches.playerid''')\n cursor.execute('''SELECT * \n FROM allplayers''')\n all_players = cursor.fetchall()\n numplayers = len(all_players)\n if numplayers % 2 == 1:\n playerBye(numplayers, all_players)\n numplayers -= 1\n cursor.execute('''SELECT * \n FROM allplayers \n ORDER BY matchnum, wins desc, draws desc''')\n p = 0\n pairings = []\n while numplayers > p:\n pair = cursor.fetchmany(size = 2)\n pairings.append((pair[0][0], pair[0][1], pair[1][0], pair[1][1])) \n p += 2 # advance 2 players at a time\n DB.close()\n return pairings", "title": "" }, { "docid": "c0d01fe1413518fef1c17499e93220f1", "score": "0.4920894", "text": "def play(self, player, game):\n pickup_amount = self.get_pickup_amount()\n pick_cards = game.pickup_pile.pick(pickup_amount)\n next_player = game.get_turns().peak()\n next_player.get_deck().add_cards(pick_cards)", "title": "" }, { "docid": 
"c0d01fe1413518fef1c17499e93220f1", "score": "0.4920894", "text": "def play(self, player, game):\n pickup_amount = self.get_pickup_amount()\n pick_cards = game.pickup_pile.pick(pickup_amount)\n next_player = game.get_turns().peak()\n next_player.get_deck().add_cards(pick_cards)", "title": "" }, { "docid": "ad0d9a3f845bd4c05c78aadcc248b35d", "score": "0.49205646", "text": "def game_loop(players_number):\n \n players_indeces = range(players_number)\n\n print_turn_results = lambda index, weapon: print(f'Bot {index}: {weapon}') if index != player_index else print(f'You: {weapon}')\n print_leaderboard = lambda place, index : print(f'{place}. Bot {index}') if index != player_index else print(f'{place}. You')\n\n while True:\n\n turn_weapons = form_weapons_set(players_indeces)\n\n [print_turn_results(index, available_weapons[weapon]) for index, weapon in turn_weapons.items()]\n\n turn_winners = game_turn(turn_weapons)\n\n players_indeces = turn_winners\n\n if len(turn_winners) == 1:\n \n winner_index = turn_winners[0]\n \n print(f'Bot {winner_index}: won!') if winner_index!= player_index else print(f'You: won!')\n \n leaderboard.append(winner_index)\n [print_leaderboard(len(leaderboard) - leaderboard.index(player_number), player_number) for player_number in leaderboard[::-1]]\n \n break", "title": "" }, { "docid": "bd1ddcc100949abb1b342b8e687cd055", "score": "0.49088734", "text": "def hit_user(playing_cards, user_hand):\n user_hand.append(playing_cards[0])\n playing_cards.pop(0)", "title": "" }, { "docid": "3f79ee6b8bfe5e2ca59dd6c5d50a3ce8", "score": "0.4905156", "text": "def add_players(self):\n for index in range(self.player_count):\n self.players[index] = Player(index, self.deck.draw_hand())", "title": "" }, { "docid": "3edaffb1bcd4988eb6479ff23674ce27", "score": "0.48953596", "text": "def playGame(wordList):\n user_input = ''\n comlasthand = {}\n humanlasthand = {}\n lastplayer = None\n while user_input is not 'e':\n user_input = raw_input(\"Enter n to play a new hand, r to replay the last hand, e to end the game: \")\n if user_input is 'e':\n print 'Game ended!'\n break\n elif user_input is 'n':\n userinput = raw_input(\"Enter u for user play, c for comp play: \")\n if userinput is 'u':\n lastplayer = 'Human'\n humanlasthand = dealHand(HAND_SIZE)\n playHand(humanlasthand.copy(), wordList, HAND_SIZE)\n elif userinput is 'c':\n lastplayer = 'Comp'\n comlasthand = dealHand(HAND_SIZE)\n compPlayHand(comlasthand.copy(), wordList, HAND_SIZE)\n else:\n print 'Invalid Command'\n elif user_input is 'r' and lastplayer is 'Human':\n playHand(humanlasthand.copy(), wordList, HAND_SIZE)\n elif user_input is 'r' and lastplayer is 'Comp':\n compPlayHand(comlasthand.copy(), wordList, HAND_SIZE)\n elif user_input is 'r' and lastplayer is None:\n print 'You haven\\'t played a hand yet, please play a hand.\\n'\n else:\n print 'Invalid Command!'", "title": "" }, { "docid": "7b2b3f28e49610fc830429bed3fa1ef6", "score": "0.48911345", "text": "def do_guess_round():\n while True:\n players_guess = input(PROMPT)\n if computers_number == int(players_guess):\n print('Correct!')\n break\n elif computers_number > int(players_guess):\n print('Too low')\n else:\n print('Too high')", "title": "" }, { "docid": "73a54f9ea55557b6634c1c216285c8ca", "score": "0.48901728", "text": "def _record_results(self, game, game_answers, game_questions, guess, result, number_of_objects):\n\t\t# TODO: make sure this can be used for new objects and when playing with the human's choice of object\n\n\t\tlog.info('Recording object results to the 
database')\n\t\tfor i in range(0, len(game_questions)):\n\t\t\tT = questions.get_t(self.id, game_questions[i], number_of_objects)\n\t\t\tif game_answers[i] == True:\n\t\t\t\tdb.cursor.execute(\"SELECT yes_answers FROM Pqd where t_value = %s\", (T,))\n\t\t\t\tyes_count = db.cursor.fetchone()[0]\n\t\t\t\t#print yes_count, 'yes'\n\t\t\t\tdb.cursor.execute(\"UPDATE Pqd SET yes_answers = %s WHERE t_value = %s\", (yes_count + 1, T))\n\n\t\t\tdb.cursor.execute(\"SELECT total_answers FROM Pqd where t_value = %s\", (T,))\n\t\t\ttotal_count = db.cursor.fetchone()[0]\n\t\t\t#print total_count\n\t\t\tdb.cursor.execute(\"UPDATE Pqd SET total_answers = %s WHERE t_value = %s\", (total_count + 1, T))\n\n\t\t\tdb.cursor.execute(\"INSERT INTO answers (oid, qid, answer) VALUES (%s, %s, %s)\", (str(self.id), game_questions[i], game_answers[i]))\n\n\t\t\tdb.connection.commit()\n\n\t\tif result == 0:\n\t\t\tresult = 'lose'\n\t\telse:\n\t\t\tresult = 'win'\n\n\t\t# TODO: clean up all the text files because this is kind of ridiculous\n\t\twith open(\"game.txt\", \"a\") as myfile:\n\t\t\tmyfile.write(str(game.id)+','+ str(self.id) +','+ str(guess.name)+\",\" + str(self.name) + \",\" + str(len(game_questions)) + \",\" + result + \"\\n\")\n\t\tmyfile.close()\n\n\t\twith open(\"answers.txt\", \"a\") as answerfile:\n\t\t\tanswerfile.write(\"\\n\" + str(game.id) + \" \" + str(self.id) + \" \" + result + \"\\n\")\n\t\t\tfor i in range(0, len(game_questions)):\n\t\t\t\tanswerfile.write(tags.get(game_questions[i]) + \" -> \" + str(game_answers[i]) + \"\\n\")\n\t\tanswerfile.close()", "title": "" }, { "docid": "65e476b4f893a1f3fe673cd60f8c4f24", "score": "0.48846197", "text": "def start_turn(self):\n # fetch the word revealed so far\n self.console.display_output(f\"{self.WordObject.revealed_word} ({self.WordObject.word_length}-letter word)\")\n # display state of parachute\n self.draw_parachute()\n \n # displays remaining wrong guesses left\n self.status_report()\n\n # ask for input:\n is_valid_input = False\n\n while not is_valid_input:\n guess = self.console.take_input(\"[ ? ] Guess a letter [a-z]: \")\n guess = guess.lower()\n\n if len(guess) != 1:\n self.console.display_output(f\" !!! Invalid input. Enter one letter only.\")\n continue\n elif guess in self.already_guessed:\n self.console.display_output(f\" !!! 
You already guessed \\\"{guess},\\\" try another letter.\")\n continue\n else:\n # add the letter to the list\n self.already_guessed.append(guess)\n is_valid_input = True\n break\n\n return guess", "title": "" }, { "docid": "6a1287ef3dbcfa583ad99a63d8638b8d", "score": "0.48795512", "text": "def find_winner(self):\n any_winner = False\n for player in self.list_of_players:\n if player.bull_points < -66:\n any_winner = True\n if any_winner:\n result = sorted(self.list_of_players, key=lambda (p): p.bull_points, reverse=True)\n return [(player.name, player.bull_points) for player in result]\n else:\n return None", "title": "" }, { "docid": "4da3b23efbfc08aacc05a9bc1fab6523", "score": "0.4874494", "text": "def deal(self):\n for player in self.list_of_players:\n player.hand = []\n for x in range(self.starting_player, self.starting_player + len(self.list_of_players)):\n player_index = x % len(self.list_of_players)\n cards_dealt = self.deck[0:initial_hand_size]\n self.list_of_players[player_index].hand += cards_dealt\n self.deck = self.deck[initial_hand_size:len(self.deck)]", "title": "" }, { "docid": "0de23615b1b55356bbfe0ff5dd0206c3", "score": "0.48741046", "text": "def swissPairings():\n db = connect()\n cursor = db.cursor()\n offset = 0\n matchingList = []\n sql = \"SELECT * from players;\"\n playerTableResults = cursor.execute(sql)\n tableInfo = cursor.fetchall()\n tableLength = len(tableInfo)\n for row in tableInfo:\n if tableLength-1 >= offset: #so we don't get a bunch of None's after\n sql = \"SELECT player_id, player_name FROM getPlayerTable LIMIT 2 OFFSET %s;\"\n cursor.execute(sql, (offset,))\n offset += 2\n playerOneInfo = cursor.fetchone() #first row\n playerTwoInfo = cursor.fetchone() #second row\n if playerOneInfo != None or playerTwoInfo!= None:\n matchTuple = (int(playerOneInfo[0]), playerOneInfo[1], int(playerTwoInfo[0]), playerTwoInfo[1])\n matchingList.append(matchTuple)\n\n return matchingList\n\n\n \n\n db.close()", "title": "" }, { "docid": "196e99da1ef66207a3da5dbfd20782ad", "score": "0.48722947", "text": "def insertAIPlayers(self, playerList):\n while len(playerList) < (self.myGalaxy.numEmpires - 1):\n playerList.append('ai')\n return playerList", "title": "" }, { "docid": "e6793596a3cfb9b0a3b48b27ad74dd91", "score": "0.48691028", "text": "def append_to_list(list_lineup: list, set_player: set, num_of_players: int, get_best: bool):\n temp_set = set_player.copy()\n temp_list = list()\n\n while len(temp_list) < num_of_players:\n max_player = get_max_player_ovr(temp_set) if get_best else get_random_player(temp_set)\n temp_list.append(max_player)\n temp_set.remove(max_player)\n\n for item in temp_list:\n list_lineup.append(item)", "title": "" }, { "docid": "c2ce0cdede65605da19428289a3e7690", "score": "0.48676205", "text": "def display(HANGMANPICS, wrongGuess):\n \n global hidden\n print HANGMANPICS[len(wrongGuess)] \n #logging.info(HANGMANPICS[len(wrongGuess)])\n for i in range(len(word)):\n if word[i] in rightGuess:\n hidden = hidden[:i] + word[i] +hidden[i+1:]\n print hidden \n #logging.info(hidden) ", "title": "" }, { "docid": "60c5777eb808612e86e2652366b90aa8", "score": "0.48587573", "text": "def get_players(config: dict) -> list:\n # Change in future to use wrapper function\n num_players_json = run_query(config, 'persons?per_page=1')\n num_players = num_players_json['count']\n num_player_pages = ceil(num_players / PAGE_LIMIT)\n for page in range(num_player_pages):\n json_file = DATA_PATH + \"playersPage\" + str(page+1).zfill(5) + \".json\"\n save_query(config, 
'persons?fields=Location&per_page=' + str(PAGE_LIMIT) + '&page=' + str(page+1),\n json_file)\n players_to_return = []\n for page in range(num_player_pages):\n players_json_file = DATA_PATH + \"playersPage\" + str(page + 1).zfill(5) + \".json\"\n with open(players_json_file) as f:\n players_json = json.load(f)\n players_list = players_json['result']\n for player in players_list:\n try:\n zip = player['Location']['postal_code']\n except (KeyError, TypeError):\n zip = \"unknown\"\n # Pad zeroes to zip codes and reduce to 5 digits\n try:\n birth_date = player['birth_date']\n except (KeyError, TypeError):\n birth_date = \"unknown\"\n try:\n email = player['email_address']\n except (KeyError, TypeError):\n email = \"unknown\"\n players_to_return.append({\n 'player_id': player['id'],\n 'email': email,\n 'first_name': player['first_name'],\n 'last_name': player['last_name'],\n 'full_name': player['full_name'],\n 'gender': player['gender'],\n 'birth_date': birth_date,\n 'zip': zip,\n })\n return players_to_return", "title": "" }, { "docid": "97fdc321731808b8952af49c03547f8f", "score": "0.4857593", "text": "def dealer_hit(self, my_cards, deck):\n calc_class = Calculation()\n points = calc_class.point_calc(my_cards)\n results = [my_cards, deck, points]\n while points <= 16:\n results = super().player_hit(my_cards, deck)\n points = results[2]\n\n return results", "title": "" }, { "docid": "d1230707004ccaa5d7cfb9c058f02b6d", "score": "0.4856929", "text": "def start_game(self): \n\n # pick a word list\n word_list = self.pick_list()\n\n # create Word()\n self.keep_playing = True\n self.WordObject = Word(word_list)\n word_length = self.WordObject.word_length\n self.already_guessed = [ ]\n\n self.console.display_output(f\"The word is {word_length} letters long.\")\n self.console.display_output()\n\n # guessing loop\n #while True:\n while self.keep_playing:\n # draw parachute\n \n #if self.keep_playing:\n # pass\n #else:\n # # game end code\n # break\n\n # display the revealed word, take player input\n guess = self.start_turn()\n\n # check guess, update parachute_hp\n # draw parachute \n self.mid_turn(guess)\n\n # display stats\n #self.status_report()\n\n # check for game-ending conditions before \n self.keep_playing = self.is_keep_playing()\n continue \n\n \n if not self.keep_playing and self.parachute_hp == 0:\n self.game_over()\n\n elif not self.keep_playing and self.parachute_hp > 0:\n self.victory()", "title": "" }, { "docid": "08f2c9a4a9cec64418d8a7a3a936d352", "score": "0.4854419", "text": "def playGame(wordList):\n flag = 1\n ans = raw_input(\"Enter 'n' to play a new hand, 'r' to play the lasthand, 'e' to exit the game: \")\n while flag != 0:\n if ans == 'n':\n hand = dealHand(HAND_SIZE)\n hand2 = copy.deepcopy(hand)\n playHand(hand2, wordList)\n ans = raw_input(\"\\nEnter 'n' to play new hand, 'e' to exit the game: \")\n continue\n elif ans == 'r':\n playHand(hand2)\n elif ans == 'e':\n flag = 0\n else:\n print \"\\nInvalid Entry. 
Please input again.\"\n print \"\\nExiting...\\n\"", "title": "" }, { "docid": "7ea7220a82fdca87a4eb99af391b198a", "score": "0.48512158", "text": "def scrape_search(page):\n # initialize games list\n games = []\n\n # scrape browsing page \n url = \"https://boardgamegeek.com/browse/boardgame/page/{}\".format(page)\n response = requests.get(url)\n soup = BeautifulSoup(response.text, features=\"lxml\")\n \n # loop through the hundred games on a single page \n for x in range(1,101):\n time.sleep(5)\n # pull and clean name to compare later\n name = soup.find('div', id='results_objectname{}'.format(x)).text\n name_clean = re.sub(r'\\n+(.+)\\n.*\\n',r'\\1', name)\n \n # pull link id to retrieve stats from api\n link = soup.find('div', id='results_objectname{}'.format(x)).a['href']\n link_id = re.sub(r'.*/(\\d+)/.*', r'\\1', link)\n\n rank = soup.find_all(\"td\", class_=\"collection_rank\")[x-1].a[\"name\"]\n \n # create dictionary\n search_page = {\n 'name_clean' : name_clean,\n }\n \n # pull stats as dictionary from api page for the currently selected game\n url = \"https://www.boardgamegeek.com/xmlapi2/thing?id={}&stats=1\".format(link_id)\n print('game id', link_id)\n parameters = scrape_game_api(url)\n \n # combine dictionaries into one\n search_page.update(parameters)\n\n # update games list\n games.append(search_page)\n\n # scraping count\n print('the following rank just been pulled:', rank)\n return games", "title": "" }, { "docid": "3cd8a217326ddd52c78a54d6bfcdab8d", "score": "0.48496708", "text": "def start_game():\r\n # write your code inside this function.\r\n \r\n print(\"Hi, Welcome to the Guessing Game!\\n\")\r\n # initializing the variables for the loop\r\n attempts = 0\r\n\r\n guess = 0\r\n\r\n the_number = random.randint(1,10)\r\n\r\n high_score = 0\r\n\r\n # the main loop\r\n while True:\r\n # error checking\r\n try:\r\n # checks to make sure there is a high score then displays it\r\n if high_score != 0:\r\n print(\"High Score: \", high_score)\r\n else:\r\n pass\r\n # takes in players guess\r\n guess = int(input(\"Make a guess between 1 and 10: \"))\r\n\r\n # increments attempts by one to track guesses\r\n attempts += 1\r\n\r\n # main decision filter\r\n if guess > 10 or guess < 1:\r\n print(\"That guess is outside of the range\\n\")\r\n # decrements by one on a bad attempt\r\n attempts -= 1\r\n elif guess > the_number:\r\n print(\"It's lower\\n\")\r\n elif guess < the_number:\r\n print(\"It's higher\\n\")\r\n # ends the round if the players guesses the number\r\n elif guess == the_number:\r\n print(\"Got it\\n\")\r\n print(\"It took you {} attempts !\".format(attempts)) \r\n print(\"************ROUND OVER************\\n\")\r\n \r\n except ValueError or TypeError:\r\n print(\" Sorry that was not a valid entry please try again\\n\")\r\n\r\n # checks to see if the game is over and if you want to keep playing\r\n if guess == the_number:\r\n # error checking for input\r\n try:\r\n keep_playing = str(input(\"Would you like to keep playing? 
\"\r\n \"Type Y for yes or anything else for no: \"))\r\n # if yes to continue\r\n if keep_playing.lower() == 'y':\r\n # resets the number to guess\r\n the_number = random.randint(1,10)\r\n if high_score == 0 or attempts < high_score:\r\n # sets the high score\r\n high_score = attempts\r\n else:\r\n pass\r\n # resets the attempts variable\r\n attempts = 0\r\n continue\r\n # if no to continue\r\n else:\r\n return print(\"\\n************GAME OVER Thanks for playing************\")\r\n \r\n except ValueError or TypeError:\r\n print(\" Sorry that was not a valid entry please try again\")\r\n else:\r\n continue", "title": "" }, { "docid": "3d60d5046a22516e5fcf17f399c91569", "score": "0.48484918", "text": "def guess_from_clue(self, clue):\n sorted_card_score_pairs = []\n for card in self.field:\n if card.taken_by == \"None\" and clue in self.wv:\n w_vec = self.wv[clue].reshape(-1, len(self.wv[clue]))\n c_vec = self.wv[card.name].reshape(-1, len(self.wv[card.name]))\n similarity = np.asscalar(cosine_similarity(w_vec, c_vec))\n card_score_pair = (card, similarity)\n elif card.taken_by == \"None\" and clue not in self.wv:\n self.logger.info(\"clue: {} is not included in guesser embedding. similarity is set to 0.0\".format(clue))\n card_score_pair = (card, 0.0)\n else:\n continue\n sorted_card_score_pairs.append(card_score_pair)\n sorted_card_score_pairs = sorted(sorted_card_score_pairs, key=lambda x: x[1], reverse=True)\n\n self.logger.info(\"Guesser ranking for all not-taken cards:\")\n for card in sorted_card_score_pairs:\n print_text = \"{} {} {}\".format(card[0].name, card[1], card[0].color)\n self.logger.info(print_text)\n\n return sorted_card_score_pairs", "title": "" }, { "docid": "493a5426b2d8fbde27c088c25947aad2", "score": "0.48479873", "text": "def tournament(self, rounds):\n participants = copy(self.population)\n losers = []\n winners = []\n\n while len(participants) > 1:\n self.log.debug(\"\\nPARTS \" + str(len(participants)) + \": \")\n random.shuffle(participants)\n #self.log.debug(pprint.pformat(participants))\n self.output_statistics()\n args = []\n winners = []\n\n # pair individuals\n for i in range(0, len(participants), 2):\n if i + 1 >= len(participants):\n break\n args.append((participants[i], participants[i+1], rounds,))\n\n # get results, add to losers\n results = self.fiteval.run(args, get_winner)\n for x,a in zip(results, args):\n winners.append(self.handle_winners(x,a))\n\n # if we have an odd individual, they compete against a random winner\n if len(participants) % 2 != 0:\n opponent = random.sample(winners, 1)[0]\n args = [(opponent,participants[-1],rounds,)]\n result = self.fiteval.run(args, get_winner)\n winner = self.handle_winners(result[0], args[0])\n\n if winner is not opponent: # they get to be included... no risk to winner\n winners.append(participants[-1])\n \n losers.append([x for x in participants if x not in winners])\n\n #self.log.debug(\"losers:\")\n #self.log.debug(pprint.pformat(losers))\n participants = winners\n\n losers.append(winners)\n losers.reverse()\n return losers", "title": "" } ]
eddd6b33c727fb40e0dbf06dd805dd9a
This function enlarges a given number by multiplying it by 100.
[ { "docid": "6082666add84985420c2dc8f993f869a", "score": "0.8559718", "text": "def enlarge(n):\n return n * 100", "title": "" } ]
[ { "docid": "f948822003c9915cb32121da0de29efa", "score": "0.6154489", "text": "def __enlarge(self, number, multiplier, pattern=None):\n if pattern == 'linear':\n return number * 2\n else:\n return number * multiplier", "title": "" }, { "docid": "0c83e3c11549dc4434aab93849394e22", "score": "0.57685167", "text": "def roundup(x):\n return int(math.ceil(x / 100.0)) * 100", "title": "" }, { "docid": "f58b1fceda693308764dd575592bceff", "score": "0.5616741", "text": "def _scaleforsize(a):\n\n mx=np.percentile(a,98)\n mn=np.percentile(a,2)\n\n if mx > mn:\n a=np.array([(i-mn)/(mx-mn+0.0) for i in a])\n\n for index in np.arange(len(a)):\n if a[index] > 1:\n a[index]=1\n if a[index] < 0.001:\n a[index]=0.001\n\n return a", "title": "" }, { "docid": "007bd15a16464f52244a44f62ae0855b", "score": "0.55810535", "text": "def _denormalize(value):\r\n return int(value * 100000.0)", "title": "" }, { "docid": "f6f3bed61674ceceed849c9febfae7ba", "score": "0.5558897", "text": "def enlargement(self):\n self.__square += 10\n self.__population += 10000", "title": "" }, { "docid": "7f972d5bc5915e52413a55c0e2818924", "score": "0.54773515", "text": "def shrink():\n global size\n if size > 0:\n size = size - 1\n ben.pensize(size)", "title": "" }, { "docid": "a0d90b5b7c3e99f9dc75de73eaed997f", "score": "0.5427965", "text": "def shrinkify(n):\n if n < 4:\n return 0\n elif 64 < n and n < 128:\n return 1\n elif 128 < n and n < 128 + 64:\n return 2\n else:\n return 3", "title": "" }, { "docid": "3d06f236bab4afa2aee7bafe4143a039", "score": "0.5403082", "text": "def scale(num, num_start, num_end, scale_start, scale_end):\n orig_s = (num - num_start) / (num_end - num_start)\n new_s = orig_s * (scale_end - scale_start) + scale_start\n return new_s", "title": "" }, { "docid": "bb102d2b6004500c13e1582a563593d7", "score": "0.5385454", "text": "def __truncate__(self, n, decimals=-1):\n multiplier = 10 ** decimals\n return int(n * multiplier) / multiplier", "title": "" }, { "docid": "c815a4bf0f6f9123fd04f011c52e2e32", "score": "0.53656113", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "c815a4bf0f6f9123fd04f011c52e2e32", "score": "0.53656113", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "c815a4bf0f6f9123fd04f011c52e2e32", "score": "0.53656113", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "c815a4bf0f6f9123fd04f011c52e2e32", "score": "0.53656113", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "c815a4bf0f6f9123fd04f011c52e2e32", "score": "0.53656113", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "c815a4bf0f6f9123fd04f011c52e2e32", "score": "0.53656113", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "014714a3d95a675e7e476e58088ce959", "score": "0.5355852", "text": "def convert_bytes(num):\r\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\r\n if num < 1024.0:\r\n return 
\"%3.1f %s\" % (num, x)\r\n num /= 1024.0", "title": "" }, { "docid": "c7c3a62e653ad813c64645b6425eaf7c", "score": "0.53306526", "text": "def _adjust_component(amount): \r\n if amount < 64:\r\n amount = 31\r\n elif amount < 128:\r\n amount = 95\r\n elif amount < 192:\r\n amount = 159\r\n else:\r\n amount = 223\r\n \r\n return amount", "title": "" }, { "docid": "0b70ba5dee78d639befbe64b3c5589d5", "score": "0.5316659", "text": "def reformat_big_integer(\n integer: int, postfix: str = \"k\", divisor: int = 1000\n) -> str:\n\n # Just return a string of integer if less than divisor\n if integer < divisor:\n return str(integer)\n else:\n if integer % divisor == 0:\n return str(int(integer / divisor)) + postfix\n return str(integer / divisor) + postfix", "title": "" }, { "docid": "62be57aa08ba555ca063d1663d8852eb", "score": "0.5309474", "text": "def MapToSingleIncrease(val):\n return val/5", "title": "" }, { "docid": "08432bb0699f281356934dcf45e793a8", "score": "0.53060734", "text": "def scale(self, n: int) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "3e8d0bb46241f17eb1fad7db99f010c6", "score": "0.52889806", "text": "def change_base(x: int, base: int):\n # [SOLUTION]\n\n ret = \"\"\n while x > 0:\n ret = str(x // base) + ret\n x = x % base\n return ret", "title": "" }, { "docid": "aa8212db5f93f09313fbe216640c09b6", "score": "0.52639586", "text": "def scale(num, base):\n\n if isinstance(num, float):\n return num * base\n else:\n return num", "title": "" }, { "docid": "b9c6c3290ea7c56c61923741c7de2e35", "score": "0.5229953", "text": "def round_large_num(self, num):\n if num > 1000:\n r = int(np.log10(num))-1\n return round(num, -r)\n return num", "title": "" }, { "docid": "75e013cd37720e307f5f1117e61b04b5", "score": "0.52274233", "text": "def amount_to_grow(self):\n raise NotImplementedError()", "title": "" }, { "docid": "7a2a9ff8b1ed4bc62e9b00bdb8a93165", "score": "0.52198654", "text": "def humanize_size(size):\n\tfor limit, suffix in ((1024**3, 'GiB'), (1024**2, 'MiB'), (1024, 'KiB'), (1, 'B')):\n\t\thsize = float(size) / limit\n\t\tif hsize > 0.5:\n\t\t\treturn '%.2f %s' % (hsize, suffix)", "title": "" }, { "docid": "96b6523caa4e77c0dfe56074ed608199", "score": "0.5207529", "text": "def ackley_small(x):\n return ackley(x) / 20", "title": "" }, { "docid": "eaf6daa4bdd99fe64113a619b4ca944e", "score": "0.5204002", "text": "def scale(val, range):\n return val * (range[1] - range[0]) + range[0]", "title": "" }, { "docid": "3e870e8f6f03a554b79ea7fa5fe208d8", "score": "0.5200904", "text": "def scale(self, factor):\n pass", "title": "" }, { "docid": "0c7615bf8f6deb24cb6a9bbb8fedad40", "score": "0.5198913", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0", "title": "" }, { "docid": "9a2481965cfc87e0908fdc81a61eaf72", "score": "0.5195513", "text": "def enlarge(self, dx):\n\n self.inf -= dx\n self.sup += dx\n\n return self", "title": "" }, { "docid": "f4255685e513c5693d1af9b8a8b57ab8", "score": "0.5172939", "text": "def truncate(f, n):\n return math.floor(f * 10 ** n) / 10 ** n", "title": "" }, { "docid": "9cf3f62eca7df6a6ba62f8ead485259f", "score": "0.51701003", "text": "def hundred_million_pass():\n\n alloc_and_print(100, 1000, 1000)", "title": "" }, { "docid": "8b02386ba9448cc3e722c189f69385a5", "score": "0.5153846", "text": "def rosenbrock_small(x):\n return rosenbrock(x) / 50", "title": "" }, { "docid": "3540ec9ba4c3d2ee0439f203aa3071de", "score": "0.5150097", "text": "def 
scale(a_number, max_number, max_scale):\n return (a_number * max_scale / max_number)", "title": "" }, { "docid": "dd3061daa6779014a95ddb2799a75f0b", "score": "0.5129292", "text": "def human_bytes(n):\n\n step = 1000\n abbrevs = ['KB', 'MB', 'GB', 'TB']\n\n if n < step:\n return f\"{n}B\"\n\n for abbrev in abbrevs:\n n /= step\n if n < step:\n break\n\n return f\"{n:.2f}{abbrev}\"", "title": "" }, { "docid": "70b0b50fdbbd955af732bdca11066b74", "score": "0.51205486", "text": "def orig(numb):\n return numb / 1000000000", "title": "" }, { "docid": "98264597ae3e0efdfcbd606847e17218", "score": "0.51204216", "text": "def chunk_size(value):\n return round(value / 1000 + 0.5) * 1000", "title": "" }, { "docid": "9578e2f0ad7208239b4e7cd7f3e5186b", "score": "0.5109293", "text": "def stretch(self, scale_factor, callback=True):\n self.scale_pct *= scale_factor\n self.scale_and_shift(self.scale_pct, 0.0, callback=callback)", "title": "" }, { "docid": "1e9dcc28b60cc6bace74362dad2fb88d", "score": "0.5109119", "text": "def percent(self, value) -> 'Size':\n raise_not_number(value)\n self.maximum = '{}%'.format(value)\n return self", "title": "" }, { "docid": "198bf27444e72e9210319e00a9c3a328", "score": "0.5108541", "text": "def convert_size(size):\n if (size == 0):\n return '0 bytes'\n size_name = (\"bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\")\n i = int(math.floor(math.log(size,1024)))\n p = math.pow(1024,i)\n s = round(size/p)\n s = str(s)\n s = s.replace('.0', '')\n return '%s %s' % (s,size_name[i])", "title": "" }, { "docid": "50a8b19b479bad816873606afc8ad7dc", "score": "0.5104899", "text": "def adjust_salary_percentile():\n\n return Work.update_percentiles()", "title": "" }, { "docid": "87be66daa551b3d146b22cf12433996d", "score": "0.51009923", "text": "def increase_field_limit():\n\tmax_int = sys.maxsize\n\tdecrement = True\n\n\twhile decrement:\n\t\tdecrement = False\n\t\ttry:\n\t\t\tcsv.field_size_limit(max_int)\n\t\texcept OverflowError:\n\t\t\tmax_int = int(max_int / 10)\n\t\t\tdecrement = True", "title": "" }, { "docid": "d741d509e064e52ce6274ca2bdab2215", "score": "0.5086993", "text": "def adjust_number(number, max_len=3):\n\n return (max_len - len(str(number))) * '0' + str(number)", "title": "" }, { "docid": "e38f41ebc8631a7bf2fb10576f376e39", "score": "0.5074454", "text": "def round_up(x):\n if len(str(x)) <= 2:\n result = int(math.ceil(x/10.0)) * 10\n else:\n result = int(math.ceil(x/100.00)) * 100\n return result", "title": "" }, { "docid": "373f1ed5d921c2fb0be376dfa19bac60", "score": "0.5065391", "text": "def change_hp(self, value):\n self._hp += value\n if self._hp > self._max_hp:\n self._hp = self._max_hp\n elif self._hp < 0:\n self._hp = 0", "title": "" }, { "docid": "d52ff9bbe4f2125df5f818f3b265de36", "score": "0.5063726", "text": "def twenty_twenty():\n return (100-80)*100+20", "title": "" }, { "docid": "8c68fc135c289a28299bd37bcbebbcb2", "score": "0.50538445", "text": "def scale(self,k):\n scale = 0.25\n return scale", "title": "" }, { "docid": "02c428d788a54fce142cded2fedaf68c", "score": "0.5020023", "text": "def clamp255(n):\n return clamp(n, 0, 255)", "title": "" }, { "docid": "1ca91642a0eddf6df5036589e0aaca11", "score": "0.50164044", "text": "def set_shrink(self, shrink=1):\r\n self.shrink = shrink", "title": "" }, { "docid": "8845342696f9ea7b40ee414e8e153f89", "score": "0.5012698", "text": "def scale(value, factor, baseline=0):\n return factor * (value - baseline) + baseline", "title": "" }, { "docid": "8b6de2e4fefdccc3b9e69b9fd24f29c5", "score": 
"0.49987707", "text": "def half_billion_pass():\n\n alloc_and_print(500, 1000, 1000)", "title": "" }, { "docid": "b89b8e3fdfbbaa522f4e0b06f2623877", "score": "0.4997055", "text": "def floor(value, size, offset=200):\r\n return float(((value + offset) // size) * size - offset)", "title": "" }, { "docid": "8907ee14cc6d54a0258aa3ead7376821", "score": "0.49954993", "text": "def pad_up(size, factor):\n x = size + factor - 1\n return x - (x % factor)", "title": "" }, { "docid": "9e5ce216af5369f267b8349559eeb23b", "score": "0.49906397", "text": "def bytes_2_megabytes(value):\n return value/float(1048576)", "title": "" }, { "docid": "9b7909d785de9f43a67d0d354b46c3ba", "score": "0.4990332", "text": "def binarify(num):\n if num<=0: return '0'\n digits = []\n powers = [2048,1024,512,256,128,64,32,16,8,4,2,1]\n for i in powers:\n \tdigits.append(str(num / i))\n \tnum %= i\n return ''.join(digits)", "title": "" }, { "docid": "19c3758c5ed9ca47a5d1c4d0f7d174a5", "score": "0.49773613", "text": "def scale_tracking_amount(self, multiplier):\r\n self.tracking_amount = self.tracking_amount * multiplier\r\n self._do_track()", "title": "" }, { "docid": "4060f63aa0c9f1aab084a110b150cca5", "score": "0.49758163", "text": "def enlarge(a, x=2, y=None):\n\n import numpy as np\n a = np.asarray(a)\n assert a.ndim == 2\n if y == None:\n y = x\n for factor in (x, y):\n assert factor.__class__ == int\n assert factor > 0\n return a.repeat(y, axis=0).repeat(x, axis=1)", "title": "" }, { "docid": "90d928cb3e58208f746f17f7be1fbe82", "score": "0.49629426", "text": "def format_size(size):\n for unit in (\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\"):\n if size < 1024:\n return f\"{size:.f} {unit}\"\n size /= 1024.0", "title": "" }, { "docid": "3d71fc6955effd36aeb3b75ea70a13f0", "score": "0.494559", "text": "def beautify_size (size):\n\n result = None\n\n try:\n bytes = int (size)\n except:\n bytes = -1\n\n if bytes >= 1024 * 1024 * 1024:\n result = '%i GB' % int (round (bytes / float (1024 * 1024 * 1024)))\n elif bytes >= 1024 * 1024:\n result = '%i MB' % int (round (bytes / float (1024 * 1024)))\n elif bytes >= 1024:\n result = '%i KB' % int (round (bytes / float (1024)))\n elif bytes >= 0:\n result = '%i B' % bytes\n\n return result", "title": "" }, { "docid": "09d45d0e5a8da898d8f95d6b4f485647", "score": "0.49368694", "text": "def reporthook(a, b, c):\n print \"% 3.1f%% of %.2f MB\\r\" % (min(100, float(a * b) / c * 100), c/1000000.),", "title": "" }, { "docid": "1532283f281a25283a3df218e1a5c955", "score": "0.49311456", "text": "def convert_bytes(num):\n for x in ['bytes', 'KB', 'MB']: # , 'GB', 'TB']: Keep to MB in this case.\n if num < 1024.0:\n# return \"%3.1f %s\" % (num, x)\n return [num, x]\n num /= 1024.0", "title": "" }, { "docid": "31645f4e86cf961a1f184dd068046ae2", "score": "0.49280655", "text": "def throttle_pos(value):\n return __digit(value) * 100 / 255", "title": "" }, { "docid": "59bc1cc3088040d6d56db1f476192d15", "score": "0.49235317", "text": "def pers(numb):\n return int(numb * 10000000)", "title": "" }, { "docid": "9d47930d46df23b3c164cd6b4e64c5d1", "score": "0.49225008", "text": "def gp(num):\n return int(float(num) * 100)", "title": "" }, { "docid": "4a0819aab90acec0a96dc9e5df10675a", "score": "0.49194244", "text": "def scale( self, factor=1.0 ):\n\n pass # REPLACE", "title": "" }, { "docid": "64aa8c65efea62fe53ab3fdc5a0f1c4c", "score": "0.49158818", "text": "def format_number(x):\n if 0 < x and x < 1:\n return '{x:1.2}'.format(x=x)\n return humanize.intword(x)", "title": "" }, { "docid": 
"accea99986f5ab8977ab1ded8b8f197c", "score": "0.49031487", "text": "def clamp(value, floor=-100, ceil=100):\n return max(min(value, ceil), floor)", "title": "" }, { "docid": "06b571573a6cfe9f1fa49738f9e08646", "score": "0.49004427", "text": "def __progress(x):\r\n global progress_x\r\n x = int(x * 40 // 100)\r\n sys.stdout.write(\"-\" * (x - progress_x))\r\n sys.stdout.flush()\r\n progress_x = x", "title": "" }, { "docid": "67943a34e2c4ad62edd275b77c87b1fb", "score": "0.48966372", "text": "def scale_width(w, w_factor):\n w *= w_factor\n new_w = (int(w+4) // 8) * 8\n new_w = max(8, new_w)\n if new_w < 0.9*w:\n new_w += 8\n return int(new_w)", "title": "" }, { "docid": "a2410dcdaeb9eb902022b83e66d6a21d", "score": "0.48742208", "text": "def round_down(num, divisor=32):\n return num - (num%divisor)", "title": "" }, { "docid": "1f26403ddf8cde9a5a67faf843e6e9b0", "score": "0.48714098", "text": "def _PrettyPrintSize(x):\n if x < 1e3:\n return str(x)\n elif 1e3 <= x < 1e6:\n return '%.2fkB' % (x / 1e3)\n elif 1e6 <= x < 1e9:\n return '%.2fMB' % (x / 1e6)\n else:\n return '%.2fGB' % (x / 1e9)", "title": "" }, { "docid": "94c01b33c9870201cd4115324501609a", "score": "0.4868972", "text": "def _make_divisible(v, divisor = 8, min_value = None):\n v = v * 1.0\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v", "title": "" }, { "docid": "3f20e0c9a28fa9df3f87f8dcc8080969", "score": "0.48639986", "text": "def human_bytes(n):\n if n < 1024:\n return '%i B' % n\n k = (n - 1) / 1024 + 1\n if k < 1024:\n return '%i KB' % k\n return '%.2f MB' % (float(n) / (2 ** 20))", "title": "" }, { "docid": "f679158c783214dda4ac596f4d7279fb", "score": "0.4861277", "text": "def convert1(v):\n\n try:\n x = float(v)\n except ValueError:\n return \"-9999\"\n return \"{:5.0f}\".format(x*100)", "title": "" }, { "docid": "a7f8bcea32bbe7bfc230e15a12acf82a", "score": "0.4859748", "text": "def ems(self, value) -> 'Size':\n raise_not_number(value)\n self.maximum = '{}em'.format(value)\n return self", "title": "" }, { "docid": "4019e61f08af9637a9502f20c9022162", "score": "0.4856031", "text": "def progress_bar(self, n):\r\n self.current_input.progress_bar.setValue(n)", "title": "" }, { "docid": "3ae7611c81fefe679571fd2aa68cb140", "score": "0.48539802", "text": "def grow():\n global size\n size = size + 1\n ben.pensize(size)", "title": "" }, { "docid": "d177f44005a8bca4ca693143dfca2d32", "score": "0.48497915", "text": "def truncate_number(number, maximum):\n return min(abs(number), maximum) * (1 if number >= 0 else -1)", "title": "" }, { "docid": "c62feeaf782b32903179a22039f7f613", "score": "0.48361892", "text": "def _enlarge_buffer(self, width, height):\n new_width = self._width\n if self._cursor[0] + width >= self._width:\n new_width = self._cursor[0] + width\n\n new_height = self._height\n if self._cursor[1] + height >= self._height:\n new_height = self._cursor[1] + height\n\n if new_width > self._width or new_height > self._height:\n new_buffer = displayio.Bitmap(new_width, new_height, 65535)\n if self._mask_color is not None:\n bitmaptools.fill_region(\n new_buffer, 0, 0, new_width, new_height, self._mask_color\n )\n bitmaptools.blit(new_buffer, self._buffer, 0, 0)\n self._buffer = new_buffer\n self._width = new_width\n self._height = new_height", "title": "" }, { "docid": "fbcd6dd1d2ffb83d7420aa34c6944d53", "score": "0.4834577", "text": "def 
setProgressionPercent(progressBar,value):\n\tassertProgressBar(progressBar)\n\tassert type(value) is int\n\tassert value >= 0, \"value parameter have to be in [0,100], current value is : %r\" % (value)\n\tassert value <= 100, \"value parameter have to be in [0,100], current value is : %r\" % (value)\n\n\tvalue = int(round(value/100.*progressBar[\"gutsCount\"]))\n\n\tprogressBar[\"progression\"] = value\n\tItem.setSprite(progressBar,value)\n\n\treturn", "title": "" }, { "docid": "018711eb3d025d97ecd0fdb1ee7a4ea5", "score": "0.48332244", "text": "def stretch(a,b,factor=1,int=False):\n c = (a+b)/2\n a = c + factor*(a-c) \n b = c + factor*(b-c) \n if int:\n a = floor(a)\n b = ceil(b)\n return a, b", "title": "" }, { "docid": "73283c6107ca50401e4e15973f3e86f5", "score": "0.4819431", "text": "def format_bytes(n: int) -> str:\n for x in ['B', 'kB', 'MB', 'GB', 'TB', 'PB']:\n if n < 1000.0:\n return \"%3.2f %s\" % (n, x)\n n /= 1000.0", "title": "" }, { "docid": "aaabd218d6921b2440de303498d209be", "score": "0.48145756", "text": "def intake(self, amount: int):\n self.money += amount", "title": "" }, { "docid": "8474ec133c737868f8fe936021c60ca5", "score": "0.48076797", "text": "def isize(n):\n if n >= 1024 ** 3:\n return \"%.2f GB\" % (n / 1024.0 ** 3)\n elif n >= 1024 ** 2:\n return \"%.2f MB\" % (n / 1024.0 ** 2)\n elif n >= 1024:\n return \"%.2f KB\" % (n / 1024.0)\n else:\n return \"%d B\" % n", "title": "" }, { "docid": "cf6a609e779611597d690ab29b13710f", "score": "0.48026052", "text": "def trunc(v, n=3):\n factor = pow(10, n)\n return int(v*factor)/factor", "title": "" }, { "docid": "0ddf8698e07ed101390a65ed1982c21d", "score": "0.4797588", "text": "def wrap(value, min_value, max_value):\n return ((value - min_value) % (max_value - min_value)) + min_value", "title": "" }, { "docid": "7d356f3e05f02bed536039bb90573348", "score": "0.4795077", "text": "def truncate(number, digits = 0):\n return _trunc(number * 10 ** digits) / 10 ** digits", "title": "" }, { "docid": "b79adad59ad7de0eeeb1a1c4807819dd", "score": "0.4792916", "text": "def smooth(self,number):\n return round(math.log(number+1),self.digits)", "title": "" }, { "docid": "2b0a046bf2f4aafbcf3c791a21079a3f", "score": "0.47879735", "text": "def human_readable(size,precision=2):\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n return '{0:.{1}f} {2}'.format(size, precision, suffixes[suffixIndex])", "title": "" }, { "docid": "51577399a8121fece28f0a77b9834623", "score": "0.47827476", "text": "def rescale(val, in_min, in_max, out_min, out_max):\n return out_min + (val - in_min) * ((out_max - out_min) / (in_max - in_min))", "title": "" }, { "docid": "9ef525d50cbe24aee5f5719b636eb393", "score": "0.47805783", "text": "def small_to_human(num):\n out = []\n if not num:\n return small_nums.get(num)\n while num:\n if num > 99:\n # get hundreds quotient and continue with remainder as num\n quotient, remainder = divmod(num, 100)\n out.extend([small_nums.get(quotient), big_nums.get(2)])\n num = remainder\n elif num > 19:\n # all two-digit numbers > 19 - are tens and ones combination\n # so get tens quotient and continue with remainder as num\n quotient, remainder = divmod(num, 10)\n out.append(small_nums.get(quotient * 10))\n num = remainder\n else:\n out.append(small_nums.get(num))\n break\n return ' '.join(out)", "title": "" }, { "docid": "ee0ab173721695f00692df8eab844ece", "score": "0.47737312", "text": "def 
spell_hundreds(number):\n\n spelled_number = \"\"\n\n hundreds = number // 100\n\n if hundreds != 0:\n spelled_number += NUMBERS[hundreds] + ' ' + NUMBERS[100]\n\n number %= 100\n\n if number > 20:\n\n spelled_number += ' ' + NUMBERS[number - (number % 10)]\n\n if number % 10 > 0:\n spelled_number += '-' + NUMBERS[number % 10]\n\n elif number > 0:\n spelled_number += ' ' + NUMBERS[number]\n\n return spelled_number", "title": "" }, { "docid": "9e6e8df48d0a84c428d33478cd5bff6a", "score": "0.4770568", "text": "def ten_by_ten():\n for number in range(1, 101):\n if number % 10 == 0:\n print(\"{:<3} \\n\".format(number))\n else:\n print(\"{:<3}\".format(number), end=\" \")", "title": "" }, { "docid": "6547a5200aa6031ff3fc36cd98f70bd8", "score": "0.47690833", "text": "def rescale(x, lo, hi):\n assert(lo < hi), \"[rescale] lo={0} must be smaller than hi={1}\".format(lo,hi)\n old_width = torch.max(x)-torch.min(x)\n old_center = torch.min(x) + (old_width / 2.)\n new_width = float(hi-lo)\n new_center = lo + (new_width / 2.)\n # shift everything back to zero:\n x = x - old_center\n # rescale to correct width:\n x = x * (new_width / old_width)\n # shift everything to the new center:\n x = x + new_center\n # return:\n return x", "title": "" }, { "docid": "4d5e9d75b277431ec699ed8b72dc194c", "score": "0.4768465", "text": "def change_number(self, nr_hugepages, per_node=None, hugepage_size=DEFAULT_HUGEPAGE_SIZE):\n self._hugepages_helper({'mode': 'echo {} >'.format(nr_hugepages), 'per_node': per_node,\n 'hugepage_size': hugepage_size, 'param_name': 'nr_hugepages'})", "title": "" }, { "docid": "e0c7170e6043724dc5fe050f535f1a16", "score": "0.47669104", "text": "def large():\n lst = []\n for i1 in range(2, 32, 7):\n for i2 in range(3, 32, 7):\n for i3 in range(5, 32, 7):\n for i4 in range(6, 32, 7):\n for i5 in range(7, 32, 7):\n lst.append(mknum(32, {1, i1, i2, i3, i4, i5, 32}))\n divs = [str(b*b*b*b*b*b + b*b*b*b*b + b*b*b*b + b*b*b + b*b + b \\\n + 1) for b in range(2, 11)]\n g.write(\"Case #1:\\n\")\n g.write(\"\\n\".join([\" \".join([s] + divs) for s in lst[:500]]))", "title": "" }, { "docid": "2755fed4e3ac85e68e244534e247e7e8", "score": "0.4766668", "text": "def scale(v, c):\n return c*v", "title": "" }, { "docid": "ab90fd8ff452029c86ec213815335ffb", "score": "0.4755393", "text": "def _convert_nnn(val):\n word = ''\n (mod, rem) = (val % 100, val // 100)\n if rem > 0:\n word = to_19[rem] + ' Hundred'\n if mod > 0:\n word += ' '\n if mod > 0:\n word += _convert_nn(mod)\n return word", "title": "" } ]
94935ef11b62088bf66b2de396c412e6
create arbitrary obsolete marker. With no arguments, displays the list of obsolescence markers.
[ { "docid": "1ba9b1dd877414dc7085fcf6dde19aad", "score": "0.5471082", "text": "def debugobsolete(ui, repo, precursor=None, *successors, **opts):\n\n opts = pycompat.byteskwargs(opts)\n\n def parsenodeid(s):\n try:\n # We do not use revsingle/revrange functions here to accept\n # arbitrary node identifiers, possibly not present in the\n # local repository.\n n = bin(s)\n if len(n) != repo.nodeconstants.nodelen:\n raise TypeError()\n return n\n except TypeError:\n raise error.InputError(\n b'changeset references must be full hexadecimal '\n b'node identifiers'\n )\n\n if opts.get(b'delete'):\n indices = []\n for v in opts.get(b'delete'):\n try:\n indices.append(int(v))\n except ValueError:\n raise error.InputError(\n _(b'invalid index value: %r') % v,\n hint=_(b'use integers for indices'),\n )\n\n if repo.currenttransaction():\n raise error.Abort(\n _(b'cannot delete obsmarkers in the middle of transaction.')\n )\n\n with repo.lock():\n n = repair.deleteobsmarkers(repo.obsstore, indices)\n ui.write(_(b'deleted %i obsolescence markers\\n') % n)\n\n return\n\n if precursor is not None:\n if opts[b'rev']:\n raise error.InputError(\n b'cannot select revision when creating marker'\n )\n metadata = {}\n metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())\n succs = tuple(parsenodeid(succ) for succ in successors)\n l = repo.lock()\n try:\n tr = repo.transaction(b'debugobsolete')\n try:\n date = opts.get(b'date')\n if date:\n date = dateutil.parsedate(date)\n else:\n date = None\n prec = parsenodeid(precursor)\n parents = None\n if opts[b'record_parents']:\n if prec not in repo.unfiltered():\n raise error.Abort(\n b'cannot used --record-parents on '\n b'unknown changesets'\n )\n parents = repo.unfiltered()[prec].parents()\n parents = tuple(p.node() for p in parents)\n repo.obsstore.create(\n tr,\n prec,\n succs,\n opts[b'flags'],\n parents=parents,\n date=date,\n metadata=metadata,\n ui=ui,\n )\n tr.close()\n except ValueError as exc:\n raise error.Abort(\n _(b'bad obsmarker input: %s') % stringutil.forcebytestr(exc)\n )\n finally:\n tr.release()\n finally:\n l.release()\n else:\n if opts[b'rev']:\n revs = scmutil.revrange(repo, opts[b'rev'])\n nodes = [repo[r].node() for r in revs]\n markers = list(\n obsutil.getmarkers(\n repo, nodes=nodes, exclusive=opts[b'exclusive']\n )\n )\n markers.sort(key=lambda x: x._data)\n else:\n markers = obsutil.getmarkers(repo)\n\n markerstoiter = markers\n isrelevant = lambda m: True\n if opts.get(b'rev') and opts.get(b'index'):\n markerstoiter = obsutil.getmarkers(repo)\n markerset = set(markers)\n isrelevant = lambda m: m in markerset\n\n fm = ui.formatter(b'debugobsolete', opts)\n for i, m in enumerate(markerstoiter):\n if not isrelevant(m):\n # marker can be irrelevant when we're iterating over a set\n # of markers (markerstoiter) which is bigger than the set\n # of markers we want to display (markers)\n # this can happen if both --index and --rev options are\n # provided and thus we need to iterate over all of the markers\n # to get the correct indices, but only display the ones that\n # are relevant to --rev value\n continue\n fm.startitem()\n ind = i if opts.get(b'index') else None\n cmdutil.showmarker(fm, m, index=ind)\n fm.end()", "title": "" } ]
[ { "docid": "32511ebf74660e381cf17ed2e4b7e868", "score": "0.5585488", "text": "def markHelpUnnecessary(self) -> None:\n ...", "title": "" }, { "docid": "3ee0ed96189f38d1f0bbecea79758cfe", "score": "0.5508379", "text": "def marker(self):\n return ''", "title": "" }, { "docid": "7add59691c5758c059e2cce17e2e6eb0", "score": "0.5456977", "text": "def _debugobsmarkers(ui, part, indent=0, **opts):\n opts = pycompat.byteskwargs(opts)\n data = part.read()\n indent_string = b' ' * indent\n try:\n version, markers = obsolete._readmarkers(data)\n except error.UnknownVersion as exc:\n msg = b\"%sunsupported version: %s (%d bytes)\\n\"\n msg %= indent_string, exc.version, len(data)\n ui.write(msg)\n else:\n msg = b\"%sversion: %d (%d bytes)\\n\"\n msg %= indent_string, version, len(data)\n ui.write(msg)\n fm = ui.formatter(b'debugobsolete', opts)\n for rawmarker in sorted(markers):\n m = obsutil.marker(None, rawmarker)\n fm.startitem()\n fm.plain(indent_string)\n cmdutil.showmarker(fm, m)\n fm.end()", "title": "" }, { "docid": "b7df58a20c0b372baacba688e241ee3e", "score": "0.5449578", "text": "def OverlineOff(self) -> str:", "title": "" }, { "docid": "a02b9b21fd7df6f3befbf6c165214b04", "score": "0.54123986", "text": "def UnsetToolTip(self):", "title": "" }, { "docid": "091475e7911fe70095c0c4e36b3c42e9", "score": "0.5333085", "text": "def create_replace_with_nothing_marker(replace_this_string: str) -> str:\n return (\n f\"{ParserHelper.__alert_character}{replace_this_string}{ParserHelper.__alert_character}\"\n + f\"{ParserHelper.replace_noop_character}{ParserHelper.__alert_character}\"\n )", "title": "" }, { "docid": "bbf74e31340386b85cc5ed4e316466d0", "score": "0.5203022", "text": "def OverlineOn(self) -> str:", "title": "" }, { "docid": "3eac9bb63c16b11b3356238a523e7441", "score": "0.50938404", "text": "def __str__(self):\n obs = str(self.obsolete)\n return \"OboTerm: \"+ self.id+\", obsolete: \"+obs", "title": "" }, { "docid": "29d8af9693742c3f7c36aeb85414b742", "score": "0.50583076", "text": "def nb_markers(model):\n return pyorbdl.nb_markers(model)", "title": "" }, { "docid": "aac1dc2c40c50b208c8162ef41d8fd73", "score": "0.50549173", "text": "def DrawCheckMark(self, *args, **kw):", "title": "" }, { "docid": "284892296a87334a2f62fc1c947ce5ce", "score": "0.4984373", "text": "def __create_anomaly_annotation(self, pos, label):\n annot = self.ax.annotate(label, xy=(pos[0], pos[1]), xytext=(5,5), textcoords=\"offset points\",\n bbox=dict(boxstyle=\"round\", fc=\"r\"), transform=ccrs.PlateCarree(), zorder=99)\n annot.set_visible(False)\n self.anomalies_annotations.append(annot)", "title": "" }, { "docid": "500c96683f7f4b8b886e43ea0ecca902", "score": "0.4982095", "text": "def test_deprecate_non_existing_conflict(self, conflicts):\n assert len(conflicts.conflicts) == 7\n with pytest.raises(ValueError) as exc:\n conflicts.deprecate([\"dummy object\"])\n assert \"'dummy object' is not in list\" in str(exc.value)", "title": "" }, { "docid": "5c418e4f4e0d459d09e1c7ebb3636f5c", "score": "0.49677706", "text": "def DPxSetMarker():\n return setMarker()", "title": "" }, { "docid": "9b7a1e9b92ac8efeddb95a662147a2f7", "score": "0.4935304", "text": "def _remove_non_deprecated_descriptions(op_def):\n for input_arg in op_def.input_arg:\n input_arg.description = \"\"\n for output_arg in op_def.output_arg:\n output_arg.description = \"\"\n for attr in op_def.attr:\n attr.description = \"\"\n\n op_def.summary = \"\"\n op_def.description = \"\"", "title": "" }, { "docid": "2529fef658940a7c81a66bba1912f894", "score": 
"0.4928449", "text": "def create_mark(self, name='', left_gravity=True):\n pass # implemented in Ada", "title": "" }, { "docid": "48670929106692ca4c3ae830d7f925d6", "score": "0.49171934", "text": "def drawAnnote(self, ax, x, y, annote):\n if (x, y) in self.drawnAnnotations:\n markers = self.drawnAnnotations[(x, y)]\n for m in markers:\n m.set_visible(not m.get_visible())\n self.ax.figure.canvas.draw_idle()\n else:\n t = ax.text(x, y, \" - %s\" % (annote),)\n m = ax.scatter([x], [y], marker='d', c='r', zorder=100)\n self.drawnAnnotations[(x, y)] = (t, m)\n self.ax.figure.canvas.draw_idle()", "title": "" }, { "docid": "ee6d5f57ab525a652cede782d58bfe38", "score": "0.49138638", "text": "def update_descriptions(self):\n try:\n fastighetsbeteckning = utils.get_text_inside_brackets(self.namn)\n except ValueError:\n fastighetsbeteckning = self.namn\n self.add_to_report(\"malformed_label\", self.namn)\n self.add_alias(\"sv\", fastighetsbeteckning)", "title": "" }, { "docid": "5817b4dd42bd194996b822bff4863411", "score": "0.48747376", "text": "def convert_old_index_markers(self):\n for im in self.getMarks():\n at = im.AlternativeText\n im.AlternativeText = at.translate(str.maketrans(\"<>\", \"{}\"))", "title": "" }, { "docid": "01297e61c75fa36d7782aca7f749fe49", "score": "0.48219013", "text": "def remove_special_lines(self, mark, lines):\n pass # implemented in Ada", "title": "" }, { "docid": "b73ec5fc8207e7394c2f8f632c2fb876", "score": "0.48198494", "text": "def developers():\n draw.showInfo(\"\"\"\nThis is an alphabetic list of the\nmajor contributors to pyFormex:\n\nMatthieu De Beule\nGianluca De Santis\nBart Desloovere\nPeter Mortier\nTim Neels\nSofie Van Cauter\nBenedict Verhegghe\n\nIf you feel that your name was\nleft out in error, please contact us.\n\"\"\")", "title": "" }, { "docid": "f06c1aee5f737e23210d17e97edb2f1d", "score": "0.47991124", "text": "def __str__(self):\n\n obs = str(self.obsolete)\n id = str(self.id)\n return \"OboTerm: \" + id + \", \" + self.name + \", obsolete: \" + obs", "title": "" }, { "docid": "a292a4d9c33c598233b2fed2877a406f", "score": "0.47939926", "text": "def __str__(self):\n result = \"\"\n for i in range(len(self._marker)):\n result += \" \".join(self._marker[i]) + \"\\n\"\n return result", "title": "" }, { "docid": "bfe74ebf4e465c7fbb76c9410eb8bb8c", "score": "0.47707465", "text": "def get_marker():\n yield from product(range(2, 6), range(1, 3), [52])\n # if that wasn't enough, create some more just at a different angle\n yield from product(range(2, 8), range(1, 3), [0])\n # if we ever get to this point, we need more than 20 markers\n yield from product([2]+list(range(3, 6, 2)), range(1, 3), [90])", "title": "" }, { "docid": "6ae6646f07d319932ba00f8044fe18e8", "score": "0.4746366", "text": "def precipitation_marker(self):\n if (self.color):\n return _BLUE + 'x' + _END_COLOR\n else:\n return \"x\"", "title": "" }, { "docid": "be3b8293fee3d022a0fda13c364ee601", "score": "0.47449318", "text": "def drawAnnote(self, axis, x, y, annote):\n\t\tif (x,y) in self.drawnAnnotations:\n\t\t\tmarkers = self.drawnAnnotations[(x,y)]\n\t\t\tfor m in markers:\n\t\t\t\tm.set_visible(not m.get_visible())\n\t\t\t\tself.axis.figure.canvas.draw()\n\t\telse:\n\t\t\tt = axis.text(x,y, \"(%3.2f, %3.2f) - %s\"%(x,y,annote), )\n\t\t\tm = axis.scatter([x],[y], marker='d', c='r', zorder=100)\n\t\t\tself.drawnAnnotations[(x,y)] =(t,m)\n\t\t\tself.axis.figure.canvas.draw()", "title": "" }, { "docid": "3a85fa67cf2b7e3b89f5d60730d6249a", "score": "0.47428513", "text": "def 
deprecated_library_keyword_with_stuff_to_ignore(self):\n pass", "title": "" }, { "docid": "39e526a61491f8329d4bc767821b091c", "score": "0.47403714", "text": "def obsolete_entries(self):\n return []", "title": "" }, { "docid": "c269a9babb3a4f8357c5d5344c03a4c9", "score": "0.47323155", "text": "def at_defeat(defeated):\n defeated.location.msg_contents(\"%s has been defeated!\" % defeated)", "title": "" }, { "docid": "cbbfd541ef0a002b64572b7fef4a5e65", "score": "0.47109455", "text": "def test_deprecate_existing_conflict(self, new_dimension_conflict, conflicts):\n assert len(conflicts.conflicts) == 7\n conflicts.deprecate([new_dimension_conflict])\n assert len(conflicts.conflicts) == 6", "title": "" }, { "docid": "fadc6997808a2097034ad177468d166f", "score": "0.47014272", "text": "def _remove_markers(self, text):\n for char in self.metadata.markers:\n text = text.replace(char, \"\")\n return text", "title": "" }, { "docid": "af11893cf3c0af284542bfc4956c8f12", "score": "0.46996164", "text": "def marks():\n return ['*']", "title": "" }, { "docid": "bb27d29c6dfd5e304dd02350af84584b", "score": "0.46984306", "text": "def drawAnnote(self, ax, x, y, annote):\n if (x, y) in self.drawnAnnotations:\n markers = self.drawnAnnotations[(x, y)]\n for m in markers:\n m.set_visible(not m.get_visible())\n self.ax.figure.canvas.draw_idle()\n else:\n t = ax.text(x, y, \"%s %.2f\" % (str(annote[0]),annote[1]),)\n m = ax.scatter([x], [y], marker='+', c='r', zorder=100)\n self.drawnAnnotations[(x, y)] = (t, m)\n self.ax.figure.canvas.draw_idle()", "title": "" }, { "docid": "cc8723bed751cb1fdb413fe9dc138d7d", "score": "0.46674284", "text": "def StrikethroughOff(self) -> str:", "title": "" }, { "docid": "be5ccc23418b1236a00f645ab0785a93", "score": "0.46665424", "text": "def hide_annotation(self):\n try:\n self.annotation_artist.remove()\n except:\n pass\n self._axes.figure.canvas.draw_idle()", "title": "" }, { "docid": "1e5d878fd8038ae246959f38b7badc0e", "score": "0.4662826", "text": "def displayWarning(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1e5d878fd8038ae246959f38b7badc0e", "score": "0.4662826", "text": "def displayWarning(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "1e5d878fd8038ae246959f38b7badc0e", "score": "0.4662826", "text": "def displayWarning(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "232b2b91d65f9c6c81ed8b4b49077cd6", "score": "0.46473932", "text": "def GetAllMarkers():\n return ['o','x','d','^','+','v','8','s','p','>','<']", "title": "" }, { "docid": "203a6df3c31acb5e13a7f2ede19a5a3a", "score": "0.46363455", "text": "def drawAnnote(self, axis, x, y, annote):\n if (x, y) in self.drawnAnnotations:\n markers = self.drawnAnnotations[(x, y)]\n for m in markers:\n m.set_visible(not m.get_visible())\n self.axis.figure.canvas.draw()\n else:\n t = axis.text(x+0.03, y+0.03, u\"{0:s}\".format(annote),\n bbox=dict(facecolor='#cccccc', alpha=0.75, edgecolor='#404040', linewidth=0.5), fontsize=9)\n m = axis.scatter([x], [y], marker='x', c='black', s=20, alpha=.8, zorder=100)\n self.drawnAnnotations[(x, y)] = (t, m)\n self.axis.figure.canvas.draw()", "title": "" }, { "docid": "60b583a15e0032f6a318a934a4de0187", "score": "0.46327415", "text": "def get_marker(rate):\n if rate == \"awesome\":\n return \"(做得不错的点)\"\n elif rate == \"suggestion\":\n return \"\"\n elif rate == \"require\":\n return \"(下面这一点需要修改才能通过项目)\"", "title": "" }, { "docid": "3bc60abff31a9193a765812d4799bdb8", "score": "0.4624319", "text": "def cursor_misplaced_msg(self):\n msg = 'Cursor misplaced - try again'\n 
self.upd_msg(msg)", "title": "" }, { "docid": "52dc784e29c4fa8e27e156828e90c4b8", "score": "0.46115807", "text": "def UnderlineOff(self) -> str:", "title": "" }, { "docid": "742041bed0ee7d0567b5db0391b6424b", "score": "0.46110922", "text": "def skip_deprecated(app, what, name, obj, skip, options):\r\n doc = obj.__doc__\r\n return skip or doc and '.. deprecated::' in doc", "title": "" }, { "docid": "9b92a9a221d39522d288f4ba0c3ee368", "score": "0.45817885", "text": "def hide():\n pass # implemented in Ada", "title": "" }, { "docid": "c3b24e2864305c6c04b2991b8170cbef", "score": "0.45715317", "text": "def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,\n deprecated_names):\n\n deprecation_string = ', '.join(sorted(deprecated_names))\n\n return decorator_utils.add_notice_to_docstring(\n doc,\n instructions,\n 'DEPRECATED FUNCTION ARGUMENTS',\n '(deprecated arguments)', [\n 'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '\n 'They will be removed %s.' %\n (deprecation_string, 'in a future version' if date is None else\n ('after %s' % date)), 'Instructions for updating:'\n ],\n notice_type='Deprecated')", "title": "" }, { "docid": "ad6389dd290c8ae63306f811b415aa6a", "score": "0.45652035", "text": "def create_replacement_markers(\n replace_this_string: str, with_this_string: str\n ) -> str:\n return (\n f\"{ParserHelper.__alert_character}{replace_this_string}{ParserHelper.__alert_character}\"\n + f\"{with_this_string}{ParserHelper.__alert_character}\"\n )", "title": "" }, { "docid": "8417d359ae61cc13e1ed6af27ff5f1ae", "score": "0.45532498", "text": "def createConnectionMarkingMenu(ned):\n\n pass", "title": "" }, { "docid": "bd6ed4d0c35aac0448957a39d74a16b0", "score": "0.4537742", "text": "def create_empties(self, ms):\n unames = {}\n use_existing = self.properties.use_existing\n s = self.properties.size\n empty_size = (s, s, s)\n for ml in ms.markerLabels:\n name = self.properties.prefix + ml\n if use_existing and name in bpy.context.scene.objects:\n o = bpy.context.scene.objects[name]\n else:\n bpy.ops.object.add()\n o = bpy.context.active_object\n o.name = name\n unames[name] = o.name\n bpy.ops.transform.resize(value=empty_size)\n o.show_name = self.properties.show_names\n o.show_in_front = self.properties.x_ray\n for name in unames.values():\n bpy.context.scene.objects[name].select_set(True)\n return unames", "title": "" }, { "docid": "b63b82ff47fd776b031d4bba1076691b", "score": "0.45361316", "text": "def _setupMarker(self, style, label):\n if not style:\n self.plotmarker.setVisible(False)\n return\n self.plotmarker.setVisible(True)\n self._symbol.setStyle(self.QwtSymbolStyles.get(style.symbol, QwtSymbol.Cross))\n self._font.setPointSize(style.label_size)\n symbol_color = QColor(style.symbol_color)\n label_color = QColor(style.label_color)\n # dots have a fixed size\n if style.symbol == \"dot\":\n self._symbol.setSize(2)\n else:\n self._symbol.setSize(self._size)\n self._symbol.setPen(QPen(symbol_color, style.symbol_linewidth))\n self._symbol.setBrush(QBrush(Qt.NoBrush))\n lab_pen = QPen(Qt.NoPen)\n lab_brush = QBrush(Qt.NoBrush)\n self._label = label or \"\"\n self.plotmarker.setSymbol(self._symbol)\n txt = QwtText(self._label)\n txt.setColor(label_color)\n txt.setFont(self._font)\n txt.setBorderPen(lab_pen)\n txt.setBackgroundBrush(lab_brush)\n self.plotmarker.setLabel(txt)\n self.plotmarker.setLabelAlignment(Qt.AlignBottom | Qt.AlignRight)", "title": "" }, { "docid": "7deb906c10ec635685151ab25bde8ba7", "score": "0.45352253", "text": "def RandomMarker(num):\n \n _num = num\n 
_markerList = ['o','^','v','p','*','s','P','8','X']\n _listLen = len(_markerList)\n while(_num>_listLen):\n _num = _num-_listLen\n\n _marker = ''\n \n try:\n _marker = _markerList[_num-1]\n except IndexError:\n _marker = _markerList[0]\n print(\"Went Out Of Index - Marker\")\n \n return _marker", "title": "" }, { "docid": "91b06fb497b2ed4e9a54a8a7a2ddc4a4", "score": "0.45311522", "text": "def _marker(entry, cheapest=False):\n if isinstance(entry,HomeEntry):\n return '''var marker_{2} = new google.maps.Marker({{\n position: {{ lat: {0}, lng: {1} }},\n map: map,\n title: \"id: {2}\",\n icon: {4}\n}});\nvar infowindow_{2} = new google.maps.InfoWindow({{ content: \"{3}\" }});\ngoogle.maps.event.addListener(marker_{2}, 'click', function() {{ infowindow_{2}.open(map,marker_{2}); }});\n'''.format(entry.latitude, entry.longitude,entry.id,entry.to_html(),\n 'greenMakerIcon' if cheapest else 'circleIcon')\n raise ValueError(\"Not a good entry\")", "title": "" }, { "docid": "5fa119b78c3ac5febcc25f26abc7ddce", "score": "0.4521329", "text": "def shortHelpString(self):\n return self.tr(\n \"<p><mark style='color:green'><strong>Layout di stampa con mappe multiple su singolo foglio\\n\\\n <mark style='color:blue'><strong>OPZIONI</strong></mark>\\n\\\n <mark style='color:black'>- Titolo composizione</mark>\\n\\\n <mark style='color:black'>- Sottotitolo</mark>\\n\\\n <mark style='color:black'>- Selezione estensione</mark>\\n\\\n <mark style='color:black'>- Selezione layer da rappresentare e loro ordine</mark>\\n\\\n <mark style='color:black'>- Impostazione dimensioni e orientamento carta</mark>\\n\\\n <mark style='color:black'>- Impostazione numero colonne, le righe vengono di conseguenza</mark>\\n\\\n <mark style='color:black'>- Impostazione scala, di default adatta</mark>\\n\\\n <mark style='color:black'>- Scelta percorso e tipo file di salvataggio (tutti i formati del salva come immagine)</i></mark>\\n\\\n <mark style='color:black'>- Il layout prodotto ha nome <mark style='color:red'><strong>'MultiMap_[<i>orientamento</i>]_[<i>formato</i>]\\n\\\n <mark style='color:blue'><strong>NOTA BENE</strong></mark>\\n\\\n <mark style='color:black'><strong>Tutti i layer devon avere lo stesso SR</strong></mark>\\n\\\n <mark style='color:black'><strong>I layer che vogliamo in tutte le mappe devono essere visibili</strong></mark>\\n\\\n <mark style='color:black'><strong>Di default l'ordinamento è alfanumerico e gli elementi sul layout bloccati</strong></mark>\\n\\\n <mark style='color:black'><strong>Modificare l'ordinamento in stampa agendo nella casella di selezione layer\\n\\\n <mark style='color:red'><i><strong>Qualora l'adattamento non dia quanto voluto agire sulla scala\\n\\\n\t\t\")", "title": "" }, { "docid": "8bd7137e51283a139609cae18abd0def", "score": "0.4520925", "text": "def make_a_marker(coords, label):\n color = \"f71c01\"\n lon, lat = coords.replace(\"Point(\", \"\").replace(\")\", \"\").split(\" \")\n return \"pin-s-{label}+{color}({lon},{lat})\".format(color = color, label = label, lon = lon, lat = lat)", "title": "" }, { "docid": "ece8e8bcd19733248cbb390ea6591921", "score": "0.45208976", "text": "def ObliqueChange(self) -> str:", "title": "" }, { "docid": "5f190c6718f4596d5dd6f0a3d2a20088", "score": "0.45006725", "text": "def ToolTip(self) -> str:", "title": "" }, { "docid": "5f190c6718f4596d5dd6f0a3d2a20088", "score": "0.45006725", "text": "def ToolTip(self) -> str:", "title": "" }, { "docid": "5f190c6718f4596d5dd6f0a3d2a20088", "score": "0.45006725", "text": "def ToolTip(self) -> str:", "title": "" 
}, { "docid": "cd23e0248348c5859e0aeb09a91c9bb4", "score": "0.44926664", "text": "def to_marker(self, i):\n ret = \"\"\n ret += 'var marker{:} = new L.Marker.SVGMarker([{:}, {:}],'.format(\n i, self.lat, self.long)\n ret += ' { iconOptions: { color: \"rgb(0,0,100)\" }});\\n'\n ret += 'marker{:}.desc = \\'<a href=\"{webpage}\"><b>{name}</b></a><br />{institution}, {country}\\';\\n'.format(\n i, name=self.name.replace('\\'', '\\\\\\''), webpage=self.webpage.replace('\\'', '\\\\\\''),\n institution=self.institution.replace('\\'', '\\\\\\''), country=self.country)\n ret += 'mymap.addLayer(marker{:});\\n'.format(i)\n ret += 'oms.addMarker(marker{:});\\n'.format(i)\n ret += \"marker{i}.on('mouseover', function(e)\".format(i=i)\n ret += \"{\" + \"\"\"\n popup.setContent(marker{i}.desc);\n popup.setLatLng(marker{i}.getLatLng());\n mymap.openPopup(popup);\n\"\"\".format(i=i)\n ret += \"});\"\n ret += \" marker{i}.on('mouseout', function(e)\".format(i=i)\n ret += \"\"\"{ mymap.closePopup(); });\n\"\"\"\n return ret", "title": "" }, { "docid": "0489c52ca6455ef5bbe18c6233f2caa4", "score": "0.44874063", "text": "def remove_mark(self, x, y1, y2):\n print 'plotting at', x, y1\n self.axes.plot(x, y1, 'xr')\n self.axes.plot(x, y2, 'xr')\n self.canvas.draw()", "title": "" }, { "docid": "0bc00cf7860eeecaddb160c21e91b28e", "score": "0.44797727", "text": "def draw_vector(p0, p1, color):\n\tax = plt.gca()\n\t# this plot is fixing the problem of hiding annotations because of their not markers origin\n\tax.plot(p1[0], p1[1], '.', alpha=0)\n\tax.annotate('', p1, p0, arrowprops=dict(facecolor=color, linewidth=1.0))", "title": "" }, { "docid": "a9b5e608d795d661eecba9e8f767491a", "score": "0.4479439", "text": "def banner(comment_mark: str) -> str:\n result = ('{} This is generated code. If it\\'s broken, then you'\n ' should\\n').format(comment_mark)\n result += ('{} fix the generation script, not this file.\\n'\n ).format(comment_mark)\n result += '\\n\\n'\n return result", "title": "" }, { "docid": "223cad033d5ecb1463b37b47a4391261", "score": "0.44758815", "text": "def not_your_fathers_function(old_spice_count, cougars_crowding):\n print \"You have %d bottles of old spice!\" % old_spice_count\n print \"There are %d cougars crowding...\" % cougars_crowding\n print \"Man that's enough for a party!\"\n print \"Get a blanket.\\n\"", "title": "" }, { "docid": "2fc87861da3e574ce8dac72aafac2a17", "score": "0.44681612", "text": "def replace_markers(mol):\n # [atom.OBAtom.SetAtomicNum( original_atomic_num_mapping[ atom.atomicnum ] ) for atom in mol.atoms if atom.atomicnum in range(89,109)]\n for atom in mol.atoms:\n if atom.atomicnum in range(89, 109):\n atom.OBAtom.SetAtomicNum(original_atomic_num_mapping[atom.atomicnum])\n return mol", "title": "" }, { "docid": "a5cd555628f8baea9c42972e8151557f", "score": "0.44628662", "text": "def test_render_operation_deprecated(testrenderer, oas_fragment):\n\n markup = textify(\n testrenderer.render_operation(\n \"/evidences\",\n \"post\",\n oas_fragment(\n \"\"\"\n responses:\n '201':\n description: An evidence created.\n deprecated: true\n \"\"\"\n ),\n )\n )\n assert markup == textwrap.dedent(\n \"\"\"\\\n .. 
http:post:: /evidences\n :deprecated:\n\n :statuscode 201:\n An evidence created.\n \"\"\".rstrip()\n )", "title": "" }, { "docid": "717ca5f705c1b4aa2fb7116e73342adf", "score": "0.4460233", "text": "def screen_bad_label(self, notation_list):\n print(\"There is {} records before screen the empty notation\".format(len(notation_list)))\n i = 0\n new_notation_list = []\n for image_detail in notation_list:\n '''\n Data format: {u'69139.jpg': [[419.66656, 133.99992, 532.33344, 248.00003999999998, 1], \n [232.49984, 161.66664, 368.0, 257.83344, 1]]}\n '''\n for key in image_detail.keys():\n if len(image_detail[key]) != 0:\n new_notation_list.append(image_detail)\n i += 1\n print(\"{} empty notations have been removed from list. {} useful data left.\"\n .format(str(len(notation_list) - len(new_notation_list)), len(new_notation_list)))\n return new_notation_list", "title": "" }, { "docid": "f0c55eba108e74476e99649b2222589b", "score": "0.44588187", "text": "def _add_deprecation_to_docstring(doc, date, instructions):\n if not doc:\n lines = ['DEPRECATED FUNCTION']\n else:\n lines = doc.splitlines()\n lines[0] += ' (deprecated)'\n\n notice = [\n '',\n 'THIS FUNCTION IS DEPRECATED. It will be removed after %s.' % date,\n 'Instructions for updating:',\n '%s' % instructions,\n ]\n\n if len(lines) > 1:\n # Make sure that we keep our distance from the main body\n if lines[1].strip():\n notice += ['']\n\n lines = [lines[0]] + notice + lines[1:]\n else:\n lines += notice\n\n return '\\n'.join(lines)", "title": "" }, { "docid": "263da38dfb56cbc1d14028fd14a12b32", "score": "0.44505417", "text": "def not_deprecated_with_deprecated_prefix(self):\n pass", "title": "" }, { "docid": "e4e97102f9622691c10f181e3bbbc285", "score": "0.44473296", "text": "def deprecated(func):\n ...", "title": "" }, { "docid": "1ff0f2800a3271c5c33e01a2ed2b246b", "score": "0.44401452", "text": "def warning(s, end=\"\\n\", seq=-1):\n stdout(s, end=end, start=\"[Warn]\", seq=seq)", "title": "" }, { "docid": "cec0f9e22edeb8306cea8255ee0c6a73", "score": "0.44339174", "text": "def print_simple_annotation(code, linenos):\n for lineno, line in enumerate(code.splitlines(), start=1):\n print(\" {0:s} {1}\".format(\"X\" if lineno in linenos else \" \", line))", "title": "" }, { "docid": "56c7891f9e0dac602e7a36383936f5df", "score": "0.44311038", "text": "def _add_deprecated_function_notice_to_docstring(doc, date, instructions):\n main_text = [\n 'THIS FUNCTION IS DEPRECATED. 
It will be removed %s.'\n % ('in a future version' if date is None else ('after %s' % date))\n ]\n if instructions:\n main_text.append('Instructions for updating:')\n return decorator_utils.add_notice_to_docstring(\n doc,\n instructions,\n 'DEPRECATED FUNCTION',\n '(deprecated)',\n main_text,\n notice_type='Deprecated')", "title": "" }, { "docid": "a9469c4be455202ecb33e06c3f379875", "score": "0.4431091", "text": "def postfix_marker(self):\n return ''", "title": "" }, { "docid": "60184293925dce95d698e6b89eab4949", "score": "0.4427081", "text": "def warning(self, message):\n padding,message = self.__split_padding(message)\n print(padding + self.term.bold + message + self.term.normal)", "title": "" }, { "docid": "dcd63ad4d071e92cce08a3e167437988", "score": "0.4424313", "text": "def SetSashInvisible(self, invisible=True):", "title": "" }, { "docid": "af9c802924f1753ccb11568afd2be508", "score": "0.4422174", "text": "def add_marks(self, event):\n self.model.generate_marks()", "title": "" }, { "docid": "74b2b1badda36d698b414c02097c8b49", "score": "0.44216335", "text": "def test_mark_skip(existing_type, new_type) -> None:\n line_markers = LineMarkers([''], 0)\n line_markers.set(0, existing_type)\n\n result = line_markers.set(0, new_type)\n\n assert result is False\n assert line_markers.types == [existing_type]", "title": "" }, { "docid": "83eef0eb212edc2c4273fa4846b7f15c", "score": "0.44175714", "text": "def _erase_and_show(oldlen, buf, cursor):\n print(\"{}{}\".format(Console.seq['leftarrow']*oldlen, ' '*oldlen), end='')\n Console._show(buf, cursor, oldlen)", "title": "" }, { "docid": "8d1cbd0f89ea4615b9e7029f843bd2e7", "score": "0.44154546", "text": "def obsolete_entries(self):\n return [e for e in self if e.obsolete]", "title": "" }, { "docid": "d07bd046240fdc67e182e6ef51c83d4c", "score": "0.44131142", "text": "def build_overlay(places):\n overlay = \"\"\n label = 1\n for place in places:\n overlay = overlay + \",{}\".format(make_a_marker(place[\"coords\"], label))\n label += 1\n if overlay[0] == \",\":\n overlay = overlay[1:]\n return overlay", "title": "" }, { "docid": "470a56bd543727b0c3f65c94ff3dc27d", "score": "0.441242", "text": "def warning(functino, message):\n pass\n # WARNING = '\\033[93m'\n # END = '\\033[0m'\n # print(WARNING, end='', file=sys.stderr)\n # print(\"WARNING {0}: {1}\".format(functino, message), end='', file=sys.stderr)\n # print(END, file=sys.stderr)", "title": "" }, { "docid": "f007705250f051a2c8b2a807dcd95e68", "score": "0.44122663", "text": "def _repr_defn(self):\n i = self.domain().ambient_space()._repr_generic_point()\n o = self.codomain().ambient_space()._repr_generic_point(self.defining_polynomials())\n return \"Defined on coordinates by sending %s to\\n%s\"%(i,o)", "title": "" }, { "docid": "e2f2103c45d7de40cd431db0eafe8155", "score": "0.4402021", "text": "def createNodeEditorMarkingMenu(ned):\n\n pass", "title": "" }, { "docid": "bcdd3ecc8849b9a79e985d23e7d3b00e", "score": "0.440131", "text": "def off_and_on_without_data():\n # fmt: off\n\n # hey, that won't work\n\n # fmt: on\n pass", "title": "" }, { "docid": "e681a76513e0a31c8a2736246616daee", "score": "0.44012672", "text": "def show_art_oovs(article, vocab):\n unk_token = vocab.word2id(UNKNOWN_TOKEN)\n words = article.split(' ')\n words = [(\"__%s__\" % w) if vocab.word2id(w) == unk_token else w for w in words]\n out_str = ' '.join(words)\n return out_str", "title": "" }, { "docid": "8f65f93cd7f609e2f87bf6a74058fd76", "score": "0.43977827", "text": "def negative_markers2(markers,positive):\n global cpt\n 
global op\n op = 0\n cpt = 0\n positive = sorting.merge_sort(positive, experience.compare)\n negative = []\n for i in markers:\n if positive[recherche_dichotomique(i, positive)] != i:\n negative.append(i)\n #print(\"Nbre Opération: \", op)\n #print(\"Nbre Compteur:\", cpt)\n return negative,cpt,op", "title": "" }, { "docid": "76275fe063e0cc0922b13d8366f08061", "score": "0.43953627", "text": "def off_and_on_without_data():\n # fmt: off\n\n\n #hey, that won't work\n\n\n # fmt: on\n pass", "title": "" }, { "docid": "d6c1c6b7151a33a92a0cc98ed5bbfb4b", "score": "0.43944067", "text": "def deprecated(alternative=None, since=None, impact=None):\n\n def deprecated_decorator(func):\n since_str = \" since %s\" % since if since else \"\"\n impact_str = impact if impact else \"This method will be removed in a future release.\"\n\n notice = \"``{qual_function_name}`` is deprecated{since_string}. {impact}\".format(\n qual_function_name=\".\".join([func.__module__, func.__qualname__]),\n since_string=since_str,\n impact=impact_str,\n )\n if alternative is not None and alternative.strip():\n notice += \" Use ``%s`` instead.\" % alternative\n\n @wraps(func)\n def deprecated_func(*args, **kwargs):\n warnings.warn(notice, category=FutureWarning, stacklevel=2)\n return func(*args, **kwargs)\n\n if func.__doc__ is not None:\n deprecated_func.__doc__ = \".. Warning:: \" + notice + \"\\n\" + func.__doc__\n\n return deprecated_func\n\n return deprecated_decorator", "title": "" }, { "docid": "04d21221ffe894947a60f384d7d0acbc", "score": "0.43903127", "text": "def set_package_mark(self, package, marker, text=''):\n ipath = self.get_install_path(package)\n hd = os.path.join(ipath, self.history_dir, 'markers')\n if not os.path.exists(hd):\n os.makedirs(hd)\n fic = open(os.path.join(hd, marker), 'w')\n fic.write(text)\n fic.close()", "title": "" }, { "docid": "a4140b1d2e08f1d649c0b39dae7983b9", "score": "0.43897864", "text": "def show_no_style(self):\n\n formated_text = \"\"\n if (self.valid):\n formated_text = \"\".join([formated_text,\n \"{}\\n {}\\n\".format(self.source_name,\n self.word_text)])\n for entry in self.entries:\n formated_text = \"\\n \".join([formated_text,\n \"Pronounciation:\\{}\\ \\n POS:{}\\n\".\n format(entry.pronounciation,\n entry.pos)])\n formated_text = \" \".join([formated_text,\n \" \".join(entry.explanation[0])])\n else:\n formated_text = self.source_name + \"\\n\\n No result\"\n return formated_text + \"\\n\\n\"", "title": "" }, { "docid": "be8b2e1b28f1e4567c306cafc7d36013", "score": "0.43871883", "text": "def hide(self):\n pass # implemented in Ada", "title": "" }, { "docid": "be8b2e1b28f1e4567c306cafc7d36013", "score": "0.43871883", "text": "def hide(self):\n pass # implemented in Ada", "title": "" }, { "docid": "be8b2e1b28f1e4567c306cafc7d36013", "score": "0.43871883", "text": "def hide(self):\n pass # implemented in Ada", "title": "" }, { "docid": "463cdf161ad31946fa326b990a92c8a4", "score": "0.43759313", "text": "def CustomMarking(type='x-custom-marking', properties=None):\n def wrapper(cls):\n return _custom_marking_builder(cls, type, properties, '2.1', _STIXBase21)\n return wrapper", "title": "" }, { "docid": "c8c055a86e8e73d4b964134d73eeb191", "score": "0.43672064", "text": "def missing_arc_description(\n self, start: int, end: int, executed_arcs: Set[Tuple[int, int]] = None\n ) -> str:\n return self.parser.missing_arc_description(start, end, executed_arcs)", "title": "" }, { "docid": "3ac0f3da5a7deebe90645d9f078eeb80", "score": "0.43612963", "text": "async def remove_tip(self, 
mount: Mount) -> None:\n ...", "title": "" }, { "docid": "84e58a8a85771a8ff2268b17349e3c74", "score": "0.43609607", "text": "def make_marker_trace(well_name, df):\n\n well_coord = df.loc[df['NEW_WELL_NAME'] == well_name] # filtered df\n well_coord = well_coord[pd.notnull(well_coord['MRKNAME'])] # filter out null values for marker name\n grouped = well_coord.groupby('MRKNAME').first()\n marker_index = grouped.index\n mrk_clr = marker_index.map(marker_colordict)\n\n x = np.concatenate([grouped['MAP_EASTING']], axis = 0)\n y = np.concatenate([grouped['MAP_NORTHING']], axis = 0)\n z = np.concatenate([grouped['TVDSS']], axis = 0)\n\n trace = go.Scatter3d(\n x = x, y = y, z = z,\n mode = 'markers+text',\n marker = dict(color = mrk_clr,\n size = 5),\n name = well_name + \"<br>\" + \"Markers\",\n text = marker_index,\n textposition = \"middle right\",\n textfont = dict(\n size = 10),\n legendgroup = well_name,\n # hoverinfo = 'text', <--------- gives only the text\n showlegend = True\n )\n\n return trace", "title": "" }, { "docid": "847f111c61ef25cc52b246bda0d34600", "score": "0.43604183", "text": "def drawMarks(X,M,color='black',leader='',ontop=True):\n _large_ = 20000\n if len(M) > _large_:\n if not ack(\"You are trying to draw marks at %s points. This may take a long time, and the results will most likely not be readible anyway. If you insist on drawing these marks, anwer YES.\" % len(M)):\n return None\n M = marks.MarkList(X,M,color=color,leader=leader,ontop=ontop)\n pf.canvas.addAnnotation(M)\n pf.canvas.numbers = M\n pf.canvas.update()\n return M", "title": "" }, { "docid": "6490f7e5cc45de21dda05c619db5b0d2", "score": "0.43576244", "text": "def replace_anchor(cls, *args):\n if not cls.depwarn_printed:\n cls.depwarn_printed = True\n print(Merger.DEPRECATION_WARNING, file=sys.stderr)\n Anchors.replace_anchor(*args)", "title": "" }, { "docid": "0110b7543b9a1e62d6893fb3c6215948", "score": "0.435645", "text": "def __create_purpose():\n\n return 'Outright'", "title": "" }, { "docid": "097656c93c7f172551dae5984f7666a1", "score": "0.43554184", "text": "def epilog():\n result = \"\"\n for clz in all_fixer_classes():\n result += fixer_class_to_fixer_key(clz)\n result += \"\\n \"\n result += clz.__doc__\n result += \"\\n\"\n return result", "title": "" } ]
60c512e59b628f5fdeb197bb02dd4ba3
Load the keywords for the page object identified by the type and object name. The page type / object name pair must have been registered using the cumulusci.robotframework.pageobject decorator.
[ { "docid": "1f16bf2aad9aa0d0cea64ca5ddbe1c87", "score": "0.6187196", "text": "def load_page_object(self, page_type, object_name=None):\n pobj = self._get_page_object(page_type, object_name)\n self._set_current_page_object(pobj)\n return pobj", "title": "" } ]
[ { "docid": "527b8070fd3ab07ee9407f79410b3c6b", "score": "0.7067027", "text": "def test_load_single_page_object(self, get_context_mock, get_library_instance_mock):\n\n po = PageObjects(FOO_PATH)\n\n # Until we request the page object, we shouldn't be able to\n # see the page-specific keywords\n self.assertEqual(po.get_keyword_names(), CORE_KEYWORDS)\n\n # Now load the page object and verify the Foo keyword shows up\n po.load_page_object(\"Test\", \"Foo\")\n self.assertEqual(po.get_keyword_names(), CORE_KEYWORDS + [\"foo_keyword_1\"])", "title": "" }, { "docid": "10eac4f5788070539bce98eec22c197e", "score": "0.7022465", "text": "def test_load_multiple_page_objects(\n self, get_context_mock, get_library_instance_mock\n ):\n po = PageObjects(FOO_PATH, BAR_PATH)\n\n # Until we request the page object, we shouldn't be able to\n # see any keywords except the core page object keywords\n self.assertEqual(po.get_keyword_names(), CORE_KEYWORDS)\n\n # now load the \"Foo\" page object and verify only the Foo keyword\n # shows up and is callable.\n po.load_page_object(\"Test\", \"Foo\")\n self.assertEqual(po.get_keyword_names(), CORE_KEYWORDS + [\"foo_keyword_1\"])\n self.assertEqual(po.foo_keyword_1(\"hello\"), \"foo keyword 1: hello\")\n\n # now load the \"Bar\" page object and verify only the Bar keyword\n # shows up and is callable.\n po.load_page_object(\"Test\", \"Bar\")\n self.assertEqual(\n po.get_keyword_names(), CORE_KEYWORDS + [\"bar_keyword_1\", \"bar_keyword_2\"]\n )\n self.assertEqual(po.bar_keyword_1(\"hello\"), \"bar keyword 1: hello\")", "title": "" }, { "docid": "7f036a211d27e66de806437c89a45f5b", "score": "0.64688563", "text": "def test_page_object_keyword_is_callable(\n self, get_context_mock, get_library_instance_mock\n ):\n po = PageObjects(FOO_PATH)\n po.load_page_object(\"Test\", \"Foo\")\n result = po.foo_keyword_1(\"hello\")\n self.assertEqual(result, \"foo keyword 1: hello\")", "title": "" }, { "docid": "3460f6d4fa4db67f21863d35aeb7dac2", "score": "0.61951244", "text": "def log_page_object_keywords(self):\n for key in sorted(self.registry.keys()):\n pobj = self.registry[key]\n keywords = get_keyword_names(pobj)\n logger.info(\"{}: {}\".format(key, \", \".join(keywords)))", "title": "" }, { "docid": "871fafbdb5193aa7242fb959478f7ba8", "score": "0.5784443", "text": "def instantiate_page_objects(ctx: Context) -> None:\n for cls in PAGE_CLASSES:\n setattr(ctx, cls.__name__, cls(ctx))", "title": "" }, { "docid": "53795a4ab64ebdbe1b1f30c9b64608ac", "score": "0.56938374", "text": "def test_log_page_object_keywords(\n self, log_mock, get_context_mock, get_library_instance_mock\n ):\n po = PageObjects(FOO_PATH, BAR_PATH)\n po.log_page_object_keywords()\n expected_calls = [\n mock.call(\"('Test', 'Foo'): foo_keyword_1\"),\n mock.call(\"('Test', 'Bar'): bar_keyword_1, bar_keyword_2\"),\n ]\n log_mock.assert_has_calls(expected_calls, any_order=True)", "title": "" }, { "docid": "353cee24d23734ac9758ca2bed57e0e8", "score": "0.53215164", "text": "def FindObject (self, object):\n texts = []\n res = []\n titles = []\n\n for page in self.object['pages']:\n for element in page['elements']:\n try: \n if element['type'] == 'heading':\n title = self.GetText(element)\n titles.append(title)\n texts.append(res)\n res = []\n\n if element['type'] in ['word', 'line', 'character', 'paragraph', 'heading', 'list']:\n res.append(element)\n except TypeError:\n continue\n\n texts.append(res)\n return texts[1:], titles", "title": "" }, { "docid": "05bb373aea61678e6912a7fce026da35", "score": "0.51670617", 
"text": "def populate_pages(words):\n PAGES.add(words[0])", "title": "" }, { "docid": "19ca6cf9d12ac1650dfcd1f260fcde60", "score": "0.5126057", "text": "def keywords(context):\n\n data = get_keywords(10)\n\n return {\n 'data': data,\n }", "title": "" }, { "docid": "6f6093b26489426cf4a15cd78486a9e1", "score": "0.5054584", "text": "def keywords(self):", "title": "" }, { "docid": "f8c82955aaf9fd3aeec6bb36d7ea5b4e", "score": "0.488223", "text": "def go_to_page(self, page_type, object_name, **kwargs):\n\n pobj = self._get_page_object(page_type, object_name)\n try:\n pobj._go_to_page(**kwargs)\n except Exception:\n self.selenium.capture_page_screenshot()\n raise", "title": "" }, { "docid": "9875ebfdf4f5592ad78383d7b0a90195", "score": "0.48557243", "text": "def get_keywords(object_data: list):\n values = []\n for value in object_data:\n if type(value) is OrderedDict and 'string' in value.keys() and '#text' in value['string'].keys():\n values.append(value['string']['#text'])\n elif type(value) is OrderedDict and '#text' in value.keys():\n values.append(value['#text'])\n return values", "title": "" }, { "docid": "224a30d38e929c1a2af63df1ccef462f", "score": "0.484584", "text": "def keywords(self):\n pass", "title": "" }, { "docid": "97f3a9fd442bca28c05d3e1be94fe4eb", "score": "0.47911662", "text": "def force_load():\n import itk\n for k in dir(itk):\n getattr(itk, k)", "title": "" }, { "docid": "260c818601bc679da25a4f8b745adde3", "score": "0.47803766", "text": "def DetectObjects(self, robot, **kw_args):\n from prpy.perception.base import PerceptionException\n env = robot.GetEnv()\n # Detecting empty list will detect all possible objects\n detections = self._GetDetections([])\n\n for (obj_name, obj_pose) in detections:\n if (obj_name not in self.query_to_kinbody_map):\n continue\n\n kinbody_name = self.query_to_kinbody_map[obj_name]\n\n if env.GetKinBody(kinbody_name) is None:\n from prpy.rave import add_object\n kinbody_file = '%s.kinbody.xml' % kinbody_name\n new_body = add_object(\n env,\n kinbody_name,\n os.path.join(self.kinbody_path, kinbody_file))\n print kinbody_name\n body = env.GetKinBody(kinbody_name)\n body.SetTransform(obj_pose)", "title": "" }, { "docid": "e22a6e55069e8a9f6860c2d724b52114", "score": "0.47617966", "text": "def populate():\n pages_dict = {\n \"Python\": {\n \"cat_args\": {\"likes\": 64, \"views\": 128},\n \"pargs_list\":\n [\n {\"title\": \"Official Python Tutorial\",\n \"url\": \"http://docs.python.org/2/tutorial/\",\n \"views\": 100, },\n {\"title\": \"How to Think like a Computer Scientist\",\n \"url\": \"http://www.greenteapress.com/thinkpython/\",\n \"views\": 3, },\n {\"title\": \"Learn Python in 10 Minutes\",\n \"url\": \"http://www.korokithakis.net/tutorials/python/\",\n \"views\": 11, },\n ],\n },\n \"Django\": {\n \"cat_args\": {\"likes\": 32, \"views\": 64},\n \"pargs_list\":\n [\n {\"title\": \"Official Django Tutorial\",\n \"url\": \"https://docs/djangoproject.com/en/1.5/intro/tutorial01/\",\n \"views\": 23, },\n {\"title\": \"Django Rocks\",\n \"url\": \"http://www.djangorocks.com/\",\n \"views\": 8, },\n {\"title\": \"How to Tango with Django\",\n \"url\": \"http://www.tangowithdjango.com/\",\n \"views\": 20, },\n ],\n },\n \"Other Frameworks\": {\n \"cat_args\": {\"likes\": 16, \"views\": 32},\n \"pargs_list\":\n [\n {\"title\": \"Bottle\",\n \"url\": \"http://bottlepy.org/docs/dev/\",\n \"views\": 10, },\n {\"title\": \"Flask\",\n \"url\": \"http://flask.pocoo.org\",\n \"views\": 4, },\n ],\n },\n \"Cars\": {\n \"pargs_list\":\n [\n {\"title\": \"Shelby 
Mustang\",\n \"url\": \"http://www.ford.com/cars/mustang/trim/shelbygt500/\",},\n {\"title\": \"VW Touareg\",\n \"url\": \"http://www.vw.com/models/touareg/\",},\n {\"title\": \"Porsche 911\",\n \"url\": \"http://www.porsche.com/usa/models/911/\",},\n {\"title\": \"Testla Model X\",\n \"url\": \"http://www.teslamotors.com/modelx\",},\n ]\n }\n }\n for cat_name, a_dict in pages_dict.iteritems():\n # Category.url is set in __init__ after super, based off Category.name attribute\n a_cat, is_created = add_to_model(Category, name=cat_name, **a_dict[\"cat_args\"])\n for kwargs in a_dict[\"pargs_list\"]:\n a_page, is_created = add_to_model(Page, category=a_cat, **kwargs)\n print \" - {0} - {1}\".format(str(a_cat), str(a_page))", "title": "" }, { "docid": "7f9058eafeaf61c43edbd80e1ccd67ad", "score": "0.47515494", "text": "def jsonld_type(self):\n return ['HomePage', 'StaticPage'] + super().jsonld_type()", "title": "" }, { "docid": "7d8e1104cfe59e4d9909cb5e3d02d0cf", "score": "0.4739784", "text": "def testPageLoad(self):\n\n # TODO: Better tests, at least squeeze out a 200OK for these views\n self.loadPage('/objectbrowser/')\n self.loadPage('/objectbrowser/new_edit', 301)\n self.loadPage('/objectbrowser/copy', 404)\n self.loadPage('/objectbrowser/search')\n self.loadPage('/objectbrowser/delete', 404)\n self.loadPage('/objectbrowser/bulk_edit')\n self.loadPage('/objectbrowser/bulk_delete')\n self.loadPage('/objectbrowser/bulk_copy')\n\n self.loadPage('/objectbrowser/edit_all', 404)\n self.loadPage('/objectbrowser/copy_and_edit', 301)\n\n self.loadPage('/objectbrowser/confighealth')\n self.loadPage('/objectbrowser/plugins')\n self.loadPage('/objectbrowser/nagios.cfg')\n self.loadPage('/objectbrowser/geek_edit', 404)\n self.loadPage('/objectbrowser/advanced_edit', 404)\n\n #self.loadPage('/objectbrowser/add_to_group')\n self.loadPage('/objectbrowser/add/host', 200)\n self.loadPage('/objectbrowser/add/hostgroup', 200)\n self.loadPage('/objectbrowser/add/service', 200)\n self.loadPage('/objectbrowser/add/servicegroup', 200)\n self.loadPage('/objectbrowser/add/contact', 200)\n self.loadPage('/objectbrowser/add/contactgroup', 200)\n self.loadPage('/objectbrowser/add/timeperiod', 200)\n self.loadPage('/objectbrowser/add/command', 200)\n self.loadPage('/objectbrowser/add/template', 200)", "title": "" }, { "docid": "3a84151b36eb07dced1b9257e234a86c", "score": "0.47356558", "text": "def _discover_objects(self):\n pass", "title": "" }, { "docid": "f9c6bd412b4324822713edb0ebe3e965", "score": "0.47337055", "text": "def generate_objects(self, *args):\n switcher = {\n 0: self.generate_login,\n 1: self.generate_page_patientID,\n 2: self.generate_page_measure,\n 3: self.generate_page_results,\n 4: self.generate_boot,\n 5: self.generate_new_account,\n 6: self.generate_settings,\n }\n switcher[self.page]()", "title": "" }, { "docid": "41af8c426020a87a44c3c05c44184c24", "score": "0.47307608", "text": "def _set_current_page_object(self, pobj):\n\n # Note: at the moment only one object is loaded at a time. 
We might want\n # to consider pushing and popping page objects on a stack so that more than\n # one can be active at a time.\n self.current_page_object = pobj\n libname = self.current_page_object.__class__.__name__\n\n # rename this library to be the name of our page object,\n # and make sure it is at the front of the library search order\n BuiltIn()._namespace._kw_store.get_library(self).name = libname\n BuiltIn().reload_library(self)\n BuiltIn().set_library_search_order(libname)\n return pobj", "title": "" }, { "docid": "07c6507391d55fef4f7c9a891b256e44", "score": "0.46874958", "text": "def _load_by_oclass(self, oclass):", "title": "" }, { "docid": "07c6507391d55fef4f7c9a891b256e44", "score": "0.46874958", "text": "def _load_by_oclass(self, oclass):", "title": "" }, { "docid": "491eefa9691c0c42cbdc9b08f69f7aa7", "score": "0.4657643", "text": "def parse_lt_objs (lt_objs, page_number, text_content=[], page_height=801, dpi_calculation_factor=300/(72)):\n for lt_obj in lt_objs:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine) or isinstance(lt_obj, LTTextBoxHorizontal):\n # text\n text_content.append({\"class\":lt_obj.__class__.__name__, \"page\":page_number, \"x0\":int(lt_obj.x0*dpi_calculation_factor), \"y0\":int((page_height - lt_obj.y0)*dpi_calculation_factor), \"x1\":int(lt_obj.x1*dpi_calculation_factor), \"y1\":int((page_height - lt_obj.y1)*dpi_calculation_factor), \"height\":int(lt_obj.height*300/72), \"width\":int(lt_obj.width*300/72), \"text\":lt_obj.get_text()})\n # elif isinstance(lt_obj, LTFigure):\n # # LTFigure objects are containers for other LT* objects, so recurse through the children\n # text_content.append(parse_lt_objs(lt_obj._objs, page_number, text_content, page_height, dpi_calculation_factor))\n if text_content!=[]:\n return text_content\n else:\n return", "title": "" }, { "docid": "e3baf026751f11bc9b34dd946b7ee6ab", "score": "0.46152517", "text": "def keywords(self):\r\n return [v['keyword'] for v in self.types.values()]", "title": "" }, { "docid": "b677fbb2b0a5fbd81a025ea4843426d0", "score": "0.46108988", "text": "def keywords(self, keywords):\n\n self._keywords = keywords", "title": "" }, { "docid": "a4254962d5b0e91fb0d2fb031a2ff76b", "score": "0.46104968", "text": "def init_screen(context, page_name):\n page_class = pages.factory(page_name)\n context.current_page = page_class(context.driver)", "title": "" }, { "docid": "0f8a872d1b721c35b1caefce0a2d19cd", "score": "0.45963678", "text": "async def build_docs_lookup_table(self) -> None:\n cache = {}\n\n for key, page in self.page_types.items():\n async with http.session.get(page + \"/objects.inv\") as resp:\n data = await resp.read()\n\n if resp.status != 200:\n cache[key] = {\"status\": resp.status, \"message\": \"Failed to fetch objects.\", \"response\": data}\n else:\n stream = utils.SphinxObjectFileReader(data)\n cache[key] = {\"status\": resp.status, \"response\": utils.parse_object_inv(stream, page)}\n\n self.cache = cache", "title": "" }, { "docid": "7553011af80668bb4c3a00ed077089f4", "score": "0.45897758", "text": "def _load_objects(self):\n object_dict = dict(\n [\n (name, cls)\n for name, cls in objects.__dict__.items()\n if isinstance(cls, type)\n ]\n )\n for name, object_cls in object_dict.items():\n setattr(self, name, object_cls)\n setattr(getattr(self, name), \"data_directory\", self.data_directory)", "title": "" }, { "docid": "6a3f9e7c27c151896bf051c1ff191ec4", "score": "0.45616296", "text": "def create_keyword_file(self):\n\n self.driver.get(self.start_page)\n element = 
self.driver.find_elements_by_class_name(\n \"form-keyboard-letter\")\n for each in element:\n url = each.get_attribute(\"href\")\n if url.startswith(\"https://www.mskcc.org/cancer-care\"):\n self.pages[each.text] = url\n else:\n url = self.domain + url\n self.pages[each.text] = url", "title": "" }, { "docid": "9b6b0ccf7655b3c45eeafa0c55cce908", "score": "0.45570046", "text": "def get_keywords(self, num_keywords):\n pass", "title": "" }, { "docid": "e5841f9361b2acf151cdb7643596eb43", "score": "0.45455265", "text": "def get_document_keywords(document,\n keywords,\n preprocess_type=PreprocessWordType.NORMALIZE):\n matches = set()\n for page in document:\n for word in page.words:\n preprocessed_word = query_utils.preprocess_word(word,\n preprocess_type)\n if preprocessed_word in keywords:\n matches.add(preprocessed_word)\n return sorted(list(matches))", "title": "" }, { "docid": "513035b6008e754033fc4248f84b8499", "score": "0.45435277", "text": "def requestPage(self, term):", "title": "" }, { "docid": "73d50f5acd659ceb6acedafce54b1d3c", "score": "0.45310715", "text": "def load_obj():\n pass", "title": "" }, { "docid": "d91186f051737490124611772dfddca4", "score": "0.45259234", "text": "def __init__(self, *args, **kwargs):\n # per instance cache\n self._languages = None\n self._content_dict = None\n self._is_first_root = None\n self._complete_slug = None\n super(Page, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "460c5935d8d06c713dfa2a8e8e36edc0", "score": "0.45116097", "text": "def pickup_class(cls, link, **kwargs):\n type = Utils.page_type(link)\n if type in parameters.classes:\n return cls.magic_import(type, link, **kwargs)\n return cls.magic_import('page', link, **kwargs)", "title": "" }, { "docid": "acc4c97fac62491e8cf78636931e8447", "score": "0.45084918", "text": "def Load(self, obj):\n pass", "title": "" }, { "docid": "783e3aa41e0c7fd500a32b1e08838003", "score": "0.44953918", "text": "def test_page_tags():", "title": "" }, { "docid": "9f405a66a367ec59dd3370aaea41865b", "score": "0.4487754", "text": "def metasearch(keyword, fname=None):\r\n if type(fname) == type(''):\r\n pickle = pathjoin(META_DIR, fname[:-3] + 'meta')\r\n return cPickle.load(file(pickle, 'r')).get(keyword, '')\r\n elif type(fname) == type([]):\r\n for f in fname:\r\n pickle = pathjoin(META_DIR, fname.replace('.txt', '.meta'))\r\n return cPickle.load(file(pickle, 'r'))\r\n data = (keyword, {})\r\n os.path.walk(META_DIR, walkgetmeta, data)\r\n return data[1]", "title": "" }, { "docid": "26c12dad400ae78e48139d844c39f3ae", "score": "0.44842523", "text": "def load_interactive_objects(self, env):\n interactive_objects = []\n object_paths = [\n '002_master_chef_can',\n '003_cracker_box',\n '004_sugar_box',\n '005_tomato_soup_can',\n '006_mustard_bottle',\n ]\n\n for object_path in object_paths:\n obj = YCBObject(object_path)\n env.simulator.import_object(obj)\n interactive_objects.append(obj)\n return interactive_objects", "title": "" }, { "docid": "e9d2ff3845266a0c2dca21a68f62a5fd", "score": "0.4483138", "text": "def prepare(self, obj):\n data = super(FluentPageIndex, self).prepare(obj)\n # get all text plugins\n try:\n text_contentitems = obj.contentitem_set.instance_of(TextItem)\n except:\n text_contentitems = []\n try:\n file_contentitems = obj.contentitem_set.instance_of(FileItem)\n except:\n file_contentitems = []\n try:\n picture_contentitems = obj.contentitem_set.instance_of(PictureItem)\n except:\n picture_contentitems = []\n t = loader.select_template(('search/indexes/fluentpage/fluentpage_text.txt', 
))\n data['text'] = t.render(Context({'object': obj,\n 'content_data': { 'text': text_contentitems, 'file': file_contentitems, 'picture': picture_contentitems }}))\n return data", "title": "" }, { "docid": "b2a5db608f0ebad1d40a5f295649064d", "score": "0.44677135", "text": "def populate_sobjects():\n\n # Get username\n toolingapi_settings = context.get_toolingapi_settings()\n username = toolingapi_settings[\"username\"]\n\n # If sobjects is exist in sobjects_completion.sublime-settings, just return it\n sobjects_completions = sublime.load_settings(\"sobjects_completion.sublime-settings\")\n if sobjects_completions.has(username):\n return sobjects_completions.get(username).keys()\n\n # If sobjects is not exist in globals(), post request to pouplate it\n api = SalesforceApi(toolingapi_settings)\n thread = threading.Thread(target=api.describe_global, args=())\n thread.start()\n\n while thread.is_alive() or api.result == None:\n time.sleep(1)\n\n sobjects = []\n for sobject in api.result[\"sobjects\"]:\n sobjects.append(sobject[\"name\"])\n\n globals()[username + \"sobjects\"] = sobjects\n return sobjects", "title": "" }, { "docid": "f711692fc28e4d0e488b3ac0684fc27f", "score": "0.44674292", "text": "def test_non_namespaced_object_name(\n self, get_context_mock, get_library_instance_mock\n ):\n with mock.patch.object(CumulusCI, \"get_namespace_prefix\", return_value=\"\"):\n pobj = PageObjects().load_page_object(\"Listing\", \"CustomObject__c\")\n self.assertEqual(pobj.object_name, \"CustomObject__c\")", "title": "" }, { "docid": "c380c9e386f1ac2ee0f33488706c4b7c", "score": "0.44439945", "text": "def parse_data(self, page: str, **kwargs) -> dict:", "title": "" }, { "docid": "a419e6d7d500fd966c9b7db9ff236996", "score": "0.44413304", "text": "def get_urls(keywords, obj_type):\n if obj_type == OBJECT_TYPES[0]:\n github_url = 'https://github.com/search?utf8=%E2%9C%93&q=topic%3A{}+topic%3A{}+topic%3A{}&\\\nref=simplesearch'.format(keywords[0], keywords[1], keywords[2])\n elif obj_type == OBJECT_TYPES[1]:\n github_url = 'https://github.com/search?q={}+{}+{}+type%3Aissue'.format(\n keywords[0], keywords[1], keywords[2]\n )\n elif obj_type == OBJECT_TYPES[2]:\n github_url = 'https://github.com/search?q={}+{}+{}&type=Wikis'.format(\n keywords[0], keywords[1], keywords[2]\n )\n r = requests.get(url=github_url)\n result_html = fromstring(r.text)\n urls = [\n {'url': 'https://github.com' + i.get('href')} for i in result_html.cssselect(\n 'div.f4.text-normal a'\n )\n ]\n return urls", "title": "" }, { "docid": "d597e420056608eb2334e61784ed3d14", "score": "0.44357446", "text": "def run_object_suggestion():\n #Not tested fully on this version of the code\n from latent_dataset import LatentDataset\n save_vis = False\n train_size = 5000\n test_size = 20\n trials = 10\n dataset = LatentDataset(\n data_folder = data_dir,\n scene_indices = (train_size, train_size + test_size),\n importance_order = True\n )\n synth = SceneSynth(data_dir=data_dir)\n for scene_i in range(test_size):\n # Grab a random scene, sort and slice its objects, then\n # run multiple completions onf it\n scene, input_nodes, output_node = dataset.get_scene(scene_i)\n scene.object_nodes = input_nodes\n synth.suggest_next_object(scene, save_dir, trials)", "title": "" }, { "docid": "de83e0055b1ee8351afce2b2797e4d28", "score": "0.44352487", "text": "def pages(self):\n ...", "title": "" }, { "docid": "2890dd7670915d8790a1b52d091e749b", "score": "0.43997684", "text": "def at_object_creation(self):\n # this defines the available types along with the 
lockstring\n # restricting access to them. Anything not defined in this\n # dictionary is forbidden to script at all. Just because\n # a script type is -available- does not mean there is any\n # code yet in that slot!\n self.db.evlang_locks = {}\n # This stores actual code snippets. Only code with codetypes\n # matching the keys in db.evlang_locks will work.\n self.db.evlang_scripts = {}\n # store Evlang handler non-persistently\n self.ndb.evlang = self.init_evlang()", "title": "" }, { "docid": "52f0b7f126fa82a7ea7f2e70d9d9d6b7", "score": "0.4381697", "text": "def init_search_keywords(self):\n if not 'keywords' in self.inputs:\n self.inputs['keywords'] = []\n for keyword in self.inputs['keywords']:\n if not keyword in self.result['search_keywords']:\n self.result['search_keywords'][keyword] = {}\n if not keyword in self.recent['search_keywords']:\n self.recent['search_keywords'][keyword] = {}", "title": "" }, { "docid": "14f61c80ea64a20bf8a1337bae18e6e7", "score": "0.43763202", "text": "def Initialize(self, keyword) -> None:\n self.keyword = keyword", "title": "" }, { "docid": "8f4ec57b8095f4db4ccb054bc401873e", "score": "0.4358647", "text": "def current_page_should_be(self, page_type, object_name, **kwargs):\n pobj = self._get_page_object(page_type, object_name)\n try:\n pobj._is_current_page(**kwargs)\n self.load_page_object(page_type, object_name)\n except Exception:\n self.selenium.capture_page_screenshot()\n raise", "title": "" }, { "docid": "2d850dddba33d2807f91ac64039f9969", "score": "0.43451437", "text": "def loadPara(self):\r\n pass", "title": "" }, { "docid": "1587cb29ed58f78d5d6ac400f458902e", "score": "0.43448037", "text": "def install_global_objects(self):\n import uliweb\n for k, v in settings.GLOBAL_OBJECTS.items():\n setattr(uliweb, k, import_attr(v))", "title": "" }, { "docid": "c699570ee187c751611a77fb3c35cafa", "score": "0.43398649", "text": "def get_input(content):\n obj = json.loads(content)\n keywords = obj.get('keywords')\n obj_type = obj.get('type')\n return keywords, obj_type", "title": "" }, { "docid": "c48b6d996debbac8529a4f659db05366", "score": "0.4339427", "text": "def AddPage(self, *args, **kw):", "title": "" }, { "docid": "c1ba55fb470211106b57792c39b4fd4a", "score": "0.4335256", "text": "def load_supplemental_object(step_name, name, content_type, required=True):\n if (content_type == 'pickle'):\n with open(os.path.join(_disk_store, step_name+'-'+name+'.pkl'), 'rb') as f:\n return pickle.load(f)", "title": "" }, { "docid": "5341e499c5fda487e2e2cfb628ae492e", "score": "0.43294102", "text": "def load(self, object_type, refresh=False):\n if refresh:\n records = self.get_all(object_type, ids_only = False)\n try:\n os.mkdir('insightly_data')\n except:\n pass\n f = open('insightly_data/' + object_type + '.json', 'w')\n f.write(json.dumps(records))\n f.close()\n else:\n f = open(object_type + '.json', 'r')\n records = json.loads(f.read())\n f.close()\n return records", "title": "" }, { "docid": "4c7e15993607d8a246164c9f3196b096", "score": "0.4314351", "text": "def memcache_keywords(self, url):\n from urlparse import urlparse\n parts = urlparse(url)\n if parts.scheme and parts.netloc:\n mkey = _MEMCACHE_SLUGS['KEYWORDS'] + url\n if not memcache.get(key=mkey):\n q = self._query.filter(WebResource.url == url).fetch(1)\n results = q[0].get_indexers() if len(q) == 1 else []\n memcache.add(key=mkey, value=results, time=15000)\n else:\n results = memcache.get(key=mkey)\n return results\n else:\n return None", "title": "" }, { "docid": "7cd7234021ea12dd9ee2526ebccf31b3", 
"score": "0.43127888", "text": "def __init__(self, keywords):\n self.strategy = 0\n self.keywords = keywords\n \n # Before beginning, get the initial list of keywords\n self.scraper = Scraper()\n self.searched_ids = self.scraper.search_videos(self.keywords)", "title": "" }, { "docid": "e7e2578ac461ea736ea204a8e5913e3c", "score": "0.4308556", "text": "def __contains__(self, type_str):\n ret = False\n if type_str in self._keywords:\n ret = True\n return ret", "title": "" }, { "docid": "05b50a0178a1220c3b28929095887afa", "score": "0.4307479", "text": "def keywords(self, keywords):\r\n visit = self.visit\r\n return dict((kw.arg, visit(kw.value)) for kw in keywords)", "title": "" }, { "docid": "f158e356a0d2fc9ac2003b25765ee488", "score": "0.42945105", "text": "def load(type_name):\n\n assert type_name not in content, 'Content loader collision: {} already '\\\n 'has a loader'.format(type_name)\n\n def loader_decorator(loader_func):\n path = os.path.join(content_path, type_name)\n assert os.path.isdir(path), '{} does not seem to be a folder in {}'\\\n .format(type_name, content_path)\n file_pairs = get_file_pairs(path)\n\n content[type_name] = loader_func(file_pairs)\n\n return loader_func # probably never used since it's called above\n # ...kind of an abuse of decorators, I know...\n return loader_decorator", "title": "" }, { "docid": "908c7c688c825e8ba0f3ecfc5f6ea8e1", "score": "0.429218", "text": "def baseComponents(self):\n # Create an instance (=object) of the text component to be placed on the page.\n hw = HelloWorldText()\n # Create an instance (=object) of the page, containing the \"hw\" component.\n # The page class is also the page name in the url: http://localhost:8060/index\n # Components can be a single component or a list of components.\n homePage = Page(class_=self.C.TEMPLATE_INDEX, components=hw, title=self.TITLE)\n # Answer a list of types of pages for this site.\n return [homePage]", "title": "" }, { "docid": "be1f13a1d8dd7bf67760e5522b7a869b", "score": "0.42884856", "text": "def __init__(self):\n self.wiki = wikipediaapi.Wikipedia('en')\n self.page = None\n self.medical_term_cues = ['medical', 'medicine', 'biological', 'biology', 'scientific', 'science']\n self.disambiguation_page_suffix = '_(disambiguation)'", "title": "" }, { "docid": "a12caad99805e28893164da0f1e80ea3", "score": "0.4279479", "text": "def concept():\n print(\"request*****\", request.files)\n\n if not request.files:\n abort(400)\n body = request.files[\"document\"]\n keyphrases = extract_concepts(body.read().decode(\"utf-8\"))\n \n return jsonify({\"keywords\": keyphrases})", "title": "" }, { "docid": "ca7543e77913b3eaf739e693a8a9d7c4", "score": "0.42784932", "text": "def get_page_matches(document,\n keywords,\n preprocess_type=PreprocessWordType.NORMALIZE):\n matches = []\n for keyword in keywords:\n for page in document:\n match = None\n for word in page.words:\n preprocessed_word = query_utils.preprocess_word(\n word, preprocess_type)\n if preprocessed_word == keyword:\n match = (document.year, document, page, keyword)\n break\n if match:\n matches.append(match)\n continue # move to next page\n return matches", "title": "" }, { "docid": "4a79b96e7d18059edcac1584e325268c", "score": "0.4276614", "text": "def load_dictionary(keyword, depth):\n dictionary = corpora.Dictionary.load('{0}_{1}.dict'.format(keyword, depth))\n corpus = corpora.MmCorpus('{0}_{1}_corpus.mm'.format(keyword, depth))\n return dictionary, corpus", "title": "" }, { "docid": "b611ad1d0aea538255ace533cab6ccaa", "score": "0.42604384", "text": 
"def laterPages(self, c, doc):", "title": "" }, { "docid": "4a74f040d00193ac82c0c5ec43019c9a", "score": "0.42538223", "text": "def __init__(self, **keywords):\n\t\t\t# 2011-7-7\n\t\t\tfor keyword, value in keywords.iteritems():\n\t\t\t\tsetattr(self, keyword, value)", "title": "" }, { "docid": "fabc91f6bc5792d31e33bca579643604", "score": "0.42513916", "text": "def types(self):\r\n types = { \r\n 'Address Locator': {'keyword': 'DEAddressLocator', 'description': 'A dataset, used for geocoding, that stores the address attributes, associated indexes, and rules that define the process for translating nonspatial descriptions of places to spatial data.'},\r\n 'Address Locator Style': {'keyword': 'GPAddressLocatorStyle', 'description': 'A template on which to base the new address locator.'},\r\n 'Analysis Cell Size': {'keyword': 'analysis_cell_size', 'description': 'The cell size used by raster tools.'},\r\n 'Any Value': {'keyword': 'GPType', 'description': 'A data type that accepts any value.'},\r\n 'ArcMap Document': {'keyword': 'DEMapDocument', 'description': 'A file that contains one map, its layout, and its associated layers, tables, charts, and reports.'},\r\n 'Areal Unit': {'keyword': 'GPArealUnit', 'description': 'An areal unit type and value such as square meter or acre.'},\r\n 'Boolean': {'keyword': 'GPBoolean', 'description': 'A Boolean value.'},\r\n 'CAD Drawing Dataset': {'keyword': 'DECadDrawingDataset', 'description': 'A vector data source with a mix of feature types with symbology. The dataset is not usable for feature class-based queries or analysis.'},\r\n 'Calculator Expression': {'keyword': 'GPCalculatorExpression', 'description': 'A calculator expression.'},\r\n 'Catalog Root': {'keyword': 'DECatalogRoot', 'description': 'The top-level node in the Catalog tree.'},\r\n 'Cell Size': {'keyword': 'GPSACellSize', 'description': 'The cell size used byArcGIS Spatial Analyst extension.'},\r\n 'Cell Size XY': {'keyword': 'GPCellSizeXY', 'description': 'Defines the two sides of a raster cell.'},\r\n 'Composite Layer': {'keyword': 'GPCompositeLayer', 'description': 'A reference to several children layers, including symbology and rendering properties.'},\r\n 'Compression': {'keyword': 'GPSAGDBEnvCompression', 'description': 'Specifies the type of compression used for a raster.'},\r\n 'Coordinate System': {'keyword': 'GPCoordinateSystem', 'description': 'A reference framework&mdash;such as the UTM system&mdash;consisting of a set of points, lines, and/or surfaces, and a set of rules, used to define the positions of points in two- and three-dimensional space.'},\r\n 'Coordinate Systems Folder': {'keyword': 'DESpatialReferencesFolder', 'description': 'A folder on disk storing coordinate systems.'},\r\n 'Coverage': {'keyword': 'DECoverage', 'description': 'A coverage dataset, a proprietary data model for storing geographic features as points, arcs, and polygons with associated feature attribute tables.'},\r\n 'Coverage Feature Class': {'keyword': 'DECoverageFeatureClasses', 'description': 'A coverage feature class, such as point, arc, node, route, route system, section, polygon, and region.'},\r\n 'Data Element': {'keyword': 'DEType', 'description': 'A dataset visible in ArcCatalog.'},\r\n 'Data File': {'keyword': 'GPDataFile', 'description': 'A data file.'},\r\n 'Database Connections': {'keyword': 'DERemoteDatabaseFolder', 'description': 'The database connection folder in ArcCatalog.'},\r\n 'Dataset': {'keyword': 'DEDatasetType', 'description': 'A collection of related data, usually grouped 
or stored together.'},\r\n 'Date': {'keyword': 'GPDate', 'description': 'A date value.'},\r\n 'dBase Table': {'keyword': 'DEDbaseTable', 'description': 'Attribute data stored in dBASE format.'},\r\n 'Decimate': {'keyword': 'GP3DADecimate', 'description': 'Specifies a subset of nodes of a TIN to create a generalized version of that TIN.'},\r\n 'Disk Connection': {'keyword': 'DEDiskConnection', 'description': 'An access path to a data storage device.'},\r\n 'Double': {'keyword': 'GPDouble', 'description': 'Any floating-point number will be stored as a double-precision, 64-bit value.'},\r\n 'Encrypted String': {'keyword': 'GPEncryptedString', 'description': 'Encrypted string for passwords.'},\r\n 'Envelope': {'keyword': 'GPEnvelope', 'description': 'The coordinate pairs that define the minimum bounding rectangle the data source falls within.'},\r\n 'Evaluation Scale': {'keyword': 'GPEvaluationScale', 'description': 'The scale value range and increment value applied to inputs in a weighted overlay operation.'},\r\n 'Extent': {'keyword': 'GPExtent', 'description': 'Specifies the coordinate pairs that define the minimum bounding rectangle (xmin, ymin and xmax, ymax) of a data source. All coordinates for the data source fall within this boundary.'},\r\n 'Extract Values': {'keyword': 'GPSAExtractValues', 'description': 'An extract values parameter.'},\r\n 'Feature Class': {'keyword': 'DEFeatureClass', 'description': 'A collection of spatial data with the same shape type: point, multipoint, polyline, and polygon.'},\r\n 'Feature Dataset': {'keyword': 'DEFeatureDataset', 'description': 'A collection of feature classes that share a common geographic area and the same spatial reference system.'},\r\n 'Feature Layer': {'keyword': 'GPFeatureLayer', 'description': 'A reference to a feature class, including symbology and rendering properties.'},\r\n 'Feature Set': {'keyword': 'GPFeatureRecordSetLayer', 'description': 'Interactive features; draw the features when the tool is run.'},\r\n 'Field': {'keyword': 'Field', 'description': 'A column in a table that stores the values for a single attribute.'},\r\n 'Field Info': {'keyword': 'GPFieldInfo', 'description': 'The details about a field in a FieldMap.'},\r\n 'Field Mappings': {'keyword': 'GPFieldMapping', 'description': 'A collection of fields in one or more input tables.'},\r\n 'File': {'keyword': 'DEFile', 'description': 'A file on disk.'},\r\n 'Folder': {'keyword': 'DEFolder', 'description': 'Specifies a location on a disk where data is stored.'},\r\n 'Formulated Raster': {'keyword': 'GPRasterFormulated', 'description': 'A raster surface whose cell values are represented by a formula or constant.'},\r\n 'Fuzzy function': {'keyword': 'GPSAFuzzyFunction', 'description': 'Fuzzy function.'},\r\n 'Geodataset': {'keyword': 'DEGeodatasetType', 'description': 'A collection of data with a common theme in a geodatabase.'},\r\n 'GeoDataServer': {'keyword': 'DEGeoDataServer', 'description': 'A coarse-grained object that references a geodatabase.'},\r\n 'Geometric Network': {'keyword': 'DEGeometricNetwork', 'description': 'A linear network represented by topologically connected edge and junction features. 
Feature connectivity is based on their geometric coincidence.'},\r\n 'Geostatistical Layer': {'keyword': 'GPGALayer', 'description': 'A reference to a geostatistical data source, including symbology and rendering properties.'},\r\n 'Geostatistical Search Neighborhood': {'keyword': 'GPGASearchNeighborhood', 'description': 'Defines the searching neighborhood parameters for a geostatistical layer.'},\r\n 'Geostatistical Value Table': {'keyword': 'GPGALayer', 'description': 'A collection of data sources and fields that define a geostatistical layer.'},\r\n 'GlobeServer': {'keyword': 'DEGlobeServer', 'description': 'A Globe server.'},\r\n 'GPServer': {'keyword': 'DEGPServer', 'description': 'A geoprocessing server.'},\r\n 'Graph': {'keyword': 'GPGraph', 'description': 'A graph.'},\r\n 'Graph Data Table': {'keyword': 'GPGraphDataTable', 'description': 'A graph data table.'},\r\n 'Group Layer': {'keyword': 'GPGroupLayer', 'description': 'A collection of layers that appear and act as a single layer. Group layers make it easier to organize a map, assign advanced drawing order options, and share layers for use in other maps.'},\r\n 'Horizontal Factor': {'keyword': 'GPSAHorizontalFactor', 'description': 'The relationship between the horizontal cost factor and the horizontal relative moving angle.'},\r\n 'Image Service': {'keyword': 'DEImageServer', 'description': 'An image service.'},\r\n 'Index': {'keyword': 'Index', 'description': 'A data structure used to speed the search for records in geographic datasets and databases.'},\r\n 'INFO Expression': {'keyword': 'GPINFOExpression', 'description': 'A syntax for defining and manipulating data in an INFO table.'},\r\n 'INFO Item': {'keyword': 'GPArcInfoItem', 'description': 'An item in an INFO table.'},\r\n 'INFO Table': {'keyword': 'DEArcInfoTable', 'description': 'A table in an INFO database.'},\r\n 'LAS Dataset': {'keyword': 'DELasDataset', 'description': 'A LAS dataset stores reference to one or more LAS files on disk, as well as to additional surface features. A LAS file is a binary file that is designed to store airborne lidar data.'},\r\n 'LAS Dataset Layer': {'keyword': 'GPLasDatasetLayer', 'description': 'A layer that references a LAS dataset on disk. This layer can apply filters on lidar files and surface constraints referenced by a LAS dataset.'},\r\n 'Layer': {'keyword': 'GPLayer', 'description': 'A reference to a data source, such as a shapefile, coverage, geodatabase feature class, or raster, including symbology and rendering properties.'},\r\n 'Layer File': {'keyword': 'DELayer', 'description': 'A file with a .lyr extension that stores the layer definition, including symbology and rendering properties.'},\r\n 'Line': {'keyword': 'GPLine', 'description': 'A shape, straight or curved, defined by a connected series of unique x,y coordinate pairs.'},\r\n 'Linear Unit': {'keyword': 'GPLinearUnit', 'description': 'A linear unit type and value such as meter or feet.'},\r\n 'Long': {'keyword': 'GPLong', 'description': 'An integer number value.'},\r\n 'M Domain': {'keyword': 'GPMDomain', 'description': 'A range of lowest and highest possible value for m coordinates.'},\r\n 'MapServer': {'keyword': 'DEMapServer', 'description': 'A map server.'},\r\n 'Mosaic Dataset': {'keyword': 'DEMosaicDataset', 'description': 'A collection of raster and image data that allows you to store, view, and query the data. 
It is a data model within the geodatabase used to manage a collection of raster datasets (images) stored as a catalog and viewed as a mosaicked image.'},\r\n 'Mosaic Layer': {'keyword': 'GPMosaicLayer', 'description': 'A layer that references a mosaic dataset.'},\r\n 'Neighborhood': {'keyword': 'GPSANeighborhood', 'description': 'The shape of the area around each cell used to calculate statistics.'},\r\n 'Network Analyst Class FieldMap': {'keyword': 'NAClassFieldMap', 'description': 'Mapping between location properties in a Network Analyst layer (such as stops, facilities and incidents) and a point feature class.'},\r\n 'Network Analyst Hierarchy Settings': {'keyword': 'GPNAHierarchySettings', 'description': 'A hierarchy attribute that divides hierarchy values of a network dataset into three groups using two integers. The first integer, high_rank_ends, sets the ending value of the first group; the second number, low_rank_begin, sets the beginning value of the third group.'},\r\n 'Network Analyst Layer': {'keyword': 'GPNALayer', 'description': 'A special group layer used to express and solve network routing problems. Each sublayer held in memory in a Network Analyst layer represents some aspect of the routing problem and the routing solution.'},\r\n 'Network Dataset': {'keyword': 'DENetworkDataset', 'description': 'A collection of topologically connected network elements (edges, junctions, and turns), derived from network sources and associated with a collection of network attributes.'},\r\n 'Network Dataset Layer': {'keyword': 'GPNetworkDatasetLayer', 'description': 'A reference to a network dataset, including symbology and rendering properties.'},\r\n 'Parcel Fabric': {'keyword': 'DECadastralFabric', 'description': 'A parcel fabric is a dataset for the storage, maintenance, and editing of a continuous surface of connected parcels or parcel network.'},\r\n 'Parcel Fabric Layer': {'keyword': 'GPCadastralFabricLayer', 'description': 'A layer referencing a parcel fabric on disk. 
This layer works as a group layer organizing a set of related layers under a single layer.'},\r\n 'Point': {'keyword': 'GPPoint', 'description': 'A pair of x,y coordinates.'},\r\n 'Polygon': {'keyword': 'GPPolygon', 'description': 'A connected sequence of x,y coordinate pairs, where the first and last coordinate pair are the same.'},\r\n 'Projection File': {'keyword': 'DEPrjFile', 'description': 'A file storing coordinate system information for spatial data.'},\r\n 'Pyramid': {'keyword': 'GPSAGDBEnvPyramid', 'description': 'Specifies if pyramids will be built.'},\r\n 'Radius': {'keyword': 'GPSARadius', 'description': 'Specifies which surrounding points will be used for interpolation.'},\r\n 'Random Number Generator': {'keyword': 'GPRandomNumberGenerator', 'description': 'Specifies the seed and the generator to be used when creating random values.'},\r\n 'Raster Band': {'keyword': 'DERasterBand', 'description': 'A layer in a raster dataset.'},\r\n 'Raster Calculator Expression': {'keyword': 'GPRasterCalculatorExpression', 'description': 'A raster calculator expression.'},\r\n 'Raster Catalog': {'keyword': 'DERasterCatalog', 'description': 'A collection of raster datasets defined in a table; each table record defines an individual raster dataset in the catalog.'},\r\n 'Raster Catalog Layer': {'keyword': 'GPRasterCatalogLayer', 'description': 'A reference to a raster catalog, including symbology and rendering properties.'},\r\n 'Raster Data Layer': {'keyword': 'GPRasterDataLayer', 'description': 'A raster data layer.'},\r\n 'Raster Dataset': {'keyword': 'DERasterDataset', 'description': 'A single dataset built from one or more rasters.'},\r\n 'Raster Layer': {'keyword': 'GPRasterLayer', 'description': 'A reference to a raster, including symbology and rendering properties.'},\r\n 'Raster Statistics': {'keyword': 'GPSAGDBEnvStatistics', 'description': 'Specifies if raster statistics will be built.'},\r\n 'Raster Type': {'keyword': 'GPRasterBuilder', 'description': 'Raster data is added to a mosaic dataset by specifying a raster type. The raster type identifies metadata, such as georeferencing, acquisition date, and sensor type, along with a raster format.'},\r\n 'Record Set': {'keyword': 'GPRecordSet', 'description': 'Interactive table; type in the table values when the tool is run.'},\r\n 'Relationship Class': {'keyword': 'DERelationshipClass', 'description': 'The details about the relationship between objects in the geodatabase.'},\r\n 'Remap': {'keyword': 'GPSARemap', 'description': 'A table that defines how raster cell values will be reclassified.'},\r\n 'Route Measure Event Properties': {'keyword': 'GPRouteMeasureEventProperties', 'description': 'Specifies the fields on a table that describe events that are measured by a linear reference route system.'},\r\n 'Schematic Dataset': {'keyword': 'DESchematicDataset', 'description': 'A schematic dataset contains a collection of schematic diagram templates and schematic feature classes that share the same application domain, for example, water or electrical. 
It can reside in a personal, file, or ArcSDE geodatabase.'},\r\n 'Schematic Diagram': {'keyword': 'DESchematicDiagram', 'description': 'A schematic diagram.'},\r\n 'Schematic Folder': {'keyword': 'DESchematicFolder', 'description': 'A schematic folder.'},\r\n 'Schematic Layer': {'keyword': 'GPSchematicLayer', 'description': 'A schematic layer is a composite layer composed of feature layers based on the schematic feature classes associated with the template on which the schematic diagram is based.'},\r\n 'Semivariogram': {'keyword': 'GPSASemiVariogram', 'description': 'Specifies the distance and direction representing two locations that are used to quantify autocorrelation.'},\r\n 'ServerConnection': {'keyword': 'DEServerConnection', 'description': 'A server connection.'},\r\n 'Shapefile': {'keyword': 'DEShapefile', 'description': 'Spatial data in shapefile format.'},\r\n 'Spatial Reference': {'keyword': 'GPSpatialReference', 'description': 'The coordinate system used to store a spatial dataset, including the spatial domain.'},\r\n 'SQL Expression': {'keyword': 'GPSQLExpression', 'description': 'A syntax for defining and manipulating data from a relational database.'},\r\n 'String': {'keyword': 'GPString', 'description': 'A text value.'},\r\n 'Table': {'keyword': 'DETable', 'description': 'Tabular data.'},\r\n 'Table View': {'keyword': 'GPTableView', 'description': 'A representation of tabular data for viewing and editing purposes, stored in memory or on disk.'},\r\n 'Terrain Layer': {'keyword': 'GPTerrainLayer', 'description': 'A reference to a terrain, including symbology and rendering properties. It\\'s used to draw a terrain.'},\r\n 'Text File': {'keyword': 'DETextfile', 'description': 'Data stored in ASCII format.'},\r\n 'Tile Size': {'keyword': 'GPSAGDBEnvTileSize', 'description': 'Specifies the width and the height of a data stored in block.'},\r\n 'Time configuration': {'keyword': 'GPSATimeConfiguration', 'description': 'Specifies the time periods used for calculating solar radiation at specific locations.'},\r\n 'TIN': {'keyword': 'DETin', 'description': 'A vector data structure that partitions geographic space into contiguous, nonoverlapping triangles. 
The vertices of each triangle are sample data points with x-, y-, and z-values.'},\r\n 'Tin Layer': {'keyword': 'GPTinLayer', 'description': 'A reference to a TIN, including topological relationships, symbology, and rendering properties.'},\r\n 'Tool': {'keyword': 'DETool', 'description': 'A geoprocessing tool.'},\r\n 'Toolbox': {'keyword': 'DEToolbox', 'description': 'A geoprocessing toolbox.'},\r\n 'Topo Features': {'keyword': 'GPSATopoFeatures', 'description': 'Features that are input to the interpolation.'},\r\n 'Topology': {'keyword': 'DETopology', 'description': 'A topology that defines and enforces data integrity rules for spatial data.'},\r\n 'Topology Layer': {'keyword': 'GPTopologyLayer', 'description': 'A reference to a topology, including symbology and rendering properties.'},\r\n 'Value Table': {'keyword': 'GPValueTable', 'description': 'A collection of columns of values.'},\r\n 'Variant': {'keyword': 'GPVariant', 'description': 'A data value that can contain any basic type: Boolean, date, double, long, and string.'},\r\n 'Vertical Factor': {'keyword': 'GPSAVerticalFactor', 'description': 'Specifies the relationship between the vertical cost factor and the vertical, relative moving angle.'},\r\n 'VPF Coverage': {'keyword': 'DEVPFCoverage', 'description': 'Spatial data stored in Vector Product Format.'},\r\n 'VPF Table': {'keyword': 'DEVPFTable', 'description': 'Attribute data stored in Vector Product Format.'},\r\n 'WCS Coverage': {'keyword': 'DEWCSCoverage', 'description': 'Web Coverage Service (WCS) is an open specification for sharing raster datasets on the web.'},\r\n 'Weighted Overlay Table': {'keyword': 'GPSAWeightedOverlayTable', 'description': 'A table with data to combine multiple rasters by applying a common measurement scale of values to each raster, weighing each according to its importance.'},\r\n 'Weighted Sum': {'keyword': 'GPSAWeightedSum', 'description': 'Specifies data for overlaying several rasters multiplied each by their given weight and then summed.'},\r\n 'WMS Map': {'keyword': 'DEWMSMap', 'description': 'A WMS Map.'},\r\n 'Workspace': {'keyword': 'DEWorkspace', 'description': 'A container such as a geodatabase or folder.'},\r\n 'XY Domain': {'keyword': 'GPXYDomain', 'description': 'A range of lowest and highest possible values for x,y coordinates.'},\r\n 'Z Domain': {'keyword': 'GPZDomain', 'description': 'A range of lowest and highest possible values for z coordinates.'}}\r\n return types", "title": "" }, { "docid": "c55e9a24fdcfa40f6bd9e1f8e0ac6486", "score": "0.42485747", "text": "def start_object(self, attrs):\n\n adict = utils.CaselessDict(attrs)\n link = adict.get('data','')\n if link:\n self.urls.append(link)", "title": "" }, { "docid": "b42179f38979639e27215a717800efa4", "score": "0.42466995", "text": "def GetKeywords(self):\n return [SQUIRREL_KW, SQUIRREL_TYPES, _cpp.DOC_KEYWORDS]", "title": "" }, { "docid": "63e5e27b46968b56781a879e40aa604e", "score": "0.42439273", "text": "def parser_pdf_pages(layout_objs):\n objects = []\n for lt_obj in layout_objs:\n if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):\n objects.append([lt_obj.get_text(), lt_obj.bbox])\n elif isinstance(lt_obj, LTImage):\n rawdata = lt_obj.stream.get_rawdata()\n bs64data = base64.b64encode(rawdata).decode(\"utf-8\")\n objects.append([f'<img src=\"data:image/jpeg;base64,{bs64data}\" />', lt_obj.bbox])\n elif isinstance(lt_obj, LTFigure):\n objects.extend(parser_pdf_pages(lt_obj._objs))\n return objects", "title": "" }, { "docid": 
"6479cbd02baddcf8e5039781a06cf8f0", "score": "0.42382255", "text": "def page_in_objects(self, key_list, page_in_chan, timeout, kv_config, state_config):\n return NotImplemented", "title": "" }, { "docid": "f3844173e3cf2e9f1791d5dd421d421b", "score": "0.42381272", "text": "def scan_words(self):\n for page in self:\n for word in page.words:\n yield page, word", "title": "" }, { "docid": "aaf5a672fce56f4f51e5dd712525966e", "score": "0.42371625", "text": "def CategoriesServei(self):\n urltool = getToolByName(self.context, 'portal_url')\n path = '/'.join(urltool.absolute_url().split('/')[:-1]) + '/'\n\n results = []\n try:\n cat1 = self.context.category1\n except:\n cat1 = ()\n\n objects = cat1\n\n for value in objects:\n try:\n obj = self.context.portal_catalog.searchResults(portal_type='SimpleVocabularyTerm', id=value)[0]\n\n results.append({'title': obj.Title,\n 'key': value,\n 'href': path + 'keywordsListing?' + value,\n })\n except:\n # When an object is migrated, can come with keywords, but perhaps, doesn't exists still in Plone\n None\n\n return results", "title": "" }, { "docid": "4704261f81353e7f18b49895244be4ce", "score": "0.42337608", "text": "def get_parser_keywords(self, *, fields=None, **kwargs):\n function_endpoint = urljoin(self._baseurl, 'parser_keywords')\n return self._call('GET', function_endpoint, **kwargs)", "title": "" }, { "docid": "196cdb1e3205115a03fe2e5a2cbae875", "score": "0.42289498", "text": "def Keywords(self) -> KeywordCollection:", "title": "" }, { "docid": "196cdb1e3205115a03fe2e5a2cbae875", "score": "0.42289498", "text": "def Keywords(self) -> KeywordCollection:", "title": "" }, { "docid": "ab3b30627e6b0477e37399672705b49c", "score": "0.4226073", "text": "async def declare_objects(robot):\n\n global custom_marker_types, custom_cube_types\n \n decl_marker = robot.world.define_custom_wall\n custom_marker_types = [\n CustomObjectTypes.CustomType00,\n CustomObjectTypes.CustomType01,\n CustomObjectTypes.CustomType02,\n CustomObjectTypes.CustomType03\n ]\n\n await decl_marker(CustomObjectTypes.CustomType00,\n CustomObjectMarkers.Circles2,\n 40, 40, 40, 40, True)\n\n await decl_marker(CustomObjectTypes.CustomType01,\n CustomObjectMarkers.Triangles2,\n 40, 40, 40, 40, True)\n\n await decl_marker(CustomObjectTypes.CustomType02,\n CustomObjectMarkers.Diamonds2,\n 40, 40, 40, 40, True)\n\n await decl_marker(CustomObjectTypes.CustomType03,\n CustomObjectMarkers.Hexagons2,\n 40, 40, 40, 40, True)\n\n\n# Markers for containers\n custom_container_types = [\n CustomObjectTypes.CustomType04,\n CustomObjectTypes.CustomType05\n ]\n\n await decl_marker(CustomObjectTypes.CustomType04,\n CustomObjectMarkers.Circles3,\n 40, 40, 40, 40, False)\n\n await decl_marker(CustomObjectTypes.CustomType05,\n CustomObjectMarkers.Triangles3,\n 40, 40, 40, 40, False)\n\n\n\n# Markers for cubes\n\n decl_cube = robot.world.define_custom_cube\n\n custom_cube_types = [\n CustomObjectTypes.CustomType10,\n CustomObjectTypes.CustomType11,\n CustomObjectTypes.CustomType12,\n CustomObjectTypes.CustomType13,\n CustomObjectTypes.CustomType14,\n CustomObjectTypes.CustomType15\n ]\n\n await decl_cube(CustomObjectTypes.CustomType10,\n CustomObjectMarkers.Circles5,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType11,\n CustomObjectMarkers.Diamonds5,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType12,\n CustomObjectMarkers.Hexagons5,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType13,\n CustomObjectMarkers.Triangles4,\n 50, 40, 40, True)\n await 
decl_cube(CustomObjectTypes.CustomType14,\n CustomObjectMarkers.Circles4,\n 50, 40, 40, True)\n await decl_cube(CustomObjectTypes.CustomType15,\n CustomObjectMarkers.Diamonds4,\n 50, 40, 40, True)", "title": "" }, { "docid": "e15cd88489f55349539aa543efe75dcc", "score": "0.42253822", "text": "def __init__(self, url, generate_depth=1):\n\n rp = crawl.get_robots_parser_if_exists(url)\n self.root = PageNode(Page(url, rp), generate_depth=generate_depth)\n\n if self.root.page.html is None:\n self.error = self.root.page.error\n else:\n self.pages = {self.root.page.url: {'page': self.root.page, 'freq': 0}}\n self.text = ''\n self.total_word_count = 0\n\n self.outbound_links = set(self.root.page.outbound_links)\n\n self.traverse_all_pages()\n\n divisor = len(self.pages)\n if '*' in self.pages:\n divisor -= 1\n self.average_word_count = self.total_word_count / divisor\n self.key_phrases = helpers.get_key_phrases_from_text(self.text, max_length=3)", "title": "" }, { "docid": "8ee6b97cb0dafba8da8ba15db4636b9c", "score": "0.42173377", "text": "def __init__(self, path=None, text=None, pageno=None):\n self._path = path # Path to where PDF page is stored\n self._text = text # Raw text extracted from page\n self._pageno = pageno # Page Number\n self._words = None # Tokenized List of words on page\n self._label = None # The page classification (label)\n self._size = 0 # byte size of the page\n\n if path is not None:\n if isinstance(path, str) == False:\n raise TypeError(\"String expected for path parameter\")\n if os.path.isfile(path) == False:\n raise FileNotFoundError(\"Not a valid path for the page\")\n if text is not None:\n if isinstance(text, list):\n self._size = 0\n for segment in text:\n if isinstance(segment, dict) == False:\n raise TypeError(\"Dictionary expected for text segment:\", type(segment))\n if segment['tag'] == Segment.PARAGRAPH:\n self._size += 2 + len(segment['text'])\n else:\n self._size += 1 + len(segment['text'])\n elif isinstance(text, str):\n self._text = text.strip()\n self._size = len(text)\n else:\n raise TypeError(\"String expected for text parameter:\" , type(text))", "title": "" }, { "docid": "a30c81619515e08368884014527862e1", "score": "0.42154956", "text": "def load_parameters(self, obj):\n\n for (k,field) in self.fields.items():\n field.load(obj, k)", "title": "" }, { "docid": "61dcb59bb59d5e9200c3f9e837b60b1b", "score": "0.42110696", "text": "def test_page_methods(self):\n mysite = self.get_site()\n mainpage = self.get_mainpage()\n self.assertIsInstance(mysite.page_restrictions(mainpage), dict)\n self.assertIsInstance(mysite.page_can_be_edited(mainpage), bool)\n self.assertIsInstance(mysite.page_isredirect(mainpage), bool)\n if mysite.page_isredirect(mainpage):\n self.assertIsInstance(mysite.getredirtarget(mainpage),\n pywikibot.Page)\n else:\n with self.assertRaises(IsNotRedirectPageError):\n mysite.getredirtarget(mainpage)\n a = list(mysite.preloadpages([mainpage]))\n self.assertLength(a, int(mainpage.exists()))\n if a:\n self.assertEqual(a[0], mainpage)", "title": "" }, { "docid": "3e973b411dfb26a5a2c99aea3fcdd9b3", "score": "0.4204774", "text": "def load_page(context):\n TrainingsPage(context).navigate()\n at_page = CalendarPage(context).at()\n assert not at_page # just a sample assert, not a real use", "title": "" }, { "docid": "a9b99e31cbea6e9d4732568fa2cef7e4", "score": "0.42022976", "text": "def search_object(args):\n oerpenv = OdooEnvironment(config_filename=args.config)\n oerpenv.set_python_environment(args.environment)\n\n print \"Definition:\"\n for addon in 
oerpenv.addons(object_filter=args.objects[0]):\n print \"%35s:%s\" % (addon.token, addon.name)\n\n print \"Inherited:\"\n for addon in oerpenv.addons(inherited_filter=args.objects[0]):\n print \"%35s:%s\" % (addon.token, addon.name)", "title": "" }, { "docid": "fff7c7013509acb45281460ec091fd32", "score": "0.42000812", "text": "def get_instances(self, pageToken: str = None) -> Dict:\n pass", "title": "" }, { "docid": "29f7602834d366dffb314604ea14436a", "score": "0.4191683", "text": "def doAllUserTagsThisPage(bunch, text, env):\n f = anydbm.open(env[\"context\"][\"userstatefile\"],\"c\")\n tagsets = [ (f[x]) for x in f.keys() if \":tags:\"+ env[\"context\"][\"pagename\"] in x ]\n f.close()\n tags = {}\n for tagset in tagsets:\n for tag in [ x.rstrip().strip() for x in tagset.split(\",\") ]:\n # make a wiki word for linking\n wikiTag = \"\".join([ y.capitalize() for y in tag.split(\" \") if y != ''])\n tag = tag.replace(\" \", \"&nbsp;\")\n tag = (\"<a href='%s'>\" + tag + \"</a>\") % ( env[\"context\"][\"cgipath\"]+wikiTag, )\n tags[tag] = tags.get(tag,0) + 1\n if tags.keys() == []:\n return \"No tags are defined for this page yet - how would you classify/think of this page? <a href= '#usertags'>Add your notes below!</a>\"\n else:\n return \"All tags for this page<p align='centre'>\"+ \"&nbsp;&nbsp; \".join(tags.keys())+ \"</p>\"", "title": "" }, { "docid": "42f9cc7996f64e65fd5c65990ae2bf69", "score": "0.41904667", "text": "def get_object_tags(self, spec, byAbout):\n objParts = self.path_parts(byAbout, spec)\n status, (value, value_type) = self._get_tag_value(objParts)\n if status == STATUS.OK:\n result = json.loads(value)\n return result[u'tagPaths']\n else:\n raise ObjectNotFoundError(u'Couldn\\'t find object %s' % obj)", "title": "" }, { "docid": "1e3f310e4991834f2fb07e2868c3b5d7", "score": "0.41839382", "text": "def test_pages(self):\n class MyTemplate(BaseTemplate):\n vars = BaseTemplate.vars + self.vars\n\n template = MyTemplate('some_name')\n pages = template.pages\n self.assertEqual(len(pages), 4)\n\n page = pages.pop(0)\n self.assertEqual(page['name'], 'Begin')\n questions = page['vars']\n self.assertEqual(len(questions), 1)\n self.assertEqual(questions[0].name, 'expert_mode')\n page = pages.pop(0)\n self.assertEqual(page['name'], 'Main')\n questions = page['vars']\n self.assertEqual(len(questions), 3)\n self.assertEqual(questions[0].name, 'basic_var')\n self.assertEqual(questions[1].name, 'bool_var')\n self.assertEqual(questions[2].name, 'dot_var')\n\n page = pages.pop(0)\n self.assertEqual(page['name'], 'Carl')\n questions = page['vars']\n self.assertEqual(len(questions), 1)\n self.assertEqual(questions[0].name, 'str_var')\n\n page = pages.pop(0)\n self.assertEqual(page['name'], 'Martin')\n questions = page['vars']\n self.assertEqual(len(questions), 1)\n self.assertEqual(questions[0].name, 'txt_var')", "title": "" }, { "docid": "14a12ffeb50b509a04b2b896d066580f", "score": "0.41817164", "text": "def __init__(self, **keywords):\n\t\t\tself.real_counter = 0\n\t\t\t\n\t\t\t# 2010-7-29\n\t\t\tfor keyword, value in keywords.iteritems():\n\t\t\t\tsetattr(self, keyword, value)", "title": "" }, { "docid": "14a12ffeb50b509a04b2b896d066580f", "score": "0.41817164", "text": "def __init__(self, **keywords):\n\t\t\tself.real_counter = 0\n\t\t\t\n\t\t\t# 2010-7-29\n\t\t\tfor keyword, value in keywords.iteritems():\n\t\t\t\tsetattr(self, keyword, value)", "title": "" }, { "docid": "14a12ffeb50b509a04b2b896d066580f", "score": "0.41817164", "text": "def __init__(self, 
**keywords):\n\t\t\tself.real_counter = 0\n\t\t\t\n\t\t\t# 2010-7-29\n\t\t\tfor keyword, value in keywords.iteritems():\n\t\t\t\tsetattr(self, keyword, value)", "title": "" }, { "docid": "14a12ffeb50b509a04b2b896d066580f", "score": "0.41817164", "text": "def __init__(self, **keywords):\n\t\t\tself.real_counter = 0\n\t\t\t\n\t\t\t# 2010-7-29\n\t\t\tfor keyword, value in keywords.iteritems():\n\t\t\t\tsetattr(self, keyword, value)", "title": "" }, { "docid": "14a12ffeb50b509a04b2b896d066580f", "score": "0.41817164", "text": "def __init__(self, **keywords):\n\t\t\tself.real_counter = 0\n\t\t\t\n\t\t\t# 2010-7-29\n\t\t\tfor keyword, value in keywords.iteritems():\n\t\t\t\tsetattr(self, keyword, value)", "title": "" }, { "docid": "3a12bca8a560b615078967423cbdf957", "score": "0.41733828", "text": "def get_keyword_names(obj):\n names = [\n member[0]\n for member in inspect.getmembers(obj, inspect.isroutine)\n if (not member[0].startswith(\"_\")) and member[0] != \"get_keyword_names\"\n ]\n return names", "title": "" }, { "docid": "f91a7535d5671f1353bbcce916d83294", "score": "0.417167", "text": "def process_page(page, detectors):\n logger.info(\"Running LDA detection on page %d\", page.id)\n global lda_model_lookup\n lda_model_lookup = {}\n\n detectors = set(detectors)\n detector_ids_to_delete = set()\n\n for detector in detectors:\n logger.info('Running LDA detection (page_id:%d detector:%s)', page.id, detector.name)\n if classify_text(page.title_and_text, detector):\n logger.info(\"LDA true detection (page_id:%d, detector:%s)\", page.id, detector.name)\n detector.save_result(page.id)\n else:\n detector_ids_to_delete.add(detector.id)\n\n LdaDetector.delete_detector_results(page, detector_ids_to_delete)\n logger.info(\"Finished LDA detection on page %d\", page.id)", "title": "" }, { "docid": "b7ce33ad8d7daddd6474799650489b4d", "score": "0.41689733", "text": "def visit_keyword(self, kw):", "title": "" }, { "docid": "f3ac402bdbe9e1ad8f7a595d53078aef", "score": "0.41653314", "text": "def test_crawl_request_with_robots_in_meta(self, objects):\n host = self.host\n inst = objects.return_value\n inst.first.return_value = host\n\n crawl_result = self.crawl_result\n soup = BeautifulSoup(crawl_result.body)\n soup.find(\"head\").append(\n Tag(builder=soup.builder, name=\"meta\",\n attrs=dict(name=\"robots\", content=\"NOINDEX, NOFOLLOW\"))\n )\n\n crawl_result.body = soup.prettify(\"utf8\")\n cm = self.crawl_manager\n\n self.assertEqual(\n len(list(cm.extract_requests(crawl_result))),\n 0\n )", "title": "" }, { "docid": "4f5ed9fcb1966b512fd879bdb8cb6b17", "score": "0.41637433", "text": "def parsePage(self, url, type):\n print \"Parsing page %s [%s]\" % (url, type)\n\n if self.url == url:\n return\n self.url = url\n\n parser = org.cyberneko.html.parsers.DOMParser()\n parser.parse(self.url)\n\n document = parser.getDocument()\n domReader = org.dom4j.io.DOMReader()\n self.doc = domReader.read(document)\n\n if type == PageType.MANGA_INFO:\n self.setMangaInfo()\n\n if type == PageType.GENRES:\n self.setCatalogGenres()\n\n if type == PageType.MANGA_LIST:\n self.setCatalogManga()\n\n if type == PageType.TRANSLATORS:\n self.setCatalogTranslators()\n\n return", "title": "" } ]
ed39a3a013ae5b8f461617359320f645
Calculates the exponent of input.
[ { "docid": "f380fbbaade742babf8802c984f6ea5c", "score": "0.0", "text": "def exp(x: Union[Rnode, Dual, float]) -> Union[Rnode, Dual, float, List[float]]:\n try:\n z = Rnode(np.exp(x.value))\n x.children.append((np.exp(x.value), z))\n return z\n except AttributeError:\n try:\n return Dual(np.exp(x.val), np.exp(x.val) * np.asarray(x.der))\n except AttributeError:\n return np.exp(x) # Default to numpy implementation", "title": "" } ]
[ { "docid": "1aea5fbc09f22496651026edb239b347", "score": "0.7366763", "text": "def exponent(x, y):\n return x ** y", "title": "" }, { "docid": "b5cc273b95ee4c93ad6f9a071e880419", "score": "0.7241899", "text": "def exp(exponent):\n return e**exponent", "title": "" }, { "docid": "8d7d9f4e688f8d010f33cea23a9bcbba", "score": "0.7049825", "text": "def private_exponent(self) -> int:", "title": "" }, { "docid": "64738bcdebe7b0968015b43d38391bcb", "score": "0.68442374", "text": "def exp(x):\r\n \r\n result = math.exp(x)\r\n print(f'{result}')\r\n return result", "title": "" }, { "docid": "ea3601b1928093a097b9c271de4f4962", "score": "0.6760848", "text": "def calcExponent(walker):\r\n\tR_N = e2edist(walker)\r\n\treturn np.log(R_N**2)/np.log(walker.N)", "title": "" }, { "docid": "c3ea8bd85f04b5b8cc5dc7d7e1e7e30d", "score": "0.67250013", "text": "def exponential(x, halflife):\n return 2 ** (-x / halflife)", "title": "" }, { "docid": "1a6e814a42e17f298454f3d7315201ae", "score": "0.6477517", "text": "def exp(operand, e):\n if e < 0:\n # special case\n return 1 / exp(operand, -e)\n elif e == 1:\n return operand\n r = exp(operand, e / 2)\n result = r * r\n if e % 2 == 1:\n # if exponent is odd\n result *= operand\n return result", "title": "" }, { "docid": "c67aead81ec185e4fa9086fe533b5ec2", "score": "0.6404727", "text": "def pow(base, exp):\n\treturn base**exp", "title": "" }, { "docid": "2eb27b1b84d5b91f725c804be4a22c90", "score": "0.63711935", "text": "def calculate_power(self, base, exponent):\n calculated_value = 1\n\n # return 1 if exponent is 0\n if exponent == 0:\n return 1\n\n # return 0 if the base is 0 to bypass unnecessary calculations\n if base == 0:\n return 0\n\n #if exponent is negative\n for i in range(abs(exponent)):\n calculated_value *= base\n\n # if exp is negative\n if exponent < 0:\n # answer is going to be 1 / base^exp\n calculated_value = 1 / calculated_value\n\n return calculated_value", "title": "" }, { "docid": "26f7e367a5b4acc486954c7340305797", "score": "0.63665676", "text": "def exponent(self):\n return Integer(self._gap_().Exponent())", "title": "" }, { "docid": "baea082af5ae8319051b605316f3487c", "score": "0.6342536", "text": "def mypow(base, exp):\n return base**exp", "title": "" }, { "docid": "7fc1a82c7b53386a3142a50b03405991", "score": "0.6335501", "text": "def exponential_form(q): \n logq = np.log10(q)\n exponent = np.floor(logq)\n factor = q / 10**(exponent) \n return factor, exponent", "title": "" }, { "docid": "917a1ff8a750221b17a86ca1cbed7204", "score": "0.63161856", "text": "def exponent(num1, num2):\r\n print(num1, \"**\", num2, \"=\", num1 ** num2)", "title": "" }, { "docid": "0e06acc31c20b5c6c84d5ecaaf35c464", "score": "0.6277947", "text": "def get_exp(self, signal):\r\n offant = self.lockin.ask('oexp?'+str(signal))\r\n offantsplit =[float(i) for i in offant.split(',')]\r\n self.exp=int(10**offantsplit[1])\r\n return self.exp", "title": "" }, { "docid": "47abdc88c7701349e33e7f2a8a151457", "score": "0.62628686", "text": "def exponentiation_recursive(x, e):\n if e == 0:\n return 1\n else:\n return x * exponentiation_recursive(x, e-1)", "title": "" }, { "docid": "0e00b13933d112f0e17fa557144c2e76", "score": "0.62602955", "text": "def square(x):\n return exponent(x, 2)", "title": "" }, { "docid": "51fc5c203ded1646298d1aa4480b90c7", "score": "0.6256677", "text": "def power(self) -> int:", "title": "" }, { "docid": "3f3c991427dcee3f0cda630efaaa608f", "score": "0.6256301", "text": "def exp(x):\n return np.exp(x)", "title": "" }, { "docid": 
"0d1003b06857946ef7a72c40c2a85a82", "score": "0.62373924", "text": "def _exp(x):\n if _USE_NUMEXPR:\n return ne.evaluate(\"exp(x)\", optimization='moderate', )\n else:\n return np.exp(x)", "title": "" }, { "docid": "eb884fc5f18e179387fe6b032d6cd6d9", "score": "0.6234807", "text": "def power_in(self) -> int:", "title": "" }, { "docid": "bbca2062d3c4f3ff80c061852a122ba0", "score": "0.6234034", "text": "def raise_to_power(base, exp): # function w/ multiple parameters\n new_value = base ** exp\n return new_value", "title": "" }, { "docid": "298e43c58405029130f51bcf867ec037", "score": "0.6233597", "text": "def powerlaw_deterrence_func(x, exponent):\n return np.power(x, exponent)", "title": "" }, { "docid": "436a18f3d24227984697fdced59f89cc", "score": "0.6194577", "text": "def exp(x):\n # check on single exp\n if is_numeric(x):\n return _exp_single(x)\n # try vectorized\n try:\n return _vectorize(exp, x)\n except ValueError as v:\n # bail\n raise ValueError(\"don't know how to compute exp for type %s\" % type(x))", "title": "" }, { "docid": "ea073218b2f6468cac068cd88aeca7cf", "score": "0.61640096", "text": "def exp(z):\n pass", "title": "" }, { "docid": "b1f6792ed7879c507d83078e9cf3bffa", "score": "0.6129035", "text": "def dif_expo(arg):\r\n \r\n result = math.exp(arg)\r\n print(f'{result}')\r\n return result", "title": "" }, { "docid": "6822536a1f66232d2f6e3c2c822c8880", "score": "0.6105764", "text": "def get_exp(self,num):\r\n\t\tif num == 0: return 0\r\n\t\treturn int(math.floor(math.log10(abs(num))))", "title": "" }, { "docid": "9478ce143c4677212a84dfdfa88afbbc", "score": "0.6096482", "text": "def exponential(init, final, total_steps, step):\n return init * (final / init) ** (step / (total_steps - 1))", "title": "" }, { "docid": "6d3b5beb9b00b49926c95af78b5ff7ab", "score": "0.605352", "text": "def exp(x: str, width: int, int_width: int, is_signed: bool, print_results=False):\n frac_width = width - int_width\n bin_string = FixedPoint(x, width, int_width, is_signed=False).bit_string(\n with_prefix=False\n )\n\n int_b = bin_string[:int_width]\n int_bin = int(int_b, 2)\n frac_b = bin_string[int_width:width]\n frac_bin = int(frac_b, 2)\n\n # Split e^x into e^i * e^f.\n e_i = Decimal(\"2.71828\") ** int_bin\n\n e_table = compute_exp_frac_table(frac_width)\n e_f = e_table[frac_bin]\n\n # Compute e^i * e^f.\n actual = Decimal(e_i) * Decimal(e_f)\n\n if print_results:\n accepted = Decimal(\"2.71828\") ** Decimal(x)\n print(\n f\"e^{x}: {accepted}, actual: {actual}\"\n f\"relative difference: {(actual - accepted) / actual * 100} %\"\n )\n\n return actual", "title": "" }, { "docid": "a8f4c03e94079d59017cc5ea7d948bb4", "score": "0.6052873", "text": "def power_out(self) -> int:", "title": "" }, { "docid": "c26c53b5fcffdc0ac0a3f1bcd701329d", "score": "0.6009875", "text": "def power(base, exp):\n if exp == 0:\n return 1\n if exp != 0:\n return base * power(base, exp - 1)", "title": "" }, { "docid": "365e207587f0b8173f3bb1224687e19d", "score": "0.6005401", "text": "def raise_to_power(base, exp=1):\n new_value = base ** exp\n return new_value", "title": "" }, { "docid": "93c2f459f4f77710a625e1f936218f01", "score": "0.5989663", "text": "def multiply_by_exponent(val, exponent=3, base=10):\n\n if type(val) is int:\n int_val = int(val)\n else:\n int_val = 0\n\n return int_val * (base**exponent)", "title": "" }, { "docid": "7e20695f05b7f557908022592e94db77", "score": "0.59811705", "text": "def mEXP(x: N_, y: N_) -> N_:\n return x ** y", "title": "" }, { "docid": "d5e3ec3af6c357b6473f2e95639ef28b", "score": 
"0.5970851", "text": "def exp(self, a, out=None):\n return OpTreeNode.build(\"exp\", a, None, out=out)", "title": "" }, { "docid": "b90857f3216aa84cf290375ea653bc02", "score": "0.59550434", "text": "def my_exp(x):\n\tsum = 1.0\n\tterm = 1.0 # temp value to hold term\n\ti = 1\n\t# keep iterating until the added term is less than the machine precision\n\t# alternatively, one could also use a fixed number of terms in the Taylor expansion,\n\t# however the number required to get sufficient accuracy depends on the value of x\n\t# and therefore a while loop is better\n\twhile term > sys.float_info.epsilon:\n\t\tterm = pow(x, i) / factorial(i)\n\t\tsum += term\n\t\ti += 1\n\tprint \"Required %i terms for convergence\" % (i)\n\treturn sum", "title": "" }, { "docid": "60548b38b308d51d6b1b3dafb084a27d", "score": "0.59529984", "text": "def pow(x, a):\n return np.power(x, a)", "title": "" }, { "docid": "0ddde39b69b23eb95cd5a751f51e37dc", "score": "0.59498", "text": "def generate_exponential(self, image, mask, **kwargs):\n exponentialImage = imageoperations.applyExponential(image)\n yield exponentialImage, mask, 'exponential', kwargs", "title": "" }, { "docid": "37449847245b021190edea678c507efe", "score": "0.5942994", "text": "def fast_exponentiation(a, p, n):\n\t result = a % n\n\t remainders = []\n\t while p != 1:\n\t remainders.append(p & 1)\n\t p = p >> 1\n\t while remainders:\n\t rem = remainders.pop()\n\t result = ((a ** rem) * result ** 2) % n\n\t return result", "title": "" }, { "docid": "8a82e664af4002d933834288e8c875a0", "score": "0.5942467", "text": "def pow(n, x):\n return _math.pow(x, n)", "title": "" }, { "docid": "bb7c1de250f82339a9a27caae8b39cc3", "score": "0.59301347", "text": "def ldexp(x, i):\n return x * 2 ** i", "title": "" }, { "docid": "341b8c242424d19840ab504f91e5ed44", "score": "0.5894733", "text": "def mEXP2(n: N_) -> N_:\n return 2 ** n", "title": "" }, { "docid": "fe54539fce97c1589ea51ec6d5cd0491", "score": "0.5890044", "text": "def power(base, exponent):\n result = base ** exponent\n print \"%d to the power of %d is %d.\" % (base, exponent, result)", "title": "" }, { "docid": "acd76d0b0f9611e271eb7b85dfca5b0b", "score": "0.58843946", "text": "def approximate_exp(power, _accuracy):\n \n devider = 100\n \n result = _accuracy(0.0)\n \n exp_dev_cnt = power / devider\n exp_remain = power % devider\n \n exp_result = np.exp(_accuracy(exp_remain))\n \n exp_dev = np.exp(_accuracy(devider))\n \n warnings.filterwarnings(\"error\")\n \n for i in range(exp_dev_cnt):\n try:\n exp_result *= exp_dev\n\n except:\n exp_result = np.inf\n break\n \n return exp_result", "title": "" }, { "docid": "da6b2060f4af75ee6bc15c94ad0e7e5b", "score": "0.5857872", "text": "def __pow__(self, exponent: float) -> \"FieldBase\":\n if not np.isscalar(exponent):\n raise NotImplementedError('Only scalar exponents are supported')\n return self.copy(data=self.data ** exponent)", "title": "" }, { "docid": "f9a7808582634ba7655d41fbe84f10ec", "score": "0.58394337", "text": "def fast_exponentiation(a, p, n):\r\n result = a % n\r\n remainders = []\r\n while p != 1:\r\n remainders.append(p & 1)\r\n p = p >> 1\r\n while remainders:\r\n rem = remainders.pop()\r\n result = ((a ** rem) * result ** 2) % n\r\n return result", "title": "" }, { "docid": "ac27169c37979cf99b485e3dd7c7158e", "score": "0.5839011", "text": "def calculate_mant_exp(value, precision=8):\n des_value = value\n exponent = 0\n while abs(value) > 2 ** precision:\n value /= 2\n exponent += 1\n if not int(value) * 2 ** exponent == des_value:\n print('desired 
value of normalized max weight:', des_value)\n print('actual value:', int(value) * 2 ** exponent, 'mantissa:', int(value), 'exponent:', exponent)\n return int(value), exponent", "title": "" }, { "docid": "d7ae6b172da3b6388203206d17dbaf78", "score": "0.5812858", "text": "def pow(x, y):\n return x**y", "title": "" }, { "docid": "c6a641be4fb27d5cd1499d2ac344f25b", "score": "0.5788653", "text": "def test_exponentiation(self):\n steps = 512\n modulus = 2**256 - 2**32 * 351 + 1\n mod = IntegersModP(modulus)\n Gorig = pow(7, (modulus - 1) // steps, modulus)\n G = mod(7)**((modulus - 1) // steps)\n assert int(G) == Gorig", "title": "" }, { "docid": "5b857fb67b94f7fede2712e67f21eff8", "score": "0.5781514", "text": "def get_exponent(vector):\n if not isinstance(vector, np.ndarray):\n raise TypeError('vector should be of type numpy.ndarray. Provided object of type: {}'.format(type(vector)))\n if np.isnan(vector).any():\n raise TypeError('vector should not contain NaN values')\n if np.max(np.abs(vector)) == np.max(vector):\n exponent = np.log10(np.max(vector))\n else:\n # negative values\n exponent = np.log10(np.max(np.abs(vector)))\n \n return int(np.floor(exponent))", "title": "" }, { "docid": "ace38f8323cfc2d45c6cf97528f772a8", "score": "0.5773285", "text": "def safe_exp(value):\n try:\n ans = math.exp(value)\n except OverflowError:\n ans = float(\"inf\")\n return ans", "title": "" }, { "docid": "ace38f8323cfc2d45c6cf97528f772a8", "score": "0.5773285", "text": "def safe_exp(value):\n try:\n ans = math.exp(value)\n except OverflowError:\n ans = float(\"inf\")\n return ans", "title": "" }, { "docid": "99960898c826edb92b04695ed290f719", "score": "0.57612187", "text": "def my_pow(b,e,m):\r\n if e == 0:\r\n print('exponent is zero')\r\n return 1\r\n ## enter your source code here\r\n \r\n r = 1\r\n bn = bin(e)[2:] # convert the exponent to binary format, discard the prefix ‘0b’\r\n i = 0\r\n while (i < len(bn)):\r\n r = (r * r) % m # square is done automatically for every exponent digit\r\n if int(bn[i]) != 0: # multiply is done only if the exponent is 1\r\n r = (r * b) % m\r\n i = i + 1 \r\n return r", "title": "" }, { "docid": "65585f450690c779aa6b6a74855f28d9", "score": "0.5741698", "text": "def exponentiate(term: PauliTerm) -> Program:\n return exponential_map(term)(1.0)", "title": "" }, { "docid": "c6a995729a196870107cf338681ded16", "score": "0.57312685", "text": "def exponent(rho, phi, z, w0, kk, p=0, l=0):\n bb = b(w0, kk)\n ww = w(z, w0, bb)\n FF = F(rho, z, bb)\n aa = alpha(rho, ww, FF)\n RR = R(z, bb)\n PP = Phi0(z, bb)\n\n return -(aa**2)/2 -j*kk*RR*(FF-1) -j*kk*z +j*(2*p + abs(l) + 1)*PP +j*l*phi", "title": "" }, { "docid": "2e9e234881a30a545bd24be27b3e1b10", "score": "0.5723759", "text": "def exp_func(x, *varargs):\n sum = 0.\n for z in x:\n sum += np.exp(-z ** 2)\n\n return sum", "title": "" }, { "docid": "53f06b9fca110f4f69cb48e32fe250f6", "score": "0.57202727", "text": "def exponential(t,tau):\n return fexponential(tau)(t)", "title": "" }, { "docid": "1d31a9943197550ccad61b66f15f8332", "score": "0.570796", "text": "def exp(b, n):\n if n == 0:\n return 1\n else:\n return b * exp(b, n-1)", "title": "" }, { "docid": "c75e0940f43673a63c3812cb7c2bf3f6", "score": "0.5685633", "text": "def exp1(inp1, inp2, parameter):\n\n exp1 = np.expm1(inp1)\n exp1 = exp1 / (np.exp(1) - 1)\n\n return exp1", "title": "" }, { "docid": "6ad1be15c91cbb45bb16be814a3e5cfc", "score": "0.5678249", "text": "def compute(self, node, input_vals):\r\n assert(len(input_vals) == 1)\r\n return np.exp(input_vals[0])", 
"title": "" }, { "docid": "8d97f2ad622140c664f42ea145f00e13", "score": "0.56586444", "text": "def power(x, y):\n return math.pow(x, y)", "title": "" }, { "docid": "f7c131ce33945aec66d0315c8b198afe", "score": "0.5640994", "text": "def exp_fast(b, n):\n if n == 0:\n return 1\n elif n % 2 == 0:\n return square(exp_fast(b, n//2))\n else:\n return b * exp_fast(b, n-1)", "title": "" }, { "docid": "e47928c7bab084796af956296752ebd5", "score": "0.5637873", "text": "def power_stored(self) -> int:", "title": "" }, { "docid": "3a4877dd15fd2293ca005739b92aa3e2", "score": "0.5636454", "text": "def power(num1, num2):\n return num1 ** num2 # ** = exponent operator", "title": "" }, { "docid": "d99065f086d351fcd3ab3af648306152", "score": "0.56309295", "text": "def negative_exp(n):\n return '1{}e-{}'.format('0' * n, n)", "title": "" }, { "docid": "d43d189173c9458512e84cf311efb547", "score": "0.56172585", "text": "def pow(a, b):\n return a**b", "title": "" }, { "docid": "0e639d63dbecc186d5282344e92dee50", "score": "0.56080735", "text": "def input_power(self):\n power = np.zeros(self.frequency.amount)\n #print self.linked_excitations\n for excitation in self.linked_excitations:\n power = power + excitation.power\n return power", "title": "" }, { "docid": "5101e85a4284c8143f2bcab0c302aff5", "score": "0.55802387", "text": "def as_coeff_exponent(self, x) -> tuple[Expr, Expr]:\n from sympy.simplify.radsimp import collect\n s = collect(self, x)\n c, p = s.as_coeff_mul(x)\n if len(p) == 1:\n b, e = p[0].as_base_exp()\n if b == x:\n return c, e\n return s, S.Zero", "title": "" }, { "docid": "fc14a480d9f1af151e9fc2436cf5779b", "score": "0.5579939", "text": "def pow(x, y):\n return pow(x, y)", "title": "" }, { "docid": "9e600db861fe6db29775c6dde169e15d", "score": "0.5573907", "text": "def power(x, y):\n return x ** y", "title": "" }, { "docid": "c8f5394504e1e6acf9d60e66508884cb", "score": "0.55635697", "text": "def exponentiate_base_g(self, x):\n r = pow(self.g, x, self.p)\n return r", "title": "" }, { "docid": "f43a14eb3a71d7d925cf6c0fe7ca7c9b", "score": "0.5559621", "text": "def intpow(x, n):\n if n == 0:\n return 1\n return x * intpow(x, n - 1)", "title": "" }, { "docid": "5ab8e14ccdebfdee7c0a5359214b1f2b", "score": "0.553588", "text": "def exponential_scaling(x, factor=6):\n return 1-math.exp(-max(0, x)*factor)", "title": "" }, { "docid": "15b85dcafd091c0402065614fa6ef860", "score": "0.55286986", "text": "def modPow(x, e, n):\n result = 1\n while e > 0:\n if e & 1:\n result = (result * x) % n\n e >>= 1\n x = x * x % n\n return result", "title": "" }, { "docid": "9548b41927a6dffe579709fbb55c1c7c", "score": "0.5501119", "text": "def power(num1, num2):\n\n return num1 ** num2", "title": "" }, { "docid": "0a1fd4e4dd4e66e42c1a7c72c4ef6c35", "score": "0.5490087", "text": "def exp_generic(input_x):\n exp = P.Exp()\n cast = P.Cast()\n dtype = P.DType()\n checktype = inner.IsSubClass()\n\n if not checktype(dtype(input_x), mstype.float_):\n input_x = cast(input_x, mstype.float32)\n return exp(input_x)", "title": "" }, { "docid": "cd73b508be0c45b5ee460d294a997aa3", "score": "0.54817086", "text": "def exp_approx(n: int, x: float) -> float:\n value = 0\n for i in range(0, n+1):\n value += (x**i) / (math.factorial(i))\n return value", "title": "" }, { "docid": "ddd26087f713b3697e80e745f522899c", "score": "0.5473483", "text": "def exp(rv):\n return rv.exp()", "title": "" }, { "docid": "85c1f73690534fcc89d76d44b63a9e41", "score": "0.5472353", "text": "def power(num1, num2):\n return num1**num2", "title": "" }, { "docid": 
"884479c2fa4f28787f3d949ef9c090a2", "score": "0.54692876", "text": "def math(x):\n return (x + 3) ** 2", "title": "" }, { "docid": "ecca842061dc1fb17fa83cecf2a60ebe", "score": "0.54685235", "text": "def raise_val(n) :\r\n \r\n def inner(x) :\r\n raised = x ** n\r\n return raised\r\n \r\n return inner", "title": "" }, { "docid": "9e3eaf49ca41dadc9f6d8e5da5113347", "score": "0.54656893", "text": "def _convert_exponent(self, field: bytes) -> float:\n return float(field.decode(self.file_encoding or \"utf-8\").replace(\"D\", \"E\"))", "title": "" }, { "docid": "394d865c6f6843b8e0ff86dbf5f1039d", "score": "0.54561746", "text": "def bias_exponents(a, max_exponent):\n a_max = np.max(a)\n basetype = type(a_max)\n unused_mantissa, current_max_exponent = mp.frexp(a_max) # WRONG\n power = max_exponent - current_max_exponent\n if power != 0.0:\n a *= basetype(2.0)**power\n return power", "title": "" }, { "docid": "b3b99f6f1adae8b303bfe0d4d4b43fae", "score": "0.5452828", "text": "def erf(self, x):\n\n π = self.π\n root_π = π**(1 / 2)\n x3 = x**3\n x5 = x**5\n x7 = x**7\n x9 = x**9\n\n return (2 / root_π) * x - x3 / 3 + x5 / 10 - x7 / 42 + x9 / 216", "title": "" }, { "docid": "2af1f2d8790dcbc0e0882d0e24179822", "score": "0.5443027", "text": "def power(x, a, b):\n return a*x**b", "title": "" }, { "docid": "e807c96378361b43a34e85313c247c95", "score": "0.5439225", "text": "def square_func(init, exponent):\n if exponent <= 1:\n return init\n return square_func(init, exponent - 1) ** 2", "title": "" }, { "docid": "155a5b5f585c52d8d8af305ada6fbcb6", "score": "0.54359555", "text": "def calculate_keys_custom_exponent(p: int, q: int, exponent: int) -> typing.Tuple[int, int]:\n\n phi_n = (p - 1) * (q - 1)\n\n try:\n d = rsa.common.inverse(exponent, phi_n)\n except rsa.common.NotRelativePrimeError as ex:\n raise rsa.common.NotRelativePrimeError(\n exponent, phi_n, ex.d,\n msg=\"e (%d) and phi_n (%d) are not relatively prime (divider=%i)\" %\n (exponent, phi_n, ex.d))\n\n if (exponent * d) % phi_n != 1:\n raise ValueError(\"e (%d) and d (%d) are not mult. inv. 
modulo \"\n \"phi_n (%d)\" % (exponent, d, phi_n))\n\n return exponent, d", "title": "" }, { "docid": "1c61618b18a1ee4ece99221aa75d7933", "score": "0.5434808", "text": "def power_of(x):\n\n def inner_func(y):\n return x**y\n\n return inner_func", "title": "" }, { "docid": "81f0d48f36792da35ba76714f6f3a3a8", "score": "0.54325294", "text": "def RSAenc(m,e,N): # return m^e mod N\r\n return pow(m,e,N)", "title": "" }, { "docid": "07dcfaf170d4f37a0d1dd9f5e508bb43", "score": "0.54223436", "text": "def power(x, n):\n power_of = 1\n\n for i in range(1, n + 1):\n power_of = multiply(power_of, x)\n\n return power_of", "title": "" }, { "docid": "0fac0a895c988e836f2d77c2be18b0c5", "score": "0.5418532", "text": "def exponential(feature_i, feature_j, i_symbol=None, j_symbol=None, sigma=1.0):\n\n if i_symbol != j_symbol:\n return 0.0\n else:\n if isinstance(sigma, list) or isinstance(sigma, np.ndarray):\n assert len(sigma) == len(feature_i) and len(sigma) == len(feature_j), (\n \"Length of sigma does not \" \"match atomic fingerprint \" \"length.\"\n )\n sigma = np.array(sigma)\n anisotropic_exp = np.exp(\n -(\n np.sqrt(\n np.sum(\n np.square(\n np.divide(\n np.subtract(feature_i, feature_j),\n (2.0 * np.square(sigma)),\n )\n )\n )\n )\n )\n )\n return anisotropic_exp\n else:\n exponential = np.exp(\n -(np.linalg.norm(feature_i - feature_j)) / (2.0 * sigma ** 2)\n )\n return exponential", "title": "" }, { "docid": "dd88dbd52d6ac8bcd9657ca5f9073fce", "score": "0.5405274", "text": "def _exponential_decay(x: np.ndarray, a: float, b: float, c: float = 0) -> np.ndarray:\n return a * np.exp(-b * x) + c", "title": "" }, { "docid": "86b0fcd4351069f2667169c2d0ebea7d", "score": "0.5403381", "text": "def f(x):\n return np.exp(-(x**2))", "title": "" }, { "docid": "de0db0892c46947512365aa7423cabe9", "score": "0.5393994", "text": "def distribute_exponent(self):\n # multiply each tensor by the nth root of 10**exponent\n x = 10**(self.exponent / self.num_tensors)\n self.multiply_each_(x)\n\n # reset the exponent to zero\n self.exponent = 0.0", "title": "" }, { "docid": "e6fe0a45bb6ba94b6729fe0911224986", "score": "0.5391864", "text": "def power(args):\n base = args[0]\n for num in args[1:]:\n base = base ** num\n return base", "title": "" }, { "docid": "5211d0853bf943533ebb87dc6dc10720", "score": "0.5383744", "text": "def prev_pow_2(x): #consider converting to generator\n return 2**math.floor(math.log2(x))", "title": "" }, { "docid": "c3858768f09e0c2d1ef8485998d9da5d", "score": "0.5375805", "text": "def power(x,n): \n x = abs(x)\n if n == 0:\n return 1\n elif n == 1 :\n return x\n else:\n for i in range(1,n):\n x *= x\n return x", "title": "" }, { "docid": "fb7e4e7f70d1e435bf1f6dcf60fd0245", "score": "0.5375452", "text": "def set_exponential(self, total_num, batchsize,\n learning_rate=0.1,\n decay_epochs=30,\n decay_rate=0.1):\n self.name = 'exponential'\n self.learning_rate = learning_rate\n self.decay_steps = self._decay_steps(total_num, batchsize, decay_epochs)\n self.decay_rate = decay_rate", "title": "" }, { "docid": "d0e04e10e3f6e9e75cd6715efc32993e", "score": "0.53750527", "text": "def power(self):\n return self._power * self.inversion_multiplier", "title": "" }, { "docid": "daf828f030c5d82e97ba147b3ea4bb07", "score": "0.5371486", "text": "def f(x):\n return np.exp(-x**2)", "title": "" }, { "docid": "58086c0171759f0775dd1a14386b0a0f", "score": "0.53714395", "text": "def erf(x):\n return special.erf(x)", "title": "" }, { "docid": "ba049a3f0e161b70240c7a0d65350e3d", "score": "0.53575915", "text": "def power(self, 
x,n):\n if n==0:\n return 1\n if n==1:\n return x\n a = self.power(x, n//2)\n if n%2==1:\n return x*a*a\n else:\n return a*a", "title": "" } ]
8770fb55163d080a5ff236efe6ab58ff
Checks whether `chars` is a punctuation character.
[ { "docid": "f89d82d411a50a54ac53773eabec0df4", "score": "0.8179238", "text": "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False", "title": "" } ]
[ { "docid": "fc4aa7b34e3b974c738c014df45d4aea", "score": "0.8229112", "text": "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False", "title": "" }, { "docid": "85f65453c2670f43c0472fb383728651", "score": "0.818407", "text": "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False", "title": "" }, { "docid": "92dbaace95b0a8dfa501100f41d435df", "score": "0.81837153", "text": "def _is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False", "title": "" }, { "docid": "0a452fb2122048421edb14a7a096e9ae", "score": "0.81405276", "text": "def is_punctuation(char):\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if (33 <= cp <= 47) or (58 <= cp <= 64) or (91 <= cp <= 96) or (123 <= cp <= 126):\n return True\n cat = unicodedata.category(char)\n if cat.startswith('P'):\n return True\n return False", "title": "" }, { "docid": "f8e5f1d1cca31954477679c4c86b3ad0", "score": "0.7607812", "text": "def is_punctuation_mark(c):\n return c in punc_str", "title": "" }, { "docid": "0ac5f410bdd8800c96db53592a64bd26", "score": "0.7504023", "text": "def is_punctuation(token):\n if isinstance(token, bytes): # python 2.x ascii str\n token = token.decode('ascii')\n\n return (\n bool(token) and\n not token.isspace() and\n all(unicodedata.category(ch)[0] == 'P' for ch in token if not ch.isspace())\n )", "title": "" }, { "docid": "b63f89c8954877e9122d1e3b09f3fedf", "score": "0.7411357", "text": "def punct(mychr):\r\n\tif mychr == '!' or mychr == ';' or mychr == '?' or mychr == '.' 
or mychr == ':' or mychr == '(' or mychr == ')':\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False", "title": "" }, { "docid": "bc85e8b540ba76d39c886c2387b18f7a", "score": "0.7161815", "text": "def check_embedded_punctuation(input_str):\n from string import punctuation\n \n punct_count = 0\n \n for char in input_str:\n if char in punctuation:\n punct_count += 1\n if punct_count == 2:\n return True\n else:\n punct_count = 0\n \n return False", "title": "" }, { "docid": "928499322274095e6024915e1a7cded1", "score": "0.6795956", "text": "def test_punctuate_non_space_last_char_punctuation_not_applied(self):\n self.assertEqual(ParseTools.punctuate(\"String with already present last punctuation!\"), \"String with already present last punctuation!\")\n self.assertEqual(ParseTools.punctuate(\"String with already present last punctuation.\"), \"String with already present last punctuation.\")\n self.assertEqual(ParseTools.punctuate(\"String with already present last punctuation?\"), \"String with already present last punctuation?\")\n self.assertEqual(ParseTools.punctuate(\".\"), \".\")\n self.assertEqual(ParseTools.punctuate(\"!\"), \"!\")\n self.assertEqual(ParseTools.punctuate(\"?\"), \"?\")", "title": "" }, { "docid": "4d4274419afd32dfadbf4938393fd537", "score": "0.6716283", "text": "def count_punctuations( s ):\n\tcount = 0\n\tfor i in s:\n\t\tif (i.isalpha() or i.isdigit()) == False:\n\t\t\tcount += 1\n\treturn count", "title": "" }, { "docid": "8651dab4c918574dc2e3a87ef456890d", "score": "0.66342753", "text": "def test_punctuate_non_space_last_char_punctuation_applied(self):\n original_string = \"String with character end z\"\n expected_outputs = {\"String with character end\" + '.', \"String with character end\" + '?', \"String with character end\" + '!'}\n self.assertIn(ParseTools.punctuate(original_string), expected_outputs)", "title": "" }, { "docid": "ee6ce746527b3e999e8c3362995058ad", "score": "0.6620878", "text": "def f_punctuation(s):\n continuous_excl = 0\n continuous_quest = 0\n continuous_excl_quest = 0\n\n excl_flag = 0\n quest_flag = 0\n excl_quest_flag = 0\n for char in s:\n if char == '!':\n excl_flag += 1\n else:\n if excl_flag > 1:\n continuous_excl += 1\n excl_flag = 0\n if char == '?':\n if quest_flag > 1:\n continuous_quest += 1\n quest_flag += 1\n else:\n quest_flag = 0\n if char == '!' or char == '?':\n excl_quest_flag += 1\n else:\n excl_quest_flag = 0\n if excl_quest_flag > 1:\n continuous_excl_quest += 1\n last_word = s.split(' ')[-1]\n last_excl_or_quest = '!' in last_word or '?' in last_word\n\n return [continuous_excl,\n continuous_quest,\n continuous_excl_quest,\n last_excl_or_quest * 1]", "title": "" }, { "docid": "897c0d051c1e3963f9d881f76e4c6c01", "score": "0.6595744", "text": "def remove_punct(text):\n no_punct = \"\"\n for char in text:\n if not (char in string.punctuation):\n no_punct = no_punct + char\n\n return no_punct", "title": "" }, { "docid": "19a83d4f4962bad8ab01834783167403", "score": "0.6553231", "text": "def test_punctuate_space_last_char_punctuation_not_applied(self):\n self.assertEqual(ParseTools.punctuate(\"String with already present last punctuation then space! \"), \"String with already present last punctuation then space!\")\n self.assertEqual(ParseTools.punctuate(\"String with already present last punctuation then space. \"), \"String with already present last punctuation then space.\")\n self.assertEqual(ParseTools.punctuate(\"String with already present last punctuation then space? 
\"), \"String with already present last punctuation then space?\")", "title": "" }, { "docid": "4fb074ddc9c83827e624ef82a4478ecb", "score": "0.64929265", "text": "def test_word_punct():\n\n assert tokenize.word_punct_tokens(\"Who are you??? Stop, now!\") == \\\n [\"Who\", \"are\", \"you\", \"???\", \"Stop\", \",\", \"now\", \"!\"]", "title": "" }, { "docid": "66bf249541ea4eb30a5aae97e34aa972", "score": "0.64719945", "text": "def check_text(text):\n\n strip_punc = text.strip(string.punctuation).strip()\n no_punc = text.translate(str.maketrans(\"\", \"\", string.punctuation)).strip()\n\n if len(text) == 0 or len(strip_punc) == 0 or len(no_punc) == 0:\n return False\n\n punc_percent = (len(strip_punc) - len(no_punc)) / len(strip_punc)\n\n return len(no_punc) > 2 and punc_percent <= 0.1", "title": "" }, { "docid": "941b6cb86f5cea03a05e867dca201aa2", "score": "0.64323914", "text": "def num_punctuation(self)->int:\n return len([s for s in self.str_inp if s in self.punct])", "title": "" }, { "docid": "6b418138279ef3f4190d8a0dace00b80", "score": "0.6357841", "text": "def test_punctuate_space_last_char_punctuation_applied(self):\n original_string = \"String with character-space end \"\n expected_outputs = {original_string[:-1] + '.', original_string[:-1] + '?', original_string[:-1] + '!'}\n self.assertIn(ParseTools.punctuate(original_string), expected_outputs)", "title": "" }, { "docid": "b3d1e1ad91cbea23218e9b88dacb713a", "score": "0.6310232", "text": "def bad_chars(chars):\n import re\n bad_chars = re.compile('.*[\\$\\n\\!\\;&` |<>].*')\n if bad_chars.match(chars):\n\treturn True\n return False", "title": "" }, { "docid": "48531aba8eead93aa46301bd777861a3", "score": "0.62545794", "text": "def check_special_characters_in_string(word, alloweds):\n for allowed in alloweds:\n invalid_chars = set(string.punctuation.replace(allowed, \"\"))\n if any(letter in invalid_chars for letter in word):\n raise ValueError('Special characters are not allowed')\n return True", "title": "" }, { "docid": "1814c08a77b06eeff3f26d17e6a94de9", "score": "0.6207864", "text": "def list_punctuations(sentence):\n punctuations = []\n\n for index, char in enumerate(sentence):\n if char in '`\"[](){}:,-_.!?;/\\|*^#@':\n punctuations.append([index, char])\n\n return punctuations", "title": "" }, { "docid": "199edab2d3e8d2d0f1012b3e623bbb80", "score": "0.60489494", "text": "def test_spacy_parser_on_punctuation(self):\n parser = SpaCyParser()\n result = parser.parse_and_tag_text(\"?\")\n self.assertEqual(result, [('?', '.')])", "title": "" }, { "docid": "dfee2e523c9d81d5c67105bc5ff5c49c", "score": "0.6047375", "text": "def delete_punctuation(s):\n result = ''\n for char in s:\n if char.lower() not in PUNCTUATION:\n result += char\n return result", "title": "" }, { "docid": "390b04bbbb7183e9aa0c0f2e4c4a4f83", "score": "0.6015875", "text": "def _check_exclude_char(word_chars, i=None):\n \n if word_chars is None:\n return False\n \n start = i if i is not None else 0\n end = i + 1 if i is not None else len(word_chars)\n \n if start < 0:\n return False\n \n if start == end:\n return False\n \n if start > len(word_chars):\n return False\n \n for j in range(start, end):\n char = word_chars[j]\n \n if not SPECIAL_CHARS_PATTERN.match(char):\n return False \n \n # Don't split the word if the special char lies between\n # two digits (i.e. 
keep things like '1.23')\n if char == \".\" and j > 0 and j < len(word_chars) - 1:\n prev_char = word_chars[j - 1]\n next_char = word_chars[j + 1]\n if prev_char.isdigit() and next_char.isdigit():\n return False\n \n return True", "title": "" }, { "docid": "250e5c871362b6bb8820d35ccaaececc", "score": "0.60141504", "text": "def remove_punc(words):\n\n trans = {ord(c): None for c in string.punctuation}\n no_punc = words.translate(trans)\n\n return no_punc", "title": "" }, { "docid": "e29df72ad4e47f7f9f904c0424d5b138", "score": "0.6010643", "text": "def contains_symbol(text: str, symbols: Optional[str] = None) -> bool:\n if symbols is None:\n for character in text:\n if character.isascii() and (not character.isalnum()):\n return True\n return False\n else:\n for character in text:\n if character in symbols:\n return True\n return False", "title": "" }, { "docid": "7c96daba56067eed19db19650450fa29", "score": "0.59261775", "text": "def remove_punctuation(string):\r\n # punctuation marks\r\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~,。、'''\r\n for x in string:\r\n if x in punctuations:\r\n string = string.replace(x, \" \")\r\n return string", "title": "" }, { "docid": "cdb0dcffe03020307593824cf455eecc", "score": "0.58993155", "text": "def len_no_punctuation(w):\n n = 0\t\t\t\t\t\t# loop over the letters and count up just the ascii_letters\n for letter in w:\n if letter in string.ascii_letters:\n n += 1\n return n", "title": "" }, { "docid": "e06cc7a7a94257aaa04bf680d56892a3", "score": "0.58741474", "text": "def punctuations_wants():\n\n response = str(\n input(\"Do you want punctuations ? (yes or no) : \")).lower().strip()\n\n if response == \"y\" or response == \"yes\":\n return True\n elif response == \"n\" or response == \"no\":\n return False\n else:\n print(\"\\n--------\\nERROR: Please anwer the questions yes or no.\")\n return punctuations_wants()", "title": "" }, { "docid": "d103574ef021929ceb219b37c9321bd0", "score": "0.5848991", "text": "def nopunc_len(word):\n for punc in string.punctuation:\n word = word.replace(punc, \"\")\n if len(word) > 0:\n return len(word)", "title": "" }, { "docid": "1d87deffb76f8f68e797a462a16f52a5", "score": "0.5835474", "text": "def contain_prep(sent):\n\n does_contain = False\n tokens = wordpunct_tokenize(sent)\n for token in tokens:\n if token.lower() in PREPS:\n does_contain = True\n break\n\n return does_contain", "title": "" }, { "docid": "81774a652380ffb8886168c4158e8e2e", "score": "0.58192384", "text": "def token_filter(token: Token) -> bool:\n return not (token.is_punct | token.is_space)", "title": "" }, { "docid": "75a2f4fb41c3425ef4cb637f6676553b", "score": "0.58106107", "text": "def get_punctuation(weak=False):\n if not weak:\n if random.random() < 0.2:\n punctuation = '?'\n elif random.random() < 0.2:\n punctuation = '!'\n else:\n punctuation = '.'\n else:\n if random.random() < 0.75: # 75% chance to have a space\n punctuation = ''\n else: # 25% to have a comma\n punctuation = ','\n\n # Ten percent chance to triple punctuation if '.' or add a '!' if '?' 
or '!'\n if random.random() < 0.1:\n if punctuation in ['.']:\n punctuation *= 3\n elif punctuation in ['?', '!']:\n punctuation += '!'\n\n if punctuation == ' ':\n punctuation = ''\n return punctuation", "title": "" }, { "docid": "ac06b355b220546454d5ab73954b4f7e", "score": "0.5774208", "text": "def remove_punctuation(text):\n return ''.join(char for char in text if char not in punctuation)", "title": "" }, { "docid": "4b28cf348fdfc8bba303d1d717b39e4e", "score": "0.5767635", "text": "def _remove_punctuation(self, sentence):\n\n sentence = self.tokenizer(sentence)\n\n prhase_no_punc = [word for word in sentence if word not in punctuation]\n\n prhase_no_punc = ' '.join(prhase_no_punc)\n\n return prhase_no_punc", "title": "" }, { "docid": "79f17ed54f6133c97a3eca0dd7da46e8", "score": "0.57420903", "text": "def post_process_punct(input: str, normalized_text: str, add_unicode_punct: bool = False):\n # in the post-processing WFST graph \"``\" are repalced with '\"\" quotes (otherwise single quotes \"`\" won't be handled correctly)\n # this function fixes spaces around them based on input sequence, so here we're making the same double quote replacement\n # to make sure these new double quotes work with this function\n if \"``\" in input and \"``\" not in normalized_text:\n input = input.replace(\"``\", '\"')\n input = [x for x in input]\n normalized_text = [x for x in normalized_text]\n punct_marks = [x for x in string.punctuation if x in input]\n\n if add_unicode_punct:\n punct_unicode = [\n chr(i)\n for i in range(sys.maxunicode)\n if category(chr(i)).startswith(\"P\") and chr(i) not in punct_default and chr(i) in input\n ]\n punct_marks = punct_marks.extend(punct_unicode)\n\n for punct in punct_marks:\n try:\n equal = True\n if input.count(punct) != normalized_text.count(punct):\n equal = False\n idx_in, idx_out = 0, 0\n while punct in input[idx_in:]:\n idx_out = normalized_text.index(punct, idx_out)\n idx_in = input.index(punct, idx_in)\n\n def _is_valid(idx_out, idx_in, normalized_text, input):\n \"\"\"Check if previous or next word match (for cases when punctuation marks are part of\n semiotic token, i.e. 
some punctuation can be missing in the normalized text)\"\"\"\n return (idx_out > 0 and idx_in > 0 and normalized_text[idx_out - 1] == input[idx_in - 1]) or (\n idx_out < len(normalized_text) - 1\n and idx_in < len(input) - 1\n and normalized_text[idx_out + 1] == input[idx_in + 1]\n )\n\n if not equal and not _is_valid(idx_out, idx_in, normalized_text, input):\n idx_in += 1\n continue\n if idx_in > 0 and idx_out > 0:\n if normalized_text[idx_out - 1] == \" \" and input[idx_in - 1] != \" \":\n normalized_text[idx_out - 1] = \"\"\n\n elif normalized_text[idx_out - 1] != \" \" and input[idx_in - 1] == \" \":\n normalized_text[idx_out - 1] += \" \"\n\n if idx_in < len(input) - 1 and idx_out < len(normalized_text) - 1:\n if normalized_text[idx_out + 1] == \" \" and input[idx_in + 1] != \" \":\n normalized_text[idx_out + 1] = \"\"\n elif normalized_text[idx_out + 1] != \" \" and input[idx_in + 1] == \" \":\n normalized_text[idx_out] = normalized_text[idx_out] + \" \"\n idx_out += 1\n idx_in += 1\n except:\n logging.debug(f\"Skipping post-processing of {''.join(normalized_text)} for '{punct}'\")\n\n normalized_text = \"\".join(normalized_text)\n return re.sub(r' +', ' ', normalized_text)", "title": "" }, { "docid": "b0a7c088205d79feb52333cbf93528a6", "score": "0.57227", "text": "def contains_alpha(text: str) -> bool:\n for character in text:\n if character.isalpha():\n return True\n return False", "title": "" }, { "docid": "84d54e061fcbdde697a9fd2cf3aa1413", "score": "0.5688659", "text": "def post_process_punctuation(text: str) -> str:\n text = (\n text.replace('( ', '(')\n .replace(' )', ')')\n .replace('{ ', '{')\n .replace(' }', '}')\n .replace('[ ', '[')\n .replace(' ]', ']')\n .replace(' ', ' ')\n .replace('”', '\"')\n .replace(\"’\", \"'\")\n .replace(\"»\", '\"')\n .replace(\"«\", '\"')\n .replace(\"\\\\\", \"\")\n .replace(\"„\", '\"')\n .replace(\"´\", \"'\")\n .replace(\"’\", \"'\")\n .replace('“', '\"')\n .replace(\"‘\", \"'\")\n .replace('`', \"'\")\n .replace('- -', \"--\")\n )\n\n for punct in \"!,.:;?\":\n text = text.replace(f' {punct}', punct)\n return text.strip()", "title": "" }, { "docid": "02cd7f9e3954650d9cbe6121f56879ae", "score": "0.5673713", "text": "def remove_punctuations(text):\r\n\r\n arabic_punctuations = \"\"\"`÷×؛<>_()*&^%][ـ،/:\"؟.,'{}~¦+|!”…“–ـ\"\"\"\r\n english_punctuations = string.punctuation\r\n all_punctuations = set(\r\n arabic_punctuations + english_punctuations\r\n ) # remove all non verbatim punctuations\r\n\r\n for p in all_punctuations:\r\n if p in text:\r\n text = text.replace(p, \" \")\r\n return text", "title": "" }, { "docid": "3b9037966a98ca1af0fa8cac1f0519bd", "score": "0.5663751", "text": "def clean_punctuation(doc):\r\n while True:\r\n for i in doc:\r\n if i.text in (\"--\", \"...\", \"…\"):\r\n continue\r\n elif i.text == \"-\":\r\n doc[i.i-1:i.i+2].merge()\r\n break\r\n elif i.is_left_punct and i.text not in \"'’\":\r\n doc[i.i:i.i+2].merge()\r\n break\r\n elif i.is_punct or i.text in(\"’s\", \"'s\", \"n't\", \"n’t\"):\r\n doc[i.i-1:i.i+1].merge()\r\n break\r\n # for-else\r\n else:\r\n break\r\n\r\n return doc", "title": "" }, { "docid": "1a063a640124d867f9f60b42d876527e", "score": "0.5613471", "text": "def is_a_paladrome(chars):\n \n if len(chars) <= 1:\n return True\n elif chars[0] == chars[-1]:\n return is_a_paladrome (chars[1:-1])\n else:\n return False", "title": "" }, { "docid": "fcaadc69751cb490665f92987c074066", "score": "0.56037664", "text": "def isword(cls, nextchar):\n return nextchar.isalpha()", "title": "" }, { "docid": 
"25bbeb8404df2ff9aa1e43cb32d5ad4e", "score": "0.5602563", "text": "def __is_alnum(features: list, pos: int, **_kwargs) -> bool:\n word, _ = features[pos] # type: str\n\n return word.isalnum()", "title": "" }, { "docid": "1c6b9504b995797b6ae2bb645753d929", "score": "0.5591186", "text": "def clean_special_chars(text):\n punct = \"/-'?!.,#$%\\'()*+-/:;<=>@[\\\\]^_`{|}~`\" + '\"\"“”’' + '∞θ÷α•à−β∅³π‘₹´°£€\\×™√²—–&'\n for p in punct:\n text = text.replace(p, ' ')\n return text", "title": "" }, { "docid": "ea1eb0b02753c62d0863c9a61a790cab", "score": "0.5580273", "text": "def remove_punctuations(input_str: str) -> str:\n exclude = set(string.punctuation)\n return ''.join(ch for ch in input_str if ch not in exclude)", "title": "" }, { "docid": "cbad089acb94c718b1bf9d0f2a167817", "score": "0.55611134", "text": "def cleanword(text):\n import string\n\n not_letters = string.punctuation\n clean_string = \"\"\n for char in text:\n if char not in not_letters:\n clean_string += char\n return clean_string", "title": "" }, { "docid": "8237eea695e868861e0d34191a38a421", "score": "0.55591047", "text": "def _remove_punctuation(cls, text: str) -> str:\n\n return \"\".join(x for x in text if x not in cls.PUNCTUATION)", "title": "" }, { "docid": "c353532a0e0171cb19c2b9715dae9f00", "score": "0.5557751", "text": "def remove_punctuation(s):\n for char in ['/', '//', '|', '?', '(', ')', '{', '}', '[', ']', ',', '.', '-', '!',\n '~', '_', '@', ':', '55', '*', ';', '14']:\n s = s.replace(char, \" \")\n s.strip()\n s = filter(lambda x: not re.match(\"[0-9~!@#$%^&*()_\\-+{}\\\":;\\']+\", x), s.split())\n return ' '.join(s)", "title": "" }, { "docid": "420caf037a8b7df6d3e907a066cb0ccb", "score": "0.55482197", "text": "def _remove_punctuations(self,text_):\n punctuations_=set(string.punctuation)\n return ''.join(word for word in text_ if word not in punctuations_)", "title": "" }, { "docid": "445d156fc213adf673e45ed1812f16ea", "score": "0.554702", "text": "def split_punctuation(word, ignore=[abbreviations, numeric, URI, entities]):\n if len(word) == 0 or word[0].isalnum() and word[-1].isalnum():\n # Nothing to do: there is no punctuation at the head or tail of the word.\n # This shortcut makes the function call 2x faster.\n return [word]\n if in_any(word, ignore):\n # If the word is in a known range (e.g. a HTML entity or abbreviation),\n # no further processing is required. We do split any number unit (e.g. 1000km => 1000 km).\n return split_numeric(word)\n if word in contractions_suffixes:\n return [word]\n if word in punctuation_subst:\n # For cases like: livers of bile duct ligation (BDL)- or ethinyl estradiol (EE)-injected rats\n return [word]\n \n # Find the head and the tail of the word containing punctuation.\n p, i, j = [], 0, len(word)-1\n while i<=j and word[i] in punctuation:\n if i==j:\n i+=1\n elif word[i+1] not in '0123456789':\n i+=1\n elif word[i] != '.':\n i+=1\n else:\n break\n\n \n while j>=0 and word[j] in punctuation: j-=1\n # Discard the head or tail if the punctuation is valid (e.g. 
.22 calibre).\n\n try:\n if word[j+1] == ')' and word[i:j+1].count('(') - word[i:j+1].count(')') > 0:\n # There are more closing than opening brackets in the part of the word that is kept\n # so leave the closing bracket(s) attached\n \n #This is a bit more intelligent than the old: if word in punctuation_tail: j=len(word)\n j += word[i:j].count('(') - word[i:j].count(')')\n except IndexError:\n pass\n\n if word in punctuation_head \\\n or word[:j+1] in contractions_apostrophe: i=0\n # If the head and the tail are the same,\n # i.e. the word consists entirely out of punctuation), discard the tail.\n a, b, c = word[:i], word[i:j+1], j>-1 and word[j+1:] or \"\"\n b, c, X = assert_split(b, c, \"\", ignore) # etc., => etc.][. not etc][.,\n if b == c == X == None: return [word]\n # Split the punctuation.\n a = split_chars(a, chunk=\".-'\\\"<>\") # Keep dashes, ellipsis etc. together: -- ...\n c = split_chars(c, chunk=\".-'\\\"<>\")\n # Split units from numbers: We ran a 100km. => We ran a 100 km .\n b = split_numeric(b)\n return [x for x in a+b+c if x != \"\"]", "title": "" }, { "docid": "fef36c2197d4fc6a7849f596f1645bfd", "score": "0.55322886", "text": "def valid_token(token: str, extra_allowed_chars: str) -> bool:\n if '..' in token:\n return False\n for echar in extra_allowed_chars:\n token = token.replace(echar, '')\n return token.isalnum()", "title": "" }, { "docid": "ef9d048350f0a7f5ca745c9e970b4000", "score": "0.5523973", "text": "def remove_punctuation(text: str) -> str:\n return \"\".join(t for t in text if t not in string.punctuation)", "title": "" }, { "docid": "c57c00f86ed8f14ef12455eae62bf957", "score": "0.5519084", "text": "def test_no_punctuation_please():\n\n assert is_palindrome(\"Eva, can I see bees in a cave?\") is True", "title": "" }, { "docid": "c480942f2c9d7ea0fd53362ae777f4dc", "score": "0.55162334", "text": "def punct_clean(sent):\n for punct in PUNCT:\n sent = sent.replace(' %s' % punct, punct)\n sent = sent.replace(' %s' % punct, punct)\n sent = sent.replace(punct, '%s ' % punct)\n sent = sent.replace('%s ' % punct, '%s ' % punct)\n sent = sent.replace('%s ' % punct, '%s ' % punct)\n return sent.strip()", "title": "" }, { "docid": "372021fe36bb233df0cd30881b4eeb2b", "score": "0.5508402", "text": "def remove_punctuations(text, punctuations):\n if not isinstance(text, float):\n text = text.replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n no_punc = text.translate(str.maketrans(punctuations, ' ' * len(punctuations)))\n return re.sub('[ a-zA-Z0-9]', '', no_punc)\n else:\n return re.sub('[ a-zA-Z0-9]', '', text)", "title": "" }, { "docid": "26ff25b019c5c6d369494f397fbddd41", "score": "0.548458", "text": "def removePunctuation(lst):\n\tpunctuation = ['.', ',', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', ':', ';']\n\tpass", "title": "" }, { "docid": "46e83578205a257a98bd8db016629f45", "score": "0.54607207", "text": "def remove_punctuation(string):\r\n \r\n sans_punct = \"\"\r\n \r\n punctuation = \"!\\\"#$%&’()*+,-./:;<=>?@[\\\\]^_‘{|}~\"\r\n \r\n for char in string:\r\n \r\n if char not in punctuation:\r\n \r\n sans_punct += char\r\n \r\n wordlist = string_manip(sans_punct)\r\n \r\n e_words = word_count(\"e\", wordlist) \r\n \r\n list_len = len(wordlist)\r\n \r\n e_percent = (e_words/list_len) * 100\r\n \r\n return \"Your text contains %d words of which %d (%d percent) contain an 'e'\" %(list_len, e_words, e_percent)", "title": "" }, { "docid": "3fa8cc6311e229f99f7babf441998a8a", "score": "0.5458355", "text": "def non_alphanumeric(c):\n\tif 
alphanumeric(c):\n\t\treturn False\n\treturn True", "title": "" }, { "docid": "302aa3e34361be596e0d76eb9f01bb86", "score": "0.54424703", "text": "def remove_punctuation(words):\r\n new_words = []\r\n for word in words:\r\n new_word = re.sub(r'[^\\w\\s]', '', word)\r\n if new_word != '':\r\n new_words.append(new_word)\r\n return new_words", "title": "" }, { "docid": "274133a00a28b123a3e4895aa277d299", "score": "0.5426323", "text": "def check_badchars(data, chars=None):\n if to_int(data) is None:\n to_search = data\n else:\n data = to_hex(to_int(data))[2:]\n if len(data) % 2 != 0:\n data = \"0\" + data\n to_search = codecs.decode(data, 'hex')\n\n if not chars:\n chars = config.Option.get(\"badchars\")\n\n if chars:\n for c in chars:\n if c in to_search:\n return True\n return False", "title": "" }, { "docid": "62dc01ccaeee7b19ee583ddb676ce5ad", "score": "0.54117", "text": "def remove_punctuation(self, text_acc):\n table = string.punctuation\n temp = \" \".join(\n \"\".join(\" \" if ch in table else ch for ch in text_acc).split())\n return temp.split()", "title": "" }, { "docid": "0e484755a2e0ca09ee02000be5cdbc90", "score": "0.5393592", "text": "def remove_punctuations(text):\n table=str.maketrans('','',string.punctuation)\n return text.translate(table)", "title": "" }, { "docid": "5f4b52a0ad2972b0c4bb6ddc1e8005c4", "score": "0.538714", "text": "def remove_punctuation(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "5f4b52a0ad2972b0c4bb6ddc1e8005c4", "score": "0.538714", "text": "def remove_punctuation(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "5f4b52a0ad2972b0c4bb6ddc1e8005c4", "score": "0.538714", "text": "def remove_punctuation(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "5f4b52a0ad2972b0c4bb6ddc1e8005c4", "score": "0.538714", "text": "def remove_punctuation(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "5f4b52a0ad2972b0c4bb6ddc1e8005c4", "score": "0.538714", "text": "def remove_punctuation(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "5f4b52a0ad2972b0c4bb6ddc1e8005c4", "score": "0.538714", "text": "def remove_punctuation(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "1dc8483e319186cbbc7f9325258f69ee", "score": "0.53850174", "text": "def StripPunc(s):\n for c in string.punctuation:\n if c == '-': # Could be negative number, so don't remove '-'.\n continue\n else:\n s = s.replace(c, '')\n return s.strip()", "title": "" }, { "docid": "ae59fab74fa4f293dbe624fbc4cf0e8c", "score": "0.53848994", "text": "def remove_punctuation(words: List[str]) -> List[str]:\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": 
"aaa241fb143bb8837bed115e3c04965f", "score": "0.5384317", "text": "def __split_on_punc(self, text):\n if self.never_split and text in self.never_split:\n return [text]\n chars = list(text)\n i = 0\n start_new_word = True\n output = []\n while i < len(chars):\n char = chars[i]\n if is_punctuation(char):\n output.append([char])\n start_new_word = True\n else:\n if start_new_word:\n output.append([])\n start_new_word = False\n output[-1].append(char)\n i += 1\n\n return [\"\".join(x) for x in output]", "title": "" }, { "docid": "562cc4c8f8baf9377da96181760efaca", "score": "0.5383729", "text": "def _isws(char):\n if \"\".join(char.split()) == \"\":\n return True\n \n return False", "title": "" }, { "docid": "497d26e13ec9fc57a91f854f6f000dd7", "score": "0.5383634", "text": "def check_mail(mail):\n\n characters_list = list(punctuation)\n no_forbidden_characters = True\n\n at_index = characters_list.index('@')\n dot_index = characters_list.index('.')\n del characters_list[at_index], characters_list[dot_index]\n #Remove from list characters, which are allowed\n for character in characters_list:\n\n if character in mail:\n no_forbidden_characters = False\n\n splitted_mail = mail.split(\"@\")\n\n if no_forbidden_characters:\n # Check if '@' appears only once in mail and if \".\" is used\n if mail.count(\"@\") == 1 and splitted_mail[1].count(\".\") >= 1:\n\n try:\n\n if not (splitted_mail[1][0] == \".\" or splitted_mail[1][-1] == \".\" or splitted_mail[1][0] == \" \"):\n return True\n\n except IndexError:\n return None", "title": "" }, { "docid": "94e568bc08779ea695f4f616a84381cc", "score": "0.5373064", "text": "def remove_punctuation(tweet):\n punctuation = set(string.punctuation)\n punctuation.add('»') # French quotes\n punctuation.add('«')\n punctuation.add(\"’\") # French apostrophe\n punctuation.add(u\"'️\")\n\n tweet = ''.join(ch for ch in tweet if ch not in punctuation)\n\n return tweet", "title": "" }, { "docid": "c6b735628a5061ff8e030879fc2f30f1", "score": "0.5370251", "text": "def is_alnum_or_underscore(ch: str):\n return ch.isalnum() or ch == '_'", "title": "" }, { "docid": "2c51cef9b90cd75b81d26bfc959e99c7", "score": "0.5369537", "text": "def remove_punctuation_and_splchars(words):\n new_words = []\n for word in words:\n new_word = re.sub(r'[^\\w\\s]', '', word)\n if new_word != '':\n new_word = remove_special_characters(new_word, True)\n new_words.append(new_word)\n return new_words", "title": "" }, { "docid": "0fbeedd542e1080168a3d246d3858c6c", "score": "0.5361159", "text": "def is_token(s):\r\n for c in s:\r\n if ord(c) < 32 or ord(c) > 128 or c in SEPARATORS:\r\n return False\r\n return True", "title": "" }, { "docid": "73dff10e428a04e53905afaa8b852c1f", "score": "0.53515065", "text": "def has_forbidden_characters(string):\n return any(char in string for char in [\"i\", \"o\", \"l\"])", "title": "" }, { "docid": "996b9bb35b70441290b96b5b34281bf8", "score": "0.53512734", "text": "def punct_tr(in_Text):\n punct_count = len([i for i\n in in_Text\n if re.match('[' + punct + ']+$', i)])\n return punct_count / len(in_Text)", "title": "" }, { "docid": "d935ddf2aa0d86ded885f6f06bc186a9", "score": "0.53494173", "text": "def test_tokenize_punctuation_marks(self):\n expected = ['the', 'first', 'sentence', 'nice', 'the', 'second', 'sentence', 'bad']\n actual = tokenize('The, first sentence - nice. 
The second sentence: bad!')\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "c68f5188cf797b1e73bda0ca96f33aa1", "score": "0.53410083", "text": "def removePunc(self, txtTokens):\n\t\tpunc = string.punctuation\n\t\tpuncList = [p for p in punc]\n\t\treturn [word for word in txtTokens if word not in puncList]", "title": "" }, { "docid": "47eab00249be9461a3ae31a00020f056", "score": "0.5338347", "text": "def test_punctuate_edge_cases(self):\n self.assertIn(ParseTools.punctuate(''), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate('a'), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(')'), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate('\\n'), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate('\\t'), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(' '), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate('\\n '), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate('\\t '), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(' '), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(' \\n'), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(' \\t'), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(' \\n '), {'.', '?', '!'})\n self.assertIn(ParseTools.punctuate(' \\t '), {'.', '?', '!'})", "title": "" }, { "docid": "6ac325fa72eca0b77b99e249e0c42a3b", "score": "0.5336243", "text": "def cleanword(word):\r\n char = list(word)\r\n new_word = []\r\n for i in char:\r\n if i not in string.punctuation:\r\n new_word.append(i)\r\n return \"\".join(new_word)", "title": "" }, { "docid": "c7106900b54f5c9fb4c5d96b163ed37c", "score": "0.5315077", "text": "def _is_printable(char):\n category = unicodedata.category(char)\n return (not category.startswith(\"C\") and\n (not category.startswith(\"Z\") or category == \"Zs\"))", "title": "" }, { "docid": "199683041e6a5308002011a5ed0e9372", "score": "0.531285", "text": "def isWord(s):\n if type(s) != type(''):\n return False\n s = s.replace('_', '')\n return s.isalnum()", "title": "" }, { "docid": "79b64603ae518eba1dd5ac9af9a58124", "score": "0.53086174", "text": "def test_all_punctuation(self):\n for ua in (\n '%7C%7C%27',\n ']]>><',\n ):\n dd = DeviceDetector(ua).parse()\n self.assertNotIn('client', dd.all_details)", "title": "" }, { "docid": "a86ad0d1571a9ee113e076fc99608190", "score": "0.53084594", "text": "def remove_punctuation(self):\r\n punctuation_set = set(string.punctuation)\r\n self.text = ''.join(char for char in self.text if char not in punctuation_set)\r\n return self.text", "title": "" }, { "docid": "9a74bbaad73b906deaac8fb6704c81db", "score": "0.5300859", "text": "def has_non_chinese_char(s):\n for c in s:\n if not is_chinese_char(c):\n return True\n return False", "title": "" }, { "docid": "62312cc997e6daf70c2b4d173f8daa36", "score": "0.52955246", "text": "def is_english(s):\n return all([c in string.printable for c in s])", "title": "" }, { "docid": "62312cc997e6daf70c2b4d173f8daa36", "score": "0.52955246", "text": "def is_english(s):\n return all([c in string.printable for c in s])", "title": "" }, { "docid": "79b95b3c3324f3a358239b61596a7c07", "score": "0.5295472", "text": "def remove_punctuation(input_text: str, punctuations: Optional[str] = None) -> str:\n if punctuations is None:\n punctuations = string.punctuation\n processed_text = input_text.translate(str.maketrans('', '', punctuations))\n return processed_text", "title": "" }, { "docid": "213afd724fd06c55b5ad1952f9908e1c", "score": "0.527211", "text": "def isAlphanum(c):\n return ((c >= 'a' and c <= 'z') or (c >= '0' and c <= '9') 
or\n (c >= 'A' and c <= 'Z') or c == '_' or c == '$' or c == '\\\\' or (c is not None and ord(c) > 126));", "title": "" }, { "docid": "17affde581a813b85b9870968fb6af0b", "score": "0.5247959", "text": "def remove_punc(self,punc_list): \r\n tokens = nltk.word_tokenize(self.text)\r\n take_tokens = True\r\n newtext = []\r\n \r\n for char in charlist: ## remove punctuations from each token\r\n newlist = []\r\n if take_tokens:\r\n newtext = tokens\r\n take_tokens = False\r\n for token in newtext:\r\n newlist.append(token.replace(char,' '))\r\n newtext = newlist\r\n \r\n newtext = ' '.join(newtext) ## join the tokens back into a string\r\n \r\n return newtext", "title": "" }, { "docid": "7195f8aea56cf5221b0b70eb2c8f3dd5", "score": "0.5226072", "text": "def remove_punctuation(html):\n return html.translate(string.maketrans(\"\", \"\"),\n \"[]{}\\\"';,|.*\\?!@#$%^&*()+-=:/\\n\")", "title": "" }, { "docid": "45c003c8270ed1151312d3017ac3db6d", "score": "0.5218421", "text": "def test_cpf_validator(self):\n cpf = cpf_with_punctuation()\n self.assertIsNone(cpf_validator(cpf))", "title": "" }, { "docid": "a8877c6980c4c6551cdea9d34b8bacfa", "score": "0.519142", "text": "def remove_punctuation(data: str) -> str:\n symbols = \"!\\\"#$%&()*+-./:;<=>?@[\\]^_`{|}~\\n\"\n for i in range(len(symbols)):\n data = np.char.replace(data, symbols[i], ' ')\n data = np.char.replace(data, \" \", \" \")\n data = np.char.replace(data, ',', '')\n data = np.char.replace(data, \"'\", \"\")\n data = np.char.replace(data, \".\", \"\")\n return data", "title": "" }, { "docid": "74aa211038015ab26caa511993293dc8", "score": "0.5177788", "text": "def test_text_complexifier_on_punctuation_only(self):\n tc = TextComplexifier()\n self.assertEqual(tc.complexify_text(\",\"), \",\")", "title": "" }, { "docid": "e71fd06a9bccc63990e2d16bc902b5ae", "score": "0.51765496", "text": "def remove_punctations_fun(self): \n self.doc = re.sub('[^a-zA-Z0-9]', ' ', self.doc)\n self.doc = self.doc.strip()\n self.doc = \" \".join(self.doc.split())", "title": "" }, { "docid": "2c4c439a041e96eb84b1b3f95b22bdec", "score": "0.51749027", "text": "def has_no_letter(word, letter):\n for char in word:\n if char == letter:\n return False\n return True", "title": "" }, { "docid": "e909991a8b5cf3c91589c89b0a27966e", "score": "0.51677334", "text": "def remove_punctuation(word):\n table = str.maketrans({char:None for char in word if char in string.punctuation})\n return word.translate(table)", "title": "" } ]
bd2a43d574079b697d6f76479bd76e66
Binary shift map. Optional parameter to include random noise in the 50th+ binary digits.
[ { "docid": "b80aa990a9f8502fd75da9c487d0d191", "score": "0.58863705", "text": "def modulo_map(x_n, noise=False):\n mapped = 2.*x_n % 1\n if noise:\n mapped = binary_noise(mapped)\n return mapped", "title": "" } ]
[ { "docid": "ccaaf1870abeed6117be85c37317a87d", "score": "0.63734806", "text": "def binary_noise(x):\n ## Create a bit mask based on the\n ## position passed in\n ## produces '10000' if we pass in position=4\n ## our bit in the '4th' position is set to 1\n binary = bin(x)[2:]\n for position in range(50, len(binary)):\n bit_mask = random.randint(0, 1) << position\n x = bit_mask | x\n return int(binary, 2)", "title": "" }, { "docid": "f50864a604f846f11d5843b8f9836bd5", "score": "0.6055695", "text": "def shift(buf: AnyWritableBuf, bits: int, shift: int, /,) -> None:", "title": "" }, { "docid": "7c9655331e7f7d77a9e64ce1b8106e97", "score": "0.59076214", "text": "def generate(self):\r\n for i in range(624):\r\n n = self.state[i] & 0x80000000\r\n n += self.state[(i+1) % 624] & 0x7fffffff\r\n self.state[i] = self.state[(i+397) % 624] ^ (n >> 1)\r\n if n % 2 != 0:\r\n self.state[i] ^= 0x9908b0df", "title": "" }, { "docid": "e0f99eddea71f43edd27cb74d4530e91", "score": "0.58828276", "text": "def bpsk(bits):\n return 1 - 2 * bits", "title": "" }, { "docid": "81292857b685a35923b6a9f00834695b", "score": "0.58494395", "text": "def _remapBits(inp: int, mapping: Dict[int, int]):\n f = 0\n for bit in itertools.count():\n if (1 << bit) > inp:\n break\n if (1 << bit) & inp:\n f = f | (1 << mapping[bit])\n\n return f", "title": "" }, { "docid": "cc39065b6f6436b005f985edfff6b627", "score": "0.5604494", "text": "def bits_stripping(bit_start,bit_count,value):\n bitmask=pow(2,bit_start+bit_count)-1\n return np.right_shift(np.bitwise_and(value,bitmask),bit_start)", "title": "" }, { "docid": "b722e594dfec0b1c756566c6184a65e1", "score": "0.55794287", "text": "def p8(bits):\n return permutation(bits, (6,3,7,4,8,5,10,9))", "title": "" }, { "docid": "11bfa1a056fcb0412bfc7860c0b2ab9a", "score": "0.5556215", "text": "def binary_map(input_file):\n \n ascii_grid = np.loadtxt(input_file, skiprows=6)\n ascii_zeros = np.where(ascii_grid<=0, 0, ascii_grid)\n ascii_binary = np.where(ascii_zeros>0, 1, ascii_zeros)\n \n return(ascii_binary)", "title": "" }, { "docid": "31bec26ba57e096cf4414c537038d294", "score": "0.55434245", "text": "def relevant(hsh, shift):\n return hsh >> shift & BMAP", "title": "" }, { "docid": "caba079e663161db5e10efb76bd7d17c", "score": "0.5533906", "text": "def test_convert_to_binary():\r\n # Create a result object to access `_convert_to_binary`\r\n state = np.zeros(4)\r\n state[0] = 1\r\n result = gates.M(0)(state, nshots=100)\r\n\r\n import itertools\r\n nbits = 5\r\n binary_samples = result._convert_to_binary(np.arange(2 ** nbits),\r\n nbits).numpy()\r\n target_samples = np.array(list(itertools.product([0, 1], repeat=nbits)))\r\n np.testing.assert_allclose(binary_samples, target_samples)", "title": "" }, { "docid": "6e9c7f0ff41547a1fb35bf5c6522f392", "score": "0.5529085", "text": "def binary_permutations(n_bits):\n return np.right_shift(np.arange(2**n_bits)[:, None], np.arange(n_bits-1, -1, -1)[None, :]) & 1", "title": "" }, { "docid": "7d1f1350c3d34d42e4f140d801e5b2ce", "score": "0.5522192", "text": "def tobin(x, count=8):\n return \"\".join(map(lambda y:str((x>>y)&1), range(count-1, -1, -1)))", "title": "" }, { "docid": "3e40535f0f98f73225efa335a833d161", "score": "0.55082935", "text": "def mutate(self):\n for x in range(len(self.bits)):\n if random.random() < self.mutation_rate:\n b = int(self.bits[x])\n self.bits = self.bits[0:x] + str(0 if b else 1) + self.bits[x+1:]", "title": "" }, { "docid": "38e07de902f6469218b76dfff570996e", "score": "0.54785246", "text": "def bits():\n for player in 
xrange(2):\n for i in xrange(1, 26):\n pt = 25 - i if player else i\n sign = -1 if player else 1\n for j in xrange(max(self[pt] * sign, 0)):\n yield 1\n yield 0\n for pad in xrange(sum(self.stones_off)):\n yield 0", "title": "" }, { "docid": "cc7b869dbdae0e4ea9c13869a9a17f31", "score": "0.54587287", "text": "def shift(string, bits):\n end = string[:len(string) - bits]\n start = ''\n for i in range(bits):\n start += '0'\n\n return start + end", "title": "" }, { "docid": "e08538e1d42eb441fad4d2b024c70741", "score": "0.54168844", "text": "def test_bypass_shift_register(self):\n instruction = JTAG_INSTR['BYPASS']\n self.jtag.change_state('shift_ir')\n retval = self.jtag.shift_and_update_register(instruction)\n print(\"retval: 0x%x\" % int(retval))\n self.jtag.go_idle()\n self.jtag.change_state('shift_dr')\n _in = BitSequence('011011110000'*2, length=24)\n out = self.jtag.shift_and_update_register(_in)\n self.jtag.go_idle()\n print(\"BYPASS sent: %s, received: %s (should be left shifted by one)\"\n % (_in, out))", "title": "" }, { "docid": "7082ab5367bc3edc44c32ba10684b75a", "score": "0.5410761", "text": "def generate_random_bits(b_amount, b_lenght):\n \n bits = np.random.randint(0, 2, size=(b_amount, b_lenght)) \n \n inds = is_in_that(bits, np.zeros(b_lenght)) # all zero vector(s) must be eliminated\n \n if inds[0] == -1:\n return bits\n else:\n for i in inds:\n bits[i] = np.bitwise_xor(bits[-1], bits[0])\n return bits", "title": "" }, { "docid": "4a4f22919032f0ed4d221abe1565177c", "score": "0.5350652", "text": "def bin(number):", "title": "" }, { "docid": "c0d7140248ca5a840a2f090080ea7f13", "score": "0.53254193", "text": "def shifty( b ):\n return 1 << b", "title": "" }, { "docid": "92b2632046b378963cf0c4a5eccdb772", "score": "0.5320673", "text": "def gen_mask(n: int) -> int:\n return (1 << n) - 1", "title": "" }, { "docid": "e6cac978f4231ddcf72af2f45af334fe", "score": "0.530767", "text": "def uint82bin(n, count=8):\n return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])", "title": "" }, { "docid": "335f9e88d7864c5d3016c2efe3735cf3", "score": "0.5304699", "text": "def set(self, n):\n\n index = n / 8\n position = n % 8\n self.bitarray[index] = self.bitarray[index] | 1 << (7 - position)", "title": "" }, { "docid": "bd1e83e0932f78f324ed4ea62cd5df48", "score": "0.52734905", "text": "def bitmap(sequence):\n return reduce(lambda x,y: x|y, map(lambda i: long('1'+'0'*i,2), sequence), 0)", "title": "" }, { "docid": "e1c45c1081de5c6bcd3e8c2f80ec318e", "score": "0.5270874", "text": "def _shift_left(bits):\n bits = list(bits)\n # Append the least significant bit to the end.\n res = ''.join(bits[1:]) + bits[0]\n _show(''.join(bits), 'LEFT-SHIFT', res)\n return res", "title": "" }, { "docid": "80675d720edbe30fd22dea3f577dd8d1", "score": "0.52685666", "text": "def get_binary_change_map(data, method='k_means'):\n if method == 'k_means':\n cluster_center = KMeans(n_clusters=2, max_iter=1500).fit(data.T).cluster_centers_.T # shape: (1, 2)\n # cluster_center = k_means_cluster(weight, cluster_num=2)\n print('k-means cluster is done, the cluster center is ', cluster_center)\n dis_1 = np.linalg.norm(data - cluster_center[0, 0], axis=0, keepdims=True)\n dis_2 = np.linalg.norm(data - cluster_center[0, 1], axis=0, keepdims=True)\n\n bcm = np.copy(data) # binary change map\n if cluster_center[0, 0] > cluster_center[0, 1]:\n bcm[dis_1 > dis_2] = 0\n bcm[dis_1 <= dis_2] = 255\n else:\n bcm[dis_1 > dis_2] = 255\n bcm[dis_1 <= dis_2] = 0\n elif method == 'otsu':\n bcm, threshold = otsu(data, num=400)\n 
print('otsu is done, the threshold is ', threshold)\n\n return bcm", "title": "" }, { "docid": "b16cc023ec1ae34da679a842323cb0c4", "score": "0.52598655", "text": "def bitmap(sequence):\n return reduce(lambda x,y: x|y, map(lambda i: long('1'+'0'*i,2), sequence), 0)", "title": "" }, { "docid": "ab17f8773fba10384af04bd52c4fa21d", "score": "0.5241787", "text": "def prefix1bits(b):\n return 0 if b&1==0 else 1+prefix1bits(b>>1)", "title": "" }, { "docid": "47679ab1552d067816118d062bc2d878", "score": "0.52381504", "text": "def gen_bits(n):\n \n bits = n*n; \n nedges = 2**bits \n \n ret = []\n for i in range(0, nedges):\n \n right = [int(x) for x in bin(i)[2:]]\n lst = ([0] * (bits - len(right)))\n lst.extend(right)\n \n ret.append(lst)\n return ret", "title": "" }, { "docid": "ef9cec7c4e363029be48778550cc6c7b", "score": "0.52363884", "text": "def SSbi(x, i):\n # x0 is the most significant bit, x7 the least significant\n x0 = (x >> 7) & 1\n x1 = (x >> 6) & 1\n x2 = (x >> 5) & 1\n x3 = (x >> 4) & 1\n x4 = (x >> 3) & 1\n x5 = (x >> 2) & 1\n x6 = (x >> 1) & 1\n x7 = x & 1\n binResult = []\n if i == 0:\n n0 = Sb1([x4, x1, x6, x3])\n n1 = Sb1([x0, x5, x2, x7])\n binResult = [n1[0], n0[1], n1[2], n0[3],\n n0[0], n1[1], n0[2], n1[3]]\n elif i == 1:\n n0 = Sb1([x1, x6, x7, x0])\n n1 = Sb1([x5, x2, x3, x4])\n binResult = [n0[3], n0[0], n1[1], n1[2],\n n1[3], n1[0], n0[1], n0[2]]\n elif i == 2:\n n0 = Sb1([x2, x3, x4, x1])\n n1 = Sb1([x6, x7, x0, x5])\n binResult = [n1[2], n0[3], n0[0], n0[1],\n n0[2], n1[3], n1[0], n1[1]]\n elif i == 3:\n n0 = Sb1([x7, x4, x1, x2])\n n1 = Sb1([x3, x0, x5, x6])\n binResult = [n1[1], n0[2], n0[3], n1[0],\n n0[1], n1[2], n1[3], n0[0]]\n result = int(\"\".join(binResult), 2)\n return result", "title": "" }, { "docid": "afaf8da698705b6bf8e326df019870c9", "score": "0.52307826", "text": "def __ilshift__(self, n):\n if n < 0:\n raise ValueError(\"Cannot shift by a negative amount.\")\n if not self.len:\n raise ValueError(\"Cannot shift an empty bitstring.\")\n if not n:\n return self\n n = min(n, self.len)\n return self._ilshift(n)", "title": "" }, { "docid": "b6cd24c9553827871e50f5e3cad5c380", "score": "0.52307415", "text": "def binary_encode(x: int) -> List[int]:\n return [x >> i & 1 for i in range(10)]", "title": "" }, { "docid": "eb5bd2a5808e18c40f7eb4c92bcd9d8c", "score": "0.52295756", "text": "def uint82bin(self, n, count=8):\n return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])", "title": "" }, { "docid": "03709e2653d3007f5207c4548316b6ae", "score": "0.52257127", "text": "def buildCoder(shift):\n alphabet_shifted = {}\n # makes a list of uppercase letters shifted by a specific value\n upper_alphabet = string.ascii_uppercase\n upper_shifted = upper_alphabet\n letter_up = upper_shifted[0:shift]\n upper_shifted += letter_up\n upper_shifted = upper_shifted[shift:]\n\n #makes a list of lowercase letters shifted by a specific value\n lower_alphabet = string.ascii_lowercase\n lower_shifted = lower_alphabet\n letter_low = lower_shifted[0:shift]\n lower_shifted += letter_low\n lower_shifted = lower_shifted[shift:]\n\n # creates a dictionary of the original alphabet mapped to the shifted alphabet\n for lower_value, shifted_value in zip(lower_alphabet, lower_shifted):\n alphabet_shifted[lower_value] = shifted_value\n \n for upper_value, shifted_value in zip(upper_alphabet, upper_shifted):\n alphabet_shifted[upper_value] = shifted_value \n \n \n return alphabet_shifted", "title": "" }, { "docid": "1f366f6ed49d6db264a32fcc6acb8472", "score": "0.52126795", "text": "def 
_random_bits_value(width: int) -> str:\n return '0x%x' % random.getrandbits(width)", "title": "" }, { "docid": "961d19e25f3ef9317c8b671bd38c3562", "score": "0.52088803", "text": "def onbits(b):\n return 0 if b==0 else (1 if b&1==1 else 0) + onbits(b>>1)", "title": "" }, { "docid": "6ebf6ed9d17a6396cad6731878c11480", "score": "0.51951134", "text": "def binary_data():\n pool = range(0, 256)\n return random_data(pool)", "title": "" }, { "docid": "badae923b9f82f478c713b3f2c02de60", "score": "0.51824695", "text": "def test_5(self):\n result = swap_bits(5, 5, 12)\n self.assertEqual(result, 4097)", "title": "" }, { "docid": "69ff4d45f0e7422a2e3c3d6b5454bfea", "score": "0.5175169", "text": "def gen_data(self):\n rand = random.randint(0,(2**self.word_size)-1)\n data_bits = self.convert_to_bin(rand,False)\n return data_bits", "title": "" }, { "docid": "b7cf5e4f57180c349bca078e9097c1bc", "score": "0.5174953", "text": "def gen1mask(size):\n mask = 0\n for i in range(size):\n mask |= LSBCodec.genmask(i)\n return mask", "title": "" }, { "docid": "278721aa7848b4da5a4d45031f0aa65e", "score": "0.5171648", "text": "def rotate_through_carry(bits):\n pass", "title": "" }, { "docid": "c8cbe32d36dca1d89af202026b39dc3f", "score": "0.51685256", "text": "def left_rotate(bits, n):\n return right_rotate(bits,-n)", "title": "" }, { "docid": "0503f02110d878448fafaa4cf40bcc08", "score": "0.5167322", "text": "def int2bin(n, count=24):\n return \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])", "title": "" }, { "docid": "57d871562a03f3dcbc77a8d2a49f6c40", "score": "0.5161542", "text": "def setShift(x):\n global shift, shifted_lower, shifted_upper\n shift = x\n shifted_lower = lower_alph[x:] + lower_alph[:x]\n shifted_upper = upper_alph[x:] + lower_alph[:x]", "title": "" }, { "docid": "54c054de84bd4131098e7c809779aec1", "score": "0.5157684", "text": "def binary(num, length_list=6):\n binary = []\n for i in range(length_list):\n binary.insert(0, [num&1])\n num = num >> 1 \n return binary", "title": "" }, { "docid": "3e0204530d58281c8c879361dac2628e", "score": "0.5149511", "text": "def create_lbin(marker_index):\n l = [0] * 15\n l[marker_index] = 1\n return l", "title": "" }, { "docid": "8be0f30444e484ddfd3e542d6750dd14", "score": "0.51456773", "text": "def int2bin(n, count=24):\n\t#\n\t#\n\treturn \"\".join([str((n >> y) & 1) for y in range(count-1, -1, -1)])", "title": "" }, { "docid": "cff725d905147a2a19f5d86a8d4a3f3a", "score": "0.5104882", "text": "def col_map(self, val):\n val += 1\n val *= 128\n return int(val)", "title": "" }, { "docid": "9adf17029067a8f01b94829ea962623b", "score": "0.51019734", "text": "def grayCodeBinary(n: int) -> List[int]:\n # Zero bits!\n if not n:\n return ['0']\n # One bit case -> either 0 or 1\n if n == 1:\n return ['0', '1']\n # Get binary codes with one less bit \n lower = grayCodeBinary(n-1)\n codes = []\n zero = True\n # Add bit to binary codes\n for low in lower:\n # Alternate using 0 or 1 first\n if zero:\n x = '0'\n y = '1'\n else:\n x = '1'\n y = '0'\n code = low + x\n codes += [code]\n code = low + y\n codes += [code]\n zero = not zero\n return codes", "title": "" }, { "docid": "a3d31f5dae77e1d65b1fbcb17283c5f0", "score": "0.509823", "text": "def bits(n):\n while n:\n yield n & 1\n n >>= 1", "title": "" }, { "docid": "007e4ac26359f3818708b242d9fe63c7", "score": "0.509541", "text": "def test_6(self):\n result = swap_bits(6, 6, 1)\n self.assertEqual(result, 68)", "title": "" }, { "docid": "a9a016dc6478e371b393bc376d5382c8", "score": "0.50841385", "text": "def 
test_high_bits_encryption_regression(self):\n value = \"\\xFF\\xFF\\xFF\\xFF\\xFF\"\n with mock.patch(\"random.randint\") as random_mock:\n random_mock.side_effect=random.Random(34).randint\n\n for nbits in (10, 16, 20):\n K = rsa.RSAPrivateKey(10)\n k = K.GetPublicKey()\n\n msg = rsa.Message.Encode(value, K.N)\n\n self.assertEquals(K.Decrypt(k.Encrypt(msg)).Decode(), value)", "title": "" }, { "docid": "8c6630a34d4264b476831e056c220c69", "score": "0.50802064", "text": "def _bits_to_raw(self):\r\n self.raw = \"1\" * self.bits + \"0\" * (32 - self.bits)", "title": "" }, { "docid": "143cb932913abb7acadb3d9583c2232a", "score": "0.507397", "text": "def __rlshift__(self, value):\n\n return self._combine(self.BITLSHIFT, value, True)", "title": "" }, { "docid": "f5c28b06323f1a0053a0d75a2490beeb", "score": "0.5069328", "text": "def corrupt_bits(s, p=0.01, n=None):\n s = array.array(\"B\",str(s))\n l = len(s)*8\n if n is None:\n n = max(1,int(l*p))\n for i in random.sample(xrange(l), n):\n s[i/8] ^= 1 << (i%8)\n return s.tostring()", "title": "" }, { "docid": "75b23973c083b7117b09ea81ce37515c", "score": "0.50690794", "text": "def s(X):\n \n Xp = bytearray(4)\n \n # Process the first 6 bits in byte 0\n row = ((X[0] & (2**7)) >> 6) ^ ((X[0] & (2**2)) >> 2)\n column = ((X[0] & (2**7 - 1)) >> 4)\n Xp[0] = S1_TABLE[row][column] << 4\n \n # Process the last 2 bits in byte 0 and first 4 bits in byte 1\n row = (X[0] & (2**1)) & (X[1] ^ ((2**4) >> 4))\n column = ((X[0] & (2**1 - 1)) << 3) & (X[1] >> 5)\n Xp[0] = Xp[0] ^ S1_TABLE[row][column]\n \n # Process the last 4 bits in byte 1 and first 2 bits in byte 2\n row = ((X[1] & (2**3)) >> 2) ^ (X[2] & ((2**6) >> 6))\n column = ((X[1] & (2**3 - 1)) << 1) & (X[2] >> 7)\n Xp[1] = S1_TABLE[row][column] << 4\n \n # Process the last 6 bits in byte 2\n row = ((X[2] & (2**5)) >> 4) ^ (X[2] & (2**0))\n column = (X[2] & (2**5 - 1)) >> 1\n Xp[1] = Xp[1] ^ S1_TABLE[row][column]\n \n # Process the first 6 bits in byte 3\n row = ((X[3] & (2**7)) >> 6) ^ ((X[3] & (2**2)) >> 2)\n column = ((X[3] & (2**7 - 1)) >> 4)\n Xp[2] = S1_TABLE[row][column] << 4\n \n # Process the last 2 bits in byte 3 and first 4 bits in byte 4\n row = (X[3] & (2**1)) & (X[4] ^ ((2**4) >> 4))\n column = ((X[3] & (2**1 - 1)) << 3) & (X[4] >> 5)\n Xp[2] = Xp[2] ^ S1_TABLE[row][column]\n \n # Process the last 4 bits in byte 4 and first 2 bits in byte 5\n row = ((X[4] & (2**3)) >> 2) ^ (X[5] & ((2**6) >> 6))\n column = ((X[4] & (2**3 - 1)) << 1) & (X[5] >> 7)\n Xp[3] = S1_TABLE[row][column] << 4\n \n # Process the last 6 bits in byte 5\n row = ((X[5] & (2**5)) >> 4) ^ (X[5] & (2**0))\n column = (X[5] & (2**5 - 1)) >> 1\n Xp[3] = Xp[3] ^ S1_TABLE[row][column]\n \n return Xp", "title": "" }, { "docid": "a74156a2090120888c11b28ac8e4a36f", "score": "0.50661224", "text": "def left_shift(num, length=5):\n\t\n\tnum = (num << 1) + get_bit(num, 1, length)\n\t\n\tmask = 0\n\tfor x in range(length):\n\t\tmask = (mask << 1) + 1\n\t\n\treturn num & mask", "title": "" }, { "docid": "220e7a2daebe75fbf680ad7b92d1e989", "score": "0.50611323", "text": "def advance(ip_binary, bits):\n ip_binary = ip_binary & ((2L<<31) - (2L<<(31-bits)))\n ip_binary += 1<<(32-bits)\n return ip_binary", "title": "" }, { "docid": "2fac6dcb73488a2fce2e7ace96b995f2", "score": "0.50599504", "text": "def test_idcode_shift_register(self):\n instruction = JTAG_INSTR['IDCODE']\n self.jtag.change_state('shift_ir')\n retval = self.jtag.shift_and_update_register(instruction)\n print(\"retval: 0x%x\" % int(retval))\n self.jtag.go_idle()\n 
self.jtag.change_state('shift_dr')\n idcode = self.jtag.shift_and_update_register(BitSequence('0'*32))\n self.jtag.go_idle()\n print(\"IDCODE (idcode): 0x%08x\" % int(idcode))", "title": "" }, { "docid": "0f35cb0a4407f60f730d0911bb6d8617", "score": "0.5054743", "text": "def __lshift__(self, other):\n self_bin = self._to_bin()\n for i in range(int(str(other))):\n self_bin.tail.next = TwoWayNode('0', self_bin.tail, None)\n self_bin.tail = self_bin.tail.next\n return self_bin._from_bin()", "title": "" }, { "docid": "1d877afc6583739167c5216f042096f2", "score": "0.50496465", "text": "def noise():\n args = parse_arguments()\n with contextlib.closing(BitInputStream(open(args.input_file, \"rb\"))) as bit_in, \\\n contextlib.closing(BitOutputStream(open(args.output_file, \"wb\"))) as bit_out:\n bit = bit_in.read_bit()\n while bit > -1:\n bit_out.write_bit(random_swap(bit, args.p))\n bit = bit_in.read_bit()", "title": "" }, { "docid": "397faa14f644c150ecb715c6fbd336b2", "score": "0.50471854", "text": "def generate_bit_mask(n_bits):\n dist0 = 0\n dist1 = [1 << i for i in range(n_bits)]\n dist2 = []\n for i in range(n_bits-1):\n for j in range(i+1, n_bits):\n mask1 = 1 << i\n mask2 = 1 << j\n dist2.append(mask1 ^ mask2)\n mask = [dist0] + dist1 + dist2\n return mask", "title": "" }, { "docid": "a75b76e60fd4cc78cb13ec3adab92546", "score": "0.5044265", "text": "def rotl(x ,n):\n return ((x << n) | (x >> (32 - n))) & 0xffffffff", "title": "" }, { "docid": "f4a87196e9c02d03258919987ec594ee", "score": "0.5039154", "text": "def ExtendedBinaryGolayCode():\n B = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1],\\\n [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0],\\\n [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1],\\\n [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],\\\n [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1],\\\n [0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1],\\\n [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1],\\\n [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0],\\\n [0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0],\\\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0],\\\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1],\\\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]\n V = span(B, GF(2))\n return LinearCode(V, d=8)\n # C = BinaryGolayCode()\n # return C.extended_code()", "title": "" }, { "docid": "ce412f3caba78a7c2b7700207b7c5445", "score": "0.502693", "text": "def buildCoder(shift):\n ### TODO \n dict_coder = {}\n\n # this function use ord() to convert ascii into interger\n # however, space should be handled as an exception due to discontinuity\n # ord('A') = 65, ord('B') = 66, ..., ord('Z') = 90\n # ord('a') = 97, ord('b') = 98, ..., ord('Z') = 122\n # using 65 as the base_shift for upper cases to make these letters align to A \n # using 97 as the base_shift for lower cases to make these letters align to a\n # i.e. 
' ' = 0, 'a' = 1, 'b' = 2, ..., 'z' = 26\n baseC = 65\n baseL = 97\n letter_size = 26\n\n # dealing with lower cases\n # create mapping with given shift with cyclic buffer\n # dealing with space and letters\n for idx in range(letter_size):\n key = chr(idx+baseL)\n\n #if (idx+shift) % letter_size == 0:\n # value = 'z'\n #else:\n #print chr(idx+baseL), chr(((idx+shift) % letter_size) + baseL)\n value = chr(((idx+shift) % letter_size) + baseL) \n dict_coder[key] = value\n\n # dealing with upper cases\n for idx in range(letter_size):\n key = chr(idx+baseC)\n #if (idx+shift) % letter_size == 0:\n # value = 'Z'\n #else:\n \n #print chr(idx+baseC), chr(((idx+shift) % letter_size) + baseC)\n value = chr(((idx+shift) % (letter_size)) + baseC)\n dict_coder[key] = value\n\n return dict_coder", "title": "" }, { "docid": "27542c890430930e935ce5eae92f3d78", "score": "0.50261575", "text": "def nmap():", "title": "" }, { "docid": "ef7a1444127e43954334976856f2424a", "score": "0.5024641", "text": "def _shift_sample(sample, k, out):\n m = len(sample)\n for old_pos in range(k):\n new_pos = m - k + old_pos\n out[new_pos] = sample[old_pos]\n\n for new_pos, old_pos in enumerate(range(k, m)):\n out[new_pos] = sample[old_pos]", "title": "" }, { "docid": "edb35ab8b0b588774cd534f8cbdc66de", "score": "0.5019971", "text": "def __ilshift__(self, bindee):\n return self.bind(bindee)", "title": "" }, { "docid": "a88a17da94661212d5338dd3013697a0", "score": "0.50060385", "text": "def _shift_right(bits):\n bits = list(bits)\n lsb = bits[-1]\n # Append the most significant bit to the beginning.\n res = lsb + ''.join(bits[:-1])\n _show(''.join(bits), 'RIGHT-SHIFT', res)\n return res", "title": "" }, { "docid": "0ad7cd549030078076ce9f795959c1ab", "score": "0.5001131", "text": "def bits(n, width):\r\n assert n < 2**width\r\n bin = []\r\n for i in range(width):\r\n bin.insert(0, 1 if n&(1<<i) else 0)\r\n return bin", "title": "" }, { "docid": "eb905786d517f5f5ecb6de4e3b9ff2ed", "score": "0.50008816", "text": "def shift_throughputs(self, shift):\n try:\n self.newbp \n print '# self.newbp already exists - using existing newbp and adding this shift.'\n except:\n self.newbp = deepcopy(self.basebp)\n print '# creating newbp and applying shift.'\n for f in self.filterlist:\n # Shift each bandpass.\n if isinstance(shift, dict):\n self.newbp[f].wavelen += shift[f]\n else:\n self.newbp[f].wavelen += shift\n # Resample onto the original grid (so that wavelengths start/end at intended locations).\n self.newbp[f].resampleBandpass()\n self.newbp[f].sbTophi()\n return", "title": "" }, { "docid": "167cccb8b15bf26897c870a3e65342c7", "score": "0.4998474", "text": "def make_bump_map(self):\n import numpy as np\n a = np.array(self.im, dtype=np.uint8)\n a = np.average(a, axis=2, weights=[1.0, 1.0, 1.0, 0.0]).astype(int)\n b = [[0.01, 0.025, 0.05, 0.025, 0.01],\n [0.025,0.05, 0.065,0.05, 0.025],\n [0.05, 0.065, 0.1, 0.065, 0.05],\n [0.025,0.05, 0.065,0.05, 0.025],\n [0.01, 0.025, 0.05, 0.025, 0.01]]\n c = np.zeros(a.shape, dtype=np.uint8)\n steps = [i - 2 for i in range(5)]\n for i, istep in enumerate(steps):\n for j, jstep in enumerate(steps):\n c += (np.roll(np.roll(a, istep, 0), jstep, 1) * b[i][j]).astype(np.uint8)\n cx = np.roll(c, 1, 0)\n cy = np.roll(c, 1, 1)\n d = np.zeros((a.shape[0], a.shape[1], 4), dtype=np.uint8)\n d[:, :, 0] = ((c - cy) + 127).astype(int)\n d[:, :, 1] = ((c - cx) + 127).astype(int)\n d[:, :, 2] = (np.clip((65025 - (127 - d[:, :, 0]) ** 2 - (127 - d[:, :, 1]) ** 2) ** 0.5, 0, 255)).astype(int)\n d[:, :, 3] = 255\n return 
Image.fromarray(d)", "title": "" }, { "docid": "c3bc588c7a1777b805c02a0505e8b90c", "score": "0.49889207", "text": "def to_binary(num):\n # Your code goes here!\n pass # Remove me after starting", "title": "" }, { "docid": "bf02a02cd1acb6186f2d07af6fbe9b3a", "score": "0.49841186", "text": "def bits(data):\n\treturn [format(ord(c),'08b') for c in data]", "title": "" }, { "docid": "cec89b8e6885b8b03ae5db5203fc0e89", "score": "0.49765685", "text": "def __getBitRotatedBinaryString(self):\n maxBit = 32\n rotatedBlock = \"\"\n for i in range(maxBit):\n if i < self.block:\n rotatedBlock += \"1\"\n else:\n rotatedBlock += \"0\"\n logging.debug(\"CLASS subnetRange:__getBitRotated,rotatedBlock=\"+str(rotatedBlock))\n return rotatedBlock", "title": "" }, { "docid": "8bc0b818ed02fa63f8e1b3ac4124cfcb", "score": "0.4967482", "text": "def _bit_rotate_right(value, length):\n value=int(value)\n length=int(length)\n return (value >> 1) | ((value & 1) << (length - 1))", "title": "" }, { "docid": "603029add6f640ba0d05538950e2b6f2", "score": "0.49671462", "text": "def generate_binary(k: int) -> list:\n binaries = []\n i = 0\n\n while i < 2**k:\n if len(binaries) == 0:\n binaries.append(\"0\"*k)\n elif i % 2 == 1:\n num = str(int(binaries[len(binaries)-1]) + 1)\n n = \"0\"*(k-len(num)) + num\n binaries.append(n)\n elif i % 2 == 0:\n num = str(int(binaries[len(binaries)-1])-1+10)\n n = \"0\"*(k-len(num)) + num\n if \"2\" in n:\n ind = n.index(\"2\")\n if n[ind-1] != \"1\":\n n = n[0:ind-1] + \"1\" + n[ind:]\n n = n.replace(\"2\"+\"0\"*(k-1-ind), \"0\"*(k-ind))\n else:\n ind2 = 0\n c = 0\n a = n[0]\n while a != \"2\":\n c += 1\n a = n[c]\n if a == \"0\":\n ind2 = c\n n = n[0:ind2] + \"1\" + n[ind2+1:]\n n = n[0:ind2+1] + \"0\"*(len(n[ind2:])-1)\n binaries.append(n)\n i += 1\n return binaries", "title": "" }, { "docid": "235ee0aa69338ea67ea11131ee5b3db0", "score": "0.49596497", "text": "def MSP430shiftir8(self,ins):\n data=[ins];\n self.writecmd(self.MSP430APP,0x80,1,data);\n return ord(self.data[0]);", "title": "" }, { "docid": "708a545a3d8c30757e6e54e0cc84f8f6", "score": "0.4952538", "text": "def l2_generator():\n return random.choice([\n '1000',\n '0100',\n '0010',\n '0001',\n ])", "title": "" }, { "docid": "7be488709b951455a1ce4cec6ed426e3", "score": "0.49498835", "text": "def binary_transform(data):\n verboseprint(\"INFO:: Binarization done!\")\n return np.where(data > 0, 1, 0)", "title": "" }, { "docid": "64eebf8816dc44915c53554bb5a0b71e", "score": "0.49476048", "text": "def bit(h, i):\n return (ord(h[i//8]) >> (i%8)) & 1", "title": "" }, { "docid": "7f6413bbba303d18764cda7039f2d751", "score": "0.49406773", "text": "def gen_bitpattern(n, start=''):\n if not n:\n return [start]\n else:\n return (gen_bitpattern(n - 1, start + '0') +\n gen_bitpattern(n - 1, start + '1'))", "title": "" }, { "docid": "5dfb1de9b2253c0b28a1ee178464f7b8", "score": "0.49361503", "text": "def binario(x):\r\n if x<0:\r\n temp = bin(65536+x)\r\n else:\r\n temp = bin(x)\r\n return temp[2:]", "title": "" }, { "docid": "d313b8360fda1c84f425f913c0dd83c7", "score": "0.49316716", "text": "def mybin(x, zero='-', one = 'x'):\n return bin(x)[2:].translate(string.maketrans('01', zero + one))", "title": "" }, { "docid": "c74160a0c5b7dd6394c339347cc91ce3", "score": "0.4927914", "text": "def last1bit(b):\n return 0 if b==0 else 1+last1bit(b>>1)", "title": "" }, { "docid": "24b5f1353d27838522a238327a7fbbea", "score": "0.4922586", "text": "def bitfield(n):\n return np.array([n >> i & 1 for i in range(7,-1,-1)])", "title": "" }, { "docid": 
"15107010dcbf0162c79ec7f2549433a0", "score": "0.49223816", "text": "def replace_lsb(i, b):\n result = i | int(b) if int(b) == 1 else i & 0xfe\n return result", "title": "" }, { "docid": "56432a1cd58c2c1fb4dcb32621ede7ca", "score": "0.4919637", "text": "def generate_b(b):\n while True:\n b = b * gen_b_factor % divisor\n if b % 8 == 0:\n yield b & 0xffff", "title": "" }, { "docid": "d05b9e764e7dbf34f1f5db61bd38f7b3", "score": "0.49154556", "text": "def __rshift__(self, other):\n newBits = []\n newBits += [self.bits[0]]\n for i in xrange(1, other+1):\n newBits += [self.bits[0]]\n for i in xrange(i + 1, len(self)):\n newBits += [self.bits[i-other]]\n newInt = Int8(bits=newBits)\n return newInt", "title": "" }, { "docid": "b2acb6ea7c52925fc223d373259ff6f2", "score": "0.49136248", "text": "def hamming_weight(x):\n hw = 0\n while x != 0:\n hw += x & 1\n x = x >> 1\n return hw", "title": "" }, { "docid": "f92c27a949685623e87c487bcb1e62f7", "score": "0.49124885", "text": "def get_range_shift(cls,indexBits):\r\n seqrange = cls.get_range(indexBits)\r\n shiftamount = 0\r\n for i in range(0,32):\r\n if (seqrange >> i) == 0:\r\n shiftamount = i\r\n break\r\n return shiftamount", "title": "" }, { "docid": "e706a55d00b7e0e049305ea400d6b1f1", "score": "0.49111456", "text": "def hamming_weight(n):\n result = 0\n while n:\n n &= n - 1\n result += 1\n\n return result", "title": "" }, { "docid": "bac335f0ca3945be9fe33737ffb95d5f", "score": "0.49105203", "text": "def rand(n: int) -> State:\n\n bits = [random.randint(0, 1) for _ in range(n)]\n return bitstring(*bits)", "title": "" }, { "docid": "4fc0f2ee786deb434ab6cf4720d7e2b5", "score": "0.49087167", "text": "def __irshift__(self, n):\n if n < 0:\n raise ValueError(\"Cannot shift by a negative amount.\")\n if not self.len:\n raise ValueError(\"Cannot shift an empty bitstring.\")\n if not n:\n return self\n n = min(n, self.len)\n return self._irshift(n)", "title": "" }, { "docid": "77e38304a6489373a6bb3ee25c9ec1ca", "score": "0.4903937", "text": "def generateSDR(n, w):\n sdr = np.zeros((n, ))\n randomOrder = np.random.permutation(np.arange(n))\n activeBits = randomOrder[:w]\n sdr[activeBits] = 1\n return sdr", "title": "" }, { "docid": "9f2fd9e2f1930df98ca2d6bac52439b5", "score": "0.49035355", "text": "def bits_original(n):\n binary_value = '{0:b}'.format(n)\n return_value = []\n\n for num in binary_value:\n if num == \"1\":\n return_value.append(num)\n return len(return_value)", "title": "" }, { "docid": "518ad7e0561162aa8f92e72260ded6e7", "score": "0.4902006", "text": "def bin(s):\n return str(s) if s <= 1 else bin(s >> 1) + str(s & 1)", "title": "" }, { "docid": "6f1616a6180cc2ca6075c4afcada19e9", "score": "0.4889834", "text": "def map(x,y,ss = 27):\n np.random.seed(ss)\n ans = np.zeros([x,y])\n for i in range(0,x):\n for j in range(0,y):\n dice = np.random.random()\n if dice < 0.5:\n if j % 2 == 1:\n ans[i,j] = -1\n elif dice < 0.6:\n ans[i,j] = -5\n elif dice < 0.7:\n ans[i,j] = 10\n\n tx = np.random.randint(x)\n ty = np.random.randint(y)\n ans[tx,ty] = 50\n return ans", "title": "" }, { "docid": "1b3936cdb0decf39602c514ad4c39536", "score": "0.4889099", "text": "def test_high_bits_encoding_regression(self):\n value = \"\\xFF\\xFF\\xFF\\xFF\\xFF\"\n\n prime = 58579\n msg = rsa.Message.Encode(value, prime).Mapped(lambda n: n % prime)\n self.assertEquals(msg.Decode(), value)", "title": "" }, { "docid": "fc87a0962fd897a128b37b12fe946e9c", "score": "0.48871645", "text": "def __rrshift__(self, value):\n\n return self._combine(self.BITRSHIFT, value, True)", 
"title": "" }, { "docid": "0b822bf6432fbf8269e7e4e3cb24e04f", "score": "0.48838186", "text": "def binary_convert(pattern):\n # Replace the string operators with 0,1\n bin_pattern = (\n pattern.replace(\"F\", \"0\")\n .replace(\"B\", \"1\")\n .replace(\"L\", \"0\")\n .replace(\"R\", \"1\")\n )\n return int(bin_pattern, 2)", "title": "" } ]
a8e0c31277f421df23fae1cc1807645d
Creates a Surface Finish Symbol based on last selection
[ { "docid": "f9ab0940b876a25a228a7aebadda13bb", "score": "0.5583832", "text": "def InsertSurfaceFinishSymbol2(self, SymType=defaultNamedNotOptArg, LeaderType=defaultNamedNotOptArg, LocX=defaultNamedNotOptArg, LocY=defaultNamedNotOptArg\n\t\t\t, LocZ=defaultNamedNotOptArg, LaySymbol=defaultNamedNotOptArg, ArrowType=defaultNamedNotOptArg, MachAllowance=defaultNamedNotOptArg, OtherVals=defaultNamedNotOptArg\n\t\t\t, ProdMethod=defaultNamedNotOptArg, SampleLen=defaultNamedNotOptArg, MaxRoughness=defaultNamedNotOptArg, MinRoughness=defaultNamedNotOptArg, RoughnessSpacing=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(65944, LCID, 1, (11, 0), ((3, 1), (3, 1), (5, 1), (5, 1), (5, 1), (3, 1), (3, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1)),SymType\n\t\t\t, LeaderType, LocX, LocY, LocZ, LaySymbol\n\t\t\t, ArrowType, MachAllowance, OtherVals, ProdMethod, SampleLen\n\t\t\t, MaxRoughness, MinRoughness, RoughnessSpacing)", "title": "" } ]
[ { "docid": "81582f125eaad40d376f178912744790", "score": "0.56996644", "text": "def ModifySurfaceFinishSymbol(self, SymType=defaultNamedNotOptArg, LeaderType=defaultNamedNotOptArg, LocX=defaultNamedNotOptArg, LocY=defaultNamedNotOptArg\n\t\t\t, LocZ=defaultNamedNotOptArg, LaySymbol=defaultNamedNotOptArg, ArrowType=defaultNamedNotOptArg, MachAllowance=defaultNamedNotOptArg, OtherVals=defaultNamedNotOptArg\n\t\t\t, ProdMethod=defaultNamedNotOptArg, SampleLen=defaultNamedNotOptArg, MaxRoughness=defaultNamedNotOptArg, MinRoughness=defaultNamedNotOptArg, RoughnessSpacing=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(69, LCID, 1, (11, 0), ((3, 1), (3, 1), (5, 1), (5, 1), (5, 1), (3, 1), (3, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1)),SymType\n\t\t\t, LeaderType, LocX, LocY, LocZ, LaySymbol\n\t\t\t, ArrowType, MachAllowance, OtherVals, ProdMethod, SampleLen\n\t\t\t, MaxRoughness, MinRoughness, RoughnessSpacing)", "title": "" }, { "docid": "a3885a0427d301e111d42d2a7bd8c375", "score": "0.5681883", "text": "def InsertSurfaceFinishSymbol(self, SymType=defaultNamedNotOptArg, LeaderType=defaultNamedNotOptArg, LocX=defaultNamedNotOptArg, LocY=defaultNamedNotOptArg\n\t\t\t, LocZ=defaultNamedNotOptArg, LaySymbol=defaultNamedNotOptArg, ArrowType=defaultNamedNotOptArg, MachAllowance=defaultNamedNotOptArg, OtherVals=defaultNamedNotOptArg\n\t\t\t, ProdMethod=defaultNamedNotOptArg, SampleLen=defaultNamedNotOptArg, MaxRoughness=defaultNamedNotOptArg, MinRoughness=defaultNamedNotOptArg, RoughnessSpacing=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(68, LCID, 1, (11, 0), ((3, 1), (3, 1), (5, 1), (5, 1), (5, 1), (3, 1), (3, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1)),SymType\n\t\t\t, LeaderType, LocX, LocY, LocZ, LaySymbol\n\t\t\t, ArrowType, MachAllowance, OtherVals, ProdMethod, SampleLen\n\t\t\t, MaxRoughness, MinRoughness, RoughnessSpacing)", "title": "" }, { "docid": "57a87fcd91d8659fcc690e239345685f", "score": "0.55770606", "text": "def InsertSurfaceFinishSymbol3(self, SymType=defaultNamedNotOptArg, LeaderType=defaultNamedNotOptArg, LocX=defaultNamedNotOptArg, LocY=defaultNamedNotOptArg\n\t\t\t, LocZ=defaultNamedNotOptArg, LaySymbol=defaultNamedNotOptArg, ArrowType=defaultNamedNotOptArg, MachAllowance=defaultNamedNotOptArg, OtherVals=defaultNamedNotOptArg\n\t\t\t, ProdMethod=defaultNamedNotOptArg, SampleLen=defaultNamedNotOptArg, MaxRoughness=defaultNamedNotOptArg, MinRoughness=defaultNamedNotOptArg, RoughnessSpacing=defaultNamedNotOptArg):\n\t\tret = self._oleobj_.InvokeTypes(48, LCID, 1, (9, 0), ((3, 1), (3, 1), (5, 1), (5, 1), (5, 1), (3, 1), (3, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1), (8, 1)),SymType\n\t\t\t, LeaderType, LocX, LocY, LocZ, LaySymbol\n\t\t\t, ArrowType, MachAllowance, OtherVals, ProdMethod, SampleLen\n\t\t\t, MaxRoughness, MinRoughness, RoughnessSpacing)\n\t\tif ret is not None:\n\t\t\tret = Dispatch(ret, u'InsertSurfaceFinishSymbol3', '{83A33D8E-27C5-11CE-BFD4-00400513BB57}')\n\t\treturn ret", "title": "" }, { "docid": "05130febdff27c37c8abb9ecbfdd684d", "score": "0.5306872", "text": "def end_poly(self):\n self._creatingPoly = False", "title": "" }, { "docid": "4870a0711cfbba39502d87980334aa34", "score": "0.51920795", "text": "def end_drawing():\n turtle.done()", "title": "" }, { "docid": "8a05705fb881b8758b5a02e8ef4d2009", "score": "0.51627666", "text": "def addsymbol(dest_widget,x,y,w,h,c,s,f):\n # relocate the symbol into the destination widget\n x += dest_widget.x\n y += dest_widget.y\n # choose the color\n if 
c == client_graphics_value_red:\n clf = client_graphics_color_red\n clh = client_graphics_color_red_half\n elif c == client_graphics_value_green:\n clf = client_graphics_color_green\n clh = client_graphics_color_green_half\n elif c == client_graphics_value_blue:\n clf = client_graphics_color_blue\n clh = client_graphics_color_blue_half\n else:\n return {'status': \"ko\", 'reason': \"invalid color code\"}\n # add the 'right' shape\n if s == client_graphics_value_square:\n # it is a rectangle\n if f == client_graphics_value_empty:\n # draw the empty shape\n with dest_widget.canvas:\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Line(points = (x,y, x,y+h, x+w,y+h, x+w,y), \n joint = 'miter', close = True)\n elif f == client_graphics_value_greyed:\n # First fill the shape with half transparent color\n with dest_widget.canvas:\n Color(clh['r'], clh['g'], clh['b'], clh['a'])\n Rectangle(pos = (x,y), size = (w,h))\n # then draw the shape border in 'full' color\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Line(points=(x,y, x,y+h, x+w,y+h, x+w,y), \n joint = 'miter', close = True)\n elif f == client_graphics_value_full:\n # draw a full rectangle\n with dest_widget.canvas:\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Rectangle(pos = (x,y), size = (w,h))\n else:\n return {'status': \"ko\", 'reason': \"invalid filling code\"}\n elif s == client_graphics_value_circle:\n # it is an ellipse\n if f == client_graphics_value_empty:\n # draw the empty shape\n with dest_widget.canvas:\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Line(ellipse = (x,y,w,h))\n elif f == client_graphics_value_greyed:\n with dest_widget.canvas:\n # first fill the shape with half transparent color\n Color(clh['r'], clh['g'], clh['b'], clh['a'])\n Ellipse(pos = (x,y), size = (w,h))\n # then draw the shape border in 'full' color\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Line(ellipse = (x,y,w,h))\n elif f == client_graphics_value_full:\n # draw a full ellipse\n with dest_widget.canvas:\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Ellipse(pos = (x,y), size =( w,h))\n else:\n return {'status': \"ko\", 'reason': \"invalid filling code\"}\n elif s == client_graphics_value_diamond:\n # it is a diamond\n if f == client_graphics_value_empty:\n # draw the empty shape\n with dest_widget.canvas:\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Line(points = (x,y+h/2, x+w/2,y+h, x+w,y+h/2, x+w/2,y),\n joint = 'miter', close = True)\n elif f == client_graphics_value_greyed:\n with dest_widget.canvas:\n # first draw the shape with half transparent color\n Color(clh['r'], clh['g'], clh['b'], clh['a'])\n Quad(points = (x,y+h/2, x+w/2,y+h, x+w,y+h/2, x+w/2,y))\n # then draw the shape border in 'full' color\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Line(points = (x,y+h/2, x+w/2,y+h, x+w,y+h/2, x+w/2,y),\n joint = 'miter', close = True)\n elif f == client_graphics_value_full:\n with dest_widget.canvas:\n Color(clf['r'], clf['g'], clf['b'], clf['a'])\n Quad(points = (x,y+h/2, x+w/2,y+h, x+w,y+h/2 ,x+w/2,y))\n else:\n return {'status': \"ko\", 'reason': \"invalid filling code\"}\n else:\n return {'status': \"ko\", 'reason': \"invalid shape code\"}", "title": "" }, { "docid": "9e37e744fa607b3810b5e6b0752a0b4c", "score": "0.5077321", "text": "def render_composite(self, x, y, symbol, layer, bkcolor=None):\n blt.composition(blt.TK_ON)\n for s in symbol:\n self.render(x, y, s, layer, bkcolor)\n blt.composition(blt.TK_OFF)", "title": "" }, { "docid": "9135d425a5e07da289c227bfdcceff80", "score": "0.49513018", "text": "def 
Surface(self, *args):\n return _StepShape.StepShape_GeometricSetSelect_Surface(self, *args)", "title": "" }, { "docid": "e0322a0a01a7d95fb6109c7f0a58d0fe", "score": "0.4913673", "text": "def make_finish(self):\n if not self.made_finish:\n pos = py.mouse.get_pos()\n self.change_cell(pos,'finish')\n self.made_finish = True", "title": "" }, { "docid": "804919ca144f7674a719e6f9e679e941", "score": "0.4891694", "text": "def newShape(self, value=True):\n if len(self.labelHist) > 0:\n self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)\n\n if value:\n text = self.labelDialog.popUp(text=self.prevLabelText)\n self.lastLabel = text\n else:\n text = self.prevLabelText\n\n if text is not None:\n self.prevLabelText = self.stringBundle.getString('tempLabel')\n\n shape = self.canvas.setLastLabel(text, None, None, None) # generate_color, generate_color\n if self.kie_mode:\n key_text, _ = self.keyDialog.popUp(self.key_previous_text)\n if key_text is not None:\n shape = self.canvas.setLastLabel(text, None, None, key_text) # generate_color, generate_color\n self.key_previous_text = key_text\n if not self.keyList.findItemsByLabel(key_text):\n item = self.keyList.createItemFromLabel(key_text)\n self.keyList.addItem(item)\n rgb = self._get_rgb_by_label(key_text, self.kie_mode)\n self.keyList.setItemLabel(item, key_text, rgb)\n\n self._update_shape_color(shape)\n self.keyDialog.addLabelHistory(key_text)\n\n self.addLabel(shape)\n if self.beginner(): # Switch to edit mode.\n self.canvas.setEditing(True)\n self.actions.create.setEnabled(True)\n self.actions.createpoly.setEnabled(True)\n self.actions.undoLastPoint.setEnabled(False)\n self.actions.undo.setEnabled(True)\n else:\n self.actions.editMode.setEnabled(True)\n self.setDirty()\n\n else:\n # self.canvas.undoLastLine()\n self.canvas.resetAllLines()", "title": "" }, { "docid": "1380485ec0e44bbf8189c117ddd7b1ea", "score": "0.4884996", "text": "def Finish(self):", "title": "" }, { "docid": "ebb2facef48d544bd8294bc9dbef90fe", "score": "0.48487365", "text": "def CreateSurfaceCurve(self, Surf=defaultNamedNotOptArg, Error=pythoncom.Missing):\n\t\treturn self._ApplyTypes_(51, 1, (9, 0), ((9, 1), (16387, 2)), u'CreateSurfaceCurve', '{83A33D44-27C5-11CE-BFD4-00400513BB57}',Surf\n\t\t\t, Error)", "title": "" }, { "docid": "ffcc5ffe29659bb40afd15525e539114", "score": "0.48121944", "text": "def ready_symbol(self):\n return self.ReadySymbolAndColor()[0]", "title": "" }, { "docid": "52a29c27785fcdc8f82289c71cf57671", "score": "0.4810403", "text": "def surf_with_mark(self) -> str:\n if self._surf_with_mark is None:\n self._surf_with_mark = self.surf_with_mark_()\n return self._surf_with_mark", "title": "" }, { "docid": "ad8c34fd73359c75a41769063f2b8bf5", "score": "0.47798136", "text": "def create_context_plot(ra, dec, name=\"Your object\"):\n plot = K2FootprintPlot()\n plot.plot_galactic()\n plot.plot_ecliptic()\n for c in range(0, 20):\n plot.plot_campaign_outline(c, facecolor=\"#666666\")\n # for c in [11, 12, 13, 14, 15, 16]:\n # plot.plot_campaign_outline(c, facecolor=\"green\")\n plot.ax.scatter(ra, dec, marker='x', s=250, lw=3, color=\"red\", zorder=500)\n plot.ax.text(ra, dec - 2, name,\n ha=\"center\", va=\"top\", color=\"red\",\n fontsize=20, fontweight='bold', zorder=501)\n return plot", "title": "" }, { "docid": "64331bd3fb9810e59f51dda0f4702049", "score": "0.47620562", "text": "def InsertCurveFileEnd(self):\n\t\treturn self._oleobj_.InvokeTypes(65831, LCID, 1, (11, 0), (),)", "title": "" }, { "docid": "64331bd3fb9810e59f51dda0f4702049", "score": 
"0.47620562", "text": "def InsertCurveFileEnd(self):\n\t\treturn self._oleobj_.InvokeTypes(65831, LCID, 1, (11, 0), (),)", "title": "" }, { "docid": "911c1a8fe1d91c4d8378d02b885efb90", "score": "0.47080085", "text": "def finish_current_arc():\n index_b = add_node(last_coordinates)\n time = dist / (determine_speed(last_way_type, max_speed) / 3.6)\n cost = [time]\n if weightKeyword:\n cost.append(int(lastWeight))\n graph.add_edge(index_a, index_b, cost)\n graph.add_edge(index_b, index_a, cost)\n arc_to_fid[(index_a, index_b)] = lastIndex\n arc_to_fid[(index_b, index_a)] = lastIndex", "title": "" }, { "docid": "87ff2b1b78328a3b82a0a80ac57da5b6", "score": "0.4706641", "text": "def ending(self, *args):\n return _ida_kernwin.place_t_ending(self, *args)", "title": "" }, { "docid": "116b60f1ff4b6502962179de4a998a37", "score": "0.47007897", "text": "def pathStroke(skeleton, radius, join, cap):\n\n l = PathParallel.pathParallel(skeleton, radius, join)\n r = PathParallel.pathParallel(skeleton, -radius, join)\n\n if not(isinstance(skeleton[-1], pyx.path.closepath)):\n if cap == 'butt':\n return [Cap.capButt(l, r), None]\n else:\n raise Exception(\"Unknown cap type %s\" % cap)\n\n return [l, r]", "title": "" }, { "docid": "cf955e3a3f3b852d3e0b526cfe0faec9", "score": "0.46877196", "text": "def EndSketchEdit(self):\n\t\treturn self._oleobj_.InvokeTypes(32, LCID, 1, (24, 0), (),)", "title": "" }, { "docid": "e33640b88e5396875be00c14cd9e5592", "score": "0.46715704", "text": "def hard_decode(self):\n\n self.currsymbol = np.argmax(np.absolute(self.fft_energy_buffer[:,-1]))\n\n # Mixing causes the spectrum to be flipped, so our symbol output also needs to be flipped\n # TODO: Fix this. We shouldn't need to mix the received signal so far. It only has to be mixed so the tones align to\n # integer multiples of the symbol rate.\n #self.currsymbol = self.num_tones - self.currsymbol - 1\n\n self.last_symbol2 = self.last_symbol\n self.last_symbol = self.currsymbol\n\n return self.currsymbol#gray_decode(symbol)", "title": "" }, { "docid": "e2d4d13524eadfb680b0fe9bde8e6fbc", "score": "0.46678284", "text": "def endplate_chord(self):\n x1 = self.main_plate[0].surface.edges[-1].bbox.bounds[0]\n x2 = self.main_plate[-1].surface.edges[-1].bbox.bounds[3]\n return x2, (x2 - x1)", "title": "" }, { "docid": "b2311ea02a5cc9a70f12df18310cb150", "score": "0.4667665", "text": "def drawQuarterRest(self, centerX):\r\n print centerX\r\n yTop = self.trebleFLoc + 1 * self.pixelsPerLetter\r\n self.create_polygon(centerX - .5 * self.pixelsPerLetter,\r\n yTop,\r\n centerX + 1 * self.pixelsPerLetter,\r\n yTop + 2 * self.pixelsPerLetter,\r\n centerX - .5 * self.pixelsPerLetter,\r\n yTop + 4 * self.pixelsPerLetter,\r\n centerX + 1 * self.pixelsPerLetter,\r\n yTop + 6 * self.pixelsPerLetter)", "title": "" }, { "docid": "84a1911168df51228a5dcb783f0dca65", "score": "0.46432835", "text": "def end(self, idx):\n assert 0 <= idx <= 2\n if idx == 0:\n return self.z1\n elif idx == 1:\n return self.y1\n return self.x1", "title": "" }, { "docid": "fc336f425e56a2da57f01e78fdc91fe7", "score": "0.46288347", "text": "def merge():\n SL = selection.check(warn=False)\n if len(SL) < 2:\n warning(\"You should at least selct two surfaces!\")\n return\n\n S = TriSurface.concatenate(SL)\n name = '--merged-surface--'\n export({name:S})\n selection.set(name)\n selection.draw()", "title": "" }, { "docid": "85be6e1f01a95bb38d830c1f663e5a31", "score": "0.46173996", "text": "def make_map_symbols(self):\n n = self.keep_analysis_count\n pen = next(self.color_list)\n 
filledbrush = pen\n emptybrush = None\n symbol = next(self.symbol_list)\n if n == 0:\n clearFlag = True\n else:\n clearFlag = False\n self.currentSymDict = {'pen': pen, 'filledbrush': filledbrush,\n 'emptybrush': emptybrush, 'symbol': symbol,\n 'n': n, 'clearFlag': clearFlag}", "title": "" }, { "docid": "f85a083c2f34bb92359e101f03ddf6cf", "score": "0.45921573", "text": "def get_next_shape(self):\r\n this_shape = self.next_shape(self.board, offset=Coord(4, 0-self.next_shape.HEIGHT))\r\n for block in self.preview_shape.blocks:\r\n self.info_panel.preview.delete_block(block.id)\r\n del self.preview_shape\r\n self.get_preview_shape()\r\n return this_shape", "title": "" }, { "docid": "98c7e62efda5987b151377bd1f5da286", "score": "0.45844948", "text": "def draw_final_dot(self):\n self.painter.dot(2 * self.dot_radius, self.final_dot_color)", "title": "" }, { "docid": "d654010dfff145af8a943687e94d2cd3", "score": "0.45768198", "text": "def multipatch_draw(self, curr_shape):\r\n rand_color = [random.uniform(0, 1), random.uniform(0, 1),\r\n random.uniform(0, 1)]\r\n for k in range(1, len(curr_shape.parts), 1):\r\n j = k - 1\r\n xy_coors = curr_shape.points[\r\n curr_shape.parts[j]:curr_shape.parts[k]]\r\n z_part = curr_shape.z[curr_shape.parts[j]:curr_shape.parts[k]]\r\n\r\n self.draw_3d_shapes(xy_coors, z_part, rand_color)\r\n\r\n xy_coors = curr_shape.points[\r\n curr_shape.parts[-1]:-1]\r\n z_part = curr_shape.z[curr_shape.parts[-1]:-1]\r\n self.draw_3d_shapes(xy_coors, z_part, rand_color)", "title": "" }, { "docid": "fea63da92407ab1e0536f6f8949527f3", "score": "0.45748553", "text": "def draw_phase(self):\n self.bdmger.pick_card()", "title": "" }, { "docid": "d3ee4225102969f91274aa1be8ccc52f", "score": "0.45727038", "text": "def symbol(self):\r\n ET.SubElement(self.__root, SYMBOL).text = self.__cur_token", "title": "" }, { "docid": "8719e373b14de967dd6c11179c61066a", "score": "0.45686668", "text": "def select_area_end(self):\n\t\tself.__selected = 1\n\t\tself.__drawArea.queue_draw()", "title": "" }, { "docid": "0dfe11b5a7640170c09452c4c09d5156", "score": "0.45619965", "text": "def close_legend(self, legend):\r\n pass", "title": "" }, { "docid": "b38936f01d9aff9b286153f408507a1a", "score": "0.45586422", "text": "def draw_equitriangle(t,sz):\n polygon_gen(t, 3, sz, \"green\")", "title": "" }, { "docid": "746265a39e61c1c6c1a1e304ca9f82d3", "score": "0.45524967", "text": "def bezier_surfaces(event=None):\n display.EraseAll()\n array1 = TColgp_Array2OfPnt(1, 4, 1, 4)\n\n array1.SetValue(1, 1, gp_Pnt(1, 1, -1))\n array1.SetValue(1, 2, gp_Pnt(2, 1, 0))\n array1.SetValue(1, 3, gp_Pnt(3, 1, -1))\n array1.SetValue(1, 4, gp_Pnt(4, 1, 0))\n array1.SetValue(2, 1, gp_Pnt(1, 2, 3))\n array1.SetValue(2, 2, gp_Pnt(2, 2, 5))\n array1.SetValue(2, 3, gp_Pnt(3, 2, 2))\n array1.SetValue(2, 4, gp_Pnt(4, 2, 3))\n array1.SetValue(3, 1, gp_Pnt(1, 3, 2))\n array1.SetValue(3, 2, gp_Pnt(2, 3, 1))\n array1.SetValue(3, 3, gp_Pnt(3, 3, 0))\n array1.SetValue(3, 4, gp_Pnt(4, 3, 1))\n array1.SetValue(4, 1, gp_Pnt(1, 4, 0))\n array1.SetValue(4, 2, gp_Pnt(2, 4, -1))\n array1.SetValue(4, 3, gp_Pnt(3, 4, 0))\n array1.SetValue(4, 4, gp_Pnt(4, 4, -1))\n\n BZ1 = Geom_BezierSurface(array1)\n\n bezierarray = TColGeom_Array2OfBezierSurface(1, 1, 1, 1)\n bezierarray.SetValue(1, 1, BZ1.GetHandle())\n \n BB = GeomConvert_CompBezierSurfacesToBSplineSurface(bezierarray)\n if BB.IsDone():\n poles = BB.Poles().GetObject().Array2()\n uknots = BB.UKnots().GetObject().Array1()\n vknots = BB.VKnots().GetObject().Array1()\n umult = 
BB.UMultiplicities().GetObject().Array1()\n vmult = BB.VMultiplicities().GetObject().Array1()\n udeg = BB.UDegree()\n vdeg = BB.VDegree()\n\n BSPLSURF = Geom_BSplineSurface( poles, uknots, vknots, umult, vmult, udeg, vdeg, 0, 0 )\n BSPLSURF.Translate(gp_Vec(0,0,2))\n\n display.DisplayShape(BSPLSURF.GetHandle(), update=True)\n start_display()", "title": "" }, { "docid": "0cdfd23c06b301b3c72095b7c736ef8f", "score": "0.4549494", "text": "def draw_build_target(self, surf):\n round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v)\n\n queued_action = self._queued_action\n if queued_action:\n radius = queued_action.footprint_radius\n if radius:\n pos = self.get_mouse_pos()\n if pos:\n pos = point.Point(round_half(pos.world_pos.x, (radius * 2) % 2),\n round_half(pos.world_pos.y, (radius * 2) % 2))\n surf.draw_circle(\n colors.PLAYER_ABSOLUTE_PALETTE[\n self._obs.observation.player_common.player_id],\n pos, radius)", "title": "" }, { "docid": "18c25af30ad4e0e165917ce3e2ee66c9", "score": "0.45358995", "text": "def draw(self):\n focusedColor = 0\n b = DrawBuffer()\n\n active_ = (self.state & sfSelected | sfActive) == (sfSelected | sfActive)\n if active_:\n normalColor = self.getColor(1)\n focusedColor = self.getColor(3)\n selectedColor = self.getColor(4)\n else:\n normalColor = self.getColor(2)\n selectedColor = self.getColor(4)\n\n if self.hScrollBar:\n indent = self.hScrollBar.value\n else:\n indent = 0\n\n colWidth = self.size.x // self.numCols + 1\n\n for i in range(self.size.y):\n for j in range(self.numCols):\n item = j * self.size.y + i + self.topItem\n curCol = j * colWidth\n\n if active_ and (self.focused == item) and (self._range > 0):\n color = focusedColor\n self.setCursor(curCol + 1, i)\n scOff = 2\n elif item < self._range and self.isSelected(item):\n color = selectedColor\n scOff = 2\n else:\n color = normalColor\n scOff = 4\n\n b.moveChar(curCol, ' ', color, colWidth)\n\n if item < self._range:\n text = self.getText(item, colWidth + indent)[indent:indent + colWidth]\n\n b.moveStr(curCol + 1, text, color)\n if showMarkers:\n b.putChar(curCol, SPECIAL_CHARS[scOff])\n b.putChar(curCol + colWidth - 2, SPECIAL_CHARS[scOff + 1])\n elif i == 0 and j == 0:\n b.moveStr(curCol + 1, self.emptyText, self.getColor(1))\n\n b.moveChar(curCol + colWidth - 1, self.separatorChar, self.getColor(5), 1)\n self.writeLine(0, i, self.size.x, 1, b)", "title": "" }, { "docid": "c6611ee83b97051db14325e7d92ddff8", "score": "0.45314", "text": "def scatterback(self):\r\n self.scatterwin.withdraw()\r\n self.forthwin.deiconify()", "title": "" }, { "docid": "7ebffa39cb63157a21560bd8ae178b91", "score": "0.45018506", "text": "def buildSurface(self):\r\n return None", "title": "" }, { "docid": "9874e9ef4be80de3083256a8e0a8473c", "score": "0.44858423", "text": "def __make_hands(self,name,length):\n turtle.reset()\n self.__skip(-length * 0.1)\n turtle.begin_poly() #The beginning of recoding the polygon vertices.\n turtle.forward(length * 1.1)\n turtle.end_poly() #The beginning of recoding the polygon vertices, and connects the last vertice with the first vertice.\n hand_form = turtle.get_poly()\n turtle.register_shape(name, hand_form)", "title": "" }, { "docid": "68defbedc60e9019a56ed3dd3acb65ce", "score": "0.44788888", "text": "def add_back(self, new_options: Optional[LayerType]) -> None:\n self._update_layers(new_options, self._layers.append)", "title": "" }, { "docid": "8b292f2248b830b5967aabd207ca043e", "score": "0.44534397", "text": "def finish_batch(self, batch_info, dst_curs):\r\n pass", "title": 
"" }, { "docid": "9497fb4ea121d316ef4c878555f9d037", "score": "0.4453292", "text": "def ReadySymbolAndColor(self):\n # NB: This can be overridden by subclasses to allow symbols for more\n # complex reasons the object isn't ready. Ex: Service overrides it to\n # provide '!' for \"I'm serving, but not the revision you wanted.\"\n encoding = console_attr.GetConsoleAttr().GetEncoding()\n if self.ready is None:\n return self._PickSymbol(\n '\\N{HORIZONTAL ELLIPSIS}', '.', encoding), 'yellow'\n elif self.ready:\n return self._PickSymbol('\\N{HEAVY CHECK MARK}', '+', encoding), 'green'\n else:\n return 'X', 'red'", "title": "" }, { "docid": "b21730b867079a2291da486203f7ef8c", "score": "0.4448959", "text": "def GetSymbolSurfaceTexture(self):\n\t\treturn self._oleobj_.InvokeTypes(45, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "77a22df5ee40cd709cce6c1872f2faa3", "score": "0.44414178", "text": "def render( self, surface, font, bSelected=False ):\n \n if bSelected:\n colButton = self.colSelectedButton\n else:\n colButton = self.colButton\n \n #~ txt_surface, rect = font.render(self.txt, colTxt)\n round_rect(surface,self.pos+self.size,colButton,11,0)\n #~ surface.blit(txt_surface,(self.pos[0]+self.margin[0],self.pos[1]+self.margin[1]))\n renderTxtMultilineCentered(surface,self.txt,(self.pos[0],self.pos[1]),font, self.colText,nWidthTotal = self.size[0], nHeightTotal=self.size[1])", "title": "" }, { "docid": "6d3853cd52c0d6697cae2de05faea43e", "score": "0.44336623", "text": "def remove_last_button(self):\n\n self.add_seg_btn.grid_remove()", "title": "" }, { "docid": "e12d4b29f2091f100837ab8210906a27", "score": "0.4428562", "text": "def stamp(self):\n screen = self.screen\n shape = screen._shapes[self.turtle.shapeIndex]\n ttype = shape._type\n tshape = shape._data\n if ttype == \"polygon\":\n stitem = screen._createpoly()\n if self._resizemode == \"noresize\":\n w = 1\n shape = tshape\n else:\n if self._resizemode == \"auto\":\n lx = ly = max(1, self._pensize/5.0)\n w = self._pensize\n tiltangle = 0\n elif self._resizemode == \"user\":\n lx, ly = self._stretchfactor\n w = self._outlinewidth\n tiltangle = self._tilt\n shape = [(lx*x, ly*y) for (x, y) in tshape]\n t0, t1 = math.sin(tiltangle), math.cos(tiltangle)\n shape = [(t1*x+t0*y, -t0*x+t1*y) for (x, y) in shape]\n shape = self._polytrafo(shape)\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(stitem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n stitem = screen._createimage(\"\")\n screen._drawimage(stitem, self._position, tshape)\n elif ttype == \"compound\":\n stitem = []\n for element in tshape:\n item = screen._createpoly()\n stitem.append(item)\n stitem = tuple(stitem)\n lx, ly = self._stretchfactor\n w = self._outlinewidth\n for item, (poly, fc, oc) in zip(stitem, tshape):\n poly = [(lx*x, ly*y) for (x, y) in poly]\n poly = self._polytrafo(poly)\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=w, top=True)\n self.stampItems.append(stitem)\n self.undobuffer.push((\"stamp\", stitem))\n return stitem", "title": "" }, { "docid": "a51af541e8f6865a40f199f646f29f79", "score": "0.44191518", "text": "def endchar(self, point=\"insert\"):\n point = str(point)\n test = self.box.output(point+\"-1c\", point)\n if test == \"#\":\n self.box.placetag(\"comment\", point+\"-1c\", point)\n elif test in ['\"', \"`\", \"\\u201c\", \"\\u201d\"]:\n self.box.placetag(\"string\", point+\"-1c\", point)\n elif test in self.e.digits:\n self.box.placetag(\"digit\", point+\"-1c\", point)\n elif 
not self.e.validvar(test):\n self.box.placetag(\"reserved\", point+\"-1c\", point)\n return test", "title": "" }, { "docid": "23ac1c5a2e8a5b7fef414906d56c54c0", "score": "0.4413882", "text": "def finish(self, newline=False):\n assert type(newline) is bool\n symbols_before_finished = self.symbols_printed\n for i in range(self.increments - symbols_before_finished):\n self.tick()\n if newline:\n printl('', flush=True)\n else:\n printl(' ', end='', flush=True) # Add a space after the ticks", "title": "" }, { "docid": "b2abdbc00790f0bd1d8d33f3415941c5", "score": "0.44094878", "text": "def _goto(self, end):\n ## Version mit undo-stuff\n go_modes = ( self._drawing,\n self._pencolor,\n self._pensize,\n isinstance(self._fillpath, list))\n screen = self.screen\n undo_entry = (\"go\", self._position, end, go_modes,\n (self.currentLineItem,\n self.currentLine[:],\n screen._pointlist(self.currentLineItem),\n self.items[:])\n )\n if self.undobuffer:\n self.undobuffer.push(undo_entry)\n start = self._position\n if self._speed and screen._tracing == 1:\n diff = (end-start)\n diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2\n nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed))\n delta = diff * (1.0/nhops)\n for n in range(1, nhops):\n if n == 1:\n top = True\n else:\n top = False\n self._position = start + delta * n\n if self._drawing:\n screen._drawline(self.drawingLineItem,\n (start, self._position),\n self._pencolor, self._pensize, top)\n self._update()\n if self._drawing:\n screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)),\n fill=\"\", width=self._pensize)\n # Turtle now at end,\n if self._drawing: # now update currentLine\n self.currentLine.append(end)\n if isinstance(self._fillpath, list):\n self._fillpath.append(end)\n ###### vererbung!!!!!!!!!!!!!!!!!!!!!!\n self._position = end\n if self._creatingPoly:\n self._poly.append(end)\n if len(self.currentLine) > 42: # 42! 
answer to the ultimate question\n # of life, the universe and everything\n self._newLine()\n self._update() #count=True)", "title": "" }, { "docid": "0445f626dd46325eee2184d0a928d85c", "score": "0.4404775", "text": "def get_final(char):\n if is_final(char):\n return char\n return FINALS[char_offset(char) % len(FINALS)]", "title": "" }, { "docid": "8c2805cf94d4bd651845b23d2d42b627", "score": "0.44009906", "text": "def render(self, x, y, symbol, layer, bkcolor=None):\n # Set terminal\n blt.layer(layer)\n if bkcolor is not None:\n blt.bkcolor(bkcolor)\n blt.color(symbol.color)\n\n draw_x, draw_y = x + self.x_offset, y + self.y_offset\n blt.put_ext(draw_x, draw_y, symbol.dx, symbol.dy, symbol.char, None)", "title": "" }, { "docid": "32fd961dd2ee5c6298a95f563e20d546", "score": "0.4395359", "text": "def draw_resize_symbol(self, col):\n\n self.delete('resizesymbol')\n w=self.table.cellwidth\n h=self.height\n #if x_pos > self.tablewidth:\n # return\n wdth=1\n hfac1=0.2\n hfac2=0.4\n x_start=self.table.x_start\n x1,y1,x2,y2 = self.table.getCellCoords(0,col)\n self.create_polygon(x2-3,h/4, x2-10,h/2, x2-3,h*3/4, tag='resizesymbol',\n fill='white', outline='gray', width=wdth)\n self.create_polygon(x2+2,h/4, x2+10,h/2, x2+2,h*3/4, tag='resizesymbol',\n fill='white', outline='gray', width=wdth)\n return", "title": "" }, { "docid": "2b31865ac6d6e0fe4be30c1077bd08a5", "score": "0.43900317", "text": "def GroundGridFlatEnd(builder):\n return End(builder)", "title": "" }, { "docid": "20a55840325f8135461bd38893534b1e", "score": "0.43886536", "text": "def place_symbol(self, coords, symbol):\n x, y = coords\n self.array[y - 1][x - 1] = symbol", "title": "" }, { "docid": "99a56df48e35cb953dbeca552a88338d", "score": "0.43866703", "text": "def frameCreateShape(self):\n self.refresh_board()\n for widgets in self.frameCreateShapes.winfo_children():\n widgets.destroy()\n self.canvas.bind('<Button-1>', self.click)\n frame1Canvas = Canvas(self.frameCreateShapes, width=200)\n frame1Canvas.pack(anchor=CENTER, expand=True)\n name = StringVar()\n frame1Input = Entry(frame1Canvas, textvariable=name,\n borderwidth=0, background=\"white\")\n frame1Input.pack(anchor=CENTER, expand=True)\n frame1Button = Button(frame1Canvas, padx=15,\n pady=10, activebackground=\"#000\", background=\"#fff\", text=\"Save\", command=lambda: self.saveShape(frame1Input, self.COORDS), borderwidth=0)\n frame1Button.pack(anchor=CENTER, expand=True)", "title": "" }, { "docid": "d28793e8dfb4efbd525d4ce7be12da47", "score": "0.43806437", "text": "def endPath(self):\n points = self.currentPath\n self.currentPath = None\n assert points is not None\n assert len(points) >= 1\n self._stroke_to_contour(points)", "title": "" }, { "docid": "d8c8b8eaf3528bcf3419e8481c5ea34b", "score": "0.43731463", "text": "def seizure(self):\n tk = Track()\n tk.add(0.0, self.head_mot.pantilt(0.8, 0.6, 0.5))\n tk.add(0.01, self.head_mot.moveeyes(0.0, 0.5))\n tk.add(0.5, self.head_mot.pantilt(-0.8, -0.6, 0.5))\n tk.add(0.51, self.head_mot.moveeyes(0.4, 0.5))\n tk.add(3.0, self.lights_mot.white_glow(255, 0.1, float('inf')))\n return tk", "title": "" }, { "docid": "0b213cc430652b37c4b7481cebd9209c", "score": "0.4371868", "text": "def makeklegend(fig, k):\n if not fig:\n fig = P.gcf()\n P.figure(fig.number)\n l = P.legend([r\"$k=\" + helpers.eto10(ks) + \"$\" for ks in k])\n P.draw()\n return l", "title": "" }, { "docid": "25887a7ac4c68b928c7db94496f0213b", "score": "0.43671235", "text": "def drawSurface(self):", "title": "" }, { "docid": "ea31277433821fd9012ea42a736396f5", "score": 
"0.43653837", "text": "def press_exit2(self, event):\n self.button3.config(image=self.exit)", "title": "" }, { "docid": "1bae8e9a37851d7fb729a3aaa73f8bb3", "score": "0.43647245", "text": "def __draw__(self) -> T:\n card = self.deck.popleft()\n self.drawn.update([card])\n return card", "title": "" }, { "docid": "f8df3e58eb49477a1c10d89a8d089005", "score": "0.43633217", "text": "def path_end_matter(self, last, chan_num):\n return ''", "title": "" }, { "docid": "3c47079140f7bc92c868aba140330594", "score": "0.43624043", "text": "def makeSymbol(val):\n return Value(Values.SYMBOL, val)", "title": "" }, { "docid": "64946b31549de172f24acf21b37f794d", "score": "0.43599257", "text": "def get_curve_shape_name(name=\"\"):\n return '{}Shape'.format(name)", "title": "" }, { "docid": "1c2584df02e5f091e7e4247883ec19a5", "score": "0.4347733", "text": "def Finalize(*args, **kwargs):\n return _propgrid.PGMultiButton_Finalize(*args, **kwargs)", "title": "" }, { "docid": "256bb52697f67881b6d20e260e81bb23", "score": "0.43244714", "text": "def create_exit_button_style(self):\n if self.is_light_theme:\n disabled_bg = Colors.update_hsv(self.colors.inputbg, vd=-0.2)\n else:\n disabled_bg = Colors.update_hsv(self.colors.inputbg, vd=-0.3)\n\n pressed_vd = -0.2\n self.settings.update(\n {\n \"exit.TButton\": {\n \"configure\": {\"relief\": tk.FLAT, \"font\": \"helvetica 12\"},\n \"map\": {\n \"background\": [\n (\"disabled\", disabled_bg),\n (\n \"pressed !disabled\",\n Colors.update_hsv(\n self.colors.primary, vd=pressed_vd\n ),\n ),\n (\"hover !disabled\", self.colors.danger),\n ]\n },\n }\n }\n )\n\n for color in self.colors:\n self.settings.update(\n {\n f\"exit.{color}.TButton\": {\n \"configure\": {\n \"relief\": tk.FLAT,\n \"font\": \"helvetica 12\",\n },\n \"map\": {\n \"background\": [\n (\"disabled\", disabled_bg),\n (\n \"pressed !disabled\",\n Colors.update_hsv(\n self.colors.get(color), vd=pressed_vd\n ),\n ),\n (\"hover !disabled\", self.colors.danger),\n ]\n },\n }\n }\n )", "title": "" }, { "docid": "c3c9e8b43bfe31a04c7f74ab94a8d35f", "score": "0.43201593", "text": "def get_symbol(self):\n # Create symbol object and record the previous line and position.\n symbol = Symbol()\n symbol.prev_line = self.location()[0]\n symbol.prev_position = self.location()[1]\n\n self.skip_spaces() # current character now not whitespace\n self.skip_comment() # current character now not comment\n\n if self.current_character.isalpha(): # name\n name_string = self.get_name()\n if name_string in self.keywords_list:\n symbol.type = self.KEYWORD\n elif name_string in self.logic_type_list:\n symbol.type = self.LOGIC_TYPE\n elif name_string in self.output_pin_list:\n symbol.type = self.OUT_PIN\n elif name_string in self.input_pin_list:\n symbol.type = self.IN_PIN\n else:\n symbol.type = self.NAME\n [symbol.id] = self.names.lookup([name_string])\n\n elif self.current_character.isdigit(): # number\n [symbol.id] = self.names.lookup([self.get_number()])\n symbol.type = self.NUMBER\n\n elif self.current_character == \"=\": # equals\n symbol.type = self.EQUALS\n self.advance()\n\n elif self.current_character == \",\": # comma\n symbol.type = self.COMMA\n self.advance()\n\n elif self.current_character == \";\": # semicolon\n symbol.type = self.SEMICOLON\n self.advance()\n\n elif self.current_character == \".\": # period\n symbol.type = self.PERIOD\n self.advance()\n\n elif self.current_character == \"\": # end of file\n symbol.type = self.EOF\n self.advance()\n\n elif self.current_character == \":\": # colon\n symbol.type = self.COLON\n 
self.advance()\n elif self.current_character == \"{\": # left curly\n symbol.type = self.LEFT_CURLY\n self.advance()\n\n elif self.current_character == \"}\": # right curly\n symbol.type = self.RIGHT_CURLY\n self.advance()\n\n else: # not a valid character\n self.advance()\n\n # Record the current line and position in the symbol.\n symbol.line = self.location()[0]\n symbol.position = self.location()[1]\n\n return symbol", "title": "" }, { "docid": "3c856e920273f4d3298e5a8a633811c1", "score": "0.43137434", "text": "def backward_char_extend_selection(self, e): #\n self.l_buffer.backward_char_extend_selection(self.argument_reset)\n self.finalize()", "title": "" }, { "docid": "0ea89846cf8c4f0fde69890f244a1eec", "score": "0.43024677", "text": "def run_last(self):\n if not self.ctx.workchain_base.is_finished_ok:\n return self.exit_codes.ERROR_MAIN_WC\n\n if self.ctx.need_fin_step:\n seekpath_parameters = self.inputs.seekpath_dict.get_dict()\n out_structure = self.ctx.workchain_base.outputs.output_structure\n result = get_explicit_kpoints_path(out_structure, **seekpath_parameters)\n new_calc = self.ctx.workchain_base.get_builder_restart()\n new_calc.structure = result['primitive_structure']\n new_calc.bandskpoints = result['explicit_kpoints']\n new_param = drop_md_keys(new_calc.parameters.get_dict())\n new_calc.parameters = orm.Dict(dict=new_param)\n running = self.submit(new_calc)\n self.report(f'Launched SiestaBaseWorkChain<{running.pk}> to calculate bands.')\n return ToContext(final_run=running)", "title": "" }, { "docid": "81ba031a6211909ac1cad3aca0b6e0c8", "score": "0.43021768", "text": "def lastChar(cursor,n=1):\n\t\tif cursor.atBlockStart():\n\t\t\treturn '\\n'\n\t\telse :\n\t\t\tcur_tmp=QtGui.QTextCursor(cursor)\n\t\t\tcur_tmp.clearSelection()\n\t\t\tfor i in range(n-1):\n\t\t\t\tcur_tmp.movePosition(QtGui.QTextCursor.Left,\n\t\t\t\t\t\t\t\t\t\t\t\tQtGui.QTextCursor.MoveAnchor)\n\t\t\t\tif cur_tmp.atBlockStart():\n\t\t\t\t\treturn '\\n'\n\t\t\tcur_tmp.movePosition (QtGui.QTextCursor.Left,\n\t\t\t\t\t\t\t\t\t\t\t\tQtGui.QTextCursor.KeepAnchor)\n\t\t\ttext = cur_tmp.selectedText ()\n\t\t\tfor k,v in TEDictCharReplace.items():\n\t\t\t\ttext = text.replace(v,k)\n\t\t\treturn text", "title": "" }, { "docid": "7efedee20f29df7cd0bd808989b8d84a", "score": "0.43017977", "text": "def chord_symbol_root(self, figure):\n pass", "title": "" }, { "docid": "876c84d768fdf0a1b39da50dbebda4d7", "score": "0.42994273", "text": "def _return_surface(grid, surface):\n return surface", "title": "" }, { "docid": "457fc77befd47369affa403ea217d9f1", "score": "0.429887", "text": "def lightning_finish(self):\n self.lightning_num -= 1\n self._grid_view.off('select', self.remove)\n self._grid_view.on('select', self.activate)\n self.lightning_button.configure(text=\"Lightning (\"+ str(self.lightning_num) +\")\",\n fg =\"black\")", "title": "" }, { "docid": "89a85d6337506ec454ee597daf00e3ab", "score": "0.42980072", "text": "def gl_end(self):\n gl_drawable = gtk.gtkgl.DrawingArea.get_gl_drawable(self)\n\tif gl_drawable.is_double_buffered():\n gl_drawable.swap_buffers()\n glFlush() \n gl_drawable.gl_end()", "title": "" }, { "docid": "f236c00326f43609034f44f6d53992c2", "score": "0.42973763", "text": "def _draw_back_button(self)->None:\n self.back_btn_rect.left = 20\n self.back_btn_hover = self._hovered(self.x, self.y, self.back_btn_rect)\n\n if self.back_btn_hover:\n self.back_btn_rect.left = self.shift\n else:\n self.back_btn_rect.left = 20\n self.screen.blit(self.back_btn, self.back_btn_rect)", "title": "" }, { "docid": 
"127879449c71b3148de06e6b4a78f00b", "score": "0.42965722", "text": "def wait():\r\n MessageDialog.showinfo('', message='Click to draw new shape')\r\n win.canvas.delete('all')", "title": "" }, { "docid": "e676939553dd7629f8af5889dcfa6041", "score": "0.42803866", "text": "def display_curr_shape(self):\n if not self.curr_shape: return\n self.shapeDisplay.reset_curr_shape()\n shape = deepcopy(self.shapes[self.curr_shape])\n self.shapeDisplay.place_shape(\n shape,\n self.shapeDisplay.rows//2-1,\n self.shapeDisplay.cols//2)", "title": "" }, { "docid": "63fbc38de89bf91d1f3fb501efe4f8bc", "score": "0.42793566", "text": "def finish(self):\n self.stop()\n self.R.stop_mode = 'coast' # free motor\n self.L.stop_mode = 'coast' # free motor\n self.C.stop_mode = 'coast'\n self.led.left.color = LED.COLOR.AMBER\n self.led.left.on()\n self.beep()\n self.led.left.color = LED.COLOR.GREEN\n self.led.left.on()\n sys.exit(0)", "title": "" }, { "docid": "b5c4362061d4158395598c928154dfd4", "score": "0.4279079", "text": "def press_exit1(self, event):\n self.button3.config(image=self.exit_press)", "title": "" }, { "docid": "8a6c7301388e64eb2b834226dfa51eb9", "score": "0.42744955", "text": "def rendSymbol(self, mcanv, va):\n sym = mcanv.syms.getSymByAddr(va)\n if sym != None:\n mcanv.addVaText(\"%s:\\n\" % repr(sym), va)", "title": "" }, { "docid": "82502e1238e79cfbe64136faa512ab37", "score": "0.42710477", "text": "def draw(self, pen):", "title": "" }, { "docid": "594c39d5833cb932995b145838c7167f", "score": "0.42710364", "text": "def close_rings(self):\r\n # Closing the open rings.\r\n geom_close_rings(self._ptr)", "title": "" }, { "docid": "60efbad54a9ad207c9ba52792bfa128b", "score": "0.42624617", "text": "def canvasPressEvent(self, e):\n if e.button() == Qt.LeftButton:\n if self.status == 0:\n self.rb.reset(QgsWkbTypes.PolygonGeometry)\n self.status = 1\n self.rb.addPoint(self.toMapCoordinates(e.pos()))\n else:\n if self.rb.numberOfVertices() > 2:\n self.status = 0\n self.selectionDone.emit()\n geometry = self.rb.asGeometry()\n self.parent.draw_poly(geometry)\n else:\n self.reset()", "title": "" }, { "docid": "fe3a1a2c9690700c3a7496d10d77e281", "score": "0.42581996", "text": "def get_finish_token():\n return Token('empty', '')", "title": "" }, { "docid": "0ae813218c4084a18512bff9e4357d84", "score": "0.42575604", "text": "def generateFooter(self):\n self.ofile.write('endfeature\\n')", "title": "" }, { "docid": "d760542a897e3a5bf2ebf34ad9176f1b", "score": "0.42521128", "text": "def paint_close_button(self):\n #print 1\n #print dir(e)\n #e.accept()\n #paint_rect = e.rect()\n\n for i in range(self.count()):\n rect = self.tabRect(i)\n\n if not self.rect().contains(rect):\n continue # would be nice to optimise\n\n #print 'tab: ', rect\n x,y,r,t = rect.getCoords()\n #print rect.getCoords()\n #rqt = QtCore.QRect(x+(r/2),y,r+1,t+1)\n rqt = self.tab_close_button_rect(i)\n #QtCore.QRect.right()\n\n #QtCore.QRect.adjust()\n #print 'rt side:', rqt\n #print e.region()\n p = QtGui.QPainter()\n #p.setRenderHint(QtGui.QPainter.Antialiasing)\n p.begin(self)\n p.setBrush(self.brush)\n if i == self.over_button and self.mouse_over_rect:\n brush = QtGui.QBrush(QtCore.Qt.gray)\n p.setBrush(brush)\n p.setPen(None)\n p.setRenderHint(QtGui.QPainter.Antialiasing)\n #p.drawEllipse(rqt)\n\n p.setPen(self.pen)\n self.pen.setWidth(2)\n if i == self.over_button and self.mouse_over_rect:\n pen = QtGui.QPen(QtCore.Qt.white)\n pen.setWidth(2)\n p.setPen(pen)\n\n #p.save()\n\n #p.drawText(rect, QtCore.Qt.AlignCenter, 'tab')\n\n #x,y,r,t = rqt.x, 
rqt.y, rqt.right(), rqt.top()\n #bl = rqt.bottomLeft()\n #tr = rqt.topRight()\n #p.drawLine(bl, tr)\n\n a = 2\n\n rqt.adjust(a,a,-a,-a)\n p.drawLine(rqt.bottomLeft(), rqt.topRight())\n p.drawLine(rqt.topLeft(), rqt.bottomRight())\n #p.restore()\n # print rqt\n p.end()", "title": "" }, { "docid": "009606bcb96d136f94ac59f1e3375b03", "score": "0.42514744", "text": "def surface(self):\n\t\tif self._c < 0:\n\t\t\treturn \"concave\"\n\t\telif self._c > 0:\n\t\t\treturn \"convex\"\n\t\telif self._c == 0:\n\t\t\treturn \"plane\"", "title": "" }, { "docid": "99ca10f8aa12da8303d90d1b32d60982", "score": "0.42497492", "text": "def make_save_and_close_button(self):\n # this button is bound in tkGUI_main_window, note the lack of a callback\n # method here\n button = tk.Button(self.root, text='Save and close', fg='black', bg='DeepSkyBlue2',\n height=2, width=9, wraplength=60)\n button.grid(row=self.row_number, column=1, pady=10)\n return button", "title": "" }, { "docid": "0d8ae4831ee7812af4d477ff6a423717", "score": "0.4241819", "text": "def streek_fatt(self):\n self._context.stroke_preserve()", "title": "" }, { "docid": "71cdce533b0424b657b0b7b847a77169", "score": "0.42399463", "text": "def black_king_graphic(self):\n #Make defined black_king graphic\n pass", "title": "" }, { "docid": "2b7e73a8b3d1e8541e22563bf9ecc1b4", "score": "0.42385155", "text": "def draw_shape(self, ctx, shape, style, fill_or_stroke=True, id=None):\n #print \"type: \", type, \"symbol\"\n type, symbol, inline_style, points = shape[0], shape[1], shape[2], shape[3:]\n #print \"shape: \", type, symbol, inline_style\n \n self.styler.setup_style(ctx, type, style) # reikia nustatyti figuros stiliu iki to kai padarome save(), nes kitaip po restore() mes ji prarastume\n # context_listener.run() reikia kviesti po restore, tam kad vel dirbtume su normaliom koordinatem (o ne transformuotom su translate ar rotate)\n\n ctx.save()\n \n if \"translate\" in style:\n ctx.translate(*style.get(\"translate\"))\n if \"rotate\" in style: \n ctx.rotate(style.get(\"rotate\"))\n \n if type == fshape.POINT:\n self.draw_point2(ctx, points, style, fill_or_stroke, id)\n \n elif type == fshape.POLYLINE:\n self.draw_line(ctx, points, style, fill_or_stroke, id)\n \n elif type == fshape.CURVE:\n self.draw_curve2(ctx, points, style, fill_or_stroke, id)\n \n elif type == fshape.POLYGON:\n self.draw_polygon(ctx, points, style, fill_or_stroke, id)\n\n elif type == fshape.POLYGON_CURVE:\n self.draw_polygon_curve(ctx, points, style, fill_or_stroke, id)\n\n elif type == fshape.TEXT: # unformatted text\n self.draw_text(ctx, points, style, fill_or_stroke, id)\n \n else:\n print \"WTF@\"\n \n ctx.restore() # toku budu bus atstatyti defaultiniai settingai! 
nereiks tikrinti kokie fill_rule ar panasiai nustatyti simboliu paisymo metu\n \n \n\n #if context_listener and not composite:\n # context_listener.run(ctx, type, transform)\n \n #if fill_or_stroke:\n # self.styler.render_shape(ctx, type, style) \n #- sita reikia nakinti ir padaryti kad metodai patys rupintusi fill/stroke ir tikrintu context listeneri.\n # nes pvz linija su dviem krastai iskarto nupaiso dvi savo sonines linijas tai ten reikia ir kviesti context listeneri?\n\n \"\"\"\n if composite:\n bbox = self.css.get_bbox(Shape.decompress(shape))\n ctx.rectangle(bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1])\n ctx.set_source_rgb(255,0,0)\n ctx.set_line_width(1.0)\n ctx.set_dash((), 0)\n ctx.stroke()\n \"\"\"", "title": "" }, { "docid": "1300342c08823a9d2c6d4643ea62ebd0", "score": "0.4234302", "text": "def TTF_CloseFont(font):\n return _ctypes[\"TTF_CloseFont\"](font)", "title": "" }, { "docid": "2ccc99012e0c1175d791a4c1e19158af", "score": "0.42332587", "text": "def draw_symbols(im, rects, stocks):\n\n for rect, stock in zip(rects, stocks):\n image.draw_text(im, stock.symbol, rect)", "title": "" }, { "docid": "b5f96a806a1e3b2274ff0a1683e9f6bf", "score": "0.42320928", "text": "def draw(self, surf):\n if self.lifespan == 0:\n return\n surf.set_at(self.pos, self.colour)\n self.pos = next(self.path_factory)\n self.lifespan -= 1", "title": "" }, { "docid": "48ce1f7ecf247ce1667c884d3ba5a3c3", "score": "0.4228171", "text": "def render(self):\n egi.set_pen_color(name=self.color)\n egi.circle(self.pos, self.radius, True)", "title": "" }, { "docid": "3717c8d0c079c246767779207695df1b", "score": "0.42272443", "text": "def draw_curve(self):\r\n t = turtle.Turtle()\r\n t.speed(0)\r\n t.shape(\"blank\")\r\n t.right(self.sta)\r\n for n in self.seq:\r\n if n == \"0\":\r\n t.right(self.ang)\r\n elif n == \"1\":\r\n t.left(self.ang)\r\n t.forward(self.size)", "title": "" }, { "docid": "6111ccf1121e359acc06b42a9d9cd2f6", "score": "0.42270496", "text": "def _exit_graphics():\n sys.exit()", "title": "" } ]
ed0f05833051f5ab1172a28aaf5117fc
sets up the key word hook and the unhook state
[ { "docid": "3d3e830b7ae4c2e4fd11f08eeea8e31b", "score": "0.6124911", "text": "def enter(self):\n super().enter()\n self.hook_handler = keyboard.add_word_listener(self.context.option_data_ref.substitute_keyword,\n self.key_word_replace_callback,\n triggers=self.context.option_data_ref.trigger_key,\n match_suffix=self.context.option_data_ref.ignore_prefix,\n timeout=self.context.option_data_ref.timeout)\n\n self.context.context_menu_unhook_ref.triggered.disconnect()\n self.context.context_menu_unhook_ref.triggered.connect(self.quit)", "title": "" } ]
[ { "docid": "fa7e7f9d3a0baac266a7baff1c29423e", "score": "0.67543864", "text": "def quick_hook_event(self):\n self.quit()\n self.context.state = self.context.keyword_hook_state\n self.context.state.enter()", "title": "" }, { "docid": "5e2fb428e4e4e7e465e7c4c5042348fa", "score": "0.66187304", "text": "def reset(self):\n with self.lock:\n self.hooks = dict()", "title": "" }, { "docid": "b71df49d5f062f19e1d4303a53de4f46", "score": "0.6176436", "text": "def Hook(self):\n pass", "title": "" }, { "docid": "1a44c459a48a061631f4128f4db81594", "score": "0.614566", "text": "def key_up(self, key):\n if key == self.ctrls['boost']:\n self.boosting = False\n self.snake.speed_bonus = 0\n elif key == self.ctrls['action']:\n self.weapons[0].set_firing(False)", "title": "" }, { "docid": "528b7b9165e2ece8d7339da6d6fb338b", "score": "0.60911125", "text": "def _unregister_key_bindings():", "title": "" }, { "docid": "072d24cfa8493b5fe3c3902fd5d12e67", "score": "0.5994231", "text": "def toggle_inputhook_flag(self, state):\r\n self.inputhook_flag = state", "title": "" }, { "docid": "ae12b28f42070743b0554445af191dea", "score": "0.5872949", "text": "def post_hook(self, state):\n self.state = state\n if self._user_defined_post_hook is not None:\n self._user_defined_post_hook(self)", "title": "" }, { "docid": "4d1e535deb7fbfb3f2c3b9c84b3c5298", "score": "0.58664966", "text": "def resumeHook(self):\n # Empty ", "title": "" }, { "docid": "b93e8b96f352b241b5f6f8da3556913c", "score": "0.5864768", "text": "def unhook(self):\n raise NotImplementedError", "title": "" }, { "docid": "fc735dfd7001585ff3106fe0dbc9666d", "score": "0.57140607", "text": "def prepare_intercept(callback):\n _setup_name_tables()\n \n def process_key(event_type, vk, scan_code, is_extended):\n global shift_is_pressed, altgr_is_pressed, ignore_next_right_alt\n #print(event_type, vk, scan_code, is_extended)\n\n # Pressing alt-gr also generates an extra \"right alt\" event\n if vk == 0xA5 and ignore_next_right_alt:\n ignore_next_right_alt = False\n return True\n\n modifiers = (\n ('shift',) * shift_is_pressed +\n ('alt gr',) * altgr_is_pressed +\n ('num lock',) * (user32.GetKeyState(0x90) & 1) +\n ('caps lock',) * (user32.GetKeyState(0x14) & 1) +\n ('scroll lock',) * (user32.GetKeyState(0x91) & 1)\n )\n entry = (scan_code, vk, is_extended, modifiers)\n if entry not in to_name:\n to_name[entry] = list(get_event_names(*entry))\n\n names = to_name[entry]\n name = names[0] if names else None\n\n # TODO: inaccurate when holding multiple different shifts.\n if vk in shift_vks:\n shift_is_pressed = event_type == KEY_DOWN\n if scan_code == 541 and vk == 162:\n ignore_next_right_alt = True\n altgr_is_pressed = event_type == KEY_DOWN\n\n is_keypad = (scan_code, vk, is_extended) in keypad_keys\n return callback(KeyboardEvent(event_type=event_type, scan_code=scan_code or -vk, name=name, is_keypad=is_keypad))\n\n def low_level_keyboard_handler(nCode, wParam, lParam):\n try:\n vk = lParam.contents.vk_code\n # Ignore the second `alt` DOWN observed in some cases.\n fake_alt = (LLKHF_INJECTED | 0x20)\n # Ignore events generated by SendInput with Unicode.\n if vk != VK_PACKET and lParam.contents.flags & fake_alt != fake_alt:\n event_type = KEY_UP if wParam & 0x01 else KEY_DOWN\n is_extended = lParam.contents.flags & 1\n scan_code = lParam.contents.scan_code\n should_continue = process_key(event_type, vk, scan_code, is_extended)\n if not should_continue:\n return -1\n except Exception as e:\n print('Error in keyboard hook:')\n traceback.print_exc()\n\n return 
CallNextHookEx(None, nCode, wParam, lParam)\n\n WH_KEYBOARD_LL = c_int(13)\n keyboard_callback = LowLevelKeyboardProc(low_level_keyboard_handler)\n handle = GetModuleHandleW(None)\n thread_id = DWORD(0)\n keyboard_hook = SetWindowsHookEx(WH_KEYBOARD_LL, keyboard_callback, handle, thread_id)\n\n # Register to remove the hook when the interpreter exits. Unfortunately a\n # try/finally block doesn't seem to work here.\n atexit.register(UnhookWindowsHookEx, keyboard_callback)", "title": "" }, { "docid": "30cc8b97388dc2558381b3692f0ae5de", "score": "0.5707791", "text": "def tearDown(self):\n super(HookTest, self).tearDown()\n\n config.set_app(self._old_app)\n config.set_session_user_setter(self._old_session_hook)\n config.set_db_select_hook(self._old_db_hook)", "title": "" }, { "docid": "922d43672fac223a564e06be7392861e", "score": "0.56973743", "text": "def doUnhook(self, handler):\n pass", "title": "" }, { "docid": "e4c6257dbfea2c6ffff53a91bd1748bd", "score": "0.5693018", "text": "def dispatch_hook(key, hooks, hook_data, **kwargs):\n ...", "title": "" }, { "docid": "b61ece174bc15567ac1a34ab5f92baeb", "score": "0.5605736", "text": "def startup_hook():\n pass", "title": "" }, { "docid": "228a820a801ca05570d27352dfe783d6", "score": "0.56044537", "text": "def process_keyboard_state(self):\n keys_to_append = []\n keys_to_remove = []\n\n for event in pygame.event.get():\n # test events, set key states\n if event.type == pygame.KEYDOWN:\n keys_to_append.append(event.key)\n elif event.type == pygame.KEYUP:\n keys_to_remove.append(event.key)\n elif event.type == pygame.QUIT:\n keys_to_append.append(27)\n\n for key in keys_to_append:\n self.pressed_keys.add(key)\n\n for key in keys_to_remove:\n if key in self.pressed_keys:\n self.pressed_keys.remove(key)", "title": "" }, { "docid": "e8a5f389c9274223ad76b2ff28d9eca3", "score": "0.55853033", "text": "def setup(requestbin, request, threescale):\n threescale.webhooks.setup(\"Keys\", requestbin.url)\n request.addfinalizer(threescale.webhooks.clear)", "title": "" }, { "docid": "d108cb4f285386eefcc60e21cf76e2fd", "score": "0.55702436", "text": "def callback(event):\r\n if keyboard.is_pressed(event.name):\r\n if event.name not in getExceptions() and len(event.name) > 1:\r\n # print event.name.decode(\"utf-8\").encode(\"windows-1252\").decode(\"utf-8\")\r\n return\r\n if event.name == \"backspace\":\r\n # print \"in backspace\"\r\n if len(cKeyboard.currInput) > 0:\r\n cKeyboard.currInput.pop()\r\n\r\n if bool(cKeyboard.dictionaries) and len(cKeyboard.dictionaries) > 0:\r\n\r\n cKeyboard.dictionaries.pop()\r\n cKeyboard.currentDict = cKeyboard.dictionaries[-1] if len(cKeyboard.dictionaries) > 0 else {}\r\n else:\r\n cKeyboard.currentDict = {}\r\n print cKeyboard.currInput\r\n return\r\n if event.name.isalpha() and keyboard.is_pressed('shift+' + event.name):\r\n cKeyboard.currInput.append(event.name.upper())\r\n else:\r\n cKeyboard.currInput.append(event.name)\r\n print cKeyboard.currInput\r\n # use ckcodes if currdict is empty\r\n this_dict = cKeyboard.codes if bool(cKeyboard.currentDict) is False else cKeyboard.currentDict\r\n # print this_dict\r\n # try:\r\n new_dict = updateDictionary(this_dict, cKeyboard.currInput)\r\n # except UnicodeDecodeError:\r\n\r\n print new_dict\r\n\r\n if bool(new_dict) and len(new_dict) == 1 and bool(cKeyboard.currentDict.get(\"\".join(cKeyboard.currInput))):\r\n print \"one item \"\r\n print new_dict\r\n cKeyboard.state = \"found_code\"\r\n\r\n elif not bool(new_dict):\r\n # Let's check whether the previous input key exists in the 
dictionary\r\n if bool(cKeyboard.currentDict.get(\"\".join(cKeyboard.currInput[:-1]))):\r\n print \"Yay I exist \" # + char\r\n cKeyboard.state = \"found_code_with_extra_char\"\r\n elif bool(updateDictionary(cKeyboard.codes, cKeyboard.currInput[-1])):\r\n print \"last character valid\"\r\n cKeyboard.state = \"last_valid\"\r\n else:\r\n print \"not found\"\r\n cKeyboard.currInput = []\r\n cKeyboard.dictionaries = []\r\n cKeyboard.currentDict = {}\r\n # it is not the case that if the length is on then the user typed all the character codes\r\n # an exception is ae+: if the user types ae, len(new_dict) is one as its the only code with ae\r\n\r\n else:\r\n print \"append: \"\r\n print new_dict\r\n cKeyboard.currentDict = new_dict\r\n cKeyboard.dictionaries.append(new_dict)\r\n\r\n if cKeyboard.state == \"found_code\" or cKeyboard.state == \"found_code_with_extra_char\" \\\r\n or cKeyboard.state == \"last_valid\":\r\n\r\n\r\n if cKeyboard.state == \"found_code\":\r\n string = cKeyboard.currentDict.get(\"\".join(cKeyboard.currInput))\r\n print \"found: \" + string\r\n writeString(string, len(cKeyboard.currInput))\r\n # keyboard.send(\"backspace\")\r\n cKeyboard.currInput = []\r\n cKeyboard.dictionaries = []\r\n cKeyboard.currentDict = {}\r\n elif cKeyboard.state == \"found_code_with_extra_char\":\r\n string = cKeyboard.currentDict.get(\"\".join(cKeyboard.currInput[:-1]))\r\n print \"found_code_with_extra_char\" + cKeyboard.currInput[-1]\r\n # keyboard.send(\"backspace\")\r\n writeString(string, len(cKeyboard.currInput))\r\n keyboard.send(cKeyboard.currInput[-1])\r\n else:\r\n last_char = cKeyboard.currInput[-1]\r\n cKeyboard.currInput = []\r\n cKeyboard.dictionaries = []\r\n cKeyboard.currentDict = {}\r\n writeString(last_char, 1)\r\n cKeyboard.state = \"nothing\"", "title": "" }, { "docid": "26e6c0be4bf35f3ebd59ffd603880e04", "score": "0.552001", "text": "def user32_SetKeyboardState(jitter):\n ret_ad, args = jitter.func_args_stdcall([\"lpKeyState\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "title": "" }, { "docid": "008a9a75fbfefcceff07cfa6e8fcebd5", "score": "0.54986674", "text": "def SetHook(*args):\n return _wingdi.MainWnd_SetHook(*args)", "title": "" }, { "docid": "4d27e3937444e47ab681d998f5839820", "score": "0.5488211", "text": "def unhandled_input(txt, key):\n if key in (\"`\"):\n if main_window_stack.current + 1 >= main_window_stack.widget_count:\n main_window_stack.current = 0\n else:\n # don't use the fake setter, it's doing some weird modulo stuff\n # maybe after reviewing the module code more...\n main_window_stack.current += 1\n\n if key in (\"tab\"):\n # rudimentary focus bouncer for now\n # ideally focus bounce will toggle buffers in the future\n if mainframe.focus_position == 2:\n mainframe.focus_position = 0\n else:\n mainframe.focus_position = 2\n return\n\n if key in (\"enter\"):\n\n game_state.history_scroll_mode = False # toggle history scroll mode off\n\n if len(txt.edit_text) == 0:\n \"\"\" ignore an empty command\n \"\"\"\n return\n\n submitted_command = txt.edit_text\n\n # used to have a command splitter here, decided not to use it\n game_state.input_history.append(submitted_command)\n game_state.command_queue.put(submitted_command.encode(\"utf-8\"))\n\n txt.set_edit_text(\"\")\n txt.set_edit_pos(0)\n\n return\n\n if key in (\"up\", \"down\"):\n\n # deal with the 0 history case here\n if len(game_state.input_history) == 0:\n return\n\n # enter history scroll mode until the user presses enter\n if 
game_state.history_scroll_mode == False:\n game_state.history_scroll_mode = True\n game_state.input_history_counter = len(game_state.input_history) - 1\n\n # don't do this if you just set it to true! (elif)\n elif game_state.history_scroll_mode == True:\n\n if key in (\"up\"):\n if game_state.input_history_counter > 0:\n game_state.input_history_counter -= 1\n\n if key in (\"down\"):\n if (\n game_state.input_history_counter\n < len(game_state.input_history) - 1\n ):\n game_state.input_history_counter += 1\n\n input_box.set_edit_text(\n game_state.input_history[game_state.input_history_counter]\n )\n input_box.set_edit_pos(len(txt.edit_text))\n return\n\n if key in (\"left\"):\n input_box.set_edit_text(\"\")\n input_box.set_edit_pos(len(txt.edit_text))\n return\n\n if key in (\"right\"):\n \"\"\"\n interestingly, because of urwid-readline, i can use right and left arrows\n but only when there is already text on the line, and not on the far edges\n so on the far left, a left key will trigger this\n on the far right, a right key will trigger unknown key: right\n \"\"\"\n # need the mutex because this uses a function of the underlying deque\n # see: https://stackoverflow.com/a/6518011\n with game_state.rt_command_queue.mutex:\n game_state.rt_command_queue.queue.clear()\n return\n\n # not working\n if key in (\"ctrl q\", \"ctrl Q\"):\n # raise urwid.ExitMainLoop()\n # quit()\n pass\n\n # input_box.set_edit_text(\"unknown key: \" + repr(key))\n # input_box.set_edit_pos(len(txt.edit_text))\n return", "title": "" }, { "docid": "d7d58b60d5adae056f4ccb6e997a275b", "score": "0.5481946", "text": "def breakHook(self):\n # Empty ", "title": "" }, { "docid": "ee3e139c482eedc8f40a2fc2659f6129", "score": "0.548072", "text": "def hook(self):\n raise NotImplementedError", "title": "" }, { "docid": "15aecf20c4257e0d355d7453822856fe", "score": "0.5464912", "text": "def changekey():", "title": "" }, { "docid": "97e72ecf9ba145721a6cb2dc338a47d2", "score": "0.545077", "text": "def hookwrapper(self,f):", "title": "" }, { "docid": "7ad4972b823be9509eb13f75239afdd3", "score": "0.5449053", "text": "def enable():\n from . 
import hook", "title": "" }, { "docid": "a5ed775541b9a4a5f1fcb00d9bdafba0", "score": "0.543276", "text": "def on_key_press(self, state, symbol, modifiers):\n pass", "title": "" }, { "docid": "bf926e8d1f0443b26cdf5a3cadf15769", "score": "0.5425722", "text": "def get_shell_unhook(self):\n pass", "title": "" }, { "docid": "a5c04c05ebcb04364d5bd78d2ee41bd4", "score": "0.54237795", "text": "def _hotkeys(self):\n if self.key != keys.NONE:\n self._wasd()\n self._move_idx()\n self._zoom()\n self._update_canvas()", "title": "" }, { "docid": "1a157ad3dac5cff535b4a734833e71d9", "score": "0.5413187", "text": "def afk(self):\n for key in self.pressed:\n self.event(key, down=False)\n self.pressed = set()", "title": "" }, { "docid": "4ccaa81a226424e4168baa15c26a4c80", "score": "0.54050964", "text": "def events(self):\n self.keys=self.context.press()", "title": "" }, { "docid": "4ccaa81a226424e4168baa15c26a4c80", "score": "0.54050964", "text": "def events(self):\n self.keys=self.context.press()", "title": "" }, { "docid": "c17cac2d67910409d070842266afd1df", "score": "0.5384919", "text": "def undo():\n pass", "title": "" }, { "docid": "c17cac2d67910409d070842266afd1df", "score": "0.5384919", "text": "def undo():\n pass", "title": "" }, { "docid": "916743cc2f6f9090564db220f120f4cc", "score": "0.5374963", "text": "def keyboard_up(self, *args):\n self._key_pressed = None", "title": "" }, { "docid": "1ce11c7ed3286f20ecb0647d0d89fb52", "score": "0.537423", "text": "def handleInput(self):\n c = self.screen.getch()\n if c in self.ignored_keys:\n return\n if c == 27:\n # ----------------------------------------\n # Escape: Terminate\n # ----------------------------------------\n self.running = False\n elif c == 260:\n # ----------------------------------------\n # Left arrow: Change mode left\n # ----------------------------------------\n self.modeIndex -= 1\n if self.modeIndex < 0:\n self.modeIndex = 3\n if isinstance(self.mode, DeleteState):\n self.mode.revertDeleteState()\n elif c == 261:\n # ----------------------------------------\n # Right arrow: Change mode Right\n # ----------------------------------------\n self.modeIndex += 1\n if self.modeIndex > 3:\n self.modeIndex = 0\n if isinstance(self.mode, DeleteState):\n self.mode.revertDeleteState()\n elif c in (127, curses.KEY_DC, curses.KEY_BACKSPACE):\n # ----------------------------------------\n # Backspace/Delete Char: pop old content from stack\n # ----------------------------------------\n self.searchString = self.searchString[:-1]\n if isinstance(self.mode, DeleteState):\n self.mode.revertDeleteState()\n elif c == 10:\n # ----------------------------------------\n # Enter/Return: <action> password\n # ----------------------------------------\n # Executes action and uses its return value to detect if npass\n # Should continue running\n self.running = self.mode.executeAction(\n pwid=self.filteredPasswordList[self.cursorIndex])\n if self.mode.requires_list_update:\n self.passwordList = getPasswordList()\n self.filteredPasswordList = FuzzyFilter(\n self.passwordList,\n self.searchString\n )\n oldfrec = self.frecency.get(self.filteredPasswordList[self.cursorIndex], [0, None])\n self.frecency[self.filteredPasswordList[self.cursorIndex]] = [oldfrec[0] - 1, date.today().isoformat()]\n\n elif c == 259 or c == curses.KEY_PPAGE:\n # ----------------------------------------\n # Up Arrow/PGUP: Go up in the menu\n # ----------------------------------------\n if self.cursorIndex == 0:\n self.cursorIndex = len(self.filteredPasswordList) - 1\n else:\n self.cursorIndex 
-= 1\n if isinstance(self.mode, DeleteState):\n self.mode.revertDeleteState()\n elif c == 258 or c == curses.KEY_NPAGE:\n # ----------------------------------------\n # Down Arrow: Go Down in the menu\n # ----------------------------------------\n if self.cursorIndex == len(self.filteredPasswordList) - 1:\n self.cursorIndex = 0\n else:\n self.cursorIndex += 1\n if isinstance(self.mode, DeleteState):\n self.mode.revertDeleteState()\n else:\n # ----------------------------------------\n # Letters/Numbers: perform search\n # ----------------------------------------\n self.searchString += chr(c)\n self.cursorIndex = 0\n if isinstance(self.mode, DeleteState):\n self.mode.revertDeleteState()", "title": "" }, { "docid": "27ee3f9540dbe0f9afa7b439b2907c03", "score": "0.53357184", "text": "def set_prewrite_hook(self, hook):\n self.prewrite_hook = hook", "title": "" }, { "docid": "8010f0054d88941c3292b235cce42a3b", "score": "0.5330612", "text": "def on_key_release(self, state, symbol, modifiers):\n pass", "title": "" }, { "docid": "483987129f9b943937b75467c3795614", "score": "0.53188777", "text": "def __post_init__(self):\r\n self.states = {\r\n State.MENU.value: self.process_menu,\r\n State.NEW_GAME.value: self.process_new_game,\r\n State.RESET.value: self.reset_game,\r\n State.PLAYING.value: self.process_playing,\r\n State.PLAYER_WON.value: self.process_win,\r\n State.PLAYER_LOST.value: self.process_loss,\r\n State.PLAYER_QUIT.value: self.process_quit,\r\n }", "title": "" }, { "docid": "3150f1e3fcd6b36c200e24a4489189ad", "score": "0.5314082", "text": "def bf2_init(self, hooker):\r\n pass", "title": "" }, { "docid": "c68393f4a47a6c0ca1159581b3a8911c", "score": "0.5287991", "text": "def bf2_deinit(self, hooker):\r\n pass", "title": "" }, { "docid": "949862f66ec87e9bac2b7132fcb66833", "score": "0.52871823", "text": "def _state_dict_hook(module, state_dict, prefix, local_metadata):\r\n if module.mode_8bit and module.mode != QuantizationMode.NONE:\r\n state_dict.pop(prefix + \"weight\", None)\r\n state_dict.pop(prefix + \"_step\", None)\r\n state_dict[prefix + \"quantized_weight\"] = state_dict[prefix + \"quantized_weight\"].char()\r\n else:\r\n state_dict.pop(prefix + \"quantized_weight\", None)\r\n state_dict.pop(prefix + \"_weight_scale\", None)\r\n # state_dict.pop(prefix + \"weight\", None)\r\n # state_dict.pop(prefix + \"_step\", None)\r\n # state_dict[prefix + \"quantized_weight\"] = state_dict[prefix + \"quantized_weight\"].char()\r", "title": "" }, { "docid": "a640e53abf8234856a30dfa9b30dc753", "score": "0.5286233", "text": "def overrideModifier(press=\"string\", clear=bool, release=\"string\"):\n pass", "title": "" }, { "docid": "019b56b7e7a7ce6cdd7d2a0c048fae58", "score": "0.5257038", "text": "def set_runtime_hooks(self):\n pass", "title": "" }, { "docid": "775424c423892b1d3dc8c59219c9d552", "score": "0.5256481", "text": "def reverse_run_hook(self, hook_type, args = [], kw = {}):\n return self._run_hook(hook_type, args, kw, reversed)", "title": "" }, { "docid": "b42b1ae0beee77d1fef1b759b4fb9a82", "score": "0.52233213", "text": "def make_rewind_key_up(self):\n if REWIND_KEY in self.pressed_keys:\n self.pressed_keys.remove(REWIND_KEY)", "title": "" }, { "docid": "12711af7d350704dad54ea883129b345", "score": "0.52133185", "text": "def on_key_press(self, event, sym, mod):\n pass", "title": "" }, { "docid": "f4e533e5ddc2f6d55593756a35ae8fa2", "score": "0.51988506", "text": "def setUp(self):\n # Ignore Pylint errors for catching any exception type, for this little\n # hacky section\n # pylint: 
disable=W0702\n try:\n self._old_app = config.get_app()\n except:\n self._old_app = None\n config.set_app(app)\n\n super(HookTest, self).setUp()\n\n try:\n self._old_session_hook = config.get_session_user_setter()\n except:\n self._old_session_hook = None\n\n try:\n self._old_db_hook = config.get_db_select_hook()\n except:\n self._old_db_hook = None\n # pylint: enable=W0702", "title": "" }, { "docid": "1d7688623eb772b92669fab306593cf6", "score": "0.5194025", "text": "def listener():\n \n def low_level_handler(nCode, wParam, lParam):\n \"\"\"\n Processes a low level Windows mouse event.\n \"\"\"\n event = KeyEvents(mouse_codes[wParam], lParam[0], lParam[1], lParam[2] == 32, lParam[3])\n\n if mouse_codes.get(wParam):\n returnval = None\n for handle in mouseHandlers:\n # return value from last handler will be used, obviously.\n returnval = handle(event)\n\n if returnval == -1: return -1\n if returnval == \"pass_event\":\n return windll.user32.CallNextHookEx(hook_id, nCode, wParam, lParam)\n\n #Be nice, return next hook\n return windll.user32.CallNextHookEx(hook_id, nCode, wParam, lParam)\n \n # Our low level handler signature.\n CMPFUNC = CFUNCTYPE(c_int, c_int, c_int, POINTER(c_void_p))\n # Convert the Python handler into C pointer.\n pointer = CMPFUNC(low_level_handler)\n #Added 4-18-15 for move to ctypes:\n windll.kernel32.GetModuleHandleW.restype = wintypes.HMODULE\n windll.kernel32.GetModuleHandleW.argtypes = [wintypes.LPCWSTR]\n # Hook both key up and key down events for common keys (non-system).\n windll.user32.SetWindowsHookExA.argtypes = (c_int, wintypes.HANDLE, wintypes.HMODULE, wintypes.DWORD)\n hook_id = windll.user32.SetWindowsHookExA(win32con.WH_MOUSE_LL, pointer, windll.kernel32.GetModuleHandleW(None), 0)\n global HOOK_ID\n HOOK_ID = hook_id\n\n # Register to remove the hook when the interpreter exits.\n atexit.register(windll.user32.UnhookWindowsHookEx, hook_id)\n try:\n msg = windll.user32.GetMessageW(None, 0, 0,0)\n windll.user32.TranslateMessage(byref(msg))\n windll.user32.DispatchMessageW(byref(msg))\n except:\n # print(\"Exception raised in mouse hook thread (maybe WM_QUIT)\")\n pass", "title": "" }, { "docid": "56cb6514b6ebf1d1d26c01ecdee538f5", "score": "0.51927394", "text": "def set_up_handlers(self):\n #called in __init__\n self.handlers = {\n curses.ascii.NL: self.h_exit_down,\n curses.ascii.CR: self.h_exit_down,\n curses.ascii.TAB: self.h_exit_down,\n curses.KEY_BTAB: self.h_exit_up,\n curses.KEY_DOWN: self.h_exit_down,\n curses.KEY_UP: self.h_exit_up,\n curses.KEY_LEFT: self.h_exit_left,\n curses.KEY_RIGHT: self.h_exit_right,\n \"^P\": self.h_exit_up,\n \"^N\": self.h_exit_down,\n curses.ascii.ESC: self.h_exit_escape,\n curses.KEY_MOUSE: self.h_exit_mouse,\n }\n self.complex_handlers = []", "title": "" }, { "docid": "f6037c0f28d37180cff6170e4eb85518", "score": "0.51916164", "text": "def rebind(self, opt):\n if not opt == \"key bindings\":\n self.u = self.keybind[opt][0]\n self.l = self.keybind[opt][1]\n self.d = self.keybind[opt][2]\n self.r = self.keybind[opt][3]\n self.title[\"text\"] = \"Snake - Use \" + opt + \" to control\"", "title": "" }, { "docid": "6af6df0d4999217eb6dc02269415e238", "score": "0.51911646", "text": "def __init__(self):\n\n #variables to store the states of the controll keys\n #values are true while the key is pressed and false when the key is not\n self.ping_keys = {'front':False,'back':False,'left':False,'right':False}\n self.move_keys = {'north':False,'south':False,'west':False,'east':False}\n self.interact_key = False\n self.hs_keys = 
{'space':False,'I':False,'C':False,'H':False}", "title": "" }, { "docid": "a5f3bdda7eeeaddbc1b0122ca2e03674", "score": "0.5179715", "text": "def setup(self, word: str):\n self.seen = set()\n \"\"\"keep track of what's been seen.\"\"\"\n self.mixer = LetterMixer()\n \"\"\"Main mixer class.\"\"\"\n self.alphabet_dict = self.mixer.alphabet_dict\n \"\"\"main alphabet dict.\"\"\"\n self.object_dict = self.mixer.object_dict\n \"\"\"letter object dictionary map.\"\"\"\n # Unset scroll values, we're going to reuse them\n try:\n del self.nav.values[curses.KEY_UP]\n del self.nav.values[curses.KEY_DOWN]\n except KeyError:\n pass\n\n self.set_word(word)\n # order is important here\n super().setup()\n\n self.let_idx = self._get_lpos()\n \"\"\"letter index.\"\"\"\n self.nav.add_item(\n NavItem(\n \"n\",\n \"Random Next\",\n ).set_func(self.new)\n )\n self.nav.add_item(\n NavItem(\n \"p\",\n \"Previous\",\n ).set_func(self.prev)\n )\n self.nav.add_item(\n NavItem(\n \"l\",\n \"shift right\",\n )\n .set_func(self.shift_left)\n .add_alias(curses.KEY_RIGHT)\n )\n self.nav.add_item(\n NavItem(\n \"h\",\n \"shift left\",\n )\n .set_func(self.shift_right)\n .add_alias(curses.KEY_LEFT)\n )\n self.nav.add_item(\n NavItem(\n \"k\",\n \"previous letter\",\n )\n .set_func(self.next_letter)\n .add_alias(curses.KEY_DOWN)\n )\n self.nav.add_item(\n NavItem(\n \"j\",\n \"next letter\",\n )\n .set_func(self.previous_letter)\n .add_alias(curses.KEY_UP)\n )\n self.nav.add_item(\n NavItem(\n \"c\",\n \"change text\",\n ).set_func(self.change_text)\n )\n self.is_mouse = False", "title": "" }, { "docid": "4355ef0b9ff03d910688c01596ec3447", "score": "0.51753706", "text": "def register_hooks(self):\r\n for module, name in self.nonpass_names.items():\r\n module.register_forward_hook(self.save_input_forward_hook)\r\n module.register_backward_hook(self.compute_fisher_backward_hook)", "title": "" }, { "docid": "f0cd2dc6b5c07d14b2cc8d0af70579c8", "score": "0.5175304", "text": "def key_interpretator(event):\n global direction, Pause, events\n events.append(event)", "title": "" }, { "docid": "1040d93c9ebce5bcc301d68883030fef", "score": "0.5171283", "text": "def flushUndo():\n pass", "title": "" }, { "docid": "4e36cf40a55b91d2f2c75b33329ed621", "score": "0.5141841", "text": "def before_after_hook(self, brok, obj):\n pass", "title": "" }, { "docid": "c8b4b0da30ffa117f5defde136469bf5", "score": "0.51185596", "text": "def handleKeyboard(self,key_event,map_obj):\r\n pass", "title": "" }, { "docid": "bc6f410cd3ff4980a80697c9bd3f1424", "score": "0.5116986", "text": "def on_key_release(self, symbol, modifiers):\n \"\"\" more if elif statements ..... 
ha \"\"\"\n if symbol == key.W:\n self.strafe[0] += 1\n elif symbol == key.S:\n self.strafe[0] -= 1\n elif symbol == key.A:\n self.strafe[1] += 1\n elif symbol == key.D:\n self.strafe[1] -= 1", "title": "" }, { "docid": "796470db595453469343bb7f5854355e", "score": "0.5110271", "text": "def clearHooks(self):\n for eachHook in self.info['hooks']:\n eachHook.setValid(False)\n del eachHook", "title": "" }, { "docid": "9bbb7a70b84875b94f1c2fb8d9f7ee05", "score": "0.510818", "text": "def init(self):\n self.connect_to_switches()\n self.reset_states()", "title": "" }, { "docid": "3b3adbd9ea4032f85b85761344eab8e1", "score": "0.5104156", "text": "def add_hook(self, hook):\n h = hook.hash\n self.hooks[h] = hook", "title": "" }, { "docid": "0fb8cea4c1663ecbac925552e7f2f55a", "score": "0.509362", "text": "def controllerHook(self):\n # Empty ", "title": "" }, { "docid": "39c9764a6d8223e1d0d00f497cf5fca7", "score": "0.5086405", "text": "def on_exit(self):\n cocos.director.director.window.remove_handlers(self)\n super(KeyboardHandler, self).on_exit()", "title": "" }, { "docid": "827329e1d055f35e746854964b9d273e", "score": "0.5067307", "text": "def _state_dict_hook(module, state_dict, prefix, local_metadata):\r\n super()._state_dict_hook(module, state_dict, prefix, local_metadata)\r\n if module.mode_8bit:\r\n if module.mode == QuantizationMode.EMA:\r\n state_dict.pop(prefix + \"bias\", None)\r\n try:\r\n state_dict[prefix + \"_quantized_bias\"] = state_dict[\r\n prefix + \"_quantized_bias\"\r\n ].int()\r\n except KeyError:\r\n # in case there is no bias dont do anything\r\n pass\r\n else:\r\n state_dict.pop(prefix + \"_quantized_bias\", None)\r\n state_dict.pop(prefix + \"bias_scale\", None)", "title": "" }, { "docid": "d986df6b8886e180cb50b89ed7bbe147", "score": "0.50645196", "text": "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n if key == glfw.KEY_SPACE:\n glfw.set_time(0)\n if key == glfw.KEY_S:\n self.screenshot(\"screenshot.png\")", "title": "" }, { "docid": "286fd564381e3b69991b7b154c4de40c", "score": "0.50511944", "text": "def hook_Pass(state):\n DeepManticore(state).api_pass()", "title": "" }, { "docid": "0591befea12bee95a99247140f0d8dc2", "score": "0.5051104", "text": "def fold_off(self,widget,e=None):\n # e.hardware_keycode contains the current key, and self.keycode contains the previous key (modifier probably)\n if e.hardware_keycode==53 and self.keycode==64: # X + Alt\n self.__unfold_all()\n self.keycode=53\n if e.hardware_keycode==19 and self.keycode==64: # 0 + Alt (but real shortcut is Alt+Q now)\n self.__fold_all()\n self.keycode=19\n else:\n self.keycode=e.hardware_keycode", "title": "" }, { "docid": "95cb9a9f17e17f1c09e82dae1c0db199", "score": "0.5047289", "text": "def refresh_dict(self) -> None:\n self.spell = PySpellChecker(language=self.language, distance=1)", "title": "" }, { "docid": "66b0d11d9ba6f3d3ae2daa106b1d3b44", "score": "0.5045731", "text": "def key_down(self, key):\n if key == self.ctrls['boost']:\n self.boosting = True\n self.snake.speed_bonus = BOOST_SPEED\n elif key == self.ctrls['action']:\n # Has the potential to cause an endless loop.\n while self.weapons[0].ammo <= 0:\n self.weapons.rotate(1)\n self.weapons[0].set_firing(True)", "title": "" }, { "docid": "dff28e1757ca0ade5c3afb78e2f532b5", "score": "0.50375974", "text": "def 
set_postwrite_hook(self, hook):\n self.postwrite_hook = hook", "title": "" }, { "docid": "8af620540e72682ac81672c1e2472691", "score": "0.50367314", "text": "async def modify_hook() -> bool:\n return not self._destroyed", "title": "" }, { "docid": "3513b298aa2509c15399aeeb78426b78", "score": "0.50338143", "text": "def event_m10_10_x95(z54=_):\r\n \"\"\"State 0,1: Disable key guide\"\"\"\r\n DisableObjKeyGuide(z54, 1)\r\n \"\"\"State 2: End state\"\"\"\r\n return 0", "title": "" }, { "docid": "c6f70391e65b5dd78374098956f387a4", "score": "0.5029451", "text": "def keyReleased():\n lowerKey = str(key).lower()\n if lowerKey == 'w':\n red_robot.accel = False\n elif lowerKey == 's':\n red_robot.decel = False\n elif lowerKey == 'a':\n red_robot.turn_l = False\n elif lowerKey == 'd':\n red_robot.turn_r = False\n elif lowerKey == 'i':\n blue_robot.accel = False\n elif lowerKey == 'k':\n blue_robot.decel = False\n elif lowerKey == 'j':\n blue_robot.turn_l = False\n elif lowerKey == 'l':\n blue_robot.turn_r = False", "title": "" }, { "docid": "01941dc5cb1851bf6db9f6df4b0e7f37", "score": "0.50288355", "text": "def set_up_handlers(self):\n super(MemoGrid, self).set_up_handlers()\n self.handlers.update({\n ord(\"q\"): self.parent.exit_editing,\n \"Q\": self.parent.exit_editing,\n \"^Q\": self.parent.exit_editing,\n curses.ascii.CR: self.open_memo,\n curses.ascii.NL: self.open_memo\n })", "title": "" }, { "docid": "a5c8211e06d3acc8879780227534509f", "score": "0.50229925", "text": "def checkKeybinds(self):\n keys = pygame.key.get_pressed()\n\n for key in self.keybinds.keys():\n if keys[int(key)]:\n self.keybinds[key]() # Trigger function", "title": "" }, { "docid": "10053b8134fe12998bdc6fec2f1cb7cd", "score": "0.50213844", "text": "def handle_events(cls):\n word_obj = Word.word_obj \n for e in pg.event.get():\n if e.type == pg.QUIT:\n pg.quit()\n return\n \n # If we have not started typing a new word\n if e.type == pg.KEYDOWN:\n keypress = e.unicode\n if not Word.initiated:\n Word.initiate_word(keypress) \n if Word.initiated: \n Word.word_obj = Word.instances.pop(0)\n elif not word_obj:\n pass\n\n # If we have started typing a new word AND:\n # First OR condition: That word is not done and the keypress matches the corresponding letter.\n # Second OR condition: The word is done. 
\n elif (word_obj.current_index < word_obj.word_length and keypress == word_obj.word[word_obj.current_index]) or word_obj.current_index >= word_obj.word_length:\n # If the word is not done, increase index and continue.\n word_obj.current_index += 1\n # If the word is done, make following changes:\n if word_obj.current_index >= word_obj.word_length:\n Word.initiated = False\n Word.word_obj = None\n Game.score += 10\n Word.words_left -= 1\n Game.play_sound(Config.word_typed_music_path)\n # If the wave is over, make a new wave.\n if not Word.instances and Word.words_left == 0:\n Game.wave += 1\n Game.show_wave_screen()\n Word.fill_word_queue(Game.wave)", "title": "" }, { "docid": "b3896aa3307e9c1d0def21161bc5d282", "score": "0.5017753", "text": "def multiexit_exit_hook():\n # Make sure to the exit hook on standard exit only\n log.info('Interpreter shutting down')\n run_exitfuncs(0)", "title": "" }, { "docid": "35c3092506307a7371829398d50804da", "score": "0.501437", "text": "def pre_hook_fx(self, fx):\n self._pre_hook_fx = fx", "title": "" }, { "docid": "4ae8c373352a953e7ec1deb33c57d829", "score": "0.50126344", "text": "def setup(self):\n for hook in self.hooks:\n try:\n hook.setup()\n except (ImportError, AttributeError, TypeError, ValueError, Exception):\n pass", "title": "" }, { "docid": "00fdd6c0416be858070eabbf115d881b", "score": "0.5011742", "text": "def __init__(self):\r\n\r\n '''\r\n After finishing the script, the function mentioned will be called automatically.\r\n It is responsible to release any key that that was holded down\r\n '''\r\n atexit.register(self.__EndScript)\r\n\r\n '''\r\n This is the function called in the beggining of any function created here.\r\n If needed to change the way it get the function name and parameters, just\r\n have to change it here\r\n '''\r\n self.__action = \"self._RPyA__StoreActions(sys._getframe().f_code.co_name, locals())\"\r\n\r\n # Flag to store or not actions called in a list\r\n self.__fl_store_actions = True\r\n # Flag to stop on error\r\n self.__fl_stop_on_error = True\r\n # Flag to log on error\r\n self.__fl_log_on_error = True\r\n # Flag to log any information\r\n self.__fl_log_actions = True\r\n\r\n # List with actions called\r\n self.__actions = []\r\n # Log file\r\n self.__log_file = 'c:\\\\users\\\\' + getpass.getuser() + '\\\\documents\\\\LogRPyA.txt'\r\n\r\n # Key that can be holded or released\r\n self.__keys_list = ['\\t', '\\n', '\\r', ' ', '!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+',\r\n ',', '-', '.', '/', '0','1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?',\r\n '@', '[', '\\\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e','f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\r\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', 'accept',\r\n 'add', 'alt', 'altleft', 'altright', 'apps', 'backspace', 'browserback', 'browserfavorites',\r\n 'browserforward', 'browserhome', 'browserrefresh', 'browsersearch', 'browserstop', 'capslock',\r\n 'clear', 'convert', 'ctrl', 'ctrlleft', 'ctrlright', 'decimal', 'del', 'delete', 'divide', 'down',\r\n 'end', 'enter', 'esc', 'escape', 'execute', 'f1', 'f10', 'f11', 'f12', 'f13', 'f14', 'f15', 'f16',\r\n 'f17', 'f18', 'f19', 'f2', 'f20', 'f21', 'f22', 'f23', 'f24', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8',\r\n 'f9', 'final', 'fn', 'hanguel', 'hangul', 'hanja', 'help', 'home', 'insert', 'junja', 'kana', 'kanji',\r\n 'launchapp1', 'launchapp2', 'launchmail', 'launchmediaselect', 'left', 'modechange', 'multiply',\r\n 'nexttrack', 'nonconvert', 
'num0', 'num1', 'num2', 'num3', 'num4', 'num5', 'num6', 'num7', 'num8',\r\n 'num9', 'numlock', 'pagedown', 'pageup', 'pause', 'pgdn', 'pgup', 'playpause', 'prevtrack', 'print',\r\n 'printscreen', 'prntscrn', 'prtsc', 'prtscr', 'return', 'right', 'scrolllock', 'select', 'separator',\r\n 'shift', 'shiftleft', 'shiftright', 'sleep', 'space', 'stop', 'subtract', 'tab', 'up', 'volumedown',\r\n 'volumemute', 'volumeup', 'win', 'winleft', 'winright', 'yen', 'command', 'option', 'optionleft', 'optionright']\r\n\r\n # List of keys holded during execution\r\n self.__key_down_list = []\r\n self.__mouse_holding = False", "title": "" }, { "docid": "be35b02cd964d89392b0d11e17396874", "score": "0.5006482", "text": "def enter(self):\n self.context.main_window.switch_display(MainFloatingView.DisplayMode.normal_no_hook)\n self.context.context_menu_quick_hook_ref.setEnabled(True)\n self.context.context_menu_unhook_ref.setEnabled(False)", "title": "" }, { "docid": "91c4e91bf020e5210dfa85cfdc88d622", "score": "0.5003535", "text": "def undoCmd(self):", "title": "" }, { "docid": "9c9feb7cd605867e4067c7b5b3c24533", "score": "0.4997154", "text": "def teardown(self):\n for hook in self.hooks:\n try:\n hook.teardown()\n except (ImportError, AttributeError, TypeError, ValueError, Exception):\n pass", "title": "" }, { "docid": "e6a4793328f163a66f6858dbd7f4ffc1", "score": "0.49752516", "text": "def __init__(self):\n\n self.undo_commands = []\n self.redo_commands = []", "title": "" }, { "docid": "8ad75873063f950182ab62ba90b62bda", "score": "0.49673736", "text": "def exit(self, state, forced=False):\n\n # only unregister my keybindings\n # other keybindings are restored when entering previous mode\n for key, action in self.get_bindings(state).items():\n if not start in action:\n state.nav.input.unregister_key(key)\n\n state.nav.undraw()\n return True", "title": "" }, { "docid": "6a6aee01e74d99532adc5f56f30872b8", "score": "0.4954453", "text": "def onUpdateKeyState(self):\n self.keyboardStatusWidget.updateStatusSignal.emit(\n self._keys, self._modifiers, self._unknown_keys)", "title": "" }, { "docid": "56f9c801038a46bf00e2607a7e2f33c4", "score": "0.49541733", "text": "def before_processing(self, func):\n raise FrozenBotError(\"Can't add hooks to a bot at runtime\")", "title": "" }, { "docid": "7f0a0af66d2774aacce1c8f7735cef87", "score": "0.49484968", "text": "def handle_events(self):\n for event in pygame.event.get():\n if pygame.QUIT == event.type:\n self.running = False\n if self.STOPPED_PLAYING == event.type:\n self.music_play()\n elif pygame.KEYDOWN == event.type:\n if pygame.K_ESCAPE == event.key:\n self.running = False\n elif pygame.K_F1 == event.key:\n self.help()\n elif pygame.K_F2 == event.key:\n self.turn_music()\n elif pygame.K_F3 == event.key:\n self.change_level()\n elif pygame.K_F4 == event.key:\n self.change_deck()\n elif pygame.K_F5 == event.key:\n self.new_game()\n elif pygame.K_F9 == event.key:\n self.change_language()\n elif pygame.K_TAB == event.key and pygame.key.get_mods() & pygame.KMOD_SHIFT:\n if not self.game_over:\n self.player.actions(Actions.ChangeZoneDown)\n elif pygame.K_TAB == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeZoneUp)\n elif pygame.K_LEFT == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeRowDown)\n elif pygame.K_RIGHT == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeRowUp)\n elif pygame.K_UP == event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeCardUp)\n elif pygame.K_DOWN == 
event.key:\n if not self.game_over:\n self.player.actions(Actions.ChangeCardDown)\n elif pygame.K_SPACE == event.key:\n if not self.game_over:\n self.player.actions(Actions.Take)\n self.check_win()", "title": "" }, { "docid": "e3f31c1a02cc5a63ea6e77bfa8945a91", "score": "0.49421683", "text": "def az_hat_down_press():", "title": "" }, { "docid": "c87d38d110d455958b82c18d5ab6beaf", "score": "0.49383882", "text": "def register_hook(self, hook: str):\n if hook in self.hooks.keys():\n self.logger.warning(f\"{self.name}: hook {hook!r} is already declared.\")\n\n self.hooks[hook] = []", "title": "" }, { "docid": "ed2a03a8db56b10545a94ec462b653f4", "score": "0.49315685", "text": "def test_setup_git_hooks(self):\n repo = 'git@github.com:user/repository'\n self._add_path(os.path.join('repository', 'git_hooks'))\n os.path.islink.return_value = False\n unbox.main([repo])\n self.assertTrue(call('.git/hooks') in shutil.rmtree.call_args_list)\n self.assertTrue(call('../git_hooks', '.git/hooks') in\n os.symlink.call_args_list)", "title": "" }, { "docid": "71c60df68b65f0619986517de0de5fa3", "score": "0.49293432", "text": "def reset(self):\n self.unfinished_word = ''\n self.complete_command = ''\n self.leftover_args = ''\n self.current_command = ''\n self.subtree = None", "title": "" }, { "docid": "6306c6859f2ab04e2d516235a63d15ba", "score": "0.49272004", "text": "def stop(self) -> None:\n for h in self._hooks:\n h.remove()\n self._hooks.clear()\n self._previous_module_name = None\n self._memory_pre_forward = 0\n self._last_all_gather_memory = 0\n self._cumul_all_gather_memory.clear()", "title": "" }, { "docid": "7fe5add254b8d00bc70cfb5bb6889020", "score": "0.49229598", "text": "def reset(self):\n self.tab_presses = 0\n self.cycling = False\n self.completions = []\n self.compstr = \"\"\n self.vimiv.commandline.info.set_markup(self.compstr)", "title": "" }, { "docid": "720d574d8e6177741940fde9a0cb5c04", "score": "0.49136397", "text": "def setKeywords(self) -> None:\n # Add any new user keywords to leoKeywordsDict.\n d = self.keywordsDict\n keys = list(d.keys())\n for s in g.globalDirectiveList:\n key = '@' + s\n if key not in keys:\n d[key] = 'leokeyword'\n # Create a temporary chars list. 
It will be converted to a dict later.\n chars = [z for z in string.ascii_letters + string.digits]\n chars.append('_') # #2933.\n for key in list(d.keys()):\n for ch in key:\n if ch not in chars:\n chars.append(g.checkUnicode(ch))\n # jEdit2Py now does this check, so this isn't really needed.\n # But it is needed for forth.py.\n for ch in (' ', '\\t'):\n if ch in chars:\n # g.es_print('removing %s from word_chars' % (repr(ch)))\n chars.remove(ch)\n # Convert chars to a dict for faster access.\n self.word_chars: dict[str, str] = {}\n for z in chars:\n self.word_chars[z] = z", "title": "" }, { "docid": "9c6bcb2de1938e74f5f7577daa4172e5", "score": "0.49071124", "text": "def hook_TakeOver(state):\n return 1", "title": "" }, { "docid": "8bf2e625785c1a63922092d2298b31be", "score": "0.49062553", "text": "def unhandled_keypress(self, k):\n\n if k == \"f5\":\n self.save_file()\n elif k == \"f8\":\n raise urwid.ExitMainLoop()\n elif k == \"delete\":\n # delete at end of line\n self.walker.combine_focus_with_next()\n elif k == \"backspace\":\n # backspace at beginning of line\n self.walker.combine_focus_with_prev()\n elif k == \"enter\":\n # start new line\n self.walker.split_focus()\n # move the cursor to the new line and reset pref_col\n self.loop.process_input([\"down\", \"home\"])\n elif k == \"right\":\n w, pos = self.walker.get_focus()\n w, pos = self.walker.get_next(pos)\n if w:\n self.listbox.set_focus(pos, 'above')\n self.loop.process_input([\"home\"])\n elif k == \"left\":\n w, pos = self.walker.get_focus()\n w, pos = self.walker.get_prev(pos)\n if w:\n self.listbox.set_focus(pos, 'below')\n self.loop.process_input([\"end\"])\n else:\n return\n return True", "title": "" }, { "docid": "7993261e713b9798d47b232daf66824b", "score": "0.49037915", "text": "def keyboard(self, *args):\n self._key_pressed = args[0]", "title": "" }, { "docid": "1e6e0b803958c0515d3e5e100869fa53", "score": "0.49032888", "text": "def _reset_xt_alts47(self):\n self.clear_screen()\n self.switch_mainbuf()\n return True", "title": "" }, { "docid": "92eb743b367a234c964b5eb93bf4f6e6", "score": "0.49001333", "text": "def _setupLevelPreferenceHook():\n\n pass", "title": "" } ]
3ce530f4622894de87071fb3f7fbd997
Run the given command and return its output.
[ { "docid": "293cfa1552dc903c6d00fda3ffe53f9c", "score": "0.0", "text": "def run(command, shell=None):\n out_stream = subprocess.PIPE\n err_stream = subprocess.PIPE\n\n if shell is not None:\n p = subprocess.Popen(command, shell=True, stdout=out_stream,\n stderr=err_stream, executable=shell)\n else:\n p = subprocess.Popen(command, shell=True, stdout=out_stream,\n stderr=err_stream)\n (stdout, stderr) = p.communicate()\n\n return stdout, stderr", "title": "" } ]
[ { "docid": "0f8e2a2fc52817196221cd93eae9665e", "score": "0.8390274", "text": "def run_command(command):\n output = subprocess.getoutput(command)\n return output", "title": "" }, { "docid": "0629b87a77882a05030d4ef9c359d4e9", "score": "0.8349085", "text": "def run_command(command):\n args = command.split()\n output = check_output(args, stderr=STDOUT)\n return output.decode()", "title": "" }, { "docid": "ec7ada111ee37cbab13511ae1dbf2551", "score": "0.7955658", "text": "def run_command(self, command):\n print(f\" > {command}\")\n p = subprocess.Popen(shlex.split(command),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n p.wait()\n output = p.communicate()[0].decode('utf-8').strip()\n if (self.verbose or p.returncode != 0) and output:\n print(indent(output, ' ' * 4))\n if p.returncode == 0:\n return output\n else:\n raise Exception(f\"Command '{command}' failed\")", "title": "" }, { "docid": "2244cca255808b8dd16bd3df7baab33b", "score": "0.7911083", "text": "def run_command(cmd):\n return subprocess.check_output(cmd.split(' '), stderr=subprocess.STDOUT)\n # try:\n # out = subprocess.check_output(cmd.split(' '), stderr=subprocess.STDOUT)\n # return out\n # except Exception as e:\n # raise RuntimeError('Error running %s' % str(e))", "title": "" }, { "docid": "f16bf3d1bbcb728cee76f4744756f4a3", "score": "0.7907083", "text": "def run_command(command):\n res = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n out, err = res.communicate()\n if res.returncode != 0:\n output = out.decode().strip() + \"\\n\\n\" + err.decode().strip()\n else:\n output = out.decode().strip()\n return (res.returncode, output)", "title": "" }, { "docid": "832c20fc63b197529f708251fcc3633f", "score": "0.7892767", "text": "def run(command):\n print(command)\n print(check_output(command, shell=True))", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.7816226", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "bac0658941157b5e28bbf556a4504f16", "score": "0.7731024", "text": "def run_command(self, command):\n\n command_str = tuple( map( str, command ) )\n process = subprocess.Popen(command_str, \n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n output, error = process.communicate()\n output_str = output.decode()\n error_str = error.decode()\n\n return (output_str, error_str)", "title": "" }, { "docid": "06c0d245ebf9a93dfb6cd2b6a9ae9671", "score": "0.77279115", "text": "def Run(self, cmd):\n return subprocess.check_output(cmd)", "title": "" }, { "docid": "06c0d245ebf9a93dfb6cd2b6a9ae9671", "score": "0.77279115", "text": "def Run(self, cmd):\n return subprocess.check_output(cmd)", "title": "" }, { "docid": "4fa0c66f011690c700f2bd392be1e8cc", "score": "0.77265406", "text": "def run_command(self, command):\n\n command_str = tuple( map( str, command ) )\n process = subprocess.Popen(command_str,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n output, error = process.communicate()\n output_str = output.decode()\n error_str = error.decode()\n\n return (output_str, error_str)", "title": "" }, { "docid": "08adc5717a66f23b80c3317240d6e527", "score": "0.7715235", "text": "def execute(command):\n p = sub.Popen(command, shell=True, stdout=sub.PIPE, stderr=sub.PIPE)\n stdout, stderr = p.communicate()\n return stdout", "title": "" }, { "docid": "4d2440f1ad9c4d1d2be0aa13a8611725", "score": "0.77028066", "text": "def run(cmd):\n logging.debug('\\nsubprocess.run: %s', cmd)\n return 
subprocess.run(\n cmd, stdout=subprocess.PIPE, cwd=cwd\n ).stdout.decode('utf-8')", "title": "" }, { "docid": "86530fbe5743f070b4482bab7ce6d5f8", "score": "0.76952463", "text": "def run_command(command):\n return subprocess.check_output(command,\n shell=True).decode(\"utf-8\").strip()", "title": "" }, { "docid": "febd793885fc09507aee2eafd2093c6f", "score": "0.76874715", "text": "def run(cmd):\n return subprocess.run(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n check=True).stdout.decode()", "title": "" }, { "docid": "deb530193993570a7074b1bd3d637d87", "score": "0.76847607", "text": "def execute_and_get_output(command):\n try:\n result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n print(result.decode('utf-8'))\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\n \"command '{}' return with error (code {}): {}\".format(e.cmd, e.returncode, e.output.decode('utf-8')))", "title": "" }, { "docid": "d2633faea2a8ad5b479a2574e712c28a", "score": "0.76588297", "text": "def _run_cmd(cmd):\n output = check_output(cmd, shell=True, stderr=STDOUT).decode(\"utf-8\")\n return output", "title": "" }, { "docid": "2f09ad91c19b0b161271ca7ed410601b", "score": "0.76580024", "text": "def run(cmd):\n import subprocess\n return subprocess.check_output(cmd, shell=True).decode(sys.stdout.encoding).rstrip()", "title": "" }, { "docid": "1dd2601b97a4eb57666fb3f6867c9256", "score": "0.7615784", "text": "def callOutput(command):\n process = Popen(command, shell=True, stdout=PIPE)\n output, error = process.communicate()\n return output", "title": "" }, { "docid": "7526b397489ed115c4e8031371a3c2a1", "score": "0.7613247", "text": "def RunCommand(args):\n return subprocess.check_output(args)", "title": "" }, { "docid": "5629593c5e6e2e4514d7de8d64d1b267", "score": "0.76112944", "text": "def do(cmd):\n\n if options.verbose:\n print(\"$ \" + cmd)\n result = subprocess.check_output(cmd, shell=True).strip()\n if options.verbose:\n print(result)\n return result", "title": "" }, { "docid": "15cfa4df37465cc39247b086809a62c2", "score": "0.7608783", "text": "def out(command):\n result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)\n return result", "title": "" }, { "docid": "f8120ebefdedad5c569831d100f4274c", "score": "0.75752306", "text": "def run_command(cmd):\n\n output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]\n return output.decode(\"ascii\")", "title": "" }, { "docid": "f8120ebefdedad5c569831d100f4274c", "score": "0.75752306", "text": "def run_command(cmd):\n\n output = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()[0]\n return output.decode(\"ascii\")", "title": "" }, { "docid": "efbf3a2258250b014d3913485e532d6b", "score": "0.7572608", "text": "def run_command(cmd):\n return subprocess.Popen(cmd,\n shell=True, # not recommended, but does not open a window\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE).communicate()", "title": "" }, { "docid": "cfdcc74599af9847ffc7489d61828a76", "score": "0.75574595", "text": "def runProcess(command):\n output = \"\"\n returnCode = 0\n if args.debug:\n logging.info(\"Running command: {0}\".format(command))\n try:\n output = subprocess.check_output(command)\n except subprocess.CalledProcessError as e:\n if args.debug:\n traceback.print_exc()\n return output", "title": "" }, { "docid": "cfae3d09afcd39444aae22efca098b93", "score": "0.7550097", "text": "def run(command):\n p = subprocess.Popen(command, 
stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n raw_output, raw_err = p.communicate()\n rc = p.returncode\n if get_platform() == 'win32':\n enc = 'oem'\n else:\n enc = locale.getpreferredencoding()\n output = raw_output.decode(enc)\n err = raw_err.decode(enc)\n return rc, output.strip(), err.strip()", "title": "" }, { "docid": "48727f6f53e9e2f3604d8b8ebe4fc582", "score": "0.7547377", "text": "def run_command(cmd):\n WRITE_LOG_DEBUG(\"Running command: %s\", cmd)\n try:\n p = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (output, _) = p.communicate()\n except Exception as details:\n raise RuntimeError(\"Failed to run command: %s\", details)\n WRITE_LOG_DEBUG(\"Command output: %s\", output)\n WRITE_LOG_DEBUG(\"Command return code: %s\", p.returncode)\n if p.returncode != 0:\n raise RuntimeError(\"Command failed with return code %s: %s\" % (p.returncode, output))\n return output.decode()", "title": "" }, { "docid": "fde69f733fda0834f48ffe4742e271c8", "score": "0.751739", "text": "def process(command):\n from subprocess import Popen, PIPE\n return Popen(command, stdout=PIPE, stderr=PIPE, shell=True)", "title": "" }, { "docid": "6a84505018193ceee20f85d21449692c", "score": "0.7501598", "text": "def execute(self):\n\n process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=True)\n (output, error) = process.communicate()\n\n if process.returncode != 0:\n return self.decode_output(error)\n return self.decode_output(output)", "title": "" }, { "docid": "787d702205b13296384e53ed64c49170", "score": "0.748014", "text": "def run_command(cmd, redirect_output=True, check_exit_code=True):\n if redirect_output:\n stdout = subprocess.PIPE\n else:\n stdout = None\n\n proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)\n output = proc.communicate()[0]\n if check_exit_code and proc.returncode != 0:\n die('Command \"%s\" failed.\\n%s', ' '.join(cmd), output)\n return output", "title": "" }, { "docid": "8381810b5c1ff4d2c62788c5de945dbf", "score": "0.7462678", "text": "def run_command(self, command):\n\n logging.info('Running %s', command)\n try:\n cmd = Popen(command.split(), stdout=PIPE, stderr=STDOUT)\n output, _ = cmd.communicate()\n logging.debug(str(output))\n except OSError:\n logging.warning('Failed to run %s', command)", "title": "" }, { "docid": "3584905b6c89314de96366ac9ec8164b", "score": "0.7434661", "text": "def run(self, cmd):\n print cmd\n cin, cout, cerr = os.popen3(cmd)\n\n return '', cout.read(), cerr.read()", "title": "" }, { "docid": "28bb7f55103f8c70775677a89fc06723", "score": "0.7394972", "text": "def run_command(command):\n try:\n pipe = os.popen(command)\n data = pipe.readlines()\n error = pipe.close()\n if error:\n return None\n\n except OSError:\n return None\n\n return data", "title": "" }, { "docid": "40b9b68fbfa9ddd7fe5ba4465d5062f3", "score": "0.73945564", "text": "def call(command):\n output = subprocess.Popen(\n command, stdout=subprocess.PIPE, shell=True).stdout.read()\n if len(output) == 0:\n return ''\n if output[-1] == '\\n':\n output = output[:-1]\n return output", "title": "" }, { "docid": "70d8db0f20120715e2ca16c805f846c9", "score": "0.73485184", "text": "def command(*args):\n print('command: Invoking ', ' '.join(args))\n rtn = subprocess.check_output(args).decode('utf-8')\n print(rtn)", "title": "" }, { "docid": "430305507d09c41e8b8c6e4797ba84dc", "score": "0.73421055", "text": "def run(cmd, *args, **kwargs):\n args = [cmd] + list(args)\n return subprocess.check_output(args, **kwargs)", "title": "" }, { "docid": 
"5c9a345a57642fe83349346b6ec4f8b5", "score": "0.7339175", "text": "def execute(command):\n from subprocess import check_output, STDOUT\n command = \"{}; exit 0\".format(command)\n\n # log command that is going to be run\n _Log.debug(\"Shell Command: {}\".format(command))\n\n return check_output(command, stderr=STDOUT, shell=True)", "title": "" }, { "docid": "d3d1c76d64fd137f8de2336ca0f0bd41", "score": "0.7336564", "text": "def run_command(cmd, **kwargs):\n cmd = [str(s) for s in cmd]\n return subprocess.check_output(cmd, **kwargs)", "title": "" }, { "docid": "e305aa68931e47b101b8eb564846c79b", "score": "0.7334535", "text": "def _Run(self, cmd):\n return os.popen(cmd).read()", "title": "" }, { "docid": "4bb267f47a1a00b0e69764708152c9df", "score": "0.7329651", "text": "def run(cmd) -> subprocess.CompletedProcess:\n cmdstring = shlex.join(cmd)\n logging.debug(\"Run: %r\", cmdstring)\n result = subprocess.run(\n cmd,\n stdin=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n stdout=subprocess.PIPE,\n encoding=\"utf-8\",\n errors=\"backslashreplace\",\n check=False,\n )\n if result.returncode:\n # Log last 20 lines, those are likely the interesting ones.\n logging.error(\n \"Run: %r KO:\\n%s\",\n cmdstring,\n indent(\"\\n\".join(result.stdout.split(\"\\n\")[-20:]), \" \"),\n )\n else:\n logging.debug(\"Run: %r OK\", cmdstring)\n result.check_returncode()\n return result", "title": "" }, { "docid": "320af0fab4e1dd09b789962d3f715498", "score": "0.7316001", "text": "def run_command(cmd, cwd=os.getcwd(), stdout=False, stderr=False, shell=False):\n stdout, stdout_str = output_handler(stdout)\n stderr, stderr_str = output_handler(stderr, redirect='2>')\n\n p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd,\n shell=shell)\n\n return p.communicate()", "title": "" }, { "docid": "2bb79f3e8dd804997c7475771f7e564b", "score": "0.7310162", "text": "def execute_cmd(cmd):\n logger = logging.getLogger(\"logger\")\n logger.debug(f\"Execution cmd: {cmd}\")\n output = subprocess.getoutput(cmd)\n logger.debug(f\"Result: {output}\")\n return output", "title": "" }, { "docid": "b5802d5dc0003309a8edb80bd6599bb8", "score": "0.73062825", "text": "def run_command(self, cmd: str) -> str:\n self.print(f\"\\n========================\\n$ {cmd}\")\n start = time.perf_counter()\n proc = subprocess.run(\n cmd,\n shell=True,\n check=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n output = proc.stdout.decode(\"utf-8\")\n self.last_duration = time.perf_counter() - start\n self.print(output, end=\"\")\n self.print(f\"(was: {cmd})\")\n self.print(f\"(in {os.getcwd()}, duration: {self.last_duration:.3f}s)\")\n\n if proc.returncode != 0:\n self.print(f\"ERROR: command returned {proc.returncode}\")\n raise Exception(\n f\"Command failed ({proc.returncode}): {cmd!r}, output was:\\n{output}\"\n )\n\n return output.strip()", "title": "" }, { "docid": "efbb97468d35f3f85721714a0e266e18", "score": "0.7304682", "text": "def run_command(command: List[str], return_stdout=False):\n try:\n output = subprocess.check_output(command, stderr=subprocess.STDOUT)\n if return_stdout:\n if hasattr(output, \"decode\"):\n output = output.decode(\"utf-8\")\n return output\n except subprocess.CalledProcessError as e:\n raise SubprocessCallException(\n f\"Command `{' '.join(command)}` failed with the following error:\\n\\n{e.output.decode()}\"\n ) from e", "title": "" }, { "docid": "03d535528ad5d1378ce5bc341ab39e0b", "score": "0.7302624", "text": "def run_cmd(cmd):\n logger.info(\"Running Shell Command: \" + cmd)\n return 
subprocess.check_output(shlex.split(cmd), universal_newlines=True)", "title": "" }, { "docid": "7f5ae8bc4f56fd0c412fb9c38999de2e", "score": "0.72849", "text": "def execute(cmd, verbose=False):\n if verbose:\n print(' ', cmd)\n out = sp.getoutput(cmd)\n if out:\n print(' ', out)", "title": "" }, { "docid": "e47b2d7d700466de2e061c1658fbb23e", "score": "0.7276018", "text": "def run( cmd ):\n return call( cmd.split( ' ' ) )", "title": "" }, { "docid": "aff0d9b4993e775060664862ec2d1b29", "score": "0.7267891", "text": "def command_output(cmd, directory):\n p = subprocess.Popen(cmd,\n cwd=directory,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n (stdout, _) = p.communicate()\n if p.returncode != 0:\n raise RuntimeError('Failed to run %s in %s' % (cmd, directory))\n return stdout", "title": "" }, { "docid": "bfb401b0d4a308fa38bcfc8eae2e990c", "score": "0.7260671", "text": "def _run(self, command, cwd=None):\n self.stdout.write(\"\\nExecuting command: {}\\n\".format(\" \".join(command)))\n self.stdout.write(\"=\"*119)\n self.stdout.write(\"\\n\")\n return subprocess.run(command, cwd=cwd, stdin=self.stdin, stdout=self.stdout, stderr=self.stderr)", "title": "" }, { "docid": "e753f0bf0a3cf745da80f893eabf499c", "score": "0.7242747", "text": "def runCommand(args):\n process = Popen(args, stdout=PIPE, stderr=STDOUT)\n stdout = process.stdout.read()\n exitCode = process.wait()\n if exitCode < 0:\n raise CommandFailed(None, -exitCode, stdout)\n elif exitCode > 0:\n raise CommandFailed(exitCode, None, stdout)\n return stdout", "title": "" }, { "docid": "01c019d1796ad99640db493b5ce70185", "score": "0.721788", "text": "def execute(cmd):\r\n return os.popen(cmd).readlines()[0].strip()", "title": "" }, { "docid": "2e5ead070e5517b22c57fddfe102e2bd", "score": "0.7215222", "text": "def get_command_output(self, command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n (output, error) = p.communicate()\n return output", "title": "" }, { "docid": "d20f7561b375ad2620e749d4335025f7", "score": "0.72117263", "text": "def exec_command(command):\r\n stoutdata = sterrdata = \"\"\r\n try:\r\n p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\r\n stoutdata, sterrdata = p.communicate()\r\n stoutdata = stoutdata.decode(\"utf-8\")\r\n except Exception as e:\r\n print (\"Error: stdout: {} \\nstderr: {} \\nException:{}\".format(\r\n stoutdata, sterrdata, str(e)))\r\n return 1, stoutdata\r\n return p.returncode, stoutdata", "title": "" }, { "docid": "0f216402feeff04bb1cc1dbd3ce22297", "score": "0.7209312", "text": "def run_cmd_get_result(cmd):\n\n cmds = cmd.split(' ')\n parent_dir = os.path.dirname(cmds[0])\n curr_dir = os.getcwd()\n if parent_dir and parent_dir != '.':\n #print '***** DEBUG, parent_dir=%s' % parent_dir\n os.chdir(parent_dir)\n\n ret = ''\n #cmd = cmd.encode('utf8')\n pipe = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)\n ret = pipe.stdout.read().strip()\n\n os.chdir(curr_dir)\n\n return ret", "title": "" }, { "docid": "74f9769bac0e6dbdebac41a9c0f11577", "score": "0.72025406", "text": "def _run_cmd(self, cmd):\n try:\n answer = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n message = answer.stdout.decode(\"ascii\")\n if message:\n return f'{cmd} output:\\n{message}'\n else:\n return f'{cmd} produced no output'\n except Exception as err:\n return f\"Can't run {cmd}: {err}\"", "title": "" }, { "docid": "72eebabb8d736086f46c4d7fb793c9cb", "score": "0.7202242", "text": "def run_with_output(command):\n popen = 
Popen(command, stdout=PIPE, universal_newlines=True)\n for line in popen.stdout:\n print(line, end='')\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)", "title": "" }, { "docid": "6e1a68d51eb6122ab4b72f81988b60ad", "score": "0.72014624", "text": "def runCommand(self, command):\n return self.processProtocol.runCommand(command)", "title": "" }, { "docid": "1fdfe3542cf1082142cb9385948b5c11", "score": "0.7200563", "text": "def execute(self, cmd, args):\n try:\n return Popen([cmd] + args, stdout=PIPE, stderr=STDOUT).communicate()[0].decode('utf-8')\n except OSError as e:\n self.log.exception(e)\n return \"Failed to run {0}: {1}\".format(cmd, e)", "title": "" }, { "docid": "4f1ff1225972ad931801fb6d3b237033", "score": "0.7198732", "text": "def run_command(cmd):\n proc = subprocess.run(cmd) \n #, stdout=subprocess.PIPE, stderr=subprocess.PIPE)", "title": "" }, { "docid": "2bd3a4fd624064cc6498ea3b3eb5bd9a", "score": "0.719265", "text": "def run_cmd(cmd):\n print cmd\n p = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n run_cmd.output = []\n\n def process(line):\n run_cmd.output.append(line)\n logging.info(line.rstrip())\n\n while p.poll() is None:\n process(p.stdout.readline())\n process(p.stdout.read())\n return '\\n'.join(run_cmd.output)", "title": "" }, { "docid": "3853591dca636d42be6002f48df5a282", "score": "0.71834624", "text": "def get_output(command):\n return subprocess.check_output(command.split())", "title": "" }, { "docid": "d91164a3a5747ba14d66da5479643fba", "score": "0.71652734", "text": "def Run(self, cmd):\n try:\n output = subprocess.check_output(cmd)\n except subprocess.CalledProcessError as e:\n # gsutil exit code is 1 for --help depending on the context.\n if e.returncode != 1:\n raise\n output = e.output\n return output", "title": "" }, { "docid": "1030d335abad58465b1aa6735840f03e", "score": "0.716391", "text": "def run(command, timeout=None):\n completed = subprocess.run(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True,\n encoding=\"utf8\", timeout=timeout)\n\n return completed.returncode, completed.stdout, completed.stderr", "title": "" }, { "docid": "417c2fe5d52e4111b185fd14637a3dd1", "score": "0.7155935", "text": "def call_command(command, env=None, cwd=None):\n try:\n out = subprocess.check_output(command,\n bufsize=-1,\n env=env,\n stderr=subprocess.STDOUT,\n cwd=cwd)\n return out, 0\n except subprocess.CalledProcessError as ex:\n print(\"Running command '%s' failed: %d, %s\"\n % (' '.join(command), ex.returncode, ex.output),\n file=sys.stderr)\n return ex.output, ex.returncode\n except OSError as oerr:\n print(\"Standard error happened when running command '%s': %s.\"\n % (' '.join(command), str(oerr)),\n file=sys.stderr)\n return oerr.strerror, oerr.errno", "title": "" }, { "docid": "b219701f2dfb3ce801fcf3bf5c3b8ea2", "score": "0.71148765", "text": "def _execute(self, *args):\n logging.info('Running: %s', args)\n return subprocess42.check_output(args)", "title": "" }, { "docid": "00945c4af449e03648be4b321b5da14f", "score": "0.710456", "text": "def execute(self,cmd):\n stdin,stdout,stderr = self._client.exec_command(cmd)\n return (stdout,stderr)", "title": "" }, { "docid": "5f9e04a1fa341b5f443fd4950cdd23ea", "score": "0.7079269", "text": "def run_cmd(cmd):\n print('+ {}'.format(cmd))\n if isinstance(cmd, str):\n cmd = cmd.split()\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = 
p.communicate()\n\n # When running in Python 3, the output of subprocess.Popen.communicate()\n # is a bytes object. We need to convert it to a string.\n if isinstance(output, bytes):\n output = output.decode(\"utf-8\")\n if isinstance(err, bytes):\n err = err.decode(\"utf-8\")\n\n print(output)\n print(err)\n return output, err", "title": "" }, { "docid": "6bdc90e656686f20783bcd7f423f7986", "score": "0.7078348", "text": "def run_command(command,regexp,ssh):\n\tstdin, stdout, stderr = ssh.exec_command(command)\n\t\n\toutput = stdout.read()\n\tif regexp:\n\t\tif not re.match(regexp,output):\n\t\t\tca.log_fail(\"Unexpected result from command, failing.\",output)\n\t\t\traise StandardError\n\treturn output", "title": "" }, { "docid": "03ded86c93b4fe9eefd00a8af4ac7bb5", "score": "0.7075704", "text": "def execute(command, print_cmd=True, split=True):\n if print_cmd:\n debug(command)\n if split:\n command = shlex.split(command)\n return subprocess.run(command, check=True, capture_output=True).stdout", "title": "" }, { "docid": "f1d9213958c0c947088e52158a87b9a5", "score": "0.706201", "text": "async def run(cmd):\n proc = await asyncio.create_subprocess_shell(\n cmd,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n stdout, stderr = await proc.communicate()\n if stdout:\n return stdout.decode()\n if stderr:\n return stderr.decode()", "title": "" }, { "docid": "b0eb02111f208cb8aadb7f2a7c1b6c7f", "score": "0.7059068", "text": "def run(*cmd, capture=False, check=True, env=None):\n cmd = [str(p) for p in cmd]\n print(' '.join(cmd))\n return subprocess.run(cmd,\n capture_output=capture,\n check=check,\n env=env,\n text=True)", "title": "" }, { "docid": "8b67ea5bf1fd1904899c5cef9cb1ed59", "score": "0.7053954", "text": "def run_command(command, shell=False):\n p = check_output(command, shell=shell)\n return p.decode(\"utf-8\").splitlines()", "title": "" }, { "docid": "9162810c270aace435b401e4ac388699", "score": "0.70525783", "text": "def execute(self, cmd):\n\n self.ret = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n (self.out, self.err) = self.ret.communicate()\n self.out = self.out.decode(\"utf-8\")\n self.err = self.err.decode(\"utf-8\")\n return (self.out, self.err)", "title": "" }, { "docid": "741cd58e8a6785d5d4572c3626e4c204", "score": "0.70513284", "text": "def run_command(cmd, cwd=os.getcwd(), stdout=False, stderr=False):\n stdout, stdout_str = output_handler(stdout)\n stderr, stderr_str = output_handler(stderr, redirect='2>')\n\n p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd)\n\n out, err = p.communicate()\n return onfinish_handler(\n '{0} {1} {2}'.format(' '.join(cmd), stdout_str, stderr_str),\n byte_to_string(out), byte_to_string(err), p.returncode\n )", "title": "" }, { "docid": "4bf4b901625da4ba0c97350c142cb7e7", "score": "0.70465875", "text": "def run_command(cmd_args):\n proc = subprocess.Popen(cmd_args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = proc.communicate()\n return proc.returncode, out, err", "title": "" }, { "docid": "d7b1fd7e42af530164a9baaa82ed9989", "score": "0.7046135", "text": "def execute_command(cmd):\n output = \"\"\n LOG.info(\"Running: %s\" % \" \".join(cmd))\n try:\n output = subprocess.check_output(cmd, shell=False)\n except CalledProcessError as err:\n LOG.warning(\"Something went wrong with loqusdb\")\n raise err\n\n if not output:\n return output\n\n output = output.decode(\"utf-8\")\n return output", "title": "" }, { "docid": "9d0ae2ccf3df0e8dc15ef0c7e4ff340c", 
"score": "0.7037672", "text": "def call_cmd(self, cmd, abort_on_failure=True):\n print cmd\n self.log(\"Running \" + cmd[0] + \"...\", MD.TRACE)\n self.log(\"with parameters`\" + \" \".join(cmd[1:]) + \"`...\", MD.DEBUG)\n output = None\n try:\n output = subprocess.check_output(cmd)\n except subprocess.CalledProcessError as e:\n self.log(\"Process \" + \" \".join(cmd) + \" returned non zero status \" + str(e.returncode) + \".\"\n \"with message\" + e.message,\n MD.ERROR)\n if abort_on_failure:\n self.log(\"Output: \" + e.output, MD.FATAL)\n else:\n return e.returncode, e.output\n except Exception as e :\n self.log(\"Unspecified failure \" + e.message + \"on command \" + \" \".join(cmd) + \". Abort.\", MD.FATAL)\n\n # 0 -- success code\n return 0, output", "title": "" }, { "docid": "dd9b5148c325d8b7bb9bdc0fd1ef57c8", "score": "0.70365137", "text": "def run_command(command):\n result = commands.getstatusoutput(command)\n if result[0] == 127:\n command = command.split()[0]\n raise SuitcaseCommandError(result, \"%s not found\" % command)\n if result[0] > 0:\n raise SuitcaseCommandError(result, \"An error occurred: %s\" % result[1])\n\n return result", "title": "" }, { "docid": "4ea2e3a8e6c07f3b8098c96155fd9982", "score": "0.70281756", "text": "def run_cmd( cmd, verbose=False ):\n if verbose:\n print ' '.join(cmd)\n proc = subprocess.Popen( cmd, stdout=subprocess.PIPE)\n \n (out, err) = proc.communicate()\n\n if out is not None:\n print out\n if err is not None:\n print err", "title": "" }, { "docid": "487454786e2e5e76701b97f322ba1425", "score": "0.6997015", "text": "def _run_command(cmd, **kwargs):\n kwargs.setdefault('redirect_stderr', True)\n kwargs.setdefault('combine_stdout_stderr', True)\n kwargs.setdefault('capture_output', True)\n kwargs.setdefault('error_code_ok', True)\n return rh.utils.run_command(cmd, **kwargs)", "title": "" }, { "docid": "ab35ddb1680631503d46d83625229baf", "score": "0.69955945", "text": "def run_command(c):\n command = shlex.split(c)\n com = Popen(command, shell=False, stdout=PIPE, stderr=PIPE)\n comout = ''.join(com.communicate()).strip()\n return comout, com.returncode", "title": "" }, { "docid": "c067fc463c644f6e3c9219d4c1cd59b2", "score": "0.6994733", "text": "def run_command(command):\n result = os.system(command)\n return 0 if result == 0 else 1", "title": "" }, { "docid": "c9260acd2fc53b034139234a0389ff9c", "score": "0.699378", "text": "def execute_cmd(cmd):\n logging.debug(\"[{}]\".format(cmd))\n\n process = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate()\n returncode = process.returncode\n if returncode != 0:\n msg = \"{} failed: {}\".format(cmd, stderr)\n logging.warning(msg)\n raise RuntimeError(msg)\n\n return stdout", "title": "" }, { "docid": "bc7956cc3533fb56e904b97bd39c48ba", "score": "0.6992038", "text": "def system(command: str) -> int:\n logging.info(command)\n\n process = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)\n output_buffer, stderr_buffer = process.communicate()\n stdout, stderr = str(output_buffer.decode()).strip(), str(stderr_buffer.decode()).strip()\n\n if len(stdout) > 0:\n logging.debug(stdout)\n\n if len(stderr) > 0:\n logging.error(stderr)\n\n logging.info(f'returned {process.returncode}')\n\n return process.returncode", "title": "" }, { "docid": "532d2de867181202465731118427e409", "score": "0.6980339", "text": "def execute_command(cmd):\n\n if not cmd:\n return 'enter cmd param'\n cmd = cmd.split(' ')\n p = subprocess.Popen(cmd, stderr=subprocess.PIPE, 
stdout=subprocess.PIPE)\n stdout, stderr = p.communicate()\n out = \"\"\n out += stdout.decode('utf8')\n out += '\\n------------------\\n'\n out += stderr.decode('utf8')\n return text_response(out)", "title": "" }, { "docid": "2f19004a892e0b45f38f28f8f355c2e7", "score": "0.6979855", "text": "def call(command, **kwargs): # from gitapi.py\n\tproc = Popen(split(command), stdout=PIPE, stderr=PIPE, **kwargs)\n\tout, err = [x.decode(\"utf-8\") for x in proc.communicate()]\n\n\treturn {'out': out, 'err': err, 'code': proc.returncode}", "title": "" }, { "docid": "2fe09f98482b6cf5e7e85ce60efff884", "score": "0.69781196", "text": "def run(cmd):\n logger.info(\"Running command: %s\", cmd)\n proc = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n\n for line in proc.stdout:\n logger.info(' :: %s', line.rstrip())\n\n retcode = proc.wait()\n if retcode:\n raise RuntimeError(\"Execution ended in {} for cmd {}\".format(retcode, cmd))", "title": "" }, { "docid": "5f0072378b4b6a279df36077dfff08a0", "score": "0.69671565", "text": "def execute(self, cmd: str = \"\") -> str:\n return subprocess.check_output(cmd, shell=True).decode()", "title": "" }, { "docid": "13714c6b158888167a20b4925057dd0f", "score": "0.69653845", "text": "def call_raw(self, command):\n proc = subprocess.Popen(\n command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = proc.communicate()\n return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')", "title": "" }, { "docid": "a9b95e6e852afe5db45a58cc9bc1867a", "score": "0.69638443", "text": "def call_cmd(cmd):\n\n try:\n child = Popen(list(str(cmd).split(' ')), stdout=PIPE)\n string = child.communicate()[0]\n child.stdout.close()\n except OSError:\n message = str(\"Error while executing \" + cmd + \"\\n\" + traceback.format_exc())\n logging.warning(message)\n sys.exit(message)\n\n return string.decode()", "title": "" }, { "docid": "587e208d8f10c882707c529961aa6115", "score": "0.695964", "text": "def execute_terminal_command_with_output(command: str) -> (bool, str):\n try:\n process = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n # The command you are trying to run does not exist - return failure\n except FileNotFoundError:\n return (False, \"Invalid Command\")\n stdout, stderr = process.communicate()\n if not process.poll():\n return (True, stdout.decode()[:-1])\n return (False, stderr.decode()[:-1])", "title": "" }, { "docid": "32bbc8fa321507679825246930a98d4f", "score": "0.69553596", "text": "def run_and_collect(self, cmd):\n cmd = cmd.replace('`','\\`')\n try:\n return os.popen(cmd).read().strip()\n except IOError,e:\n report(\"IOError: \" + str(e))\n return None", "title": "" }, { "docid": "cbda19a772a57dff5dcdeaf988a61c6c", "score": "0.69551724", "text": "def get_command_response(self, command):\n command_list = command.split(\" \")\n result = subprocess.run(command_list, stdout=subprocess.PIPE)\n return result.stdout.decode(\"utf-8\")", "title": "" }, { "docid": "31463a54a37697c1c69dbfcd80354fda", "score": "0.69523114", "text": "def run(command: str, check=True, input=None, cwd=None, silent=False, environment=None) -> subprocess.CompletedProcess:\n if not silent:\n log_msg = f\"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] \" \\\n f\"Executing: {command}\" + os.linesep\n print_out(log_msg)\n print_err(log_msg, print_=False)\n\n proc = subprocess.run(shlex.split(command), check=check,\n stderr=subprocess.PIPE, stdout=subprocess.PIPE,\n input=input, cwd=cwd, 
env=environment)\n\n if not silent:\n print_err(proc.stderr.decode())\n print_out(proc.stdout.decode())\n\n return proc", "title": "" }, { "docid": "9a236717abfa95b49e189cedb83741c0", "score": "0.6950904", "text": "def system(cmd):\n print cmd\n try:\n output = subprocess.check_output(cmd, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print 'Command\\n %s\\nfailed.' % cmd\n print 'Return code:', e.returncode\n print e.output\n sys.exit(1)", "title": "" }, { "docid": "650b2415c14a2ad452591eb4bf208557", "score": "0.6944053", "text": "def execute(command):\n process = sub.Popen([command], stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE, shell =True)\n process.stdin.write('Y')\n stdoutput, stderror = process.communicate()\n if stderror:\n\treturn stderror\n else:\n\treturn stdoutput", "title": "" }, { "docid": "7a630b373b831b33388c4d291b686f0a", "score": "0.69395", "text": "def subpout(cmd):\n output = subprocess.run(\"{0}\".format(cmd), shell=True, stdout=subprocess.PIPE, universal_newlines=True).stdout.strip()\n return output", "title": "" }, { "docid": "505ab45e512ec56695b26fde5f935415", "score": "0.6924805", "text": "def execute(self, command):\n result, error = execute(self.prefix_exec + command, verbose=self.verbose)\n return result, error", "title": "" }, { "docid": "b9bd54f25aafc19b28242c9878817eff", "score": "0.6922078", "text": "def run_command(cmd):\n child = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n out = [s.decode(\"utf-8\").strip() for s in child.stdout]\n err = [s.decode(\"utf-8\").strip() for s in child.stderr]\n w = child.wait()\n if not sys.platform.startswith(\"win\"):\n return os.WEXITSTATUS(w), out, err\n else:\n return w, out, err", "title": "" }, { "docid": "647edd70cc6f253c4c27035b016102ce", "score": "0.6921948", "text": "def run_command(command):\n\n # If it is a python script, we do not run it, but rather import the main\n # function. This ensures that the script is run using the same python\n # interpreter that this script uses and it is also faster because we do not\n # need to run multiple python interpreters simultaneously.\n Frank_trusts_this_approach = False\n if command[:6] == 'python' and Frank_trusts_this_approach:\n\n import importlib # only import this when it's needed\n\n command_list = command.split()[1:]\n #py_args = command_list[1:]\n\n # get the dir and file name\n py_script_dir, py_script_file = os.path.split(command_list[0][1:-1])\n\n # add the dir to the path\n sys.path.insert(0, py_script_dir)\n\n # import the file\n py_script = importlib.__import__(\n py_script_file[:-3], globals(), locals(), ['main',], 0)\n\n # remove the quotes from the arguments\n arg_list = [c[1:-1] for c in command_list[1:]]\n\n py_script.main(arg_list)\n\n return \"\", \"\"\n\n else:\n\n try:\n result = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True, text=True)\n returncode = 0\n except subprocess.CalledProcessError as e:\n result = e.output\n returncode = e.returncode\n\n if returncode != 0:\n log_error('return code: {}'.format(returncode))\n\n # if platform.system() == 'Windows':\n # return result.decode(sys.stdout.encoding), returncode\n # else:\n # #print(result, returncode)\n # return str(result), returncode\n\n return result, returncode", "title": "" } ]
5af6802750378385b9d7f09681c007a6
Private method to group the tree items by window.
[ { "docid": "3e8b693410b1b25e7e41c41c0119116f", "score": "0.7198706", "text": "def __groupByWindow(self):\n windows = self.__mw.mainWindows()\n \n self.__isRefreshing = True\n \n winCount = 0\n for mainWin in windows:\n winCount += 1\n winItem = self.__createEmptyItem()\n winItem.setText(0, self.tr(\"Window {0}\").format(winCount))\n winItem.setToolTip(0, self.tr(\"Double click to switch\"))\n if mainWin == self.__mw:\n font = winItem.font(0)\n font.setBold(True)\n winItem.setFont(0, font)\n winItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n for browser in mainWin.tabWidget().browsers():\n if self.__page == browser.page():\n self.__page = None\n continue\n \n tabItem = self.__createEmptyItem(winItem)\n if browser == mainWin.tabWidget().currentBrowser():\n font = tabItem.font(0)\n font.setBold(True)\n tabItem.setFont(0, font)\n if not browser.isLoading():\n tabItem.setIcon(0, browser.icon())\n else:\n tabItem.setIcon(0, UI.PixmapCache.getIcon(\"loading.png\"))\n tabItem.setText(0, browser.title())\n tabItem.setToolTip(0, browser.title())\n \n tabItem.setData(0, TabManagerWidget.WebBrowserRole, browser)\n tabItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n self.__makeWebBrowserViewConnections(browser)", "title": "" } ]
[ { "docid": "7b2a923dc7ea54af35026c00234ac715", "score": "0.5919494", "text": "def open_groups_wnd():\n head_text = head_groups.groups_win_text(LNG)\n db = data_training.OpenSaveDb().db\n data_text = proc_data.Processer(db).data_groups_wnd()\n db.close()\n open = window_groups.MyTk(title=head_text['title'],\n head_text=head_text['head'],\n add_btn_text=head_text['add'],\n open_btn_text=head_text['open'],\n data_text=data_text,\n back_btn_text=head_text['back'],\n back_cmd=make_main_win,\n open_gr_cmd=open_onegroup_wnd,\n add_gr_cmd=open_onegroup_wnd)", "title": "" }, { "docid": "f6a25507a3d98f38b9bf735bf7ed839e", "score": "0.5820493", "text": "def __groupByDomainName(self, useHostName=False):\n windows = self.__mw.mainWindows()\n \n tabsGroupedByDomain = {}\n \n for mainWin in windows:\n for browser in mainWin.tabWidget().browsers():\n if self.__page == browser.page():\n self.__page = None\n continue\n domain = self.domainFromUrl(browser.url(), useHostName)\n \n if domain not in tabsGroupedByDomain:\n groupItem = self.__createEmptyItem(None, False)\n groupItem.setText(0, domain)\n groupItem.setToolTip(0, domain)\n font = groupItem.font(0)\n font.setBold(True)\n groupItem.setFont(0, font)\n tabsGroupedByDomain[domain] = groupItem\n groupItem = tabsGroupedByDomain[domain]\n \n tabItem = self.__createEmptyItem(groupItem)\n if browser == mainWin.tabWidget().currentBrowser():\n font = tabItem.font(0)\n font.setBold(True)\n tabItem.setFont(0, font)\n if not browser.isLoading():\n tabItem.setIcon(0, browser.icon())\n else:\n tabItem.setIcon(0, UI.PixmapCache.getIcon(\"loading.png\"))\n tabItem.setText(0, browser.title())\n tabItem.setToolTip(0, browser.title())\n \n tabItem.setData(0, TabManagerWidget.WebBrowserRole, browser)\n tabItem.setData(0, TabManagerWidget.WebWindowRole, mainWin)\n \n self.__makeWebBrowserViewConnections(browser)\n \n self.__tree.insertTopLevelItems(0, tabsGroupedByDomain.values())", "title": "" }, { "docid": "421f26ea10532adc565815b4dc572f6b", "score": "0.5607868", "text": "def group():", "title": "" }, { "docid": "1476434e840e5e11ea5074dfc1291471", "score": "0.55897033", "text": "def expand_collapsed_items(self):\n num_items = self.ui.selectedTreeWidget.topLevelItemCount()\n results = []\n for i in range(0, num_items):\n widget = self.ui.selectedTreeWidget.topLevelItem(i)\n # If the current widget item indicates it is collapsed or collapsible\n if str_to_bool(widget.text(1)):\n # Get list of all file names gotten from the collapsed name\n names = Collapser.get_names_from_condensed(widget.text(0))\n for name in names:\n new_widg = QtWidgets.QTreeWidgetItem()\n new_widg.setText(0, name)\n new_widg.setText(1, widget.text(1))\n results.append(new_widg)\n else:\n new_widg = QtWidgets.QTreeWidgetItem()\n new_widg.setText(0, widget.text(0))\n new_widg.setText(1, widget.text(1))\n results.append(new_widg)\n results.sort()\n self.ui.selectedTreeWidget.clear()\n # Add all widgets to the tree\n self.ui.selectedTreeWidget.addTopLevelItems(results)", "title": "" }, { "docid": "2e23a4be92ce8b8bae8e54940fd334d0", "score": "0.5388321", "text": "def _setup_snap_view_groups(self, num_groups):\n for i in range(1, num_groups+1):\n try:\n # get on hold of three widgets with systematic naming\n graph_view = getattr(self.ui, 'graphicsView_snapView%d' % i)\n combo1 = getattr(self.ui, 'comboBox_g%d1' % i)\n combo2 = getattr(self.ui, 'comboBox_g%d2' % i)\n radio_button = getattr(self.ui, 'radioButton_plot%d' % i)\n assert isinstance(radio_button, QRadioButton)\n except AttributeError as e:\n 
raise RuntimeError(\n 'GUI changed but python code is not changed accordingly: %s' % (str(e)))\n else:\n # set up group\n graph_group = snapgraphicsview.SnapGraphicsView(graph_view, combo1, combo2, radio_button)\n self._groupedSnapViewList.append(graph_group)\n # END_FOR(i)\n\n return", "title": "" }, { "docid": "961b4054880f83447f21ecacf513f99b", "score": "0.53274125", "text": "def grouping(items, grps=None):\n if grps is None:\n grps = groups()\n for r in items:\n id, instid, inst = r['id'], r['repr_instid'], r['repr_inst']\n # print id, instid, inst\n\n if inst == '':\n grps.add(id)\n else:\n if instid != 0:\n grps.add(id, instid)\n else:\n grps.add(id, inst)\n return grps", "title": "" }, { "docid": "c239c82a825be485e95b7cd36ca4879a", "score": "0.52771765", "text": "def create_ungrouped_txns(self):\n self.create_grouping('ungrouped', self.non_grouped_txns)", "title": "" }, { "docid": "f215e3e5c97bd7782372b5e09092c95a", "score": "0.5211922", "text": "def make_windows(text, window_size=3):\n windows = []\n for msg_idx, line in enumerate(text): # count through each line of text\n print()\n print('Message index:', msg_idx)\n print('Message:', line)\n for idx in range(len(line) - window_size + 1): # slide a window along the line until it reaches the end\n window = line[idx: idx + window_size] # get the words that fall into that window as a list\n print('Window idx:', idx, '\\twindow:', window)\n windows.append(window) # add that list of tokens in a window to the list of windows\n print(\"All windows:\", windows)\n return windows", "title": "" }, { "docid": "e4c9cecdd143134ae0537c367572ab49", "score": "0.51829624", "text": "def group_window_add(self, func):\n return self._subscribe(\"group_window_add\", func)", "title": "" }, { "docid": "3d5e6752de7ea8007d6f8238e4d5322c", "score": "0.5161561", "text": "def appendGroup(self, *args):\n\n\t\tcurGroup = cmds.textScrollList('hairGroups', q=1, si=1)[0]\n\n\t\tfor x in cmds.ls(sl=1):\n\t\t\tcmds.parent(x, curGroup)\n\n\t\treturn", "title": "" }, { "docid": "7c35be4352d5d6835e56ae0993e7bc24", "score": "0.51572084", "text": "def process_xwin_tree(tree_str):\n tree_lines = tree_str.split(\"\\n\")\n tree = WindowTree()\n for line in tree_lines:\n line_indent = line.count(\" \") - line.lstrip().count(\" \")\n if not tree.source_initialised:\n if line_indent == 0:\n if line == \"\":\n # Skip empty unindented lines at the start of the tree source\n continue\n # Handle one non-empty unindented line at the start of the tree source\n # Only one opening line, so initialise and skip to indentation\n tree.initialise_source(line)\n continue\n elif not tree.root_initialised:\n if line == \"\":\n # Skip empty unindented lines after the start of the tree input\n continue\n # Reached the main tree section, can process it according to indentation\n tree.initialise_root(line, line_indent)\n continue\n elif tree.root_initialised and line_indent == 0:\n # Stop processing if you reach a blank or unindented line\n break\n # If/else block finished: the following will run for the indented tree section\n line_level = (line_indent - tree.root_indent_offset) // tree.indent_step_size\n if line_level > tree.deepest_open_level:\n # Opening a new deepest level (i.e. 
starting a further level of subnode/s).\n # This line will declare how many subnodes will be listed\n if line.endswith(\"children:\") or line.endswith(\"child:\"):\n # Don't open the new level yet, wait for the window info on next line\n continue\n tree.open_path.deepen(line)\n continue\n elif line_level < tree.deepest_open_level:\n # The deepest level is completed, this line is a sibling on a previous level\n tree.retract_to_level(line_level)\n tree.open_path.continue_level(line)\n continue\n elif line_level == tree.deepest_open_level:\n # This line will be declaring a new entry in the deepest indentation level\n if line_level == 0:\n # This is the parent of the source window\n tree.source.assign_parent(line)\n continue\n tree.open_path.continue_level(line)\n return tree", "title": "" }, { "docid": "980957ded90c4267c2720d98064fab60", "score": "0.51525503", "text": "def update_children(self):\n shell = self.shell_obj\n widget = self.widget\n # XXX using private _windows attribute. Is there no way\n # to query the splitter for it's windows?\n for widget_child in widget._windows:\n widget.DetachWindow(widget_child)\n\n for child in shell.splitter_children:\n child_widget = child.toolkit_widget\n child_widget.Reparent(widget)\n widget.AppendWindow(child_widget)", "title": "" }, { "docid": "8c2411b74c451661a104cc1a67a21606", "score": "0.5139608", "text": "def group_objects(self, group):\r\n return [self.hierarchy[element] for element in self.groups[group]]", "title": "" }, { "docid": "df9a5a93b06485bb20f9ab9b6614fbf2", "score": "0.51375675", "text": "def populate_selected_tree(self):\n # Clears any old widgets so duplicates are not added\n self.ui.selectedTreeWidget.clear()\n new_widgets = []\n for widget in self.widgets:\n new_widg = QtWidgets.QTreeWidgetItem()\n name = widget.text(0)\n new_widg.setText(0, name)\n new_widg.setText(1, widget.text(1))\n new_widgets.append(new_widg)\n new_widgets.sort()\n # Add all widgets to the tree\n self.ui.selectedTreeWidget.addTopLevelItems(new_widgets)", "title": "" }, { "docid": "6c7dac8e8a773be2ad8e81c585507686", "score": "0.51319903", "text": "def group(self):\n return self.item", "title": "" }, { "docid": "9ba798d55290b81b8441d999cfbd6024", "score": "0.51314425", "text": "def group(self):\n\t\tgroups = []\n\t\tborder = list(self.border)\n\t\twhile len(border) != 0:\n\t\t\tgroups.append([border[0]])\n\t\t\tborder.remove(border[0])\n\t\t\tfor grp in groups:\n\t\t\t\tfor field in grp:\n\t\t\t\t\tfor i in set(border).intersection(self.near(field)):\n\t\t\t\t\t\tgrp.append(i)\n\t\t\t\t\t\tborder.remove(i)\n\t\treturn groups", "title": "" }, { "docid": "a92667a3f43535d2f26f63e7e5dfcd34", "score": "0.5129556", "text": "def get_groups():\n return {'version': {'label': 'version', 'parents': ['root']}}", "title": "" }, { "docid": "90522ca47fa3cc2d94f5822f5eb6c836", "score": "0.5097074", "text": "def all_kinase_groups(self):\n raise NotImplementedError(\"Implement in your subclass!\")", "title": "" }, { "docid": "707677e44f19cf2e7003eec161c12a3c", "score": "0.5078915", "text": "def populateGroups(self):\n\n # get a list of the existing folders in projects\n selectedProject = self.projectMenu.currentText()\n project = os.path.join(self.projectPath, selectedProject)\n existingGroups = os.listdir(project)\n folders = []\n\n # find out which returned items are directories\n for each in existingGroups:\n if os.path.isdir(os.path.join(project, each)):\n folders.append(each)\n\n # otherwise, add each project to the combo box\n self.groupMenu.clear()\n 
self.groupMenu.addItem(\" \")\n for each in folders:\n self.groupMenu.addItem(each)\n\n # populate characters\n self.populateCharacters()", "title": "" }, { "docid": "e9c2db671188d9d8ebf3e120d40beab2", "score": "0.5078003", "text": "def window_graph(\n left: Union[Bed, str], right: Union[Bed, str], window_size: int,\n left_sorted: bool = False, right_sorted: bool = False,\n attr_fn: Optional[Callable[[Interval, Interval, float], Mapping[str, Any]]] = None\n) -> nx.MultiDiGraph:\n check_deps(\"bedtools\")\n if isinstance(left, Bed):\n pbar_total = len(left)\n left = left.to_bedtool()\n else:\n pbar_total = None\n left = pybedtools.BedTool(left)\n if not left_sorted:\n left = left.sort(stream=True)\n left = iter(left) # Resumable iterator\n if isinstance(right, Bed):\n right = right.to_bedtool()\n else:\n right = pybedtools.BedTool(right)\n if not right_sorted:\n right = right.sort(stream=True)\n right = iter(right) # Resumable iterator\n\n attr_fn = attr_fn or (lambda l, r, d: {})\n if pbar_total is not None:\n left = smart_tqdm(left, total=pbar_total)\n graph = nx.MultiDiGraph()\n window = collections.OrderedDict() # Used as ordered set\n for l in left:\n for r in list(window.keys()): # Allow remove during iteration\n d = interval_dist(l, r)\n if -window_size <= d <= window_size:\n graph.add_edge(l.name, r.name, **attr_fn(l, r, d))\n elif d > window_size:\n del window[r]\n else: # dist < -window_size\n break # No need to expand window\n else:\n for r in right: # Resume from last break\n d = interval_dist(l, r)\n if -window_size <= d <= window_size:\n graph.add_edge(l.name, r.name, **attr_fn(l, r, d))\n elif d > window_size:\n continue\n window[r] = None # Placeholder\n if d < -window_size:\n break\n pybedtools.cleanup()\n return graph", "title": "" }, { "docid": "7a4f6634554e5ea3995c541843902e65", "score": "0.5075971", "text": "def canvas_windows(self):\n windows={}\n \n for x in self.find_type('window'):\n try:\n w=self.nametowidget(self[x,'window'])\n except:\n continue\n windows[x]=w\n return windows", "title": "" }, { "docid": "4d0fb93e5fd67b40b8accf19e39e59b6", "score": "0.50738424", "text": "def organizeGrid(self):\n old_items = []\n while self.ui.grid.count() != 0:\n old_items.append(self.ui.grid.itemAt(0))\n self.ui.grid.removeItem(self.ui.grid.itemAt(0))\n for i in range( len(old_items) ):\n row,col = self.gridTable[i+1]\n self.ui.grid.addItem(old_items[i], row, col)", "title": "" }, { "docid": "7b0d30d2d80da4c5e1934a196ab40d67", "score": "0.50622934", "text": "def group_elements(flowable_list):\n\n return KeepTogetherSplitAtTop(flowable_list)", "title": "" }, { "docid": "bfcdb38e4677e7fa5ae4120fea5f9075", "score": "0.5059094", "text": "def nodeinfo_grp():\n pass", "title": "" }, { "docid": "f40c0c2f56cd199851bb22135b78ceed", "score": "0.50583565", "text": "def split_items(self, recursive=True):\n for item in self.items:\n if isinstance(item, BookmarkShortcut):\n self.shortcuts.append(item)\n\n elif isinstance(item, BookmarkFolder):\n self.children.append(item)\n if recursive:\n item.split_items()", "title": "" }, { "docid": "fe0c7483de19e0d604c20f74df6b5de3", "score": "0.4997214", "text": "def group(items):\n groups = [(items.count(x), x) for x in set(items)]\n return sorted(groups, reverse=True)", "title": "" }, { "docid": "2aa015a29e9802b0083b1be673848410", "score": "0.49707365", "text": "def window_add(x, i):\n # Remove any elements before the start of the current window.\n start = i - k\n while window and window[0].index < start:\n window.popleft()\n # Remove from the end of 
the window any elements that are dominated\n # by the new end element x.\n while window and window[-1].value <= x:\n window.pop()\n # Attach the new end element.\n window.append(WindowEntry(i - 1, x))", "title": "" }, { "docid": "12bb7f7f167fd3ac86bdc64263e0d5fb", "score": "0.4962316", "text": "def subGroups( self ):\n for rule in self.ruleList:\n if len( rule[0] ) >= 3:\n #self.reduceRule( rule )\n pass\n \n for i in range( self.size ):\n self.reduceRow( i )\n self.reduceColumn( i )", "title": "" }, { "docid": "663fa112d160cd230c4a6ce17d09131c", "score": "0.49601775", "text": "def getJournalArticleGroups(self):\n\t\t\n\t\tgroups = {}\n\t\t\n\t\tquery = QtSql.QSqlQuery()\n\t\tquery.prepare(\"\"\"\n\t\t\t\t\t\tselect distinct detail_gruppe\n\t\t\t\t\t\tfrom journal_details\n\t\t\t\t\t\twhere 1=1\n\t\t\t\t\t\tand detail_periode = ?\n\t\t\t\t\t\torder by detail_gruppe\n\t\t\t\t\t\t\"\"\")\n\t\tquery.addBindValue(self._getCurrentPeriodId())\n\t\tquery.exec_()\n\n\t\tif query.lastError().isValid():\n\t\t\tprint 'Error while selecting artikel gruppen from journal_details'\n\t\telse:\n\t\t\twhile query.next():\n\t\t\t\tgs = groupString = unicode(query.value(0).toString())\n\t\t\t\t\n\t\t\t\tgroupHierarchy = []\n\t\t\t\t\n\t\t\t\twhile len(groupString) > 0:\n\t\t\t\t\tindex = groupString.find('$')\n\t\t\t\t\tcharCount = int(groupString[1:index])\n\t\t\t\t\tgroup = groupString[index+1:index+1+charCount]\n\t\t\t\t\tgroupHierarchy.append(group)\n\t\t\t\t\t\n\t\t\t\t\tgroupString = groupString[index+1+charCount:]\n\t\t\t\t\t\n\t\t\t\tgroups[gs] = groupHierarchy\n\t\t\n\t\tprint groups\n\t\t\n\t\trow = []\n\t\tfor gs in sorted(groups.keys()):\n\t\t\titem = QtGui.QStandardItem(u'->'.join(groups[gs]))\n\t\t\titem.setData(gs, articleGroupRole)\n\t\t\trow.append(item)\n\t\t\t\n\t\treturn row", "title": "" }, { "docid": "88a07aa2ea423410e24fa6ce68cf9901", "score": "0.496006", "text": "def visit_grouping_expr(self, expr):\n return self.parenthesize(\"group\", exprs=[expr.expression])", "title": "" }, { "docid": "0c7bc272ed77cb0f0ea3dbb1f70c4a4a", "score": "0.49476916", "text": "def expand(self, data):\n retval = set()\n group_list = list(self.groups)\n for i in xrange(len(group_list)):\n before = group_list[:i]\n after = group_list[i + 1:]\n this = group_list[i]\n this = [this.add(data)]\n t_collection = GroupCollection(before + after + this)\n retval.add(t_collection)\n retval.add(GroupCollection(group_list + [Group(data)]))\n return retval", "title": "" }, { "docid": "70f0e8d92964acdd4c6ba37ff1dadaba", "score": "0.4945744", "text": "def group(items):\n groups = [(items.count(x), x) for x in set(items)]\n groups.sort(reverse = True)\n return groups", "title": "" }, { "docid": "2496b98072c9612c62da60a4341aa121", "score": "0.49457365", "text": "def _get_groups(scenario,elements):\n\t_groups = collections.OrderedDict()\n\tfor T in elements:\n\t\tif T.group is not None:\n\t\t\tgroup_name = T.group\n\t\t\tif group_name in _groups:\n\t\t\t\t_groups[group_name].append(T)\n\t\t\telse:\n\t\t\t\t_groups[group_name] = [T]\n\tgroups = collections.OrderedDict([ (_groups[group_name][0],_groups[group_name])\n\t\t\t\t\t\t\t\t\t\t for group_name in _groups])\n\tel_in_groups = [ T_ for T in groups for T_ in groups[T] ]\n\tgroups.update([ (T,[T]) for T in elements if T not in el_in_groups ])\n\treturn groups", "title": "" }, { "docid": "e3a45ea8dbd08ded64a409178ab100a4", "score": "0.4943574", "text": "def updateTree(self):\n self.tree.DeleteAllItems()\n \n self.root = self.tree.AddRoot(\"Project\")\n 
self.tree.SetPyData(self.root, None)\n self.tree.SetItemImage(self.root, self.fldridx, wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(self.root, self.fldropenidx, wx.TreeItemIcon_Expanded)\n \n \n dataTree = self.tree.AppendItem(self.root, \"Data Sets\")\n self.tree.SetPyData(dataTree, DATA_SET_ITEM)\n self.tree.SetItemImage(dataTree, self.fldridx, wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(dataTree, self.fldropenidx, wx.TreeItemIcon_Expanded)\n \n figsTree = self.tree.AppendItem(self.root, \"Figure Sets\")\n self.tree.SetPyData(figsTree, FIGURE_SET_ITEM)\n self.tree.SetItemImage(figsTree, self.fldridx, wx.TreeItemIcon_Normal)\n self.tree.SetItemImage(figsTree, self.fldropenidx, wx.TreeItemIcon_Expanded)\n\n # create tree\n self.buildDataTree(dataTree, DataStore.getData(), [])\n self.buildFigureTree(figsTree, FigureStore.getFigures())\n self.tree.Expand(self.root)\n if self.dataTreeExpanded:\n self.tree.Expand(dataTree)\n if self.figureTreeExpanded:\n self.tree.Expand(figsTree)", "title": "" }, { "docid": "0a9c05f606b88f19fdbe001320bf138c", "score": "0.49369285", "text": "def test_division_logistics_item_groups_get(self):\n pass", "title": "" }, { "docid": "449c2aacdd1bd776813eb467f2e5c066", "score": "0.49335968", "text": "def collapse_items(self):\n self.populate_selected_tree()", "title": "" }, { "docid": "f2c66a8c769775451206ce95b21b6fc4", "score": "0.49257174", "text": "def make_tree_widget_item(widg: Union[QtWidgets.QTreeWidget,\n QtWidgets.QTreeWidgetItem],\n object_item: Union[ItemN, LoopN, DataN, GlobalN],\n name: str = None) \\\n -> QtWidgets.QTreeWidgetItem:\n wi = QtWidgets.QTreeWidgetItem(widg)\n if name is None:\n wi.setText(0, f\"{object_item.get_name():}\")\n else:\n wi.setText(0, name)\n wi.object = object_item\n\n if ((type(object_item) == ItemN) | (type(object_item) == LoopN)):\n wi.setBackground(0, QtGui.QColor(237, 242, 255))\n else:\n wi.setBackground(0, QtGui.QColor(255, 255, 255))\n\n if object_item.is_defined():\n pass\n else:\n wi.setBackground(0, QtGui.QColor(255, 224, 224))\n\n if isinstance(object_item, (GlobalN, DataN)):\n l_name = [item.get_name() for item in object_item.items]\n l_name_sort = sorted(l_name)\n for name in l_name_sort:\n ind = l_name.index(name)\n item = object_item.items[ind]\n wii = make_tree_widget_item(wi, item)\n wi.addChild(wii)\n elif isinstance(object_item, ItemN):\n for attr_name in object_item.ATTR_INT_NAMES:\n if object_item.is_attribute(attr_name):\n int_obj = getattr(object_item, attr_name)\n if isinstance(int_obj, (ItemN, LoopN)):\n wii = make_tree_widget_item(wi, int_obj, name=attr_name)\n wi.addChild(wii)\n for attr_name in object_item.ATTR_INT_PROTECTED_NAMES:\n if attr_name in object_item.__dict__.keys():\n if object_item.is_attribute(attr_name):\n int_obj = getattr(object_item, attr_name)\n if isinstance(int_obj, (ItemN, LoopN)):\n wii = make_tree_widget_item(wi, int_obj, name=attr_name)\n wi.addChild(wii)\n return wi", "title": "" }, { "docid": "a12e391486bb9c279029177bdf8bd999", "score": "0.49120232", "text": "def create_windows(data, DeepForest_config, base_dir):\n #Compute list of sliding windows, assumed that all objects are the same extent and resolution \n sample_tile = data.rgb_path.iloc[0]\n image_path=os.path.join(base_dir, sample_tile)\n windows=compute_windows(image=image_path, pixels=DeepForest_config[\"patch_size\"], overlap=DeepForest_config[\"patch_overlap\"])\n \n #if none\n if windows is None:\n return None\n \n #Compute Windows\n #Create dictionary of windows for each image\n tile_windows={}\n \n 
all_images=list(data.rgb_path.unique())\n\n tile_windows[\"tile\"] = all_images\n tile_windows[\"window\"]=np.arange(0, len(windows))\n \n #Expand grid\n tile_data = expand_grid(tile_windows) \n \n #Merge with the site variable\n merge_site = data[[\"rgb_path\",\"site\"]].drop_duplicates()\n merge_site.columns = [\"tile\",\"site\"]\n tile_data = tile_data.merge(merge_site)\n \n return(tile_data)", "title": "" }, { "docid": "490e0dce5e39bfcb6b5a2e32f12198b5", "score": "0.49074155", "text": "def get_window_layers(self, window):\n return [x for x in self._members if x._window == window]", "title": "" }, { "docid": "55ced74acc9a88412f77ae3fa5d68d15", "score": "0.49060652", "text": "def __refreshTree(self):\n if self.__refreshBlocked:\n return\n \n if self.__isRefreshing and not self.__page:\n return\n \n # store selected items\n selectedBrowsers = []\n for index in range(self.__tree.topLevelItemCount()):\n winItem = self.__tree.topLevelItem(index)\n if winItem.checkState(0) == Qt.Unchecked:\n continue\n \n for row in range(winItem.childCount()):\n tabItem = winItem.child(row)\n if tabItem.checkState(0) == Qt.Unchecked:\n continue\n selectedBrowsers.append(\n tabItem.data(0, TabManagerWidget.WebBrowserRole))\n \n self.__tree.clear()\n \n if self.__groupType == TabManagerWidget.GroupByHost:\n self.__groupByDomainName(True)\n elif self.__groupType == TabManagerWidget.GroupByDomain:\n self.__groupByDomainName()\n else:\n # default is group by window\n self.__setGroupType(TabManagerWidget.GroupByWindow)\n self.__groupByWindow()\n \n # restore selected items\n for index in range(self.__tree.topLevelItemCount()):\n winItem = self.__tree.topLevelItem(index)\n \n for row in range(winItem.childCount()):\n tabItem = winItem.child(row)\n if tabItem.data(0, TabManagerWidget.WebBrowserRole) in (\n selectedBrowsers\n ):\n tabItem.setCheckState(0, Qt.Checked)\n \n self.__tree.expandAll()\n self.__isRefreshing = False\n self.__waitForRefresh = False", "title": "" }, { "docid": "8ffffe908727a92386579737ee0ec434", "score": "0.4898937", "text": "def build_widget_items(self, top_item):\n ## Column 0 - Input:\n self._folder_widget = QtWidgets.QLabel()\n self._folder_widget.setText(self._folder)\n top_item.setItemWidget(self, 0, self._folder_widget)\n\n ## Column 1 - filename:\n self._filename_widget = QtWidgets.QLabel()\n self._filename_widget.setText(self._filename)\n self._filename_widget.setAlignment(QtCore.Qt.AlignCenter)\n top_item.setItemWidget(self, 1, self._filename_widget)\n \n ## Column 1 - Sequence:\n self._sequence_widget = QtWidgets.QComboBox()\n self._sequence_widget.addItem(self._sequence)\n self._sequence_widget.setEditable(True)\n top_item.setItemWidget(self, 2, self._sequence_widget)\n {\n self._sequence_widget.addItem(os.path.basename(key)) \n for (key, value) in self._app_config.add_configuration.output_subfolders.items()\n }\n self._sequence_widget.currentIndexChanged.connect(self.update_shot_wdgs)\n \n ## Column 2 - Shot:\n self._shot_widget = QtWidgets.QComboBox()\n self._shot_widget.addItem(self._shot)\n self._shot_widget.setEditable(True)\n top_item.setItemWidget(self, 3, self._shot_widget)\n\n ## Column 3 - Location\n self._location_widget = QtWidgets.QComboBox()\n self._location_widget.addItem(str(self._location))\n self._location_widget.setEditable(True)\n top_item.setItemWidget(self, 4, self._location_widget)\n\n ## Column 4 - Option\n self._option_widget = QtWidgets.QComboBox()\n [self._option_widget.addItem(option) for option in self._OPTIONS]\n top_item.setItemWidget(self, 5, 
self._option_widget)\n\n self.option_override()\n self._option_widget.currentIndexChanged.connect(self.option_override)\n\n # If the header file is passed, some overrides need to be set to allow \n # an easier override option for all sub items\n if self._header:\n self._option_widget.currentIndexChanged.connect(self.header_option_override)\n self._location_widget.currentTextChanged.connect(self.header_location_override)\n self._sequence_widget.currentTextChanged.connect(self.header_sequence_override)\n self._shot_widget.currentTextChanged.connect(self.header_shot_override)", "title": "" }, { "docid": "a7eb8dfbf9a352af46175caf8bd9121b", "score": "0.48974252", "text": "def immediate_grouping(self):\n graph = CellGraph(self._numbered,\n lambda c: c.get_neighbors(is_unrevealed=True))\n\n # Deductive reasoning through grouping\n for cell in self._numbered:\n neighbors = cell.get_neighbors()\n numbered_neighbors = graph.relatives_of(cell)\n\n flagged = [c for c in neighbors if c.is_flagged()]\n unrevealed = {c for c in neighbors if c.is_unrevealed()}\n necessary = cell.number - len(flagged)\n\n for neighbor in numbered_neighbors:\n neighbor_neighbors = neighbor.get_neighbors()\n neighbor_unrevealed = {c for c in neighbor_neighbors\n if c.is_unrevealed()}\n if (unrevealed.issubset(neighbor_unrevealed) and\n unrevealed != neighbor_unrevealed):\n neighbor_num_flags_left = neighbor.num_flags_left\n\n unshared = neighbor_unrevealed - unrevealed\n if necessary == neighbor_num_flags_left:\n yield [('click', c) for c in unshared]\n else:\n necessary_diff = neighbor_num_flags_left - necessary\n if necessary_diff == len(unshared):\n yield [('right_click', c) for c in unshared]", "title": "" }, { "docid": "590ca357b560ef3fe0126fc210f6436d", "score": "0.4895962", "text": "def get_grouping(self):\r\n return self.group_func", "title": "" }, { "docid": "81e153ba048d122d258a4d3fa8b02d0a", "score": "0.48935255", "text": "def _group_keys(self):\n return [name for name, child in self._items() if isinstance(child, GroupNode)]", "title": "" }, { "docid": "81e153ba048d122d258a4d3fa8b02d0a", "score": "0.48935255", "text": "def _group_keys(self):\n return [name for name, child in self._items() if isinstance(child, GroupNode)]", "title": "" }, { "docid": "4db0fcf915b2d3c5e09ae57b6f462acb", "score": "0.48923385", "text": "def form_groups(self, merging_merged=False):\n F = self.merge_nodes('B', merging_merged)\n return F.merge_nodes('A', merging_merged)", "title": "" }, { "docid": "c2bf8b91d82dacbb4ebbda2bfb3352e7", "score": "0.48912162", "text": "def _setupModelsGroup(self):\r\n modelGroupLayout = QVBoxLayout()\r\n self.modelListWidget = QListWidget()\r\n modelGroupLayout.addWidget(self.modelListWidget)\r\n self.modelListWidget.setSelectionMode(QAbstractItemView.MultiSelection) # able to select multiple models\r\n self.modelListWidget.itemSelectionChanged.connect(self._emitModelChangedSignal)\r\n\r\n return modelGroupLayout", "title": "" }, { "docid": "5178f30501aed39a39c6b7065993a10b", "score": "0.48828214", "text": "def get_groups_of_work_from_booking(booking):\n all_panels_in_booking = BookingPackagePanel.objects.select_related('panel')\\\n .filter(booking_package__booking_id=booking.id)\n\n all_groups = {\n #2: [{'name':'fender_right_bumper_front', 'units': 2}]\n }\n for panel in all_panels_in_booking:\n if panel.panel.type_of_work in all_groups:\n # groups for this type of work exists, find group that have space left.\n groups_by_type_of_work = all_groups[panel.panel.type_of_work]\n panel_added = False\n for group 
in groups_by_type_of_work:\n # check against panels_in_group_limit if panel can be added here else create a new group.\n if group['units'] < PANELS_GROUP_SIZE:\n group['name'] = \"%s,%s\" % (group['name'], str(panel.panel.car_panel.name).replace(' ','_'))\n group['units'] += 1\n panel_added = True\n\n if not panel_added:\n groups_by_type_of_work.append({'name': str(panel.panel.car_panel.name).replace(' ','_'), 'units': 1})\n else:\n all_groups[panel.panel.type_of_work] = [{'name': str(panel.panel.car_panel.name).replace(' ','_'), 'units': 1}]\n\n return all_groups", "title": "" }, { "docid": "022bb399bceb97f57a6efd7054c4c763", "score": "0.4879245", "text": "def group(items, key=None, as_dict=True):\n key = key or (lambda item: item)\n groups = defaultdict(list)\n for item in items:\n groups[key(item)].append(item)\n if as_dict:\n groups = dict(groups)\n return groups", "title": "" }, { "docid": "b20eea6009fa4608edbc5a1c1b27a460", "score": "0.48770407", "text": "def on_action_expand_all_triggered(self):\n for top in range(self.treewidget.topLevelItemCount()):\n self.treewidget.expandItem(self.treewidget.topLevelItem(top))", "title": "" }, { "docid": "9b8df1b65180c9e135d1cecfb9e6e5ba", "score": "0.48728308", "text": "def loadItems(self, tree_widget):\n self.top_items = {}\n for name in [\"Contacts\", \"Molecules\", \"Surface Blocks\", \"Recently Used\"]:\n top_item = QtGui.QTreeWidgetItem(tree_widget)\n top_item.setText(0, name)\n top_item.setFlags(top_item.flags() & ~QtCore.Qt.ItemIsDragEnabled\n & ~QtCore.Qt.ItemIsSelectable)\n self.top_items[name] = top_item\n self.loadContacts()\n self.loadMolecules()\n self.loadSurfaceBlocks()", "title": "" }, { "docid": "8cc1049e7c13c7991c142fed5258a065", "score": "0.4868712", "text": "def createFolderStructure(site):\n\n actualidad_children = [\n {'id':'eventos',\n 'title':'Eventos',\n 'description': '',\n 'type':'Folder',\n 'layout':'icons_description',\n },\n {'id': 'comunicados',\n 'title': 'Comunicados',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'avisos',\n 'title': 'Avisos',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n ]\n\n aoi_children = [\n {'id':'objetivo',\n 'title':'Objetivos',\n 'description': '',\n 'type':'Document',\n 'layout':'Common',\n },\n {'id': 'asistencia',\n 'title': 'Asistencia Ofrecida',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'contacto',\n 'title': 'Contacto',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'formulario-solicitud',\n 'title': 'Formularios de Solicitud de Informacion',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'informes',\n 'title': 'Informes',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'boletin',\n 'title': 'Boletin',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_listing',\n },\n {'id': 'estadistica',\n 'title': 'Estadistica',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n ]\n\n qs_children = [\n {'id':'base-legal',\n 'title':'Base Legal',\n 'description': '',\n 'type':'Document',\n 'layout':'Common',\n },\n {'id': 'mision-y-vision',\n 'title': 'Mision y Vision',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'plan-estrategico',\n 'title': 'Plan Estrategico',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'politica-institucional',\n 'title': 'Politica Institucional',\n 'description': 
'',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'valores',\n 'title': 'Valores',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'funciones',\n 'title': 'Funciones',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'donde-estamos',\n 'title': 'Donde Estamos',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n ]\n\n ministro_children = [\n {'id': 'discursos',\n 'title': 'Discursos',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'presentaciones',\n 'title': 'Presentaciones',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'escritos',\n 'title': 'Escritos',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n ]\n\n documentos_children = [\n {'id':'normas',\n 'title':'Normas',\n 'description': '',\n 'type':'Folder',\n 'layout':'folder_tabular_view',\n },\n {'id': 'leyes',\n 'title': 'Leyes',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'procedimientos',\n 'title': 'Procedimientos',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'manuales',\n 'title': 'Manuales',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'acuerdos-ministeriales',\n 'title': 'Acuerdos Ministeriales',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'circulares',\n 'title': 'Circulares',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'publicaciones',\n 'title': 'Publicaciones',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'encuestas',\n 'title': 'Encuestas',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n {'id': 'otros',\n 'title': 'Otros',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_tabular_view',\n },\n ]\n\n direccion_children = [\n {'id':'base-legal',\n 'title':'Base Legal',\n 'description': '',\n 'type':'Document',\n 'layout':'Common',\n },\n {'id': 'mision-y-vision',\n 'title': 'Mision y Vision',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'estructura-organizativa',\n 'title': 'Estructura Organizativa',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'servicios',\n 'title': 'Servicios',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'documentos',\n 'title': 'Documentos',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'funciones',\n 'title': 'Funciones',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n {'id': 'preguntas-frecuentes',\n 'title': 'Preguntas Frecuentes',\n 'description': '',\n 'type': 'Document',\n 'layout': 'Common',\n },\n ]\n\n direcSup_children = [\n {'id': 'ministro',\n 'title': 'Ministro',\n 'description': 'Discursos, Presentaciones, Escritos',\n 'type': 'Folder',\n 'layout': 'icons_description',\n 'children':ministro_children,\n },\n {'id': 'viceministro',\n 'title': 'Vice Ministro',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'secretario-general',\n 'title': 'Secretario General',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n ]\n\n noticias_children = [\n {'id':'noticia-1',\n 'title':'Noticia 1',\n 'description': 'Lorem ipsum dolor sit amet, consectetuer adipiscing 
elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.',\n 'type':'News Item',\n 'layout':'newsitem_view',\n },\n {'id':'noticia-2',\n 'title':'Noticia 2',\n 'description': 'Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.',\n 'type':'News Item',\n 'layout':'newsitem_view',\n },\n ]\n \n institucion_children = [\n {'id':'qs',\n 'title':'Quienes Somos',\n 'description': 'Base legal, Mision, Vision, Plan estrategico, Politica Institucional, valores, Funciones, Donde Estamos',\n 'type':'Folder',\n 'layout':'icons_description',\n 'children':qs_children,\n },\n {'id': 'direccion-superior',\n 'title': 'Direccion Superior',\n 'description': 'Ministro, ViceMinistro, Secretario General',\n 'type': 'Folder',\n 'layout': 'icons_description',\n 'children':direcSup_children,\n },\n {'id': 'organigrama',\n 'title': 'Organigrama',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'directorio',\n 'title': 'Directorio Institucional',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'poa',\n 'title': 'Plan Anual Operativo',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n ]\n\n direccionx = [\n {'id':'direccionx',\n 'title':'Direccion x',\n 'description': 'Base Legal, Mision y Vision, Estructura Organizativa, Funciones, Servicios, Documentos, Preguntas Frecuentes',\n 'type':'Folder',\n 'layout':'icons_description',\n 'children':direccion_children\n },\n ]\n \n top_folders = [\n {'id': 'actualidad',\n 'title': 'Actualidad',\n 'description': 'Eventos, Comunicados, Avisos',\n 'type': 'Folder',\n 'layout': 'icons_description',\n 'children': actualidad_children,\n },\n {'id': 'agenda',\n 'title': 'Agenda',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'galeria',\n 'title': 'Galeria de Fotos',\n 'description': 'Glarias de Foto',\n 'type': 'Folder',\n 'layout': 'atct_album_view',\n },\n {'id': 'video',\n 'title': 'VideoTeca',\n 'description': 'Conferencias, talleres',\n 'type': 'Folder',\n 'layout': 'folder_listing',\n },\n {'id': 'faq',\n 'title': 'Preguntas Frecuentes',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_listing',\n },\n {'id': 'ayuda',\n 'title': 'Ayuda del sitio',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_listing',\n },\n {'id': 'glosario',\n 'title': 'Glosario',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_listing',\n },\n {'id': 'institucion',\n 'title': 'La Institucion',\n 'description': 'Quines somos, Direccion Superior, Organigrama, Directorio, Plan Anual Operativo',\n 'type': 'Folder',\n 'layout': 'icons_description',\n 'children':institucion_children,\n },\n {'id': 'servicios',\n 'title': 'Servicios',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'direcciones-generales',\n 'title': 'Direcciones Generales',\n 
'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n 'children':direccionx,\n },\n {'id': 'programas-y-proyectos',\n 'title': 'Programas y Proyectos',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'oai',\n 'title': 'Oficina de Acceso a la Informacion',\n 'description': 'Objetivo, Asistencia Ofrecida, Contacto, Formularios de Solicitud de Informacion, Informes, Boletin, Estadistica',\n 'type': 'Folder',\n 'layout': 'icons_description',\n 'children':aoi_children,\n },\n {'id': 'documentos',\n 'title': 'Documentos',\n 'description': 'Normas, Leyes, Procedimientos, Manuales, Acuerdo Ministeriales, Circulares, Publicaciones, Encuestas, Otros',\n 'type': 'Folder',\n 'layout': 'document_view',\n 'children':documentos_children,\n },\n {'id': 'arte-y-cultura',\n 'title': 'Arte y Cultura',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'icons_description',\n },\n {'id': 'noticias',\n 'title': 'Noticias',\n 'description': '',\n 'type': 'Folder',\n 'layout': 'folder_summary_view',\n 'children':noticias_children,\n },\n {'id': 'imagenes',\n 'title': 'Imagenes',\n 'description':'imangenes del portal',\n 'type':'Folder',\n 'layout':'folder_tabular_view',\n },\n {'id': 'ultimos-documentos',\n 'title': 'ultimos Documentos',\n 'description':'Ultimos Documentos del Portal',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'proximos-eventos',\n 'title': 'Proximos Eventos',\n 'description':'Proximos Eventos del portal',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'estudiantes-docentes',\n 'title': 'Estudiantes y Docentes',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'ciudadano',\n 'title': 'Ciudadano',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'instituciones',\n 'title': 'Instituciones',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'proveedores',\n 'title': 'Proveedores del Estado',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'ong',\n 'title': 'ONG',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'profesionales',\n 'title': 'Profesionales',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n {'id': 'empresa',\n 'title': 'Empresa Privada',\n 'description':'',\n 'type':'Topic',\n 'layout':'atct_topic_view',\n },\n ]\n createObjects(site,top_folders)\n objectCollection(site,top_folders[-9:])", "title": "" }, { "docid": "019757e776b399935e5b47ed570beecf", "score": "0.48616982", "text": "def layout_window(self, window):\n raise NotImplementedError()", "title": "" }, { "docid": "213f85ee54e74596fcc88e3e1aa3f18a", "score": "0.48613775", "text": "def _walk_netcdf_tree(top):\n values = top.groups.values()\n yield values\n for value in top.groups.values():\n for children in _walk_netcdf_tree(value):\n yield children", "title": "" }, { "docid": "70c312c7b3da18dff2313ae0e62191d6", "score": "0.48607463", "text": "def cmd_togroup(self, prompt=\"group: \", widget=\"prompt\"):\n if not self.currentWindow:\n self.log.warning(\"No window to move\")\n return\n\n mb = self.widgetMap.get(widget)\n if not mb:\n self.log.error(\"No widget named '%s' present.\" % widget)\n return\n\n mb.startInput(prompt, self.moveToGroup, \"group\")", "title": "" }, { "docid": "dd39f6bfb19d60e9bf664a2d3dab5fe7", "score": "0.48574802", "text": "def create_nodegroup(self, values):", "title": "" }, { "docid": "c01d3b0e3b674505cecfbda618cb5187", 
"score": "0.48516077", "text": "def _add_current_groups(self):\n self.keys_final += self._current_keys_final\n nr_gr_f = max(self.group_for_inputs_final.values()) + 1\n for inp, grs in self._current_group_for_inputs_final.items():\n if isinstance(grs, int):\n grs = grs + nr_gr_f\n else: # a list\n grs = [gr + nr_gr_f for gr in grs]\n self.group_for_inputs_final[inp] = grs\n for i, stack in enumerate(self._current_groups_stack_final):\n if i == 0:\n stack = [gr + nr_gr_f for gr in stack]\n self.groups_stack_final[-1] += stack\n else:\n stack = [gr + nr_gr_f for gr in stack]\n self.groups_stack_final.append(stack)", "title": "" }, { "docid": "377c60f8ff6fe6195b4a441ddeb38ef5", "score": "0.48429197", "text": "def window_handles(self):\n ...", "title": "" }, { "docid": "3233bc299b24358b38b739d679674414", "score": "0.4842468", "text": "def grouper(items: Iterable[T], n: int) -> Iterator[List[T]]:\n it = iter(items)\n while True:\n group = list(itertools.islice(it, n))\n if not group:\n return\n yield group", "title": "" }, { "docid": "0b2a5673d3344eeef12de02a617677e1", "score": "0.48312658", "text": "def group_images(self):\n order = list(range(self.size()))\n order.sort(key=lambda x: self.image_aspect_ratio(x))\n # divide into groups, one group = one batch\n self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in range(0, len(order), self.batch_size)]", "title": "" }, { "docid": "5de61a77689d5d31823362251d0ae139", "score": "0.48246834", "text": "def populate_system_tree(self):\n # gets all filenames from the current path\n files = self.fb.get_files_in_dir()\n self.collapser.make_final_list(files)\n collapsedFiles = self.collapser.get_result_files()\n collapsedTF = self.collapser.get_collapsed_list()\n widgets = []\n # Create widgets to add to the tree list\n for i in range(0, len(collapsedFiles)):\n widget = QtWidgets.QTreeWidgetItem()\n widget.setText(0, collapsedFiles[i])\n type = \"Folder\" if self.fb.is_dir(collapsedFiles[i]) else \"File\"\n widget.setText(2, type)\n collapsed = \"Yes\" if collapsedTF[i] else \"No\"\n widget.setText(1, collapsed)\n widgets.append(widget)\n # Add all widgets to the tree\n self.ui.systemTreeWidget.addTopLevelItems(widgets)", "title": "" }, { "docid": "3715f987452e326aa42d7b5ec6814620", "score": "0.48209894", "text": "def _create_rooms_group(klass, floor):\n rooms_group = svgwrite.container.Group(id = \"rooms\")\n rooms_by_cat = klass.get_grouped_rooms(floor)\n\n for category_name, cat_rooms in rooms_by_cat:\n group_cat_name = klass._prepare_cat_name(category_name)\n cat_group = svgwrite.container.Group(id = group_cat_name)\n\n for r_id, room in cat_rooms:\n room_group = klass._create_room_group(\n r_id,\n klass._create_polygon(room.get(\"polygon\"))\n )\n cat_group.add(room_group)\n\n rooms_group.add(cat_group)\n\n return rooms_group", "title": "" }, { "docid": "1817bb1a88bae798a8b7805a6d173997", "score": "0.48170137", "text": "def merge_windows(self, new_window: W, sorted_windows: List[W], merge_callback: MergeCallback):\n pass", "title": "" }, { "docid": "1ad68979ab20dd541790f71df9cbb7a5", "score": "0.4812073", "text": "def set_results_treeview_structure(self):\n self.ui.results_treeView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n self.results_tree_model = QtGui.QStandardItemModel()\n self.results_tree_model.setHorizontalHeaderLabels(['Elements'])\n self.ui.results_treeView.setModel(self.results_tree_model)\n self.ui.results_treeView.setUniformRowHeights(True)\n\n def pass_to_QStandardItem_list(list_):\n res = 
list()\n for elm in list_:\n elm1 = QtGui.QStandardItem(elm)\n elm1.setEditable(False)\n res.append(elm1)\n return res\n\n bus_results = pass_to_QStandardItem_list(['Voltages (p.u.)', 'Voltages (kV)'])\n per_bus_results = pass_to_QStandardItem_list(['Voltage (p.u.) series', 'Voltage (kV) series',\n 'Active power (MW)', 'Reactive power (MVar)',\n 'Active and reactive power (MW, MVar)', 'Aparent power (MVA)',\n 'S-V curve', 'Q-V curve'])\n\n branches_results = pass_to_QStandardItem_list(['Loading (%)', 'Current (p.u.)',\n 'Current (kA)', 'Losses (MVA)'])\n per_branch_results = pass_to_QStandardItem_list(['Loading (%) series', 'Current (p.u.) series',\n 'Current (kA) series', 'Losses (MVA) series'])\n\n generator_results = pass_to_QStandardItem_list(['Reactive power (p.u.)', 'Reactive power (MVar)'])\n per_generator_results = pass_to_QStandardItem_list(['Reactive power (p.u.) series',\n 'Reactive power (MVar) series'])\n\n self.family_results_per_family = dict()\n\n # nodes\n buses = QtGui.QStandardItem('Buses')\n buses.setEditable(False)\n buses.appendRows(bus_results)\n self.family_results_per_family[0] = len(bus_results)\n names = self.circuit.get_bus_labels()\n for name in names:\n bus = QtGui.QStandardItem(name)\n bus.appendRows(per_bus_results)\n bus.setEditable(False)\n buses.appendRow(bus)\n\n # branches\n branches = QtGui.QStandardItem('Branches')\n branches.setEditable(False)\n branches.appendRows(branches_results)\n self.family_results_per_family[1] = len(branches_results)\n names = self.circuit.get_branch_labels()\n for name in names:\n branch = QtGui.QStandardItem(name)\n branch.appendRows(per_branch_results)\n branch.setEditable(False)\n branches.appendRow(branch)\n\n # generators\n generators = QtGui.QStandardItem('Generators')\n generators.setEditable(False)\n generators.appendRows(generator_results)\n self.family_results_per_family[2] = len(generator_results)\n names = self.circuit.get_gen_labels()\n for name in names:\n gen = QtGui.QStandardItem(name)\n gen.appendRows(per_generator_results)\n gen.setEditable(False)\n generators.appendRow(gen)\n\n self.results_tree_model.appendRow(buses)\n self.results_tree_model.appendRow(branches)\n self.results_tree_model.appendRow(generators)", "title": "" }, { "docid": "e474b8bc2d4b3d807ea686eaac1e1191", "score": "0.4809163", "text": "def open_onegroup_wnd(key, new_group=None):\n head_text = head_onegroup.members_win_text(LNG)\n db = data_training.OpenSaveDb().db\n if new_group:\n key = data_shelve.AddExample(d_b=db, example='group_').choice()\n data_text = proc_data.AddNewGroup().make_dict()\n else:\n data_text = db[key]\n if 'members' not in data_text:\n data_text['members'] = None\n if 'training' not in data_text:\n data_text['training'] = None\n db.close()\n open = window_onegroup.MyTk(title=head_text['title'],\n change_text=head_text['change'],\n del_text=head_text['del'],\n group_name=data_text['groupName'],\n stud_btn_text=head_text['stud'],\n tr_btn_text=head_text['train'],\n stud_list=data_text['members'],\n stud_head=head_text['stud_head'],\n back_btn_text=head_text['back'],\n back_btn_cmd=open_groups_wnd,\n key=key, save_btn_cmd=save_group_name,\n members_cmd=open_members_wnd,\n head_tr_text=head_text['tr_head'],\n tr_list=data_text['training'],\n open_btn_text=head_text['open'],\n open_tr_cmd=open_trainig_wnd,\n tr_btn_cmd=open_change_tr_wnd,\n del_btn_cmd=del_group,\n del_msg=head_text['del_msg'],\n new_group=new_group,\n db_error_text=head_text['db_error'],\n save_msg=head_text['save_msg'],\n catch_error_func=catch_error)", 
"title": "" }, { "docid": "54e048033ac3b22e53b976862fa258eb", "score": "0.48083404", "text": "def GroupExpanded(self, Group, Expanded):", "title": "" }, { "docid": "f61d29315bb8d80d722b52d03f22d282", "score": "0.48082787", "text": "def layers_group():\n pass", "title": "" }, { "docid": "db3264e5aa7743dc3eff641eb2e3df76", "score": "0.48046485", "text": "def change_layer(self, up=True, top_bottom=False):\n if len(self.qtile.windows_map) < 2:\n return\n\n if self.group is None and not isinstance(self, Static):\n return\n\n # Use the window's group or current group if this isn't set (e.g. Static windows)\n group = self.group or self.qtile.current_group\n\n parent = self.window.get_wm_transient_for()\n if parent is not None and not up:\n return\n\n layering = self.get_layering_information()\n\n # Comparison of layer states: -1 if window is now in a lower state group,\n # 0 if it's in the same group and 1 if it's in a higher group\n moved = (self.previous_layer > layering) - (layering > self.previous_layer)\n self.previous_layer = layering\n\n stack = list(self.qtile.core._root.query_tree())\n if self.wid not in stack or len(stack) < 2:\n return\n\n # Get all windows for the group and add Static windows to ensure these are included\n # in the stacking\n group_windows = group.windows.copy()\n statics = [win for win in self.qtile.windows_map.values() if isinstance(win, Static)]\n group_windows.extend(statics)\n\n if group.screen is not None:\n group_bars = [gap for gap in group.screen.gaps if isinstance(gap, bar.Bar)]\n else:\n group_bars = []\n\n # Get list of windows that are in the stack and managed by qtile\n # List of tuples (XWindow object, transient_for, layering_information)\n windows = list(\n map(\n lambda w: (\n w.window,\n w.window.get_wm_transient_for(),\n w.get_layering_information(),\n ),\n group_windows,\n )\n )\n\n # Sort this list to match stacking order reported by server\n windows.sort(key=lambda w: stack.index(w[0].wid))\n\n # Get lists of windows on lower, higher or same \"layer\" as window\n lower = [w[0].wid for w in windows if w[1] is None and w[2] > layering]\n higher = [w[0].wid for w in windows if w[1] is None and w[2] < layering]\n same = [w[0].wid for w in windows if w[1] is None and w[2] == layering]\n\n # We now need to identify the new position in the stack\n\n # If the window has a parent, the window should just be put above it\n if parent:\n sibling = parent\n above = True\n\n # Now we just check whether the window has changed layer.\n\n # If we're forcing to top or bottom of current layer...\n elif top_bottom:\n # If there are no other windows in the same layer then there's nothing to do\n if not same:\n return\n\n if up:\n sibling = same[-1]\n above = True\n else:\n sibling = same[0]\n above = False\n\n # There are no windows in the desired layer (should never happen) or\n # we've moved to a new layer and are the only window in that layer\n elif not same or (len(same) == 1 and moved != 0):\n # Try to put it above the last window in the lower layers\n if lower:\n sibling = lower[-1]\n above = True\n\n # Or below the first window in the higher layers\n elif higher:\n sibling = higher[0]\n above = False\n\n # Don't think we should end up here but, if we do...\n else:\n # Put the window above the highest window if we're raising it\n if up:\n sibling = stack[-1]\n above = True\n\n # or below the lowest window if we're lowering the window\n else:\n sibling = stack[0]\n above = False\n\n else:\n # Window has moved to a lower layer state\n if moved < 0:\n if 
self.kept_below:\n sibling = same[0]\n above = False\n else:\n sibling = same[-1]\n above = True\n\n # Window is in same layer state\n elif moved == 0:\n try:\n pos = same.index(self.wid)\n except ValueError:\n pos = len(same) if up else 0\n if not up:\n pos = max(0, pos - 1)\n else:\n pos = min(pos + 1, len(same) - 1)\n sibling = same[pos]\n above = up\n\n # Window is in a higher layer\n else:\n if self.kept_above:\n sibling = same[-1]\n above = True\n else:\n sibling = same[0]\n above = False\n\n # If the sibling is the current window then we just check if any windows in lower/higher layers are\n # stacked incorrectly and, if so, restack them. However, we don't need to configure stacking for this\n # window\n if sibling == self.wid:\n index = stack.index(self.wid)\n\n # We need to make sure the bars are included so add them now\n if group_bars:\n for group_bar in group_bars:\n bar_layer = group_bar.window.get_layering_information()\n if bar_layer > layering:\n lower.append(group_bar.window.wid)\n elif bar_layer < layering:\n higher.append(group_bar.window.wid)\n\n # Sort the list to match the server's stacking order\n lower.sort(key=lambda wid: stack.index(wid))\n higher.sort(key=lambda wid: stack.index(wid))\n\n for wid in [w for w in lower if stack.index(w) > index]:\n self.qtile.windows_map[wid].window.configure(\n stackmode=xcffib.xproto.StackMode.Below, sibling=same[0]\n )\n\n # We reverse higher as each window will be placed above the last item in the current layer\n # this means the last item we stack will be just above the current layer.\n for wid in [w for w in higher[::-1] if stack.index(w) < index]:\n self.qtile.windows_map[wid].window.configure(\n stackmode=xcffib.xproto.StackMode.Above, sibling=same[-1]\n )\n\n return\n\n # Window needs new stacking info. 
We tell the server to stack the window\n # above or below a given \"sibling\"\n self.window.configure(\n stackmode=xcffib.xproto.StackMode.Above if above else xcffib.xproto.StackMode.Below,\n sibling=sibling,\n )\n # TODO: also move our children if we were moved upwards\n self.qtile.core.update_client_lists()", "title": "" }, { "docid": "5d1eacd5c59f929cc8e031d6b82c519b", "score": "0.48000294", "text": "def __treeWidget(self, columns=[]):\n sourceTree = QtWidgets.QTreeWidget()\n\n sourceTree.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)\n sourceTree.setSelectionBehavior(QtWidgets.QTreeWidget.SelectItems)\n sourceTree.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n\n header = QtWidgets.QTreeWidgetItem(columns)\n sourceTree.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n sourceTree.setHeaderItem(header)\n\n return sourceTree", "title": "" }, { "docid": "ce02745bc2c2120dc258689f22426385", "score": "0.4788416", "text": "def windowList(outFile):\n\twith open(outFile, 'w') as f:\n\t\tfor n in windowList:\n\t\t\tf.write(str(n) + '\\n')", "title": "" }, { "docid": "ab62f3a73dd2cca7dac546abd131b0b4", "score": "0.47796792", "text": "def EnumChildWindows(self):\n\t\tchildWindows = []\n\t\tdef enumChildProc(hWnd, lParam):\n\t\t\tchildWindows.append(Window(hWnd = hWnd))\n\t\tEnumChildWindows(self.handle, EnumChildProc(enumChildProc), 0)\n\t\treturn childWindows", "title": "" }, { "docid": "f8396972a64a02493128bd8b8e929dc7", "score": "0.47698897", "text": "def window( container, w ):\n jobWindowList = []; lC = len(container); maxIdx = 0;\n while maxIdx < lC:\n jobWindowList.append( container[maxIdx:maxIdx+w] )\n maxIdx += w\n return jobWindowList", "title": "" }, { "docid": "bcf239289c90e7a88fb42251de91e078", "score": "0.47666997", "text": "def hgroupise(tree):\n \n def has_content(el):\n \"\"\"\n Determine whether an element has significant content.\n Effectively, 'is there text here?'\n \"\"\"\n if el.__class__.__name__ == '_Comment':\n # There should be something better than this\n return False\n \n try:\n el_text = el.text.strip()\n except AttributeError:\n el_text = ''\n if el_text:\n return True\n for e in el.iterchildren():\n if not e.__class__.__name__ == \"_Comment\":\n try:\n text = e.text.strip()\n except AttributeError:\n text = ''\n try:\n tail = e.tail.strip()\n except AttributeError:\n tail = ''\n if text or tail or has_content(e):\n return True\n return False\n \n for heading in tree.xpath(heading_xpath):\n if len(heading.xpath('ancestor::hgroup')) > 0: # If the heading is in an hgroup already,\n continue # it's already done.\n \n hgroup = etree.Element(\"hgroup\") # The new hgroup\n \n parent = heading.getparent()\n start_index = parent.index(heading)\n parent.insert(start_index, hgroup) # Place the hgroup before parent\n \n for following in hgroup.itersiblings(): # Go through following siblings\n if following.tag in headings.keys() or not has_content(following): # If they're appropriate hgroup material,\n hgroup.append(following) # include them.\n else: # Otherwise,\n break # Stop including elements.\n \n if len(hgroup) < 2: # If it turns out the heading was lone,\n for child in reversed(hgroup): # put the moved siblings back,\n parent.insert(start_index+1, child)\n parent.remove(hgroup) # and remove the hgroup.", "title": "" }, { "docid": "3a84e75badae8bc3b8015110763b6104", "score": "0.47661707", "text": "def resizeWindows(sender, data):\n\n mainWindowWidth = get_main_window_size()[0]\n mainWindowHeight = get_main_window_size()[1]\n\n 
heightTopPanel = int(mainWindowHeight * 0.05)\n heightMiddlePanels = int(mainWindowHeight * 0.70)\n heightArchivePanel = int(mainWindowHeight * 0.6)\n heightBottomPanels = int(mainWindowHeight * 0.09)\n\n widthMiddlePanels = int(mainWindowWidth * 0.32)\n widthTopPanel = int(mainWindowWidth * 0.97)\n widthArchivePanel = int(mainWindowWidth * 0.97)\n\n yPosTopPanel = int(mainWindowHeight * 0.03)\n yPosMiddlePanels = int(mainWindowHeight * 0.15)\n yPosArchivePanel = int(mainWindowHeight * 0.1)\n yposBottomPanels = int(mainWindowHeight * 0.85)\n\n xPosTopPanel = int(mainWindowWidth * 0.009)\n xPosLeftPanel = int(mainWindowWidth * 0.008)\n xPosMiddlePanel = int(mainWindowWidth * 0.0073 + xPosLeftPanel + widthMiddlePanels)\n xPosRightPanel = int(mainWindowWidth * 0.0073 + xPosMiddlePanel + widthMiddlePanels)\n xPosArchivePanel = int(mainWindowWidth * 0.009)\n\n # Assigning dimensions to various windows\n\n # Top Panel\n set_window_pos(\"Top Panel\", x=xPosTopPanel, y=yPosTopPanel)\n set_item_width(\"Top Panel\", width=widthTopPanel)\n set_item_height(\"Top Panel\", height=heightTopPanel)\n\n # To Do Panel\n set_window_pos(\"To Do\", x=xPosLeftPanel, y=yPosMiddlePanels)\n set_item_width(\"To Do\", width=widthMiddlePanels)\n set_item_height(\"To Do\", height=heightMiddlePanels)\n\n # In Progress Panel\n set_window_pos(\"In Progress\", x=xPosMiddlePanel, y=yPosMiddlePanels)\n set_item_width(\"In Progress\", width=widthMiddlePanels)\n set_item_height(\"In Progress\", height=heightMiddlePanels)\n\n # Done Panel\n set_window_pos(\"Done\", x=xPosRightPanel, y=yPosMiddlePanels)\n set_item_width(\"Done\", width=widthMiddlePanels)\n set_item_height(\"Done\", height=heightMiddlePanels)\n\n # Archives Panel\n set_window_pos(\"Archives\", x=xPosArchivePanel, y=yPosArchivePanel)\n set_item_width(\"Archives\", width=widthArchivePanel)\n set_item_height(\"Archives\", height=heightArchivePanel)\n\n # To Do Status Panel\n set_window_pos(\"Status To Do\", x=xPosLeftPanel, y=yposBottomPanels)\n set_item_width(\"Status To Do\", width=widthMiddlePanels)\n set_item_height(\"Status To Do\", height=heightBottomPanels)\n\n # In Progress Status Panel\n set_window_pos(\"Status In Progress\", x=xPosMiddlePanel, y=yposBottomPanels)\n set_item_width(\"Status In Progress\", width=widthMiddlePanels)\n set_item_height(\"Status In Progress\", height=heightBottomPanels)\n\n # Done Status Panel\n set_window_pos(\"Status Done\", x=xPosRightPanel, y=yposBottomPanels)\n set_item_width(\"Status Done\", width=widthMiddlePanels)\n set_item_height(\"Status Done\", height=heightBottomPanels)", "title": "" }, { "docid": "df98f19df43a2983eb0282dca2530f2a", "score": "0.4758698", "text": "def feed_grouping(info):\r\n return info.feed_id", "title": "" }, { "docid": "52cc63773fe3c97c919c2e2fca0103a8", "score": "0.47513226", "text": "def _apply_groups(self, gdb):\n for chk_grp in self._groups:\n if not chk_grp in gdb:\n gdb[chk_grp] = Group(chk_grp, set([self.handle,]))\n logger.info ('%s: Created new group, %r', self.handle, chk_grp)\n # ensure membership in existing groups\n group = gdb[chk_grp]\n if not self.handle in group.members:\n group.add (self.handle)\n group.save ()\n for gname, group in gdb.iteritems():\n if gname not in self._groups and self.handle in group.members:\n group.remove (self.handle)\n group.save ()", "title": "" }, { "docid": "3b67cb45e0699cd1fc4152af4059489a", "score": "0.47499752", "text": "def re_arrange_nodes(node_group, Xmultiplier=1):\r\n\r\n nodes = { n.location.x:n for n in node_group.nodes }\r\n nodes = { 
k:nodes[k] for k in sorted(nodes) }\r\n\r\n for i,n in enumerate(nodes.values()):\r\n n.location.x = i*200*Xmultiplier\r\n n.width = 150\r\n\r\n return None", "title": "" }, { "docid": "3d6f9d83bbe21feeac536e848bc9947f", "score": "0.4747891", "text": "def splitWindows(self, split):\r\n code, out, err = self.call(self._CMD + \" split-window -\" + split)\r\n if code != 0:\r\n raise Exception(err)", "title": "" }, { "docid": "659ebedc60d3a3f745e984803b4cd06a", "score": "0.47462654", "text": "def get_windows(self, window):\n assert self.num_splits > 1, \"\"\"num_splits should be > 1\"\"\"\n for slice_idx in np.linspace(\n start=self.prediction_length, stop=len(window), num=self.num_splits\n ):\n\n yield window[: int(round(slice_idx))]", "title": "" }, { "docid": "c1af5fa8dc6364c92a046767cb521c14", "score": "0.4745736", "text": "def h5tree_to_QTree(base_node, base_tree_elt=None, pixmap_items=[]):\n\n if base_tree_elt is None:\n base_tree_elt = QtWidgets.QTreeWidgetItem([base_node.name, \"\", base_node.path])\n for node_name, node in base_node.children().items():\n child = QtWidgets.QTreeWidgetItem([node_name, \"\", node.path])\n if 'pixmap' in node.attrs.attrs_name:\n pixmap_items.append(dict(node=node, item=child))\n if node.attrs['CLASS'] == 'GROUP':\n h5tree_to_QTree(node, child, pixmap_items)\n base_tree_elt.addChild(child)\n return base_tree_elt, pixmap_items", "title": "" }, { "docid": "d94b1482c5b2325f53e4419946c416af", "score": "0.47446963", "text": "def group(list):\n prev = list[0]\n grouped = [[prev]]\n\n for file in list[1:]:\n gap = prev.gap_time(file)\n if gap < float(CONFIG[\"Awake Period\"]):\n grouped[-1].append(file)\n else:\n grouped.append([file])\n prev = file\n\n return grouped", "title": "" }, { "docid": "fa9f4459a8a07c3e546bafa8118a9eb1", "score": "0.4743523", "text": "def __groupCrawlers(self, crawlers):\n groupedCrawlers = OrderedDict()\n groupedCrawlers[None] = []\n for crawlerList in Crawler.group(crawlers):\n for crawler in crawlerList:\n # group\n if self.__checkedViewMode == 'Group' and 'group' in crawler.tagNames():\n groupName = crawler.tag('group')\n if groupName not in groupedCrawlers:\n groupedCrawlers[groupName] = []\n\n groupedCrawlers[groupName].append(crawler)\n\n # flat\n else:\n groupedCrawlers[None].append(crawler)\n\n return groupedCrawlers", "title": "" }, { "docid": "4a44f20c8c4993eb88821b9a5ce473b7", "score": "0.47407237", "text": "def make_windows(self, x):\n windows = []\n start = 0\n end = start + self.window_len\n while end <= x.shape[0]:\n xWindow = x[start:end, :]\n connectivity_mat = self.measure(xWindow.T)\n connectivity_mat = connectivity_mat[np.triu_indices(connectivity_mat.shape[0], k=1)]\n windows += [connectivity_mat]\n start += 1\n end = start+self.window_len\n return windows", "title": "" }, { "docid": "f990db6ca0af2f95555810adc2d1f7ff", "score": "0.47378758", "text": "def initUI(self):\n for category in self.expressions_data.keys():\n category_item = QtWidgets.QTreeWidgetItem(self)\n\n category_label = QtWidgets.QLabel(category)\n category_label.setObjectName(\"category_label\")\n\n category_font = fonts['categories']\n category_font.setItalic(True)\n\n category_label.setFont(category_font)\n\n self.setItemWidget(category_item, 0, category_label)\n expressions = self.expressions_data[category]\n\n\n for name in expressions.keys():\n name_item = QtWidgets.QTreeWidgetItem(category_item)\n name_item.setExpanded(True)\n\n name_label = QtWidgets.QLabel(str(name))\n name_label.setWordWrap(True)\n name_label.setObjectName(\"name_label\")\n 
name_label.setFont(QtGui.QFont(\"Helvetica\", 14))\n self.setItemWidget(name_item, 0, name_label)\n\n expression = expressions[name]\n\n reference_data = \"{name} {expression} {description}\".format(\n name=name,\n expression=expression[\"expression\"],\n description=expression[\"description\"]\n )\n name_item.setData(1, QtCore.Qt.ForegroundRole, reference_data)\n\n self.all_name_items.append(name_item)\n\n expression_item = QtWidgets.QTreeWidgetItem(name_item)\n expression_label = QtWidgets.QLabel(expression[\"expression\"])\n expression_label.setWordWrap(True)\n expression_label.setObjectName(\"expression_label\")\n expression_label.setFont(fonts['expressions'])\n\n self.setItemWidget(expression_item, 0, expression_label)\n\n if category == \"Waves\":\n picture_path = waves_images_folder + expression[\"description\"]\n illustration_pixmap = QtGui.QPixmap(picture_path)\n illustration_pixmap = illustration_pixmap.scaledToWidth(\n 500, QtCore.Qt.SmoothTransformation\n )\n\n illustration_label = QtWidgets.QLabel(self)\n illustration_label.setPixmap(illustration_pixmap)\n illustration_label.setScaledContents(True)\n\n self.setItemWidget(expression_item, 1, illustration_label)\n else:\n description_label = QtWidgets.QLabel(expression[\"description\"])\n description_label.setWordWrap(True)\n description_label.setObjectName(\"description_label\")\n description_label.setFont(fonts['descriptions'])\n\n self.setItemWidget(expression_item, 1, description_label)\n\n generate_button = QtWidgets.QPushButton(\"Generate\")\n generate_button.setObjectName(\"generate_button\")\n generate_button.setCursor(\n QtGui.QCursor(QtCore.Qt.PointingHandCursor)\n )\n generate_widget = QtWidgets.QWidget()\n generate_layout = QtWidgets.QVBoxLayout()\n generate_widget.setLayout(generate_layout)\n generate_layout.addWidget(generate_button)\n generate_button.setFixedSize(\n QtCore.QSize(100, 50)\n )\n\n\n if len(expression[\"example\"]) > 1:\n example_button = QtWidgets.QPushButton(\"Quick Example\")\n example_button.setObjectName(\"example_button\")\n example_button.setCursor(\n QtGui.QCursor(QtCore.Qt.PointingHandCursor)\n )\n example_button.clicked.connect(\n partial(self.generate_expression, expression[\"example\"])\n )\n\n example_widget = QtWidgets.QWidget()\n example_layout = QtWidgets.QVBoxLayout()\n example_widget.setLayout(example_layout)\n example_layout.addWidget(example_button)\n\n example_button.setFixedSize(\n QtCore.QSize(100, 50)\n )\n self.setItemWidget(expression_item, 2, example_widget)\n\n if expression.get(\"fields\"):\n form_item = QtWidgets.QTreeWidgetItem(name_item)\n form_widget = ExpressionFormWidget(self, expression)\n\n self.setItemWidget(form_item, 1, form_widget)\n self.setItemWidget(form_item, 2, generate_widget)\n generate_button.clicked.connect(\n partial(\n self.generate_expression,\n expression,\n form_widget.result\n )\n )\n\n else:\n generate_button.clicked.connect(\n partial(self.generate_expression, expression)\n )\n self.setItemWidget(expression_item, 2, generate_widget)\n\n self.sortItems(0, QtCore.Qt.AscendingOrder)\n self.setSortingEnabled(True)\n self.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.setFocusPolicy(QtCore.Qt.NoFocus)", "title": "" }, { "docid": "fc8a02d06b4f835eee518886a083e8d3", "score": "0.47368243", "text": "def _group_cases(self):\n groups = []\n for case in self.test_cases:\n for group in groups:\n # add to current group\n if group.add_case(case):\n break\n else:\n # create new group\n groups.append(Group(case))\n return groups", "title": "" 
}, { "docid": "3ce0a608a1e3aa0296337311022976b1", "score": "0.47330937", "text": "def createFileGroups(self):\n self.dataset.buildListOfFiles()\n self.dataset_details['FileGroups'] = dict() \n for fileName in self.dataset.files: \n\n name = removeIndex(fileName) \n if name in self.dataset_details['FileGroups']: \n self.dataset_details['FileGroups'][name]['Files'].append(fileName) \n else: \n instanceGroup = {\"SizeInTB\":None, \n \"IsCrab\":None,\n \"Files\":None,\n \"FileEntries\":None,\n \"PrimaryDatasetFraction\":None,\n \"BadFiles\":None,\n \"NumberBadFiles\":None,\n \"MissingFiles\":None,\n \"NumberMissingFiles\":None,\n \"GoodFiles\":None,\n \"NumberGoodFiles\":None,\n \"TotalJobs\":None}\n instanceGroup['Files']=[]\n instanceGroup['Files'].append(fileName) \n isCrab = isCrabFile( fileName ) \n instanceGroup['IsCrab']=isCrab \n self.dataset_details['FileGroups'][name]=instanceGroup \n for fg in self.dataset_details['FileGroups']: \n self.dataset_details['FileGroups'][fg]['Files'].sort(key=lambda x: int(getIndex(x)))", "title": "" }, { "docid": "97ead828f2de5a3235fd72236b2fe93c", "score": "0.4732334", "text": "def events_group():", "title": "" }, { "docid": "f7e30e96b66579b1cd7f9ea8e5b6c145", "score": "0.47317544", "text": "def test_gather_tabs_multiple_groups(self) -> None:\n window: sublime.Window = sublime.active_window()\n view: sublime.View = window.new_file()\n view.set_scratch(True)\n second_view: sublime.View = window.new_file()\n second_view.set_scratch(True)\n\n # 2 column layout\n layout: Dict[str, List] = {\n \"cells\": [[0, 0, 1, 1], [1, 0, 2, 1]],\n \"cols\": [0.0, 0.5, 1.0],\n \"rows\": [0.0, 1.0]\n }\n\n sublime.active_window().set_layout(layout)\n sublime.active_window().set_view_index(view, group=0, idx=0)\n sublime.active_window().set_view_index(second_view, group=1, idx=0)\n\n cmd: TabFilterCommand = TabFilterCommand(window)\n\n groups: List[int] = list(range(window.num_groups()))\n\n self.assertListEqual(\n [entities.Tab(view), entities.Tab(second_view)],\n cmd.gather_tabs(groups)\n )", "title": "" }, { "docid": "363b53b09801380038ba2cba04dc9db6", "score": "0.47273317", "text": "def workspaces(self):\n return sorted(super().workspaces, key=lambda w: w['id'] != self.wid)", "title": "" }, { "docid": "712eab7991263599ea35f9c2fbb04609", "score": "0.47255337", "text": "def mineral_groups_list(context):\n group_list = ['Silicates',\n 'Oxides',\n 'Sulfates',\n 'Sulfides',\n 'Carbonates',\n 'Halides',\n 'Sulfosalts',\n 'Phosphates',\n 'Borates',\n 'Organic_Minerals',\n 'Arsenates',\n 'Native_Elements',\n 'Other']\n\n try:\n context['g_selected']\n except Exception:\n g_selected = ''\n else:\n g_selected = context['g_selected']\n return {'group_list': group_list, 'g_selected': g_selected}", "title": "" }, { "docid": "19072f3560bfe0e8dc391b363b401a88", "score": "0.47179866", "text": "def grouped_items(self):\n\n items = (\n DBSession.query(TradeLotItem.item_id, func.count('*').label('qty'))\n .join(TrainerItem)\n .filter(TradeLotItem.trade_lot_id == self.id,\n TrainerItem.pokemon_id.is_(None))\n .group_by(TradeLotItem.item_id)\n .subquery()\n )\n\n return (\n DBSession.query(Item, items.c.qty)\n .join(items)\n .order_by(Item.name)\n .all()\n )", "title": "" }, { "docid": "7c4e3fb657e892d2408921082ce5c20b", "score": "0.471435", "text": "def h5tree_to_QTree(base_node, base_tree_elt=None, pixmap_items=[]):\n\n if base_tree_elt is None:\n base_tree_elt = QtWidgets.QTreeWidgetItem([base_node.name, \"\", base_node.path])\n for node_name, node in base_node.children().items():\n 
child = QtWidgets.QTreeWidgetItem([node_name, \"\", node.path])\n if 'pixmap' in node.attrs.attrs_name:\n pixmap_items.append(dict(node=node, item=child))\n klass = node.attrs['CLASS']\n if klass == 'GROUP':\n h5tree_to_QTree(node, child, pixmap_items)\n\n base_tree_elt.addChild(child)\n return base_tree_elt, pixmap_items", "title": "" }, { "docid": "7b47bd6f758bc1f369755675133a6f98", "score": "0.47138762", "text": "def _init_group_list(self, group_list, count):\n for i in range(count):\n group_list.append(TileContainer())", "title": "" }, { "docid": "d3eec8f2eedbf27fc07171c25d04431b", "score": "0.47110084", "text": "def _split_groups(array, j, slicer):\n results = {\"groups\": array[\"groups\"][..., slicer]}\n results[\"intermediates\"] = [v[..., slicer] for v in array[\"intermediates\"]]\n return results", "title": "" }, { "docid": "6b653cbeba36166555023dedd8fb97aa", "score": "0.47080487", "text": "def init_layout(self):\n super(WxSplitter, self).init_layout()\n widget = self.widget()\n for child in self.children():\n if isinstance(child, WxSplitItem):\n widget.AppendWindow(child.widget())\n widget.SizeWindows()", "title": "" }, { "docid": "753028f46d18c7f5b0dd99209020c8f6", "score": "0.47035506", "text": "def test_windowing():\n\n ngram_hashes = [0, 1, 2, 1]\n windowed = window_ngrams(ngram_hashes, window_size=2)\n\n assert list(windowed) == [\n [(0, 0), (1, 1)],\n [(1, 1), (2, 2)],\n [(2, 2), (3, 1)],\n ]", "title": "" }, { "docid": "927002d1158151f6ab20a97c8ec45cef", "score": "0.46965992", "text": "def compute_windows(self):\n for window_index in self.window_centers:\n window = self.audio_array[(window_index - self.window_size // 2) * self.sample_freq:\n (window_index + self.window_size // 2) * self.sample_freq]\n self.windows.append(window)", "title": "" }, { "docid": "a24d2c42da9fb6d4e818582b396a5c09", "score": "0.4692471", "text": "def parse_groups(self, doc):\n if \"groups\" in doc \\\n and hasattr(doc[\"groups\"], '__iter__'):\n for group in doc[\"groups\"]:\n self.parse_group(doc, group)", "title": "" }, { "docid": "62318154b057da0d80cf637ecfdd3395", "score": "0.46905172", "text": "def create_widgets(view_window: tkinter.Toplevel) -> None:\r\n\r\n # clear the existing widgets\r\n widget_list = view_window.winfo_children()\r\n\r\n for item in widget_list:\r\n if item.winfo_children():\r\n widget_list.extend(item.winfo_children())\r\n\r\n for item in widget_list:\r\n item.grid_forget()\r\n\r\n # make the machine labels and start buttons\r\n n = 2\r\n machine_labels = []\r\n for machine in machines:\r\n message = machine+\": \"+machines[machine][0]+\": \"+machines[machine][1]\r\n machine_label = Label(view_window, text=message)\r\n machine_label.grid(row=n, column=0)\r\n machine_label.config(bg=\"#515151\", fg=\"white\",\r\n font=(\"Arial Black\", \"10\"))\r\n machine_button = Button(view_window, text=\"Start\")\r\n machine_button.grid(row=n, column=1)\r\n machine_button.config(command=partial(start_machine,\r\n [machine, machine_label]),\r\n fg=\"#515151\", bg=\"#b3ecff\",\r\n font=(\"Arial Black\", \"10\"))\r\n machine_labels.append(machine_label)\r\n n += 1\r\n\r\n # make the batch labels and buttons\r\n batch_title = Label(view_window, text=\"Batch Status:\")\r\n batch_title.grid(row=n, column=0, sticky=\"W\")\r\n batch_title.config(bg=\"#515151\", fg=\"white\", font=(\"Arial Black\", \"10\"))\r\n n += 1\r\n for batch in status:\r\n if status[batch] != []:\r\n message = batch+\": \"+status[batch][0]\r\n batch_label = Label(view_window, text=message)\r\n batch_label.grid(row=n, 
column=0)\r\n batch_label.config(bg=\"#515151\", fg=\"white\",\r\n font=(\"Arial Black\", \"10\"))\r\n batch_button = Button(view_window, text=\"Next Stage\")\r\n batch_button.config(command=partial(next_stage,\r\n [batch, batch_label,\r\n batch_button, view_window,\r\n machine_labels]),\r\n fg=\"#515151\", bg=\"#b3ecff\",\r\n font=(\"Arial Black\", \"10\"))\r\n batch_button.grid(row=n, column=1, sticky=\"NSEW\")\r\n n += 1\r\n\r\n add_button = Button(view_window, text=\"Edit Brewing Process\",\r\n command=lambda: add_process(machine_labels,\r\n view_window))\r\n add_button.grid(row=0, column=0, sticky=\"NSEW\")\r\n add_button.config(fg=\"#515151\", bg=\"#b3ecff\", font=(\"Arial Black\", \"10\"))\r\n brewers_title = Label(view_window, text=\"Brewing Tank Status:\")\r\n brewers_title.config(bg=\"#515151\", fg=\"white\", font=(\"Arial Black\", \"10\"))\r\n brewers_title.grid(row=1, column=0, sticky=\"W\")", "title": "" }, { "docid": "005f11b5c698dbe52d03abb458b836a3", "score": "0.46899775", "text": "def group(self):\n\n return self.parent().group()", "title": "" } ]
bac775a0f2b4c28aedc888838fc529eb
Returns a random integer session ID for this flow.
[ { "docid": "b5104c4cbb829727818cc13cd5bd5b3c", "score": "0.6393333", "text": "def GetNewSessionID(self, **_):\n return rdfvalue.SessionID(base=\"aff4:/hunts\", queue=self.args.queue)", "title": "" } ]
[ { "docid": "53ae99991fe967be899a143c5c23b960", "score": "0.790744", "text": "def generate_session_id():\n return random.randint(1, 2 ** 53)", "title": "" }, { "docid": "360881e3d5f62773e10abe9f123531d3", "score": "0.7590223", "text": "def _generate_session_id(self):\n\n while True:\n rand = os.urandom(16)\n now = time.time()\n session_id = sha1(\"%s%s\" %(rand, now))\n session_id = session_id.hexdigest()\n if session_id not in self.store:\n break\n return session_id", "title": "" }, { "docid": "78e683c0e9c8f238346284039ffcea25", "score": "0.74557614", "text": "def gen_session_id() -> str:\n return hashlib.sha1(os.urandom(128)).hexdigest()", "title": "" }, { "docid": "67059c1015468727a9c15e732bdef41a", "score": "0.73018515", "text": "def _create_session_id(self):\n return str(uuid.uuid4())", "title": "" }, { "docid": "082ff2aeae0c444e03d74384740275f7", "score": "0.7286483", "text": "def rnd_id(self):\n return self._rnd_id", "title": "" }, { "docid": "55b7720ac48f589cc99937fada2945ef", "score": "0.7182314", "text": "def generateUniqueId(self):\n data = \"{:.20f}{:.20f}{:.20f}\".format(\n random.random(), time.time(), process_time())\n digest = sha1(data.encode()).digest()\n s = digestEncode(digest)\n # we store a HMAC of the random value together with it, which makes\n # our session ids unforgeable.\n mac = hmac.new(self.secret.encode(), s, digestmod=sha1).digest()\n return (s + digestEncode(mac)).decode()", "title": "" }, { "docid": "7d2b694cfa035c1d67134c70c22d03ed", "score": "0.71521765", "text": "def GenerateRandomSessionKey():\n # TODO: this is admittedly a shitty key\n # see: http://stackoverflow.com/questions/817882/unique-session-id-in-python/6092448#6092448\n return str(uuid.uuid1())", "title": "" }, { "docid": "5fd7efd08466f3b8863096f85ac613e4", "score": "0.6773705", "text": "def session():\n return str(uuid4())", "title": "" }, { "docid": "9785aba6d68db183f14bea20e8cb359b", "score": "0.6758889", "text": "def unique_id(self):\n return f\"{self._hub.device.serial_number}_session_start\"", "title": "" }, { "docid": "236a2e0a509c53b5bb2f1c6baf14f549", "score": "0.66807467", "text": "def random_id():\n return ((floor(time_now() * 1000.) 
- DISCORD_EPOCH) << 22) + floor(random() * 4194304.0)", "title": "" }, { "docid": "8c3dc10573d77fa391645bbaa6409236", "score": "0.6680557", "text": "def gen_id():\n return int(uuid.uuid4())", "title": "" }, { "docid": "d0968c87f5d3d40dc8a7160dfefc0c99", "score": "0.66557944", "text": "def session_id(self) -> Union[\n uuid.UUID,\n str,\n ]:\n return self.__session_id", "title": "" }, { "docid": "db1e665a58925e736b81a0fa95bbe032", "score": "0.6655184", "text": "def get_random_id(self):\n import string\n import random\n taskID = ''.join(random.sample(string.ascii_letters+string.octdigits*5, 10))\n return taskID", "title": "" }, { "docid": "03b32e2b667bcb635a8467c686822170", "score": "0.6642255", "text": "def _genid(self): \n\n # pi('_genid')\n\n return str(base64.urlsafe_b64encode(os.urandom(32)).replace(b'=', b'e'))", "title": "" }, { "docid": "4e9a792277ca27ccd6591db6148c67c1", "score": "0.6631743", "text": "def _get_random_id():\n return uuid.uuid4().hex[-12:]", "title": "" }, { "docid": "1692040278dd8b9c91b36718a968e7d7", "score": "0.65975416", "text": "def session_id(self):\n return self._session_id", "title": "" }, { "docid": "998cb001da5564fdb6fa9d73cf0f51aa", "score": "0.65938675", "text": "def session_id(self):\n return self.session.session_id", "title": "" }, { "docid": "35f4af3426246c50ff6d7a28c94cd825", "score": "0.65626705", "text": "def _get_session_id(self) -> str:\n response = self._get(self._ENDPOINT_INIT)\n if response.status_code == 200:\n try:\n return response.json()['sessionId']\n except json.JSONDecodeError:\n raise SessionError(500, \"Failed to obtain session ID. Could not decode response from Stellar.\")\n except KeyError:\n raise SessionError(500, \"Failed to obtain session ID. Response from Stellar does not contain \"\n \"sessionId field.\")\n else:\n raise SessionError(response.status_code, response.json()['reason'])", "title": "" }, { "docid": "112cd179f33e744b189b283e4e431df9", "score": "0.65201706", "text": "def generate_id():\n return int(random.getrandbits(48))", "title": "" }, { "docid": "4b7d1737393aae1ae7e3c71ba4155813", "score": "0.6518334", "text": "def _get_session_number(self):\n return self.__session_number", "title": "" }, { "docid": "88fdf482fd7af676295927fe637dbbd7", "score": "0.6393614", "text": "def _gen_id(self):\n return str(uuid.uuid4()) # uuid4 = make a random UUID", "title": "" }, { "docid": "909217b517692258d1ed136cd427d2a9", "score": "0.6390307", "text": "def generate_agent_id():\n return str(uuid.uuid4())", "title": "" }, { "docid": "d99fd2437a0eff4903c5bfb37aa23ce3", "score": "0.63554734", "text": "def generate_id(cls):\n return int(time.time() * 1000000)", "title": "" }, { "docid": "568b384295f113aeb17f807aa91cf0a3", "score": "0.6307349", "text": "def get_invite_id():\n return get_random_string(length=8)", "title": "" }, { "docid": "a717715152e5734a8d667cac01aada73", "score": "0.62849295", "text": "def _random_id():\n data = struct.pack('Q', random.getrandbits(64))\n return base64.urlsafe_b64encode(data).strip(b'=')", "title": "" }, { "docid": "4f2669f3ad1fb7e3d7fe339ded334250", "score": "0.62587667", "text": "def CreateID(self):\n return random_generator.randrange(0, 256)", "title": "" }, { "docid": "6e15d5202ec655b5f9897d43869dd213", "score": "0.6226677", "text": "def _uniq_id():\n return random.randint(0, (2 ** 56) - 1)", "title": "" }, { "docid": "169c5778112a7f5b5b96f948c539b603", "score": "0.6138648", "text": "def getSessionId(self):\n return self.sid", "title": "" }, { "docid": "f6fa2f002dc84c10ceec3953cbd37872", "score": 
"0.6132168", "text": "def get_session_id(\n self,\n sess_id=None,\n reuse_session=True,\n choose=False,\n generate=False,\n ):\n _sess_id = None\n if self.last_session_id is None:\n self.last_session_id = self.get_last_session_id()\n if sess_id is not None:\n _sess_id = sess_id\n elif (\n self.last_session_id is not None\n and reuse_session\n and not choose\n ):\n logger.info('[+] implicitly reusing last '\n f'session ID { self.last_session_id }')\n _sess_id = self.last_session_id\n if _sess_id is None and choose:\n ids = self.get_session_ids()\n _sess_id = choose_wisely(\n from_list=ids,\n what=\"a session\",\n cancel_throws_exception=True\n )\n if _sess_id is None and generate:\n _sess_id = uuid.uuid4()\n if sess_id is None and _sess_id is None:\n raise RuntimeError(\n \"[-] session ID not provided\" +\n (\" - use '--choose'?\" if sys.stdout.isatty() else \"\")\n )\n if _sess_id is not None:\n self.set_last_session_id(sess_id=_sess_id)\n return _sess_id", "title": "" }, { "docid": "0f999feef2822c901f4fefce4a1487b5", "score": "0.61284727", "text": "def genIdNum(self):\n self.id_num = random.randrange(40000,90000) % 300", "title": "" }, { "docid": "4f6acb5836405d5feb34ebaa1f1a9a44", "score": "0.61282444", "text": "def unique_id():\n return binascii.hexlify(os.urandom(16)).decode(\"utf-8\")", "title": "" }, { "docid": "20c93833e4fdebc82f27c5f8958886e3", "score": "0.6122008", "text": "def gen_new_session_salt(self):\n self.handle_random_reps_over_limit()\n\n self.session_salt = random.randint(0, self.RANDOM_LIMIT)\n self.times_used_random += 1\n self.current_salt = self.session_salt\n\n self.write_current_times_used_random_to_local_file()\n print(self.current_salt)", "title": "" }, { "docid": "e8d55fd323f7561c42caa2ac44954538", "score": "0.61138564", "text": "async def get_unique_session_id(page):\n unique_session_id = await page.evaluate(\"sessionStorage.getItem(STORAGE)\")\n\n return unique_session_id", "title": "" }, { "docid": "fb55fdb6712649549c27dd260304f074", "score": "0.6111927", "text": "def randuuid(self):\r\n return str(uuid.uuid4())", "title": "" }, { "docid": "b015b72338a5e6087f270710d3f6dcd5", "score": "0.61103475", "text": "def GetNewSessionID(cls, queue, client_id=None, parent_flow_urn=None):\n if parent_flow_urn:\n base = parent_flow_urn\n else:\n base = client_id or aff4.ROOT_URN\n base = base.Add(\"flows\")\n\n return rdfvalue.SessionID(base=base, queue=queue)", "title": "" }, { "docid": "198780ab84a6d722db2459d8233e8fd0", "score": "0.6107096", "text": "def sessionid(self):\n\n url = self.url + '/en/login/'\n\n try:\n response = requests.get(url, headers=self._headers(), verify=False)\n response.raise_for_status()\n except (requests.exceptions.ConnectionError, requests.exceptions.Timeout):\n raise ConnectionError(str.format(\"Connection to {0} not possible\", url))\n except requests.exceptions.RequestException:\n raise InvalidURLError(str.format(\"Invalid Efesto url: {0}\", url))\n\n self.phpsessid = response.cookies.get(\"PHPSESSID\")\n\n return self.phpsessid", "title": "" }, { "docid": "05ffc0ac1640bd5295a4c078e4d70b57", "score": "0.6106078", "text": "def generate_id():\n return uuid.uuid4().hex", "title": "" }, { "docid": "8542379fd97f9505eeb2f3beb451edf3", "score": "0.6092242", "text": "def get_commissioner_session_id(self) -> int:\n return self.__parse_int(self.execute_command('commissioner sessionid'))", "title": "" }, { "docid": "e4510ae1b9496c58c96e52f54385d1e3", "score": "0.6081425", "text": "def incrementSessionId(self):\n session_id = self.getSessionId()\n 
session_id += 1\n self._setSessionId(session_id)\n self.resetMessageId() # for a new session, the message Id must be reset\n return session_id", "title": "" }, { "docid": "4261f49a3e4e3913cf38abe228572335", "score": "0.60667026", "text": "def _new_sid(self):\n sid = None\n while sid is None or sid in self._handlers:\n sid = crypto.random().randint(1, 2 ** 32)\n return sid", "title": "" }, { "docid": "392d16161499b6556837dd01b09b72d2", "score": "0.6060358", "text": "def sim_id(self) -> str:\n return pulumi.get(self, \"sim_id\")", "title": "" }, { "docid": "392d16161499b6556837dd01b09b72d2", "score": "0.6060358", "text": "def sim_id(self) -> str:\n return pulumi.get(self, \"sim_id\")", "title": "" }, { "docid": "28c697a408af09e646092db7156963ea", "score": "0.60603", "text": "def generateUniqueId(self):\n return hashlib.md5(\"a\"+str(random.random())+str(random.random())+str(random.random())).hexdigest()", "title": "" }, { "docid": "42342e4cca3fc18656393d52891fb61e", "score": "0.6019103", "text": "def generate_uuid() -> int:\n\t...", "title": "" }, { "docid": "35dbfef39355b20dd12ad6381f3ea027", "score": "0.6011236", "text": "async def get_id(self, request: web.Request) -> Hashable:\n session = await aiohttp_session.get_session(request)\n return session.get(self.session_key)", "title": "" }, { "docid": "47c149dd2e6d90cb8d4ea70eb7a6fbe4", "score": "0.59673923", "text": "def gen_id():\n global _NEXTID\n\n i = _NEXTID\n _NEXTID = _NEXTID + 1\n return i", "title": "" }, { "docid": "87635edebc9d8d16a2994b2be87c28fe", "score": "0.5952459", "text": "def _get_id():\n return str(uuid.uuid4())", "title": "" }, { "docid": "ad7931e418359ad80d1978eaedbadc67", "score": "0.59473395", "text": "def _generate_unique_id(self):\n return uuid.uuid4().hex", "title": "" }, { "docid": "f9d3ba22ad1f2860a6293b32e2df36ab", "score": "0.5939418", "text": "def gen_id(self):\n self._id_counter += 1\n return self._id_counter", "title": "" }, { "docid": "fe094a31d1e75439d4222a9df6990801", "score": "0.5939406", "text": "def generate_token():\n return uuid4().hex", "title": "" }, { "docid": "bb3982bdc3b58e7800a9396fcddaba5b", "score": "0.59241694", "text": "def unique_id(self):\n return str(uuid4().int)", "title": "" }, { "docid": "a6f8f1583c78b91bb552a96942f81699", "score": "0.5911556", "text": "def get_session_ID(self, value, name='session'):\n data = self.serializer.deserialize(name, value, max_age=None)\n return data.get('_sid')", "title": "" }, { "docid": "fee7d6608c83137df6f817eb95c05cf5", "score": "0.5910789", "text": "def acquire_ident(self):\n ident = self.next_id\n self.next_id += 2\n return ident", "title": "" }, { "docid": "c70daa3e8fa3a16daee9f684f8e09af9", "score": "0.58980435", "text": "def get_unique_id(cls):\n cls.unique_id +=1\n return cls.unique_id", "title": "" }, { "docid": "bde60937de634431ae4a7da7014090c8", "score": "0.5884503", "text": "def get_identity():\n return time_util.local_strftime(time_util.utcnow(),\n \"%Y-%m-%d-%H_%M_%S-\") + utils.random_word(8)", "title": "" }, { "docid": "cc6d3b065b3c5caf1b6a9d5310e7bd87", "score": "0.58693594", "text": "def nextid():\n global __objectid\n __objectid += 1\n return str(__objectid)", "title": "" }, { "docid": "18051e063c65b999c3dbf3061b4fcccb", "score": "0.58610517", "text": "def generate_id(self):\n # pylint: disable-msg=R0201\n return str(uuid.uuid4())", "title": "" }, { "docid": "e4c8b701d3ddeb0337c04445cf0a640a", "score": "0.58566236", "text": "def getRandom(self) -> int:\n return sample(self.hash, 1)[0]", "title": "" }, { "docid": 
"61bf8e6e431fc4b1503fbb8465e1b9f3", "score": "0.58545965", "text": "def getNewUID():\n uid = os.urandom(16).encode('hex')\n return uid", "title": "" }, { "docid": "88ada2ce674ff43c072c360e78f35f15", "score": "0.5849803", "text": "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "title": "" }, { "docid": "88ada2ce674ff43c072c360e78f35f15", "score": "0.5849803", "text": "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "title": "" }, { "docid": "4c41ea14dd65b8dd331f43df28a0c492", "score": "0.58408743", "text": "def unique_id(self):\n return self._hub.device.serial_number", "title": "" }, { "docid": "e33ee8a424dc9a6a5422ea15330b8f41", "score": "0.5831197", "text": "def get_last_session_id(self):\n sess_id = None\n if os.path.exists(LAST_SESSION_STATE):\n try:\n with open(LAST_SESSION_STATE, 'r') as sf:\n sess_id = sf.read().strip()\n except Exception: # noqa\n pass\n return sess_id", "title": "" }, { "docid": "3fa169a92f8002c9b73fb0483c43a3d9", "score": "0.5809964", "text": "def get_user_id(self):\n\n self.user_id = session.get('user_id',0)", "title": "" }, { "docid": "3f253f00a5d427a7d21f9e4ed3eb59b8", "score": "0.5802733", "text": "def get_payment_id():\n\n random_32_bytes = os.urandom(32)\n payment_id = \"\".join(map(chr, binascii.hexlify(random_32_bytes)))\n\n return payment_id", "title": "" }, { "docid": "b713523c5e45d5442aee6b8c5369fd8a", "score": "0.579877", "text": "def get_id(self):\n self.code = self.generator.code()\n return self.code", "title": "" }, { "docid": "1ff96e9fcb26eca9a9f8408e6573866c", "score": "0.5798654", "text": "def GetRndNId(self):\n return _snap.TCnCom_GetRndNId(self)", "title": "" }, { "docid": "4bef4a8834e58baa42aa8516849a6dd6", "score": "0.57942206", "text": "def get_random_number():\n return random.random()", "title": "" }, { "docid": "8f104b8d92a720289b6dc71c84c401ee", "score": "0.5790967", "text": "def uniqueID() -> str:\n\treturn str(random.randint(1,sys.maxsize))", "title": "" }, { "docid": "3947aeeb8bda360255dc0e7f79353e2f", "score": "0.57903105", "text": "def get_unique_key():\n return uuid.uuid4()", "title": "" }, { "docid": "4a07a3a959f6bdc468f0ca1d54ff1875", "score": "0.5787263", "text": "def get_new_id(self):\n return uuid.uuid1().hex", "title": "" }, { "docid": "a7cbcabad7e3096f779e591b80b9f764", "score": "0.57736784", "text": "def get_key(self):\n return urandom(32)", "title": "" }, { "docid": "e3a052191c1b7d0ee8f99ff303c42f96", "score": "0.5769877", "text": "def get_session_user_id():\n _session = get_session_info()\n return str(_session['user_id']) if _session and _session['user_id'] else None", "title": "" }, { "docid": "1126fd538a33d2bcefe64e62c1e5dedc", "score": "0.57669234", "text": "def make_session(self):\n incr = 0\n while True:\n session_id = self.get_timestamp() + \"_%03d\" % incr\n session_folder = os.path.join(self.data_folder, session_id)\n if os.path.exists(session_folder):\n incr += 1\n else:\n os.makedirs(session_folder)\n return session_id", "title": "" }, { "docid": "28137d30e1e16c42639307984a64f61d", "score": "0.575042", "text": "def unique_id(self):\n return self._device.unique_id", "title": "" }, { "docid": "aff6604a55fac89b13489e597671f311", "score": "0.5739494", "text": "def get_random_number():\n temp_date_time = datetime.datetime.now()\n return str(temp_date_time)[20:]", "title": "" }, { "docid": "7e71d7065d139e3793c00bb25a5f09da", "score": "0.57377297", "text": "def _generate_unique_integer_id():\n\n random_int = uuid.uuid4().int\n # Cast to string to get a fixed length\n 
random_str = str(random_int)[-_EXPERIMENT_ID_FIXED_WIDTH:]\n # append a random int as string to the end of the generated string for as many\n # leading zeros exist in the generated string in order to preserve the total length\n # once cast back to int\n for s in random_str:\n if s == \"0\":\n random_str = random_str + str(random.randint(0, 9))\n else:\n break\n return int(random_str)", "title": "" }, { "docid": "de7d2a4df0e19050c9f90f14dc1e2f83", "score": "0.5729284", "text": "def get_current_session_id(self) -> int:\n if self.con:\n query = \"SELECT session_id as id FROM sessions WHERE session_date = DATE()\"\n try:\n df = pd.read_sql(query, self.con)\n if df.empty:\n date = pd.read_sql(\"SELECT MAX(session_date) as date from sessions\", self.con)\n date = datetime.strptime(date[\"date\"][0], \"%Y-%m-%d\").date()\n if datetime.now().date()-date == timedelta(days=7):\n self.new_session()\n logging.debug(\" It's been seven days, getting a new session id\")\n return self.get_current_session_id()\n else:\n id = pd.read_sql(\"SELECT MAX(session_id) as id from sessions\", self.con)\n logging.debug(\" In between sessions, using %i as session id\" % id[\"id\"][0])\n return id[\"id\"][0]\n else:\n logging.debug(\" Current session id is %i\" % df[\"id\"][0])\n return df[\"id\"][0]\n except Exception as error:\n logging.error(\" **Error reading from table: %s\" % error)", "title": "" }, { "docid": "af22879a05521fd90311f619fd25e22e", "score": "0.5725207", "text": "def generate_request_id(self):\n if self.request_id is None:\n self.request_id = str(uuid.uuid4())\n return self.request_id", "title": "" }, { "docid": "794e9d54c86e9dfabe4c1db665b1bb7c", "score": "0.5720918", "text": "def _generate_id(max=MAX_INT32):\n return random.randint(1000, max)", "title": "" }, { "docid": "bcf5348999f9750fcdcd53424095ec68", "score": "0.57113475", "text": "def response_id():\n return str(uuid4())", "title": "" }, { "docid": "a0f411872641310a6b221374b7dfff53", "score": "0.5711123", "text": "def intent_id():\n return str(uuid4())", "title": "" }, { "docid": "4987d19408263b6a5b1eff2d3d071286", "score": "0.57104737", "text": "async def new_id(self, request: web.Request) -> None:\n session = await aiohttp_session.get_session(request)\n session[self.session_key] = await self.generate_id(request)", "title": "" }, { "docid": "f1543600c8be830d43c04de03622e22c", "score": "0.5708333", "text": "def _generate_cart_id():\n cart_id = ''\n characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\\\n 1234567890!@#$%&*()'\n cart_id_length = 50\n for y in range(cart_id_length):\n cart_id += characters[random.randint(0, len(characters) - 1)]\n return cart_id", "title": "" }, { "docid": "5f41f61e3fcbf7955b3aa2c743b2b930", "score": "0.5704154", "text": "def state_token(self):\n return get_random_string(32)", "title": "" }, { "docid": "51b57e1d01f09d2e958ccdb645f12f3c", "score": "0.5696907", "text": "def _gen_net_id():\n while True:\n net_id = str(uuid.uuid4())\n try:\n int(net_id[0])\n except ValueError:\n return net_id", "title": "" }, { "docid": "fbcac6d2dfdf7d1ba5874ad9ea800319", "score": "0.5691803", "text": "def id():\n return str(uuid.uuid4()).upper()", "title": "" }, { "docid": "a816d5273626518c08749c3ba0bee01a", "score": "0.5691127", "text": "def generate_id():\n # uuid -> string issue\n # --------------------\n # https://stackoverflow.com/questions/47429929/attributeerror-uuid-object-has-no-attribute-replace\n return str(uuid.uuid4())", "title": "" }, { "docid": "b4b96c3f9c86dfece35859c951adbef8", "score": "0.5678777", 
"text": "def _generate_instance_id(self):\n instance_id = f'{uuid.uuid4()}:{self._app_name}:{self._port}'\n LOG.debug('Generated new instance id: %s for app: %s', instance_id, self._app_name)\n return instance_id", "title": "" }, { "docid": "54e6e0ab57279753b073d86fa3fd9b3b", "score": "0.567453", "text": "def random():\n key = str(uuid.uuid4())\n print(\"Random Key generated (*save for decryption*) :\", key)\n return key", "title": "" }, { "docid": "1c6817c3ab5bdd55edca9df3967b2399", "score": "0.567423", "text": "def id(self):\n return self._simid", "title": "" }, { "docid": "cfb001f6063e12d9ccb89db5d20ba160", "score": "0.5671923", "text": "def unique_id(self):\n return _testing_single_scramble_swig.sc_sptr_unique_id(self)", "title": "" }, { "docid": "88de7250384b8e0b0371a017a4d91381", "score": "0.5671441", "text": "def _generate_instance_id(self) -> str:\n instance_id = '{}:{}:{}'.format(\n str(uuid.uuid4()), self._app_name, self._port\n )\n logger.debug('Generated new instance id: %s for app: %s', instance_id,\n self._app_name)\n return instance_id", "title": "" }, { "docid": "4b0cf9125f0fa10ae6193aed9c0cdb2e", "score": "0.56674004", "text": "def unique_id(self) -> str:\n return self.device.id", "title": "" }, { "docid": "ce6f67dae857f57f0d97339172880d20", "score": "0.56631124", "text": "def gen_uuid(self):\n return uuid.uuid4().hex", "title": "" }, { "docid": "3f2856c629300e31c52d12d26409dfa4", "score": "0.5660027", "text": "def generate_id(self, **kwargs) -> str:\n self._counter += 1\n return str(self._counter)", "title": "" }, { "docid": "b0ad56c1c2dd53eb415df3253724de88", "score": "0.56594825", "text": "def unique_id(self) -> str:\n return self.dam_id", "title": "" }, { "docid": "462791fc63253326d5cc5bfbdc115fef", "score": "0.5655529", "text": "def unique_id(self) -> str | None:\n return self._id", "title": "" }, { "docid": "484187368e704f824b36f1fdaa12769f", "score": "0.5652418", "text": "def next_id(self):\n res = \"%0.5d\" % self.conf['seq']\n self.conf['seq'] += 1\n return res", "title": "" }, { "docid": "8e78dc9fc11f5f556cc95d0419083253", "score": "0.5651973", "text": "def unique_id(self):\n return self.node.DeviceID", "title": "" } ]
ea3d105b9d0a86773cfefaa29047af97
Get device information as a dictionary
[ { "docid": "02c061ba8303e4ae92dfa3e081d6d4a7", "score": "0.69765407", "text": "def get_info(self):\n return dict(firmware_version=self.firmware_version,\n adc_mask=self.adc_mask,\n sampling_rate=self.sampling_rate)", "title": "" } ]
[ { "docid": "4b0099d52d86f3e79f6beb7cdd6ac700", "score": "0.8830638", "text": "async def get_device_info(self) -> Dict[str, str]:\n ...", "title": "" }, { "docid": "03f81716fd6eab6529a468da7a3a47a5", "score": "0.8455661", "text": "def device_info(self):\n return {\n \"identifiers\": self._device.identifiers,\n \"name\": self._device.name,\n \"manufacturer\": self._device.manufacturer,\n \"model\": self._device.model,\n \"sw_version\": self._device.sw_version,\n }", "title": "" }, { "docid": "ced4ad6dae0d3cf2cefd3a769bdc6c4b", "score": "0.839614", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.device_id)},\n \"name\": self.device_name,\n \"manufacturer\": DEFAULT_NAME,\n \"model\": TADO_BRIDGE,\n }", "title": "" }, { "docid": "3cb72a3efab8f43b4ab15211aed4782f", "score": "0.8340927", "text": "def device_info(self):\n return {\n \"name\": self.name,\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"manufacturer\": DOMAIN,\n \"model\": self._coordinator.data[self.unique_id].get(\"product_name\"),\n \"sw_version\": self._coordinator.data[self.unique_id].get(\n \"wifi_soft_version\"\n ),\n }", "title": "" }, { "docid": "085491c953accaaa4816fd0ed6c11500", "score": "0.8315989", "text": "def device_info(self) -> Dict[str, Any]:\n return {\n \"identifiers\": {(DOMAIN, self.device_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Hubitat\",\n \"model\": self.type,\n \"via_device\": (DOMAIN, self._hub.id),\n }", "title": "" }, { "docid": "e981e4d508228e82ab575ce297e5bfaa", "score": "0.8292019", "text": "def device_info(self):\n return {\n \"name\": self._name,\n \"identifiers\": {(DOMAIN, self._mac)},\n \"manufacturer\": \"Gree\",\n \"connections\": {(CONNECTION_NETWORK_MAC, self._mac)},\n }", "title": "" }, { "docid": "605852327b028f1921388bbe1a4bf1c3", "score": "0.82772976", "text": "def device_info(self) -> Dict[str, Any]:\n info = {\n \"connections\": {\n (CONNECTION_NETWORK_MAC, self._data[self._sid_data[\"sid_ref\"]])\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": self._data[self._sid_data[\"sid_name\"]],\n }\n if self._sid_data[\"sid\"] == \"interface\":\n info[\"name\"] = f\"{self._inst} {self._data[self._sid_data['sid_name']]}\"\n return info", "title": "" }, { "docid": "6911c80ff19948cea6fe65f382ec4c62", "score": "0.8257571", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.serial)},\n \"name\": self.name,\n \"suggested_area\": self._device[\"name\"].split(\"_\")[0],\n \"manufacturer\": MANUFACTURER,\n \"model\": f\"{self._device['model']} ({self._device['type']})\",\n \"via_device\": (DOMAIN, self._bridge_device[\"serial\"]),\n }", "title": "" }, { "docid": "dd2799410a3255bdcade306ca1a7f79c", "score": "0.81683654", "text": "def device_info(self):\n return {\n \"identifiers\": {\n (\n ios.DOMAIN,\n self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_PERMANENT_ID],\n )\n },\n \"name\": self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_NAME],\n \"manufacturer\": \"Apple\",\n \"model\": self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_TYPE],\n \"sw_version\": self._device[ios.ATTR_DEVICE][ios.ATTR_DEVICE_SYSTEM_VERSION],\n }", "title": "" }, { "docid": "82107837bf46a53ca6e061324492b6c3", "score": "0.80375206", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._spaclient.get_macaddr())},\n \"model\": self._spaclient.get_model_name(),\n \"manufacturer\": \"Balboa Water Group\",\n \"name\": self._device_name,\n 
\"sw_version\": self._spaclient.get_ssid(),\n }", "title": "" }, { "docid": "0a7094a2c579e00715ec192471a1ee5d", "score": "0.8027279", "text": "def device_info(self) -> Dict[str, Any]:\n if self._device_id is None:\n return None\n\n return {\n ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},\n ATTR_NAME: self.name,\n ATTR_MANUFACTURER: self.coordinator.data.info.brand,\n ATTR_MODEL: self.coordinator.data.info.model_name,\n ATTR_SOFTWARE_VERSION: self.coordinator.data.info.version,\n }", "title": "" }, { "docid": "144c85d553da72ddf7f9fe9a1a87d096", "score": "0.8014169", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._device.device_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Coway\",\n \"model\": self._device.product_name_full,\n }", "title": "" }, { "docid": "144c85d553da72ddf7f9fe9a1a87d096", "score": "0.8014169", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._device.device_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Coway\",\n \"model\": self._device.product_name_full,\n }", "title": "" }, { "docid": "144c85d553da72ddf7f9fe9a1a87d096", "score": "0.8014169", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._device.device_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Coway\",\n \"model\": self._device.product_name_full,\n }", "title": "" }, { "docid": "144c85d553da72ddf7f9fe9a1a87d096", "score": "0.8014169", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._device.device_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Coway\",\n \"model\": self._device.product_name_full,\n }", "title": "" }, { "docid": "144c85d553da72ddf7f9fe9a1a87d096", "score": "0.8014169", "text": "def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._device.device_id)},\n \"name\": self._device.name,\n \"manufacturer\": \"Coway\",\n \"model\": self._device.product_name_full,\n }", "title": "" }, { "docid": "3b1f532f7ed58f0483b86ca7d8214191", "score": "0.797958", "text": "def device_info(self) -> Dict[str, Any]:\n return {\n ATTR_IDENTIFIERS: {(DOMAIN, self._device_id)},\n ATTR_NAME: self.name,\n ATTR_MANUFACTURER: self.dtv.device.info.brand,\n ATTR_MODEL: None,\n ATTR_SOFTWARE_VERSION: self.dtv.device.info.version,\n ATTR_VIA_DEVICE: (DOMAIN, self.dtv.device.info.receiver_id),\n }", "title": "" }, { "docid": "78c70ab137d4b63dc8956cd410ed6464", "score": "0.79506207", "text": "def GetDeviceInfo(self, serial):\n return { key: self._GetDataStoreValue(serial, key)\n for key in self.device_info_fields }", "title": "" }, { "docid": "15f7ecf57831fa31d419b7056db07ee5", "score": "0.79269683", "text": "def device_info(self):\n return {\"identifiers\": {(DOMAIN, self.box_id)}}", "title": "" }, { "docid": "15f7ecf57831fa31d419b7056db07ee5", "score": "0.79269683", "text": "def device_info(self):\n return {\"identifiers\": {(DOMAIN, self.box_id)}}", "title": "" }, { "docid": "e17f70e5713e58f704e83c3c3dbd117e", "score": "0.79088545", "text": "def device_info(self):\n if not self.unique_id:\n return None\n device_info = {\n \"identifiers\": {(DOMAIN, self.lockObj.nuki_id)},\n \"name\": self.name,\n \"via_device\": self.lockObj.nuki_id,\n }\n return device_info", "title": "" }, { "docid": "ceb071e83ffe42c0899823677a6c7ef4", "score": "0.79081935", "text": "def GetDeviceInfoDict(self):\n ret = {\n 'vendor_id': self.vendor_id,\n 'product_id': self.product_id,\n 'product': self.name,\n }\n\n return ret", "title": "" }, { "docid": 
"c7b02f782a960ea9ff59c3d148cdedb5", "score": "0.7871482", "text": "def device_info(self):\n return self._device_info", "title": "" }, { "docid": "5b12a45049d212dc83b73c86609f8d9d", "score": "0.7866668", "text": "def device_info(self):\r\n _LOGGER.debug(\"My identifiers is %s\", {(DOMAIN, self.unique_id)})\r\n info = {\r\n \"identifiers\": {(DOMAIN, self.unique_id)},\r\n \"name\": self.name,\r\n \"manufacturer\": \"SleepAsAndroid\",\r\n \"type\": None,\r\n \"model\": \"MQTT\",\r\n }\r\n return info", "title": "" }, { "docid": "b485cd11185a8862aa4fdaa08a23c67e", "score": "0.7798589", "text": "def device_info(self):\n data = self.coordinator.data[self.index]\n return {\n \"identifiers\": {(DOMAIN, self.vin)},\n \"name\": self.get_vehicle_name(),\n \"manufacturer\": \"Mazda\",\n \"model\": f\"{data['modelYear']} {data['carlineName']}\",\n }", "title": "" }, { "docid": "31410e7dd963bd7930274e2031cbff9b", "score": "0.7795205", "text": "async def read_device_info(self):\n raise NotImplementedError()", "title": "" }, { "docid": "31410e7dd963bd7930274e2031cbff9b", "score": "0.7795205", "text": "async def read_device_info(self):\n raise NotImplementedError()", "title": "" }, { "docid": "27afccebbbf3aefe2b7ffd2ce098bdc7", "score": "0.7776223", "text": "def device_info(self):\n return self._state.device_info", "title": "" }, { "docid": "ea192ec08468f184403b39cc88f9c63a", "score": "0.767607", "text": "def device_info(self):\n return device_info(self._registration)", "title": "" }, { "docid": "63225b3e4a2169ff8a31cddf90eefb38", "score": "0.7668895", "text": "def device_info(self) -> DeviceInfo:\n return {\n ATTR_NAME: str(self.name),\n ATTR_MANUFACTURER: MANUFACTURER,\n ATTR_MODEL: MODEL,\n ATTR_IDENTIFIERS: {(DOMAIN, self._identifier)},\n }", "title": "" }, { "docid": "bab90d3019c6896e8225b6dd5c8d88c0", "score": "0.7610876", "text": "def device_info(self):\n return self._dc", "title": "" }, { "docid": "8b203392a1a11557a80d8dca10e475f4", "score": "0.755546", "text": "def get_info(self, usb_device):\n info = dict()\n info['manufacturer'] = usb_device.vendor_name\n info['product_name'] = usb_device.product_name\n info['serial_number'] = usb_device.serial_number\n info['vendor_id'] = '0x%04X' % usb_device.vendor_id\n info['product_id'] = '0x%04X' % usb_device.product_id\n return info", "title": "" }, { "docid": "71f408de9ce154d06104c84433ec0475", "score": "0.753614", "text": "def device_info(device):\n\n status = subprocess.check_output([\n 'ibstat', device, '-s']).splitlines()\n\n regexes = {\n \"CA type: (.*)\": \"device_type\",\n \"Number of ports: (.*)\": \"num_ports\",\n \"Firmware version: (.*)\": \"fw_ver\",\n \"Hardware version: (.*)\": \"hw_ver\",\n \"Node GUID: (.*)\": \"node_guid\",\n \"System image GUID: (.*)\": \"sys_guid\",\n }\n\n device = DeviceInfo()\n\n for line in status:\n for expression, key in regexes.items():\n matches = re.search(expression, line)\n if matches:\n setattr(device, key, matches.group(1))\n\n return device", "title": "" }, { "docid": "ffe78d6ff4f7cc8fa8f58b1df39fb399", "score": "0.7514357", "text": "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {\n (\n DOMAIN,\n f\"{self.coordinator.latitude}-{self.coordinator.longitude}\",\n )\n },\n \"name\": DEFAULT_NAME,\n \"manufacturer\": MANUFACTURER,\n \"entry_type\": \"service\",\n }", "title": "" }, { "docid": "e15db3e78844044d0141d06d1039e725", "score": "0.7511637", "text": "def get_info(self, usb_device):\n info = dict()\n info['manufacturer'] = self.usb.util.get_string(usb_device, 
usb_device.iManufacturer)\n info['product_name'] = self.usb.util.get_string(usb_device, usb_device.iProduct)\n info['serial_number'] = self.usb.util.get_string(usb_device, usb_device.iSerialNumber)\n info['vendor_id'] = \"0x%04X\" % usb_device.idVendor\n info['product_id'] = \"0x%04X\" % usb_device.idProduct\n return info", "title": "" }, { "docid": "b1170e2f706d070f6f4cf0f922fd77e9", "score": "0.748466", "text": "def gather_metric(self):\n if self.is_privileged():\n self.check_usbmon()\n dev_byte_dict = self.get_bytes()\n dev_name_dict = self.match_device_id()\n return {\n self.DEVICES: self.gen_output(dev_name_dict, dev_byte_dict)\n }\n else:\n return {self.DEVICES: None}", "title": "" }, { "docid": "b6f262a4dd6ad7d2f84351c46df24fbb", "score": "0.7459138", "text": "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, self._config_entry.data[CONF_API_KEY])},\n \"name\": \"ClimaCell\",\n \"manufacturer\": \"ClimaCell\",\n \"sw_version\": f\"v{self.api_version}\",\n \"entry_type\": \"service\",\n }", "title": "" }, { "docid": "00aa1805520bc43aa71368c0327b2a7b", "score": "0.7454449", "text": "def get_device_info(self):\r\n url = 'http://{}:{}/{}'.format(self._host, self._port, URL_CONTROL_NRC_DDD)\r\n \r\n res = urlopen(url, timeout=5).read()\r\n device_info = xmltodict.parse(res)['root']['device']\r\n\r\n return device_info", "title": "" }, { "docid": "087a0e8b719201815182fae05d975e14", "score": "0.7423395", "text": "def device_info(self) -> DeviceInfo:\n return self._device_info.device_info", "title": "" }, { "docid": "4c83883b7deb96328bd416dba091ddb9", "score": "0.73868406", "text": "def LMS_get_device_info():\n dll=LMSDLL\n dll.fnLMS_SetTestMode(U8(int(False)))\n device_info_array_type = U32 * int(dll.fnLMS_GetNumDevices(None))\n a=device_info_array_type()\n dll.fnLMS_GetDevInfo(a)\n devids=np.ctypeslib.as_array(a)\n devinfos=[]\n for devid in devids:\n model=C.create_string_buffer(8194)\n dll.fnLMS_GetModelName(U32(devid),model)\n serial=int(dll.fnLMS_GetSerialNumber(U32(devid)))\n# devstr=\"Device: %d\\tModel: %s\\tSerial: %d\" % (devid,model,serial)\n# print devstr\n devinfos.append({\"model\":model,\"serial\":serial,\"devid\":U32(devid)})\n return devinfos", "title": "" }, { "docid": "92b120a35c8df7a1cdc1f14808761aea", "score": "0.73296756", "text": "def get_device_infos(self, serial: str | None = None) -> dict[Any, Any]:\n\n devices = self._get_page_list()\n result: dict[str, Any] = {}\n _res_id = \"NONE\"\n\n for device in devices[\"deviceInfos\"]:\n _serial = device[\"deviceSerial\"]\n _res_id_list = {\n item\n for item in devices.get(\"CLOUD\", {})\n if devices[\"CLOUD\"][item].get(\"deviceSerial\") == _serial\n }\n _res_id = _res_id_list.pop() if len(_res_id_list) else \"NONE\"\n\n result[_serial] = {\n \"CLOUD\": {_res_id: devices.get(\"CLOUD\", {}).get(_res_id, {})},\n \"VTM\": {_res_id: devices.get(\"VTM\", {}).get(_res_id, {})},\n \"P2P\": devices.get(\"P2P\", {}).get(_serial, {}),\n \"CONNECTION\": devices.get(\"CONNECTION\", {}).get(_serial, {}),\n \"KMS\": devices.get(\"KMS\", {}).get(_serial, {}),\n \"STATUS\": devices.get(\"STATUS\", {}).get(_serial, {}),\n \"TIME_PLAN\": devices.get(\"TIME_PLAN\", {}).get(_serial, {}),\n \"CHANNEL\": {_res_id: devices.get(\"CHANNEL\", {}).get(_res_id, {})},\n \"QOS\": devices.get(\"QOS\", {}).get(_serial, {}),\n \"NODISTURB\": devices.get(\"NODISTURB\", {}).get(_serial, {}),\n \"FEATURE\": devices.get(\"FEATURE\", {}).get(_serial, {}),\n \"UPGRADE\": devices.get(\"UPGRADE\", {}).get(_serial, {}),\n 
\"FEATURE_INFO\": devices.get(\"FEATURE_INFO\", {}).get(_serial, {}),\n \"SWITCH\": devices.get(\"SWITCH\", {}).get(_serial, {}),\n \"CUSTOM_TAG\": devices.get(\"CUSTOM_TAG\", {}).get(_serial, {}),\n \"VIDEO_QUALITY\": {\n _res_id: devices.get(\"VIDEO_QUALITY\", {}).get(_res_id, {})\n },\n \"resourceInfos\": [\n item\n for item in devices.get(\"resourceInfos\")\n if item.get(\"deviceSerial\") == _serial\n ], # Could be more than one\n \"WIFI\": devices.get(\"WIFI\", {}).get(_serial, {}),\n \"deviceInfos\": device,\n }\n # Nested keys are still encoded as JSON strings\n result[_serial][\"deviceInfos\"][\"supportExt\"] = json.loads(\n result[_serial][\"deviceInfos\"][\"supportExt\"]\n )\n convert_to_dict(result[_serial][\"STATUS\"].get(\"optionals\"))\n\n if not serial:\n return result\n\n return result.get(serial, {})", "title": "" }, { "docid": "b9e194276ea0a5f0b03ea0da6f8142b1", "score": "0.7299067", "text": "def LPS_get_device_info():\n dll=LPSDLL\n dll.fnLPS_SetTestMode(U8(int(False)))\n device_info_array_type = U32 * int(dll.fnLPS_GetNumDevices(None))\n a=device_info_array_type()\n dll.fnLPS_GetDevInfo(a)\n devids=np.ctypeslib.as_array(a)\n devinfos=[]\n for devid in devids:\n model=C.create_string_buffer(8194)\n dll.fnLPS_GetModelNameA(U32(devid),model)\n serial=int(dll.fnLPS_GetSerialNumber(U32(devid)))\n# devstr=\"Device: %d\\tModel: %s\\tSerial: %d\" % (devid,model,serial)\n# print devstr\n devinfos.append({\"model\":model,\"serial\":serial,\"devid\":U32(devid)})\n return devinfos", "title": "" }, { "docid": "b68d454da6ccc085c0eb3f0685bcf681", "score": "0.7284858", "text": "def deviceInfo(self):\n getusbs = usb.core.find(find_all=True)\n devices = dict(\n enumerate(str(dev.manufacturer) + \":\" + str(dev.idProduct) + \":\" + str(dev.idVendor) for dev in getusbs))\n for key, value in devices.items():\n print(key, \":\", value)\n hook = input(\"---> Select a device: \")\n idProd, idVen = devices[int(hook)].split(':')[1:]\n device = usb.core.find(idVendor=int(idVen), idProduct=int(idProd))\n print(device)", "title": "" }, { "docid": "9c7c1d8802d0457fba70b0c453261a12", "score": "0.7270171", "text": "def GetAllDeviceData():\n return _GetInstance().Get({})", "title": "" }, { "docid": "3c7457804600fe4b655e6839758f2308", "score": "0.7232167", "text": "def get_device(self) -> Any:\n return self._api_get_pagelist(page_filter=\"CLOUD\", json_key=\"deviceInfos\")", "title": "" }, { "docid": "d0e4c0bb01e7e18e0538ef4c9aae12c8", "score": "0.7189528", "text": "def info(self) -> dict:\n _info = {'bt': self.bt}\n _info.update(**self._init_kwargs)\n with suppress(Exception):\n _info.update(\n device_info=self._pyaudio_instance.get_device_info_by_index(self._init_kwargs['input_device_index']))\n return _info", "title": "" }, { "docid": "f47e4a19ca377718b4925a2d7f38159b", "score": "0.7184789", "text": "def get_device(self):\n return self._api_get_pagelist(page_filter=\"CLOUD\", json_key=\"deviceInfos\")", "title": "" }, { "docid": "a5c27eb1c8b4417e8f56aff5d9abbfcb", "score": "0.717817", "text": "def device_info(self) -> DeviceInfo:\n dev_connection = DOMAIN\n dev_connection_value = self.entity_description.data_reference\n dev_group = self.entity_description.ha_group\n if self.entity_description.ha_group.startswith(\"data__\"):\n dev_group = self.entity_description.ha_group[6:]\n if dev_group in self._data:\n dev_group = self._data[dev_group]\n dev_connection_value = dev_group\n\n if self.entity_description.ha_connection:\n dev_connection = self.entity_description.ha_connection\n\n if 
self.entity_description.ha_connection_value:\n dev_connection_value = self.entity_description.ha_connection_value\n if dev_connection_value.startswith(\"data__\"):\n dev_connection_value = dev_connection_value[6:]\n dev_connection_value = self._data[dev_connection_value]\n\n info = DeviceInfo(\n connections={(dev_connection, f\"{dev_connection_value}\")},\n identifiers={(dev_connection, f\"{dev_connection_value}\")},\n default_name=f\"{self._inst} {dev_group}\",\n model=f\"{self._ctrl.data['resource']['board-name']}\",\n manufacturer=f\"{self._ctrl.data['resource']['platform']}\",\n sw_version=f\"{self._ctrl.data['resource']['version']}\",\n configuration_url=f\"http://{self._ctrl.config_entry.data[CONF_HOST]}\",\n via_device=(DOMAIN, f\"{self._ctrl.data['routerboard']['serial-number']}\"),\n )\n\n if \"mac-address\" in self.entity_description.data_reference:\n dev_group = self._data[self.entity_description.data_name]\n dev_manufacturer = \"\"\n if dev_connection_value in self._ctrl.data[\"host\"]:\n dev_group = self._ctrl.data[\"host\"][dev_connection_value][\"host-name\"]\n dev_manufacturer = self._ctrl.data[\"host\"][dev_connection_value][\n \"manufacturer\"\n ]\n\n info = DeviceInfo(\n connections={(dev_connection, f\"{dev_connection_value}\")},\n default_name=f\"{dev_group}\",\n manufacturer=f\"{dev_manufacturer}\",\n via_device=(\n DOMAIN,\n f\"{self._ctrl.data['routerboard']['serial-number']}\",\n ),\n )\n\n return info", "title": "" }, { "docid": "18611a5e3b8b1061210fdcf74b6391b7", "score": "0.71610993", "text": "async def get_info(self, device_id: str) -> dict:\n return await self._request(\"get\", f\"{API_V2_BASE}/devices/{device_id}\")", "title": "" }, { "docid": "ebbaddf726e96bb8da3b94bba6e8107d", "score": "0.7155309", "text": "def parse_device_info(line):\n device_info = dict()\n\n device_info['label'] = get_label(line) \n device_info['partition'] = get_partition(line) \n device_info['device'] = get_device(line) \n device_info['fstype'] = get_type(line) \n return device_info", "title": "" }, { "docid": "dbc1c0267bbd2a5af4757ccb69d85ce0", "score": "0.7153605", "text": "def get_device_info(platform_path: str):\n device_name = os.path.basename(platform_path)\n with zipfile.ZipFile(os.path.join(platform_path, 'hw',\n device_name + '.dsa')) as platform:\n with platform.open(device_name + '.hpfm') as metadata:\n platform_info = ET.parse(metadata).find('./xd:component/xd:platformInfo',\n XILINX_XML_NS)\n if platform_info is None:\n raise ValueError('cannot parse platform')\n clock_period = platform_info.find(\n \"./xd:systemClocks/xd:clock/[@xd:id='0']\", XILINX_XML_NS)\n if clock_period is None:\n raise ValueError('cannot find clock period in platform')\n part_num = platform_info.find('xd:deviceInfo', XILINX_XML_NS)\n if part_num is None:\n raise ValueError('cannot find part number in platform')\n return {\n 'clock_period':\n clock_period.attrib['{{{xd}}}period'.format(**XILINX_XML_NS)],\n 'part_num':\n part_num.attrib['{{{xd}}}name'.format(**XILINX_XML_NS)]\n }", "title": "" }, { "docid": "088645355eea2419c2b9f9f241fdc346", "score": "0.7148432", "text": "def hw_info(self) -> Dict:\n keys = [\n \"sw_ver\",\n \"hw_ver\",\n \"mac\",\n \"mic_mac\",\n \"type\",\n \"mic_type\",\n \"hwId\",\n \"fwId\",\n \"oemId\",\n \"dev_name\",\n ]\n sys_info = self.sys_info\n return {key: sys_info[key] for key in keys if key in sys_info}", "title": "" }, { "docid": "26ca8008b3317988dda099a02432d19c", "score": "0.71473145", "text": "def get_device_info(self):\n status = StatusVar(0)\n name = 
ctypes.create_string_buffer(16)\n bluetooth_address = (c_ubyte * 7)()\n signal_strength = (c_ubyte * 4)()\n available_flash = c_uint(0)\n dll.nFANTOM100_iNXT_getDeviceInfo(self.handle, name,\n bluetooth_address, signal_strength, byref(available_flash),\n cast(byref(status), POINTER(c_int)))\n name = name.value\n bluetooth_address = ':'.join('%02x' % c\n for c in bluetooth_address[0:6])\n return self.DeviceInfo(\n name = name.upper(),\n bluetooth_address = bluetooth_address.upper(),\n signal_strength = tuple(c for c in signal_strength),\n available_flash = available_flash.value,\n )", "title": "" }, { "docid": "5479d4698bc5844fde7fd99a0718e964", "score": "0.7126474", "text": "def get_Devices(self):\n self.deviceDict = {}\n pathElts = [\n \"DatasetInfo\",\n \"Devices\"\n ]\n self.devices = self.__fetch_query(pathElts, \"analytics\", [])\n for notif in self.devices:\n for device in notif[\"updates\"]:\n self.deviceDict[str(device)] = notif[\"updates\"][str(device)]\n return self.deviceDict", "title": "" }, { "docid": "49ebc99c41776e899143635643c54981", "score": "0.71035725", "text": "def device_info(self):\n info = {}\n sib = bytearray(self.datalink.read_sib())\n self.logger.info(\"SIB read out as: {}\".format(sib))\n \n # Parse fixed width fields according to spec\n family = sib[0:7].strip()\n info['family'] = family.decode()\n self.logger.info(\"Device family ID: '%s'\", family.decode())\n\n nvm = sib[8:11].strip()\n self.logger.info(\"NVM interface: '%s'\", nvm.decode())\n info['nvm'] = nvm.decode()\n if nvm.decode() == \"P:2\":\n self.logger.info(\"Using PDI v2\")\n self.write_nvm = self.write_nvm_v1\n self.chip_erase = self.chip_erase_v1\n self.write_fuse = self.write_fuse_v1\n self.datalink.set_24bit_updi(True)\n\n ocd = sib[11:14].strip()\n info['ocd'] = ocd.decode()\n self.logger.info(\"Debug interface: '%s'\", ocd.decode())\n\n osc = sib[15:19].strip()\n info['osc'] = osc.decode()\n self.logger.info(\"PDI oscillator: '%sMHz'\", osc.decode())\n\n self.logger.info(\"PDI revision = 0x{:X}\".format(self.datalink.ldcs(constants.UPDI_CS_STATUSA) >> 4))\n if self.in_prog_mode():\n if self.device is not None:\n devid = self.read_data(self.device.sigrow_address, 3)\n device_id_string = \"{0:02X}{1:02X}{2:02X}\".format(devid[0], devid[1], devid[2])\n info['device_id'] = device_id_string\n self.logger.info(\"Device ID = '%s'\", device_id_string)\n\n devrev = self.read_data(self.device.syscfg_address + 1, 1)\n devrev_major = (int(devrev[0]) & 0xF0) >> 4\n devrev_minor = int(devrev[0]) & 0x0F\n device_rev_string = \"{0:d}.{1:d}\".format(devrev_major, devrev_minor)\n info['device_rev'] = device_rev_string\n self.logger.info(\"Device rev = '%s'\", device_rev_string)\n return info", "title": "" }, { "docid": "b8cf457c8ebf74491da00dd1e84b720f", "score": "0.70555633", "text": "def device_info(self) -> DeviceInfo:\n return {\n \"identifiers\": {(DOMAIN, f\"unifi_controller_{self._item.site_id}\")},\n \"name\": \"UniFi Controller\",\n \"manufacturer\": ATTR_MANUFACTURER,\n \"model\": \"UniFi Controller\",\n \"entry_type\": \"service\",\n }", "title": "" }, { "docid": "8cd2b05118b713173dce9bcdbbe53d64", "score": "0.7053984", "text": "def get_devices(self,c):\n return self.devices.keys()", "title": "" }, { "docid": "4d7e548a5c9af0c4056b45b7f8807184", "score": "0.70452905", "text": "def LDA_get_device_info():\n dll=LDADLL\n dll.fnLDA_SetTestMode(U8(int(False)))\n device_info_array_type = U32 * int(dll.fnLDA_GetNumDevices(None))\n a=device_info_array_type()\n dll.fnLDA_GetDevInfo(a)\n 
devids=np.ctypeslib.as_array(a)\n devinfos=[]\n for devid in devids:\n model=C.create_string_buffer(8194)\n dll.fnLDA_GetModelName(U32(devid),model)\n serial=int(dll.fnLDA_GetSerialNumber(U32(devid)))\n# devstr=\"Device: %d\\tModel: %s\\tSerial: %d\" % (devid,model,serial)\n# print devstr\n devinfos.append({\"model\":model,\"serial\":serial,\"devid\":U32(devid)})\n return devinfos", "title": "" }, { "docid": "6c0e5941cc220cf11d85da3a69e5753e", "score": "0.70422685", "text": "def _get_device_detail(connection, sys_path, cmd_prefix=\"\"):\n command = cmd_prefix + 'find {} -type f -print -exec cat {{}} \\; -exec echo \\;'.format(sys_path + '/')\n ignore_errors = ['invalid argument', 'operation not supported', 'permission denied']\n\n device = {}\n\n try:\n # do -> find /sys/calls/net/<dev>/ -type f -print -exec cat {} \\; will output something like\n #\n # ...\n # /sys/class/net/qvo5faab8ab-96/queues/tx-0/byte_queue_limits/hold_time\n # 1000\n # /sys/class/net/qvo5faab8ab-96/queues/tx-0/byte_queue_limits/inflight\n # 0\n # /sys/class/net/qvo5faab8ab-96/tx_queue_len\n # 1000\n # /sys/class/net/qvo5faab8ab-96/uevent\n # INTERFACE=qvo5faab8ab-96\n # IFINDEX=38\n # /sys/class/net/qvo5faab8ab-96/statistics/rx_fifo_errors\n # 0\n # ...\n output, error = Client.exec_command(connection, command, ignore_errors)\n item_name = None\n\n for line in output.split(str('\\n')):\n # Is it a filename\n if len(line) == 0:\n continue # Extra echo in command insures all 'cat' commands end with at least one \\n\n\n elif line.startswith(sys_path):\n item_name = line[len(sys_path) + 1:]\n\n elif item_name is not None:\n # It is the contents of a file, add it as an item, watching to see if it is more than\n # just one line\n\n if item_name in device:\n if not isinstance(device[item_name], list):\n device[item_name] = [device[item_name]]\n device[item_name].append(line)\n else:\n device[item_name] = line\n\n def tryeval(val):\n # TODO: Add list recursion support\n try:\n val = ast.literal_eval(val)\n except (ValueError, SyntaxError):\n pass\n return val\n\n def build_nested_dict_helper(key, container, val):\n segments = key.split(str('/'))\n head = segments[0]\n tail = segments[1:]\n if not tail:\n container[head] = val\n else:\n if head not in container:\n container[head] = {}\n build_nested_dict_helper(str('/').join(tail), container[head], val)\n\n def build_nested_dict(paths):\n container = {}\n for key, val in paths.items():\n build_nested_dict_helper(key, container, tryeval(val))\n return container\n\n device = build_nested_dict(device)\n\n except Exception as e:\n logging.exception('Client._get_device_detail')\n\n logging.debug('_get_device_detail: detail:\\n{}'.format(pprint.PrettyPrinter(indent=2).pformat(device)))\n\n return device", "title": "" }, { "docid": "2ec02825edd7964df4abe98a37f1bda9", "score": "0.7038082", "text": "def _get_device_details(self):\n self.logger.info(\"Building Root\")\n vendor = \"CGS\"\n\n self.resource.contact_name = self.snmp_handler.get_property(\n \"SNMPv2-MIB\", \"sysContact\", \"0\"\n )\n self.resource.system_name = self.snmp_handler.get_property(\n \"SNMPv2-MIB\", \"sysName\", \"0\"\n )\n self.resource.location = self.snmp_handler.get_property(\n \"SNMPv2-MIB\", \"sysLocation\", \"0\"\n )\n self.resource.model = self.resource.model_name = self._get_device_model()\n self.resource.vendor = vendor", "title": "" }, { "docid": "f218b0e6fe40204ff95c5eab7aa0ed40", "score": "0.7015939", "text": "def _get_all_device_infos(self):\n\n devices = self._get_page_list()\n result = {}\n\n for 
idx, device in enumerate(devices[\"deviceInfos\"]):\n result[device[\"deviceSerial\"]] = {}\n result[device[\"deviceSerial\"]][\"deviceInfos\"] = device\n result[device[\"deviceSerial\"]][\"connectionInfos\"] = devices.get(\n \"connectionInfos\"\n ).get(device[\"deviceSerial\"])\n result[device[\"deviceSerial\"]][\"p2pInfos\"] = devices.get(\"p2pInfos\").get(\n device[\"deviceSerial\"]\n )\n result[device[\"deviceSerial\"]][\"alarmNodisturbInfos\"] = devices.get(\n \"alarmNodisturbInfos\"\n ).get(device[\"deviceSerial\"])\n result[device[\"deviceSerial\"]][\"kmsInfos\"] = devices.get(\"kmsInfos\").get(\n device[\"deviceSerial\"]\n )\n result[device[\"deviceSerial\"]][\"timePlanInfos\"] = devices.get(\n \"timePlanInfos\"\n ).get(device[\"deviceSerial\"])\n result[device[\"deviceSerial\"]][\"statusInfos\"] = devices.get(\n \"statusInfos\"\n ).get(device[\"deviceSerial\"])\n result[device[\"deviceSerial\"]][\"wifiInfos\"] = devices.get(\"wifiInfos\").get(\n device[\"deviceSerial\"]\n )\n result[device[\"deviceSerial\"]][\"switchStatusInfos\"] = devices.get(\n \"switchStatusInfos\"\n ).get(device[\"deviceSerial\"])\n for item in devices[\"cameraInfos\"]:\n if item[\"deviceSerial\"] == device[\"deviceSerial\"]:\n result[device[\"deviceSerial\"]][\"cameraInfos\"] = item\n result[device[\"deviceSerial\"]][\"cloudInfos\"] = devices.get(\n \"cloudInfos\"\n ).get(item[\"cameraId\"])\n\n return result", "title": "" }, { "docid": "7e9490f199db36d58908c3c810232609", "score": "0.70125264", "text": "def _get_device_details(self):\n\n self.logger.info(\"Building Root\")\n vendor = \"Cisco\"\n\n self.resource.contact_name = self.snmp_handler.get_property('SNMPv2-MIB', 'sysContact', '0')\n self.resource.system_name = self.snmp_handler.get_property('SNMPv2-MIB', 'sysName', '0')\n self.resource.location = self.snmp_handler.get_property('SNMPv2-MIB', 'sysLocation', '0')\n self.resource.os_version = self._get_device_os_version()\n self.resource.model = self._get_device_model()\n self.resource.model_name = self._get_device_model_name(self.resource.model)\n self.resource.vendor = vendor", "title": "" }, { "docid": "98e83f04b72dba8cdb85bc2465e933b3", "score": "0.69825375", "text": "def _get_device_details(self):\n\n self.logger.info('Building Root')\n\n self.resource.contact_name = self.snmp_service.get_property('SNMPv2-MIB', 'sysContact', '0')\n self.resource.system_name = self.snmp_service.get_property('SNMPv2-MIB', 'sysName', '0')\n self.resource.location = self.snmp_service.get_property('SNMPv2-MIB', 'sysLocation', '0')\n self.resource.vendor = self.VENDOR\n self.resource.os_version = self._get_device_os_version()\n self.resource.model = self._get_device_model()", "title": "" }, { "docid": "f0b1b9c67ca1694d3d98ee93c09c98de", "score": "0.6969224", "text": "def __get_device_dict__(device):\n\n device_dict = dict()\n\n # Verify device is not already present in our DEVICE_LIST\n for device_dict in DEVICE_LIST:\n if device == device_dict[\"hostname\"]:\n return device_dict\n\n device_dict[\"hostname\"] = device\n device_dict[\"if_table\"] = get_if_table(device)\n\n DEVICE_LIST.append(device_dict)\n\n return device_dict", "title": "" }, { "docid": "e558ae1b0f24f72eadd553983a90a9c1", "score": "0.69586325", "text": "def get_info(self, usb_device):\n return self.usb.get_info(usb_device)", "title": "" }, { "docid": "9efbfff310643ada43986c088d489244", "score": "0.6944886", "text": "def show_available_device(self):\n self.devices = {'devices_name': [], 'devices_id': []}\n mydevice = self.check_devices()\n for ids in 
mydevice['devices']:\n self.devices['devices_name'].append(ids['type'])\n self.devices['devices_id'].append(ids['id'])", "title": "" }, { "docid": "7e53cb0b5b33c15b371e517be1cc6eae", "score": "0.6933747", "text": "def read():\n # Create the list of devices from our data\n # return [DEVICES_HASH[key] for key in DEVICES_HASH.keys()]\n # print(DEVICES_HASH.keys())\n # return json.dumps(DEVICES_HASH)\n print(len(DEVICES_HASH.keys()))\n return list(DEVICES_HASH.keys())", "title": "" }, { "docid": "cfc283b7f74addbca871ee8f15a1b0fe", "score": "0.69316417", "text": "def get_info(self):\n self.ser.write(b\"version\")\n company = self.ser.readline().strip().decode(\"utf-8\")\n device = self.ser.readline().strip().decode(\"utf-8\")\n fw_ver = self.ser.readline().strip().decode(\"utf-8\")\n\n return {\n 'company': company,\n 'device': device,\n 'fw_ver': int(fw_ver.split(\":\")[-1])\n }", "title": "" }, { "docid": "e872484019e93f614ab1323078e90f96", "score": "0.69195604", "text": "def get_switch_device_info(self, device_id):\n # send requests\n try:\n wattload = get_power_load(bytearray(send_request(self, data_read_request(data_read_command,device_id,data_power_load))).hex())\n timer = get_timer_length(bytearray(send_request(self, data_read_request(data_read_command,device_id,data_power_timer))).hex())\n except OSError:\n raise PySinopeError(\"Cannot get switch info\") \n # Prepare data\n self._device_info = {'active': 1, 'wattage': wattload, 'timer': timer}\n return self._device_info", "title": "" }, { "docid": "aafdfe45c29ed6ee438d17e57f714c7d", "score": "0.6908583", "text": "async def async_device_info(self):\n try:\n return await self.controller.read_info()\n except ConnectionAbortedError:\n pass\n except ConnectionException:\n pass", "title": "" }, { "docid": "eed6734f01db213d359a5602fda85b8b", "score": "0.6906689", "text": "def get_detection_info(self) -> Tuple[Dict[str, str], Dict[str, str]]:\n persistent_dict = self.props[\"persistent_identifiers\"]\n address = persistent_dict[\"console_port_name\"]\n persistent_dict[\"serial_number\"] = (\n usb_utils.get_serial_number_from_path(address))\n persistent_dict[\"model\"] = \"PROTO\"\n return persistent_dict, {}", "title": "" }, { "docid": "5d044dd9c24d0b5ce8305e173ec4b9c9", "score": "0.68720376", "text": "def _info(conn, dom):\n raw = dom.info()\n return {\n \"cpu\": raw[3],\n \"cputime\": int(raw[4]),\n \"disks\": _get_disks(conn, dom),\n \"graphics\": _get_graphics(dom),\n \"nics\": _get_nics(dom),\n \"uuid\": _get_uuid(dom),\n \"loader\": _get_loader(dom),\n \"on_crash\": _get_on_crash(dom),\n \"on_reboot\": _get_on_reboot(dom),\n \"on_poweroff\": _get_on_poweroff(dom),\n \"maxMem\": int(raw[1]),\n \"mem\": int(raw[2]),\n \"state\": VIRT_STATE_NAME_MAP.get(raw[0], \"unknown\"),\n }", "title": "" }, { "docid": "dec02df6ab09ecab81ddec8c2216a891", "score": "0.68630224", "text": "def get_device_details(parsed_output):\r\n generate_table_hdr()\r\n \r\n table.add_row(\r\n parsed_output['version']['chassis'],\r\n parsed_output['version']['chassis_sn'],\r\n parsed_output['version']['hostname'],\r\n parsed_output['version']['xe_version'],\r\n )\r\n \r\n console.print(table)", "title": "" }, { "docid": "27a5d7c5358c2b7f184d936e1ab115d1", "score": "0.6861733", "text": "def get_info(usb_device):\n usb = Usb()\n return usb.get_info(usb_device)", "title": "" }, { "docid": "2212eafe83413f2aa4a41374b3371494", "score": "0.68237245", "text": "def getDeviceDetails(self, config):\n\n raise NotImplementedError()", "title": "" }, { "docid": 
"4e87b821b208eb1116498f3aa68288ed", "score": "0.6809062", "text": "def usbinfo():\n return __usbinfo()", "title": "" }, { "docid": "e8b3c4d5f3a5324bb4654967b36ae7c8", "score": "0.6801339", "text": "def device(self):\n device_model = self.metadata.get('device_model')\n if device_model:\n return deepcopy(ElecMeter.meter_devices[device_model])\n else:\n return {}", "title": "" }, { "docid": "050392f6381b7ab483ae91eccd3f3e8a", "score": "0.6788692", "text": "def get_info(self):\r\n return {'vendor': self.vendor.get_info()}", "title": "" }, { "docid": "0aa18e281be7daab466c68f227460cb9", "score": "0.6780308", "text": "def get_device_details(self, device_mac):\n return self.plugin_rpc.get_device_details(self.context, device_mac,\n self.agent_id)", "title": "" }, { "docid": "1554d7f0cd3a5a9bb094612b23de8771", "score": "0.6774769", "text": "def device_info(device_id: str) -> Optional[Dict[str, Any]]:\n device_info_map = xcrun.simctl.listall.device_raw_info()\n for operating_system in device_info_map.keys():\n devices = device_info_map[operating_system]\n for device in devices:\n if device[\"udid\"].lower() == device_id.lower():\n return device\n raise xcrun.simctl.device.DeviceNotFoundError(\"No device with ID: \" + device_id)", "title": "" }, { "docid": "73572560338de7b541af2027cd7cb708", "score": "0.67596155", "text": "def device_data(self):\n return self._device_data", "title": "" }, { "docid": "d7f6965287d119d4ff8125539530ef60", "score": "0.6729746", "text": "def update_dev_dict():\n DEVICE_DICT = create_dev_dict()\n return DEVICE_DICT", "title": "" }, { "docid": "5d465eba00d47f5fc3e1ba5b8afcc697", "score": "0.67269367", "text": "def device_info(self) -> DeviceInfo:\n properties = self._all_properties()\n\n build: Optional[str] = properties.get(\"systembuildversion\")\n version = properties.get(\"ov\")\n if not version:\n version = properties.get(\"osvers\", lookup_version(build))\n\n model_name: Optional[str] = properties.get(\"model\", properties.get(\"am\"))\n if model_name:\n model = lookup_model(model_name)\n else:\n model = self._model\n\n # MRP devices run tvOS (as far as we know now) as well as HomePods for\n # some reason\n if Protocol.MRP in self._services or model in [\n DeviceModel.HomePod,\n DeviceModel.HomePodMini,\n ]:\n os_type = OperatingSystem.TvOS\n elif Protocol.DMAP in self._services:\n os_type = OperatingSystem.Legacy\n elif model in [DeviceModel.AirPortExpress, DeviceModel.AirPortExpressGen2]:\n os_type = OperatingSystem.AirPortOS\n else:\n os_type = OperatingSystem.Unknown\n\n mac = properties.get(\"macaddress\", properties.get(\"deviceid\"))\n if mac:\n mac = mac.upper()\n\n # The waMA property comes from the _airport._tcp.local service, announced by\n # AirPort Expresses (used by the admin tool). 
It contains various information,\n # for instance MAC address and software version.\n wama = properties.get(\"wama\")\n if wama:\n props: Mapping[str, str] = dict(\n cast(Tuple[str, str], prop.split(\"=\", maxsplit=1))\n for prop in (\"macaddress=\" + wama).split(\",\")\n )\n if not mac:\n mac = props[\"macaddress\"].replace(\"-\", \":\").upper()\n version = props.get(\"syVs\")\n\n return DeviceInfo(os_type, version, build, model, mac)", "title": "" }, { "docid": "d8db55abccf1a55c527ca2b2e2c05914", "score": "0.670197", "text": "def get_facts(device):\n return {}", "title": "" }, { "docid": "c0433edd02275e27edcf1d50635d9391", "score": "0.66922736", "text": "def get_extra_attributes(self, device: str) -> dict:\n raise NotImplementedError()", "title": "" }, { "docid": "3fef71e526835449096ac81b4d1340dd", "score": "0.66851634", "text": "def describe_device(DeviceId=None):\n pass", "title": "" }, { "docid": "67858f04c7eee3e8198817359d763954", "score": "0.6661673", "text": "def gather(cls, device: Device) -> \"SystemInfo\":\n system = device.lib.system\n info = cls(\n device_name=device.get_name(),\n cuda_capability=device.get_cuda_compute_capability(),\n effective_power_limit=device.get_enforced_power_limit(),\n #applications_clock_target_sm=device.get_clock(ClockType.SM, ClockId.APP_CLOCK_TARGET),\n #max_boost_clock_sm=device.get_clock(ClockType.SM, ClockId.CUSTOMER_BOOST_MAX),\n #applications_clock_target_mem=device.get_clock(ClockType.MEM, ClockId.APP_CLOCK_TARGET),\n # not supported on dgx1\n # max_boost_clock_mem=device.get_clock(ClockType.MEM, ClockId.CUSTOMER_BOOST_MAX),\n # gather system infos\n driver_version=system.get_driver_version(),\n cuda_version=system.get_cuda_driver_version(),\n nvml_version=system.get_nvml_version(),\n platform=platform.platform(),\n os_release=platform.release(),\n os_version=platform.version(),\n machine=platform.machine(),\n distro=distro.lsb_release_info(),\n python_version=sys.version,\n pip_packages=[(x.project_name, x.version) for x in pkg_resources.working_set]\n )\n return info", "title": "" }, { "docid": "f62b09fe8967e1f194e79832c88d473f", "score": "0.6657265", "text": "async def _async_zha_physical_discovery(self) -> dict[str, Any]:\n return {\"usb\": get_usb_service_info(self.config_entry)}", "title": "" }, { "docid": "bac185512f3095964eb09bf6b04f6acb", "score": "0.66508853", "text": "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, parse_id(self._nuki_device.nuki_id))},\n name=self._nuki_device.name,\n manufacturer=\"Nuki Home Solutions GmbH\",\n model=self._nuki_device.device_model_str.capitalize(),\n sw_version=self._nuki_device.firmware_version,\n via_device=(DOMAIN, self.coordinator.bridge_id),\n )", "title": "" }, { "docid": "3fef90d45be469ae9a8e9064a35df641", "score": "0.66439533", "text": "def device_state_attributes(self):\r\n attrs = {'ip_address': self._dev.ip_addr,\r\n 'shelly_type': self._dev.type_name(),\r\n 'shelly_id': self._dev.id,\r\n 'discovery': self._dev.discovery_src\r\n }\r\n\r\n room = self._dev.room_name()\r\n if room:\r\n attrs['room'] = room\r\n\r\n if self._dev.block.info_values is not None:\r\n for key, value in self._dev.block.info_values.items():\r\n attrs[key] = value\r\n\r\n if self._dev.info_values is not None:\r\n for key, value in self._dev.info_values.items():\r\n attrs[key] = value\r\n\r\n if self._dev.sensor_values is not None:\r\n for key, value in self._dev.sensor_values.items():\r\n attrs[key] = value\r\n\r\n return attrs", "title": "" }, { "docid": 
"12ffdc70387942214e722d0a8bbe0c48", "score": "0.6642683", "text": "def getDevice(id):", "title": "" }, { "docid": "95b5ee06cb1d039fb07184b067856a28", "score": "0.6633995", "text": "def device_info(soapy_args=''):\n text = []\n try:\n device = simplesoapy.SoapyDevice(soapy_args)\n text.append('Selected device: {}'.format(device.hardware))\n text.append(' Amplification elements: {}'.format(', '.join(device.list_gains())))\n text.append(' Gain range [dB]: {:.2f} - {:.2f}'.format(*device.get_gain_range()))\n text.append(' Frequency range [MHz]: {:.2f} - {:.2f}'.format(*[x / 1e6 for x in device.get_frequency_range()]))\n rates = []\n for r in device.list_sample_rates():\n if r[0] == r[1]:\n rates.append('{:.2f}'.format(r[0] / 1e6))\n else:\n rates.append('{:.2f} - {:.2f}'.format(r[0] / 1e6, r[1] / 1e6))\n text.append(' Sample rates [MHz]: {}'.format(', '.join(rates)))\n except RuntimeError:\n device = None\n text.append('No devices found!')\n return (device, text)", "title": "" }, { "docid": "217965e58282132abb2aaef43e30db86", "score": "0.6631177", "text": "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n configuration_url=self.coordinator.data.base_url,\n identifiers={(DOMAIN, self._config.unique_id)},\n manufacturer=f\"SiliconDust ({self.coordinator.data.discovery_method.name})\",\n model=self.coordinator.data.model if self.coordinator.data else \"\",\n name=self._config.title,\n sw_version=self.coordinator.data.installed_version\n if self.coordinator.data\n else \"\",\n )", "title": "" }, { "docid": "cda7356db99f3da252c2bab3dc81e55c", "score": "0.6625457", "text": "def identify(self) -> dict[str, Any]:\n # cmd = self.instrument.start_command(0x6A)\n # cmd += self.instrument.pad_reserved(3)\n # cmd.append(self.instrument.checksum(cmd))\n # response = self.instrument.send_command(cmd)\n response = self.instrument.send_command(0x6A)\n\n return {\n 'model': response[3:8].decode('ascii').split('\\x00')[0],\n 'fw': f'{response[9]:02x}{response[8]:02x}',\n 'serial_number': response[10:20].decode('ascii')\n }", "title": "" }, { "docid": "cfe5613f38edb631c65517438dc489dc", "score": "0.6625219", "text": "def F_Get_Device_Info(self, index):\n return self.lib.F_Get_Device_Info(index)", "title": "" }, { "docid": "2907764cf94b69db24aa9b79259223f5", "score": "0.66060776", "text": "def match_device_id(self):\n devices = {}\n result = self._shell.run('lsusb').stdout\n\n if result:\n # Example line\n # Bus 003 Device 048: ID 18d1:4ee7 Device Name\n for line in result.split('\\n'):\n line_list = line.split(' ')\n # Gets bus number, strips leading 0's, adds a ':', and then adds\n # the device, without its ':'. 
Example line output: 3:048\n dev_id = line_list[1].lstrip('0') + ':' + line_list[3].strip(\n ':')\n # Parses the device name, example line output: 'Device Name'\n dev_name = ' '.join(line_list[6:])\n devices[dev_id] = dev_name\n return devices", "title": "" }, { "docid": "1ee9c27561f635bdd04834492e753d90", "score": "0.6602518", "text": "def get_extra_attributes(self, device):\n device = next(\n (result for result in self.last_results if result.macaddr == device), None\n )\n return device._asdict()", "title": "" }, { "docid": "b1a278cb62f82a1c5b0fa03802c0ee6e", "score": "0.6561839", "text": "def hardware_info():\n if sys.platform == 'darwin':\n out = _mac_hardware_info()\n elif sys.platform == 'win32':\n out = _win_hardware_info()\n elif sys.platform in ['linux', 'linux2']:\n out = _linux_hardware_info()\n elif sys.platform.startswith('freebsd'):\n out = _freebsd_hardware_info()\n else:\n out = {}\n return out", "title": "" }, { "docid": "8f2094b491f06044f5da0473db94a611", "score": "0.65602887", "text": "def get_info(self) -> tuple[str, str]:\n device = self.wlr_device\n name = device.name\n if name == \" \" or not name.isprintable():\n name = \"_\"\n type_key = \"type:\" + device.type.name.lower()\n identifier = \"%d:%d:%s\" % (device.vendor, device.product, name)\n\n if type_key == \"type:pointer\" and lib is not None:\n # This checks whether the pointer is a touchpad, so that we can target those\n # specifically.\n handle = device.libinput_get_device_handle()\n if handle and lib.libinput_device_config_tap_get_finger_count(handle) > 0:\n type_key = \"type:touchpad\"\n\n return type_key, identifier", "title": "" }, { "docid": "1b6d9d293975c978ff24bb8cc60ff0cc", "score": "0.6560101", "text": "def device_info(self) -> DeviceInfo:\n return DeviceInfo(\n identifiers={(DOMAIN, self.soco.uid)},\n name=self.speaker.zone_name,\n model=self.speaker.model_name.replace(\"Sonos \", \"\"),\n sw_version=self.speaker.version,\n connections={\n (dr.CONNECTION_NETWORK_MAC, self.speaker.mac_address),\n (dr.CONNECTION_UPNP, f\"uuid:{self.speaker.uid}\"),\n },\n manufacturer=\"Sonos\",\n suggested_area=self.speaker.zone_name,\n configuration_url=f\"http://{self.soco.ip_address}:1400/support/review\",\n )", "title": "" } ]
0ca0dd98dd1074caed4c03f7b0d6db0d
This function should provide analysis w.r.t Logistic Regression Model.
[ { "docid": "212aa0738ea68a80c02d61a4900dfcb8", "score": "0.66393757", "text": "def logistic_regression_modelling(x_train_res, y_train_res, X_test, y_test):\n\n print(\"\\n\\n\\nLogistic Regression\")\n print(\"Cross Validating for best parameters..\")\n print(\"This might take some time..\\n\")\n lr = LogisticRegression(multi_class='ovr')\n cList = [1, 10]\n parameters = {'C': cList}\n gridSearch = GridSearchCV(estimator=lr,\n param_grid=parameters,\n scoring=\"recall\",\n cv=10,\n n_jobs=4)\n gridSearch.fit(x_train_res, y_train_res)\n bestAccuracyLogBestK = gridSearch.best_score_\n bestParametersLogBestK = gridSearch.best_params_\n print(\"The best parameters for Logistic Regression model are :\\n{}\\n\".format(bestParametersLogBestK))\n lr = LogisticRegression(C=10)\n lr.fit(x_train_res, y_train_res)\n print('\\nTrain Results')\n print(lr.score(x_train_res, y_train_res))\n print(recall_score(y_train_res, lr.predict(x_train_res)))\n print('\\nTest Results')\n print(lr.score(X_test, y_test))\n print(recall_score(y_test, lr.predict(X_test)))", "title": "" } ]
[ { "docid": "31f378090b180373cc086b4762a552da", "score": "0.7591138", "text": "def logistic_regression(**kwargs):\n return base_models.LogRegression(**kwargs)", "title": "" }, { "docid": "6b1761eedbde255fc46714f922e2b9a1", "score": "0.7497423", "text": "def logistic_regression(self, X, y):\n raise NotImplementedError", "title": "" }, { "docid": "31c69276bb9a4787be07020aca4e6318", "score": "0.73388684", "text": "def train_logreg_model(X, y):\r\n logger = logging.getLogger(__name__)\r\n logger.info(\"Building Logistic Regression Model...\")\r\n \r\n # Logisitc regresion\r\n logreg = lm.LogisticRegression()\r\n logreg.fit(X, y)\r\n\r\n return logreg", "title": "" }, { "docid": "1aab78d6390d3f225df3f0c767985048", "score": "0.7200539", "text": "def simple_model():\n return LogisticRegression(solver='liblinear')", "title": "" }, { "docid": "850e45cdfeee3070811845086c404cb5", "score": "0.7120353", "text": "def logistic_regression(df):\n X_train, X_test, y_train, y_test, X, y = split(df)\n log_reg = LogisticRegression()\n log_reg.fit(X_train, y_train)\n y_pred = log_reg.predict(X_test)\n print(\"Coefficients:\",log_reg.coef_) # determine most important questions\n print(\"Confusion Matrix:\", confusion_matrix(y_test, y_pred))\n print('Logistic Regression Accuracy: ', log_reg.score(X, y))\n print(\"Precision:\", precision_score(y_test, y_pred))\n print(\"Recall:\", recall_score(y_test, y_pred))", "title": "" }, { "docid": "241d0edc63414f4fbc5aa0a8dbae67ae", "score": "0.708599", "text": "def logisticRegression(train_data, test_data, train_lbl, test_lbl):\n # default solver is incredibly slow thats why we change it\n logisticRegr = LogisticRegression(solver = 'lbfgs')\n #logisticRegr = LogisticRegression(solver = 'sag') ## much worse than lbfgs\n logisticRegr.fit(train_data, train_lbl.values.ravel())\n # Returns a NumPy Array\n # Predict for One Observation (image)\n predictions = logisticRegr.predict(test_data)\n accuracy = logisticRegr.score(test_data, test_lbl)\n print(\"Logistic Regression Accuracy: \\n\" + str(accuracy))\n\n tp, tn, fp, fn = precision_and_recall(test_lbl['isFraud'].tolist(), predictions.tolist())\n\n #print(\"precision = \" + str(precision)+ \" recall = \" + str(recall) + \" F1 = \" + str(F1) + \"\\n\")\n\n return accuracy, tp, tn, fp, fn", "title": "" }, { "docid": "8473404da8095d02533b640adadbc568", "score": "0.695118", "text": "def __init_logistic_regression_model(self) -> Pipeline:\n return Pipeline([\n ('counter', CountVectorizer(stop_words=get_stop_words('english'))),\n ('tfidf', TfidfTransformer()),\n ('classifier', SGDClassifier(loss='log'))\n ])", "title": "" }, { "docid": "21e70d2a2d8114f76e61ef8f92f84fd9", "score": "0.69051325", "text": "def train_model(self):\n \n self.predictor = LogisticRegression().fit(self.x_train, self.y_train)\n return", "title": "" }, { "docid": "8b35757be658456d0d9098452874f832", "score": "0.6878717", "text": "def log_regression(run_final):\n\t# do logistic regression\n\tx = logit(\"respcat ~ gain + loss\", run_final).fit()\n\n\t# check the summary\n\tprint(x.summary())\n\n\t#store the parameters of logistic regression\n\tlogit_pars = x.params\n\n\treturn logit_pars", "title": "" }, { "docid": "90a3cc7dc7d6157e66c600ce9b69bccd", "score": "0.68550605", "text": "def get_analytics(model,X_train,X_test,y_train,y_test):\n if y_train is not None:\n X_test = process(X_test)\n confusion_mx, report = analytic.get_analytics(model,X_train,X_test,y_train,y_test)\n wandb.log({\"SVM Accuracy Report\": wandb.Html( pd.DataFrame(report).to_html(), 
inject=False)})", "title": "" }, { "docid": "f841feeaaae78811b6cc02bdb277f2bd", "score": "0.6683134", "text": "def train(self):\n print \":: Baseline Model - Logistic Regression ::::\"\n\n \"\"\"Select all columns except last column (target)\"\"\"\n target_col = self.training_data.columns[-1]\n\n df_features_train = self.training_data[self.feature_cols]\n df_target_train = self.training_data[target_col]\n df_features_valid = self.validation_data[self.feature_cols]\n df_target_valid = self.validation_data[target_col]\n\n print \":::: Training model with default settings...\"\n self.model = LogisticRegression()\n self.model = self.model.fit(df_features_train, df_target_train)\n\n \"\"\"Check the accuracy on the validation set\"\"\"\n # lr_score = log_regr.score(df_features_valid, df_target_valid)\n # print \":::: Mean accuracy score: {0}\".format(lr_score)\n valid_predictions_proba = self.model.predict_proba(df_features_valid)\n loss = log_loss(df_target_valid, valid_predictions_proba)\n print \":::: Log loss: {0}\".format(loss)", "title": "" }, { "docid": "ee243b39d324539e6f13ea6bba7e0b66", "score": "0.66166437", "text": "def train(self):\n cols = self.get_dataset_col_names()\n\n X = self.multibranch_df[cols]\n y = self.multibranch_df[\"Y\"]\n\n self.model = LogisticRegression()\n self.model.fit(X, y)\n\n self.export_model()", "title": "" }, { "docid": "35687835d0e169dc9a9009e1254d7ada", "score": "0.65445095", "text": "def fit(self):\n X = self.feature_matrix_from_interactions(self.history)\n Y = np.array(self.history['outcome'].apply(lambda x: 1 if x else 0).values)\n\n Cs = [0.1, 1., 10.]\n def val_log_likelihood(C):\n \"\"\"\n Compute average log-likelihood of IRT model with a specific\n regularization constant on a validation set\n\n :param float C: Coefficient of L2 regularization term\n :rtype: float\n :return: Average log-likelihood on validation set\n \"\"\"\n train_idxes, val_idxes = cross_validation.train_test_split(\n np.arange(0, len(self.history), 1), train_size=0.7)\n model = LogisticRegression(penalty='l2', C=C)\n X_train = self.feature_matrix_from_interactions(self.history.ix[train_idxes])\n model.fit(X_train, Y[train_idxes])\n X_val = self.feature_matrix_from_interactions(self.history.ix[val_idxes])\n log_probas = model.predict_log_proba(X_val)\n idx_of_zero = 1 if model.classes_[1]==0 else 0\n return np.mean(log_probas[np.arange(0, len(val_idxes), 1), idx_of_zero ^ Y[val_idxes]])\n\n self.model = LogisticRegression(penalty='l2', C=(\n 1. 
if not self.select_regularization_constant else max(Cs, key=val_log_likelihood)))\n\n self.model.fit(X, Y)", "title": "" }, { "docid": "6433c10673c4128989afbf345de4e385", "score": "0.6525886", "text": "def train_logistic_regression(X_train, y_train, X_test_vecs, X_test_strs, y_test):\n\tlogistic_clf = LogisticRegression(solver='liblinear')\n\tlogistic_clf.fit(X_train, y_train)\n\tpredictions = predict(logistic_clf, X_test_vecs, X_test_strs)\n\treturn precision_recall_fscore_support(y_test, predictions, average='binary')", "title": "" }, { "docid": "458bc8eb3f0d7239c95cb0f36772f868", "score": "0.65018874", "text": "def init_logistic(training_path):\n from sklearn.linear_model import LogisticRegression\n training = pandas.DataFrame.from_csv(training_path)\n training = training.as_matrix()\n X = training[:, 0:5]\n Y = training[:,5]\n lr = LogisticRegression()\n lr.fit(X,Y)\n return lr", "title": "" }, { "docid": "e21e08f666f0ce6be1b4bbb4368f33ec", "score": "0.64846027", "text": "def logisticRegression(request):\n if request.method == \"POST\":\n request.session['test_experiment'] = request.POST.get('test_experiment', None)\n request.session['trained_model_gse'] = request.POST.get('trained_model', None)\n return redirect('/supervisedtest/')\n df = pd.read_json(request.session['dataframe'])\n feature_order = request.session['feature_order']\n gse = request.session['gse']\n X = df.iloc[:, :-1].values\n y = df.iloc[:, -1].values\n # To implement SVM, or other supervised models for that matter, one would only need to create a view\n # similar to this one, and replace the next line with the model needed from sklearn\n lr = LogisticRegression()\n lr.fit(X, y)\n\n rf = RandomForestClassifier(n_estimators=10000)\n rf.fit(X, y)\n if len(feature_order) < 10:\n important_features_indx = np.argpartition(rf.feature_importances_, -len(feature_order))[-len(feature_order):]\n else:\n important_features_indx = np.argpartition(rf.feature_importances_, -10)[-10:]\n\n important_features = []\n for indx in important_features_indx:\n important_features.append(feature_order[indx][0])\n\n gene_order = []\n gene_means = []\n for feature in feature_order:\n gene_order.append(feature[0])\n gene_means.append(feature[1])\n\n experiment = Experiment.objects.get(gse_id=gse)\n pc = request.session['pc']\n threshold = request.session['threshold']\n\n SupervisedModel.objects.get_or_create(experiment=experiment, model=pickle.dumps(lr), gene_order=gene_order, gene_means=gene_means, pc=pc, threshold=threshold)\n\n return render(request, 'supervised.html', {'important_features': important_features, 'experiment': Experiment.objects.all(), 'models': SupervisedModel.objects.all()})", "title": "" }, { "docid": "46408d367a29eba1cabd865739bc9e95", "score": "0.6428331", "text": "def logi_reg(data, dependent):\n target = pd.DataFrame(data[dependent], columns=[dependent])\n \n X = data.loc[:, data.columns != dependent]\n y = data.loc[:, data.columns == dependent]\n \n lm = linear_model.LogisticRegression()\n model = lm.fit(X,y)\n \n r_squared = lm.score(X,y) # returns R^2 value\n #coefs = lm.coef_ # returns coefficients\n #intercept = lm.intercept_ # returns intercept\n\n print(\"R^2 Value: \" + str(r_squared))\n #print(\"Equation: \" + str(coefs) + \" + \" + str(intercept))", "title": "" }, { "docid": "110f40d1674f38086c5017e3ec9dbc22", "score": "0.6345363", "text": "def logistic_regression(y, tx, w):\n loss = calculate_loss(y, tx, w)\n gradient = calculate_gradient(y, tx, w)\n hessian = calculate_hessian(y, tx, w)\n return loss, 
gradient, hessian", "title": "" }, { "docid": "1aaa3fa0354cc6b4844beb4a5bddabd9", "score": "0.63195527", "text": "def compute_log_reg(self):\n \n self.X = self.data.iloc[:,:-1].values\n self.X = sm.add_constant(self.X)\n self.y = self.data.iloc[:,-1]\n self.model = sm.Logit(self.y, self.X).fit(disp=False)", "title": "" }, { "docid": "f64ea33fb120799c618ea691db381f72", "score": "0.62881213", "text": "def logistic(self, x):\n pass", "title": "" }, { "docid": "e0f57730a9d8379112239b4f914a7ed4", "score": "0.62691945", "text": "def logisticRegressionTesting(request):\n if request.method == \"GET\":\n request.session['test_experiment'] = request.POST.get('test_experiment', None)\n request.session['trained_model_gse'] = request.POST.get('trained_model', None)\n return render(request, 'supervised.html', {'experiment': Experiment.objects.all(), 'models': SupervisedModel.objects.all()})\n test_experiment_gse = request.POST.get('test_experiment', None)\n trained_model_gse = request.POST.get('trained_model', None)\n\n test_experiment = Experiment.objects.get(gse_id=test_experiment_gse)\n train_experiment = Experiment.objects.get(gse_id=trained_model_gse)\n\n train_gene_format = train_experiment.gene_format\n test_gene_format = test_experiment.gene_format\n\n model = SupervisedModel.objects.get(experiment=train_experiment)\n train_gene_order = model.gene_order\n train_gene_means = model.gene_means\n\n if train_gene_format == 'names' and test_gene_format == 'ensembl':\n with open(os.path.join(BASE_DIR, \"supervisedLearning/helper/ensembl_to_name\"), 'rb') as f:\n ensembl_to_name = pickle.load(f)\n\n test_samples = Sample.objects.filter(experiment=test_experiment)\n test_genes = Gene.objects.filter(experiment__exact=Experiment.objects.get(gse_id=test_experiment_gse))\n\n test_gene_names = []\n test_gene_position = []\n for gene in test_genes:\n ensembl_id = gene.gene_name\n try:\n if '.' 
in ensembl_id:\n ensembl_id = ensembl_id.split('.')[0]\n gene_name = ensembl_to_name[ensembl_id]\n\n except KeyError:\n gene_name = None\n if gene_name:\n test_gene_names.append(gene_name)\n test_gene_position.append(gene.position)\n\n test_df_values = []\n indices = []\n for sample in tqdm(test_samples):\n indices.append(sample.sample_id)\n sample_counts = []\n gene_counts = sample.count\n for i, gene in enumerate(train_gene_order):\n try:\n gene_idx = test_gene_names.index(gene)\n pos = test_gene_position[gene_idx]\n count = gene_counts[pos]\n except Exception:\n count = train_gene_means[i]\n sample_counts.append(count)\n test_df_values.append(np.array(sample_counts))\n df = pd.DataFrame(np.array(test_df_values), columns=train_gene_order, index=indices)\n\n X = df.values\n lr = pickle.loads(model.model)\n y = lr.predict(X)\n\n samples = Sample.objects.filter(experiment=test_experiment)\n sample_to_geneCount = {}\n for sample in samples:\n sample_to_geneCount[sample.sample_id] = sample.count\n\n sample_to_geneCount_df = pd.DataFrame.from_dict(sample_to_geneCount, orient='index')\n new_cols = []\n allgenes = Gene.objects.filter(experiment=test_experiment).order_by('position')\n\n for gene in allgenes:\n new_cols.append(gene.gene_name)\n sample_to_geneCount_df.columns = new_cols\n\n dfValues = sample_to_geneCount_df.iloc[:, :].values\n\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(dfValues)\n principalDf = pd.DataFrame(data=principalComponents,\n columns=['principal component 1', 'principal component 2'],\n index=indices)\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlabel('PC1 {0:0.1f}%'''.format(pca.explained_variance_ratio_[0] * 100), fontsize=15)\n ax.set_ylabel('PC2 {0:0.1f}%'''.format(pca.explained_variance_ratio_[1] * 100), fontsize=15)\n yDict = {}\n for i, x in enumerate(indices):\n yDict[x] = y[i]\n selectedFeaturesdf = pd.DataFrame.from_dict(yDict, orient='index')\n selectedFeaturesdf.columns = ['subtype']\n finalDf = pd.concat([principalDf, selectedFeaturesdf], axis=1)\n targets = [0, 1]\n for target in targets:\n indicesToKeep = finalDf['subtype'] == target\n ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']\n , finalDf.loc[indicesToKeep, 'principal component 2']\n , s=50)\n ax.legend(targets)\n ax.set_xticks([0], minor=True)\n ax.xaxis.grid(True, which='minor', linestyle='--', linewidth=2)\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n buf.seek(0)\n string = base64.b64encode(buf.read())\n uri = 'data:image/png;base64,' + urllib.parse.quote(string)\n context = {'image': uri}\n return render(request, 'pcatest.html', context)\n\n elif train_gene_format == 'ensembl' and test_gene_format == 'names':\n with open(os.path.join(BASE_DIR, \"supervisedLearning/helper/name_to_ensembl\"), 'rb') as f:\n name_to_ensembl = pickle.load(f)\n\n test_samples = Sample.objects.filter(experiment=test_experiment)\n test_genes = Gene.objects.filter(experiment__exact=Experiment.objects.get(gse_id=test_experiment_gse))\n\n test_gene_names = []\n test_gene_position = []\n for gene in test_genes:\n ensembl_id = gene.gene_name\n if '.' 
in ensembl_id:\n ensembl_id = ensembl_id.split('.')[0]\n try:\n gene_name = name_to_ensembl[ensembl_id]\n except KeyError:\n gene_name = None\n if gene_name:\n test_gene_names.append(gene_name)\n test_gene_position.append(gene.position)\n\n test_df_values = []\n indices = []\n for sample in tqdm(test_samples):\n indices.append(sample.sample_id)\n sample_counts = []\n gene_counts = sample.count\n for i, gene in enumerate(train_gene_order):\n try:\n gene_idx = test_gene_names.index(gene)\n pos = test_gene_position[gene_idx]\n count = gene_counts[pos]\n except Exception:\n count = train_gene_means[i]\n sample_counts.append(count)\n test_df_values.append(np.array(sample_counts))\n df = pd.DataFrame(np.array(test_df_values), columns=train_gene_order, index=indices)\n\n X = df.values\n lr = pickle.loads(model.model)\n y = lr.predict(X)\n\n samples = Sample.objects.filter(experiment=test_experiment)\n sample_to_geneCount = {}\n for sample in samples:\n sample_to_geneCount[sample.sample_id] = sample.count\n\n sample_to_geneCount_df = pd.DataFrame.from_dict(sample_to_geneCount, orient='index')\n new_cols = []\n allgenes = Gene.objects.filter(experiment=test_experiment).order_by('position')\n\n for gene in allgenes:\n new_cols.append(gene.gene_name)\n sample_to_geneCount_df.columns = new_cols\n\n dfValues = sample_to_geneCount_df.iloc[:, :].values\n\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(dfValues)\n principalDf = pd.DataFrame(data=principalComponents,\n columns=['principal component 1', 'principal component 2'],\n index=indices)\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlabel('PC1 {0:0.1f}%'''.format(pca.explained_variance_ratio_[0] * 100), fontsize=15)\n ax.set_ylabel('PC2 {0:0.1f}%'''.format(pca.explained_variance_ratio_[1] * 100), fontsize=15)\n yDict = {}\n for i, x in enumerate(indices):\n yDict[x] = y[i]\n selectedFeaturesdf = pd.DataFrame.from_dict(yDict, orient='index')\n selectedFeaturesdf.columns = ['subtype']\n finalDf = pd.concat([principalDf, selectedFeaturesdf], axis=1)\n targets = [0, 1]\n for target in targets:\n indicesToKeep = finalDf['subtype'] == target\n ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']\n , finalDf.loc[indicesToKeep, 'principal component 2']\n , s=50)\n ax.legend(targets)\n ax.set_xticks([0], minor=True)\n ax.xaxis.grid(True, which='minor', linestyle='--', linewidth=2)\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n buf.seek(0)\n string = base64.b64encode(buf.read())\n uri = 'data:image/png;base64,' + urllib.parse.quote(string)\n context = {'image': uri}\n return render(request, 'pcatest.html', context)\n\n else:\n test_samples = Sample.objects.filter(experiment=test_experiment)\n test_genes = Gene.objects.filter(experiment__exact=Experiment.objects.get(gse_id=test_experiment_gse))\n\n test_gene_names = []\n test_gene_position = []\n for gene in test_genes:\n gene_name = gene.gene_name\n test_gene_names.append(gene_name)\n test_gene_position.append(gene.position)\n\n test_df_values = []\n indices = []\n for sample in tqdm(test_samples):\n indices.append(sample.sample_id)\n sample_counts = []\n gene_counts = sample.count\n for i, gene in enumerate(train_gene_order):\n try:\n gene_idx = test_gene_names.index(gene)\n pos = test_gene_position[gene_idx]\n count = gene_counts[pos]\n except Exception:\n count = train_gene_means[i]\n sample_counts.append(count)\n test_df_values.append(np.array(sample_counts))\n df = pd.DataFrame(np.array(test_df_values), columns=train_gene_order, 
index=indices)\n\n X = df.values\n lr = pickle.loads(model.model)\n y = lr.predict(X)\n\n samples = Sample.objects.filter(experiment=test_experiment)\n sample_to_geneCount = {}\n for sample in samples:\n sample_to_geneCount[sample.sample_id] = sample.count\n\n sample_to_geneCount_df = pd.DataFrame.from_dict(sample_to_geneCount, orient='index')\n new_cols = []\n allgenes = Gene.objects.filter(experiment=test_experiment).order_by('position')\n\n for gene in allgenes:\n new_cols.append(gene.gene_name)\n sample_to_geneCount_df.columns = new_cols\n\n dfValues = sample_to_geneCount_df.iloc[:, :].values\n\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(dfValues)\n principalDf = pd.DataFrame(data=principalComponents,\n columns=['principal component 1', 'principal component 2'],\n index=indices)\n fig = plt.figure(figsize=(8, 8))\n ax = fig.add_subplot(1, 1, 1)\n ax.set_xlabel('PC1 {0:0.1f}%'''.format(pca.explained_variance_ratio_[0] * 100), fontsize=15)\n ax.set_ylabel('PC2 {0:0.1f}%'''.format(pca.explained_variance_ratio_[1] * 100), fontsize=15)\n yDict = {}\n for i, x in enumerate(indices):\n yDict[x] = y[i]\n selectedFeaturesdf = pd.DataFrame.from_dict(yDict, orient='index')\n selectedFeaturesdf.columns = ['subtype']\n finalDf = pd.concat([principalDf, selectedFeaturesdf], axis=1)\n targets = [0, 1]\n for target in targets:\n indicesToKeep = finalDf['subtype'] == target\n ax.scatter(finalDf.loc[indicesToKeep, 'principal component 1']\n , finalDf.loc[indicesToKeep, 'principal component 2']\n , s=50)\n ax.legend(targets)\n ax.set_xticks([0], minor=True)\n ax.xaxis.grid(True, which='minor', linestyle='--', linewidth=2)\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n buf.seek(0)\n string = base64.b64encode(buf.read())\n uri = 'data:image/png;base64,' + urllib.parse.quote(string)\n context = {'image': uri}\n return render(request, 'pcatest.html', context)", "title": "" }, { "docid": "5dcb5d0b599d21411a2be57af101529a", "score": "0.6248001", "text": "def modeling(x_train, y_train, x_test, test):\n #LogisticRegression\n clf = LogisticRegression(penalty='l2', solver='liblinear', C=1000, max_iter=300)\n\n clf.fit(x_train, y_train)\n pred = clf.predict(x_test)\n pred_prob = clf.predict_proba(x_test)\n #Argmax for only one coach and one participant per transcript\n '''\n url_list = test.url.unique()\n pred_max_dev = np.array([])\n for url in url_list:\n url_index = test.index[test[\"url\"] == url].tolist()\n #print(url_index)\n prob = clf.predict_proba(x_test[url_index, :])[:, 1]\n tmp = np.where(prob == prob[prob.argmax()], 1, 0)\n pred_max_dev = np.concatenate([pred_max_dev, tmp])\n '''\n return pred, pred_prob", "title": "" }, { "docid": "f98026cf827c6a185a24d819d9ef89bf", "score": "0.6241261", "text": "def train_lr(X,y):\n lg = LogisticRegression(solver='lbfgs')\n lg.fit(X,y)\n\n return lg", "title": "" }, { "docid": "1e4a7b9bdf3a5182cb6fc1060320f275", "score": "0.6231127", "text": "def predict_LogisticRegressionWithSGD(iterations,step,regParam,regType):\n lrModel=LogisticRegressionWithSGD.train(scaledData, iterations=iterations,step=step,regParam=regParam, regType=regType)\n lrMetrics=scaledData.map(lambda p: (p.label, lrModel.predict(p.features)))\n lrAccuracy=lrMetrics.filter(lambda (actual,pred):actual==pred).count()*1.0/data.count()\n return lrAccuracy", "title": "" }, { "docid": "ff733bedb8668a4dd4ea5dcb771b7c52", "score": "0.62264955", "text": "def summary(self):\n\n if not hasattr(self, 'w'):\n print('LogisticModel has not been fit.')\n return(None)\n\n coef_labels 
= ['---------------','<Intercept>']+list(self.varnames[1:])\n estimates = ['---------------']+list(self.w)\n\n # This table will eventually contain more metrics\n table_dic = dict(zip(coef_labels, estimates))\n\n coef_str = ' + '.join(self.varnames[1:])+'\\n'\n\n print('\\n'+self.name+': logistic regression')\n print('\\n{} ~ {}'.format(self.varnames[0], coef_str))\n print('\\033[1m'+\"{:<15} {:<15}\".format('Coefficient','Estimate')+'\\033[0m')\n for k, v in sorted(table_dic.items()):\n label = v\n print(\"{:<15} {:<15}\".format(k, label))\n if not self.converged:\n print('\\nWarning: IRLS failed to converge. Try increasing the number of iterations.')\n else:\n print('\\nConverged in {} iterations (IRLS)'.format(self.converged_k))\n\n return(None)", "title": "" }, { "docid": "4526279d0f05dcadcd1ae43ddcfc8e20", "score": "0.62034017", "text": "def __init__(self):\n self.X = None # The feature vectors [shape = (m, n) => (n, m)]\n self.y = None # The regression outputs [shape = (m, 1)]\n self.W = None # The parameter vector `W` [shape = (n, 1)]\n self.bias = None\n self.lr = None # Learning Rate `alpha`\n self.m = None\n self.n = None\n self.epochs = None\n print('Logistic Regression initialized')", "title": "" }, { "docid": "3cce886712a8133ca5e0cc7fa2890731", "score": "0.61913556", "text": "def logistic(weights, data, targets, hyperparameters):\n y = logistic_predict(weights, data)\n\n #####################################################################\n # TODO: #\n # Given weights and data, return the averaged loss over all data #\n # points, gradient of parameters, and the probabilities given by #\n # logistic regression. #\n #####################################################################\n # Hint: hyperparameters will not be used here.\n N = y.size\n\n f, frac_correct = evaluate(targets, y)\n df = np.append(np.dot(data.T, y - targets) / N, [[np.sum(y - targets) / N]], axis=0)\n #####################################################################\n # END OF YOUR CODE #\n #####################################################################\n return f, df, y", "title": "" }, { "docid": "56583db9bbbc42143b79ef7f6973a37f", "score": "0.6150306", "text": "def test_part2():\n X,Y = get_exam_dataset()\n X,Y = normalize_data(X,Y)\n log_reg = LogisticRegression(cls=1)\n log_reg.fit(X,Y)\n accuracy = evaluate(log_reg, X, Y)\n plot_exam_data_decision_boundary(X,Y,log_reg, 1)\n assert(accuracy > 0.8)", "title": "" }, { "docid": "43a4d7d4fdf8d23d6f950f627840ecb4", "score": "0.6144465", "text": "def logistic_regression(x, y, logger=None):\r\n alpha=0.0001\r\n w = np.zeros(len(x[0]))\r\n\r\n def getGuess(inputs):\r\n # Get normal guess\r\n guess = np.dot(w.T, inputs)\r\n # Raise it to the e power\r\n guess = math.e**(-1 * guess)\r\n # Return the actual guess\r\n return 1 / (1 + guess)\r\n \r\n def getError():\r\n # Go through all points and figure out error\r\n error = 0\r\n for i, inputs in enumerate(x):\r\n if y[i] == 0:\r\n error += np.log(1 - getGuess(inputs))\r\n else:\r\n error += np.log(getGuess(inputs))\r\n\r\n return -1 * error\r\n \r\n def getDifference(index):\r\n difference = 0\r\n for inputs, output in zip(x, y):\r\n # Finds the difference per point \r\n difference = difference + ((getGuess(inputs) - output) * inputs[index])\r\n return difference\r\n \r\n pastError = 0\r\n currentError = -1\r\n j = 0\r\n # Making sure the algoritem goes past 500 iterations and then checks for convergence\r\n while currentError - pastError > 0.03 or j < 500:\r\n # Figure out new values of weights\r\n 
for i, _ in enumerate(w):\r\n w[i] = w[i] - alpha * getDifference(i)\r\n j = j + 1\r\n # Setup the comparison of the error\r\n pastError = currentError\r\n currentError = getError()\r\n\r\n return w", "title": "" }, { "docid": "6d6837aa8bc7f44fba6b0a0f1f4c46f9", "score": "0.6087256", "text": "def sklearn_trainer(\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> ClassifierMixin:\n clf = LogisticRegression(penalty=\"l1\", solver=\"saga\", tol=0.1)\n clf.fit(X_train.reshape((X_train.shape[0], -1)), y_train)\n return clf", "title": "" }, { "docid": "46f1cf738face578ad053c76a47ea2a7", "score": "0.605001", "text": "def lr(training_data, target_data):\n\n model = linear_model.LogisticRegression()\n model.fit(training_data, target_data)\n\n return model", "title": "" }, { "docid": "1b3c0b397a4aa39f9c80a67eeed8c9a7", "score": "0.6048946", "text": "def _binomial_wrapper(self):\n if self.verbose:\n print(\"Sparse logistic regression. \\n\")\n print(self.penalty.upper() + \"regularization via active set identification and coordinate descent. \\n\")\n levels = np.unique(self.y)\n if (levels.size != 2) or (1 not in levels) or (0 not in levels):\n raise RuntimeError(\"Response vector should contains 0s and 1s.\")\n return self._decor_cinterface(_PICASSO_LIB.SolveLogisticRegression)", "title": "" }, { "docid": "0ec87be5f97f8a64d32f462c89939a9f", "score": "0.60307384", "text": "def analyse_log():\n y_test = []\n y_predict = []\n for row in open(\"log/pred_true_log.csv\"):\n row = row.split(\",\")\n y_test.append(int(row[0]))\n y_predict.append(int(row[1]))\n n = len(y_test)\n label_names = get_label(DatasetEnum.MNIST_AZ)\n print(classification_report(y_test, y_predict, target_names=label_names))\n cm = confusion_matrix(y_test, y_predict)\n np.savetxt(\"log/confusion_matrix_log.csv\", cm, delimiter=\",\", fmt='%d')", "title": "" }, { "docid": "5ade64bef5cc0cc7af2e88856a4b129a", "score": "0.6027531", "text": "def stats():\n lambdas = np.logspace(-4, 5,10)\n model = LinReg(X, Y)\n\n\n models = []\n for regmethod in ['ols', 'ridge', 'lasso']:\n\n method = getattr(model, regmethod)\n\n for lamb in lambdas:\n\n model.lamb = lamb\n\n J = method(model.xTrain, model.yTrain)\n Ypred_train = model.xTrain @ J\n Ypred_test = model.xTest @ J\n\n mse_train = model.MSE(model.yTrain, Ypred_train)\n mse_test = model.MSE(model.yTest, Ypred_test)\n r2_train = model.R2(model.yTrain, Ypred_train)\n r2_test = model.R2(model.yTest, Ypred_test)\n\n models.append([regmethod, lamb, mse_train, mse_test,\\\n r2_train, r2_test])\n\n if regmethod == 'ols':\n break\n\n print(\"\\nMODEL ANALYSIS:\")\n print(\"=\"*85)\n print(\" Method | lambda | MSE Train | MSE Test | R2 Train | R2 Test |\")\n print(\"-\"*85)\n\n for i in range(len(models)):\n print(\"%8s|%8g|%11g|%10g|%10f|%10f|\" % tuple(models[i]))\n\n print(\"-\"*85)\n\n\n #r2s = np.array([models[i][4:] for i in range(len(models))])\n #plt.semilogx(lambdas, np.tile(r2s[0], (len(lambdas),1)))\n #plt.show()", "title": "" }, { "docid": "cc1b0f3719a563f9caa499daa65da62e", "score": "0.5982519", "text": "def log_model_search(self):\n self.compute_hyperparameter_ranges()\n none_model = [SGDClassifier(penalty='none', loss='log', class_weight={1: 0.07, 0: 1 - 0.07})]\n l2_model = [LogisticRegression(penalty='l2', C=x, class_weight={1: 0.07, 0: 1 - 0.07}) for x in self.c]\n l1_model = [LogisticRegression(penalty='l1', C=x, class_weight={1: 0.07, 0: 1 - 0.07}) for x in self.c]\n sgd_parameters = list(itertools.chain(itertools.product(self.alpha, self.l1_ratio)))\n en_model = 
[SGDClassifier(penalty='elasticnet', loss='log',\n alpha=alpha, l1_ratio=l1r, class_weight={1: 0.07, 0: 1 - 0.07}) for alpha, l1r in sgd_parameters]\n if self.model_type is 'none':\n self.models = none_model\n elif self.model_type is 'l1':\n self.models = l1_model\n elif self.model_type is 'l2':\n self.models = l2_model\n elif self.model_type is 'elasticnet':\n self.models = en_model\n elif self.model_type is None:\n self.models = none_model + l2_model + l1_model + en_model", "title": "" }, { "docid": "c09052e1dccff21b8b5fbaee00e4f198", "score": "0.59764916", "text": "def run_logistic_regression(pretrained=True, generate_submission_file=False) -> None:\n tx, y, tx_submission = load_data()\n tx_c = cap_outliers_fn(tx)\n datasets, outputs, _ = split_input_data(tx_c, y)\n\n for jet in range(num_jets):\n print('Training model for jet', jet)\n if pretrained:\n models[jet].w = load_weights_model(model_weights_filenames[jet])\n\n models[jet], mean, std, log_mean, log_std = train_model(datasets[jet], outputs[jet], models[jet],\n logistic_regression_fn, max_iters=300,\n batch_size=8192, gamma_decay=None, gamma=0.1,\n reg_lambda=1e-6, regularization='l2')\n else:\n models[jet] = Model()\n gammas = [0.3, 0.2, 0.2, 0.2]\n batch_sizes = [8192, 1024, 512, 128]\n max_iters = [8000, 15000, 14000, 30000]\n\n models[jet], mean, std, log_mean, log_std = train_model(datasets[jet], outputs[jet], models[jet],\n logistic_regression_fn,\n batch_size=batch_sizes[jet],\n max_iters=max_iters[jet],\n gamma=gammas[jet],\n reg_lambda=1e-6, regularization='l2')\n save_weights_model(models[jet], '../output_files/model_{}_logistic_regression.npy'.format(jet))\n\n means.append(mean)\n stds.append(std)\n log_means.append(log_mean)\n log_stds.append(log_std)\n print('Accuracy on whole training is', get_train_data_accuracy(tx, y))\n\n if generate_submission_file:\n create_submission('../output_files/output.csv', tx_submission)", "title": "" }, { "docid": "1d6cdf11f36b69cce5d0e5fccd0581ad", "score": "0.5970854", "text": "def do_main_function():\n args = options_parse_model()\n print('Loading data ...')\n dataset = load_csv(args, True)\n print('Preprocessing data ...')\n _, features, data = preprocessing(dataset)\n print('Features are: ', features[1:])\n lrc = MyLogisticRegressionClass()\n print('Model fitting ...')\n thetas = lrc.fit(data, features)\n np.save(args.thetas_file, thetas)\n success_message(\"Array of coefficients is saved to file \" + args.thetas_file + '.npy')\n print('Accuracy scoring ...')\n normal_message(\"Score = \" + str(lrc.score(data, features)))\n print('Done!')", "title": "" }, { "docid": "39e3fe1f44aee9c748da00f779e52cdb", "score": "0.59497696", "text": "def logisticRegressionBenchmark(self, target, features, test_size=0.2, eval_size=0.2):\n from sklearn.linear_model import LogisticRegression\n from sklearn.metrics import confusion_matrix, classification_report\n\n name1 = self.name1\n name2 = self.name2\n\n X_train1, X_val1, y_train1, y_val1, X_test1, y_test1 = self.dataPrep(target, features, test_size, eval_size, name1)\n X_train2, X_val2, y_train2, y_val2, X_test2, y_test2 = self.dataPrep(target, features, test_size, eval_size, name2)\n\n # Training models and calculating their accuracies\n clf1 = LogisticRegression().fit(X_train1, y_train1)\n s1 = clf1.score(X_test1, y_test1)\n\n clf2 = LogisticRegression().fit(X_train2, y_train2)\n s2 = clf2.score(X_test2, y_test2)\n\n # saving models in object\n self.LR_model1 = clf1\n self.LR_model2 = clf2\n self.score_clf1 = s1\n self.score_clf2 = s2\n\n # 
Evaulating and calculating confusion matrices\n predictions1 = clf1.predict(X_val1)\n predictions2 = clf2.predict(X_val2)\n conf_mat1 = confusion_matrix(y_true=y_val1, y_pred=predictions1)\n conf_mat2 = confusion_matrix(y_true=y_val2, y_pred=predictions2)\n\n self.plotVariableImportance(clf1, clf2, features, self.file_dir)\n self.plotConfusionMatrix(conf_mat1, conf_mat2, name1, name2, self.file_dir)\n\n return 0", "title": "" }, { "docid": "f0b946afe005d21b0d141125d4a45ff0", "score": "0.59356606", "text": "def logistic_regression_train(theta,X,Y):\n boolValue = False\n alpha = 0.1\n theta_temp = np.zeros((1,X.shape[1]))\n n = 0\n while n<=10000:\n Y_hat = hypothesis_function(X,theta)\n theta_temp = theta_temp - alpha*(1.0/X.shape[0])* np.dot((Y_hat-Y).T,X)\n cost = 1.0/float(2*Y.shape[0])*(Y_hat-Y).T.dot((Y_hat-Y))\n #boolValue = convergence(theta_temp.T,theta)\n theta = theta_temp.T\n if n%100==0:\n print('Cost Function Value = %2.5f \\t interation = %2.1f' %(cost,n))\n #alpha -=0.0001\n n+=1\n return theta", "title": "" }, { "docid": "8803cfb621be545801719cda85714e8c", "score": "0.591677", "text": "def logistic_regression(penalty, C, class_weight, warm_start, random_state, solver):\n logistic_regression_parameters = {\n 'penalty': penalty,\n 'C': C,\n 'class_weight': class_weight,\n 'warm_start': warm_start,\n 'random_state': random_state,\n 'solver': solver\n }\n # Defining model\n model = GridSearchCV(LogisticRegression(), logistic_regression_parameters, cv=5, verbose=1, n_jobs=-1)\n return model", "title": "" }, { "docid": "21b3719f253d087eb1babce65ed9b1d9", "score": "0.59145284", "text": "def linear_model_(housing_prepared, housing_labels):\n logging.info(\"Linear model.....\")\n lin_reg = LinearRegression()\n model1 = lin_reg.fit(housing_prepared, housing_labels)\n return model1", "title": "" }, { "docid": "e3834d3c2f1179b76ca5c6382b39400d", "score": "0.59082633", "text": "def logistic(x):\n pass\n return 1 / (1 + np.exp(-x))", "title": "" }, { "docid": "bc223194350cea5beaa4db7464d9545e", "score": "0.5896678", "text": "def create_classifier():\n\n # Logistic Regression\n return LogisticRegression(penalty='l2', max_iter=1000, C=1, random_state=42)", "title": "" }, { "docid": "88a11dc7d7ade60644cca64913b6322c", "score": "0.5890832", "text": "def predict_logit(self, x):\n self.model.train()\n with torch.no_grad():\n y_ = self.model(x)\n return y_", "title": "" }, { "docid": "d72ea1efc354fdb6f909afa0e011a896", "score": "0.58709085", "text": "def sandwish():\n\tdata = mx.sym.Variable(name = 'data')\n\tconv = mx.sym.Convolution(data = data, num_filter = 1, kernel = (3,3))\n\tlogis = mx.sym.LogisticRegressionOutput(data = conv, name = 'softmax')\n\treturn logis", "title": "" }, { "docid": "c9cfcf35e07b8004a17c590d6da6e9e5", "score": "0.58567363", "text": "def ids_logistic():\n\n\tdf = ids_load_df_from_csv (outdir, file)\n\tX_train, X_val, X_test, y_train, y_val, y_test = ids_split(df)\n\n\tscaler = StandardScaler()\n\tX_train = scaler.fit_transform(X_train)\n\t# print ('mean', X_train.mean(axis=0))\n\t\n\t# max_iter set to a large value to prevent LogisticRegression() from complaining that\n\t# it is not coverging\n\tlogreg = LogisticRegression(max_iter=10000)\n\tlogreg.fit(X_train, y_train)\n\n\tX_val = scaler.transform(X_val)\n\t# print ('mean', X_val.mean(axis=0))\n\n\ty_pred = logreg.predict(X_val)\n\t\n\tids_metrics(y_val, y_pred)", "title": "" }, { "docid": "d5f16fadb414ee319cab576fe28c4052", "score": "0.58396363", "text": "def _from_scala(tc, scala_model):\n return 
LogisticRegressionModel(tc, scala_model)", "title": "" }, { "docid": "16e4d97fed1bef7a4c374fc8a2a85f73", "score": "0.5824762", "text": "def create_linear_regression_model(self):\n\n model = LinearRegression()\n model.fit(self.X_train, self.y_train)\n score = model.score(self.X_test, self.y_test)\n print('Linear regression model:') \n print('score:\\t\\t{}'.format(score))", "title": "" }, { "docid": "d6df3d20e7683d64bea5ee3f3b7f4574", "score": "0.5820525", "text": "def predict(self, X):\n linear = self.base_regressor.predict(X)\n return np.array([self._logistic_function(a) for a in linear])", "title": "" }, { "docid": "954bdc0c104bc6d8e22225db61ac3be3", "score": "0.5815118", "text": "def train_model():\n df = pd.read_csv(\n '../Omega2020/data/dataset.csv').drop('Unnamed: 0',\n axis=1)\n df = df.drop(df[df.Level == 'TEST'].index)\n df['Tracker'] = df['Sudoku'].apply(lambda x: tracker(conv_values(x)))\n df[['Single', 'Candidate', 'Twins',\n 'Triples', 'Guess']] = pd.DataFrame(df['Tracker'].values.tolist(),\n index=df.index)\n target = ['Level']\n features = ['Single', 'Candidate', 'Twins', 'Triples', 'Guess']\n y = df[target]\n X = df[features]\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=0.2,\n random_state=42)\n model = LogisticRegression(solver='lbfgs',\n multi_class='auto',\n max_iter=1000)\n outfile = open('difficulty_level_model', 'wb')\n pickle.dump(model.fit(X_train, y_train.values.ravel()), outfile)\n outfile.close()", "title": "" }, { "docid": "900eaa63631a1cc34305469c8d1fba41", "score": "0.58008456", "text": "def train_model(df, method = None, save_model = None, **kwargs):\n #Assum method defined, in this case, logistic regression\n assert method in methods.keys()\n \n #get predictors from get_features method\n if \"get_features\" in kwargs:\n X = get_features(df, **kwargs[\"get_features\"])\n else:\n X = df\n \n X = normalize_features(X)\n\n #get predicted value from get_target method\n if \"get_target\" in kwargs:\n y = get_target(df, **kwargs[\"get_target\"])\n df = df.drop(labels = [kwargs[\"get_target\"][\"target\"]],axis = 1)\n else:\n y = None\n\n #Split train set and test set\n\n X,y = train_test_split(X, y, **kwargs[\"train_test_split\"])\n\n #Specified the method. 
In this case, logistic regression.\n model = methods[method]()\n\n #Fit model\n\n model.fit(X[\"train\"], y[\"train\"])\n\n #Save model if specified\n if save_model is not None:\n with open(save_model, \"wb\" ) as f: #write and binary\n pickle.dump(model,f)\n logger.info(\"Trained model save to %s\", save_model)\n return model", "title": "" }, { "docid": "da8a8c863ae88b87c601fa8033f4b983", "score": "0.57865244", "text": "def model(self):\n\n self.model = sm.OLS(self.y, sm.add_constant(self.X)).fit()\n\n # create df from X, y for easier plot handling\n self.df = pd.concat([self.X, self.y], axis=1)\n\n # Getting miscallaneous properties\n self.modelted_y = self.model.fittedvalues\n self.model_residuals = self.model.resid\n self.model_norm_residuals = self.model.get_influence().resid_studentized_internal\n self.model_norm_residuals_abs_sqrt = np.sqrt(\n np.abs(self.model_norm_residuals))\n self.model_abs_resid = np.abs(self.model_residuals)\n self.model_leverage = self.model.get_influence().hat_matrix_diag\n self.model_cooks = self.model.get_influence().cooks_distance[0]\n\n print(self.model.summary())", "title": "" }, { "docid": "cb2f50d88f7dc5ddd8e8e81cc6534aec", "score": "0.5767734", "text": "def outcomeAnalysis(cytomod_obj, patient_data,\n analyzeModules=True,\n outcomeVars=[],\n adjustmentVars=[],\n standardize=True,\n logistic=True):\n\n modStr = 'Module' if analyzeModules else 'Analyte'\n resL = []\n standardizeFunc = lambda col: (col - np.nanmean(col)) / np.nanstd(col)\n for outcome in outcomeVars:\n \"\"\"Logistic regression on outcome\"\"\"\n if analyzeModules:\n dataDf = cytomod_obj.modDf\n else:\n dataDf = cytomod_obj.cyDf\n if standardize: # standardize cytokine values\n dataDf = dataDf.apply(standardizeFunc)\n\n predictors = dataDf.columns\n data_outcome_Df = patient_data[outcomeVars + adjustmentVars].join(dataDf).copy() # why outcomeVars and not outcome? 
make sure we can change this\n\n if standardize:\n if not logistic: # if continuous outcomes, standardize to normal distribution Z\n data_outcome_Df[[outcome]] = data_outcome_Df[[outcome]].apply(standardizeFunc)\n\n if adjustmentVars != []: # if there are any covariates\n for covariate in adjustmentVars: # standardize each to normal distribution Z\n if len(data_outcome_Df[covariate].unique()) > 2: # but only if the covariate is not binary\n data_outcome_Df[[covariate]] = data_outcome_Df[[covariate]].apply(standardizeFunc)\n\n tmpres = GLMResults(data_outcome_Df, outcome, predictors, adj=adjustmentVars, logistic=logistic)\n tmpres['Outcome'] = outcome\n tmpres['Compartment'] = cytomod_obj.sampleStr\n tmpres['Adjusted'] = 'Yes' if cytomod_obj.adjusted else 'No'\n tmpres['Fold-diff'] = np.exp(tmpres['Diff'])\n tmpres[modStr] = predictors\n resL.append(tmpres)\n\n resDf = pd.concat(resL, axis=0, ignore_index=True)\n return resDf", "title": "" }, { "docid": "bd575a521d01dd006833786619ee57f7", "score": "0.5763915", "text": "def sample_report():\n\n def _get_data(df, scoring=True, return_test_df=False):\n x_train, x_test, y_train, y_test = prep_data(df, dv_name=\"DV\")\n model = LogisticRegression(solver=\"lbfgs\", max_iter=1000)\n lr = model.fit(x_train, y_train)\n yhat_test = lr.predict_proba(x_test)\n yhat_train = lr.predict_proba(x_train)\n print(\"x_train.shape\", x_train.shape)\n report = ClassificationEvaluation(\n model, x_train, y_train, x_test, y_test, yhat_train, yhat_test\n )\n\n return_val = [report, model, x_train, y_train]\n if return_test_df:\n return_val += [x_test, y_test]\n return return_val\n\n return _get_data", "title": "" }, { "docid": "aa8e5b07ccf4d7bb18799db6c3cd0cb1", "score": "0.575224", "text": "def classifier(df):\n y = df.pop('label')\n X = df.values\n X_train, X_test, y_train, y_test = (\n train_test_split(X, y, test_size=0.33, random_state=42)\n )\n gbc = GradientBoostingClassifier(n_estimators=200, learning_rate=0.1, max_features=\"auto\")\n logistic = LogisticRegression(n_jobs=-1)\n mod4 = gbc.fit(X_train, y_train)\n mod3 = logistic.fit(X_train, y_train)", "title": "" }, { "docid": "49457835d970bfdaf5bdd5e098438de9", "score": "0.57473004", "text": "def __default_param_space_LogisticRegression(self):\n param_space = {\n 'predictor__learning_rate': [0.0, 1.0],\n 'predictor__lambda': [0.0, 1.0]\n }\n \n # -----------------------------------------------------------\n \n return param_space", "title": "" }, { "docid": "2abeca9c0fbd65f7041a6559dfd5b7b2", "score": "0.5737116", "text": "def predict_logits(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "e1afe761222b8620b6df61af8e792aa9", "score": "0.5729294", "text": "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n\n report = classification_report(y_pred, Y_test, target_names= category_names, output_dict=True)\n\n print(report)\n\n\n return report", "title": "" }, { "docid": "50aad8d9fc849fe0b5faa6a296198839", "score": "0.57261896", "text": "def test_l1logistic_binary():\n n_inf = 10\n X, y, w, b = make_classification(n_samples=200,\n random_state=6,\n n_informative=n_inf,\n n_features=20,\n w_scale=4.,\n include_intercept=True)\n\n l1log = UoI_L1Logistic(random_state=10).fit(X, y)\n assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8", "title": "" }, { "docid": "d07f7fbc853e10c8e7980153015cb32f", "score": "0.5717517", "text": "def __init__( self, params=None ):\n # From http://aimotion.blogspot.com/2011/11/machine-learning-with-python-logistic.html\n 
self.summaries = {}", "title": "" }, { "docid": "f9113571def9464f68f4789f42fc53ff", "score": "0.5705211", "text": "def main1():\n loader = TRACEEligibleLoader()\n securities = loader.load(pickle_name=\"fromTRACEELIGIBLE.p\")\n print(securities.columns)\n\n # We need to exclude Next Call Date, WAC, and Current WAL since they give prepayment information\n X = securities.drop(['Is Mortgage Paid Off', \"Next Call Date\", \"WAC\", \"Current WAL\", \"Amt Out\"], axis=1)\n \n y = securities['Is Mortgage Paid Off'].values.reshape(-1,1)\n\n\n transformer=TRACETransformer(categoricalColumns=[\"BBG Composite\", \"Day Count\", \"Category\", \"isCallable\"], dateColumns=[\"Issue Date\", \"Maturity\"], \n labelColumns=[\"CUSIP\", \"Security Name\", \"Ticker\"])\n X=transformer.fit_transform(X)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=20)\n\n logistic_base = LogisticRegression()\n log_params = {'penalty': ['l2', 'l1', 'elasticnet', 'none'], 'C' : [0.1, 1, 10, 100, 1000]}\n log_search = RandomizedSearchCV(logistic_base, log_params, n_iter=200, cv=3, verbose=2, n_jobs=-1)\n\n svm_base = SVC()\n svm_params = {'C': [0.1, 1, 10, 100, 1000], 'kernel' : ['rbf', 'linear', 'sigmoid']}\n svm_search = RandomizedSearchCV(svm_base, svm_params, n_iter=200, cv=3, verbose=2, n_jobs=-1)\n\n kNN_base = KNeighborsClassifier()\n kNN_params = {'n_neighbors' : [i for i in range(2, 50, 5)], \n 'leaf_size' : [i for i in range(30, 60, 5)]}\n kNN_search = RandomizedSearchCV(kNN_base, kNN_params, n_iter=200, cv=3, verbose=2, n_jobs=-1)\n\n decision_tree_base = DecisionTreeClassifier()\n decision_tree_params = {'criterion' : ['gini', 'entropy'], 'max_depth' : [i for i in range(5, 50, 5)]}\n decision_tree_search = RandomizedSearchCV(decision_tree_base, decision_tree_params, n_iter=200, cv=3, verbose=2, n_jobs=-1)\n\n log_search.fit(X_train, y_train.ravel())\n svm_search.fit(X_train, y_train.ravel())\n kNN_search.fit(X_train, y_train.ravel())\n decision_tree_search.fit(X_train, y_train.ravel())\n\n sys.stdout = open(\"Classification Results.txt\", \"w\") \n\n \n for j, i in [(logistic_base, log_search), (svm_base, svm_search), (kNN_base, kNN_search), (decision_tree_base, decision_tree_search)]:\n j.set_params(**i.best_params_)\n j.fit(X_train, y_train.ravel())\n evaluation(j, X_train, y_train, X_test, y_test)\n \n sys.stdout.close()", "title": "" }, { "docid": "987e796669afbd9e4c6dd5bc0ff68c9a", "score": "0.56974906", "text": "def run_analytics(self):\n print()\n print(\"CLASSIFIER ANALYSIS: \")\n print()\n self.calculate_precision()\n print()\n self.calculate_recall()\n print()\n self.calculate_fbeta_score()\n print()\n print(\"=== Accuracy ===\")\n print(\"Model Accuracy:\", self.calculate_accuracy())", "title": "" }, { "docid": "301424cb84aaf0a7474d5bc7b36ac046", "score": "0.56926787", "text": "def __init__(self):\n self.label = \"Logistic Regression Train\"\n self.description = 'Trains a classifier using Logistic regression'\n self.canRunInBackground = False\n self.category = \"Modelling\"", "title": "" }, { "docid": "47b0c5677f19d5cc1d303d02c35e9822", "score": "0.5687706", "text": "def logistic_regression_clf(base_data_folder=\"data/processed/pwgc/\",\n alg_name=\"\"):\n X, y, z = load_subject_Xyz(base_data_folder)\n le = LabelEncoder()\n y = le.fit_transform(y)\n\n X_train, y_train, X_valid, y_valid = stratified_split(X, y, test_size=0.2)\n\n clf = Pipeline(\n steps=[(\"kern\", Nystroem(kernel=\"poly\")),\n (\"qt\", QuantileTransformer(output_distribution=\"normal\")),\n 
(\"logistic\", LogisticRegression(dual=False, solver=\"lbfgs\",\n class_weight=\"balanced\",\n multi_class=\"multinomial\",\n tol=1e-3,\n max_iter=500, warm_start=True))]\n )\n param_grid = {\"kern__gamma\": np.linspace(0.2, 1.5, 20),\n \"kern__degree\": [3, 4, 5],\n \"kern__coef0\": [0.5, 1.0, 1.5],\n \"kern__n_components\": [30, 40, 50, 60],\n \"logistic__C\": np.logspace(-1, 1, 20),\n \"logistic__penalty\": [\"l2\"]}\n\n cv = RandomizedSearchCV(estimator=clf, param_distributions=param_grid,\n n_iter=75, n_jobs=3, cv=3, refit=True)\n cv.fit(X_train, y_train)\n\n y_hat_valid = cv.predict(X_valid)\n n_subjects = len(np.unique(y_valid))\n C = np.zeros((n_subjects, n_subjects))\n for label, pred in zip(y_valid, y_hat_valid):\n C[label, pred] += 1\n C = C / np.sum(C, axis=1)[:, None]\n plt.imshow(C)\n plt.colorbar()\n plt.title(\"Multiclass Confusion Matrix for Algorithm {} ($MCC = {:+.3f}$)\"\n \"\".format(alg_name.replace(\"_\", \"\\\\_\"),\n matthews_corrcoef(y_hat_valid, y_valid)))\n plt.xlabel(\"Predicted Label\")\n plt.ylabel(\"True Label\")\n saved_fig_name = (\"logistic_regression_{}_confusion_matrix.pdf\"\n \"\".format(alg_name))\n plt.savefig(\n FIGURE_DIR + saved_fig_name, bbox_inches=\"tight\", pad_inches=0)\n plt.savefig(FIGURE_DIR + saved_fig_name, bbox_inches=\"tight\", pad_inches=0)\n plt.show()\n return", "title": "" }, { "docid": "4a597a750e1d28e280c45f74dbb8047c", "score": "0.5687336", "text": "def supervised_liver(\n x: DataFrame,\n y: DataFrame,\n attributes: tp.Sequence[str] = [\"ALT\"],\n data_type: str = \"NMR\",\n log: bool = False,\n suffix: str = \"\",\n plot_all: bool = True,\n overwrite: bool = False,\n) -> None:\n from statsmodels.stats.outliers_influence import variance_inflation_factor\n import statsmodels.formula.api as smf\n\n output_dir = (results_dir / f\"supervised_{data_type}{suffix}\").mkdir()\n\n if log:\n x = np.log1p(x)\n\n # # convert ordinal categories to numeric\n for attr in [\"WHO_score_sample\", \"WHO_score_patient\"]:\n y[attr] = y[attr].cat.codes.astype(float).replace(-1, np.nan)\n for attr in attributes:\n if y[attr].dtype.name == \"Int64\":\n y[attr] = y[attr].astype(float)\n cats = list(filter(lambda w: ~is_numeric(y[w]), attributes))\n nums = list(filter(lambda w: is_numeric(y[w]), attributes))\n\n # Use also a MLM and compare to GLM\n stats_f = output_dir / \"supervised.joint_model.model_fits.csv\"\n\n attrs = [\"age\", \"race\", \"bmi\", \"WHO_score_sample\"]\n model_str = \"{} ~ \" + \" + \".join(attrs)\n\n repl_ = {\"/\": \"__\", \"+\": \"__plus__\", \"-\": \"__minus__\", \"(\": \"__op__\", \")\": \"__cl__\"}\n if data_type == \"flow_cytometry\":\n for k, v in repl_.items():\n x.columns = x.columns.str.replace(k, v, regex=False)\n\n # Assess colinearity\n d = y[attrs].dropna().assign(Intercept=1)\n for attr in attrs:\n if d[attr].dtype.name == \"category\":\n d[attr] = d[attr].cat.codes\n var = pd.Series(\n [variance_inflation_factor(d.values, i) for i in range(d.shape[1])], index=d.columns\n )\n assert (var.drop(\"Intercept\") < 5).all()\n var.to_csv(output_dir / \"supervised.variance_inflation_factor.csv\")\n\n if not stats_f.exists() or overwrite:\n data = z_score(x).join(y[attrs + [\"patient_code\"]]).dropna(subset=attrs)\n\n # # GLM\n _res_glm = list()\n for feat in tqdm(x.columns, desc=\"feature\", position=1):\n mdf = smf.glm(model_str.format(feat), data).fit()\n res = (\n mdf.params.to_frame(\"coefs\")\n .join(mdf.conf_int().rename(columns={0: \"ci_l\", 1: \"ci_u\"}))\n .join(mdf.pvalues.rename(\"pvalues\"))\n 
.assign(feature=feat)\n )\n # res = res.loc[res.index.str.contains(attr)]\n _res_glm.append(res)\n res_glm = pd.concat(_res_glm)\n res_glm[\"qvalues\"] = pg.multicomp(res_glm[\"pvalues\"].values, method=\"fdr_bh\")[1]\n\n # # # Mixed effect\n _res_mlm = list()\n for feat in tqdm(x.columns, desc=\"feature\", position=2):\n mdf = smf.mixedlm(model_str.format(feat), data, groups=data[\"patient_code\"]).fit()\n res = (\n mdf.params.to_frame(\"coefs\")\n .join(mdf.pvalues.rename(\"pvalues\"))\n .join(mdf.conf_int().rename(columns={0: \"ci_l\", 1: \"ci_u\"}))\n .assign(feature=feat)\n )\n # res = res.loc[res.index.str.contains(attr)]\n _res_mlm.append(res)\n res_mlm = pd.concat(_res_mlm)\n res_mlm[\"qvalues\"] = pg.multicomp(res_mlm[\"pvalues\"].values, method=\"fdr_bh\")[1]\n\n res = pd.concat([res_mlm.assign(model=\"mlm\"), res_glm.assign(model=\"glm\")])\n res = res.rename_axis(index=\"contrast\")\n if data_type == \"flow_cytometry\":\n for k, v in list(repl_.items())[::-1]:\n res[\"feature\"] = res[\"feature\"].str.replace(v, k, regex=False)\n res[\"feature\"] = res[\"feature\"].str.replace(\"/_\", \"_/\", regex=False)\n res.to_csv(stats_f)\n\n if data_type == \"flow_cytometry\":\n for k, v in list(repl_.items())[::-1]:\n x.columns = x.columns.str.replace(v, k, regex=False)\n x.columns = x.columns.str.replace(\"/_\", \"_/\", regex=False)\n\n # Plot\n res = pd.read_csv(stats_f, index_col=0).drop([\"Intercept\", \"Group Var\"]).query(\"model == 'mlm'\")\n res.index = res.index.str.replace(r\"\\[.*\", \"\")\n p = (res[\"coefs\"].abs()).groupby(level=0).sum()\n p = (res[\"qvalues\"] < 0.05).groupby(level=0).sum()\n fig, ax = plt.subplots(figsize=(3, 3))\n sns.barplot(x=p, y=p.index, color=sns.color_palette()[0])\n for i, (idx, c) in enumerate(p.iteritems()):\n ax.text(c, i, s=c, va=\"center\")\n ax.set(\n xlabel=\"Number of significant features\", ylabel=\"Factors\", xlim=(0, p.max() + p.max() * 0.1)\n )\n fig.savefig(output_dir / f\"supervised.joint_model.number_significant.svg\", bbox_inches=\"tight\")\n\n attribute = \"WHO_score_sample\"\n res = res.loc[attribute]\n assert np.allclose((res[\"ci_l\"] - res[\"coefs\"]).abs(), (res[\"ci_u\"] - res[\"coefs\"]).abs())\n res[\"ci\"] = res[\"coefs\"] - res[\"ci_l\"]\n\n output_prefix = output_dir / f\"supervised.joint_model.{attribute}.\"\n\n res_glm = res.query(\"model == 'glm'\").set_index(\"feature\")\n res_mlm = res.query(\"model == 'mlm'\").set_index(\"feature\")\n\n cglm = res_glm[\"coefs\"].rename(\"GLM\")\n cmlm = res_mlm[\"coefs\"].rename(\"MLM\")\n c = cglm.to_frame().join(cmlm)\n pglm = res_glm[\"pvalues\"].rename(\"GLM\")\n pmlm = res_mlm[\"pvalues\"].rename(\"MLM\")\n p = pglm.to_frame().join(pmlm)\n q = p.copy()\n for col in q:\n q[col] = pg.multicomp(q[col].values, method=\"fdr_bh\")[1]\n\n # Compare models\n v = c.abs().max()\n v += v * 0.1\n fig, ax = plt.subplots(1, 1, figsize=(4, 4))\n ax.plot((-v, v), (-v, v), linestyle=\"--\", color=\"grey\", zorder=-2)\n ax.scatter(\n c[\"GLM\"],\n c[\"MLM\"],\n c=p.mean(1),\n cmap=\"Reds_r\",\n vmin=0,\n vmax=1.5,\n s=5,\n alpha=0.5,\n )\n _s = pg.corr(c[\"GLM\"], c[\"MLM\"]).squeeze()\n ax.set(\n title=attribute + f\"; r = {_s['r']:.3f}\",\n xlabel=r\"$\\beta$ (GLM)\",\n ylabel=r\"$\\beta$ (MLM)\",\n )\n fig.savefig(\n output_prefix + \"General_vs_Mixed_effect_model_comparison.scatter.svg\",\n **figkws,\n )\n\n # Plot all variables as rank vs change plot\n # # check error is symmetric\n var = get_feature_annotations(x, data_type=data_type).reindex(c.index)\n var[\"unit\"] = \"%\"\n cat = 
var[\"group\"].astype(pd.CategoricalDtype())\n cmap = sns.color_palette(\"tab20\")\n score = (-np.log10(res_mlm[\"qvalues\"])) * (res_mlm[\"coefs\"] > 0).astype(int).replace(0, -1)\n ci = res_mlm[\"ci\"].rename(\"MLM\")\n qmlm = res_mlm[\"qvalues\"]\n n_top = 10\n\n fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(6, 3 * 2))\n for ax in fig.axes:\n ax.axhline(0, linestyle=\"--\", color=\"grey\")\n feats = (\n score.sort_values().head(n_top).index.tolist()\n + score.sort_values().tail(n_top).index.tolist()\n )\n for ax, crit, text in zip(fig.axes, [ci.index, feats], [False, True]):\n rank = score.loc[crit].rank()\n for i, group in enumerate(cat.unique()):\n sel = (cat == group) & cat.index.to_series().rename(\"group\").isin(rank.index)\n # ax.errorbar(\n # rank.loc[sel],\n # score.loc[sel],\n # fmt=\"o\",\n # yerr=ci.loc[sel],\n # color=cmap[i],\n # alpha=0.2,\n # )\n f = sel[sel].index\n ax.scatter(\n rank.loc[f],\n score.loc[f],\n color=cmap[i],\n s=10 + 2.5 ** -np.log10(qmlm.loc[f]),\n alpha=0.5,\n label=group,\n )\n if text:\n for idx in rank.loc[f].index:\n ax.text(rank.loc[idx], score.loc[idx], s=idx, rotation=90, ha=\"center\")\n v = (score.abs() + res_mlm[\"ci\"]).max()\n v += v * 0.1\n ax.set(\n title=attribute,\n xlabel=r\"Metabolites (ranked)\",\n ylabel=r\"Change with COVID-19 severity (signed -log10(p-value))\",\n ylim=(-v, v),\n )\n ax0.legend(loc=\"upper left\", bbox_to_anchor=(1, 1))\n from matplotlib.lines import Line2D\n\n _p = qmlm.min()\n s0 = Line2D([0], [0], marker=\"o\", label=\"1.0 (max)\", markersize=np.sqrt(10 + 1))\n s1 = Line2D(\n [0],\n [0],\n marker=\"o\",\n label=f\"{_p:.3e} (min)\",\n markersize=np.sqrt(10 + 2.5 ** -np.log10(_p)),\n )\n ax1.legend(handles=[s0, s1], title=\"FDR\", loc=\"upper left\", bbox_to_anchor=(1, 0))\n ax1.axvline((ax1.get_xlim()[1] - ax1.get_xlim()[0]) / 2, linestyle=\"--\", color=\"grey\")\n ax0.axvline(n_top, linestyle=\"--\", color=\"grey\")\n ax0.axvline(score.shape[0] - n_top, linestyle=\"--\", color=\"grey\")\n fig.savefig(\n output_prefix + \"rank_vs_change.scatter.svg\",\n **figkws,\n )", "title": "" }, { "docid": "0fd7e75f53d4a6085ab6652fecf75a92", "score": "0.5685907", "text": "def logistic_regressor_model_fn(features, labels, mode, params):\n thresholds = params.get('thresholds') or [.5]\n logits = layers.linear(\n parse_tensor_or_dict(features),\n 1,\n weights_initializer=init_ops.zeros_initializer(),\n # Intentionally uses really awful initial values so that\n # AUC/precision/recall/etc will change meaningfully even on a toy\n # dataset.\n biases_initializer=init_ops.constant_initializer(-10.0))\n predictions = math_ops.sigmoid(logits)\n loss = loss_ops.sigmoid_cross_entropy(logits, labels)\n train_op = optimizers.optimize_loss(\n loss, variables.get_global_step(), optimizer='Adagrad',\n learning_rate=0.1)\n\n if mode == model_fn_lib.ModeKeys.EVAL:\n eval_metric_ops = _make_logistic_eval_metric_ops(\n labels=labels,\n predictions=predictions,\n thresholds=thresholds)\n else:\n eval_metric_ops = None\n return model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n output_alternatives={\n 'head': (constants.ProblemType.LOGISTIC_REGRESSION, {\n 'predictions': predictions\n })\n })", "title": "" }, { "docid": "fbd8cda5bb60d85094f5af6b49391090", "score": "0.5676965", "text": "def predict(model, X_test):", "title": "" }, { "docid": "1936ae0d2886b891a58752936c4a854a", "score": "0.56710136", "text": "def logistic(x):\n return np.exp(x)/(1+np.exp(x))", 
"title": "" }, { "docid": "86d04703f9b453e14a52c2e052fd18c3", "score": "0.5668811", "text": "def test_l1logistic_multiclass():\n n_features = 20\n n_inf = 10\n X, y, w, b = make_classification(n_samples=200,\n random_state=10,\n n_classes=5,\n n_informative=n_inf,\n n_features=n_features,\n shared_support=True,\n w_scale=4.)\n l1log = UoI_L1Logistic().fit(X, y)\n assert (np.sign(abs(w)) == np.sign(abs(l1log.coef_))).mean() >= .8", "title": "" }, { "docid": "75ce7b8b96991c090366ecd45a224aff", "score": "0.56657535", "text": "def train(model_name, basepath):\n clf_fpath = os.path.join(basepath, model_name, model_name+u\".clf\")\n imap_fpath = os.path.join(basepath, model_name, u\"intent_map.json\")\n\n intent_map = get_intent_map(imap_fpath)\n data = get_labeled_data(basepath)\n targets = map(str, sorted(intent_map.values()))\n\n all_x = map(lambda t: t[0], data)\n all_y = map(lambda t: t[1], data)\n\n x_vectors = vectorization_x(all_x)\n y_vectors = vectorization_y(all_y, intent_map)\n\n clf = LogisticRegression(class_weight=u\"balanced\", C=0.5)\n\n x_train, x_test, y_train, y_test = train_test_split(x_vectors, y_vectors, test_size=0.0, random_state=42)\n clf_model = clf.fit(x_train, y_train)\n predictions = clf_model.predict(x_train)\n cr = classification_report(y_train, predictions, target_names=targets)\n _ = joblib.dump(clf_model, clf_fpath, compress=9)\n return cr", "title": "" }, { "docid": "e7b802c58ea727981f646c15c4590a7a", "score": "0.56595856", "text": "def logistic(weights, data, targets, hyperparameters):\n\n # TODO: Finish this function\n N, M = data.shape\n data_with_bias = np.c_[data, np.ones(N)]\n lin_res = data_with_bias.dot(weights)\n f = targets.T.dot(np.log(1. + np.exp(-lin_res))) + \\\n (1 - targets).T.dot(np.log(1. + np.exp(lin_res)))\n y = sigmoid(lin_res)\n df = np.sum((targets * (y - 1.) 
+ (1 - targets) * y) * \\\n data_with_bias, axis=0).reshape((M + 1, 1))\n \n return f, df, y", "title": "" }, { "docid": "7fb8d33f9b6164cdf6f220c257d8f96d", "score": "0.56508255", "text": "def evaluate_model(model):\n # Print classification report\n y_pred = model.predict(X_test)\n print(classification_report(y_test, y_pred, target_names=y_test.columns))", "title": "" }, { "docid": "9c05795c4b34f8b129b0ec3f57392809", "score": "0.56417185", "text": "def fit(self, X, y):\n\n\n # Check that X and y have correct shape\n X, y = check_X_y(X, y)\n # Store the classes seen during fit\n self.classes_ = unique_labels(y)\n\n self.X_ = X\n self.y_ = y\n\n # Setup LogisticRegression and call fit()\n self._logit = LogisticRegression(max_iter=self.max_iter, solver=self.solver, random_state=self.random_state)\n self._logit.fit(self.X_, self.y_)\n\n # Setup ThresholdBinarizer, fit() store y_true\n self._binarizer = ThresholdBinarizer()\n self._binarizer.fit(y.reshape(-1, 1))\n\n # Return the classifier\n return self", "title": "" }, { "docid": "b2af11d72938fbf8a2a93fc09d8b69b2", "score": "0.561963", "text": "def predict(self, X):\r\n\r\n # Check arrays\r\n self.X = np.asarray(X)\r\n \r\n # Concatenate ones to the testing set\r\n x_ones = np.ones((self.X.shape[0], 1))\r\n self.X = np.concatenate((x_ones, self.X), axis=1)\r\n \r\n # Binary Logistic Regression\r\n if self.classes == 2:\r\n label = []\r\n \r\n # Iterate through each testing sample\r\n for x in self.X:\r\n \r\n # Calculate the probability using the hypothesis function \r\n tmp = sigmoid(np.dot(x, self.theta))\r\n \r\n # If the probability is greater than 0.5 threshold, assign it the label of class 1\r\n if tmp >= 0.50:\r\n label.append(1)\r\n \r\n # Else assign it the label of class 0\r\n else:\r\n label.append(0)\r\n \r\n return np.array(label)\r\n \r\n # If the number of classes is greater than 2 and one-versus-rest classification\r\n elif (self.classes > 2) & (self.multi == 'OVR'):\r\n label = []\r\n \r\n # Iterate through each testing sample\r\n for x in self.X:\r\n tmp_list = []\r\n \r\n # Iterate through each class\r\n for i in range(self.classes):\r\n # Calculate the probabilities using the hypothesis function\r\n tmp = sigmoid(np.dot(x,self.theta[i, :]))\r\n tmp_list.append(tmp)\r\n \r\n # Assign the class label with the greatest probability\r\n max_ = np.argmax(tmp_list)\r\n label.append(max_)\r\n \r\n return np.array(label)\r\n \r\n # If the number of classes is greater than 2 and multinomial classification\r\n elif (self.classes > 2) & (self.multi == 'Multinomial'):\r\n label = []\r\n \r\n # Iterate through each testing sample\r\n for x in self.X:\r\n tmp_list = []\r\n \r\n # Iterate through each class\r\n for i in range(self.classes):\r\n # Calculate the probability using the hypothesis function\r\n tmp = softmax(x = x, y = self.y,\r\n theta = self.theta, idx = i)\r\n tmp_list.append(tmp)\r\n \r\n # Assign the class label with the greatest probability\r\n max_ = np.argmax(tmp_list)\r\n label.append(max_)\r\n \r\n return np.array(label)", "title": "" }, { "docid": "c37638e2e43f6e70fc37875547696656", "score": "0.55967665", "text": "def predict(w,x):\n \n return logistic(np.dot(w,x)) > 0.5 or -1", "title": "" }, { "docid": "f7b2df10682787c39880e1426d4d0be7", "score": "0.5583601", "text": "def fit_model(y, x, covars = None):\n if x.ndim == 1:\n x = x.reshape(-1,1) # make sure dim is (n,1) and not(n,)\n if y.ndim == 1:\n y = y.reshape(-1, 1)\n\n # X should have a column of ones, the site of interest and the covariates\n X = x\n if covars 
is not None:\n X = column_stack((covars, X))\n n = X.shape[0] # number of sites\n X = np.concatenate((np.ones((n,1)), X), axis=1)\n \n \n logit = sm.Logit(y,X)\n result = logit.fit(disp=False) # False disable the print of \"Optimization terminated successfully\" message\n\n # from doc - \n # result.params # The parameters of a fitted model - same as coef if you print result.summary()\n # result.pvalues # p values\n # result.tvalues # Return the t-statistic for a given parameter estimate.\n return result.params, result.tvalues, result.pvalues #coefficients, t-statistic and p-values", "title": "" }, { "docid": "9352cb6e68bc20be4a8905a5c46b0505", "score": "0.55808806", "text": "def evaluate_model(model, X_test, y_test, category_names):\n y_pred=model.predict(X_test)\n print(classification_report(y_test, y_pred, target_names=category_names))", "title": "" }, { "docid": "9a1d44392192bb3b64b2783c7ce479e4", "score": "0.5580376", "text": "def analysis(self, **kwargs) -> list:\r\n\r\n pass", "title": "" }, { "docid": "72870726c096697c207846253bc70e4c", "score": "0.55779994", "text": "def decision_function(self, X):\n ...", "title": "" }, { "docid": "72870726c096697c207846253bc70e4c", "score": "0.55779994", "text": "def decision_function(self, X):\n ...", "title": "" }, { "docid": "4d5f8eb7c479ec9d4ebe0f45891ad61d", "score": "0.55636555", "text": "def logistic(x):\n return 2.0 / (1 + exp(-x)) - 1.0", "title": "" }, { "docid": "4d5f8eb7c479ec9d4ebe0f45891ad61d", "score": "0.55636555", "text": "def logistic(x):\n return 2.0 / (1 + exp(-x)) - 1.0", "title": "" }, { "docid": "bb2c55ce0003fb24d1e3fa9344d8dc90", "score": "0.55611604", "text": "def main():\n # Load the data set for training and testing the logistic regression classifier\n dataset = pd.read_csv(DATA_SET_PATH)\n print (dataset['Hogwarts House'].unique())", "title": "" }, { "docid": "da66b0d81e68127339738ff85634d7d9", "score": "0.5553217", "text": "def test_estimation_score_usage():\n methods = ('acc', 'log')\n X, y, w, b = make_classification(n_samples=100,\n random_state=6,\n n_informative=2,\n n_features=6)\n scores = []\n for method in methods:\n l1log = UoI_L1Logistic(random_state=12, estimation_score=method)\n assert_equal(l1log.estimation_score, method)\n l1log.fit(X, y)\n score = np.max(l1log.scores_)\n scores.append(score)\n assert_equal(len(set(scores)), len(methods))", "title": "" }, { "docid": "b5a746b13953a1f763a638a4d40b965b", "score": "0.5547505", "text": "def classification_report(self,X,y):\n y_pred = self.predict(X)\n clfr = classification_report(y, y_pred)\n\treturn clfr", "title": "" }, { "docid": "e4538dd6c6d30e636e4997939daa1ca5", "score": "0.5545217", "text": "def evaluate_model(model, X_test, Y_test):\n Y_pred = model.predict(X_test)\n for index, column in enumerate(Y_test):\n print(column, classification_report(Y_test[column], Y_pred[:, index]))", "title": "" }, { "docid": "c61c7e6a552575f3304b1db300b34d96", "score": "0.5531566", "text": "def pickleModel(self):\n print 'Saving model to file...'\n logit = LogisticRegression(C=self.C, penalty='l1')\n logit.fit(self.X_mapped,self.y)\n \n with open('model','w') as myFile:\n pickle.dump({'logit':logit,'degree':self.degree,'useInverse':self.useInverse,'mean':self.mean,'stdev':self.stdev,'n':self.n,'m':self.m},myFile)", "title": "" }, { "docid": "521149dbc7c057aca05ac1752e6aba96", "score": "0.550026", "text": "def predict_log_proba(self, X):\n ...", "title": "" }, { "docid": "b854853bbd597a851d5d46961b61491e", "score": "0.54991263", "text": "def Logistic(mu, s, x):\n return 
1 / (1 + np.exp(-(x - mu)/s))", "title": "" }, { "docid": "7c3364ca626d04b66cb21f3cc5a8ca9f", "score": "0.54977155", "text": "def __init__(self):\n self.users = dict()\n self.items = dict()\n self.lr = LogisticRegression()\n self.X_train = pd.DataFrame()\n self.Y_train = pd.DataFrame()", "title": "" }, { "docid": "ccbf045a62c5ee95e3a19ba84f25c71a", "score": "0.5497138", "text": "def predict(self, X, use_probability = False):\n\n if not hasattr(self, 'w'):\n print('LogisticModel has not been fit.')\n return(None)\n\n pred = X.dot(self.w)\n\n if use_probability:\n odds = np.exp(pred)\n pred = odds / (1 + odds)\n\n return(pred)", "title": "" }, { "docid": "2d7b26cd119c8b6a06cce692e83a2d7e", "score": "0.5496659", "text": "def runCode():\n import scipy.io\n import numpy as np\n\n # Read Train and Test file. which are .mat files\n # Read Train\n mat = scipy.io.loadmat('Train_data.mat')\n train = mat['train']\n # Shuffle Data\n np.random.shuffle(train)\n\n # Separate Label from train\n train = np.transpose(train)\n yTrain = train[len(train)-1]\n train = train[0:-1]\n # Add feature X0 which is all one\n RowOfOnes = np.array([1.0]*np.shape(train)[1])\n train = np.vstack([RowOfOnes, train])\n train = np.transpose(train)\n yTrain = np.transpose(yTrain)\n # Convert labels from -1,1 to 0,1\n for ind, y in enumerate(yTrain):\n if y == -1:\n yTrain[ind] = 0\n\n # Read Test\n mat = scipy.io.loadmat('Test_Data.mat')\n test = mat['test']\n # Shuffle Data\n np.random.shuffle(test)\n\n # Separate Label from train\n test = np.transpose(test)\n yTest = test[len(test) - 1]\n test = test[0:-1]\n # Add feature X0 which is all one\n RowOfOnes = np.array([1.0] * np.shape(test)[1])\n test = np.vstack([RowOfOnes, test])\n test = np.transpose(test)\n yTest = np.transpose(yTest)\n # Convert labels from -1,1 to 0,1\n for ind, y in enumerate(yTest):\n if y == -1:\n yTest[ind] = 0\n\n # Use Gradient Decent to minimize optimal weights\n weight, scales, costOfTrainDataSet, accuracyOfTestDataSet = logistic_gradient_descent(xTrain=train,\n yTrain=yTrain, numberOfIter=100,\n learningRate=0.5, xTest=test, yTest=yTest)\n\n print(weight,scales, costOfTrainDataSet, accuracyOfTestDataSet)\n print(\"Training Error: \", costOfTrainDataSet)\n print(\"Test accuracy: \", accuracyOfTestDataSet)", "title": "" }, { "docid": "17ff6154a1e0a9dd89cd3a3aa690ffbc", "score": "0.5486821", "text": "def setup_logistic_regression(lambda1=0., lambda2=0.):\n\n assert lambda1 >= 0\n assert lambda2 >= 0\n\n if lambda1 < LAMBDA_TOLERANCE and lambda2 < LAMBDA_TOLERANCE:\n return SGDClassifier(\n loss='log', penalty='none', fit_intercept=True, verbose=0,\n random_state=RANDOM_SEED\n )\n\n if lambda1 < LAMBDA_TOLERANCE:\n return SGDClassifier(\n loss='log', penalty='l2', alpha=lambda2, fit_intercept=True,\n verbose=0, random_state=RANDOM_SEED\n )\n\n if lambda2 < LAMBDA_TOLERANCE:\n return SGDClassifier(\n loss='log', penalty='l1', alpha=lambda1, fit_intercept=True,\n verbose=0, random_state=RANDOM_SEED\n )\n\n alpha, l1_ratio = _lambdas_to_sklearn_inputs(\n lambda1=lambda1, lambda2=lambda2\n )\n\n return SGDClassifier(\n loss='log', penalty='elasticnet', alpha=alpha, l1_ratio=l1_ratio,\n fit_intercept=True, verbose=0, random_state=RANDOM_SEED\n )", "title": "" }, { "docid": "04cfee77e3c0e7411dbc446b19473624", "score": "0.5484187", "text": "def train_final_model(classifier, train_val, field=\"statement\",feature_rep=\"binary\"):\n \n logging.info(\"Starting model training...\") \n\n # features\n train_x=train_val['statement']\n \n # GET LABELS\n 
target=train_val['label'].values\n \n # GET FEATURES\n features,feature_transformer=extract_final_features(field,train_x,type=feature_rep)\n\n # INIT LOGISTIC REGRESSION CLASSIFIER\n logging.info(\"Training a Final Model...\")\n# scikit_log_reg = LogisticRegression(verbose=1, solver='liblinear',random_state=0, C=5, penalty='l2',max_iter=1000)\n model=classifier.fit(features,target)\n\n logging.info(\"Done training.\")\n \n return model,feature_transformer", "title": "" }, { "docid": "a5c5a097a876e345e40a872e1a87cd44", "score": "0.5475346", "text": "def linear1():\r\n #1.obatin dataset\r\n boston=load_boston()\r\n\r\n #2.split dataset\r\n x_train,x_test,y_train,y_test=train_test_split(boston.data, boston.target,random_state=22)\r\n\r\n #3.feature: normalization\r\n transfer=StandardScaler()\r\n x_train=transfer.fit_transform(x_train)\r\n x_test=transfer.transform(x_test)\r\n\r\n #4.estimator\r\n #fit()model\r\n estimator= LinearRegression()\r\n estimator.fit(x_train,y_train)\r\n #coef_intercept\r\n print(\"coef:\\n\",estimator.coef_)\r\n print(\"intercept:\\n\",estimator.intercept_)\r\n\r\n\r\n #save model\r\n joblib.dump(estimator,\"my_LR.pkl\")\r\n #load model\r\n estimator=joblib.load(\"my_LR.pkl\")\r\n\r\n #5.model evaluation\r\n y_predict = estimator.predict(x_test)\r\n print(\"y_predict:\\n\", y_predict)\r\n error = mean_squared_error(y_test, y_predict)\r\n print(\"LR error:\\n\", error)\r\n\r\n return None", "title": "" }, { "docid": "d8ac02346fad342f09f9e66bb3deb0c4", "score": "0.5470754", "text": "def logistic_regression_alcoholism_visualization(\n base_data_folder=\"data/processed/pwgc/\", alg_name=\"\"):\n X, y, z = load_subject_Xyz(base_folder=base_data_folder)\n z = np.array(z == \"A\")\n\n qt = QuantileTransformer(output_distribution=\"normal\")\n kern = Nystroem(kernel=\"poly\", gamma=39.3, degree=3, coef0=9340.0,\n n_components=60, random_state=0)\n K = kern.fit_transform(X)\n K = qt.fit_transform(K)\n\n pls = PLSRegression(n_components=2, scale=False)\n pls.fit(K, z)\n X_proj = pls.x_scores_\n\n clf = SVC(kernel=\"rbf\")\n param_grid = {\"C\": np.logspace(-2, 2, 20),\n \"gamma\": np.logspace(-2, 2, 20)}\n\n cv = GridSearchCV(estimator=clf, param_grid=param_grid,\n n_jobs=3, cv=5, refit=True)\n cv.fit(X_proj, z)\n\n clf = cv.best_estimator_\n clf.probability = True\n clf.fit(X_proj, z)\n\n h = .01\n x_min, x_max = X_proj[:, 0].min() - 1, X_proj[:, 0].max() + 1\n y_min, y_max = X_proj[:, 1].min() - 1, X_proj[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 0]\n Z = Z.reshape(xx.shape)\n\n sel = (z == 0)\n plt.scatter(X_proj[sel, 0], X_proj[sel, 1], c=\"r\", marker=\"o\",\n label=\"Control\")\n plt.scatter(X_proj[~sel, 0], X_proj[~sel, 1], c=\"b\", marker=\"^\",\n label=\"Alcoholic\")\n plt.contourf(xx, yy, Z, cmap=plt.cm.viridis, alpha=0.5,\n vmin=0, vmax=1)\n plt.colorbar()\n plt.title(\"Separating Alcoholism by EEG Granger-Causality Graph \"\n \"from Algorithm {}\".format(alg_name.replace(\"_\", \"\\\\_\")))\n plt.ylabel(\"PLS Projection $y$\")\n plt.xlabel(\"PLS Projection $x$\")\n plt.legend()\n\n saved_fig_name = (\"logistic_regression_{}_alcoholism_separation.png\"\n \"\".format(alg_name))\n plt.savefig(FIGURE_DIR + saved_fig_name,\n bbox_inches=\"tight\", pad_inches=0)\n plt.savefig(FIGURE_DIR + saved_fig_name,\n bbox_inches=\"tight\", pad_inches=0)\n plt.show()\n\n return", "title": "" }, { "docid": "158a079caa78730567aab7903d66614c", "score": "0.5455322", "text": 
"def test_model(model, X_train, X_val, y_train, y_val,\n logfile='log.linear-models.txt'):\n model.fit(X_train, y_train)\n with open(logfile, 'a') as fp:\n evaluate(model, X_val, y_val, dest=fp)", "title": "" }, { "docid": "1aadb8a7ab98c1d0fe84eaf62da00369", "score": "0.54535526", "text": "def func(coef: pd.Series, data: Tuple[pd.DataFrame, pd.Series]) -> float:\n\n X_train, y_train = data\n prob = X_train.apply(lambda row: LogisticRegression.prob(coef, row), axis=1)\n return -np.mean(np.log(prob) * y_train + np.log(1 - prob) * (1 - y_train))", "title": "" }, { "docid": "866bfce41a96086d46686991436067ad", "score": "0.5450804", "text": "def modeliser(self):\r\n text_clf = Pipeline([('vect', self.vectorizer),('clf', self.classifieur)])\r\n text_clf.fit(self.X_train, self.y_train)\r\n predicted = text_clf.predict(self.X_test)\r\n report = classification_report(predicted, self.y_test)\r\n return self.clf, self.vec, report", "title": "" }, { "docid": "e14aef1082be3e7d92b674df23a9cac0", "score": "0.5450353", "text": "def logistic_regression(X, Y):\n m, n = X.shape\n theta = np.zeros(n)\n learning_rate = 1e-1\n Lambda = 0.05\n\n i = 0\n while True:\n i += 1\n prev_theta = theta\n grad = calc_grad(X, Y, theta) + Lambda/(m)*theta\n theta = theta - learning_rate * grad\n if i % 10000 == 0:\n print('Finished %d iterations' % i)\n print(np.linalg.norm(prev_theta - theta))\n if np.linalg.norm(prev_theta - theta) < 1e-15:\n print('Converged in %d iterations' % i)\n break\n print(theta)\n return theta", "title": "" } ]
2b27dd8aee7d95b8454db0feb276548e
Return the allocated type for this allocator.
[ { "docid": "29ce9c4bc71c3bb0aad8d3ebf014d135", "score": "0.74096143", "text": "def get_allocated_type():\n return origin.bind(Self.origin_node, Entity.type_or_expr.match(\n lambda t=SubtypeIndication.entity: t.designated_type,\n lambda q=QualExpr.entity: q.designated_type,\n lambda _: No(BaseTypeDecl.entity)\n ))", "title": "" } ]
[ { "docid": "973a29d668d185078df1d15b78c6e96c", "score": "0.682636", "text": "def get_typ(self, ):\n return self._typ", "title": "" }, { "docid": "7c874f510c1a1fd7f094a29e38d629a4", "score": "0.67755115", "text": "def getType(self):\n return self.__typeid.getType()", "title": "" }, { "docid": "0b93e352f57421b3d430dcf6ae0f3557", "score": "0.6700562", "text": "def type(self):\n\n return self.descr._v_types[self.name]", "title": "" }, { "docid": "e0751386a3e8acd5b520f2e4683911bb", "score": "0.6679658", "text": "def _get_type(self):\n return self.__type", "title": "" }, { "docid": "e0751386a3e8acd5b520f2e4683911bb", "score": "0.6679658", "text": "def _get_type(self):\n return self.__type", "title": "" }, { "docid": "defe07719740dc0544f5dfe1b83a50b0", "score": "0.6678648", "text": "def get_type(self):\n return self._type", "title": "" }, { "docid": "defe07719740dc0544f5dfe1b83a50b0", "score": "0.6678648", "text": "def get_type(self):\n return self._type", "title": "" }, { "docid": "35dbe2de2965128a49d043c3dd18677a", "score": "0.66678166", "text": "def get_type(self):\n return self.__type", "title": "" }, { "docid": "2ae2b93707a5cf80275f82c8a5dcd6bb", "score": "0.6642433", "text": "def type(self):\n return self.container['type']", "title": "" }, { "docid": "2ae2b93707a5cf80275f82c8a5dcd6bb", "score": "0.6642433", "text": "def type(self):\n return self.container['type']", "title": "" }, { "docid": "80802fe023a7ec2129b520836ecbd6dd", "score": "0.6627569", "text": "def type(self):\n # XXX what does this return?\n return TypeRef(ffi.lib.LLVMPY_TypeOf(self))", "title": "" }, { "docid": "15e2779c2eba75fb84a12ceb5c45f799", "score": "0.6613311", "text": "def get_type(self):\r\n return self._type", "title": "" }, { "docid": "15e2779c2eba75fb84a12ceb5c45f799", "score": "0.6613311", "text": "def get_type(self):\r\n return self._type", "title": "" }, { "docid": "5dd772b074c9514f3a6473e72efe7980", "score": "0.66102", "text": "def type(self) -> int:\n return self._get_type()", "title": "" }, { "docid": "dd7dccd54333ab926c6d26c8bb5f233d", "score": "0.66031796", "text": "def getType(self):\n return self.__type", "title": "" }, { "docid": "c19b824401fab2412ee013abc8bb56b9", "score": "0.65431046", "text": "def type(self) -> type[np.generic]:\n return self._dtype.type", "title": "" }, { "docid": "c3eaa4b634ed011ac973a3148e032451", "score": "0.65250295", "text": "def getType(self) -> cern.japc.value.Type:\n ...", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": 
"0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "cf4c95cc729f4eb935bb4b657836beb2", "score": "0.6503621", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "a879494c135a9817e6cda2d6c14a3a40", "score": "0.6491335", "text": "def type(self):\n return self.dtype().value", "title": "" }, { "docid": "6af49f6d6925956a602636dbf6f55705", "score": "0.6406388", "text": "def get_type(self):\n return self.type", "title": "" }, { "docid": "64797f9edc3dc913193bc62486b56a24", "score": "0.6399427", "text": "def type(self):\n \n return DeviceType(self.data['deviceType'])", "title": "" }, { "docid": "a82415a3f1d4e8f911632de0217542e4", "score": "0.6382383", "text": "def type(self):\n\n return self.__info[\"type\"]", "title": "" }, { "docid": "5b55d8138303ef494693431f8a1bdeab", "score": "0.63786644", "text": "def type_(self):\n return self._type", "title": "" }, { "docid": "efbaa60b4eeb85477c77604a74787383", "score": "0.6345522", "text": "def getType(self) -> Type:\n ...", "title": "" }, { "docid": "6745ae32d3d9a54b49cc48ebcb8154f3", "score": "0.6342728", "text": "def GetType(self):\n return self.type", "title": "" }, { "docid": 
"908180e8478c734cfdccf05fee141cc1", "score": "0.6336976", "text": "def get_type(self):\n if self.total_size() == 0:\n return CONSTANT_TYPE_NULL\n return unpack_from(FMT_BE_INT, self._buffer, TYPE_OFFSET)[0]", "title": "" }, { "docid": "64f6bfa6954f210c4fd3889f4fbc3be9", "score": "0.6329999", "text": "def type(self):\n return self._type.get_waarde()", "title": "" }, { "docid": "64f6bfa6954f210c4fd3889f4fbc3be9", "score": "0.6329999", "text": "def type(self):\n return self._type.get_waarde()", "title": "" }, { "docid": "f70e4ef2951e341dfdb78497b4e3aad0", "score": "0.63173866", "text": "def get_type(self):\n return self.properties.get('type', self.__class__.__name__)", "title": "" }, { "docid": "997067a5dfd1bdcaa4a6027ef41a20e2", "score": "0.63004416", "text": "def Type(self):\n if self.force_auto_sync:\n self.get('Type')\n return self._Type", "title": "" }, { "docid": "31b87e7bd9b39ffb09ff2863e85e9510", "score": "0.62919205", "text": "def _get_type(self):\n return self.__type", "title": "" }, { "docid": "31b87e7bd9b39ffb09ff2863e85e9510", "score": "0.62919205", "text": "def _get_type(self):\n return self.__type", "title": "" }, { "docid": "31b87e7bd9b39ffb09ff2863e85e9510", "score": "0.62916404", "text": "def _get_type(self):\n return self.__type", "title": "" }, { "docid": "31b87e7bd9b39ffb09ff2863e85e9510", "score": "0.62916404", "text": "def _get_type(self):\n return self.__type", "title": "" }, { "docid": "f784fc9d4a5d384fe733ef74dae92655", "score": "0.6272019", "text": "def get_type_value(self):\n return # osid.type.Type", "title": "" }, { "docid": "6e9cb9ae0c819aaacd6243d96e18fcd5", "score": "0.626929", "text": "def type(self):\n return self._type", "title": "" }, { "docid": "ab1106ea0d71d14afb715cbda0d41f27", "score": "0.62686604", "text": "def getType(self):\n\t\treturn self.type", "title": "" }, { "docid": "57be2218ea3d5a066189561dfe164ea7", "score": "0.6251663", "text": "def get_type(self):\n raise NotImplementedError", "title": "" }, { "docid": "57be2218ea3d5a066189561dfe164ea7", "score": "0.6251663", "text": "def get_type(self):\n raise NotImplementedError", "title": "" }, { "docid": "267c9e7f8455aa880f967d34491034d6", "score": "0.625114", "text": "def type(self) -> str:\n return getattr(self, \"_type\", None)", "title": "" }, { "docid": "99f9b2bf13156f0e63bbf637b29bef27", "score": "0.6207978", "text": "def type(self) -> str:\n return self._type", "title": "" }, { "docid": "99f9b2bf13156f0e63bbf637b29bef27", "score": "0.6207978", "text": "def type(self) -> str:\n return self._type", "title": "" }, { "docid": "99f9b2bf13156f0e63bbf637b29bef27", "score": "0.6207978", "text": "def type(self) -> str:\n return self._type", "title": "" }, { "docid": "99f9b2bf13156f0e63bbf637b29bef27", "score": "0.6207978", "text": "def type(self) -> str:\n return self._type", "title": "" }, { "docid": "99f9b2bf13156f0e63bbf637b29bef27", "score": "0.6207978", "text": "def type(self) -> str:\n return self._type", "title": "" }, { "docid": "8535d7006b00184c327588a653c42d2a", "score": "0.61991686", "text": "def type(self) -> str:\n return self.__type", "title": "" }, { "docid": "8535d7006b00184c327588a653c42d2a", "score": "0.61991686", "text": "def type(self) -> str:\n return self.__type", "title": "" }, { "docid": "8535d7006b00184c327588a653c42d2a", "score": "0.61991686", "text": "def type(self) -> str:\n return self.__type", "title": "" }, { "docid": "81a9e65be28013cecc4b691f04df8a3e", "score": "0.61588556", "text": "def Type(self):\n return self._get_attribute('type')", "title": "" }, { "docid": 
"ba0177e36fde50741b4bba906bc8416d", "score": "0.61485654", "text": "def gettype(self) -> type:\n return type(self.__value)", "title": "" }, { "docid": "78622557944dcc8fd0d6436764f4da8e", "score": "0.61103547", "text": "def GetType(self):\n return _snap.TPrimitive_GetType(self)", "title": "" }, { "docid": "e1a97832bbfa8c2fa1f2b68720ef148b", "score": "0.61088645", "text": "def getType(self):\n return self.__nodetype", "title": "" }, { "docid": "0705c9a6d251c4ca27b45a5c831cad17", "score": "0.60946596", "text": "def type(self):\n flags = self.threaded_get(ObjectField.TYPE) # misleading field name\n return ObjectType(OBJECT_FLAGS_TO_TYPE[flags])", "title": "" }, { "docid": "fd17e2668d2ac1e0ed997e8082a64e98", "score": "0.6075526", "text": "def type(self):\n if self.__type == None:\n return 'None'\n else:\n return self.__type", "title": "" }, { "docid": "e533b8ffcbbebbf92c6ec7d3793e4718", "score": "0.6073397", "text": "def get_type(self):\r\n pass", "title": "" }, { "docid": "4e36c2cfcbdd57ab80b6a584909cb7b2", "score": "0.607135", "text": "def _get_type(self):\n raise NotImplementedError", "title": "" }, { "docid": "a0b9ed3630e543b95ed7bb7280a93f42", "score": "0.6069221", "text": "def get_type(self):\n\t\treturn self.cost", "title": "" }, { "docid": "bfa3ae78868320a96174d186bc740ff4", "score": "0.6028155", "text": "def element_type(self):\n if not self.is_pointer:\n raise ValueError(\"Type {} is not a pointer\".format(self))\n return TypeRef(ffi.lib.LLVMPY_GetElementType(self))", "title": "" }, { "docid": "e27030aca17d1db8d2ce28869a986f50", "score": "0.6021998", "text": "def type(self):\n return self['type']", "title": "" }, { "docid": "96c0c0dbe0f73cc756fb31a6fce4575d", "score": "0.6018367", "text": "def getType():", "title": "" }, { "docid": "96c0c0dbe0f73cc756fb31a6fce4575d", "score": "0.6018367", "text": "def getType():", "title": "" }, { "docid": "199b80464e88e395af6a00c2a9ac4584", "score": "0.6007859", "text": "def type(self, name):\n return self.types[name]", "title": "" }, { "docid": "fdcf73e798abda5675055e5a3fc7c260", "score": "0.59899855", "text": "def get_type(cls) -> str:\n return cls._OBJ_TYPE", "title": "" }, { "docid": "a8e4ce1c20299e0f91fee3c5c904e997", "score": "0.5981018", "text": "def getType(self):\n return self.getAttr('type')", "title": "" }, { "docid": "a8e4ce1c20299e0f91fee3c5c904e997", "score": "0.5981018", "text": "def getType(self):\n return self.getAttr('type')", "title": "" }, { "docid": "a8e4ce1c20299e0f91fee3c5c904e997", "score": "0.5981018", "text": "def getType(self):\n return self.getAttr('type')", "title": "" }, { "docid": "20c1726c2cc63e83035f75dab663ebb1", "score": "0.5977373", "text": "def get_type():\n pass", "title": "" }, { "docid": "a846b510367f7c1ed56f736981e17466", "score": "0.59646964", "text": "def getDataType(self) -> ghidra.program.model.data.DataType:\n ...", "title": "" }, { "docid": "1f201accca6922c424063dd0a6273575", "score": "0.5961191", "text": "def get_type(self):\r\n return _RMF.NodeConstHandle_get_type(self)", "title": "" }, { "docid": "5370dc53807acde24b7f07d1bf2b3dd4", "score": "0.5957949", "text": "def GetType(self):\n return _pcbnew.PAGE_INFO_GetType(self)", "title": "" }, { "docid": "374b51f946f083258e495301ea1db482", "score": "0.59569997", "text": "def get_type(self):\n return self.get('type')", "title": "" }, { "docid": "14fe3cf524ce77f5163424d719b6d1b3", "score": "0.59492046", "text": "def get_type(self) -> str:\n return self[\"type\"]", "title": "" }, { "docid": "3fb072dbf7aca56a98a4a6592979ee67", "score": "0.5945928", "text": "def 
get_device_type(self) -> DeviceType:\n return self.get_status().device_type", "title": "" } ]
97cbb65bef88db7d1debcf8c87daec78
Searches for printable strings in a file
[ { "docid": "35f411b1a0cf39dd76bff3edfc1585d1", "score": "0.48263007", "text": "def test_grep_string(self):\n\n sampleFile = os.path.join(os.path.dirname(__file__), \"SampleDir\", \"SampleFile.txt\")\n sampleFile = lib_util.standardized_file_path(sampleFile)\n\n mySourceGrep = lib_client.SourceLocal(\n \"sources_types/CIM_DataFile/grep_text_strings.py\",\n \"CIM_DataFile\",\n Name=sampleFile)\n\n tripleGrep = mySourceGrep.get_triplestore()\n\n matchingTriples = tripleGrep.get_matching_strings_triples(\"[Pp]ellentesque\")\n\n lstStringsOnly = sorted( [ trpObj.value for trpSubj,trpPred,trpObj in matchingTriples ] )\n\n assert(lstStringsOnly == [u'Pellentesque;14;94', u'Pellentesque;6;36', u'Pellentesque;8;50', u'pellentesque;10;66', u'pellentesque;14;101'])", "title": "" } ]
[ { "docid": "b18cc9a8bb2647a8f266f3e70ec1005c", "score": "0.6613732", "text": "def get_strings(path):\n with open(path, errors='ignore') as file:\n res = ''\n for c in file.read():\n if c in string.printable:\n res += c\n continue\n if len(res) >= 4:\n yield res\n res = ''\n if len(res) >= 4:\n yield res", "title": "" }, { "docid": "1bb64d9fb3dd919d6a83b95cc13f1c24", "score": "0.5929734", "text": "def scan():\n \n with open('mem.dmp', 'rb') as dump_file:\n dump = dump_file.read()\n \n byte_length = 2 if args.encoding == 'utf16' else 1\n\n # Loop through each matching start string\n for m in re.finditer(encode(args.start), dump):\n end = dump.find(encode(args.end), m.start() + byte_length) + byte_length\n \n length = end - m.start()\n \n if length < int(args.maxlen):\n match = dump[m.start():end]\n \n try:\n match = match.decode(args.encoding)\n except UnicodeDecodeError:\n continue\n \n match = match.encode('utf8')\n \n if args.contains.lower() in match.lower() or not args.contains:\n print match", "title": "" }, { "docid": "814d6fe12dc7e1df63b46f7bd6390086", "score": "0.5728063", "text": "def test_scan_multiple_lines_cp1252(self):\n lines = ls.scan(testhelpers.TEST_FILES_FILE_ONE,\n 8, 9, encoding=\"cp1252\")\n self.assertEqual(lines, \"\"\"Taken from http://tvipsum.com/\nThe weather started getting rough - the tiny ship was tossed. If not for the\n\"\"\")", "title": "" }, { "docid": "b0472c105e216f6d9e38033665331826", "score": "0.56345594", "text": "def get_all_characters(script_file_handle):\n characters_dict = {}\n script_file_handle.seek(0)\n for line in script_file_handle.readlines():\n line = line.strip()\n search_result = parse.search('{}:', line)\n if search_result:\n character = search_result.fixed[0]\n if character not in characters_dict:\n characters_dict[character] = True\n return characters_dict.keys()", "title": "" }, { "docid": "0b5b404c3f0e390ec638f0c43e4787e6", "score": "0.55924666", "text": "def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True", "title": "" }, { "docid": "f91747030d9209bb1aed7e01e5a7906b", "score": "0.5581722", "text": "def is_ascii_file(file_path):\r\n with open(file_path, \"r\") as file_data:\r\n for line in file_data:\r\n if line.strip() == \"\":\r\n continue\r\n return line.startswith(\"#SDDSASCIIFORMAT v1\")\r\n return False", "title": "" }, { "docid": "1d07535642d7e1719e78bb46cd000c49", "score": "0.55255586", "text": "def grep_file(\n filename=None,\n fileobj=None,\n searchfn=lambda x: x.startswith('G=')):\n if not fileobj:\n fileobj = codecs.open(filename, \"r\", \"utf-8\")\n for i, n in izip(count(1), fileobj):\n n = n.lstrip() # ...\n if searchfn(n):\n yield (i, n)", "title": "" }, { "docid": "3b2f92e098dd4ee865aca5af48622b5c", "score": "0.5511462", "text": "def search_for_magic(ns):\n try:\n for file in global_dict:\n with open(ns.path + \"/\" + file) as f:\n filename_lines = f.readlines()\n for i, line in enumerate(filename_lines):\n if ns.magic in line:\n if i not in global_dict[file]:\n global_dict[file].append(i)\n logger.info(\n f\"{file} found magic text in this file\")\n except Exception as e:\n logger.info(e)", "title": "" }, { "docid": "02e4a75877dfe007213baacf60cb5ba8", "score": "0.5459256", "text": "def not_printable(word):\n for char in word:\n if char in printable:\n return False\n return True", "title": "" }, { "docid": "98f23291eb502d78e2830a92e9810f52", "score": "0.54415816", "text": "def search_file_byte(byte_str, filename): \r\n try:\r\n byte_num = int(byte_str)\r\n except 
ValueError:\r\n return \"Invalid Input\"\r\n byte_char = chr(byte_num)\r\n count = 0\r\n with open(filename, \"r\") as f:\r\n while True:\r\n char = f.read(1) # read by character\r\n if char == byte_char:\r\n count += 1\r\n if not char: \r\n break\r\n return \"%d instances of byte %r, or in Python %r\" % (count, byte_str, byte_char)", "title": "" }, { "docid": "a59551464b94593e3b6bdeb773ff73d3", "score": "0.5433031", "text": "def _find_strings(filename, encoding=None):\n d = {}\n # If the first token is a string, then it's the module docstring.\n # Add this special case so that the test in the loop passes.\n prev_ttype = token.INDENT\n with open(filename, encoding=encoding) as f:\n tok = tokenize.generate_tokens(f.readline)\n for ttype, tstr, start, end, line in tok:\n if ttype == token.STRING:\n if prev_ttype == token.INDENT:\n sline, scol = start\n eline, ecol = end\n for i in range(sline, eline + 1):\n d[i] = 1\n prev_ttype = ttype\n return d", "title": "" }, { "docid": "2c2d6044c89541f6c51a5ee52cfb9d59", "score": "0.53720856", "text": "def lookForStringInFile(self,fname,searchString):\n with open(fname, 'r') as handle:\n for line in handle:\n if line.find(searchString)>-1:\n return True\n\n return False", "title": "" }, { "docid": "35d2033e12221b023f6829b6e6bc231b", "score": "0.53668296", "text": "def search_file(filename, byte):\n char = chr(byte)\n total = 0\n with open(filename) as f:\n for line in f:\n total += line.count(char)\n return total", "title": "" }, { "docid": "9a9c4c1d0ab2816e2e3243554ca4d2ef", "score": "0.5364518", "text": "def naughty_strings(filepath=FILEPATH):\n\n strings = []\n with gzip.open(filepath, 'r') as f:\n\n # put all lines in the file into a Python list\n for string in f.read().split(\"\\n\"):\n if len(string) > 0:\n if (string[0] != \"#\" and \"script\" not in string \n and \"SCRIPT\" not in string \n and \"alert\" not in string and string[0] != '>'):\n strings.append(string)\n return strings", "title": "" }, { "docid": "19bdc9af64a8d3f18361d977dea65180", "score": "0.53480655", "text": "def treatment(file_):\r\n for line in file_.split('\\n'):\r\n if re.search('лечение', line): # Does the same thing as \"if 'hello' in line:\"\r\n return(line.replace('Проведенное лечение:', \\\r\n '').replace('ЛФК', '').replace('ФТЛ', ''))", "title": "" }, { "docid": "0ed916badc858fcbd110b46031328393", "score": "0.53245604", "text": "def test_readlines_from_file_with_bad_encoding(self):\n self.assertEqual(\n ['# -*- coding: zlatin-1 -*-\\n'],\n autopep8.readlines_from_file(\n os.path.join(ROOT_DIR, 'test', 'bad_encoding.py')))", "title": "" }, { "docid": "3733e5cce5ede37c810dcbb6b4f70979", "score": "0.53153896", "text": "def words_in_book(filename):\n with open(filename,\"r\") as f:\n try:\n text = f.read()\n except UnicodeDecodeError:\n text = open(filename, \"r\", encoding = \"utf8\").read()\n \n return re.findall(r\"[a-zA-Z]+\", text)", "title": "" }, { "docid": "b3b46b78c6ea6b079f99ccc4dec88356", "score": "0.5311415", "text": "def srchcomments(filename, substr):\n for index, line in enumerate(open(filename)):\n L = line.split('#')\n if len(L) == 2:\n if L[1].find(substr) != -1:\n print(\"%5d: %s\" % (index, line.rstrip()))", "title": "" }, { "docid": "47210fb64f600672ba9cc349c51a3401", "score": "0.5250739", "text": "def test_scan_single_line_utf8(self):\n line = ls.scan(testhelpers.TEST_FILES_FILE_ONE, 6, 6, encoding=\"utf-8\")\n self.assertEqual(line, \"revolutionary ROI.\\n\")", "title": "" }, { "docid": "ad513ac6da7b1dab1eccd858da886b86", "score": "0.5243514", 
"text": "def find_term(vcffile, term):\r\n with open(vcffile, 'r') as fin:\r\n tag_type = ''\r\n notfound = False\r\n print('CHROM\\tPOS',term)\r\n for line in fin:\r\n if line.startswith('#'):\r\n if '<ID='+term in line and line.startswith('##INFO'):\r\n tag_type = 'INFO'\r\n elif '<ID='+term in line and line.startswith('##FORMAT'):\r\n tag_type = 'FORMAT'\r\n continue\r\n if tag_type == '':\r\n print('Tag',term,'was not found in either INFO or FORMAT.')\r\n return\r\n chrom, pos, id_, ref, alt, qual, filter, info, format_, *samples = line.strip().split()\r\n if tag_type == 'INFO':\r\n info_l = info.split(';')\r\n for il in info_l:\r\n if term in il:\r\n print(chrom, pos, il[3:])\r\n break\r\n else:\r\n print(chrom, pos, '.')\r\n elif tag_type == 'FORMAT':\r\n format_l = format_.split(':')\r\n ind = format_l.index(term)\r\n output = [chrom, pos]\r\n for sample in samples:\r\n sl = sample.split(':')[ind]\r\n output.append(sl)\r\n print('\\t'.join(output))\r\n else:\r\n notfound = True\r\n if notfound:\r\n print('Tag',term,' was missing for one or more variants.')", "title": "" }, { "docid": "6c9054a50c5f901748725494aa529a78", "score": "0.52412397", "text": "def check_if_string_in_file(file_name, string_to_search):\n with open(file_name,'r') as read_obj:\n for line in read_obj:\n if string_to_search in line:\n return True\n return False", "title": "" }, { "docid": "a3b347330f56e97bfb8bc8164d88a1f4", "score": "0.52270687", "text": "def stringparser( tfile, c ):\n\n\n\t\tcharcount = [] \t# Holds the lengths of each line\n\t\tcharlines = []\t# \n\t\tcharpos = []\t# The positions of the found characters or the first letters of the found words\n\t\twordlens = []\n\t\tk = 0\n\n\t\t\n\t\tfor string in tfile.split(\"\\\\n\"):\n\n\n\t\t\t# Each string needs to be decoded to utf-8 as the files are saved in utf-8 format. 
\n\t\t\t# Without decoding matching would be done to ascii decoding and that causes the \n\t\t\t# strings to contain extra characters.\n\t\t\t# Also the newline characters are removed so that the length of the lines are \n\t\t\t# correct\n\n\t\t\ts = string.rstrip('\\n').decode('utf-8', 'ignore')\t\n\t\t\tcharindlist = []\n\t\t\tcharcount.append(len(s))\n\t\t\t\n\t\t\tfor m in re.finditer(c, s):\n\t\t\t\tcharindlist.append(m.start())\n\t\t\t\n\t\t\tif charindlist:\t\n\t\t\t\tcharpos.append(charindlist)\n\t\t\t\t\n\t\t\t\ttempv1 = [k]*len(charindlist)\t\t# These two temporary values are needed to append right amount of linenumbers and wordlenght numbers\n\t\t\t\ttempv2 = [len(c)]*len(charindlist)\t# into their respective vectors\n\t\t\t\t\n\t\t\t\tcharlines.append(tempv1)\n\t\t\t\twordlens.append(tempv2)\n\n\t\t\tk += 1\n\n\t\t#print k\n\n\n\t\treturn charcount, charpos, charlines, wordlens", "title": "" }, { "docid": "b34e12249007fa1a88b19c1e0c6685ed", "score": "0.5218695", "text": "def search_by_text(text : str, files : []) -> []:\n outfiles = []\n for file in files:\n try:\n infile = open(file, 'r')\n if text in infile.read():\n outfiles.append(file)\n except UnicodeDecodeError:\n continue\n return outfiles", "title": "" }, { "docid": "f666300777680fde33aea306519a53fa", "score": "0.5208492", "text": "def detect_problems (filename):", "title": "" }, { "docid": "8de0803df41a6473536e55253ae92b1e", "score": "0.51987684", "text": "def _scan_file(path):\n data = \"\"\n with open(path, \"r\") as f:\n for line in f.readlines():\n strings = re.findall(r'\"%[^\"]+\"', line) + re.findall(r'[[%[^\"]+]]', line) + re.findall(r'\\'%[^\"]+\\'', line)\n for string in strings:\n raw_string = string[2:-2] if string.startswith(\"[[\") else string[1:-1]\n trans_string = TranslatableString(raw_string)\n line = line.replace(string, f\"EAL.translations[\\\"{ADDON_NAME}\\\"][EAL.language][\\\"{trans_string.string}\\\"]\")\n translatable_string_manager.add_string(trans_string)\n data += line\n\n with open(path, \"w\") as f:\n f.write(data)", "title": "" }, { "docid": "6c356f4e413ec0506447b99360b8f3b4", "score": "0.5194126", "text": "def is_printable(s):\r\n try:\r\n return all(c in string.printable for c in s)\r\n except TypeError:\r\n return False", "title": "" }, { "docid": "02d3db7dde189cf10c836a0d46b97d02", "score": "0.51797986", "text": "def extract():\n file = open(\"compilation.txt\", \"r\")\n print(\"Choose a marking symbol (substring):\")\n c = input().strip()\n print(\"Choose output file path:\")\n name = input()\n out = open(name, \"a\")\n print (\"...\")\n\n for line in file:\n if c in line:\n out.write(line[1:])\n\n print (\"Completed\")", "title": "" }, { "docid": "f60e51506cf131957fa867694ddf5c94", "score": "0.5161632", "text": "def search(filename):\n data = [(title.strip(), int(year), genres.split(','))\n for title, year, genres in [line.strip().split('\\t')\n for line in open(filename)]]\n letter_index = build_index(data)\n letter = raw_input()\n curr_dict = letter_index[letter]\n while True:\n print curr_dict\n if '@' in curr_dict:\n print curr_dict['@']\n break\n else:\n letter = raw_input()\n if letter not in curr_dict:\n print 'not found :('\n break\n curr_dict = curr_dict[letter]", "title": "" }, { "docid": "033ba891f68528143dc81f788aefce71", "score": "0.51508087", "text": "def test_unicode_files_where(self):\n self.base_where_clause(SAMPLE_UNICODE_FILE_HASH)", "title": "" }, { "docid": "5d6e69b38d0af585bb814faae0563b22", "score": "0.51494044", "text": "def is_file_string(s):\n if len(s) >= 
5000:\n return False # pragma: no cover\n global _allowed\n if not _allowed.search(s):\n return False\n for c in s:\n if ord(c) < 32:\n return False\n return True", "title": "" }, { "docid": "7240df43120e81d4e0aeab761235ce7e", "score": "0.5147022", "text": "def readEnzymes(pFile):\n pass", "title": "" }, { "docid": "f8632486f2e931f66e6bbe9f703f924f", "score": "0.5128666", "text": "def formatted_text_search(encoding):\n\n if encoding == 'formatted_text':\n return codecs.CodecInfo(formatted_text_encode, formatted_text_decode, name='formatted_text')", "title": "" }, { "docid": "c0e2c8240bbe673f9eb65f5f8e0200b2", "score": "0.51199263", "text": "def ed_find(filename, search_str):\n\tcontents = ed_read(filename)\n\treturn list(find_all(contents, search_str))", "title": "" }, { "docid": "9716a3c845659a7cdcc239c114dcf5b3", "score": "0.51049286", "text": "def probe_file_for_secret(fn):\n mebbe = []\n with open(fn) as f:\n for num, line in enumerate(f):\n line = line.strip()\n (s, k, t) = ranked_cleartext(line)\n mebbe.append((s, num, k, t))\n mebbe.sort()\n return mebbe[0]", "title": "" }, { "docid": "d5e426f727b35d1e125ca42279ecf9a3", "score": "0.50983447", "text": "def search_file(self,filename):\n\t\tf=open(filename,'r')\n\t\tfor line in f:\n\t\t\tself.search(line)", "title": "" }, { "docid": "e3267368c4ab964f4238beda1d54ac95", "score": "0.5084882", "text": "def get_non_ascii_words(filename):\n words = set()\n with open(filename, \"rb\") as fh:\n for line in fh.readlines():\n for word in line.strip().split():\n if not is_ascii(word):\n words.add(word.decode())\n return words", "title": "" }, { "docid": "8621626724f448761729abb2a5f95772", "score": "0.50389147", "text": "def chapter_format_identifier():\n\n with open(filename, 'r') as f:\n contents = f.read()\n\n pattern = 'chapter.....'\n matches = re.findall(pattern, contents)\n\n # test for written numbers\n written_counter = 0\n written_matches = []\n for poss_match in written_cases:\n if re.search(poss_match, contents, flags=re.IGNORECASE) is not None:\n written_counter += 1\n\n return written_cases", "title": "" }, { "docid": "7f4467e69f40c68cb8de91867a628a65", "score": "0.500881", "text": "def SearchDemo(name, keyword):\n fid = open(GetOriginalFilename(name), \"rt\")\n fullText = fid.read()\n fid.close()\n\n fullText = fullText.decode(\"iso-8859-1\")\n\n if fullText.find(keyword) >= 0:\n return True\n\n return False", "title": "" }, { "docid": "e1e614f94ad49c3087d97612cacabcf0", "score": "0.4998978", "text": "def main(path):\r\n for item in cleanUpText(path):\r\n pprint.pprint(item)", "title": "" }, { "docid": "52d8aee6091907f7b675cfa26a79a6da", "score": "0.49770707", "text": "def test_egrep_char_range(self):\n test_file = FileAsObj()\n test_file.add(TESTCONTENTS)\n self.assertTrue(test_file.egrep('[a-z]ird'))", "title": "" }, { "docid": "4eb913f14c27c5e3fd961086f23a246f", "score": "0.49760962", "text": "def searchable_text():", "title": "" }, { "docid": "6693e057977c30f7518d1abfa8e85ca1", "score": "0.4959682", "text": "def test_single_file_str(datafiles):\n assert (datafiles / 'huckleberry.txt').is_file()\n assert 'Mark Twain' in (datafiles / 'huckleberry.txt').read_text('utf-8')", "title": "" }, { "docid": "43771ea77f11f72f16f67112a5048e3e", "score": "0.49503657", "text": "def txtfparser( tfile, c ):\n\n\t\tcharcount = [] \t# Holds the lengths of each line\n\t\tcharlines = []\t# \n\t\tcharpos = []\t# The positions of the found characters or the first letters of the found words\n\t\twordlens = []\n\t\tk = 0\n\n\t\tfid = open( tfile, 'r' 
)\n\n\t\tlines = fid.readlines()\n\t\tfid.close()\n\n\t\t\n\t\tfor string in lines:\n\n\t\t\t# Each string needs to be decoded to utf-8 as the files are saved in utf-8 format. \n\t\t\t# Without decoding matching would be done to ascii decoding and that causes the \n\t\t\t# strings to contain extra characters.\n\t\t\t# Also the newline characters are removed so that the length of the lines are \n\t\t\t# correct\n\n\t\t\ts = string.rstrip('\\n').decode('utf-8', 'ignore')\t\n\t\t\tcharindlist = []\n\t\t\tcharcount.append(len(s))\n\t\t\t\n\t\t\tfor m in re.finditer(c, s):\n\t\t\t\tcharindlist.append(m.start())\n\t\t\t\n\t\t\tif charindlist:\t\n\t\t\t\tcharpos.append(charindlist)\n\t\t\t\t\n\t\t\t\ttempv1 = [k]*len(charindlist)\t\t# These two temporary values are needed to append right amount of linenumbers and wordlenght numbers\n\t\t\t\ttempv2 = [len(c)]*len(charindlist)\t# into their respective vectors\n\t\t\t\t\n\t\t\t\tcharlines.append(tempv1)\n\t\t\t\twordlens.append(tempv2)\n\n\t\t\tk += 1\n\n\n\t\treturn charcount, charpos, charlines, wordlens", "title": "" }, { "docid": "97ec31b30648e37509122a59a80bc509", "score": "0.49397665", "text": "def filter_printable(text: str) -> str:\n return ''.join(list(filter(lambda ch: ch in string.printable, text)))", "title": "" }, { "docid": "c0369e3ad2836074adcc6206cd8fed8f", "score": "0.49332452", "text": "def replace_bad_chars(filename, bad_chars):\n\n messages = []\n\n debug('replace_bad_chars(filename=\"{}\" bad_chars=\"{}\")'.format(filename, bad_chars))\n for c in bad_chars:\n if c in filename:\n messages.append('bad character \"{}\"'.format(printable(c)))\n\n filename = filename.replace(c, \"_\")\n debug('replace_bad_chars returning=\"{}\"'.format(filename))\n\n return filename, messages", "title": "" }, { "docid": "ebef7f447aa1908379524fd3353b4e3e", "score": "0.49209836", "text": "def check_if_string_in_file(file_name, string_to_search):\n # Open the file in read only mode\n with open(file_name, 'r') as read_obj:\n # Read all lines in the file one by one\n for line in read_obj:\n # For each line, check if line contains the string\n if string_to_search in line:\n return True\n return False", "title": "" }, { "docid": "9bbae0247f63aa36e6704c67379e7949", "score": "0.49126214", "text": "def _get_unique_non_ascii(text) :\n\timport unicodedata\n\tnonascii = set([c for c in text if ord(c) >= 128])\n\tfor c in nonascii :\n\t\tprint (c, '\\t', hex(ord(c)),'\\t', unicodedata.name(c))\n\treturn nonascii", "title": "" }, { "docid": "67c49eab6c039c6d1f9b3b662aae5a92", "score": "0.489653", "text": "def wbc(file_):\r\n file_ = file_.lower()\r\n patt1 = re.sub(r'[():]', '', file_)\r\n patt1 = re.sub(r'10(\\*|[еe])9', '', patt1)\r\n patt1 = re.sub(r'\\Dбщийанализкрови', 'оак', patt1) \r\n patt2 = re.sub(r'(?<=оак)\\d\\d.\\d\\d.\\d{2,4}|(?<=оак)\\d\\d.\\d\\d', '', patt1)\r\n patt3 = re.compile(r'(?<=оак|wbc)(?:л|лейкоцит\\w)(\\d*.\\d+|\\d+)')\r\n all_wbc = patt3.findall(''.join(patt2.split()))\r\n try:\r\n if all_wbc:\r\n return all_wbc\r\n else:\r\n return 'None'\r\n except:\r\n pass", "title": "" }, { "docid": "c65a5ead31b89b5552188698f45a6c34", "score": "0.48826763", "text": "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 131)", "title": "" }, { "docid": "5a564702c753eee826cfe00c679414c4", "score": "0.48753566", "text": "def check_if_string_in_file(file_name, string_to_search):\n # Open the file in read only mode\n with open(file_name, 'r') as read_obj:\n # Read all lines in the file one by one\n for line in read_obj:\n # 
For each line, check if line contains the string\n if string_to_search in line:\n return True\n\n return False", "title": "" }, { "docid": "4807ec67f458f44aa0bf491811ec6748", "score": "0.4864348", "text": "def match_in_file(file_name, lines, regex, color, machine):\r\n for line_no, matched_line, regex_result, start_pos in search(lines, regex):\r\n # a formatter works on a single line at a time\r\n if color:\r\n print(ColoredFormatter(matched_line, regex_result).format(),\r\n end='')\r\n elif machine:\r\n print(MachineReadableFormatter(file_name, line_no,\r\n matched_line, start_pos\r\n ).format(), end='')\r\n else:\r\n print(SimpleFormatter(file_name, line_no, matched_line).format(),\r\n end='')\r\n print()", "title": "" }, { "docid": "60288b7f12b951b8b1153c7bf48e5426", "score": "0.48626998", "text": "def _find_executable_linenos(filename):\n try:\n with tokenize.open(filename) as f:\n prog = f.read()\n encoding = f.encoding\n except OSError as err:\n print((\"Not printing coverage data for %r: %s\"\n % (filename, err)), file=sys.stderr)\n return {}\n code = compile(prog, filename, \"exec\")\n strs = _find_strings(filename, encoding)\n return _find_lines(code, strs)", "title": "" }, { "docid": "e3fbe02056fd5a1aeb749c4e89105a80", "score": "0.48544747", "text": "def _parse_enc(path):\n with open(path, encoding=\"ascii\") as file:\n no_comments = \"\\n\".join(line.split(\"%\")[0].rstrip() for line in file)\n array = re.search(r\"(?s)\\[(.*)\\]\", no_comments).group(1)\n return re.findall(r\"(?<=/)[A-za-z0-9._]+\", array)", "title": "" }, { "docid": "e2d32de4ca8dfb0bbcaa3229f4e3fb4e", "score": "0.48483825", "text": "def FindHeaderLength():\n\n lookup = '#------------------------------'\n \n with open(filename) as myFile:\n for FoundPosition, line in enumerate(myFile, 1):\n if lookup in line:\n #print 'Scan Data found at line:', FoundPosition\n break\n \n return FoundPosition", "title": "" }, { "docid": "ac45d41f94b7419ccb73ddfe81e05298", "score": "0.48461643", "text": "def remove_special_chars_in_file(fname):\n \n with open(fname, 'rb') as fin:\n lines = fin.readlines()\n\n \n\n to_remove = re.compile('[' + re.escape(r'@/\\[]<>*-_.|:(){}=\"\",#&$1234567890?') + ']+')\n \n \n \n with open(fname, 'wb') as fout:\n for line in lines:\n fixed_line = re.sub(to_remove, '', line)\n fixed = fixed_line.replace('\\n', '') \n #if fixed_line != line:\n # print(\"--%r\\n++%r\" % (line, fixed_line))\n \n fout.write(fixed)", "title": "" }, { "docid": "2d34c4a635c9e25799fc1df551116dba", "score": "0.4844724", "text": "def readSourceText(fname):\r\n\t\r\n#\tTEXT = file('test.txt').read()\r\n\tTEXT = file(fname).read()\r\n\tprint \"Number of lines in reference text\", len(TEXT)\r\n\r\n\tTEXT = sanitizeText(TEXT)\r\n\t\r\n\t# Make the association map of letter pairs\r\n\tc = Counter(zip(TEXT, TEXT[1:]))\r\n\r\n\t# increment counter by 1 because later going to take log of this qty\r\n\tfor i in c:\r\n\t\tc[i] += 1\r\n\r\n\r\n\treturn c", "title": "" }, { "docid": "6e272e076a1c2ec1bc4084a08a73f499", "score": "0.48422053", "text": "def from_text_file(path, n, chars=CHARSET_ASCII):\n text = open(path, 'r', encoding='utf-8').read().encode(\"ascii\", errors=\"ignore\").decode()\n chars_st = set(chars)\n text = ''.join(filter(lambda x: ord(x) in chars_st, text))\n return HuffDict.from_text(text, n, chars)", "title": "" }, { "docid": "afc6104accd77ea88d37b2f3c0210da9", "score": "0.48309004", "text": "def ascii_identify(origin, *args, **kwargs):\n name = os.path.basename(args[0])\n\n if name.lower().split('.')[-1] in 
['txt', 'ascii']:\n return True\n\n return False", "title": "" }, { "docid": "a9a344f3c2e3ccbae1f158f03c29da06", "score": "0.48302197", "text": "def search_log(log, string, test_data):\n logs_path = make_path(test_data.logs_dir, log)\n try:\n results = []\n log = codecs.open(logs_path, \"r\", \"utf_8\")\n try:\n for line in log:\n if string in line:\n results.append(line)\n log.close()\n except UnicodeDecodeError:\n pass\n if results:\n return results\n except:\n raise FileNotFoundException(logs_path)", "title": "" }, { "docid": "686cc2ded9546f7f053ab098731af273", "score": "0.4828764", "text": "def get_letter(self,letter,file_name):\n\n URLS = open(\"Resources/\"+file_name+\".txt\",\"r\")\n #need an exception list here for puncutation and integers\n for line in URLS:\n if(line != \" \" and line != '\\n'):\n self.insert_obs(letter,line.strip('\\n').decode(\"utf-8\"))", "title": "" }, { "docid": "b9b96f8ba13f3a001a8dbc360a7ccc7a", "score": "0.48263735", "text": "def process_file(filename, skip_header):\n hist = {}\n fp = open(filename)\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n for line in fp:\n if line.startswith('*** END OF THIS PROJECT'):\n break\n line = line.replace('-', ' ') #? What are we doing here? Why?\n strippables = string.punctuation + string.whitespace\n\n for word in line.split():\n word = word.strip(strippables)\n word = word.lower()\n\n # word = line.strip(string.whitespace + string.punctuation) #? Could this work?\n # update the histogram\n hist[word] = hist.get(word, 0) + 1 #? develop understanding\n \n return hist", "title": "" }, { "docid": "4662e21982c303fdef4f272dad98ba72", "score": "0.48168257", "text": "def _no_encoding_on_file_open(self, filepath: str):\n\n with open(filepath, \"r\", encoding=\"utf-8\") as input_file:\n regexp = re.compile(r\"(?!.*\\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\\b)(?<=\\s)(open)\\((.*)\\)\")\n input_text = input_file.read()\n match = regexp.search(input_text)\n\n return match", "title": "" }, { "docid": "42f3afd00d21eab405b44559b79c6b8e", "score": "0.48166272", "text": "def test_grep_no_matches(self):\n test_file = FileAsObj()\n test_file.contents = TESTCONTENTS.split('\\n')\n result = test_file.egrep('substring_not_found')\n self.assertFalse(result)", "title": "" }, { "docid": "113fcc5b860d83c343965c5996506297", "score": "0.481129", "text": "def main(path):\n\tif os.path.exists(path):\n\t\tspaces, tabs, lines = parse_file(path)\n\t\tprint \"Spaces %d tabs %d. 
lines %d\" % (spaces, tabs, lines)\n\t\treturn True\n\telse:\n\t\treturn False", "title": "" }, { "docid": "5421aa7eb303dee02a75553308d281ad", "score": "0.47993457", "text": "def test_missing_bom_error_for_non_ascii_cpp(tmp_path: Path) -> None:\n source = \"int ŢōŶ; \"\n filename = tmp_path.joinpath(\"a.cpp\")\n filename.write_text(source, encoding=\"UTF-8\")\n output = run([str(filename)], expected_exit=1)\n output.fnmatch_lines(\n str(filename) + \": ERROR Not a valid UTF-8 encoded file, since it contains non-ASCII*\"\n )\n output.fnmatch_lines(\"*== ERRORS ==*\")\n output.fnmatch_lines(\n str(filename) + \": ERROR Not a valid UTF-8 encoded file, since it contains non-ASCII*\"\n )", "title": "" }, { "docid": "82d3cbcc47490a1ec5d5f15a1a2e1c5d", "score": "0.47967812", "text": "def showFoundAuthors (findstr):\n\titems = batchFilterWosFile (lambda item: 1)\n\tprint \"\\n%d authors found\" % len(items)\n\tfor item in items:\n\t\tfor author in item['authors'].split(';'):\n\t\t\t# print \"%s\" % str(author).strip()\n\t\t\tif author.lower().find (findstr.lower()) != -1:\n\t\t\t\tprint \"%s\" % str(author).strip()", "title": "" }, { "docid": "1feadf1da01961bbea983a0d59483579", "score": "0.47933117", "text": "def test_uni_print():", "title": "" }, { "docid": "f59d3ff1aad38d3b5f1324e775eb03fd", "score": "0.47865704", "text": "def wordSearch():\n\n wordFile = \"/usr/share/dict/words\"\n\n if os.path.isfile(wordFile):\n wordData = open(wordFile)\n wordList = {}\n\n for word in wordData.read().split('\\n'):\n if word.isalpha():\n wordList[word.lower()] = None\n wordData.close()\n else:\n print(\"Word file not found.\")\n\n return wordList", "title": "" }, { "docid": "d5029db59dae1d46c79748306cd575a6", "score": "0.47824818", "text": "def findtext(self, path, default=None, namespaces=None): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "2d94af76535201000e48822eb86f0773", "score": "0.47750795", "text": "def search_logs(string, test_data):\n logs_path = test_data.logs_dir\n results = []\n for file in os.listdir(logs_path):\n log = codecs.open(make_path(logs_path, file), \"r\", \"utf_8\")\n try:\n for line in log:\n if string in line:\n results.append(line)\n log.close()\n except UnicodeDecodeError:\n pass\n return results", "title": "" }, { "docid": "e243a5d298c146719dfba3abebc311ff", "score": "0.47690302", "text": "def read_lines(filepath, all_letters):\n lines = open(filepath, encoding='utf-8').read().strip().split('\\n')\n\n return [unicode2ascii(line, all_letters) for line in lines]", "title": "" }, { "docid": "a4b5386b0cf773059c299d161f5fae82", "score": "0.4768786", "text": "def filter_line(line):\n\n # filters out non-printable characters\n line = filter(lambda x: x in string.printable, line)\n return line", "title": "" }, { "docid": "1889f6a9ef7b96a4bccac0d3ac13ca37", "score": "0.47665122", "text": "def treat_file(filename, outfp):\n try:\n fp = open(filename, 'r')\n except OSError:\n sys.stderr.write('Cannot open %s\\n'%filename)\n return\n charno = 0\n lineno = 0\n tags = []\n size = 0\n while 1:\n line = fp.readline()\n if not line:\n break\n lineno = lineno + 1\n m = matcher.search(line)\n if m:\n tag = m.group(0) + '\\177%d,%d\\n' % (lineno, charno)\n tags.append(tag)\n size = size + len(tag)\n charno = charno + len(line)\n outfp.write('\\f\\n%s,%d\\n' % (filename,size))\n for tag in tags:\n outfp.write(tag)", "title": "" }, { "docid": "ca40a0cf37beab69d9675deeaebcb2e8", "score": "0.4766042", "text": "def file_read():\n try:\n with args.inputfile as 
source:\n for line in source:\n register.append(str(line).strip())\n\n except IOError:\n print(\"File '{}' is not accessible, exit.\".format(sys.argv[1]))\n sys.exit()", "title": "" }, { "docid": "ca69fba4081910815885b13c407f5152", "score": "0.47650224", "text": "def regex_search(fi):\r\n if fi.getDocumentCount() == 0:\r\n return\r\n\r\n length_sel = fi.getSelectionLength()\r\n offset = fi.getSelectionOffset()\r\n if length_sel > 0:\r\n length = length_sel\r\n buf = fi.getSelection()\r\n else:\r\n buf = fi.getDocument()\r\n length = fi.getLength()\r\n offset = 0\r\n\r\n if buf == \"\":\r\n return\r\n\r\n keyword = fi.showSimpleDialog(\"Regular expression (please see https://docs.python.org/2.7/library/re.html for syntax):\")\r\n\r\n time_start = time.time()\r\n\r\n if len(keyword) > 0:\r\n if length_sel > 0:\r\n print \"Search from offset %s to %s with keyword '%s'\\r\\n\" % (hex(offset), hex(offset + length - 1), keyword)\r\n else:\r\n print \"Search in the whole file with keyword '%s'\\r\\n\" % keyword\r\n\r\n try:\r\n re.compile(keyword)\r\n except:\r\n print \"Error: invalid regular expression\"\r\n return\r\n\r\n num_hits = 0\r\n match = re.finditer(keyword, buf)\r\n bookmark_start = []\r\n bookmark_end = []\r\n for m in match:\r\n if is_printable(m.group()):\r\n print \"Offset: 0x%x Search hit: %s\" % (offset + m.start(), re.sub(\"[\\r\\n\\v\\f]\", \"\", m.group()))\r\n else:\r\n print \"Offset: 0x%x Search hit: %s (hex)\" % (offset + m.start(), binascii.hexlify(m.group()))\r\n if num_hits > 0 and offset + m.start() == bookmark_end[-1]:\r\n bookmark_end[-1] = offset + m.end()\r\n else:\r\n bookmark_start.append(offset + m.start())\r\n bookmark_end.append(offset + m.end())\r\n num_hits += 1\r\n\r\n print \"\\r\\nElapsed time (search): %f (sec)\" % (time.time() - time_start)\r\n time_start = time.time()\r\n\r\n for i in range(0, len(bookmark_start)):\r\n fi.setBookmark(bookmark_start[i], bookmark_end[i] - bookmark_start[i], hex(bookmark_start[i]), \"#aaffaa\")\r\n\r\n if num_hits == 1:\r\n print \"Added a bookmark to the search hit.\"\r\n elif num_hits > 1:\r\n print \"Added bookmarks to the search hits.\"\r\n\r\n print \"Elapsed time (bookmark): %f (sec)\" % (time.time() - time_start)", "title": "" }, { "docid": "80b578b6f19537e3ac6c7eaa44569dd0", "score": "0.47573465", "text": "def print_file_diagnostics(filename):\n summary_template = \"\"\"\nFilename: {filename}\nChars: {nchars}\nLetters: {nletters}\nUppercase: {upper}\nLowercase: {lower}\n\"\"\"[1:-1]\n print(summary_template.format(filename=filename, \n nchars=diagnostics.n_chars(filename),\n nletters=diagnostics.n_letters(filename),\n upper=diagnostics.n_uppercase(filename),\n lower=diagnostics.n_lowercase(filename)))", "title": "" }, { "docid": "22298501c196725a19fe7decbd3d1195", "score": "0.4756045", "text": "def main():\n\n WORDSFILE = \"alice.txt\"\n\n \n ##\n ## The main loop\n while True:\n\n # Open the file\n try:\n f = open(WORDSFILE, \"rU\")\n except:\n print \"Could not open `%s'\" % WORDSFILE\n exit()\n\n # Ask for two letters\n print\n print\n le = str(raw_input(\"Give a few letters (`!' 
to quit): \"))\n\n if (le == \"!\"):\n break\n\n if (len(le) == 0):\n print\n print \"Need a few letters\"\n print\n continue\n\n # Now scan all words\n list = []\n\n for l in f:\n # find a list of words\n res = re.findall(\"\\s%s\\w*\" % le, l)\n\n for w in res: # for every word found\n if (not(w in list)):\n list.append(w)\n \n f.close()\n \n if (len(list) == 0):\n print \"No words starting from `%s' found\" % le\n else:\n print\n print \"----------------------------------------------\"\n print \"These are the words (%d) that start with `%s':\" \\\n % ( len(list), le )\n print \"----------------------------------------------\"\n print list\n print\n\n ## Ending the program\n print\n print\n print \"Bye!\"\n print\n print", "title": "" }, { "docid": "bdea474738ffd1013e4456d1e658665d", "score": "0.47553942", "text": "def search_term_in_question_file(question, pattern=r\"\\w+_\\d{8}\"):\n terms = []\n with open(question, \"r\") as question_file:\n for line in question_file.readlines():\n terms.extend(re.findall(pattern, line))\n return terms", "title": "" }, { "docid": "39c929c3037b8f9c106ba67d581b5804", "score": "0.47509122", "text": "def process_file(file):\n\n ins = FileCommandInterface()\n while True:\n line = file.readline()\n if not line:\n break\n line = str(line.decode(\"utf-8\").strip())\n ins.register(line)\n file.close()\n results = ins.get_output()\n if results:\n for name, value in results.items():\n click.echo(f\"{name}: {value}\")", "title": "" }, { "docid": "1b5e247e5841b7d539da442453852fda", "score": "0.47505522", "text": "def read_file(filename=\"\"):\n with open(filename, encoding=\"utf-8\") as Myfile:\n for line in Myfile:\n print(line, end='')", "title": "" }, { "docid": "bab8d3782d0230727a5eda40ac1a9868", "score": "0.47500336", "text": "def _file_contains(self, file_path, target_strings):\n for target_string in target_strings:\n if target_string in open(file_path).read():\n return True\n return False", "title": "" }, { "docid": "d52906fd115e8a3ec9f3c2b761532e56", "score": "0.47491768", "text": "def parseSearchResults( searchResultsFile ):\n searchResultText = []\n for lines in open( searchResultsFile, 'r').readlines():\n if not lines[0] == '%':\n searchResultText.append(lines.rstrip())\n return searchResultText", "title": "" }, { "docid": "5bd25de888e18f9df25eca4ea0fa0b5b", "score": "0.4748586", "text": "def search_log_set(type, string, test_data):\n logs_path = test_data.logs_dir\n results = []\n for file in os.listdir(logs_path):\n if type in file:\n log = codecs.open(make_path(logs_path, file), \"r\", \"utf_8\")\n try:\n for line in log:\n if string in line:\n results.append(line)\n log.close()\n except UnicodeDecodeError:\n pass\n return results", "title": "" }, { "docid": "ee639d2c808456f307d8febd704bb0b6", "score": "0.47438487", "text": "def test_readlines_from_file_with_bad_encoding2(self):\n # This causes a warning on Python 3.\n with warnings.catch_warnings(record=True):\n self.assertTrue(autopep8.readlines_from_file(\n os.path.join(ROOT_DIR, 'test', 'bad_encoding2.py')))", "title": "" }, { "docid": "a16ffc912a284f04ce156b9e02ba1b20", "score": "0.4740126", "text": "def _find_text_in_file(filename: str, start_prompt: str, end_prompt: str) -> Tuple[str, int, int, List[str]]:\n with open(filename, \"r\", encoding=\"utf-8\", newline=\"\\n\") as f:\n lines = f.readlines()\n # Find the start prompt.\n start_index = 0\n while not lines[start_index].startswith(start_prompt):\n start_index += 1\n start_index += 1\n\n end_index = start_index\n while not 
lines[end_index].startswith(end_prompt):\n end_index += 1\n end_index -= 1\n\n while len(lines[start_index]) <= 1:\n start_index += 1\n while len(lines[end_index]) <= 1:\n end_index -= 1\n end_index += 1\n return \"\".join(lines[start_index:end_index]), start_index, end_index, lines", "title": "" }, { "docid": "4bd3cfb14e38c1f0b8f65d896e78ea1d", "score": "0.4717685", "text": "def get_pinyins(vocab_file):\n vocab = collections.OrderedDict()\n with open(vocab_file, \"r\", encoding=\"utf-8\") as reader:\n tokens = reader.readlines()\n for index, token in enumerate(tokens):\n token = token.rstrip(\"\\n\")\n vocab[token] = index\n \n pinyins=[]\n\n for token in vocab.keys():\n for char in token:\n if is_chinese_char(ord(char)) and any(x in pinyin(char,style=Style.NORMAL)[0][0] for x in 'aoeiuvn'):\n pinyins.append(pinyin(char,style=Style.NORMAL)[0][0])\n pinyins=sorted({*pinyins})\n return pinyins", "title": "" }, { "docid": "50a7c6aa120af63b8e7ff24db2970f5d", "score": "0.4714348", "text": "def listJsFilesWithChinese(rootpath):\n matches = []\n print(\"Enumerating code files with Chinese strings ...\")\n for filepath in glob.glob(os.path.join(rootpath, '*.js')):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n if containsChineseCharacters(f.read()):\n matches.append(filepath)\n print(\"Found \"+str(len(matches)))\n return matches", "title": "" }, { "docid": "a88054ba0dd81eebbb1ae7c624ab2f35", "score": "0.4714035", "text": "def search(text):", "title": "" }, { "docid": "74e0e5b1583c3c31515c4a230602b49f", "score": "0.47137478", "text": "def read_file(filename=\"\"):\n with open(filename, encoding=\"utf-8\") as MyFile:\n for line in MyFile:\n print(line, end=\"\")", "title": "" }, { "docid": "ee6afa73e131f3492d8f26742de89554", "score": "0.47084865", "text": "def get_words(path: str, letters: List[str]) -> List[str]:\n good_words = []\n main_letter = letters[4]\n with open(path, \"r\", encoding=\"utf-8\") as dictionary:\n lines = dictionary.readlines()\n for i in range(3, len(lines)):\n lines[i] = lines[i].lower().strip()\n if (main_letter in lines[i]) and len(lines[i]) >= 4:\n check = 1\n for letterl in lines[i]:\n if (letterl not in letters) or lines[i].count(letterl) > letters.count(letterl):\n check = 0\n if check == 1:\n good_words.append(lines[i])\n return good_words", "title": "" }, { "docid": "6cc7f4af6d07244ac5a8eefc54b1f12f", "score": "0.4701635", "text": "def knipt(alpaca_seq):\n try:\n bestand = open(\"enzymen.txt\")\n for line in bestand:\n naam, seq = line.split(\" \")\n seq = seq.strip().replace(\"^\", \"\")\n if seq in alpaca_seq:\n print(naam, \"knipt in sequentie\")\n except IOError:\n print(\"Het bestand kan niet geopent worden\")", "title": "" }, { "docid": "84e567c86af9205dde0ea69c60498607", "score": "0.46931186", "text": "def test_valid_file_input(self): \r\n input_file = 'animal.pzl'\r\n argv = ['', input_file]\r\n\r\n (chars_array, words) = WordSearch.process_input(argv)\r\n\r\n expected_array = ['CIRN', 'ADOG', 'TCIS', 'KCOW']\r\n self.assertListEqual(chars_array, expected_array)\r\n expected_words = ['CAT', 'DOG', 'COW']\r\n self.assertListEqual(words, expected_words)", "title": "" }, { "docid": "3584ab52bc7ae083382747ca17c6fca4", "score": "0.4683109", "text": "def test_egrep_char_list(self):\n test_file = FileAsObj()\n test_file.add(TESTCONTENTS)\n subject = 'h[o0]stname'\n result = test_file.egrep(subject)\n self.assertTrue(result)\n self.assertIsInstance(result, list)", "title": "" }, { "docid": "d6de06f33651f173b86e0c8dd4a1b5ae", "score": "0.4679841", "text": 
"def clean_file(path):\n letters = list(string.ascii_lowercase)\n cleaned_words = [] \n\n with open(path, 'r') as in_f:\n words = ('').join(in_f.readlines()).split()\n for w in words:\n if not [l for l in letters if l in w]:\n cleaned_words.append(w)\n\n cleaned_file = os.path.join('tmp', path)\n with open(cleaned_file, 'w', encoding='utf8') as out_f:\n for w in cleaned_words:\n out_f.write(w + '\\n')\n\n return cleaned_file", "title": "" }, { "docid": "74bccb535400c793bf96e4ea426ed7b4", "score": "0.4678353", "text": "def test_include_grep(text, expected, tmpdir):\r\n src = tmpdir.join('include_grep')\r\n src.write(text)\r\n assert [str(x) for x in Include.grep(str(src))] == expected", "title": "" }, { "docid": "2ba4dba378cb02351b734807506edce9", "score": "0.46767014", "text": "def checkRegex(regex, textFileContents):\n regexMatch = regex.findall(textFileContents, re.I)\n print(regexMatch)", "title": "" }, { "docid": "80728f848bde27c1a70ce26d0eb132f4", "score": "0.46760952", "text": "def file_contains(filepath, searchtext, lines=15, reverse=True):\n with open(filepath) as f:\n data = f.readlines()\n if reverse:\n data = reversed(data)\n for lineno, line in enumerate(data):\n if searchtext in line and lineno < lines:\n logger._log_to_console_and_log_file('The entry \"{0}\" exists in log file'.format(searchtext))\n return True\n return False", "title": "" }, { "docid": "52b659fccfe254ec373edb5e933f4969", "score": "0.46712622", "text": "def process_file(filename, skip_header):\n hist = {}\n\n #local = os.getcwd()\n #fp = open(local + '/' + filename, \"r\")\n fp = open(filename)\n #words = fin.read()\n\n if skip_header:\n skip_gutenberg_header(fp)\n\n for line in fp:\n if line.startswith('*** END OF THIS'):\n break\n\n process_line(line, hist)\n\n return hist", "title": "" }, { "docid": "2c96da42c64e8a37db90d341153dbdc0", "score": "0.46687537", "text": "def print_info(fn):\n info = [0]*3 # lines, words, chars\n with open(fn, 'r') as infile:\n for line in infile:\n info[0] += 1\n words = line.split()\n info[1] += len(words)\n string = ''.join(words)\n info[2] += len(string)\n print(' '.join([str(num) for num in info]) + ' ' + fn)", "title": "" }, { "docid": "f5ebb22c4a7768869c13b4372500aa82", "score": "0.4666645", "text": "def ide_find_file():", "title": "" } ]
9024498c7a6b12c88b2e7355fd6b45a0
cast(itkLightObject obj) -> itkMirrorPadImageFilterIUS2IUS2
[ { "docid": "f97e644df5f9ca2980068d6c4d7199f8", "score": "0.8566191", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUS2IUS2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUS2IUS2_cast(obj)", "title": "" } ]
[ { "docid": "6c84b2c32ac5fcd58068eb1711acd375", "score": "0.85065377", "text": "def itkMirrorPadImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUS2IUS2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "cc1660c6ece4943ccd57656aa43e2666", "score": "0.8072607", "text": "def cast(obj: 'itkLightObject') -> \"itkOrImageFilterIUS2IUS2IUS2 *\":\n return _itkOrImageFilterPython.itkOrImageFilterIUS2IUS2IUS2_cast(obj)", "title": "" }, { "docid": "c41465f75f2fade1c01b4e2aeb85e66b", "score": "0.8068315", "text": "def itkOrImageFilterIUS2IUS2IUS2_cast(obj: 'itkLightObject') -> \"itkOrImageFilterIUS2IUS2IUS2 *\":\n return _itkOrImageFilterPython.itkOrImageFilterIUS2IUS2IUS2_cast(obj)", "title": "" }, { "docid": "f37dddea35e01b7b43b2b391b30815d6", "score": "0.8002967", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2IUS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "784c7a6596ed8eaf3b65e14bd810cbbe", "score": "0.7974724", "text": "def cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIUS2IUS2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "110f515586101ff8096085fb83cbf0e8", "score": "0.79170597", "text": "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueDifferenceImageFilterIUS2IUS2IUS2 *\":\n return _itkConstrainedValueDifferenceImageFilterPython.itkConstrainedValueDifferenceImageFilterIUS2IUS2IUS2_cast(obj)", "title": "" }, { "docid": "d0077ec8b45097cc3f6f91721f70d3ba", "score": "0.7907057", "text": "def cast(obj: 'itkLightObject') -> \"itkLog10ImageFilterIUS2IUS2 *\":\n return _itkLog10ImageFilterPython.itkLog10ImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "cdc2abc41d19295bb5ae5c392028d248", "score": "0.7897151", "text": "def itkSimilarityIndexImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIUS2IUS2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "d4ca98bfababa87183b0edbe7ce3e5ac", "score": "0.7832439", "text": "def itkHardConnectedComponentImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2IUS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "d97192d6e9767f2dd5990782f1bfebd7", "score": "0.78222173", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUC2IUC2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "5835a0cc383c52ff04809c504bb6c891", "score": "0.78203297", "text": "def cast(obj: 'itkLightObject') -> \"itkMattesMutualInformationImageToImageMetricIUS2IUS2 *\":\n return _itkMattesMutualInformationImageToImageMetricPython.itkMattesMutualInformationImageToImageMetricIUS2IUS2_cast(obj)", "title": "" }, { "docid": "cc5176d0bff5c881a2cf87bb605f6e1b", "score": "0.7817088", "text": "def itkLog10ImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkLog10ImageFilterIUS2IUS2 *\":\n return _itkLog10ImageFilterPython.itkLog10ImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "56b1592cd68bc993978d2fa15fa6b29a", "score": "0.78031105", "text": "def cast(obj: 'itkLightObject') -> \"itkMultiResolutionPyramidImageFilterIUS2IUS2 *\":\n return 
_itkMultiResolutionPyramidImageFilterPython.itkMultiResolutionPyramidImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "d12827417d1b77b6fb56162ef67495db", "score": "0.77923274", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIF2IF2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "99171571dfcf9d29ddc27cc8c640d1a2", "score": "0.7782287", "text": "def cast(obj: 'itkLightObject') -> \"itkMaximumImageFilterIUS2IUS2IUS2 *\":\n return _itkMaximumImageFilterPython.itkMaximumImageFilterIUS2IUS2IUS2_cast(obj)", "title": "" }, { "docid": "8f65ecf82d5b1c5337e754cf9026a6f1", "score": "0.77781147", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2IUL2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2IUL2_cast(obj)", "title": "" }, { "docid": "d40b78ad6cde7e9e8f6908bc345c19f5", "score": "0.77665335", "text": "def itkVotingBinaryImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkVotingBinaryImageFilterIUS2IUS2 *\":\n return _itkVotingBinaryImageFilterPython.itkVotingBinaryImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "bf826cfd7b5633c8bfdc8fedc664da99", "score": "0.7756983", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryShapeKeepNObjectsImageFilterIUS2 *\":\n return _itkBinaryShapeKeepNObjectsImageFilterPython.itkBinaryShapeKeepNObjectsImageFilterIUS2_cast(obj)", "title": "" }, { "docid": "b47fad72920555e71aed136be388d8c3", "score": "0.7754128", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUC2IUS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUC2IUS2_cast(obj)", "title": "" }, { "docid": "02d3acccb902e2a54b271fb3d0029056", "score": "0.77458566", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2IUC2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2IUC2_cast(obj)", "title": "" }, { "docid": "25a3b69d17b36a112a1c12a069d09453", "score": "0.77391255", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUS3IUS3 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "e81130260982b6c022df31e10bb39ba0", "score": "0.7733113", "text": "def cast(obj: 'itkLightObject') -> \"itkVotingBinaryImageFilterIUS2IUS2 *\":\n return _itkVotingBinaryImageFilterPython.itkVotingBinaryImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "e8d6319b3a1bae95b18cfe769eb754eb", "score": "0.7719945", "text": "def itkMirrorPadImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUC2IUC2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "546e12cbdd60c46b473567740c93a07e", "score": "0.77104723", "text": "def itkConstrainedValueDifferenceImageFilterIUS2IUS2IUS2_cast(obj: 'itkLightObject') -> \"itkConstrainedValueDifferenceImageFilterIUS2IUS2IUS2 *\":\n return _itkConstrainedValueDifferenceImageFilterPython.itkConstrainedValueDifferenceImageFilterIUS2IUS2IUS2_cast(obj)", "title": "" }, { "docid": "35d3d540bacbd52f81a43d8295e2e5d7", "score": "0.770884", "text": "def itkBinaryShapeKeepNObjectsImageFilterIUS2_cast(obj: 'itkLightObject') -> \"itkBinaryShapeKeepNObjectsImageFilterIUS2 *\":\n return _itkBinaryShapeKeepNObjectsImageFilterPython.itkBinaryShapeKeepNObjectsImageFilterIUS2_cast(obj)", "title": "" }, 
{ "docid": "2c36e8a505542272b16271e1d4c82580", "score": "0.76961863", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryDilateImageFilterIUS2IUS2SE2 *\":\n return _itkBinaryDilateImageFilterPython.itkBinaryDilateImageFilterIUS2IUS2SE2_cast(obj)", "title": "" }, { "docid": "971853d038169c287e9b846853698add", "score": "0.76825905", "text": "def cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2IUS2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "ae38506843d454840aa7384586b25b3a", "score": "0.7655719", "text": "def itkMirrorPadImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIF2IF2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "f149fe79d927be44805e4cfffc82127d", "score": "0.7649655", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2ISS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2ISS2_cast(obj)", "title": "" }, { "docid": "74c0be342b642599223c4fa00da31bd6", "score": "0.76162875", "text": "def itkStatisticsRelabelImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2IUS2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "1842058ee39e9b78577c6458b9153f58", "score": "0.7610304", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterISS2ISS2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterISS2ISS2_cast(obj)", "title": "" }, { "docid": "f50f1521542ef33ab78e11a301bddbf5", "score": "0.7605806", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterISS2IUS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterISS2IUS2_cast(obj)", "title": "" }, { "docid": "68f8a24b2d797e8f60309c3800981860", "score": "0.75889367", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF22IVF22 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF22IVF22_cast(obj)", "title": "" }, { "docid": "83c5f505eedcf711e1bb23f60417d6b6", "score": "0.7583324", "text": "def itkHardConnectedComponentImageFilterIUC2IUS2_cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUC2IUS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUC2IUS2_cast(obj)", "title": "" }, { "docid": "89abd75a920b20f367ad4f2c040ba054", "score": "0.7577344", "text": "def itkMattesMutualInformationImageToImageMetricIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkMattesMutualInformationImageToImageMetricIUS2IUS2 *\":\n return _itkMattesMutualInformationImageToImageMetricPython.itkMattesMutualInformationImageToImageMetricIUS2IUS2_cast(obj)", "title": "" }, { "docid": "0ece93b04bb3e956285a8da16c6b92cc", "score": "0.7575149", "text": "def itkMaximumImageFilterIUS2IUS2IUS2_cast(obj: 'itkLightObject') -> \"itkMaximumImageFilterIUS2IUS2IUS2 *\":\n return _itkMaximumImageFilterPython.itkMaximumImageFilterIUS2IUS2IUS2_cast(obj)", "title": "" }, { "docid": "868d3880d51a4b8613e65facb1dc0a7b", "score": "0.7561737", "text": "def itkMultiResolutionPyramidImageFilterIUS2IUS2_cast(obj: 'itkLightObject') -> \"itkMultiResolutionPyramidImageFilterIUS2IUS2 *\":\n return _itkMultiResolutionPyramidImageFilterPython.itkMultiResolutionPyramidImageFilterIUS2IUS2_cast(obj)", "title": "" }, { 
"docid": "e67a565e9c01af85963f4ff819474d62", "score": "0.7534769", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF32IVF32 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF32IVF32_cast(obj)", "title": "" }, { "docid": "621497f8085320f2562322908c773ec2", "score": "0.75341547", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICF2ICF2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICF2ICF2_cast(obj)", "title": "" }, { "docid": "62d8042e914f048420fb810484043c17", "score": "0.7531506", "text": "def itkHardConnectedComponentImageFilterIUS2IUL2_cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2IUL2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2IUL2_cast(obj)", "title": "" }, { "docid": "a74f4566d0d809a29d341c2c4cb34c32", "score": "0.75312936", "text": "def cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2IF2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2IF2_cast(obj)", "title": "" }, { "docid": "de9b30b4f68f9017a754b1ca5ffdcc38", "score": "0.75291044", "text": "def cast(obj: 'itkLightObject') -> \"itkOrImageFilterIUS3IUS3IUS3 *\":\n return _itkOrImageFilterPython.itkOrImageFilterIUS3IUS3IUS3_cast(obj)", "title": "" }, { "docid": "153408bdf1bbd28f5bd78090fea3ec31", "score": "0.7526186", "text": "def itkStatisticsRelabelImageFilterIUS2IF2_cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2IF2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2IF2_cast(obj)", "title": "" }, { "docid": "3d8615d78c68e2725bf45e7cdab16f82", "score": "0.7522421", "text": "def cast(obj: 'itkLightObject') -> \"itkRecursiveMultiResolutionPyramidImageFilterIUS2IUS2 *\":\n return _itkRecursiveMultiResolutionPyramidImageFilterPython.itkRecursiveMultiResolutionPyramidImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "dc142f84dff33a5751c376070ff6682d", "score": "0.7502016", "text": "def itkBinaryDilateImageFilterIUS2IUS2SE2_cast(obj: 'itkLightObject') -> \"itkBinaryDilateImageFilterIUS2IUS2SE2 *\":\n return _itkBinaryDilateImageFilterPython.itkBinaryDilateImageFilterIUS2IUS2SE2_cast(obj)", "title": "" }, { "docid": "61eb45788f87e815f6b6aa0f7ea685c7", "score": "0.749498", "text": "def cast(obj: 'itkLightObject') -> \"itkTanImageFilterIF2IF2 *\":\n return _itkTanImageFilterPython.itkTanImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "89a0c7c4d18098d056ca4ec95e881b13", "score": "0.7490734", "text": "def itkHardConnectedComponentImageFilterISS2IUS2_cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterISS2IUS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterISS2IUS2_cast(obj)", "title": "" }, { "docid": "6d6f94e8d8b90e6972627981cf4cb336", "score": "0.7489492", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIRGBUC2IRGBUC2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIRGBUC2IRGBUC2_cast(obj)", "title": "" }, { "docid": "6e3cfe8e91356d63dc315408807a9c02", "score": "0.7481681", "text": "def cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUC2IUS2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUC2IUS2_cast(obj)", "title": "" }, { "docid": "f22e5a975d1a670a159ca417e9aadb48", "score": "0.74759054", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF42IVF42 *\":\n return 
_itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF42IVF42_cast(obj)", "title": "" }, { "docid": "8f05e7c2e629c1857b21b30fbc2f3616", "score": "0.7469185", "text": "def cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIUC2IUC2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "58f753235ca8f33d8f728182b8eefe80", "score": "0.7463273", "text": "def cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIF2IF2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "c22d1f93c169159d55cc0f93c396945c", "score": "0.7459245", "text": "def itkHardConnectedComponentImageFilterIUS2IUC2_cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2IUC2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2IUC2_cast(obj)", "title": "" }, { "docid": "42499b2774eddcb2061da6d327c37d89", "score": "0.74526864", "text": "def cast(obj: 'itkLightObject') -> \"itkApproximateSignedDistanceMapImageFilterIF2IF2 *\":\n return _itkApproximateSignedDistanceMapImageFilterPython.itkApproximateSignedDistanceMapImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "4448c6821f2f7350fa0068839c62b47b", "score": "0.74514365", "text": "def itkTanImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkTanImageFilterIF2IF2 *\":\n return _itkTanImageFilterPython.itkTanImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "43561baeea32d9bb2bcd490b28d2f987", "score": "0.7450657", "text": "def cast(obj: 'itkLightObject') -> \"itkSmoothingRecursiveGaussianImageFilterIUS2IUS2 *\":\n return _itkSmoothingRecursiveGaussianImageFilterPython.itkSmoothingRecursiveGaussianImageFilterIUS2IUS2_cast(obj)", "title": "" }, { "docid": "30c9ce50c85464ab70983d9b00b38659", "score": "0.74442166", "text": "def itkMirrorPadImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterISS2ISS2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterISS2ISS2_cast(obj)", "title": "" }, { "docid": "c0f81ed2e806ca910e4bbfa1b37280e1", "score": "0.74346066", "text": "def cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2IUC2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2IUC2_cast(obj)", "title": "" }, { "docid": "6c32e6bc90bff4a5a2b07021374e0474", "score": "0.74320346", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF23IVF23 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF23IVF23_cast(obj)", "title": "" }, { "docid": "439273a4f19fefdf17dc6020c1b66154", "score": "0.74245685", "text": "def itkMirrorPadImageFilterIUS3IUS3_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUS3IUS3 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "99d9334cb540d5a91fb585bdd653a45e", "score": "0.74163187", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF43IVF43 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF43IVF43_cast(obj)", "title": "" }, { "docid": "69f975ae710ff64d420ac169c5eb4692", "score": "0.7415978", "text": "def itkHardConnectedComponentImageFilterIUS2ISS2_cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS2ISS2 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS2ISS2_cast(obj)", "title": "" }, { "docid": 
"5ff7cece74d86df2dbd24a11d9be7ceb", "score": "0.73974127", "text": "def cast(obj: 'itkLightObject') -> \"itkConstrainedValueDifferenceImageFilterIUS3IUS3IUS3 *\":\n return _itkConstrainedValueDifferenceImageFilterPython.itkConstrainedValueDifferenceImageFilterIUS3IUS3IUS3_cast(obj)", "title": "" }, { "docid": "61b2c3e95bfd28a7ede7ef9f6b1b13fd", "score": "0.7393072", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryDilateImageFilterIUS3IUS3SE3 *\":\n return _itkBinaryDilateImageFilterPython.itkBinaryDilateImageFilterIUS3IUS3SE3_cast(obj)", "title": "" }, { "docid": "23887498d452ddf9a39a053226249ebe", "score": "0.7382858", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS3IUS3 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "03a6db6d5fc0403da2092b2f265b8e8d", "score": "0.7358753", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF33IVF33 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF33IVF33_cast(obj)", "title": "" }, { "docid": "8d824f235c0cdb2a348264a5bd633057", "score": "0.7338795", "text": "def itkSimilarityIndexImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIF2IF2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIF2IF2_cast(obj)", "title": "" }, { "docid": "19adadb609de64f8f77ffa171cc7882e", "score": "0.7337005", "text": "def itkStatisticsRelabelImageFilterIUC2IUS2_cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUC2IUS2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUC2IUS2_cast(obj)", "title": "" }, { "docid": "d7574b49071df1e7ce7975917c75dde0", "score": "0.73346215", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICVF43ICVF43 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICVF43ICVF43_cast(obj)", "title": "" }, { "docid": "4656cec081ce52875481be67bcfae267", "score": "0.7333657", "text": "def cast(obj: 'itkLightObject') -> \"itkMattesMutualInformationImageToImageMetricIUS3IUS3 *\":\n return _itkMattesMutualInformationImageToImageMetricPython.itkMattesMutualInformationImageToImageMetricIUS3IUS3_cast(obj)", "title": "" }, { "docid": "723f5ec0a8dc24570b6a1c989dfb42cf", "score": "0.73239154", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICVF22ICVF22 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICVF22ICVF22_cast(obj)", "title": "" }, { "docid": "1d7b70273a2cd86a1c0f9998889dc927", "score": "0.73001635", "text": "def cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2ISS2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2ISS2_cast(obj)", "title": "" }, { "docid": "52c8d5e2a4e1696083af7dfbcfa216c5", "score": "0.72964656", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICVF42ICVF42 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICVF42ICVF42_cast(obj)", "title": "" }, { "docid": "dba200127da8e6842d9bd039d167dd91", "score": "0.72955775", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICVF23ICVF23 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICVF23ICVF23_cast(obj)", "title": "" }, { "docid": "571983bf8fb70f078a377a1bf1aba28e", "score": "0.72924536", "text": "def cast(obj: 'itkLightObject') -> \"itkOrImageFilterIUC2IUC2IUC2 *\":\n return 
_itkOrImageFilterPython.itkOrImageFilterIUC2IUC2IUC2_cast(obj)", "title": "" }, { "docid": "d7b12c759c79b48a10c5f7307bc0928b", "score": "0.7287482", "text": "def cast(obj: 'itkLightObject') -> \"itkGrayscaleFunctionDilateImageFilterIUS2IUS2SE2 *\":\n return _itkGrayscaleFunctionDilateImageFilterPython.itkGrayscaleFunctionDilateImageFilterIUS2IUS2SE2_cast(obj)", "title": "" }, { "docid": "203d85d8ebf8b9cc720acf73a108801c", "score": "0.7283774", "text": "def itkSimilarityIndexImageFilterIUC2IUC2_cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIUC2IUC2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "2f54e5fbcbc30f051cde6e37369c06b9", "score": "0.7282979", "text": "def itkOrImageFilterIUS3IUS3IUS3_cast(obj: 'itkLightObject') -> \"itkOrImageFilterIUS3IUS3IUS3 *\":\n return _itkOrImageFilterPython.itkOrImageFilterIUS3IUS3IUS3_cast(obj)", "title": "" }, { "docid": "190290f89a2586ba1e3912152709ed7b", "score": "0.72817874", "text": "def itkMirrorPadImageFilterIVF32IVF32_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF32IVF32 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF32IVF32_cast(obj)", "title": "" }, { "docid": "1d27c19e8aaf8d51ce2b3c958a1cd882", "score": "0.728107", "text": "def cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterIUS3IUS3 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "7ceee71bb31159670a0d2e5b10b83024", "score": "0.7267332", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryShapeKeepNObjectsImageFilterIUC2 *\":\n return _itkBinaryShapeKeepNObjectsImageFilterPython.itkBinaryShapeKeepNObjectsImageFilterIUC2_cast(obj)", "title": "" }, { "docid": "ac799a60e837154af47c8032353af308", "score": "0.7258054", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIUC3IUC3 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIUC3IUC3_cast(obj)", "title": "" }, { "docid": "ec3f943426084f133c44883aa109b44c", "score": "0.7252526", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS3IUL3 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS3IUL3_cast(obj)", "title": "" }, { "docid": "730f8321febcda94656f441d2ee3be41", "score": "0.7252281", "text": "def cast(obj: 'itkLightObject') -> \"itkScalarToRGBColormapImageFilterIUS2IRGBUC2 *\":\n return _itkScalarToRGBColormapImageFilterPython.itkScalarToRGBColormapImageFilterIUS2IRGBUC2_cast(obj)", "title": "" }, { "docid": "a5cb70ab785e8e5a59fab8680c6eafc7", "score": "0.7251125", "text": "def itkStatisticsRelabelImageFilterIUS2IUC2_cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2IUC2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2IUC2_cast(obj)", "title": "" }, { "docid": "5bf89eaf81f89114bcc60131bfa5b6c6", "score": "0.7250818", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIRGBAUC2IRGBAUC2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIRGBAUC2IRGBAUC2_cast(obj)", "title": "" }, { "docid": "8c99a5fa3babb356c80b94fac418942b", "score": "0.7246496", "text": "def cast(obj: 'itkLightObject') -> \"itkImageToMeshFilterIUS2MD2QEMTD2BBFF *\":\n return _itkQuadEdgeMeshBasePython.itkImageToMeshFilterIUS2MD2QEMTD2BBFF_cast(obj)", "title": "" }, { "docid": "6e03349888166508385a6c393fa8e9ff", "score": "0.7241738", "text": "def 
itkMirrorPadImageFilterICF2ICF2_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICF2ICF2 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICF2ICF2_cast(obj)", "title": "" }, { "docid": "f691c0970bf665cbc412e3a7133e0525", "score": "0.7238877", "text": "def itkMirrorPadImageFilterIVF22IVF22_cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterIVF22IVF22 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterIVF22IVF22_cast(obj)", "title": "" }, { "docid": "dedde4fe6bd9e7fc6e6dcd7abde3343e", "score": "0.7237588", "text": "def cast(obj: 'itkLightObject') -> \"itkMattesMutualInformationImageToImageMetricIF2IF2 *\":\n return _itkMattesMutualInformationImageToImageMetricPython.itkMattesMutualInformationImageToImageMetricIF2IF2_cast(obj)", "title": "" }, { "docid": "df90b8dc20f44ef27471c93d89af899d", "score": "0.7229271", "text": "def cast(obj: 'itkLightObject') -> \"itkHardConnectedComponentImageFilterIUS3IUC3 *\":\n return _itkHardConnectedComponentImageFilterPython.itkHardConnectedComponentImageFilterIUS3IUC3_cast(obj)", "title": "" }, { "docid": "c9a6af72023d0021d5fda2a60e6569da", "score": "0.72267586", "text": "def itkOrImageFilterIUC2IUC2IUC2_cast(obj: 'itkLightObject') -> \"itkOrImageFilterIUC2IUC2IUC2 *\":\n return _itkOrImageFilterPython.itkOrImageFilterIUC2IUC2IUC2_cast(obj)", "title": "" }, { "docid": "d77840881c38cb146bf3bc3b84c5c135", "score": "0.7218331", "text": "def cast(obj: 'itkLightObject') -> \"itkSimilarityIndexImageFilterISS2ISS2 *\":\n return _itkSimilarityIndexImageFilterPython.itkSimilarityIndexImageFilterISS2ISS2_cast(obj)", "title": "" }, { "docid": "d11ebc5bd0a6adbd09e48a6e65d878ae", "score": "0.7216957", "text": "def cast(obj: 'itkLightObject') -> \"itkMirrorPadImageFilterICVF33ICVF33 *\":\n return _itkMirrorPadImageFilterPython.itkMirrorPadImageFilterICVF33ICVF33_cast(obj)", "title": "" }, { "docid": "5d08ed250399e2744de1dbdd416749c8", "score": "0.7212069", "text": "def cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS3IUS3 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "a7496d2e829e9931797d4cd3db6cadee", "score": "0.7201561", "text": "def cast(obj: 'itkLightObject') -> \"itkLog10ImageFilterIUS3IUS3 *\":\n return _itkLog10ImageFilterPython.itkLog10ImageFilterIUS3IUS3_cast(obj)", "title": "" }, { "docid": "e37575e6ec8e21bbb3e970aa27cffd0f", "score": "0.7200743", "text": "def cast(obj: 'itkLightObject') -> \"itkLog10ImageFilterIUC2IUC2 *\":\n return _itkLog10ImageFilterPython.itkLog10ImageFilterIUC2IUC2_cast(obj)", "title": "" }, { "docid": "78bd4b0a68dd41e3cae031169f141001", "score": "0.71976787", "text": "def cast(obj: 'itkLightObject') -> \"itkBinaryShapeKeepNObjectsImageFilterISS2 *\":\n return _itkBinaryShapeKeepNObjectsImageFilterPython.itkBinaryShapeKeepNObjectsImageFilterISS2_cast(obj)", "title": "" }, { "docid": "8b2659c2582b4bc813a81a37c4a26f10", "score": "0.7192759", "text": "def itkStatisticsRelabelImageFilterIUS2ISS2_cast(obj: 'itkLightObject') -> \"itkStatisticsRelabelImageFilterIUS2ISS2 *\":\n return _itkStatisticsRelabelImageFilterPython.itkStatisticsRelabelImageFilterIUS2ISS2_cast(obj)", "title": "" }, { "docid": "d48d06e316ec40e0aea98bda9b10cf88", "score": "0.71851605", "text": "def cast(obj: 'itkLightObject') -> \"itkMultiResolutionPyramidImageFilterIUS3IUS3 *\":\n return _itkMultiResolutionPyramidImageFilterPython.itkMultiResolutionPyramidImageFilterIUS3IUS3_cast(obj)", "title": "" } ]
c1e62984ba020ecf718c16d15fed7c96
Returns a small dataset of data.
[ { "docid": "5b1a5928c84dcc51b86ea54e013ac8ac", "score": "0.0", "text": "def get_data(path_data, transformation=None):\r\n\r\n # get a list of all the images\r\n img_list = os.listdir(path_data)\r\n\r\n # throw away files that are not in the allowed format (png or jpg)\r\n for img_file in img_list:\r\n if not (img_file.endswith(\".png\") or img_file.endswith(\".jpg\")):\r\n img_list.remove(img_file)\r\n\r\n # fill up data matrix\r\n X = []\r\n X_filenames = []\r\n for i in range(len(img_list)):\r\n img = PIL.Image.open('{}/{}'.format(path_data, img_list[i]))\r\n if len(X) == 0:\r\n X = [img]\r\n else:\r\n X.append(img)\r\n X_filenames.append(''.join(img_list[i].split('.')[:-1]))\r\n\r\n # cast to image values that can be displayed directly with plt.imshow()\r\n X_im = [np.array(img) for img in X]\r\n\r\n # preprocess\r\n if transformation == None:\r\n transformation = transforms.Compose([\r\n transforms.ToTensor()\r\n ])\r\n\r\n X_pre = []\r\n for img in X:\r\n X_pre.append(transformation(img).unsqueeze_(0))\r\n X = X_pre\r\n\r\n return X, X_im, X_filenames", "title": "" } ]
[ { "docid": "6b8ac51f166d56feae905fb66d14e31b", "score": "0.7209428", "text": "def dataset(self) -> global___Dataset:", "title": "" }, { "docid": "8bacfe130d4b5ad546bc19587b311be3", "score": "0.70424104", "text": "def dataset():\n return obsplus.load_dataset(DATASET_NAME)", "title": "" }, { "docid": "aa7a04d14220fdab9fd01612a56f9c50", "score": "0.69821626", "text": "def createDataset():", "title": "" }, { "docid": "11811897817555149bf7916aa643ff11", "score": "0.68287086", "text": "def make_dataset():\r\n \r\n #print ts[0:4]\r\n data = SupervisedDataSet(INPUTS,1)\r\n \r\n normalize(ts);\r\n normalize(testdata);\r\n \r\n addValues(data, ts)\r\n \r\n \r\n return data", "title": "" }, { "docid": "f244754a372655e47772cb55f1cf7721", "score": "0.68265045", "text": "def get_train_dataset(self, *args, **kwargs) -> Dataset:", "title": "" }, { "docid": "38265b55ae698e987f702e959df8f2df", "score": "0.6818847", "text": "def generate_data(self):\n\n dataset = []\n for i in range(4):\n x = random.random() * 5\n y = random.random() * 5\n z = random.random() * 5\n datapoint = [scipy.array((x+random.normalvariate(0,0.1), y+random.normalvariate(0,0.1), z+random.normalvariate(0,0.1))) for j in range(100)]\n dataset += datapoint\n random.shuffle(dataset)\n\n return dataset", "title": "" }, { "docid": "f4fe158c47550fd954f9d459f0d15e12", "score": "0.662156", "text": "def create_dataset(function, data_size):\n xs = np.random.rand(data_size)\n ys = function(xs)\n return np.array([[x] for x in xs]), np.array([[y] for y in ys])", "title": "" }, { "docid": "9d9685f443a0719d4a7ac4ca9d0f3186", "score": "0.65977925", "text": "def train(self):\n return self.make_dataset(self.train_df, shuffle=self.shuffle)", "title": "" }, { "docid": "4a2985f9f611b2a3070665134361784f", "score": "0.6533691", "text": "def gen_single_dataset_data(dataset=DEFAULT_DATASET, test_ratio=TEST_RATIO, train_ratio=TRAIN_RATIO, train_on_excess=TRAIN_ON_EXCESS):\n datasets = (dataset,)\n base_index = index_by_label_and_name(dataset)\n\n test_label_name_index, train_label_name_index = get_split_indexes([\n test_ratio,\n train_ratio,\n ], base_index)\n\n train_paths, train_labels = deindex(train_label_name_index)\n\n database_paths, database_labels, query_paths, query_labels = index_data(\n test_label_name_index,\n excess_paths=train_paths if train_on_excess else None,\n excess_labels=train_labels if train_on_excess else None,\n )\n\n return locals()", "title": "" }, { "docid": "ada19afe64d266bd7ea216b8c5552ce9", "score": "0.6525249", "text": "def get_dataset():\n dataset = sets.Ocr()\n dataset = sets.OneHot(dataset.target, depth=2)(dataset, columns=['target'])\n dataset['data'] = dataset.data.reshape(\n dataset.data.shape[:-2] + (-1,)).astype(float)\n train, test = sets.Split(0.66)(dataset)\n return train, test", "title": "" }, { "docid": "bd8a62543779a46a3bca0653a5d3742a", "score": "0.65160865", "text": "def load_sampled_dataset():\n if arguments.Config[\"data\"][\"dataset\"] == \"CIFAR_SAMPLE\":\n X, labels, runnerup = load_cifar_sample_data(normalized=True, MODEL=arguments.Config['model']['name'])\n data_max = torch.tensor(preprocess_cifar(1.)).reshape(1,-1,1,1)\n data_min = torch.tensor(preprocess_cifar(0.)).reshape(1,-1,1,1)\n eps_temp = 2./255.\n eps_temp = torch.tensor(preprocess_cifar(eps_temp, perturbation=True)).reshape(1,-1,1,1)\n elif arguments.Config[\"data\"][\"dataset\"] == \"MNIST_SAMPLE\":\n X, labels, runnerup = load_mnist_sample_data(MODEL=arguments.Config['model']['name'])\n data_max = torch.tensor(1.).reshape(1,-1,1,1)\n data_min 
= torch.tensor(0.).reshape(1,-1,1,1)\n eps_temp = 0.3\n eps_temp = torch.tensor(eps_temp).reshape(1,-1,1,1)\n return X, labels, runnerup, data_max, data_min, eps_temp", "title": "" }, { "docid": "87ad63fd91aec4fe4cd1a42e8d8dc9aa", "score": "0.6484327", "text": "def create_dataset(data, target): \n ds = TensorDataset(\n torch.tensor(data).float(), \n torch.tensor(target).float())\n \n return ds", "title": "" }, { "docid": "76e983bcf236e751b6f15f2ddfe82623", "score": "0.64788914", "text": "def _get_dataset(scale):\n data = scipy.io.loadmat(\"data/arrhythmia.mat\")\n \n full_x_data = data[\"X\"]\n full_y_data = data['y']\n \n x_train, x_test, \\\n y_train, y_test = train_test_split(full_x_data,\n full_y_data,\n test_size=0.5,\n random_state=42)\n\n y_train = y_train.flatten().astype(int)\n y_test = y_test.flatten().astype(int)\n\n if scale:\n print(\"Scaling dataset\")\n scaler = MinMaxScaler()\n scaler.fit(x_train)\n x_train = scaler.transform(x_train)\n x_test = scaler.transform(x_test)\n\n \n dataset = {}\n dataset['x_train'] = x_train.astype(np.float32)\n dataset['y_train'] = y_train.astype(np.float32)\n dataset['x_test'] = x_test.astype(np.float32)\n dataset['y_test'] = y_test.astype(np.float32)\n\n return dataset", "title": "" }, { "docid": "172099d6dc17c5af03977d076274c380", "score": "0.6457569", "text": "def load():\n data = _get_data()\n names = data.columns.tolist()\n dtype = lzip(names, ['a45', 'a3', 'a40', 'a14'] + ['<f8'] * 54)\n data = lmap(tuple, data.values.tolist())\n dataset = du.Dataset(data=np.array(data, dtype=dtype).view(np.recarray), names=names)\n return dataset", "title": "" }, { "docid": "a8c38c218a7ad49059f86d011e34862f", "score": "0.64130986", "text": "def sample(self: \"Dataset[T]\", n_samples: int) -> T:", "title": "" }, { "docid": "887da3dccd400d0614299a0440f3d82c", "score": "0.64007306", "text": "def load_data_set():", "title": "" }, { "docid": "b3a06519a61e31dd9a27fde9ba3a3e99", "score": "0.63535535", "text": "def get_testing_dataset():\n return get_dataset(\"test.txt\")", "title": "" }, { "docid": "cbefba46a5381909814bc503a7964e60", "score": "0.6342473", "text": "def dataset(self):\n raise NotImplementedError", "title": "" }, { "docid": "616e70f06e58effdae924024cbbb5dc9", "score": "0.6324634", "text": "def from_dataset(self):", "title": "" }, { "docid": "28488f31287534a759711c10ae26662d", "score": "0.6320637", "text": "def dataset():\n return pd.DataFrame(data=[[1, 2, 3], [3, 4, 5]], columns=['A', 'B', 'C'])", "title": "" }, { "docid": "6e716af1a6a4e689958831e3bff56976", "score": "0.6298745", "text": "def get_newsDataset(self):\n self.generate_dataset()\n return self.dataset", "title": "" }, { "docid": "9d0fdcb8364df625d32aa0246463f96d", "score": "0.6294229", "text": "def _open_dataset(self):\n try:\n dataset = xarray.open_mfdataset(self.args.input, chunks={}, data_vars='minimal')\n except ValueError: # Decoding error?\n dataset = xarray.open_mfdataset(self.args.input, chunks={}, data_vars='minimal', decode_cf=False)\n\n if 'time' in dataset.dims.keys():\n dataset = dataset.chunk({'time':1})\n\n return dataset", "title": "" }, { "docid": "dd6043f2d5749a45bb55776f5f19c48e", "score": "0.6281908", "text": "def create_4_part_dataset(raw, fft, wavelets, target): \n ds = TensorDataset(\n torch.tensor(raw).float(), \n torch.tensor(fft).float(), \n torch.tensor(wavelets).float(), \n torch.tensor(target).float())\n \n return ds", "title": "" }, { "docid": "3ab2bb0c64904061d439e4b5043bbadc", "score": "0.6250987", "text": "def generate_data_set(self):\n # Initialize 
variables\n number_of_positive_labels = int(self.number_of_points*self.ratio)\n number_of_negative_labels = self.number_of_points - number_of_positive_labels\n positive_counter = 0\n negative_counter = 0\n counter = 0\n data_set = []\n labels = []\n\n # Fill up data_set list with random data points\n if self.number_of_points % 2 == 0:\n while counter < self.number_of_points:\n # Generate a random data point\n data_point = self._random_data_point_generator()\n\n # Make a label for the data_point\n label = self._labeler(data_point)\n\n # Check the label and increase counters\n if label == 1 and positive_counter < number_of_positive_labels:\n data_set.append(data_point)\n labels.append(label)\n positive_counter += 1\n counter += 1\n\n elif label == 0 and negative_counter < number_of_negative_labels:\n data_set.append(data_point)\n labels.append(label)\n negative_counter += 1\n counter += 1\n else:\n while counter < self.number_of_points:\n # Generate a random data point\n data_point = self._random_data_point_generator()\n\n # Make a label for the data_point\n label = self._labeler(data_point)\n\n # Check the label and increase counters\n if label == 1 and positive_counter < number_of_positive_labels:\n data_set.append(data_point)\n labels.append(label)\n positive_counter += 1\n counter += 1\n\n elif label == 0 and negative_counter < number_of_negative_labels:\n data_set.append(data_point)\n labels.append(label)\n negative_counter += 1\n counter += 1\n\n return np.array(data_set), np.array(labels)", "title": "" }, { "docid": "d04495e01ceba81636707b14cddb139c", "score": "0.6244427", "text": "def subsample_dataset(self):\n num_examples = self.size\n self.dataset = self.dataset.sample(num_examples).reset_index(drop=True)", "title": "" }, { "docid": "e23162a6afb48691ce3c2618ec2c3d11", "score": "0.62356997", "text": "def makeTrainingSet(data):\r\n trainingData = data[:len(data) // 2] # Double // used for integer division\r\n\r\n return trainingData # Return the data only containing the first half of the records\r", "title": "" }, { "docid": "50ff473604000a9871736847a1d7e7df", "score": "0.62188387", "text": "def full_dataset():\n return TabularDataset.from_path(train_path='tests/data/dummy_tabular/train.csv',\n val_path='tests/data/dummy_tabular/val.csv', sep=',')", "title": "" }, { "docid": "03d951b0fabda5960359ca4a9b13a209", "score": "0.62174684", "text": "def create_uganda_dataset():\n\n # features will be of shape (dataset size, num features)\n features = extract_uganda_features()\n\n # labels will be the same size as the dataset.\n labels = load_satellite_labels()\n\n features, labels = shuffle_data(features, labels)\n\n ## YOUR CODE HERE\n num_val = features.shape[0]//10\n num_train = features.shape[0] - num_val\n data_train = features[:num_train, :]\n data_validation = features[num_train:, :]\n labels_train = labels[:num_train]\n labels_validation = labels[num_train:]\n ## END YOUR CODE\n\n return data_train, labels_train, data_validation, labels_validation", "title": "" }, { "docid": "edf3ec8141d7d85878dfec7510d5ab87", "score": "0.6206444", "text": "def retrieve_full_datasets(host, dataset_ids, n=None):\r\n\r\n panda_datasets = []\r\n for i in (dataset_ids):\r\n\r\n # getting the sample and feature names\r\n samples_names = xena.dataset_samples(host, i, n)\r\n features_names = xena.dataset_field(host, i)\r\n\r\n # retrieving the full dataset\r\n dataset = xena.dataset_fetch(host, i, samples_names, features_names)\r\n\r\n # transposing the data so it is in the \"sample X features\" format\r\n 
dataset = np.array(dataset)\r\n dataset = dataset.T\r\n print(dataset.shape)\r\n\r\n panda_dataset = pd.DataFrame(data=dataset, # values\r\n index=samples_names, # 1st column as index\r\n columns=features_names) # 1st row as the column names\r\n\r\n panda_datasets.append(panda_dataset)\r\n\r\n return panda_datasets", "title": "" }, { "docid": "9e87e32833f2cf3056fb79d5ad838834", "score": "0.62062645", "text": "def _create_dataset(self, filename):\r\n dataset = tf.data.TFRecordDataset(filename)\r\n \r\n # Map each record\r\n dataset = dataset.map(self._parse_function) \r\n\r\n # shuffle dataset\r\n dataset = dataset.shuffle(buffer_size=1000)\r\n \r\n # batch the dataset\r\n dataset = dataset.batch(self.BATCH_SIZE)\r\n \r\n # repeat indefinitely\r\n dataset = dataset.repeat()\r\n \r\n return dataset", "title": "" }, { "docid": "dd618c4566ea1db72e47f321468d3296", "score": "0.6191369", "text": "def construct_dataset(res_list):\n pass", "title": "" }, { "docid": "f1898763279678250da194267bde7a45", "score": "0.6182244", "text": "def makeTestSet(data):\r\n testData = data[len(data) // 2:] # Double // used for integer division\r\n\r\n return testData # Return the data only containing the second half of the records\r", "title": "" }, { "docid": "bb05f446809cfab0b8163dfff369f119", "score": "0.6178515", "text": "def get_data(self):\n return self.datasets", "title": "" }, { "docid": "88d49a4ed5a1c6a0a7723411e51c1a7d", "score": "0.6144553", "text": "def gen_basic_dataset(size, params):\n\n\tdata = np.zeros(size)\n\tK = params #np.ceil(0.01*size).astype(int)\n\tindx = np.random.choice(size, size=K, replace=False)\n\tdata[indx] = 1\n\n\treturn data", "title": "" }, { "docid": "923516f271aff979a534397e087a9a41", "score": "0.61386836", "text": "def create_test_dataset():\n\n def preprocess(ds):\n def generate_example(i, t):\n del t # Unused.\n features = tf.random.stateless_uniform(shape=[3], seed=(0, i))\n label = (\n tf.expand_dims(\n tf.reduce_sum(features * tf.constant([1.0, 2.0, 3.0])), axis=-1\n )\n + 5.0\n )\n return (features, label)\n\n return ds.map(generate_example).batch(5, drop_remainder=True)\n\n num_examples = 25\n return preprocess(tf.data.Dataset.range(num_examples).enumerate())", "title": "" }, { "docid": "fa903214b99beff146a77a88065f5671", "score": "0.6138213", "text": "def get_data(domain, dataset_identifier, limit=50_000):\n # Unauthenticated client only works with public data sets. 
Note 'None'\n # in place of application token, and no username or password:\n client = Socrata(domain, None)\n\n\n # Example authenticated client (needed for non-public datasets):\n # client = Socrata(data.kingcounty.gov,\n # MyAppToken,\n # userame=\"user@example.com\",\n # password=\"AFakePassword\")\n\n # First 50,000 results, returned as JSON from API / converted to Python list of\n # dictionaries by sodapy.\n results = client.get(dataset_identifier, limit=limit)\n\n # Convert to pandas DataFrame\n df = pd.DataFrame.from_records(results)\n return df", "title": "" }, { "docid": "b5920b6a2aca7a03904950d6320697f2", "score": "0.61377686", "text": "def load_data_set():\n return pd.read_parquet(\"dataset.parquet\")", "title": "" }, { "docid": "571fc477b4045fb1de651893a3913564", "score": "0.61181784", "text": "def makeDataset(data, epchos_num, batch_size, shuffle_buffer_size=10000, drop_remainder=True):\r\n dataset = tf.data.Dataset.from_tensor_slices(data)\r\n\r\n if shuffle_buffer_size:\r\n dataset = dataset.shuffle(shuffle_buffer_size)\r\n\r\n dataset = dataset.repeat(None)\r\n \r\n if drop_remainder:\r\n tf.logging.info(\"Drop remainder feature is actived in batch dispenser\")\r\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\r\n else:\r\n dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)\r\n\r\n return dataset", "title": "" }, { "docid": "bacc74667441a2b244432475fdee926a", "score": "0.61136943", "text": "def dataset_batch(args, dataset):\n dataset = dataset.shuffle(10000)\n dataset = dataset.batch(args.batch)\n dataset = dataset.repeat()\n return dataset.make_one_shot_iterator().get_next()", "title": "" }, { "docid": "ae28f3d47e95d47e3b245de5c7d21790", "score": "0.6098228", "text": "def iris_dataset() -> Dataset:\n\n # Path to dataset.\n dataset_directory = os.path.join(test_data_path(), \"dataset\")\n dataset_path = os.path.join(dataset_directory, \"iris.csv\")\n dataset = pd.read_csv(dataset_path)\n train, test = train_test_split(dataset, ratio_second=0.30)\n label = \"class\"\n classes = [\"setosa\", \"versicolor\", \"virginica\"]\n\n def clean(ds):\n ds[label] = ds[label].map(classes.index)\n return ds\n\n train = clean(train)\n test = clean(test)\n return prepare_dataset(train, test, label, num_classes=len(classes))", "title": "" }, { "docid": "77cc019ac02db6c81ceb7dc5ed86a078", "score": "0.609125", "text": "def generate_dataset():\n x_batch = np.linspace(-1, 1, 101)\n y_batch = 2*x_batch + np.random.randn(*x_batch.shape)*0.3\n \n return x_batch, y_batch", "title": "" }, { "docid": "dff6753e4102379535d319164ac126d3", "score": "0.60880613", "text": "def get_data():\n mat = loadmat(os.path.join(SHOGUN_DATA_DIR, 'multiclass/usps.mat'))\n Xall = mat['data']\n Yall = np.array(mat['label'].squeeze(), dtype=np.double) - 1\n subset = random.permutation(len(Yall))\n # Xtrain, Ytrain, Xtest, Ytest\n return Xall[:, subset[:500]], Yall[subset[:500]], Xall[:, subset[500:600]], Yall[subset[500:600]]", "title": "" }, { "docid": "57d11161ff44ecbd9b4fb1d5b2e0e7e6", "score": "0.6069891", "text": "def getDataSet():\r\n # Step 1: Generate data by a module\r\n n = 100 # 1st class contains N objects\r\n alpha = 1.5 # 2st class contains alpha*N ones\r\n sig2 = 1 # assume 2nd class has the same variance as the 1st\r\n dist2 = 4\r\n\r\n # later we move this piece of code in a separate file\r\n # [X, y] = loadModelData(N, alpha, sig2, dist2);\r\n n2 = math.floor(alpha * n) # calculate the size of the 2nd class\r\n cls1X = np.random.randn(n, 2) # generate 
random objects of the 1st class\r\n\r\n # generate a random distance from the center of the 1st class to the center of the 2nd\r\n # https://stackoverflow.com/questions/1721802/what-is-the-equivalent-of-matlabs-repmat-in-numpy\r\n a = np.array([[math.sin(math.pi * random.random()), math.cos(math.pi * random.random())]])\r\n a1 = a * dist2\r\n shiftClass2 = np.kron(np.ones((n2, 1)), a1)\r\n\r\n # generate random objects of the 2nd class\r\n cls2X = sig2 * np.random.randn(n2, 2) + shiftClass2\r\n # combine the objects\r\n X = np.concatenate((cls1X, cls2X), axis=0)\r\n\r\n # assign class labels: 0s and 1s\r\n y = np.concatenate((np.zeros((cls1X.shape[0], 1)), np.ones((cls2X.shape[0], 1))), axis=0)\r\n # end % of module.\r\n return X, y", "title": "" }, { "docid": "b6f4494e39a54995b184df83ed7341c2", "score": "0.6061404", "text": "def get_dataset():\n # Numbers of row per class\n row_per_class = 10\n # Generate rows\n # np.random.randn(row_per_class, 2) = genere des points proches de 0, et l'addition de l'array place dans le bon cadran\n sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])\n sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])\n\n healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])\n healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])\n\n features = np.vstack([sick, sick_2, healthy, healthy_2])\n targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))\n targets2 = np.arange(40)\n print(\"features:\", features)\n print(\"targets:\", targets)\n\n\n targets = targets.reshape(40,)\n\n return features, targets", "title": "" }, { "docid": "6b2807db031b84cbe9ef075874305db5", "score": "0.60488003", "text": "def read(self):\n\t\tdata_sets = pd.read_csv(self.path, nrows=3000)\n\t\treturn data_sets", "title": "" }, { "docid": "160b37bee11a755ed47b4ceb1ba3f48e", "score": "0.6044137", "text": "def get_dataset(dataset_root):\n global POSSIBLE_PHONEME_CODES\n with open(dataset_root / 'all_possible_phoneme_codes.pkl', 'rb') as f:\n POSSIBLE_PHONEME_CODES = pickle.load(f)\n\n dataset = LJSpeech(dataset_root)\n\n NUM_VAL_SAMPLES = 100\n train_dataset = torch.utils.data.Subset(dataset, range(len(dataset))[NUM_VAL_SAMPLES:])\n val_dataset = torch.utils.data.Subset(dataset, range(len(dataset))[:NUM_VAL_SAMPLES])\n\n return train_dataset, val_dataset", "title": "" }, { "docid": "d8f50e4aeec301d69b49c30ecc1f1a08", "score": "0.6013528", "text": "def get_dataset(args, mode='train', preload_data=[None,None]):\n assert mode in ['train', 'test']\n\n label_erosion = 0\n sample_label_size = args.model_output_size\n sample_invalid_thres = args.data_invalid_thres\n augmentor = None\n topt,wopt = -1,-1\n if mode=='train':\n sample_volume_size = args.model_input_size\n if args.data_aug_mode==1:\n augmentor = Compose([Flip(p=1.0, do_ztrans=args.data_aug_ztrans),\n Grayscale(p=0.75),\n MissingParts(p=0.9),\n MissingSection(p=0.5),\n MisAlignment(p=1.0, displacement=16)], \n input_size = args.model_input_size)\n sample_volume_size = augmentor.sample_size\n sample_label_size = sample_volume_size\n elif args.data_aug_mode==2:\n augmentor = Compose([Rotate(p=1.0),\n Rescale(p=0.5),\n Flip(p=1.0, do_ztrans=args.data_aug_ztrans),\n Elastic(alpha=12.0, p=0.75),\n Grayscale(p=0.75),\n MissingParts(p=0.9),\n MissingSection(p=0.5),\n MisAlignment(p=1.0, displacement=16)], \n input_size = args.model_input_size)\n sample_volume_size = augmentor.sample_size\n sample_label_size = sample_volume_size\n label_erosion = args.label_erosion\n sample_stride 
= (1,1,1)\n topt, wopt = args.target_opt, args.weight_opt\n elif mode=='test':\n sample_stride = args.test_stride\n sample_volume_size = args.model_input_size\n \n # dataset\n if args.do_chunk_tile==1:\n label_json = args.input_path+args.label_name if mode=='train' else ''\n dataset = TileDataset(chunk_num=args.data_chunk_num, chunk_num_ind=args.data_chunk_num_ind, chunk_iter=args.data_chunk_iter, chunk_stride=args.data_chunk_stride,\n volume_json=args.input_path+args.img_name, label_json=label_json,\n sample_volume_size=sample_volume_size, sample_label_size=sample_label_size,\n sample_stride=sample_stride, sample_invalid_thres = sample_invalid_thres,\n augmentor=augmentor, target_opt = topt, weight_opt = wopt, mode = mode, \n label_erosion = label_erosion, pad_size=args.pad_size)\n else:\n if preload_data[0] is None: # load from command line args\n volume, label = _get_input(args, mode=mode)\n else:\n volume, label = preload_data\n dataset = VolumeDataset(volume=volume, label=label, \n sample_volume_size=sample_volume_size, sample_label_size=sample_label_size,\n sample_stride=sample_stride, sample_invalid_thres=sample_invalid_thres, \n augmentor=augmentor, target_opt = topt, weight_opt = wopt, mode = mode)\n\n return dataset", "title": "" }, { "docid": "77fc0bc4e4cc7c67e9a61cc6da496fcd", "score": "0.6006732", "text": "def timeseries_dataset(self):\n raise NotImplementedError()", "title": "" }, { "docid": "5066cede55ac834a55936fba7a936ed7", "score": "0.59951425", "text": "def generate_dataset(num_samples, test_size=0.33):\r\n\r\n # build inputs/targets for sum operation: y[0][0] = x[0][0] + x[0][1]\r\n x = np.array([[random()/2 for _ in range(2)] for _ in range(num_samples)])\r\n y = np.array([[i[0] + i[1]] for i in x])\r\n\r\n # split dataset into test and training sets\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size)\r\n return x_train, x_test, y_train, y_test", "title": "" }, { "docid": "2510c5064a21bc2e2df80151716c14b9", "score": "0.5993288", "text": "def load_dataset():\r\n \r\n try:\r\n df = load_imis_3months()\r\n except:\r\n ValueError(\"Invalid dataset name %s specified in parameters.py\" %parameters.OPTIONS['datasets']['name'])\r\n \r\n # TODO: uncomment if wish to also implement insertion/deletion\r\n # Select 50% of point as initial_dataset I\r\n # n = len(df)\r\n # shuffled = df.sample(n)\r\n # initial_dataset = shuffled.head(math.floor(n/2))\r\n \r\n # # Select 50% of points as extra_dataset E\r\n # extra_dataset = shuffled.tail(math.floor(n/2))\r\n \r\n # # Reorder dataset + reset index\r\n # initial_dataset = initial_dataset.sort_values(by=parameters.OPTIONS['datasets']['labels'][0]).reset_index(drop=True)\r\n # extra_dataset = extra_dataset.sort_values(by=parameters.OPTIONS['datasets']['labels'][0]).reset_index(drop=True)\r\n\r\n initial_dataset = df.sort_values(by=parameters.OPTIONS['datasets']['labels'][0]).reset_index(drop=True)\r\n\r\n return initial_dataset#, extra_dataset\r", "title": "" }, { "docid": "4dd4f07bbfca57c0dd4e362bf90476f2", "score": "0.5992481", "text": "def make_dataset(ampli_factor=1.0, nsubj=10):\n nsubj = 10\n dimx = 60\n dimy = 60\n pos = 2*np.array([[ 6, 7], [10, 10], [15, 10]])\n ampli = ampli_factor*np.array([5, 6, 7])\n sjitter = 1.0\n dataset = simul.surrogate_2d_dataset(nbsubj=nsubj, dimx=dimx, dimy=dimy, \n pos=pos, ampli=ampli, width=5.0, seed=1)\n return dataset", "title": "" }, { "docid": "56890acb1d9885266abc1e316d6192f2", "score": "0.5984578", "text": "def make_dataset(X, y, batchsize):\n n_samples = 
X.shape[0]\n assert y.shape[0] == n_samples, 'X and y should have the same number of sample X({})!=y({})'.format(X.shape[0], y.shape[0])\n n_train = int(0.6*n_samples)\n n_val = int(0.15*n_samples)+n_train\n\n X_train, X_val, X_test = X[0:n_train], X[n_train:n_val], X[n_val:]\n y_train, y_val, y_test = y[0:n_train], y[n_train:n_val], y[n_val:]\n \n # Note to self : \n # You should still use the incomming datasets as if they were simple dictionaries\n # In order to prevent breaking the old working codes\n dataset = Dataset({\n 'X_train': X_train,\n 'y_train': y_train,\n 'X_val': X_val,\n 'y_val': y_val,\n 'X_test': X_test,\n 'y_test': y_test,\n 'batchsize': batchsize,\n })\n return dataset", "title": "" }, { "docid": "b1a16b101cf6ce76d3acad97521948ba", "score": "0.5976041", "text": "def getRawData():\n app.logger.info(\"Making GET Request against database\")\n sets = DataSet.query.all()\n schema = DataSetSchema(many=True)\n output = schema.dump(sets).data\n return jsonify(output)", "title": "" }, { "docid": "ce3691d51219b34a86e359682d708752", "score": "0.5963223", "text": "def get_dataset(dataset: DatasetEntity, subset: Subset):\n data = dataset.get_subset(subset)\n return data if len(data) > 0 else None", "title": "" }, { "docid": "0b18d0664b7b0731bbe6588de05cddc5", "score": "0.59507847", "text": "def get_dataset():\n\n #Numbers of row per class\n row_per_class = 100\n #Generate rows\n sick = np.random.randn(row_per_class, 2) + np.array([-2, -2])\n sick_2 = np.random.randn(row_per_class, 2) + np.array([2, 2])\n\n healthy = np.random.randn(row_per_class, 2) + np.array([-2, 2])\n healthy_2 = np.random.randn(row_per_class, 2) + np.array([2, -2])\n\n features = np.vstack([sick, sick_2, healthy, healthy_2])\n targets = np.concatenate((np.zeros(row_per_class * 2), np.zeros(row_per_class * 2) + 1))\n\n targets = targets.reshape(-1, 1)\n\n return features, targets", "title": "" }, { "docid": "07485ca2ff69bc329632e22c4524db77", "score": "0.5949108", "text": "def _get_data(self, sample_hdf5_path: str) -> Data:\n if self.dataset is None:\n self.dataset = h5py.File(self.hdf5_file_path, \"r\")\n\n grp = self.dataset[sample_hdf5_path]\n # [...] 
needed to make a copy of content and avoid closing HDF5.\n # Nota: idx_in_original_cloud SHOULD be np.ndarray, in order to be batched into a list,\n # which serves to keep track of indivual sample sizes in a simpler way for interpolation.\n return Data(\n x=torch.from_numpy(grp[\"x\"][...]),\n pos=torch.from_numpy(grp[\"pos\"][...]),\n y=torch.from_numpy(grp[\"y\"][...]),\n idx_in_original_cloud=grp[\"idx_in_original_cloud\"][...],\n x_features_names=grp[\"x\"].attrs[\"x_features_names\"].tolist(),\n # num_nodes=grp[\"pos\"][...].shape[0], # Not needed - performed under the hood.\n )", "title": "" }, { "docid": "442fec8d881b75eced906ce14749fc87", "score": "0.59416515", "text": "def get_dataset(config):\n\n data_loader = config[\"data_loader\"]\n size = data_loader[\"input_size\"]\n data_transforms = transforms.Compose([\n transforms.CenterCrop(600),\n transforms.Resize((size, size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n data_transforms_idiap = transforms.Compose([\n transforms.Resize((size, size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n data_transforms_depth = transforms.Compose([\n transforms.Resize((size, size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485], std=[0.229])\n ])\n\n if data_loader[\"name\"] == \"UPNAHeadPose\":\n dataset = data_loaders.UpnaHeadPoseTrainTest(\n data_loader[\"config\"], data_transforms)\n train_dataset = dataset.train\n elif data_loader[\"name\"] == \"IDIAP\":\n dataset = data_loaders.IDIAPTrainTest(data_loader[\"config\"], \n data_transforms_idiap)\n train_dataset = dataset.train\n elif data_loader[\"name\"] == \"T_Less\":\n dataset = data_loaders.TLessTrainTest(\n data_loader[\"config\"], data_transforms_idiap)\n train_dataset = dataset.train\n else:\n sys.exit(\"Unknown data loader \" + config['data_loader'][\"name\"] + \".\")\n\n training_size = int(len(train_dataset) * 0.90)\n val_size = len(train_dataset) - training_size\n\n train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [training_size, val_size]) \n return train_dataset, val_dataset", "title": "" }, { "docid": "a039e66f475ab6622c65f576c50dabd8", "score": "0.5940318", "text": "def CreateDataSet():\n data = []\n files = [f for f in os.listdir('.') if os.path.isfile(f) and f[-3:] == 'dat']\n for file in files:\n data.append(CollectData(file))\n \n return data", "title": "" }, { "docid": "d3429612b593375973e04a4b9920a227", "score": "0.59355104", "text": "def dataset(self, *args, **kwargs):\n kwargs['connection'] = self\n return Dataset(*args, **kwargs)", "title": "" }, { "docid": "5257bb99e3c42c5b1069dac5e936c9a2", "score": "0.5930147", "text": "def resize(data,siz = 152): # Resize \n\n data = data[1:]\n setA = np.arange(152)\n idx = random.sample(list(setA),siz) # Amostragem da Data\n dataset = data[idx,:]\n return dataset", "title": "" }, { "docid": "bdd7db54d3f8e54b507ebf37b8dcda73", "score": "0.592156", "text": "def create_empty_dataset(dataset):\n return [()] * len(dataset)", "title": "" }, { "docid": "395b4c8d8d1a547e5cc5c753c90b1b4c", "score": "0.5919796", "text": "def _get_data(self):\n loader = ElementSaver(self.args.save_dir)\n _, data = loader.load_states(self.args.save_name)\n # Randomize data\n np.random.shuffle(data)\n for row in data:\n self.memory.add_sample(tuple(row))", "title": "" }, { "docid": "601e6948f2f99464f40eec1f973c51b0", "score": "0.59181803", "text": "def parallel_test_data():\n return 
ParallelTestData()", "title": "" }, { "docid": "75477b7e6847df42f2e20704d7782550", "score": "0.5915379", "text": "def _to_dataset(self, data):\r\n\r\n if data == None or data.size == 0:\r\n self.logger.warning('Data corrupt: no valid frames found in data packet')\r\n return None\r\n\r\n X = data\r\n Y = numpy.zeros((1, X.shape[1]))\r\n I = self._estimate_timing(X.shape[1])\r\n\r\n self.logger.debug('Number of samples parsed: %d' % X.shape[1])\r\n return psychic.DataSet(data=X, label=Y, ids=I, feat_lab=self.feat_lab)", "title": "" }, { "docid": "1b284218c9605fa0b5f8a54d9738095e", "score": "0.5913526", "text": "def get_samples(dataset, num_samples, idcs=[]):\n data_loader = get_dataloaders(dataset,\n batch_size=1,\n shuffle=idcs is None)\n\n idcs += random.sample(range(len(data_loader.dataset)), num_samples - len(idcs))\n samples = torch.stack([data_loader.dataset[i][0] for i in idcs], dim=0)\n print(\"Selected idcs: {}\".format(idcs))\n\n return samples", "title": "" }, { "docid": "a81a5edd8364a98e50a468e0bf60abc6", "score": "0.5912666", "text": "def get_training_dataset():\n return get_dataset(\"train.txt\")", "title": "" }, { "docid": "db76614d6b46807a58d7005ac3548f6e", "score": "0.5906186", "text": "def get_dataset(*args):\n dset = StudyList()\n for name in args:\n dset.extend(load_dataset(name))\n return dset", "title": "" }, { "docid": "0e3d7977c2472318a1881f1736121ae1", "score": "0.5901915", "text": "def get_dataset():\n # Canteen data\n canteen = get_extended_canteen_data()\n canteen.index = pd.to_datetime(canteen.index).date\n canteen.index.name = \"date\"\n\n # Define date interval\n earliest_date = canteen.iloc[0].name\n latest_date = canteen.iloc[-1].name\n\n # Holiday data\n holiday = get_holiday_data(earliest_date, latest_date)\n\n # Weather data\n weather = get_weather_data(earliest_date, latest_date)\n\n # Merge weather and holiday through left outer join\n merged = pd.merge(\n weather, canteen, left_index=True, right_index=True, how=\"left\"\n )\n merged = pd.merge(\n merged, holiday, left_index=True, right_index=True, how=\"left\"\n )\n\n merged[\"Canteen\"] = merged.apply(\n lambda row: row[\"Canteen\"] if not row[\"holiday\"] else 0.0, axis=1\n )\n\n map_bool_to_int(merged, \"holiday\")\n map_bool_to_int(merged, \"vacation\")\n map_bool_to_int(merged, \"inneklemt\")\n return merged.dropna()", "title": "" }, { "docid": "11f0edec28daaf25c635f9c01eddf953", "score": "0.5900782", "text": "def createdata(*args):\n res = Data([(k, np.empty(0)) for k in args])\n return res", "title": "" }, { "docid": "49ffb89dca7dc56088fb721407b57ddb", "score": "0.59000343", "text": "def get_dataset(self) -> Dict[str, Optional[Iterable]]:\n return self.dataset", "title": "" }, { "docid": "e2bf936847fad9ecbe32b369675d5043", "score": "0.5895992", "text": "def load_data(self):\n all_data = sklearn.datasets.load_iris()\n data = all_data[\"data\"]\n labels = all_data[\"target\"]\n\n test_size = int(len(data) * 0.1)\n self._train_data, self._train_labels,\\\n self._test_data, self._test_labels = db.split_train_test(data, labels, test_size)\n\n self.shuffle()\n\n return self.data", "title": "" }, { "docid": "6ef2d735192589d55b1dc6cc39b66522", "score": "0.5885437", "text": "def extract(self):\n\n print('log:: Start loading files...')\n\n real = pd.read_csv(os.path.join('data', 'True.csv'))\n fake = pd.read_csv(os.path.join('data', 'Fake.csv'))\n print('log:: Files loaded')\n\n real = self.mining(real)\n fake = self.mining(fake)\n real['real'] = 1\n fake['fake'] = 1\n dataset = pd.concat([real, 
fake]).fillna(0)\n\n print('log:: Data mined')\n\n dataset.sample(2500).to_csv(os.path.join('data', 'dataset_sample.csv'), encoding='utf8', sep=',', index=True)\n dataset.to_csv(os.path.join('data', 'dataset.csv'), encoding='utf8', sep=',', index=True)\n print('log:: Dataset exported')\n\n return dataset.reset_index(drop=False), dataset.sample(2500).reset_index(drop=False)", "title": "" }, { "docid": "543ab4dafa6ca4955c63b711dcb44349", "score": "0.5874003", "text": "def GenerateNRandTestData(shape=(100, 5)):\n testData = pd.DataFrame(np.random.randn(shape[0], shape[1]))\n return testData", "title": "" }, { "docid": "33fda1e53ae58176c4b4bcb4a6e4fbdf", "score": "0.58710665", "text": "def get_dataset(train, resize = None, class_num = 0, trans = True):\n \n name = 'sketch'\n # image pre-processing\n transform_pipline = []\n \"\"\"\n if resize is not None:\n transform_pipline.append(transforms.Resize(resize))\n \"\"\"\n if train:\n transform_pipline = transform_pipline + [\n transforms.RandomResizedCrop(size = resize),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.ColorJitter(brightness=[0.6,1.4], contrast=[0.6,1.4], saturation=[0.6,1.4], hue=[-.5, .5]),\n transforms.ToTensor(),\n transforms.Normalize(mean=constant.dataset_mean, std=constant.dataset_std),\n Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec'])\n ]\n else:\n transform_pipline = transform_pipline + [\n transforms.Resize(size = resize[0]+32),#transforms.CenterCrop((28)),\n transforms.CenterCrop(resize),\n transforms.ToTensor(),\n transforms.Normalize(mean= constant.dataset_mean, std = constant.dataset_std)\n ]\n\n transform = transforms.Compose(transform_pipline)\n if trans is False:\n transform = None\n # dataset and data loader\n dataset = DataGenerator(\n dataset_name = name,\n train = train,\n transform = transform,\n class_num = class_num\n )\n\n return dataset", "title": "" }, { "docid": "c5980167fc114dc161adaac0f8559b45", "score": "0.5868972", "text": "def create_dataset():\n images, targets = dataset(CAR_DIRECTORY, NONCAR_DIRECTORY)\n features = []\n ntargets = []\n nb_images = len(images)\n for i, img in enumerate(images):\n features.append(extract_features(img))\n ntargets.append(targets[i])\n U.progress(i, nb_images, \"Extract features...\")\n features = np.array(features)\n # Normalize featues\n X_scaler = StandardScaler().fit(features)\n features = X_scaler.transform(features)\n filename = 'x_scaler.pkl'\n # Save the dataset for futur use\n pickle.dump(X_scaler, open(filename, 'wb'))\n np.save(\"features\", features)\n np.save(\"targets\", ntargets)\n targets = ntargets\n return features, targets", "title": "" }, { "docid": "eea4cdcecf007a21d2a3b480baeee2d1", "score": "0.5864416", "text": "def get_val_dataset(self, *args, **kwargs) -> Dataset:", "title": "" }, { "docid": "acd61aa5aa3c6430e0ce8f84aa70cb2c", "score": "0.5861222", "text": "def get_data():\n no_features = 50\n redundant_features = int(0.1*no_features)\n informative_features = int(0.6*no_features)\n repeated_features = int(0.1*no_features)\n x,y = make_classification(n_samples=500,n_features=no_features,flip_y=0.03,\\\n n_informative = informative_features, n_redundant = redundant_features \\\n ,n_repeated = repeated_features,random_state=7)\n return x,y", "title": "" }, { "docid": "5cd38be2853b08bb01c3628b6ad26cea", "score": "0.5855134", "text": "def full_example_dataset():\n\n np.random.seed(500)\n\n target = np.random.randint(low=0, high=2, size=100)\n x1 = target.copy()\n x2 = target.copy()\n\n x1[2] = 2\n x1[9:12] = 2\n x1[15:20] 
= 0\n x1[33] = 2\n x1[38:44] = 0\n x2[4:7] = 1\n x2[9:12] = 1\n x2[15:17] = 1\n x2[23:26] = 1\n x2[33:44] = 0\n\n fixture = pd.DataFrame({\"target\": target, \"x1\": x1, \"x2\": x2})\n\n fixture.reset_index(drop=True, inplace=True)\n\n return fixture", "title": "" }, { "docid": "4ef850de4a8474cec34d8b7c6a74a2d3", "score": "0.5854721", "text": "def sample_data():\n d_dimensions = 1\n n_samples = 20\n noise_std = 0.1\n seed = 123\n rng = np.random.RandomState(seed)\n\n n_train = 20\n n_test = 1000\n n_train = 20\n n_test = 1000\n xtrain = np.linspace(-4, 5, n_train).reshape(n_train, 1)\n xtest = np.linspace(-4, 5, n_test).reshape(n_test, 1)\n\n f = lambda x: np.sin(x) * np.exp(0.2 * x)\n ytrain = f(xtrain) + noise_std * rng.randn(n_train, 1)\n ytest = f(xtest)\n\n return xtrain, xtest, ytrain, ytest", "title": "" }, { "docid": "44ef8b007eadccae56043f5634c2056b", "score": "0.5851612", "text": "def get_data():\n X_train, y_train=read_from_csv()\n return shuffle_and_split(X_train, y_train)", "title": "" }, { "docid": "c8c9463ab3ec8be9da8122ae6c8c3dc6", "score": "0.58481324", "text": "def load_data():\n data = sio.loadmat(\"./data/dataset\")[\"Points\"]\n \n return data", "title": "" }, { "docid": "4e586facce5e031768fe8bd34ffd76c7", "score": "0.58443254", "text": "def _as_dataset(self, split, shuffle_files=None):\n raise NotImplementedError", "title": "" }, { "docid": "5ceee916cd63cf210060aee62c59f23b", "score": "0.58336306", "text": "def get_dataset(options):\n\n # parse files\n sets, test, fs = get_files_list(options)\n # ask to proceed\n disp_examples_info(sets)\n\n file_names = list(map(lambda el: el[\"path\"], sets))\n n_examples = sum(map(lambda el: el[\"trains\"] + el[\"noises\"], sets))\n\n # build dataset\n acc = None\n for file in file_names:\n dataset = tf.data.TextLineDataset(file)\n dataset = dataset.skip(1) # skip first, header, line\n dataset = dataset.map(parse_smp_line) # parse values\n if (acc):\n acc = acc.concatenate(dataset)\n else:\n acc = dataset\n acc = acc.shuffle(n_examples) # if n_examples is not too big!!!\n if not test:\n # if no test dataset provided, take examples from main dataset\n print(\"No test dataset found, taking test examples from main dateset\")\n test_set = acc.take(10)\n else:\n # make test dataset from file\n test_set = tf.data.TextLineDataset(test[\"path\"])\n test_set = test_set.skip(1) # skip first, header, line\n test_set = test_set.map(parse_smp_line)\n test_set.shuffle(test[\"trains\"] + test[\"noises\"])\n acc = acc.batch(32)\n return acc, test_set, fs", "title": "" }, { "docid": "dc3bd6a1522c8eaca370d22ab5f203b3", "score": "0.5831561", "text": "def getData(self) -> ghidra.program.model.listing.Data:\n ...", "title": "" }, { "docid": "c81a801ab4d41cc5b39529e67100d512", "score": "0.58307445", "text": "def create_3_part_dataset(raw, fft, target): \n ds = TensorDataset(\n torch.tensor(raw).float(), \n torch.tensor(fft).float(), \n torch.tensor(target).float())\n \n return ds", "title": "" }, { "docid": "cc8ebf874c4b983908721ecf763e4cb0", "score": "0.5827871", "text": "def create_dataset(opt):\r\n dataset_train = get_loader(opt.dataroot, opt.char_categories, opt.batch_size, 'train')\r\n dataset_test = get_loader(opt.dataroot, opt.char_categories, opt.batch_size, 'test')\r\n return dataset_train, dataset_test", "title": "" }, { "docid": "bc199c73e85aed031dbdada7191137f1", "score": "0.58247584", "text": "def load_datasets(self):", "title": "" }, { "docid": "93db736639ab18a97a30b3c5de59ad0b", "score": "0.58125085", "text": "def dataset(self) -> 
'DataSet':\n return self._dataset", "title": "" }, { "docid": "c5fb1f975e676a567db83d6c86e663ca", "score": "0.5802604", "text": "def create_new_dataset(ds, longitudes, latitudes, timestamp, all_vars=False):\n # initialise the list\n da_list = []\n\n # for each variable create a new data array and append to list\n if all_vars:\n for variable in [v for v in ds.variables.keys()]:\n da_list.append(\n create_new_dataarray(ds, variable, longitudes, latitudes, timestamp)\n )\n else:\n # only export the VHI data\n da_list.append(\n create_new_dataarray(ds, \"VHI\", longitudes, latitudes, timestamp)\n )\n\n # merge all of the variables into one dataset\n new_ds = xr.merge(da_list)\n new_ds.attrs = ds.attrs\n\n return new_ds", "title": "" }, { "docid": "a244d0003c8d46d8be3844ca44ddccfa", "score": "0.58016896", "text": "def get_datarecords():\n try:\n return record_models.Dataset.objects.all()\n except:\n raise", "title": "" }, { "docid": "a244d0003c8d46d8be3844ca44ddccfa", "score": "0.58016896", "text": "def get_datarecords():\n try:\n return record_models.Dataset.objects.all()\n except:\n raise", "title": "" }, { "docid": "f11b87c577f6a43e24f7509d6151c4a8", "score": "0.5795598", "text": "def data(self):\n if self._data.empty:\n num_datasets = len(self.data_ids)\n if num_datasets == 0:\n print(\"No datasets were found.\")\n else:\n print(f\"Attempting to fetch ${num_datasets} datasets...\")\n try:\n data_list = []\n for name, data_type, data_id in self.data_ids.values:\n dataset = Dataset(data_id)\n data_list.append(\n {\n \"Name\": name,\n \"DataType\": data_type,\n \"Operation\": dataset.operation,\n \"AnalysisTool\": dataset.analysis_tool,\n \"Datasets\": dataset,\n }\n )\n data_frame = pd.DataFrame(data_list)\n\n # Rearrange columns\n self._data = data_frame[\n [\"Name\", \"DataType\", \"Operation\", \"AnalysisTool\", \"Datasets\"]\n ]\n except ReadTimeout:\n print(\n f\"Operation timed out after {self.timeout} seconds. 
Returning \"\n \"data_ids instead of Datasets.\\nTo try again, increase value \"\n \"of self.timeout and resubmit request.\"\n )\n return self.data_ids\n return self._data", "title": "" }, { "docid": "8de3c7735e76caa85514add03187dab9", "score": "0.5792825", "text": "def load_data():\n train_data, info = tfds.load(name=data_set,\n split=[tfds.core.ReadInstruction('train', from_=k, to=k+10, unit='%')\n for k in range(0, 100, 10)], with_info=True)\n val_data = tfds.load(name=data_set,\n split=[tfds.core.ReadInstruction('train', to=k, unit='%') +\n tfds.core.ReadInstruction('train', from_=k+10, unit='%')\n for k in range(0, 100, 10)])\n\n test_data = tfds.load(name=data_set, split='test')\n\n assert isinstance(train_data, tf.data.Dataset)\n assert isinstance(test_data, tf.data.Dataset)\n assert isinstance(val_data, tf.data.Dataset)\n\n print(info)\n\n return train_data, test_data, val_data, info", "title": "" }, { "docid": "a35e95e1f31be468bf74017cd4cde603", "score": "0.57923347", "text": "def get_dataset(self):\n trajectory_dataset = TrajectoryDataset(self.states,\n self.actions,\n self.next_states,\n self.rewards,\n self.dones)\n\n dataset = DataLoader(trajectory_dataset, batch_size=self.t_update,\n shuffle=False)\n\n return dataset", "title": "" }, { "docid": "e3be8a326a41bd9f62820fcc953dd986", "score": "0.5792069", "text": "def prepare_dataset(self):\n rows = []\n _, keywords, sitetypes = self._metadata\n print('keywords:')\n print('\\t', keywords)\n print('dataset:')\n\n # read texts for each site type\n for class_id, site_type in enumerate(sitetypes):\n print('\\t', 'class_id:', class_id)\n print('\\t', 'site_type:', site_type)\n with open('data/' + site_type + '.json', 'r') as file:\n texts = json.loads(file.read())\n print('\\t\\t', 'texts count in dataset:', len(texts))\n tokens = [self._word_model.tokenize(t) for t in texts]\n print('\\t\\t', 'average text tokens count', np.mean([len(t) for t in tokens]))\n\n # one dataset row for one text\n for tk in tokens:\n feature_vector = self.tokens_to_feature_vector(tk, keywords)\n rows.append((class_id, feature_vector))\n\n labels, features = zip(*rows)\n means = np.mean(features, axis=0)\n stds = np.std(features, axis=0)\n features = (features - means) / stds\n assert len(labels) == len(features)\n assert len(means) == len(stds) == len(keywords)\n return labels, np.array(features), means, stds", "title": "" }, { "docid": "0408f5ffa8f33eebbfd83c152327f444", "score": "0.5775291", "text": "def __create_datasets__(self):\n pass", "title": "" }, { "docid": "0408f5ffa8f33eebbfd83c152327f444", "score": "0.5775291", "text": "def __create_datasets__(self):\n pass", "title": "" }, { "docid": "eb9a0f24838037ad92830c77914675cf", "score": "0.5772384", "text": "def create_dataset(self):\n get_model = self.client.get_model\n dataset = get_model('Dataset')(data=get_model('DatasetData')())\n dataset.data.data_format = get_model('DataFormat')()\n dataset.data.tags = self.tags\n return dataset", "title": "" }, { "docid": "b238f5369c0e8d35477e48b5d2c27c33", "score": "0.57717806", "text": "def build_dataset(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "83468614bd024e28e10e7630376ab0cb", "score": "0.57703674", "text": "def load_data():\n # Load dataset\n print(\"Loading dataset...\")\n data = fetch_openml('mnist_784', version=1, cache=True)\n #data = fetch_mldata('MNIST original')\n #X = check_array(data['data'], dtype=dtype, order=order)\n X = data['data']\n y = data[\"target\"]\n\n # Normalize features\n X = X / 255\n\n # 
Create train-test split (as [Joachims, 2006])\n print(\"Creating train-test split...\")\n n_train = 60000\n X_train = X[:n_train]\n y_train = y[:n_train]\n X_test = X[n_train:]\n y_test = y[n_train:]\n\n return X_train, X_test, y_train, y_test", "title": "" }, { "docid": "aaef5100d9423907c3eff533cab97e7c", "score": "0.5769473", "text": "def build_datasets_debugging(dir_data):\n\n # Read in images and sort\n loc_list = glob(dir_data+'*.jpg')\n loc_list.sort()\n lst_len = len(loc_list)\n\n # Partition dataset\n train_idx_end = int(1000)\n\n train_dataset = CustomDataset(dir_data, 0, train_idx_end)\n val_dataset = CustomDataset(dir_data, train_idx_end + 1, 1101)\n\n return train_dataset, val_dataset", "title": "" } ]
eac4ed6f8e20037cf0acc6d4e61d1374
Return the path up to the folder
[ { "docid": "d7b69421195330f20aaa278aab51337e", "score": "0.62405604", "text": "def localPath(self):\n return self.home", "title": "" } ]
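The query above asks for the path up to the folder; the positive passage answers it by returning a stored attribute, and several of the hard negatives below reach for os.path.dirname instead. A minimal, self-contained Python 3 sketch of that underlying idea; the function name and example path are illustrative assumptions, not taken from any passage:

import os
from pathlib import Path

def folder_of(path: str) -> str:
    # Return the path up to (and excluding) the final component.
    return os.path.dirname(os.path.abspath(path))

print(folder_of("/data/logs/run.txt"))    # /data/logs
print(Path("/data/logs/run.txt").parent)  # pathlib equivalent: /data/logs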
[ { "docid": "5c0709de22f515be7f5334ecf931497e", "score": "0.74314433", "text": "def dir(self) -> str:\n return f'{os.path.dirname(self.path)}/'.lstrip('/')", "title": "" }, { "docid": "1219ae0f2bf743c1c792d2b69d8da416", "score": "0.73235214", "text": "def get_path(self):\n definition = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id)\n parent_id = definition.get(\"parentId\", None)\n if parent_id is not None:\n parent = DSSProjectFolder(self.client, parent_id)\n path = parent.get_path()\n return (\"\" if path == \"/\" else path) + \"/\" + definition.get(\"name\", \"\")\n else:\n return \"/\"", "title": "" }, { "docid": "337f50f4861dfdcff55233db7aa32413", "score": "0.7104879", "text": "def dirname(self):\n _, tail = os.path.split(self.url)\n return self.folder + '/' + tail[:-4]", "title": "" }, { "docid": "96e3b765cc67b2e6d4bdae8319e09792", "score": "0.7095475", "text": "def dir(self):\n return os.path.dirname(self.path)", "title": "" }, { "docid": "f31719063f1af67188d6e59abe39c7a5", "score": "0.6996612", "text": "def getPath(self):\n return self.__folder", "title": "" }, { "docid": "90a3419d6bf870ee8cc8f6deccf26734", "score": "0.6887771", "text": "def path(self):\n return self._dir_entry.path", "title": "" }, { "docid": "27fb0c71ccc9f507b77e3ae891eeb6bb", "score": "0.68731755", "text": "def folder(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"folder\")", "title": "" }, { "docid": "c286b90c2af996a75d12cc425a7263b0", "score": "0.6855003", "text": "def GetPath(self):\r\n\r\n return self.directory", "title": "" }, { "docid": "8a1bf0cafdf1151f0e3eaa64ce54ccd8", "score": "0.68351495", "text": "def get_file_path(cls, file_name, folder_name):\n return cls.file_path.parent / folder_name / file_name", "title": "" }, { "docid": "69b2efb441cc9404a77033638e15ade8", "score": "0.6806131", "text": "def _get_folder(self):\n # type: () -> str\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id, \"folderpath\"], headers=headers\n )\n\n return response.json().get(\"path\")", "title": "" }, { "docid": "9455b635ff9379bd967a3d78269ad56d", "score": "0.6767671", "text": "def upper_directory(self):\n\n cur_dir = os.path.dirname(os.path.abspath(__file__))\n up_dir = cur_dir[:cur_dir.rfind('\\\\')] + '\\\\'\n return up_dir", "title": "" }, { "docid": "0b800eb03afbbd443ded19991db7de53", "score": "0.67630714", "text": "def get_parent_directory(src: str) -> str:\n return src[: src.rfind(os.path.sep)]", "title": "" }, { "docid": "50e6f87534ca626c5c937617cbd73a8f", "score": "0.6751437", "text": "def path(self):\n return os.path.dirname(os.path.abspath(self._filename))", "title": "" }, { "docid": "5387dacefd08f6a23773f95249540992", "score": "0.671774", "text": "def file_path(self):\n return posixpath.dirname(self.file_name)", "title": "" }, { "docid": "e453f84d502b6edfc606df6faf4d0074", "score": "0.6712309", "text": "def get_path(self, path):\n if path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "title": "" }, { "docid": "80fe33c34549c6fb667f139651794a4c", "score": "0.67014503", "text": "def get_directory() -> str:\n return directory", "title": "" }, { "docid": "4fe2f3a10046b592acf47155febc5cb4", "score": "0.67010385", "text": "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "title": "" }, { "docid": "50f2e98344f197acaa19767869409345", "score": "0.66962254", 
"text": "def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result", "title": "" }, { "docid": "3a07f06c2627581e4652a9f27d14d6d7", "score": "0.6669", "text": "def root_rel_path(self):\n return os.path.dirname(self.image.name)", "title": "" }, { "docid": "a618f032b0a9ccfbabab5fe5aa414b63", "score": "0.6649745", "text": "def Directory(self) -> str:", "title": "" }, { "docid": "b19c7276988b4c15b62cad9dca3901cf", "score": "0.6616357", "text": "def path(self):\n\t\treturn os.path.join(*self._string_values(limit=4))", "title": "" }, { "docid": "652dc2d018810bc06252311bca204a14", "score": "0.66157305", "text": "def getParentDirectory():\n path = os.path.dirname(os.path.realpath(__file__))\n path = '/'.join( path.split('/')[:-1] )\n return path", "title": "" }, { "docid": "da319d28215094a7b7e39b5f641bb06b", "score": "0.6610475", "text": "def get_directory(path):\n return mangle_path(path).rsplit('/',1)[0]", "title": "" }, { "docid": "fb77b3f02f883bf146ce8d51ab46c581", "score": "0.6607193", "text": "def get_root_folder() -> Path:\n return Path(__file__).parent.parent", "title": "" }, { "docid": "bb642d5649448cbb0df1019d53233167", "score": "0.66059583", "text": "def _parent_path(cls,path):\n # os.path.dirname(), but strip directories like files (like unix basename)\n # Treat directories like files...\n if path[-1]=='/':\n path=path[:-1]\n ret = os.path.dirname(path)\n return ret", "title": "" }, { "docid": "8f17426b43b86362dbd6bbe2bdd0c345", "score": "0.6593984", "text": "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "title": "" }, { "docid": "2f649e9609b1d7930f051c344e7295b9", "score": "0.65686077", "text": "def pathtofolder():\n return os.getcwd()", "title": "" }, { "docid": "7ce36adec69ea4aae37544e404d514bc", "score": "0.65642464", "text": "def extracted_path(self):\n return os.path.join(self.destination_directory, self.__extracted_name__)", "title": "" }, { "docid": "9892575d09bfbf4c325cf716dc7ba580", "score": "0.6557466", "text": "def get_full_folder_path(self):\n data_dir_path = os.path.join(settings.MEDIA_ROOT,self.folder)\n return data_dir_path", "title": "" }, { "docid": "36d535c7f9e3d8a88640da03c94c475b", "score": "0.65564096", "text": "def root_path(self):\n return os.path.dirname(self.image.path)", "title": "" }, { "docid": "42a24b351c05f829bd014bcfac5b48df", "score": "0.65543", "text": "def get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2])", "title": "" }, { "docid": "db7b8366fb26579acfcd1a9df6b3c516", "score": "0.6553482", "text": "def abs_path(self) -> str:\n full_path = '/'.join(folder.name for folder in reversed(self.ancestors))\n return f'/{full_path}/'", "title": "" }, { "docid": "c3258688629576ac88a093dc097d79cb", "score": "0.6519889", "text": "def get_last_path(self):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n spec_path = self.data_root_path / folders[0]\n logging.info('Last download folder was %s', spec_path)\n return spec_path", "title": "" }, { "docid": "3223a800c2daaa3ef1759531dc3db694", "score": "0.64929223", "text": "def 
completePath(path):\n return os.getcwd() + convertString(path)", "title": "" }, { "docid": "43cf6fb960e16fb778998160d0822ef2", "score": "0.6483873", "text": "def path(self) -> str:\n self.__verify_repo_initialized()\n return str(self._repo_path.parent)", "title": "" }, { "docid": "d0fecf05ed36a1ebda413cae4c65adba", "score": "0.6452027", "text": "def getFolder():\n from tkinter import Tk, filedialog\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n return(filedialog.askdirectory()+'/')", "title": "" }, { "docid": "86afc3c4c00af56a5e1a76407c5d9dbb", "score": "0.6445523", "text": "def folder(self):\n return os.path.join(\n td.project.folder.replace(\"/\", os.sep), # incase we're on windows\n \"component_repos\",\n self.name)", "title": "" }, { "docid": "47de1eea152ca3ddb8d3a133e64ae9c3", "score": "0.64452726", "text": "def get_top_level_directory(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "5968285387dcd70dd21e628486602710", "score": "0.64423853", "text": "def previous_directory(self):\r\n prev_dir = Path(self.path_viewer.text()).parent\r\n self.set_new_path(str(prev_dir))", "title": "" }, { "docid": "bf44750f5bf9b1f705a0de372efd4b71", "score": "0.6436737", "text": "def dirpath(self) -> str:\n return self._dirpath", "title": "" }, { "docid": "b0f96641d70fdbe4b24b23f0810255e2", "score": "0.64347875", "text": "def path(self) -> str:\n return self.src + \"/\"", "title": "" }, { "docid": "7ca99367869ae059cfe166c50e836ce2", "score": "0.6434691", "text": "def fpath(self):\n return os.path.join(self.path, self.name)", "title": "" }, { "docid": "09ebf8ba420df7a30d0e47666cf2bd33", "score": "0.64311475", "text": "def get_location(self):\n return os.path.dirname(self.filename)", "title": "" }, { "docid": "0a723d11a58c503c5b3f922139ae6f5e", "score": "0.6429038", "text": "def dirname(path):\n return os.path.dirname(path)", "title": "" }, { "docid": "35266c0590319e70e0f010216321329d", "score": "0.64261097", "text": "def shortpath(path):\r\n import os\r\n if path.startswith(base_dir):\r\n return path[len(base_dir) + len(os.path.sep) : ]\r\n return path", "title": "" }, { "docid": "f8218df4618f5be21eca4cbe6b04c368", "score": "0.6423569", "text": "def parent_dir_path(path):\n return absolute_path(os.path.dirname(path))", "title": "" }, { "docid": "9943e7c5b53770dd1a6af072e1307f0e", "score": "0.64205056", "text": "def get_actual_path(self, path):\n if self._params.path_to_dir[-1] != '/':\n if path:\n path = self._params.path_to_dir + '/' + path\n path = path.replace('//', '/')\n return path", "title": "" }, { "docid": "be0f8c54da60aaa05474948b6778988f", "score": "0.6418454", "text": "def folder(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"folder\")", "title": "" }, { "docid": "4cc47f1fe41fe07a5142b8d3335300da", "score": "0.641668", "text": "def get_directory(self) -> Path:\n path = Path(str(self.max_nesting_lvl))\n path /= str(self.num_snps)\n path /= self.read_quality\n path /= str(self.coverage)\n path /= str(self.denovo_kmer_size)\n return path", "title": "" }, { "docid": "b9e89d39d110c0d9b9c9783aa90a7f50", "score": "0.64103454", "text": "def output_path():\n folder = path.join(path.curdir, \"stages\")\n folder = path.abspath(folder)\n return ensure_path(folder)", "title": "" }, { "docid": "e67a5bfefd58d94451508d9db19dddec", "score": "0.6407992", "text": "def get_parent_dir(path):\n return os.path.dirname(path)", "title": "" }, { "docid": "63c0eb83efee10b613e0f5bc2a4dde95", "score": "0.6404427", "text": "def shpname(self):\n _, tail = 
os.path.split(self.url)\n return self.folder + ('/' + tail[:-4]) * 2", "title": "" }, { "docid": "64a26882ee5e1fb3bbb76014ce6832ec", "score": "0.64042836", "text": "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "title": "" }, { "docid": "bfc099f6414877b0e76addbc7e219716", "score": "0.6402163", "text": "def build_relpath(self):\n return join_path(\"..\", self.build_dirname)", "title": "" }, { "docid": "a55071a0ca64931e8e6ae7fe817e1f0e", "score": "0.6393404", "text": "def getFolderPath(self) -> unicode:\n ...", "title": "" }, { "docid": "300a22303f18a813bb527a9a47ec60de", "score": "0.6383744", "text": "def get_dir_path():\n return DIR_PATH", "title": "" }, { "docid": "9bf7cee5bec8cde12cd8942a6a573884", "score": "0.6371265", "text": "def get_relative_path (folder, file) :\n if not os.path.exists (folder) : raise PQHException (folder + \" does not exist.\")\n if not os.path.exists (file) : raise PQHException (file + \" does not exist.\")\n sd = folder.replace(\"\\\\\",\"/\").split(\"/\")\n sf = file.replace(\"\\\\\",\"/\").split(\"/\")\n for i in range (0, len (sd)) :\n if i >= len (sf) : break\n elif sf [i] != sd [i] : break\n res = copy.copy (sd)\n j = i\n while i < len (sd) :\n i += 1\n res.append (\"..\")\n res.extend (sf [j:])\n return os.path.join (*res)", "title": "" }, { "docid": "a639faf5f667116fcd732eb542a0b852", "score": "0.63560057", "text": "def get_path(filename: str = None, folder: str = None) -> str:\n return IMAGE_SET.path(filename, folder)", "title": "" }, { "docid": "f2470882440fff741e96713566a728a1", "score": "0.6353091", "text": "def format_folder_path(folder_path):\n if folder_path[-1] != '/':\n folder_path += '/'\n\n return folder_path", "title": "" }, { "docid": "8b4ae0972e4035a1b7269706fe615d90", "score": "0.63526285", "text": "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "title": "" }, { "docid": "fcb1453d953361d3c15f808350994980", "score": "0.63473004", "text": "def get_output_folder(self):\n return os.path.join(self.root_output_folder, self.base_fish_folder)", "title": "" }, { "docid": "26b43b85c8bacc1157d3bceaecb4cd8f", "score": "0.6344295", "text": "def get_path(path):\n if _prefix and not '/' in path:\n path = _prefix + path\n\n if not _cwd:\n return path\n\n return join(_cwd, path)", "title": "" }, { "docid": "93e44446b01c2d5bda014d912a85aede", "score": "0.6339725", "text": "def dirpath(self):\n return self.__edir", "title": "" }, { "docid": "594977f7e7a5bb47b4edbe7247ed850e", "score": "0.63336813", "text": "def get_directory(self, subdir=None):\n path = settings.SUBMISSION_DIR / str(self.assignment.id) / str(self.id)\n if subdir:\n path = path / subdir\n\n return path", "title": "" }, { "docid": "d763e3aa90de87fbeeb1357f4a440c55", "score": "0.633344", "text": "def path(self, args):\n dir_path = self.dir_path_(*args)\n return os.path.join(dir_path, self.file_name)", "title": "" }, { "docid": "71dfe40eabecbf59c9e7aae5a8a731ad", "score": "0.6320968", "text": "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "title": "" }, { "docid": "429cc5f2de8b53a55c81bf953ffdc780", "score": "0.63195777", "text": "def get_dir(self):\n return self.dir", "title": "" }, { "docid": "55b2afc409b5ed9fa354a27f33335c55", 
"score": "0.63093585", "text": "def project_path(cur_path=''):\n if not cur_path:\n cur_path = __file__\n real_path = os.path.realpath(cur_path)\n # path of upper-level directory\n upper_folder = os.path.split(real_path)[0]\n # path of topmost-level directory (trunk)\n return os.path.split(upper_folder)[0]", "title": "" }, { "docid": "3d7379268b8270619753e8e8536a31d1", "score": "0.6308357", "text": "def path(self, prefix, args=()):\n dir_path = self.dir.path(prefix, args)\n return os.path.join(dir_path, self.name)", "title": "" }, { "docid": "5e645b78a4e2a3003911776eb68c6ed8", "score": "0.63065493", "text": "def path(self):\n if self.parent and self.parent.category_id:\n return self.parent.path + '/' + self.basename\n return self.basename", "title": "" }, { "docid": "7bac4b6a13192c6e1b2e75110a47af5a", "score": "0.63017136", "text": "def get_full_path(path, *args):\n\n return os.path.join(_search_parent_dir(\".wit\"), *args, path)", "title": "" }, { "docid": "610371fdf355a2ca4c5096cf97ab0fa9", "score": "0.6298443", "text": "def get_image_path(source_path):\n\n split = source_path.split('\\\\')\n # get filename\n filename = split[-1].lstrip()\n # get folder name\n folder = split[-3]\n # get full data path\n current_path = folder + '/IMG/' + filename\n return current_path", "title": "" }, { "docid": "f8ded2917b50e025d6ba245c8faa5334", "score": "0.62983465", "text": "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "title": "" }, { "docid": "163853ad7bb0fd99cf08a98f5db89474", "score": "0.6294657", "text": "def kard_folder_path(self):\n if self._base_path is None:\n if is_running_in_docker():\n container_id = os.popen(\n 'cat /proc/self/cgroup | grep docker | '\n 'grep -o -E \"[0-9a-f]{64}\" | head -n 1').read().rstrip()\n cli = docker.DockerClient(version='auto')\n cont = cli.containers.get(container_id)\n mount = next((\n c for c in cont.attrs['Mounts']\n if c['Destination'] == str(get_kard_root_path())))\n self._base_path = Path(mount['Source'])\n else:\n self._base_path = Path(self.kard.path).parent\n return self._base_path", "title": "" }, { "docid": "33730fab62a8bc964e3c8ef836a66188", "score": "0.6290705", "text": "def upload_dir_rel(self):\n return os.path.join(self.short_name,\"uploads\")", "title": "" }, { "docid": "184adea4b3cdd2e5cf71c3d2310d2c00", "score": "0.62842643", "text": "def Dir(path=None):\n global _last_files\n if path:\n _last_files = glob.glob(path)\n if _last_files:\n return os.path.split(_last_files.pop(0))[1] # VB just returns the filename, not full path\n else:\n return \"\"", "title": "" }, { "docid": "d36ffa13e38c2c204a117b29e62d01c7", "score": "0.62822294", "text": "def ReturnPathOfFile(self, url):\n\tcount=0\n\turlComponent = urlparse.urlparse(url)\n\tfor part in urlComponent:\n\t count = count + 1\n\t if count == 3:\n\t\tFolderPath = part\n\treturn FolderPath", "title": "" }, { "docid": "f6cc15f41658a0241049902a67bdee1e", "score": "0.62741023", "text": "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "title": "" }, { "docid": "93a4a2969ab4c275b413509c64b73280", "score": "0.6264619", "text": "def full_path(self):\n fullpath = os.path.join(self.path, self.name)\n if self.path == \"\":\n fullpath = self.name\n return fullpath", "title": "" }, { "docid": "eed2a2f97a68ac8b2df78f0b52d5a5a3", "score": "0.62642527", "text": "def relativize(path: str):\n return join('.', path)", "title": "" }, { "docid": "de820b628dfd674490a93468178674f1", "score": "0.6263903", "text": "def __get_path(self):\n return 
self.path", "title": "" }, { "docid": "dd24a7c431ce1353011935f5fcf060ee", "score": "0.6259138", "text": "def path(self, f):\n\t\treturn os.path.join(self.directory, f)", "title": "" }, { "docid": "98dbbc7f08b40977e5e4f58f839c4d1f", "score": "0.6255145", "text": "def file_directory(file):\n return os.path.dirname(os.path.realpath(file))", "title": "" }, { "docid": "eed2c2860a4d263e2ab6dbd378ce82c3", "score": "0.625345", "text": "def get_dir(path):\n extension = path.suffix\n if extension == '':\n return path\n else:\n return path.parent", "title": "" }, { "docid": "3b51922ce562a7890614376946de86cb", "score": "0.6250059", "text": "def working_directory(self):\n project_full_path = \"/\".join(self.file_name.split(\"/\")[:-1])\n file_name = self.file_name.split(\"/\")[-1]\n if \".h5\" in file_name:\n file_name = file_name.split(\".h5\")[0]\n file_name += \"_hdf5\"\n if self.h5_path[0] == \"/\":\n h5_path = self.h5_path[1:]\n else:\n h5_path = self.h5_path\n return posixpath.join(project_full_path, file_name, h5_path)", "title": "" }, { "docid": "9534d6bd9b0e3b1913c8ab1d3a81ec7b", "score": "0.624736", "text": "def get_relative_path(self):\n return urlparse(self.browser.current_url).path", "title": "" }, { "docid": "537d0104272195456d5199512a19939a", "score": "0.6242903", "text": "def path_only(self):\n path = six.moves.urllib.parse.urlparse(self.path).path\n if path.endswith('/'):\n return path[:-1]\n else:\n return path", "title": "" }, { "docid": "fa65e6534222c9da30b5baac203b941b", "score": "0.624119", "text": "def subFolder(self):\r\n return self.__folder", "title": "" }, { "docid": "d2f6c3bf1cfae2b66d9b26f347640045", "score": "0.62410265", "text": "def full_path(self):\n return os.path.abspath(self.path)", "title": "" }, { "docid": "ee91a0ea2261ba94dac44b04edc3f9b5", "score": "0.62341195", "text": "def module_directory(file_path):\n return os.path.dirname(os.path.realpath(file_path))", "title": "" }, { "docid": "954096fd319e9f5e80fb7c7a2f33de23", "score": "0.6231139", "text": "def get_proj_dir(path: Union[pathlib.PurePath, str] = __file__) -> str:\n return str(pathlib.Path(path).parent.absolute())", "title": "" }, { "docid": "c385d21d3a33bbca59debdf6b09858ec", "score": "0.6230145", "text": "def get_full_path(self):\n return self.path_display", "title": "" }, { "docid": "20f2a6a925a36b9622baf067bfd21796", "score": "0.62301123", "text": "def public_upload_dir_rel(self):\n return os.path.join(self.short_name,settings.COMIC_PUBLIC_FOLDER_NAME)", "title": "" }, { "docid": "b090920fc43649ec08da7506b9a88dde", "score": "0.6227109", "text": "def getDirectory(path):\n\tfrom os.path import split\n\tpath = normalizePath(path)\n\treturn split(path)[0]", "title": "" }, { "docid": "4121b89e18dd4d705b6498b3593072fb", "score": "0.6226696", "text": "def sub_path(self) -> str:\n return self._sub_path", "title": "" }, { "docid": "0d00887d101679e482f5a1c4063343f8", "score": "0.6226467", "text": "def _dirname(self, key):\n return os.path.join(self.root, key[:2])", "title": "" }, { "docid": "c5f6ad2bbbfe5685841e2268005f6b8c", "score": "0.62180513", "text": "def get_path(self):\n return self.path", "title": "" }, { "docid": "933a86490cc0aa0696b58d307116bc40", "score": "0.6205627", "text": "def parentOrThisDir(path):\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n return path", "title": "" }, { "docid": "ebcb0b58b12f07e063c317080ff30f96", "score": "0.6203265", "text": "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "title": "" }, { 
"docid": "50458aa40c370f9aae4c7263e36a9792", "score": "0.6195301", "text": "def get_path_to(self, *args):\n return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))", "title": "" } ]
0a588590510a1389dbe8ecda4d7718da
Returns namedTuple from table file using first row fields as col headers.
[ { "docid": "380986dd868ee86e8702f703fc9cc56e", "score": "0.76259065", "text": "def tableFile2namedTuple(tablePath,sep='\\t'):\n\n reader = csv.reader(open(tablePath), delimiter=sep)\n headers = reader.next()\n Table = collections.namedtuple('Table', ', '.join(headers))\n data = map(Table._make, reader)\n return data", "title": "" } ]
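The positive passage above is Python 2 style (reader.next(), a list-returning map). A Python 3 sketch of the same technique, assuming a plain delimited text file whose first row holds the column names; the function name is an illustrative assumption:

import csv
from collections import namedtuple

def table_file_to_namedtuples(table_path, sep="\t"):
    # First row supplies the field names; remaining rows become namedtuple instances.
    with open(table_path, newline="") as handle:
        reader = csv.reader(handle, delimiter=sep)
        headers = next(reader)
        Row = namedtuple("Row", headers, rename=True)
        return [Row._make(fields) for fields in reader]

rename=True is the main design choice for untrusted headers: any column name that is not a valid Python identifier (or is duplicated) is replaced with a positional name such as _0 instead of raising ValueError.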
[ { "docid": "7c6de548fc0189c5e6a6aadfdc75942c", "score": "0.60297465", "text": "def name_dtypes(file):\n with open(file,'r') as f:\n columns = f.readline().split()\n return tuple(columns)", "title": "" }, { "docid": "80146f967e30aaa718c2c6b46e109ac6", "score": "0.59406924", "text": "def _read_table(self, filename):\n filename = pkg_resources.resource_filename(\n __name__, filename)\n segments = []\n with open(filename, 'rb') as f:\n reader = csv.reader(f, encoding='utf-8')\n header = next(reader)\n names = header[1:]\n for row in reader:\n seg = row[0]\n vals = row[1:]\n specs = set(zip(vals, names))\n segments.append((seg, specs))\n seg_dict = dict(segments)\n return segments, seg_dict, names", "title": "" }, { "docid": "3e6fb992ace50b565355ac7533057edd", "score": "0.5854499", "text": "def tabDelimitedRead(self, fname):\r\n with open(fname, 'rb') as f:\r\n R = []\r\n for L in f.readlines():\r\n R+=[L.replace('\\r\\n','').split('\\t')]\r\n \r\n D = [self.dateStr2tup(d) for d in R[self.dateRow][1:]]\r\n\r\n X = []\r\n t = []\r\n for r in R[self.dataStartRow:]:\r\n t+= [self.timeStr2min(r[0])]\r\n X+=[float64(r[1:])]\r\n \r\n \r\n return (float64(t), float64(X), D)", "title": "" }, { "docid": "c03883d1402a1808eeed1b740d9cb240", "score": "0.5835269", "text": "def setup_headers_TOUGH2(self, filename, header):\n if filename.endswith('OFT') and len(filename) >= 4:\n self.type = filename[-4:].strip()\n else: self.type = None\n self.time_index = 1\n self._nkeys = 1\n items = header.strip().split(',')\n if items[-1] == '': del items[-1] # often an extra comma on the end of the lines\n items = items[3:]\n int_index = 0\n for i, item in enumerate(items):\n try:\n int_item = int(item)\n int_index = i\n break\n except: pass\n if int_index == 0: ncols = len(items)\n else: ncols = int_index\n self.column_name=range(ncols)", "title": "" }, { "docid": "e3326e49739c1d68989edb73782e9f5b", "score": "0.5778163", "text": "def parseCSVHeader(ifile):\n if isinstance(ifile, types.TupleType):\n if verbose:\n print(\"parsing header of preprocessed tuple instead of CSV\")\n return ifile[0]\n\n if isinstance(ifile, types.StringTypes):\n name = ifile\n else:\n name = ifile.name\n\n if verbose:\n print((\"parsing header of %s\" % name))\n possibleDelimiters = [\";\", \",\", \"\\t\", None] # empty string means whitespace-delimited\n with openByNameOrFile(ifile) as f:\n lines = f.readlines(20) # don't bother reading the entire file, just enough to get hints\n delimFound = 0\n delimForThisFile = None\n for delim in possibleDelimiters:\n if delim is not None and lines[0].find(delim) < 0:\n if debug:\n print((\"Delim is not <%s> (no occurrences in labels)\" % delim))\n continue\n tryRegex = makeSplitRegex(delim)\n wordCount = len(tryRegex.findall(lines[0]))\n if wordCount < 3:\n if debug:\n print((\"Delim is not <%s> (no labels found)\\n\" % delim))\n continue\n for line in lines[1:]:\n nwords = len(tryRegex.findall(line))\n if nwords > 0 and nwords != wordCount:\n if debug:\n print((\"%d vs. 
%d: <%s>\" % (nwords, wordCount, line)))\n print((\"Delim is not <%s>\\n\" % delim))\n break\n else:\n delimFound = 1\n delimForThisFile = delim\n regexForThisFile = tryRegex\n break\n if not delimFound:\n raise Exception(\"Cannot find the right delimiter for this CSV input!\")\n # sys.exit(\"Cannot find the right delimiter for this CSV input!\")\n if debug:\n print((\"delimForThisFile= <%s>\" % delimForThisFile))\n keys = regexForThisFile.findall(lines[0])[:-1] # skip empty regex match at end\n keys = [x.strip() for x in keys]\n stringsAreQuoted = 1\n for key in keys:\n if len(key) > 0 \\\n and (not key.startswith('\"') or not key.endswith('\"')) \\\n and (not key.startswith(\"'\") or not key.endswith(\"'\")):\n stringsAreQuoted = 0\n if debug:\n print((\"stringsAreQuoted= %d\" % stringsAreQuoted))\n if stringsAreQuoted:\n keys = [x[1:-1] for x in keys]\n return keys", "title": "" }, { "docid": "fe6ec12ac25a4b1195de70d679d31293", "score": "0.57215965", "text": "def load_typed_table(fp, header=True, col_names=None, col_types=None):\n\n if col_names is not None:\n header = False\n\n num_cols = None\n _rows = []\n if header:\n col_names = next(fp).rstrip('\\n').split('\\t')\n num_cols = len(col_names)\n line = next(fp)\n if line[0] == '#':\n fields = line[1:].rstrip('\\n').split('\\t')\n assert len(fields) <= num_cols\n col_types = [COLUMN_TYPES.get(s, str) for s in fields]\n else:\n _rows.append(_process_data_row(line, num_cols))\n else:\n line = next(fp)\n fields = line.rstrip('\\n').split('\\t')\n num_cols = len(fields)\n _rows.append(_process_data_row(line, num_cols))\n\n for line in fp:\n if line[0] == '#':\n continue\n _rows.append(_process_data_row(line, num_cols))\n\n if col_names is None:\n col_names = ['Column%2.2d' % j for j in range(1, num_cols+1)]\n\n if col_types is not None:\n rows = [[ct(s) if s is not None else None\n for ct,s in zip(col_types, fields)] for fields in _rows]\n else:\n rows = _rows\n\n return col_names, rows", "title": "" }, { "docid": "8f32cce9401fa42ec346693149429bf3", "score": "0.56999516", "text": "def _F_header(file):\n header = OrderedDict()\n header['TSess'] = struct.unpack('l', file.read(8))[0]\n major_ver = struct.unpack('i', file.read(4))[0]\n minor_ver = struct.unpack('i', file.read(4))[0]\n micro_ver = struct.unpack('i', file.read(4))[0]\n header['FAC'] = '{}.{}.{}'.format(major_ver, minor_ver, micro_ver)\n header['Type'] = struct.unpack('i', file.read(4))[0]\n\n header['Z'] = struct.unpack('f', file.read(4))[0]\n header['atom'] = file.read(2).decode('utf-8')\n file.read(1)\n header['Endian'] = bool(file.read(1))\n header['NBlocks'] = struct.unpack('i', file.read(4))[0]\n return header, file", "title": "" }, { "docid": "01dcd1342e2acd38c48fee715b1a08b4", "score": "0.56453806", "text": "def read_csv_into_namedtuples(istream, namedtuple):\n reader = DictReader(istream)\n adapt_fieldnames_to_tuple(reader, namedtuple)\n return [namedtuple(**r) for r in reader]", "title": "" }, { "docid": "7e149f75e17daf4c7ead698e8702c748", "score": "0.56429523", "text": "def file_parser(file, col=0):\n\tdata={}\n\theaders=[]\n\tf=open(file)\n\tline=f.readline()\n\ti=1;\n\twhile line:\n\t\tline=line.rstrip(\"\\n\")\n\t\tif (i==1):\n\t\t\theaders=line.split(\"\\t\")\n\t\telse:\n\t\t\twords=line.split(\"\\t\")\n\t\t\tfor j in range(len(words)):\n\t\t\t\tdata.update({str(words[col]):{str(headers[j]):str(words[j])}})\n\t\tline=f.readline()\n\t\ti+=1\n\tf.close()\n\treturn data", "title": "" }, { "docid": "c4ee0adb6d635207bd8858ab20320a20", "score": "0.5633464", "text": "def 
get_table_header(self, table_name):\n result = self.execute('SELECT * FROM %s LIMIT 0' % table_name)\n return [col[0] for col in result.description]", "title": "" }, { "docid": "bfbe75de2697aed57a817c401c731050", "score": "0.5626556", "text": "def get_header(inp_file):\n # Type: (str) -> List[namedtuple]\n header = []\n CV = namedtuple('CV', ['name', 'min', 'max', 'nbin', 'periodic'])\n with open(inp_file, 'r') as fp:\n firstline = fp.readline().split()\n if firstline[-1] == 'projection':\n NUM_CV = int((len(firstline) - 1) / 2)\n else:\n NUM_CV =int((len(firstline) - 3) / 2)\n handle_pi = lambda x: float(eval(x.replace('pi', '*pi') if re.match('\\dpi', x) else x))\n for i in range(NUM_CV):\n header.append(CV(name=firstline[i+2],\n min=handle_pi(fp.readline().split()[-1]),\n max=handle_pi(fp.readline().split()[-1]),\n nbin=int((fp.readline()).split()[-1]),\n periodic=True if str(fp.readline().split()[-1]) == 'true' else False))\n return header", "title": "" }, { "docid": "1039ec0826a224d79ab2550574959adc", "score": "0.56237984", "text": "def lineFileNext(fh):\n line1 = fh.readline()\n line1 = line1.strip(\"\\n\").strip(\"#\")\n headers = line1.split(\"\\t\")\n Record = namedtuple('tsvRec', headers)\n \n for line in fh:\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\")\n try:\n rec = Record(*fields)\n except Exception, msg:\n logging.error(\"Exception occured while parsing line, %s\" % msg)\n logging.error(\"Filename %s\" % fh.name)\n logging.error(\"Line was: %s\" % repr(line))\n logging.error(\"Does number of fields match headers?\")\n logging.error(\"Headers are: %s\" % headers)\n #raise Exception(\"wrong field count in line %s\" % line)\n continue\n # convert fields to correct data type\n yield rec", "title": "" }, { "docid": "ca83c92297c502b27e3913646e564491", "score": "0.5596673", "text": "def _parse_header(\n self,\n file: BinaryIO\n ) -> Tuple[Dict[int, str], Dict[str, Tuple[int, int]]]:\n cpus = {}\n peripherals = {}\n cpus_count = self._read('i', file)[0]\n for _ in range(cpus_count):\n cpu_id = self._read('i', file)[0]\n cpu_name_len = self._read('i', file)[0]\n cpus[cpu_id] = self._read(f'{cpu_name_len}s', file)[0].decode()\n\n peripherals_count = self._read('i', file)[0]\n for _ in range(peripherals_count):\n peripheral_name_len = self._read('i', file)[0]\n peripheral_name = self._read(\n f'{peripheral_name_len}s', file\n )[0].decode()\n peripheral_start_address, peripheral_end_address = self._read(\n '2Q', file\n )\n peripherals[peripheral_name] = (\n peripheral_start_address,\n peripheral_end_address\n )\n\n return cpus, peripherals", "title": "" }, { "docid": "1cb34abe19a6c0efd627253715e16d60", "score": "0.5575449", "text": "def _first_two_columns(input_file):\n ofile = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n with open(input_file) as ifile:\n for line in ifile:\n col_1_2 = line.strip().split()[:2]\n ofile.write('\\t'.join(col_1_2) + '\\n')\n ofile.close()\n return os.path.abspath(ofile.name)", "title": "" }, { "docid": "0dfb56081ed654cacc79b1ac9b29add2", "score": "0.5553275", "text": "def read_mumax3_table(filename):\n \n table = pd.read_csv(filename, sep='\\t')\n table.columns = ' '.join(table.columns).split()[1::2]\n \n return table", "title": "" }, { "docid": "5682cfb5b84ca45a949d3b7f1e384d73", "score": "0.5544758", "text": "def read_header(filename):\n with open(filename) as f:\n\n N=int(f.readline().strip())\n dim=int(f.readline().strip())\n t=float(f.readline().strip())\n\n return N, dim, t", "title": "" }, { "docid": 
"a800d37b4a485b54e4128777118ef9e5", "score": "0.55309653", "text": "def NamedTupleRow(cursor):\n from collections import namedtuple\n\n attr_names = [x[0] for x in cursor.description]\n\n class Row(namedtuple('Row', attr_names, rename=True)):\n cursor_description = cursor.description\n\n def __new__(cls, iterable):\n return super(Row, cls).__new__(cls, *iterable)\n\n return Row", "title": "" }, { "docid": "c2f02d67c9239ca84d13bba78373d59a", "score": "0.55280674", "text": "def split_schema_tablename(table):\n reader = csv.reader(io.StringIO(str(table)),\n delimiter=\".\",\n doublequote=True,\n quotechar='\"')\n schema_name_tup = next(reader)\n if len(schema_name_tup) == 1:\n schema_name_tup = (None, schema_name_tup[0])\n if len(schema_name_tup) != 2:\n raise ValueError(\"Cannot parse schema and table. \"\n \"Does '{}' follow the pattern 'schema.table'?\"\n .format(table))\n return tuple(schema_name_tup)", "title": "" }, { "docid": "68f3297f65125f557c3e33106e71ae7f", "score": "0.5488642", "text": "def csv_load_opcode_table(self, opcode_file):\n with open(opcode_file, 'r') as csvfile:\n csv_data = csv.reader(csvfile, delimiter=';', )\n for rows in csv_data:\n self.op_table[int(rows[0], 16)] = self.opcode_to_named_tuple(\n rows[1:]) # first field in row is two characters in hex format, parse the rest as strings", "title": "" }, { "docid": "8c5358a14f25d94099026b91112fcdea", "score": "0.5471727", "text": "def read(cls, file: typing.BinaryIO) -> '_TableMetadata':\n _section_expect(file, _SectionTypeId.TABLEMETADATA)\n metadata_count = _read_int32(file)\n if metadata_count < 0:\n raise SBDFError(\"the number of elements is incorrect\")\n metadata = _Metadata()\n for _ in range(metadata_count):\n name = _read_string(file)\n valtype = _ValueType.read(file)\n value_present = _read_int8(file)\n value = _SbdfObject.read(file, valtype) if value_present else None\n value_present = _read_int8(file)\n default_value = _SbdfObject.read(file, valtype) if value_present else None\n metadata.add(name, value, default_value)\n tmeta = _TableMetadata(metadata)\n\n column_count = _read_int32(file)\n metadata_count = _read_int32(file)\n md_name = []\n md_type = []\n md_default = []\n for i in range(metadata_count):\n md_name.append(_read_string(file))\n md_type.append(_ValueType.read(file))\n value_present = _read_int8(file)\n md_default.append(_SbdfObject.read(file, md_type[i]) if value_present else None)\n for i in range(column_count):\n metadata = _Metadata()\n for j in range(metadata_count):\n value_present = _read_int8(file)\n if value_present:\n value = _SbdfObject.read(file, md_type[j])\n metadata.add(md_name[j], value, md_default[j])\n tmeta.add(metadata)\n return tmeta", "title": "" }, { "docid": "7a467573262cdfc6083ac1b0f88a6e63", "score": "0.54714555", "text": "def load_TTable_filename(filename, line_num = 1):\n infile = open(filename, \"r\")\n TTable = load_TTable(infile, line_num)\n infile.close()\n return TTable", "title": "" }, { "docid": "ad74a7b55b477032c17dde47787e45b3", "score": "0.5468406", "text": "def read_generic_table(cls, file, tabletype=0, tablename=None):\n if tablename is None:\n tablename = cls.guess_tablename(file)\n with open(file, errors='ignore') as file:\n lines = file.readlines()\n s = \"\".join(lines)\n try:\n p_rst = cls.GenericTableParser.parseString(s)\n except pp.ParseException as e:\n print(file)\n raise e\n\n table = cls.parse_result2table(p_rst['rows'])\n col_names = list(p_rst['colNames'])\n n_col = p_rst['colNum']\n index_num = p_rst['tblStartAt'] - 1\n df = 
pd.DataFrame(data=table, columns=col_names)\n df.set_index(col_names[:index_num], inplace=True)\n assert df.shape[1] == n_col, \"column missing\"\n return cls(tablename, tabletype, df)", "title": "" }, { "docid": "315395c7cad6346fd350f42190533a7f", "score": "0.5450223", "text": "def __call__(self, fname):\r\n try:\r\n t, X, D = self.excelRead(fname)\r\n except xlrd.biffh.XLRDError:\r\n t, X, D = self.tabDelimitedRead(fname)\r\n \r\n tStr = [self.min2timeStr(T) for T in t]\r\n \r\n return (t, D, X.T, tStr)", "title": "" }, { "docid": "2b506a490785d89b1c23cba346fdc469", "score": "0.5447734", "text": "def parse_table_header_AUTOUGH2(self):\n cols = []\n headline = self.readline()\n headstrs = headline.strip().split()\n indexstr = 'INDEX'\n nkeys = headstrs.index(indexstr)\n for s in headstrs[nkeys+1:]:\n if s[0] == s[0].upper(): cols.append(s)\n else: cols[-1] += ' ' + s\n return nkeys,cols", "title": "" }, { "docid": "d3bf4084799abb83064107e764e066a5", "score": "0.54422855", "text": "def Tuple(*fields):\n return NamedTuple('field', *fields)", "title": "" }, { "docid": "e8bb9719c39975e957957c71541c7da2", "score": "0.54330975", "text": "def parse_header(f):\n columns = ['pokemon', 'species_id', 'height', 'weight', 'type_1', 'type_2',\n 'url_image', 'generation_id', 'evolves_from_species_id']\n result = {}\n\n # Write your code below.\n header = f.readline().split(const.SEP)\n for col in columns:\n result[col] = header.index(col)\n return result", "title": "" }, { "docid": "fcc534e47d3b5dcbf805d301112ae16d", "score": "0.5402222", "text": "def read(self, fileobj):\n reader = csv.reader(fileobj, delimiter=self.delimiter)\n head = next(reader) if self.hasheader is True else None\n rows = list(reader)\n \n if head is None:\n head = self._generic_header(len(rows[0]))\n\n self.header = head\n self.rows = rows\n\n return head, rows", "title": "" }, { "docid": "e5781ece65ed88edfc5ae18549e9e4c9", "score": "0.5392405", "text": "def read_named_csv(file_, Named = None, types = None, read_names = True):\n\n def read(csv_file, Named, types):\n reader = csv.reader(csv_file)\n\n if read_names:\n names = reader.next()\n\n if Named is None:\n Named = collections.namedtuple(\"TupleFromCSV\", names)\n elif set(names) != set(Named._fields):\n raise RuntimeError(\"names in CSV file do not match\")\n else:\n names = Named._fields\n\n if types is None:\n types = dict((f, lambda x: x) for f in names)\n\n for line in reader:\n yield Named(**dict((f, types[f](v)) for (f, v) in zip(names, line)))\n\n if isinstance(file_, str):\n with open(file_) as csv_file:\n for row in read(csv_file, Named, types):\n yield row\n else:\n for row in read(file_, Named, types):\n yield row", "title": "" }, { "docid": "5c7926ca984f896e76f4b2bbf1b0cfe2", "score": "0.5388728", "text": "def get_file_columns() -> [tuple]:\n columns = SessionService.get_item(SessionService.Columns) or []\n return [(column, column) for column in columns]", "title": "" }, { "docid": "e7b0cb31da28837cdc1531f5203d065d", "score": "0.5388231", "text": "def getFitsHeader(self,tbl,filename):\n\n if self.__cnx1 is not None:\t\t\n\t \t\t\n prihdu = tbl + 'prihdu'\n\t exthdu = tbl + 'exthdu'\n\t idprihdu = 'id' + prihdu\n fk_constraint = \"%s.%s = %s.%s_%s\" % (prihdu,idprihdu,exthdu,prihdu,idprihdu)\n \t cursor = self.__cnx1.cursor(MySQLdb.cursors.DictCursor)\n\t \t\n\t if tbl == 'NC':\n\t sql = \"\"\"SELECT %s.*,%s.CD1_1,%s.CD1_2,%s.CCDSUM,%s.GAIN1 AS GAIN \n\t\t FROM FitsHeader.%s, FitsHeader.%s \n\t\t WHERE %s.`file` = '%s' AND %s LIMIT 1\"\"\" % 
\\\n\t\t(prihdu,exthdu,exthdu,exthdu,prihdu,prihdu,exthdu,prihdu,filename,fk_constraint)\n\t else:\n\t sql = \"\"\"SELECT %s.*,%s.CD1_1,%s.CD1_2,%s.CCDSUM,%s.GAIN \n\t\t FROM FitsHeader.%s, FitsHeader.%s \n\t\t WHERE %s.`file` = '%s' AND %s LIMIT 1\"\"\" % \\\n\t\t(prihdu,exthdu,exthdu,exthdu,exthdu,prihdu,exthdu,prihdu,filename,fk_constraint)\n\n\t try:\n\t cursor.execute(sql)\n return cursor.fetchone()\n\n except MySQLdb.Error, e: \n raise DBException (\"Error: %s (%s)\" % (str(e.__class__), str(e)))", "title": "" }, { "docid": "92664a4c704844a449bcbc0a9b32084d", "score": "0.5371749", "text": "def get_column_names_in_file(path, table_name):\n with h5py.File(path, \"r\") as f:\n return list(f[table_name].keys())", "title": "" }, { "docid": "c131da6a0a634f5db36497508ec5c488", "score": "0.5368434", "text": "def read(self):\n path = self.path\n with open(path, \"r\") as file:\n data = file.read()\n file.close()\n\n table_row = data.split(\"\\n\")\n table = {(number.split(\":\"))[0]: float((number.split(\":\"))[1].strip()) for number in table_row}\n return table", "title": "" }, { "docid": "671efe3c7543027c1491d3e015eb1697", "score": "0.53058153", "text": "def RawTuple(num_fields, name_prefix='field'):\n assert isinstance(num_fields, int)\n assert num_fields >= 0\n return NamedTuple(name_prefix, *([np.void] * num_fields))", "title": "" }, { "docid": "84ec21c25f8f838157fdbb567ae2eee0", "score": "0.5297468", "text": "def _process_single( cls, filePath ):\n listOfStuff = [ i[ 0 ] for i in read_csv( filePath ) ]\n return tuple( listOfStuff )", "title": "" }, { "docid": "e48b4426fc85f6d5f528212434c02c62", "score": "0.5291084", "text": "def load_TTable(infile, line_num = 1):\n if line_num < 1:\n raise Exception, \"load_TTable: line_num must be >= 1\"\n while line_num > 1:\n if not infile.readline():\n raise Exception, \"Not enough lines in file\"\n line_num -= 1\n line = infile.readline()\n return parse_ttable(line)", "title": "" }, { "docid": "40720af6868bbb340ba1827d9ec9fb91", "score": "0.52882737", "text": "def _parseSingleFile(self, path, base, name):\n\n # Open file\n with open(path + base + name, 'r') as dataFile:\n # Read lines into a list\n lines = dataFile.read().splitlines()\n # Get header in order\n header = [line.split('=')[0].rstrip().lstrip() for line in lines]\n # Get header and values in oder\n header_and_values = {line.split('=')[0].rstrip().lstrip():\n line.split('=')[-1].lstrip().rstrip() for line in lines}\n # Return in this order\n return header_and_values, header", "title": "" }, { "docid": "3850ec3af1948551f5f635861af87132", "score": "0.5275527", "text": "def get_header_names(filename):\n with open(filename) as f:\n reader = csv.reader(f)\n cols = next(reader)\n return cols", "title": "" }, { "docid": "0898d841ab66b91f62808ddcb06cfbff", "score": "0.5273563", "text": "def reader(fname, sep=\"\\t\"):\n line_gen = (l.rstrip(\"\\r\\n\").split(sep) for l in open(fname))\n header = line_gen.next()\n header[0] = header[0].lstrip(\"#\")\n for toks in line_gen:\n yield dict(zip(header, toks))", "title": "" }, { "docid": "ad95e533c63ee5db1c3fb139f5df64e2", "score": "0.5268348", "text": "def parse_table_name(self, table_name):\n parse = table_name.split('.', 1)\n if len(parse) == 1:\n return None, parse[0]\n else:\n return parse[0], parse[1]", "title": "" }, { "docid": "8069468cc036aa543f4b3be4ae3cf9ce", "score": "0.5264187", "text": "def read_table_of_table(cls, file, tablename=None):\n return cls.read_generic_table(file, ProphetTableType.TableOfTable, tablename)", "title": "" }, { 
"docid": "598bfbae9591fbf6295ec7031fb3b6ff", "score": "0.5261743", "text": "def _read_header(fname):\n header_size = 5\n with fname.open() as myfile:\n head = [next(myfile) for x in range(header_size)]\n\n out = {}\n # header size\n out['header_size'] = header_size\n # site\n out['site'] = head[0].split()[0]\n # channels\n channels = head[2].split()\n out['channels'] = channels[:channels.index('channel')]\n\n # date\n out['date'] = _pd.to_datetime(head[1].split()[0])\n # return head\n return out", "title": "" }, { "docid": "ed4204a1605fb42454e6c13dc4234663", "score": "0.5256547", "text": "def parse_header(self, f):\n\n _start = False\n data = []\n\n for _line in f:\n\n line = _line.replace(b\"\\xb7\", b\"-\").decode().strip()\n data.append(line)\n\n if _start:\n break\n\n if line.startswith(\"Time\"):\n _start = True\n\n self._desc = \" \".join([h.replace('\"', \"\") for h in data[:-2]]).strip()\n\n chandata, unitdata = data[-2:]\n return chandata, unitdata", "title": "" }, { "docid": "d9f9bd8e5c0dda815718ff1779280571", "score": "0.525643", "text": "def get_table():\n with open(file_path, mode=\"rb\") as file:\n arrow = file.read()\n table = perspective.Table(arrow)\n for _ in range(TABLE_SCALAR - 1):\n table.update(arrow)\n return table", "title": "" }, { "docid": "21fea7d4fa33b929477924ce6a1befce", "score": "0.5254773", "text": "def parse_table_header_TOUGH2(self):\n cols = []\n if self.simulator in ['TOUGH2','TOUGH2_MP']: flow_headers = ['RATE']\n else: flow_headers = ['Flow','Veloc']\n headline = self.readline().strip()\n headstrs = headline.split()\n indexstrs = ['INDEX','IND.'] # for EWASG\n for s in indexstrs:\n if s in headstrs:\n nkeys = headstrs.index(s)\n break\n for s in headstrs[nkeys+1:]:\n if s in flow_headers: cols[-1] += ' ' + s\n else: cols.append(s)\n return nkeys,cols", "title": "" }, { "docid": "c6f308c3e00064ae607573e10d9d14f5", "score": "0.5252538", "text": "def _get_row_description(file_name: str):\n\n start_index = 0\n for field_name, char_length, data_type in read_csv(file_name):\n\n if data_type not in DataTypes.__members__:\n raise DataDescriptorParseError(f\"Unexpected datatype {data_type}\")\n\n char_length = int(char_length)\n yield RowDescription(field_name,\n start_index,\n start_index + char_length,\n DataTypes[data_type])\n start_index += char_length", "title": "" }, { "docid": "17812a501c07ed8cbdeaf7c6cea30485", "score": "0.5250829", "text": "def get_xyt_txt(fname, xcol, ycol, tcol):\n return np.loadtxt(fname, usecols=(xcol,ycol,tcol), unpack=True)", "title": "" }, { "docid": "e82ed2264fd3ac0c295762bc0398036b", "score": "0.52387357", "text": "def getColumns(inFile, delim=\" \", header=True): # delim=\"\\t\"\n cols = {}\n indexToName = {}\n for lineNum, line in enumerate(inFile):\n if lineNum == 0:\n headings = line.split(delim)\n i = 0\n for heading in headings:\n heading = heading.strip()\n if header:\n cols[heading] = []\n indexToName[i] = heading\n else:\n # in this case the heading is actually just a cell\n cols[i] = [heading]\n indexToName[i] = i\n i += 1\n else:\n cells = line.split(delim)\n i = 0\n for cell in cells:\n cell = cell.strip()\n cols[indexToName[i]] += [cell]\n i += 1\n\n return cols, indexToName", "title": "" }, { "docid": "6a1e2427b2b3577818bee7f977ede2a7", "score": "0.523844", "text": "def headers(self, table):\n return self.query(\"PRAGMA table_info(%s)\" % table)", "title": "" }, { "docid": "b99516c8091890989187d3ee13ab2d89", "score": "0.52330977", "text": "def process_info_from_file(values_in_datasheet): \n result = 
values_in_datasheet.strip().split(',')\n course_tuple = tuple(result[0:2])\n student_id = int(result[2])\n first_name = result[3]\n last_name = result[4]\n student_tuple = (student_id, first_name, last_name)\n both_tuples_combined = (course_tuple, student_tuple)\n return(both_tuples_combined)", "title": "" }, { "docid": "c3f726d7ec4509484d1bd2d8e0f3211f", "score": "0.52317876", "text": "def _dict_to_tuple(self, data):\n return tuple(\n read_identity(c.name, data, c._desc.default, None)\n for c in self._columns)", "title": "" }, { "docid": "a486060eda73ecb4fc8b1c63aa81cebc", "score": "0.52310866", "text": "def _read_from_csv(\n file_name: str) -> Tuple[Sequence[str], List[Dict[str, Any]]]:\n with tf.io.gfile.GFile(file_name, 'r') as csv_file:\n reader = csv.DictReader(csv_file, quoting=_QUOTING)\n fieldnames = reader.fieldnames\n csv_metrics = list(reader)\n return fieldnames, csv_metrics", "title": "" }, { "docid": "4d1144b46c6247219c1ae665421806b5", "score": "0.52299225", "text": "def adapt_fieldnames_to_tuple(reader, namedtuple):\n got_fieldnames = reader.fieldnames\n expected_fieldnames = namedtuple._fields\n\n if are_expected_fieldnames(got_fieldnames, expected_fieldnames):\n reader.fieldnames = expected_fieldnames\n else:\n raise ValueError(\"Unexpected fieldnames: got={}, expected={}\".format(got_fieldnames, expected_fieldnames))", "title": "" }, { "docid": "689d0acc32b6bd4423d19f603dc40cbc", "score": "0.51722246", "text": "def load_gtfs_table_to_sqlite(fp, gtfs_basename, cc, header=None, verbose=False):\r\n \r\n ur = UTF8TextFile( fp )\r\n rd = csv.reader( ur )\r\n\r\n # create map of field locations in gtfs header to field locations as specified by the table definition\r\n gtfs_header = [x.strip() for x in rd.next()]\r\n\r\n print(gtfs_header)\r\n \r\n gtfs_field_indices = dict(zip(gtfs_header, range(len(gtfs_header))))\r\n \r\n field_name_locations = [gtfs_field_indices[field_name] if field_name in gtfs_field_indices else None for field_name, field_type, field_converter in header]\r\n field_converters = [field_definition[2] for field_definition in header]\r\n field_operator = list(zip(field_name_locations, field_converters))\r\n\r\n # populate stoptimes table\r\n insert_template = 'insert into %s (%s) values (%s)'%(gtfs_basename,\",\".join([x[0] for x in header]), \",\".join([\"?\"]*len(header)))\r\n print( insert_template )\r\n for i, line in withProgress(enumerate(rd), 5000):\r\n # carry on quietly if there's a blank line in the csv\r\n if line == []:\r\n continue\r\n \r\n _line = []\r\n for i, converter in field_operator:\r\n if i<len(line) and i is not None and line[i].strip() != \"\":\r\n if converter:\r\n _line.append( converter(line[i].strip()) )\r\n else:\r\n _line.append( line[i].strip() )\r\n else:\r\n _line.append( None )\r\n \r\n cc.execute(insert_template, _line)", "title": "" }, { "docid": "027143a1af18ed9ac29ed88b0161bc79", "score": "0.5153889", "text": "def get_header_info(file):\n\n dirname = file[:7]\n fits_filepath = os.path.join(FILESYSTEM_DIR, dirname, file)\n header = fits.getheader(fits_filepath, ext=0).tostring(sep='\\n')\n\n return header", "title": "" }, { "docid": "2d28e87a61f608a7c1dc67caef808b4b", "score": "0.5144431", "text": "def read_table(filename):\n try:\n f = open(filename, 'r')\n my_table = []\n for line in f:\n if line[0] != '#':\n my_table.append(line.rstrip().split(':'))\n return my_table \n except IOError:\n raise IOError('file not found!')", "title": "" }, { "docid": "921820d7bf0e3a3599f36d95b5836fe6", "score": "0.51396006", "text": "def 
create(file, tin_dict, key_index=0, parse_index=4):\n\n\twith open(file, 'r') as fh:\n\t\theader = next(fh).strip().split('\\t')\n\t\tcolid = header[key_index]\n\t\tfile = os.path.basename(file) # Remove PATH\n\t\tsample = file.split(\".star_rg_added.sorted.dmark.tin.xls\")[0]\n\n\t\tfor line in fh:\n\t\t\tlinelist = line.strip().split('\\t')\n\t\t\ttid = linelist[key_index]\n\t\t\ttinvalue = linelist[parse_index]\n\t\t\tif sample not in tin_dict:\n\t\t\t\ttin_dict[sample] = {}\n\n\t\t\ttin_dict[sample][tid] = tinvalue\n\n\treturn colid, tin_dict", "title": "" }, { "docid": "e3a5f6dd9f47e2706ca30e699ca3c149", "score": "0.51352006", "text": "def __init__(self, filename):\n # Variables to store metadata about the table structure\n self.num_rows = 0\n self.num_cols = 0\n self.table = []\n file = open('2018S1-proj1_data/' + filename, 'r')\n for line in file.readlines():\n # Split based on common to get the values\n row = line.split(',')\n self.num_cols = len(row)\n # Add row to table and increment row count\n self.table.append(row)\n self.num_rows += 1\n file.close()", "title": "" }, { "docid": "d76b34d953fdc8f457ae240aec583da1", "score": "0.5131333", "text": "def tsv_reader(fp):\n return csv.reader(fp, delimiter=\"\\t\")", "title": "" }, { "docid": "c686e8176d6c4844fc1e5cede9feb75b", "score": "0.5126377", "text": "def loadFasta(filename):\n fp = open(filename, 'r')\n # split at headers\n data = fp.read().split('>')\n fp.close()\n # ignore whatever appears before the 1st header\n data.pop(0) \n headers = []\n sequences = []\n for sequence in data:\n lines = sequence.split('\\n')\n headers.append(lines.pop(0))\n # add an extra \"+\" to make string \"1-referenced\"\n sequences.append( ''.join(lines))\n return (headers, sequences)", "title": "" }, { "docid": "8a4de5088a275a785f481737f3176e83", "score": "0.5124054", "text": "def read_table(self, table_path):\n\n tdf = pd.read_csv(table_path, delimiter=\"|\")\n return tdf", "title": "" }, { "docid": "ad2456a4738278e6fd7e27bbc058312d", "score": "0.5122289", "text": "def read_header(in_file):\n lines = []\n for _ in range(Tsdata.header_size):\n lines.append(in_file.readline().rstrip())\n return os.linesep.join(lines)", "title": "" }, { "docid": "82dd8a20880af4cec4c938bf6c56d69d", "score": "0.5113379", "text": "def tablefromascii (tablename, asciifile,\n headerfile='',\n autoheader=False, autoshape=[],\n columnnames=[], datatypes=[],\n sep=' ',\n commentmarker='',\n firstline=1, lastline=-1,\n readonly=True,\n lockoptions='default', ack=True):\n import os.path\n filename = os.path.expandvars(asciifile);\n filename = os.path.expanduser(filename);\n if not os.path.exists(filename):\n s = \"File '%s' not found\" % (filename)\n raise IOError(s)\n if headerfile != '':\n filename = os.path.expandvars(headerfile);\n filename = os.path.expanduser(filename);\n if not os.path.exists(filename):\n s = \"File '%s' not found\" % (filename)\n raise IOError(s)\n tab = table(asciifile, headerfile, tablename, autoheader, autoshape,\n sep, commentmarker, firstline, lastline,\n _columnnames=columnnames, _datatypes=datatypes, _oper=1);\n print 'Input format: [' + tab._getasciiformat() +']';\n # Close table and reopen it in correct way.\n tab = 0;\n return table(tablename, readonly=readonly, lockoptions=lockoptions,\n ack=ack);", "title": "" }, { "docid": "af7b408defefcafe841d2cf0415d905f", "score": "0.5105975", "text": "def read_csv(fname):\n\twith open(fname, 'r') as csvfile:\n\t\tfreader = csv.reader(csvfile, delimiter=',')\n\t\theaders = freader.next()\n\t\trows = 
[[float(r) for r in row] for row in freader]\n\t\tcols = zip(*rows)\n\t\t#return {headers[i]:cols[i] for i in xrange(len(cols))}\n\t\treturn headers, cols", "title": "" }, { "docid": "0b229e67059d2aa36f327f444a9e97ee", "score": "0.5096697", "text": "def getMdFirstRow(filename):\n md = xmipp.MetaData()\n md.read(filename, 1)\n if md.getParsedLines():\n row = XmippMdRow()\n row.readFromMd(md, md.firstObject())\n else:\n row = None\n \n return row", "title": "" }, { "docid": "3992d01d1c3399439a500901118abd8e", "score": "0.508967", "text": "def create_map(tspfile, sep=\" \", index_col=0, header=None, names=None, skiprows=7, skipfooter=1):\r\n if names is None:\r\n names = [\"Y\", \"X\"]\r\n return pd.read_csv(tspfile, sep=sep, index_col=index_col, header=header, names=names, skiprows=skiprows,\r\n skipfooter=skipfooter, engine=\"python\")", "title": "" }, { "docid": "f69f8721a8df4faed015a3af4f595e11", "score": "0.5088983", "text": "def extract_wfdb_header(file_path):\n from wfdb.io import rdheader\n header = rdheader(record_name=os.path.splitext(file_path)[0])\n return _standardized_wfdb_header(header)", "title": "" }, { "docid": "32229881ee7ab8fef13b2d053e1011db", "score": "0.5088551", "text": "def extractData(filename: str) -> tuple:\n \n with open(filename) as inf:\n lines = inf.readlines()\n \n # 244 genes\n # 148 cell lines\n \n titles = lines.pop(0)\n genenames = titles.split(',')\n genenames.pop(0)\n \n celllinenames = []\n celllines = []\n for line in lines:\n cellline = line.split(',')\n celllinename = cellline.pop(0)\n for i, thing in enumerate(cellline):\n cellline[i] = float(thing)\n \n celllinenames.append([celllinename])\n celllines.append(cellline)\n \n return celllines, celllinenames, genenames", "title": "" }, { "docid": "53888ba93a76824abd465c4ad4824a21", "score": "0.50883704", "text": "def get_table_from_file(file_name):\n with open(file_name, \"r\") as file:\n lines = file.readlines()\n table = [element.replace(\"\\n\", \"\").split(\";\") for element in lines]\n return table", "title": "" }, { "docid": "53888ba93a76824abd465c4ad4824a21", "score": "0.50883704", "text": "def get_table_from_file(file_name):\n with open(file_name, \"r\") as file:\n lines = file.readlines()\n table = [element.replace(\"\\n\", \"\").split(\";\") for element in lines]\n return table", "title": "" }, { "docid": "e8082b4a1b05a9fa16e16b248d8a4e0e", "score": "0.508263", "text": "def getNext(self):\n line = self._file.readline()\n if line:\n return tuple(line.strip('\\n').split('\\t'))\n else: \n return None", "title": "" }, { "docid": "e8082b4a1b05a9fa16e16b248d8a4e0e", "score": "0.508263", "text": "def getNext(self):\n line = self._file.readline()\n if line:\n return tuple(line.strip('\\n').split('\\t'))\n else: \n return None", "title": "" }, { "docid": "8938db09584e7dfc3196a390f34faf9a", "score": "0.50809777", "text": "def setup_table_TOUGH2(self, tablename):\n nkeys,cols = self.parse_table_header_TOUGH2()\n expected_floats = self.table_expected_floats(tablename, cols)\n header_skiplines = self.skip_to_results_line(expected_floats)\n line = self.readline()\n start = self.start_of_values(line, cols)\n keypos = self.key_positions(line[:start],nkeys)\n if keypos:\n index_pos = [keypos[-1]+5, start]\n longest_line = line\n rowdict = {}\n count,index = 0,-1\n skiplines = []\n lsep = 60\n more = True\n internal_header_skiplines = None\n def count_read(count): return self.readline(), count + 1\n def is_header(line): return all([col in line for col in cols])\n def is_separator(line): return len(line)>lsep 
and line[1:lsep+1] == line[1]*lsep\n while more:\n keyval = [fix_blockname(line[kp:kp+5]) for kp in keypos]\n if len(keyval) > 1: keyval = tuple(keyval)\n else: keyval = keyval[0]\n indexstr = line[index_pos[0]:index_pos[1]]\n try: index = int(indexstr) - 1\n # To handle overflow (****) in index field: assume indices continue:\n except ValueError: index += 1\n # Use a dictionary to deal with duplicate row indices (TOUGH2_MP):\n rowdict[index] = (count,keyval)\n if len(line.strip()) > len(longest_line): longest_line = line\n pos = self._file.tell()\n last_count = count\n line,count = count_read(count)\n internal_header = False\n if is_header(line): internal_header = True\n elif is_separator(line): # end of table\n more = False\n self._file.seek(pos)\n elif not line.strip(): # blank- check next line:\n pos = self._file.tell()\n line,count = count_read(count)\n stripline = line.strip()\n if is_header(line): internal_header = True\n elif is_separator(line) or stripline == self.title or not stripline:\n more = False # end of table\n self._file.seek(pos)\n if more and internal_header:\n if internal_header_skiplines is None:\n internal_header_skiplines = self.skip_to_results_line(expected_floats)\n count += internal_header_skiplines\n line = self.readline()\n else:\n for i in range(internal_header_skiplines): line,count = count_read(count)\n skiplines.append(count - last_count - 1)\n indices = sorted(rowdict.keys())\n row_line = [rowdict[index][0] for index in indices]\n rows = [rowdict[index][1] for index in indices]\n numpos = self.parse_table_line(longest_line, start, cols)\n row_format = {'key': keypos, 'index': keypos[-1] + 5, 'values': numpos}\n allow_rev = tablename == 'connection'\n self._table[tablename] = listingtable(cols, rows, row_format, row_line, num_keys = nkeys,\n allow_reverse_keys = allow_rev,\n header_skiplines = header_skiplines,\n skiplines = skiplines)\n self._tablenames.append(tablename)\n else: raise Exception('Error parsing '+tablename+' table keys: table not created.')", "title": "" }, { "docid": "9939181398d6e0ccf5f23427269e5785", "score": "0.5076455", "text": "def _read_record_header(fhandle):\n header_info = _read_header_info(fhandle, _record_header_format, nbytes_total=50)\n return header_info", "title": "" }, { "docid": "0bc2e52d2b81a1eecc94d2cdb7b2350a", "score": "0.50747925", "text": "def first_csv():\n trips = [\n (\"A\", \"B\", 5),\n (\"B\", \"A\", 5),\n ]\n return _csv_file(trips)", "title": "" }, { "docid": "7072064b34d3d8d663f8bd61a92da1e7", "score": "0.5063524", "text": "def read_scores_file(self, file: Path) -> Tuple[str, pd.DataFrame]:\n\n try:\n # load the tsv file\n df = pd.read_csv(file, sep='\\t', header=None,\n names=['query_id', 'Q0', 'document_id', 'rank', 'score', 'system'],\n usecols=['document_id', 'score'])\n #df = pd.read_csv(file, sep='\\t')\n # get the query id from the header\n q_id = os.path.splitext(os.path.basename(file))[0][2:]\n #df.query_id.ix[0]\n #q_id = pd.columns.values[0]\n #df.columns = ['document_id', 'score']\n\n return q_id, df\n except pd.errors.EmptyDataError:\n logging.log(logging.WARN, f' * No matcher results for {file}')\n return '', pd.DataFrame()", "title": "" }, { "docid": "84811690d7437b363a4d75059b85d9d0", "score": "0.5061657", "text": "def test_column_header():\n raw = \"* Sea-Bird SBE 9 Data File:\\n* System UpLoad Time = Aug 01 2011 11:34:32\\n# nquan = 2\\n# nvalues = 3\\n# name 0 = depSM: Depth [salt water, m]\\n# name 1 = prDM: Pressure, Digiquartz [db]\\n# start_time = Aug 01 2011 11:34:32\\n# bad_flag = 
-9.990e-29\\n# datcnv_date = Aug 02 2011 04:16:47, 7.18c\\n# file_type = ascii\\n*END*\\n Depth Press \\n 3.973 3.995\\n 4.079 4.102\\n 3.902 3.924\\n\"\n profile = CNV(raw)\n assert len(profile[\"DEPTH\"]) == 3\n assert profile[\"DEPTH\"][0] == 3.973\n\n # Now without the headers\n profile = CNV(raw.replace(\" Depth Press \\n\", \"\"))\n assert len(profile[\"DEPTH\"]) == 3\n assert profile[\"DEPTH\"][0] == 3.973", "title": "" }, { "docid": "3d8565b545166147c33c864bc8b1fe44", "score": "0.505002", "text": "def setup_headers_TOUGH2_MP(self, filename, header):\n headers = header.strip().split()\n self.type = headers[0] # FOFT, COFT or GOFT\n time_header = [h for h in headers if h.lower().startswith('time')][0]\n self.time_index = headers.index(time_header)\n if self.type == 'FOFT':\n self.key_index = self.time_index - 1\n self._nkeys = 1\n else: # COFT or GOFT\n self.key_index = self.time_index + 1\n self._nkeys = 2\n self.key_name = headers[self.key_index: self.key_index+self._nkeys]\n prepend_titles, append_titles = ['GAS','GENERATION'], ['flow']\n startcol = self._nkeys + 2\n cols = []\n i = startcol\n while i <= len(headers) - 1:\n title = headers[i]\n if title in prepend_titles:\n cols.append(title + ' ' + headers[i+1])\n i += 1\n elif title in append_titles:\n cols[-1] += ' ' + title\n else: cols.append(title)\n i += 1\n self.column_name = cols\n self.col_start = [header.index(colname) for colname in self.column_name]\n self.key_start = [header.index(key) for key in self.key_name]\n self.time_pos = [header.index(time_header)]\n if self.type == 'FOFT':\n self.key_start.append(self.time_pos[0])\n self.time_pos.append(self.col_start[0])\n else:\n self.key_start.append(self.col_start[0])\n self.time_pos.append(self.key_start[0])", "title": "" }, { "docid": "bbe0c86ee70d4c86c3598bd5dcd44d30", "score": "0.5049688", "text": "def load_csv(infile):\n header = infile.readline()\n dialect = csv.Sniffer().sniff(header)\n reader = csv.reader((header, ), dialect)\n\n CSVRow = namedtuple(\"CSVRow\", [format_field_name(x) for x in next(reader)])\n\n reader = csv.reader(infile, dialect)\n\n for data in map(CSVRow._make, reader):\n yield data", "title": "" }, { "docid": "58aff043f2aa3071a08e913e14937d21", "score": "0.50469524", "text": "def getFullData(path):\n reader = csv.reader(file(path, \"rb\"))\n header = None\n data = []\n for row in reader:\n if not header:\n header = row\n continue\n obj = {}\n for k in range(len(header)):\n obj[header[k]] = row[k]\n data.append(obj)\n #print header\n return header, data", "title": "" }, { "docid": "bdccc6352d63b601ede45c220e4ffe03", "score": "0.5041414", "text": "def next(self) -> Optional[Tuple[Union[str, int]]]:\r\n line = self.__sam.readline().rstrip()\r\n if line:\r\n fields: List[Union[str, int]] = line.split('\\t')\r\n for i in (1, 3, 4, 7, 8):\r\n fields[i] = int(fields[i])\r\n return tuple(fields)\r\n else: # line == ''\r\n return None", "title": "" }, { "docid": "30b10c57b54f21cb1a908655d9057afc", "score": "0.50392026", "text": "def retColumns(inf, delim = \"\\t\"):\n f = openAnyFile(inf)\n line = f.readline()\n if line.isspace():\n log(\"ERROR: encountered a blank header\\n\", die = True)\n line = line.rstrip(\"\\r\\n\")\n return(re.split(delim, line)[1:])", "title": "" }, { "docid": "8048f22563c153af7d74b1626fed02bb", "score": "0.5033996", "text": "def prepare_kegg_table(kegg_table_file):\n kegg_table = dict()\n with open(kegg_table_file, 'r') as f:\n for line in f:\n fields = line.strip().split(\"\\t\")\n if len(fields)>1:\n kegg_table[fields[0]] = 
fields[1]\n return kegg_table", "title": "" }, { "docid": "c5f3c3b6f17ddaf76bbeb114e3a3d8a5", "score": "0.50270337", "text": "def get_raw_data(input_dir, filename):\n fh = open(input_dir + \"/\" + filename)\n fh.readline()\n for line in fh:\n fields = line[:-1].split(\"\\t\")\n yield fields", "title": "" }, { "docid": "fd0a0fcb83f6cf1cd136141cb376b8d1", "score": "0.5026104", "text": "def ReadTable(inputStr, header=0, delim='\\t'):\n rows = []\n with open(inputStr, 'rb') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=delim)\n if (header > 0):\n# print 'Skipping', header, 'lines of header from file', inputStr\n\t for i in range(header):\n csvreader.next()\n for row in csvreader:\n rows.append(row)\n return rows", "title": "" }, { "docid": "91d9377786767d117e4446c5e9ffc34c", "score": "0.5025951", "text": "def read_header(f_in):\n header = f_in.read(1024) # 256 symbols, 4 bytes each\n unpacked_header = struct.unpack('256I', header)\n \n return [(weight,chr(ch_indx) ) for ch_indx, \\\n weight in enumerate(unpacked_header) if weight]", "title": "" }, { "docid": "c60c09ef602c8e8d8c76ce8b4401af07", "score": "0.5025348", "text": "def _info_from_file(path):\r\n file = open(path, \"r\")\r\n lines = file.readlines()\r\n file.close()\r\n\r\n line = [int(num_str) for num_str in lines[0].split()]\r\n\r\n rows_num = line[0]\r\n row_len = line[1]\r\n unavailable_num = line[2]\r\n pools_num = line[3]\r\n servers_num = line[4]\r\n\r\n current_index = 1\r\n\r\n # Getting positions of unavailable slots:\r\n unavailable_pos_list = []\r\n for index in range(current_index, current_index + unavailable_num):\r\n unavailable_pos_list.append(tuple([int(num_str) for num_str\r\n in lines[index].split()]))\r\n current_index += unavailable_num\r\n\r\n # Getting tuples representing servers:\r\n servers_data_list = []\r\n for index in range(current_index, current_index + servers_num):\r\n servers_data_list.append(tuple([int(num_str) for num_str\r\n in lines[index].split()]))\r\n\r\n return tuple([tuple(line), tuple(unavailable_pos_list), tuple(servers_data_list)])", "title": "" }, { "docid": "4194b7b45675b68fcd252841eb1abd3c", "score": "0.5021917", "text": "def setup_headers_TOUGHplus(self, filename, header):\n headers = header.strip().split('-')\n if filename.endswith('OFT') and len(filename) >= 4:\n self.type = filename[-4:].strip()\n elif '_Time_Series' in filename:\n filetype = {'Elem_Time_Series': 'FOFT', 'Conx_Time_Series': 'COFT',\n 'SS_Time_Series': 'GOFT'}\n for key in filetype.keys():\n if key in filename:\n self.type = filetype[key]\n break\n else: self.type = None\n if self.type == 'FOFT':\n self.time_index = 1\n self._nkeys = 1\n else:\n self.time_index = 0\n self._nkeys = 0\n cols = headers[self._nkeys + 1:]\n from re import sub\n cols = [sub('\\[.*\\]','',col).strip() for col in cols] # remove units\n self.column_name = cols", "title": "" }, { "docid": "99122d76dbf3179723a41a8dcc63ad6c", "score": "0.5018062", "text": "def parse_header(header_row):\n\n field_names = []\n\n for x in header_row.iter('%sth' %br):\n # just look for the para tags - its text is the column header\n if x.find('%spara' %br) is not None:\n col_label = x.find('%spara' %br).text\n field_names.append(col_label)\n else:\n field_names.append(\"none found\")\n pass\n\n return field_names", "title": "" }, { "docid": "b9441414c63c83086bcf09c7d24e06e0", "score": "0.50156045", "text": "def main(filename):\n\n table = PrettyTable(['No.', 'Time', 'HCI', 'Direction', 'CMD/EVT/L2CAP', 'Data'])\n table.align['CMD/EVT/L2CAP'] = \"l\"\n 
table.align['Data'] = \"l\"\n\n records = bts.parse(filename)\n rows = get_rows(records)\n [table.add_row(r) for r in rows]\n\n print(table)", "title": "" }, { "docid": "4162203c0651fce8b4b384fb0423586b", "score": "0.50132465", "text": "def extract_header(file_path):\n fname = os.path.split(os.path.abspath(file_path))[-1]\n _, ext = os.path.splitext(fname)\n load_func = _EXT_TO_LOADER[ext[1:]]\n header = load_func(file_path)\n # Add file location data\n file_path, file_name = os.path.split(file_path)\n header['data_dir'] = file_path\n header[\"file_name\"] = file_name\n return header", "title": "" }, { "docid": "55f85d691e153b29c22ec0732d907b6d", "score": "0.50097257", "text": "def _extractColNames(self, fileName):\n return []", "title": "" }, { "docid": "4edf66caf5838af3ccf083ca965c8c02", "score": "0.5008661", "text": "def sorted_headers(fields):\n header_names = []\n header_ids = []\n for column in fields.fields_columns:\n header_names.append(fields.fields[\n fields.fields_by_column_number[column]][\"name\"])\n header_ids.append(fields.fields_by_column_number[column])\n\n return header_names, header_ids", "title": "" }, { "docid": "f01131a6c563eb54d5b411b08c928988", "score": "0.5008467", "text": "def read(file: typing.BinaryIO) -> typing.Tuple[int, int]:\n _section_expect(file, _SectionTypeId.FILEHEADER)\n major = _read_int8(file)\n minor = _read_int8(file)\n return major, minor", "title": "" }, { "docid": "64784abd7a8b0aed423e1e5ff905c520", "score": "0.50036734", "text": "def get_SST_header(afile):\n afile.seek(0) # Assumes only one cube per file\n sheader = afile.read(512)\n header = {}\n for s in filter(None,sheader.decode(\"utf-8\").split('\\x00')):\n for s in s.strip().replace(':',',').split(', '):\n x = s.strip().split('=')\n try:\n header.update({x[0]:x[1]})\n except IndexError:\n pass\n else:\n header.update({x[0]:x[1]})\n\n #Convert cube dimensions to integers\n header['dims'] = int(header['dims'])\n header['ny'] = int(header['ny'])\n header['nx'] = int(header['nx'])\n\n if 'diagnostics' in header:\n header['diagnostics'] = list(header['diagnostics'].replace('[','').replace(']','').split(','))\n\n if header['dims'] != 3:\n raise Exception(\"Not 3D\")\n else:\n header['nt'] = int(header['nt'])\n\n return header", "title": "" }, { "docid": "87b0039d431801638590eee51520059e", "score": "0.5000712", "text": "def get_header_info(headers):\n\n keywords = []\n names = []\n units = []\n systs = []\n\n for column_header in headers:\n column_header = decode_string(column_header)\n\n unit, column_header = get_units(column_header)\n units.append(unit)\n\n keyword, syst, column_header = get_keyword(column_header)\n keywords.append(keyword)\n systs.append(syst)\n\n names.append(column_header)\n\n return keywords, names, units, systs", "title": "" }, { "docid": "e17e3f89642f40c9e874fa447e7e1d68", "score": "0.49976736", "text": "def _readcsv(file):\n rows = []\n with open(file, newline='') as csvfile:\n reader = csv .reader(csvfile, delimiter=',', quotechar='\"')\n headers = next(reader, None) # headers\n for row in reader:\n rows.append([float(x) for x in row])\n return headers, rows", "title": "" }, { "docid": "5b09c5e50f3c562cf33cef9ae2f9912a", "score": "0.49934605", "text": "def parse_grid_header(filename):\n infile = open(filename, 'r')\n header = {}\n\n ncols = int(infile.readline().split()[1])\n nrows = int(infile.readline().split()[1])\n xllcorner = float(infile.readline().split()[1])\n yllcorner = float(infile.readline().split()[1])\n cellsize = float(infile.readline().split()[1])\n\n 
maxx = xllcorner + (ncols * cellsize)\n maxy = yllcorner + (nrows * cellsize)\n header['min'] = [xllcorner, yllcorner, 0]\n header['max'] = [maxx, maxy, 1000]\n header['rows'] = nrows\n header['cols'] = ncols\n header['cellsize'] = cellsize\n return header", "title": "" }, { "docid": "123c3585fbd57b50c7ea0e33e8fae335", "score": "0.49905828", "text": "def read(filename: str, custom_field_map: Optional[NRRDFieldMap] = None, index_order: IndexOrder = 'F') \\\n -> Tuple[npt.NDArray, NRRDHeader]:\n\n with open(filename, 'rb') as fh:\n header = read_header(fh, custom_field_map)\n data = read_data(header, fh, filename, index_order)\n\n return data, header", "title": "" }, { "docid": "27f7c0a216ed2c105490530e328dbb0f", "score": "0.4989333", "text": "def csv_to_tuples(file_path):\n import csv\n with open(file_path, 'r') as f:\n data=[tuple(line) for line in csv.reader(f)]\n return data", "title": "" }, { "docid": "de8dc9a68f926174080908d39c2b62d7", "score": "0.497212", "text": "def _cols(self, file):\n return self.pd.read_csv(file, index_col=0, nrows=0).columns", "title": "" } ]
20a66f5fe17b32e5e44979c26ec8fca1
Tests that the field uk region name has correct default value.
[ { "docid": "7ad5f17c217e2ad0ddbbd305165ca427", "score": "0.64815146", "text": "def test_uk_region_name(actual_uk_region_id, possible_uk_region_id, expected):\n investment_project = InvestmentProjectFactory()\n if actual_uk_region_id:\n investment_project.actual_uk_regions.add(parse_uuid(actual_uk_region_id))\n if possible_uk_region_id:\n investment_project.uk_region_locations.add(parse_uuid(possible_uk_region_id))\n\n etl = ETLInvestmentProjects(destination=MIInvestmentProject)\n\n updated, created = etl.load()\n assert (0, 1) == (updated, created)\n\n mi_investment_project = MIInvestmentProject.objects.values(*etl.COLUMNS).first()\n assert mi_investment_project['uk_region_name'] == expected", "title": "" } ]
[ { "docid": "7e29ae2184d4aea393895cb1a38957f2", "score": "0.7086188", "text": "def test_default_values(self):\r\n form = AustralianPlaceForm()\r\n self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'NSW',\r\n str(form['state_default'])))\r\n self.assertTrue(re.search(INPUT_VALUE_PATTERN % '2500',\r\n str(form['postcode_default'])))", "title": "" }, { "docid": "cd080ced11aea6247a8094ae017de5b3", "score": "0.68900025", "text": "def test_default_values(self):\r\n form = PakistaniPlaceForm()\r\n self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'PK-IS',\r\n str(form['state_default'])))\r\n self.assertTrue(re.search(INPUT_VALUE_PATTERN % '44000',\r\n str(form['postcode_default'])))", "title": "" }, { "docid": "eea73a987258d389f5349ae52ad6df8d", "score": "0.6312018", "text": "def test_possible_uk_region_names(uk_region_id, expected):\n investment_project = InvestmentProjectFactory()\n if uk_region_id:\n investment_project.uk_region_locations.add(parse_uuid(uk_region_id))\n\n etl = ETLInvestmentProjects(destination=MIInvestmentProject)\n\n updated, created = etl.load()\n assert (0, 1) == (updated, created)\n\n mi_investment_project = MIInvestmentProject.objects.values(*etl.COLUMNS).first()\n\n assert mi_investment_project['possible_uk_region_names'] == expected", "title": "" }, { "docid": "8bc8fafca2cb941aa4076d61c7ebd68e", "score": "0.6040815", "text": "def test_actual_uk_region_names(uk_region_id, expected):\n investment_project = InvestmentProjectFactory()\n if uk_region_id:\n investment_project.actual_uk_regions.add(parse_uuid(uk_region_id))\n\n etl = ETLInvestmentProjects(destination=MIInvestmentProject)\n\n updated, created = etl.load()\n assert (0, 1) == (updated, created)\n\n mi_investment_project = MIInvestmentProject.objects.values(*etl.COLUMNS).first()\n\n assert mi_investment_project['actual_uk_region_names'] == expected", "title": "" }, { "docid": "a790d7e863588b017699d3d9d246b396", "score": "0.59022886", "text": "def verify_default_dropdown_value(self,**kwargs):\n for field,expected_value in kwargs.items():\n locator = eda_lex_locators[\"eda_settings_system\"][\"default_dropdown_value\"].format(field,expected_value)\n self.selenium.wait_until_element_is_visible(locator,\n error= \"Element is not displayed for the user\")\n actual_value = self.selenium.get_webelement(locator).text\n if not str(expected_value).lower() == str(actual_value).lower() :\n raise Exception (f\"Dropdown value in {field} is {actual_value} but it should be {expected_value}\")", "title": "" }, { "docid": "e636606551279e8724553cd1baeec586", "score": "0.58526796", "text": "def test_schedule_default_values(field_name, expected):\n schedule_field = Schedule._meta.get_field(field_name)\n assert schedule_field.default == expected", "title": "" }, { "docid": "84b61c3439bb362dc92d03bd936d2171", "score": "0.5821078", "text": "def test_gcputil_init_region_config():\n test_region = \"europe-west3\"\n TEST_ENV_DATA = copy.deepcopy(config.ENV_DATA)\n TEST_ENV_DATA[\"region\"] = test_region\n with patch(\"ocs_ci.framework.config.ENV_DATA\", TEST_ENV_DATA):\n gcp_util = gcp.GoogleCloudUtil()\n assert gcp_util._region_name == test_region\n # but the config can specify only the default value\n gcp_util = gcp.GoogleCloudUtil(region_name=\"something_else\")\n assert gcp_util._region_name == \"something_else\"", "title": "" }, { "docid": "6a46ee925d38be578655b3d46b66d7fd", "score": "0.57604563", "text": "def test_empty_default(self):\n assert self.reg.defaults.get(Type1) is None", "title": "" }, { "docid": 
"04e1b9c304ac9d3984d99eae5bb325b4", "score": "0.5727835", "text": "def get_default_region(self):\n\n return '4'", "title": "" }, { "docid": "9415e8bca9d8a5b3767532dd2cbeecdb", "score": "0.5673106", "text": "def is_region_flagged(self):\n element = self.driver.find_element(*ProjectFormLoc.ERROR_REGION)\n assert('Region is required' in element.text), 'Region error missing'", "title": "" }, { "docid": "1241f3dad8b8957e419f68b0a4bfb278", "score": "0.5647528", "text": "def test_required_default_value(self):\n\n field = self.input_definition._meta.get_field(\"required\")\n self.assertFalse(field.default)", "title": "" }, { "docid": "845a4337f01f3bcd487ebefca82a8329", "score": "0.5629654", "text": "def getDefault(field):", "title": "" }, { "docid": "21f2828fb95e385880722b46612cfc5b", "score": "0.5612702", "text": "def test_region_name_not_in_response(client):\n assert not Requester(client, MOSCOW_PATH).response_has(\"Europe\")", "title": "" }, { "docid": "fb7a6a6f0d3433ca6088af6f583e2c8c", "score": "0.5607116", "text": "def test_gcputil_init_region():\n gcp_util = gcp.GoogleCloudUtil(region_name=\"europe-west1\")\n assert gcp_util._region_name == \"europe-west1\"", "title": "" }, { "docid": "1239a0a5fae3e954fde5c3eefdd1bbfc", "score": "0.5603828", "text": "def test_setUpName(self):\n self.assertEqual(self.testCity.name, \"SANFRANCISCO\")", "title": "" }, { "docid": "4227b900f980bb37f70b9b4d10b94ad9", "score": "0.5598451", "text": "def auto_find_region(\n session: Optional[Session] = None, default: Optional[str] = None\n) -> str:\n region_name = botocore_default_region(session)\n\n if region_name is None:\n region_name = ec2_current_region()\n\n if region_name is not None:\n return region_name\n\n if default is None:\n raise ValueError(\"Region name is not supplied and default can not be found\")\n\n return default", "title": "" }, { "docid": "b07edc768b8eea5f2861cd95554a8283", "score": "0.5532597", "text": "def check(cls, name, cloud):\n if not cls.exists(name, cloud):\n raise ValueError(\n \"the default value {} in cloud {} does not exist\".format(name,\n cloud))", "title": "" }, { "docid": "ccbeed0fd6d2b5fd3b6ae31ea67cab84", "score": "0.55017394", "text": "def test_get_regional_info_obj_fail(self):\n self.initialize()\n\n obj = get_regional_info_obj(\"gdansk\", \"pomorskie\")\n\n self.failUnlessEqual(obj, None)", "title": "" }, { "docid": "b44e6b18a2681a9521f54a2dc03aae16", "score": "0.5451591", "text": "def test_check_s3_access_default_region(self, mock_boto3_client):\n expected_region_name = \"us-east-1\"\n with self.assertRaisesRegex(AttributeError, \"Raised intentionally\"):\n _check_s3_access(\"bucket\", {})\n\n self.assertIn(\"region_name\", mock_boto3_client.call_args.kwargs)\n self.assertEqual(expected_region_name, mock_boto3_client.call_args.kwargs.get(\"region_name\"))", "title": "" }, { "docid": "d7e3b0f2e7de28f82b7179135ed75363", "score": "0.5415354", "text": "def test_form_select_country_name(self):\n self.check_ajax(\"country\", \"DummyCountry\", self.db.fetch_distinct_states)", "title": "" }, { "docid": "e3df1faa80ff242aa6f17b449c7ffa4f", "score": "0.5411013", "text": "def get_region_name(place_info: dict) -> Optional[str]:\n region: Optional[str]\n try:\n region = place_info['region'][0]['value']\n except Exception as ex:\n region = None\n logging.error(f\"Error in region reading {ex}\")\n return region", "title": "" }, { "docid": "8b4397abc9b3c1458e015b3b4cc7021c", "score": "0.53571796", "text": "def test_get_enketo_defaults_with_non_existent_field(self):\n # create xform\n 
self._publish_transportation_form()\n # create kwargs with NON-existing xform variable\n kwargs = {\"name\": \"bla\"}\n defaults = generate_enketo_form_defaults(self.xform, **kwargs)\n self.assertEqual(defaults, {})", "title": "" }, { "docid": "45a71e82e4dbc8ccfe1107c94e57e209", "score": "0.5330518", "text": "def country_or_region(self):\n if \"countryOrRegion\" in self._prop_dict:\n return self._prop_dict[\"countryOrRegion\"]\n else:\n return None", "title": "" }, { "docid": "5aa6391d3ea7f314d27152b7889f15f6", "score": "0.5318128", "text": "def test_create_one_config_with_empty_country_urn(self):\n url = reverse('saml_provider_config-list')\n provider_config_blank_country = {\n 'entity_id': 'id',\n 'metadata_source': 'http://test.url',\n 'name': 'name-of-config-blank-country',\n 'enabled': 'true',\n 'slug': 'test-slug-empty',\n 'enterprise_customer_uuid': ENTERPRISE_ID,\n 'country': '',\n }\n\n response = self.client.post(url, provider_config_blank_country)\n assert response.status_code == status.HTTP_201_CREATED\n provider_config = SAMLProviderConfig.objects.get(slug='test-slug-empty')\n assert provider_config.country == ''", "title": "" }, { "docid": "de6d73de1b186263cb139b08c428622d", "score": "0.5313071", "text": "def test_city_country(self):\r\n city = get_formatted_city_name('santiago', 'chile')\r\n self.assertEqual(city, 'Santiago, Chile')", "title": "" }, { "docid": "4f0f409e198ec27d247844d7918b3fdd", "score": "0.53025293", "text": "def test_check_default_values():\n iniconf.check_default_values(spec, 'key1')\n iniconf.check_default_values(spec, 'key2')\n try:\n iniconf.check_default_values(spec, 'key3')\n except ConfigError:\n spec['key3'] = 'integer(default=1)'\n else:\n raise AssertionError(\"Checking for a default value should have failed with: %s\" % spec['key3'])", "title": "" }, { "docid": "2d1b60e564f6d60ad0a5123adf365d4d", "score": "0.53016394", "text": "def test_default_field_is_none(self):\n\n self.assertIsNone(self.input_definition.default)", "title": "" }, { "docid": "036698454b93863dd76af26fe3c8698e", "score": "0.5300612", "text": "def _default_sepa_origid_issr(self):\n if self.partner_id.country_id.code == 'BE':\n return 'KBO-BCE'", "title": "" }, { "docid": "20a9d505d412be4605bacfc9573b3fa2", "score": "0.52872354", "text": "def test_field_blank_option(self):\r\n self.assertTrue(re.search(BLANK_OPTION_PATTERN,\r\n str(self.form['state'])))", "title": "" }, { "docid": "20a9d505d412be4605bacfc9573b3fa2", "score": "0.52872354", "text": "def test_field_blank_option(self):\r\n self.assertTrue(re.search(BLANK_OPTION_PATTERN,\r\n str(self.form['state'])))", "title": "" }, { "docid": "17bc5a4f3a1330de1b976fac6ace7990", "score": "0.5286056", "text": "def country_or_region(self) -> Optional[str]:\n return pulumi.get(self, \"country_or_region\")", "title": "" }, { "docid": "1fc81a4db086545aaab53c49db6659f7", "score": "0.52781075", "text": "def default_value(field, kind):", "title": "" }, { "docid": "7013b4549c6d327e5d5a495b1dcc4892", "score": "0.52764446", "text": "def test_city_country_names(self):\r\n city = formatted_city_country('london', 'united kingdom')\r\n self.assertEqual(city, 'London, United Kingdom')", "title": "" }, { "docid": "dfe674963f4444bb7255cadbc91b8fd2", "score": "0.52602047", "text": "def test_validate_plugin_parameter_defaults_raises_validation_error_if_missing_name_or_default(self):\n plugin_ds = Plugin.objects.get(meta__name=self.plugin_ds_name)\n parameter_defaults = [{'name': 'dummyInt'}]\n with self.assertRaises(serializers.ValidationError):\n 
PipelineSerializer.validate_plugin_parameter_defaults(plugin_ds, parameter_defaults)\n parameter_defaults = [{'default': 3}]\n with self.assertRaises(serializers.ValidationError):\n PipelineSerializer.validate_plugin_parameter_defaults(plugin_ds, parameter_defaults)\n parameter_defaults = [{'name': 'dummyInt', 'default': 3}]\n PipelineSerializer.validate_plugin_parameter_defaults(plugin_ds, parameter_defaults)", "title": "" }, { "docid": "2439fa5a19bce0bdf27ffd2861be8fba", "score": "0.52584296", "text": "def test_get_regional_info_obj(self):\n self.initialize()\n\n obj = get_regional_info_obj(\"warsaw\", \"mazowieckie\")\n\n self.failUnlessEqual(obj.contents, \"Lorem ipsum\")", "title": "" }, { "docid": "33d2b35390bb858f1ae95f312fb4f37f", "score": "0.5253105", "text": "def test_validate_default(self):\n data = {'no_default': 456}\n validated = self.serializer.validate(data)\n assert validated == {'default': 123, 'no_default': 456}", "title": "" }, { "docid": "45a2bad3b9515c56f6bf7dac471618e8", "score": "0.52514875", "text": "def is_right_country_no_value(self):\n self.assertTrue(\n utils.is_right_country_pass(\n \"Q500388\", self.wikidata_site, \"Q34\"))", "title": "" }, { "docid": "afcf313182ee8cad6d39415bd83db5ab", "score": "0.5250338", "text": "def test_preflabel_defaultlang(self):\n l = Label.objects.get(concept__term=\"Concept_defaultlang\", label_text=\"A label in default language\")\n self.assertEqual(l.language, DEFAULT_LANG)", "title": "" }, { "docid": "fffe011f9ae197462b0b9ad6810e20e0", "score": "0.5249123", "text": "def test_city_country(self):\n formatted_version = city_country('brugge', 'belgium')\n self.assertEqual(formatted_version, 'Brugge, Belgium')", "title": "" }, { "docid": "a06ef24b015af386b818994e9033eb0c", "score": "0.52482504", "text": "def test_validate_default_not_used(self):\n data = {'default': 0, 'no_default': 456}\n validated = self.serializer.validate(data)\n assert validated == {'default': 0, 'no_default': 456}", "title": "" }, { "docid": "9ece43b469b8ae71d430575c60f8ce09", "score": "0.523915", "text": "def test_unexpected_values(self):\n with self.assertRaises(Exception):\n countryCapital('MORDOR')", "title": "" }, { "docid": "147444c523fae65ac54f93c692b6c98e", "score": "0.5222777", "text": "def test_missing_var_with_no_default(self):\n self.assertRaises(ImproperlyConfigured, envar, 'TOWEL', str)", "title": "" }, { "docid": "3b39880837ba4d16dc97d4242de12ab9", "score": "0.521692", "text": "def set_region_name():\n if 'DEFAULT_REGION' not in os.environ:\n region_name = boto3.Session().region_name\n if not region_name:\n region_name = get_instance_region()\n if not region_name:\n region_name = DEFAULT_REGION\n log.info('Setting region name as %s', region_name)\n os.environ['DEFAULT_REGION'] = region_name\n else:\n log.info('Region name already set as %s', os.environ['DEFAULT_REGION'])", "title": "" }, { "docid": "45df5c42fbf2b1cc35da3f61e56f37b4", "score": "0.52097774", "text": "def tde_region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tde_region\")", "title": "" }, { "docid": "18d1d03e178ecfc755c1cdc1f1b7e68d", "score": "0.520537", "text": "def allowed_regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_regions\")", "title": "" }, { "docid": "3b1c5d08557b2fe9fe832646467b0550", "score": "0.51966536", "text": "def test_parses_name(self):\n self.assertEqual(self._place.name, \"Kokstad Platform 51852\")", "title": "" }, { "docid": "d9caea543d05635ef15dbfa99b7bfb11", "score": "0.5188009", 
"text": "def test_get_default_value(self):\n\n obj = self.TestModel()\n obj.score.set(settings.LANGUAGE_CODE, 25.0)\n\n secondary_language = settings.LANGUAGES[-1][0]\n assert obj.score.get(secondary_language) is None\n assert obj.score.get(secondary_language, 1337.0) == 1337.0", "title": "" }, { "docid": "9047289603f06a61d4f837cab6443c6e", "score": "0.5174406", "text": "def test_city_country(self):\n formatted_name = city_country('santiago', 'chile')\n self.assertEqual(formatted_name, 'Santiago, Chile')", "title": "" }, { "docid": "b879410cd2ba732c09a990f9cd3fbab2", "score": "0.51521605", "text": "def get_region_name(facility):\n if \"province\" in facility and facility[\"province\"] is not None:\n provinces = {\n (\"Noord-Holland\", \"Utrecht\", \"Flevoland\"): \"Noordwest-Nederland\",\n (\"Zuid-Holland\", \"Zeeland\"): \"Zuidwest-Nederland\",\n (\"Noord-Brabant\", \"Limburg\"): \"Zuid-Nederland\",\n (\"Gelderland\", \"Overijssel\"): \"Oost-Nederland\",\n (\"Groningen\", \"Friesland\", \"Drenthe\", \"Fryslân\"): \"Noord-Nederland\"\n }\n for province_list, region in provinces.items():\n if facility[\"province\"] in province_list:\n return region\n return None", "title": "" }, { "docid": "d30438fb434e2d02a2a33074a8442360", "score": "0.5144878", "text": "def test_province_restriction(self):\n form = self.create_PermanentParcelForm(province=\"OtraProvincia\")\n self.assertFalse(form.is_valid())", "title": "" }, { "docid": "f7d3f8f69c3df62c1a7613cdf764bd65", "score": "0.5144582", "text": "def test_parses_name(self):\n self.assertEqual(self._place.name, \"Kokstad Platform 123\")", "title": "" }, { "docid": "08916a848a2a7d7f04f5fb611a267e91", "score": "0.5143414", "text": "def test_default_value(self):\n\n model = get_fake_model(\n {\n \"score\": LocalizedFloatField(\n default={settings.LANGUAGE_CODE: 75.0}\n )\n }\n )\n\n obj = model.objects.create()\n assert obj.score.get(settings.LANGUAGE_CODE) == 75.0\n\n obj = model()\n for lang_code, _ in settings.LANGUAGES:\n obj.score.set(lang_code, None)\n obj.save()\n\n for lang_code, _ in settings.LANGUAGES:\n if lang_code == settings.LANGUAGE_CODE:\n assert obj.score.get(lang_code) == 75.0\n else:\n assert obj.score.get(lang_code) is None", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": 
"57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "57febb6e7ca7d20f6efe783f2b50736f", "score": "0.5140031", "text": "def region(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "2ed4516dc1f1c9324af6d0afa7a16de9", "score": "0.5135468", "text": "def is_empty(self, region):", "title": "" }, { "docid": "a64ba080f52a1332afd4052de0137349", "score": "0.51336676", "text": "def test_default_car_name(self):\n car_name = Car()\n self.assertEqual('General', car_name.name,\n msg='The car should be called `General`,'+\n ' if no name was passed as an argument')", "title": "" }, { "docid": "08ccd515ece2241530120d1a17c79605", "score": "0.5127754", "text": "def test_is_configuration_default_value(self):\n\n field = self.input_definition._meta.get_field(\"is_configuration\")\n self.assertTrue(field.default)", "title": "" }, { "docid": "90a558c9dfd1486c079861909cf8c702", "score": "0.51137674", "text": "def test_address_label(self):\n recycling_center = RecyclingCenter.objects.get(id=1)\n field_label = 
recycling_center._meta.get_field('address').verbose_name\n self.assertEqual(field_label, 'address')", "title": "" }, { "docid": "045e08855054d25a6b9589025fc266d3", "score": "0.5106348", "text": "def fixture_region():\n return [-53, -47, -20, -15]", "title": "" }, { "docid": "4776054c5348f31cd84f17046e858f3e", "score": "0.5103679", "text": "def test_default_blank_and_null(self):\n\n field = self.boolean_input_definition._meta.get_field(\"default\")\n self.assertTrue(field.blank)\n self.assertTrue(field.null)", "title": "" }, { "docid": "8849ad76bee7556f8813e87acd7792d1", "score": "0.5103467", "text": "def test_get_default_translation_returns_correct_str(lang_code, expected):\n test_field = {'fi': 'fi_test', 'en': 'en_test', 'sv': 'sv_test'}\n assert get_default_translation(test_field, lang_code) == expected", "title": "" }, { "docid": "76b2ba7b2d4f54ae93b74c98746d9a05", "score": "0.51027775", "text": "def check_encoded_defaults(self, cls):\n self.assertEquals(\n {'availability': 'private'}, cls.encoded_defaults())", "title": "" }, { "docid": "4631ad92c0aa37a9bd53dff8268ea00f", "score": "0.5089652", "text": "def assert_default(self, value):\n self.assert_in_help('default: %s' % value)", "title": "" }, { "docid": "617069441faac6b18c113bf11e2d9201", "score": "0.50775665", "text": "def test_city_country(self):\n city_country_test_name = city_country('tokyo', 'japan')\n self.assertEqual(city_country_test_name, 'Tokyo, Japan')", "title": "" }, { "docid": "acbb9b2964e8c7b10db4d35d6c60dcb3", "score": "0.5075937", "text": "def test_empty_agmt(self):\n self.place.name = None\n self.assertIsNone(self.place.name)", "title": "" }, { "docid": "5415ceea8f2609e4e9c78af8641925f3", "score": "0.50751233", "text": "def validate_recoveryoption_name(recoveryoption_name):\n ...", "title": "" }, { "docid": "64fe71f2957806cd45f038c31591ab98", "score": "0.50726146", "text": "def test_field_label_name(self):\r\n form = SearchForm()\r\n self.assertTrue(form.fields['name'].label == 'Recherche')", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": 
"b34b55ae3668a5bfe7aa968a9306428b", "score": "0.5067746", "text": "def region(self) -> Optional[str]:\n return pulumi.get(self, \"region\")", "title": "" }, { "docid": "b93f6742497233e914d0117e70ae3f4a", "score": "0.5060441", "text": "def test_default_tag(self):\n tagging_specs = [('Country Name', '#country+name'), ('Country Code', '#country+code')]\n source = hxl.data(hxl.converters.Tagger(self.UNTAGGED, tagging_specs, default_tag='#targeted'))\n self.assertEqual(self.EXPECTED_TAGS_DEFAULT, source.display_tags)", "title": "" }, { "docid": "8bea8731b505d7388099d9331103d5f4", "score": "0.50580937", "text": "def test_missing_term_name(self):\n assert berkeley.sis_term_id_for_name(None) is None", "title": "" }, { "docid": "3196c41ff953ec22934f1e6f0d1cec5d", "score": "0.50544864", "text": "def default_na(value):\n if value is not None and len(value):\n return value\n else:\n return mark_safe(f'<span class=\"govuk-hint govuk-!-margin-0\">{strings.NOT_APPLICABLE}</span>') # nosec", "title": "" } ]
2f492391193d21e391f187901e5fa1dc
get the minimum node root at node
[ { "docid": "5b326b64d272fe62e99054fcbc1eba39", "score": "0.75541747", "text": "def min_node(self, node):\n if node is None:\n return\n curr = node\n while curr.left is not None:\n curr = curr.left\n return curr", "title": "" } ]
[ { "docid": "38fe563b4800c65347e0c3b604c4163e", "score": "0.84560716", "text": "def minimum(self):\n return self.minimum_node(self.root)", "title": "" }, { "docid": "9f5c4998799db6b9f2127d8d21a86ec8", "score": "0.81945544", "text": "def minvalue(node):\n cursor = node \n # loop down to find the leftmost leaf \n while(cursor.left is not None): \n cursor = cursor.left \n return cursor", "title": "" }, { "docid": "83ee17d1f21b7c0083801b3361bcf46a", "score": "0.81523544", "text": "def find_min(self):\n return self.find_min_value(self.root)", "title": "" }, { "docid": "f15cef7f5e741ba9fae7360af21bd4eb", "score": "0.8073825", "text": "def find_min(self) -> object:\n # Set root to cur\n cur_node = self.root\n # If empty tree\n if self.root is None:\n return None\n # Traverse to extreme left\n while cur_node is not None:\n node = cur_node\n cur_node = cur_node.left\n\n return node.value", "title": "" }, { "docid": "523bca98094f974805387ffefd42524f", "score": "0.8046573", "text": "def min(self, root):\n\n curr_node = root\n\n while curr_node.left:\n curr_node = curr_node.left\n\n return curr_node.data", "title": "" }, { "docid": "291370b17725badca35d77cc5ea9d8dc", "score": "0.8038991", "text": "def min(self):\n return self._min(self.root)", "title": "" }, { "docid": "8134c2085ee15e712857881239f55eb1", "score": "0.7949434", "text": "def get_min(self) -> Any:\n if self.size == 0 or self.root is None:\n return None\n return self._get_min(self.root).key", "title": "" }, { "docid": "e63ccbd374b048978a359dbbe8cbfb7e", "score": "0.7949253", "text": "def min(self):\n if self.empty():\n return None\n else:\n min_node = self.__min(self.root)\n return min_node['key']", "title": "" }, { "docid": "cb998bc9c46493bccfa4fc6399c8ff88", "score": "0.7873924", "text": "def _get_min(self, node: binary_tree.Node) -> binary_tree.Node:\n current_node = node\n while current_node.left:\n current_node = current_node.left\n return current_node", "title": "" }, { "docid": "b3b66f3930e6b1da9ce4c9588e13ec78", "score": "0.78222317", "text": "def _get_min_node(self, node):\n\n if node.left is None:\n return node\n return self._get_min_node(node.left)", "title": "" }, { "docid": "b403daedd3c13327a4b384db608063ba", "score": "0.7695689", "text": "def __min(self, node):\n if node['left'] == None:\n return node\n else:\n return self.__min(node['left'])", "title": "" }, { "docid": "6cb1c8125de4aba0757873ef5bec171b", "score": "0.76416385", "text": "def min_value_node(node):\n while node.left is not None:\n node = node.left\n return node", "title": "" }, { "docid": "fc83eb5a022d2aa4f9e55d69bedc10f4", "score": "0.7639582", "text": "def _get_min(self):\n n = self\n while n.left:\n n = n.left\n return n", "title": "" }, { "docid": "d2e8dfe4e99211bc1b1c8ac671decc70", "score": "0.7629657", "text": "def minvalueval(node):\n cursor = node \n # loop down to find the leftmost leaf \n while(cursor.left is not None): \n cursor = cursor.left \n return cursor.val", "title": "" }, { "docid": "6b0301cca3a70fc1968b84d87c880ff1", "score": "0.7626274", "text": "def _find_smallest(self, start_node):\n node = start_node\n while node.left:\n node = node.left\n return node", "title": "" }, { "docid": "982e55d7535ff4a01ee61569fdd7644e", "score": "0.7623921", "text": "def _minVal(self, node):\n while node.left != self.null_node:\n node = node.left\n return node", "title": "" }, { "docid": "e63b2f675926b236adbe81182c7cfb80", "score": "0.75661254", "text": "def getMin(self):\n if self.root is None:\n raise ValueError(\"Binary Tree is Empty\")\n child = 
self.root\n while child.left:\n child = child.left\n return child.value", "title": "" }, { "docid": "781658a8455e921dc774f60e0486e220", "score": "0.7556653", "text": "def minBST(self, root: TreeNode):\n assert(root != None)\n\n min = root.val\n ptr = root\n while ptr.left != None:\n ptr = ptr.left\n if min > ptr.val:\n min = ptr.val\n return min", "title": "" }, { "docid": "ed960e84fde924cadaa379b37f5344f8", "score": "0.7493343", "text": "def min(self, x: Node = None) -> Node:\n\n if not x:\n x = self.root\n\n\n # we know the max value is always to the right\n while x.left is not None:\n x = x.left\n return x", "title": "" }, { "docid": "3cbf98788168b717f57f7e4be0425390", "score": "0.74836856", "text": "def _findMinNode(self, s):\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n #print(\"MinVal\",minVal)\n return minNode", "title": "" }, { "docid": "ae42c8b8894a1bce69641b4600d7318d", "score": "0.7465956", "text": "def minNode(self, node):\n if not node or not node.left:\n return node\n return self.minNode(node.left)", "title": "" }, { "docid": "b74bda739d89bb75d175c64af47fade6", "score": "0.74537593", "text": "def _min(node):\n while node and node.l is not None:\n node = node.l\n return node", "title": "" }, { "docid": "d8a0579cf69e48bce3c88796d2e19975", "score": "0.7440382", "text": "def __min_value_node(self, node):\n while node.left != None:\n node = node.left\n return node", "title": "" }, { "docid": "fa9a60975dcc75e4a509213b64120665", "score": "0.73406255", "text": "def minimum_node(x:TheNode):\n if isinstance(x, TheNode) is False:\n raise TypeError(\"Wrong type provided\")\n while x.left is not None:\n x = x.left\n return x", "title": "" }, { "docid": "055b85b7d813c8b3bc6a1469e338d2e2", "score": "0.7308178", "text": "def min(self, node):\n if node is None:\n return None\n if node.left is not None:\n return self.min(node.left)\n else:\n return node", "title": "" }, { "docid": "0678e384094757f7a70d0c0575d5a7e0", "score": "0.7241113", "text": "def get_first_node_of_tree(root_node):\n return root_node.sons[0].sons[0].sons[0].sons[0].sons[0]", "title": "" }, { "docid": "19b235da1e638884d6872fcb55c76b74", "score": "0.72122574", "text": "def min_node_value(self):\n return _get_tree_properties(self)['min_node_value']", "title": "" }, { "docid": "e498334ef81b3f24e58124cdd1e8ce8c", "score": "0.7197841", "text": "def get_min(self):\n # Add source node to queue and mark as visited\n bfs_queue = Queue()\n bfs_queue.put(self.root)\n min_elem = float('+inf')\n\n while not bfs_queue.empty():\n removed = bfs_queue.get() # Dequeue from queue\n # Compare the removed element to min_elem and update if smaller\n min_elem = min(removed.key, min_elem)\n\n # Add removed's adjacent vertices to the queue\n if removed.left is not None:\n bfs_queue.put(removed.left)\n if removed.right is not None:\n bfs_queue.put(removed.right)\n return min_elem", "title": "" }, { "docid": "ec0fbaa00e82fa5f30bf7f2b4cc1576f", "score": "0.71688557", "text": "def find_minimum(self, **kwargs):\r\n if self.is_empty:\r\n raise IndexError(\"Binomial heap is empty.\")\r\n min_node = None\r\n idx, min_idx = 0, None\r\n for tree in self.root_list:\r\n if ((min_node is None) or\r\n (tree is not None and tree.root is not None and\r\n min_node.key > tree.root.key)):\r\n min_node = tree.root\r\n min_idx = idx\r\n idx += 1\r\n if kwargs.get('get_index', None) is not None:\r\n return min_node, min_idx\r\n return min_node", "title": "" }, { "docid": 
"bf588b1795c933110d9ed3dfe37cb396", "score": "0.7103541", "text": "def find_min_value(self, t: Node):\n if t is None:\n return None\n current = t\n while (current.left is not None):\n current = current.left\n return current.data", "title": "" }, { "docid": "1c668a1e7a5a8607294e4e531041c109", "score": "0.7084948", "text": "def min(self, x):\n\t if x is None:\n\t \treturn None\n\t while x.left is not None:\n\t \tx = x.left\n\t return x", "title": "" }, { "docid": "da89a9072be93791611ea913b069ec90", "score": "0.7069738", "text": "def __pick_least_root(self, x, paths, directions):\n min_cost = np.inf\n min_node = None\n for path in paths:\n # find least unvisited node\n for node in path:\n direction = directions[node.feature_index]\n # Find last unvisited node\n if not node.is_visited:\n # Same direction\n if direction == 0 or direction == node.get_sign(x):\n cost = node.get_cost(x, self.epsilon)\n if min_cost > cost:\n min_cost = cost\n min_node = node\n # Only check the first unvisited node from the root. Look no further\n break\n return min_node", "title": "" }, { "docid": "053189d1ebce59ed5a5f5e6338958a2b", "score": "0.7055291", "text": "def get_first(self):\n if self.root is None:\n return None\n else:\n return self.root.val", "title": "" }, { "docid": "660b7ff410191a4c2b2a008b36a64265", "score": "0.7028022", "text": "def get_start_node(self):\n return self.get_nodes_at_depth(0)[self.start_node_id()]", "title": "" }, { "docid": "216e7098cd2d7493915f95f74e5772ef", "score": "0.7006478", "text": "def root(self):\r\n return self.nodes[0]", "title": "" }, { "docid": "10b507c1007ae253c670846a53431506", "score": "0.699936", "text": "def min_nodes(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"min_nodes\")", "title": "" }, { "docid": "a0e887d8a4cf35886826f339c9c93219", "score": "0.6991227", "text": "def smallest(self, k):\n if self.root is None:\n raise ValueError(\"Binary search tree is empty\")\n \n if k < 0:\n k = 0\n node = self.root\n while node:\n if k == node.numLeft:\n return node.value\n elif k < node.numLeft:\n node = node.left\n else:\n k = k - node.numLeft - 1\n if node.right is None:\n return node.value\n node = node.right", "title": "" }, { "docid": "b1f87b466e76a96b154ecf8e84d8f4df", "score": "0.6990073", "text": "def getMinNode(self, input_list):\n currentNode = input_list[0]\n for node in input_list:\n if node.f < currentNode.f:\n currentNode = node\n return currentNode", "title": "" }, { "docid": "66adfd065543f09f7a179eb7f9409c82", "score": "0.69395334", "text": "def _find_root(self):\n node = self.nodes[0]\n while node.parents:\n node = node.parents[0]\n return node", "title": "" }, { "docid": "3452eb55159dbdcd791c2302d49cc94d", "score": "0.6896792", "text": "def root(tree):\n\treturn tree[0]", "title": "" }, { "docid": "1823a0466c166ceb8d717b28bf72b7b9", "score": "0.68634784", "text": "def get_root(self):", "title": "" }, { "docid": "d17b9bf2a265cd40b6264b3530a2514f", "score": "0.68194634", "text": "def getRoot(self) -> 'LktimTreeNode':\n ...", "title": "" }, { "docid": "b2539359424d33689a48ffd92ce6e064", "score": "0.68063307", "text": "def getMinLat(self):\n for info in self.root.iter('extents'):\n if info.find('.//MinLatitude') is None:\n return None\n else:\n return info.find('.//MinLatitude').text", "title": "" }, { "docid": "87b011692cadeecdddf545d0c99064c5", "score": "0.6769241", "text": "def min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n return self.root().element()[1]", "title": "" }, { "docid": 
"d6cfcdeb1f38f956d078862a8bcce83c", "score": "0.6730354", "text": "def get_root(tree: MerkleTree) -> Hash32:\n return tree[0][0]", "title": "" }, { "docid": "7f2687b05b91a9bd230a5493c1edb9fa", "score": "0.67119765", "text": "def getRoot(self):\n v0 = self.findleaves()[0]\n nodes = range(self.numVertices())\n s = float(len(nodes))\n k = self.maxDegree()\n if k == 1:\n v1 = self.findleaves()[1]\n return (v0, v1)\n queue = [(v0, None, self.numVertices())]\n while len(queue) > 0:\n (pointer,parent,d) = queue.pop()\n if (s/(k)-1.0/(k-1) <= d and d <= s*(k-1)/(k)):\n return (pointer, parent)\n children = [x for x in self.neighbors(pointer) if x != parent]\n des_children = [len(self.findNodesBelow(x, pointer)) for x in children]\n idx = np.argmax(des_children)\n queue.append((children[idx], pointer, des_children[idx]))", "title": "" }, { "docid": "9009507098efc5abbda4a9076f2d4a8c", "score": "0.6693093", "text": "def min(self):\n return self.structure[0]", "title": "" }, { "docid": "594b90003ad00938a234401b9af2be47", "score": "0.66815007", "text": "def findMin(self):\n if not self.queue_merged:\n return self.queue_nodes.pop()\n\n if not self.queue_nodes:\n return self.queue_merged.popleft()\n\n if self.queue_nodes[-1] < self.queue_merged[0]:\n return self.queue_nodes.pop()\n\n return self.queue_merged.popleft()", "title": "" }, { "docid": "564e9ca39f620e576dd758e24f03ec34", "score": "0.6654149", "text": "def __pick_least_leaf(self, x, paths, directions):\n min_cost = np.inf\n min_node = None\n for path in paths:\n # find least unvisited node\n for node in reversed(path):\n direction = directions[node.feature_index]\n # Find last unvisited node\n if not node.is_visited:\n # Same direction\n if direction == 0 or direction == node.get_sign(x):\n cost = node.get_cost(x, self.epsilon)\n if min_cost > cost:\n min_cost = cost\n min_node = node\n break # Only check the last leaf node. 
Look no further\n return min_node", "title": "" }, { "docid": "44389de8dcb4d91ca4b3912f64bb9f68", "score": "0.6629548", "text": "def get_lower_bound(self):\n node = self.parent\n old_node = self\n while node:\n if node.rnode == old_node:\n return node.value\n old_node = node\n node = old_node.parent", "title": "" }, { "docid": "2bda6d93868ff3d1984fb9db0df2f417", "score": "0.6610626", "text": "def get_node_start(self) -> Node:\n return self._current_start", "title": "" }, { "docid": "db64045ae8ba941e5f7abc19afd1d249", "score": "0.6606594", "text": "def getclosestnodeXMLindex(self, p):\n minDist = np.inf\n nodeId = 0\n for k in self.nodes:\n d = latlong_distance(p, self.XMLNodeXY(k))\n if d < minDist:\n minDist = d\n nodeId = k\n return nodeId", "title": "" }, { "docid": "bee0d487474265bddaa3073ef71f78d6", "score": "0.6600582", "text": "def first(self):\r\n return self.nodes[0]", "title": "" }, { "docid": "bcc5f5391ddd1a55f572d6ab4b402545", "score": "0.6579626", "text": "def find_min(self):\n return self.items[1]", "title": "" }, { "docid": "9f720562883e475cf3d5ac29871ed0a3", "score": "0.6532622", "text": "def find_min(self) -> int:\n pass", "title": "" }, { "docid": "f542e7c9f76a907f4966e67d12fc87ef", "score": "0.65283537", "text": "def get_min(self):\n\t\tpass", "title": "" }, { "docid": "30f01c2f5dec25a2239f90ff3184e2ef", "score": "0.65249634", "text": "def first_root_node_index(self):\n for index, node in enumerate(self.hierarky):\n if node.parent is None:\n return index", "title": "" }, { "docid": "5f2b24c33819ee41d2a83cd677635483", "score": "0.6516111", "text": "def get_min(self):\n if self.is_empty():\n return None\n return self.data[1]", "title": "" }, { "docid": "aa81e556cb855185b2e05f8c8f95834f", "score": "0.65156823", "text": "def kthsmallest(self, root, k):\n stack = []\n counter = 0\n node = root\n while node or stack:\n if node:\n stack.append(node)\n node = node.left\n else:\n node = stack.pop()\n counter += 1\n if counter == k:\n return node.val\n\n node = node.right", "title": "" }, { "docid": "5b59528973b88fb23445bedd5289338d", "score": "0.65087354", "text": "def get_min(self):\n return self.heap[0]", "title": "" }, { "docid": "5ebe713a7370226d55912250c8a0afb2", "score": "0.6484234", "text": "def get_root(self):\n return self.root", "title": "" }, { "docid": "acca3967bd41584cf6da6e27add8e356", "score": "0.6483931", "text": "def getMinimumDifference(self, root):\n self.inorder_tree(root)\n # 题中给定是一个『二叉搜索树』, 左 < val < 右, 因此中序遍历完就是排好序的, 不用排否则需要排\n # self.list.sort()\n # 至少有两个, 如果不是 res = sys.maxint\n res = self.list[1] - self.list[0]\n for i in range(len(self.list) - 1):\n diff = self.list[i + 1] - self.list[i]\n if diff < res:\n res = diff\n return res", "title": "" }, { "docid": "e1c18f97c458981dda01a08d45823ee5", "score": "0.6479552", "text": "def pop_min(self):\r\n if self.get_size() == 0:\r\n return None\r\n # Swap the min to the end\r\n self.swap(0, self.get_size()-1)\r\n\r\n # Remove the min\r\n rmvd = self.table.pop()\r\n\r\n # Put the new root in its correct position\r\n self.percolate_down(0)\r\n\r\n return rmvd", "title": "" }, { "docid": "a861044ee00fd41fdacdfe4340b3c826", "score": "0.6454923", "text": "def get_first(nodes):\n return nodes[0]", "title": "" }, { "docid": "5d2b26b48197ca718a0bb58634fce056", "score": "0.6427807", "text": "def root(self):\r\n for i in self.nodes:\r\n if i.get_previous_steps()==[]:\r\n return i", "title": "" }, { "docid": "480df0426fe8286572abf7f8bd284802", "score": "0.640855", "text": "def find_smallest_node_letter(self, node, 
min_letter_node=Leaf(-1, 'z')):\n if not node.is_leaf():\n left = node.left\n right = node.right\n return sorted([left, right], key=lambda x: self.find_smallest_node_letter(x).char)[0]\n else:\n if node.char < min_letter_node.char:\n min_letter_node = node\n return min_letter_node", "title": "" }, { "docid": "bad31e5b7760db94d10c772932037324", "score": "0.64070594", "text": "def get_min_value(self):\n if self.head.next is None:\n raise Exception(\"Error - The heap is empty\")\n min_value = None\n currant_node = self.head.next\n\n while currant_node is not None:\n if min_value is None or inf(currant_node.value, min_value, DEB, FIN):\n min_value = currant_node.value\n currant_node = currant_node.next\n\n return min_value", "title": "" }, { "docid": "4022d0aa4d53fc1c357fdc55fbd4ef1e", "score": "0.63936025", "text": "def root(self) -> BPlusTree_Node:\n return self._root", "title": "" }, { "docid": "1f68cc8d885d8505d6dd7cf6b767b4bd", "score": "0.6382807", "text": "def MinLevel(self):\n return _stomp.IndexedTreeMap_MinLevel(self)", "title": "" }, { "docid": "ecc2a3ff28c9a03d20caab275e90e432", "score": "0.6381289", "text": "def GetMin(self):\n ...", "title": "" }, { "docid": "6338ffef02ef5f97d50a31e9f05d675a", "score": "0.63704556", "text": "def get_min_x() -> int:\n return 0", "title": "" }, { "docid": "a4d003402ed0c3a391e7674d837c556a", "score": "0.6364792", "text": "def get_lower_bound(self):\n # compose the lower bound as the own coordinate the lower bound of the lowest child\n if len(self.children) != 0:\n return [(self.depth, self.coord)] + self.children[-1][0].get_lower_bound()\n return [(self.depth, self.coord)]", "title": "" }, { "docid": "467a80a7e75a3dde779b555522a928d9", "score": "0.6362049", "text": "def find_node_closest(G, nodeset):\n min_dist = np.inf\n min_dist_node = None\n\n for node in G:\n if node in nodeset:\n continue\n # Find the total distance from the nodes in nodeset to node\n dist = 0\n for node2 in nodeset:\n dist += G[node][node2]['weight']\n\n if dist < min_dist:\n min_dist = dist\n min_dist_node = node\n\n return min_dist_node", "title": "" }, { "docid": "5dd82d09c089751dcfc421942e9f027c", "score": "0.63575876", "text": "def get_closest(nodes, point):\n return min(nodes, key=lambda x: dist(x.pos, point))", "title": "" }, { "docid": "5074e952c4716ba0f09c09aef82e3573", "score": "0.6347834", "text": "def kthSmallest(self, root: TreeNode, k: int) -> int:\n if root is None:\n return root\n\n count = 0\n queue = deque([root])\n all_vals = []\n while queue:\n node = queue.popleft()\n all_vals.append(node.val)\n if node.right:\n queue.appendleft(node.right)\n if node.left:\n queue.appendleft(node.left)\n return sorted(all_vals)[k - 1]", "title": "" }, { "docid": "1ca40835188d66ffad987206b968f216", "score": "0.6338601", "text": "def getMin(self):\r\n if cmds.attributeQuery(self.lastPlugAttr(), node=self.node(), minExists=True):\r\n return cmds.attributeQuery(self.lastPlugAttr(), node=self.node(), min=True)[0]", "title": "" }, { "docid": "5d62e0a72876c1b09ce9ec6931fab522", "score": "0.6338487", "text": "def getRoot(self):\n return self._root", "title": "" }, { "docid": "ee4d044c3870b8d477e0b4b8a740d45d", "score": "0.63261926", "text": "def start_node(self):\r\n return self.start_", "title": "" }, { "docid": "e8201f7bdec18b25320fdd36474c34c0", "score": "0.63243043", "text": "def __getFirstNodeOfSize(self, index, radius, startingNode='root'):\n if startingNode == 'root':\n if self.__ballTreeNodes['radius'][index] < radius or self.__ballTreeNodes['is_leaf'][index] == 1:\n 
return index\n else:\n return hstack([self.__getFirstNodeOfSize(self.getLeftChildOfNode(index), radius, 'root'),\n self.__getFirstNodeOfSize(self.getRightChildOfNode(index), radius, 'root')])\n\n elif startingNode == 'leaves':\n if self.__ballTreeNodes['radius'][index] < radius:\n\n while self.__ballTreeNodes['radius'][self.__relations[index]['parent']] < radius:\n index = self.__relations[index]['parent']\n\n return index\n else:\n return None", "title": "" }, { "docid": "a293e39575b4d45109c1769ed6f93b41", "score": "0.63233393", "text": "def MinLevel(self):\n return _stomp.TreeMap_MinLevel(self)", "title": "" }, { "docid": "4446bc192eee57e7d4c18c4a257276c4", "score": "0.63099027", "text": "def euclidean_minimum_spanning_tree(points):\n # start with Delaunay triangulation\n G = delaunay(points)\n EMST = nx.minimum_spanning_tree(G, weight='length')\n\n return EMST", "title": "" }, { "docid": "83f5e0017e307d7c6784c9dc6242be0a", "score": "0.63092494", "text": "def _extract_min(self):\n if len(self.depq) == 0: return None\n min = self.depq.poplast()[0]\n return min", "title": "" }, { "docid": "e5ebdaf0ef58fd8295f191e00ca2c40c", "score": "0.6307626", "text": "def root(self):\n return self.tree_structure.tree_root", "title": "" }, { "docid": "eeabea0d56d6225d71dd5a1e06436993", "score": "0.63056827", "text": "def min_depth(self, node):\n if self._is_a is None:\n self._analyze()\n id_ = node['id']\n if id_ not in self._min_depth:\n parents = self.parents(node)\n if not parents:\n self._min_depth[id_] = 0 # root\n else:\n self._min_depth[id_] = 1 + min(\n self.min_depth(p) for p in parents)\n return self._min_depth[id_]", "title": "" }, { "docid": "f70cd54a05901b28b71a6ac5d95fa85e", "score": "0.63022155", "text": "def getMinValue(self, node):\r\n minValue = node.getAttribute('min_value')\r\n\r\n if minValue:\r\n return minValue != u'' and minValue or None\r\n else:\r\n return None", "title": "" }, { "docid": "0410dc08686768c317314e706727de3d", "score": "0.6298207", "text": "def find_weakest(self):\n if (self.right == None):\n return float(\"Inf\"), [self]\n b_error, num_nodes = self.get_cost_params()\n alpha = (self.error - b_error) / (num_nodes - 1)\n alpha_right, tree_right = self.right.find_weakest()\n alpha_left, tree_left = self.left.find_weakest()\n smallest_alpha = min(alpha, alpha_right, alpha_left)\n smallest_trees = []\n # if there are multiple weakest links collapse all of them\n if smallest_alpha == alpha:\n smallest_trees.append(self)\n if smallest_alpha == alpha_right:\n smallest_trees = smallest_trees + tree_right\n if smallest_alpha == alpha_left:\n smallest_trees = smallest_trees + tree_left\n return smallest_alpha, smallest_trees", "title": "" }, { "docid": "779004711ba3e7295c1197dd6d118dc2", "score": "0.6297308", "text": "def _getFirstNode(self, name=None):\n nodes = self._getNodes(name)\n if nodes:\n return nodes[0]\n return None", "title": "" }, { "docid": "cd368b15035c491f02bbd434ec07c312", "score": "0.6290459", "text": "def first_leaf_node(self):\n for node in self.hierarky:\n if node.first_child is None:\n return node\n raise LakeFilterException(\"There was no leaf node... 
\\\nSomething is very wrong.\")", "title": "" }, { "docid": "c432518c73af756eb24f5ab9355a29a4", "score": "0.6288123", "text": "def sroot(self):\n assert len(self.ss) > 0\n return self.ss[0]", "title": "" }, { "docid": "ae8956fb0167285b55b3d371dd438934", "score": "0.6276248", "text": "def findSecondMinimumValue_iterative(self, root):\n if not root:\n return -1\n result, next_nodes, min1 = float('inf'), [root], root.val\n while next_nodes:\n node = next_nodes.pop()\n if not node.left:\n continue\n if node.left.val == min1:\n next_nodes.append(node.left)\n else:\n result = min(result, node.left.val)\n if node.right.val == min1:\n next_nodes.append(node.right)\n else:\n result = min(result, node.right.val)\n return result if result < float('inf') else -1", "title": "" }, { "docid": "cc35c69dc98d0697fc98b4b9c740d76f", "score": "0.62702775", "text": "def getMin(self):\n # type: () -> Optional[float]\n if cmds.attributeQuery(self.attrName(), node=self.node(),\n minExists=True):\n return cmds.attributeQuery(self.attrName(), node=self.node(),\n min=True)[0]\n else:\n return None", "title": "" }, { "docid": "3334525455ca581dfbfa53ac286ce7f7", "score": "0.62647134", "text": "def getRoot(self):\n return self.root", "title": "" }, { "docid": "356cac077acd29ad32e2c316834d8d2c", "score": "0.62606215", "text": "def query_root_node(self):\n return self.query_root_node_of_node(self._get_obj())", "title": "" }, { "docid": "a66b824745e37fd7a7b92d856ac3ef75", "score": "0.6247884", "text": "def min_x(self):\n return min(n.x for n in self) if self else None", "title": "" }, { "docid": "e20555365c57f00f817fb04cb7c2fc35", "score": "0.62272894", "text": "def get_gl_root_node(self):\n return self.__gl_root_node", "title": "" }, { "docid": "8678218070317d953f77e3a1f6bee9d9", "score": "0.62158954", "text": "def get_min(self):\n top_aux_stack = self.aux_stack.pop()\n self.aux_stack.push(top_aux_stack)\n return top_aux_stack", "title": "" }, { "docid": "d1cb39d87d46c5c8f3bcc88bafc940bb", "score": "0.62136215", "text": "def find_root( rows ):\n maxes = sorted( rows.values(), key = lambda x: x.cumulative )\n if not maxes:\n raise RuntimeError( \"\"\"Null results!\"\"\" )\n root = maxes[-1]\n roots = [root]\n for key,value in rows.items():\n if not value.parents:\n logger.debug( 'Found node root: %s', value )\n if value not in roots:\n roots.append( value )\n if len(roots) > 1:\n root = runsnakerun.pstatsloader.PStatGroup(\n directory='*',\n filename='*',\n name=\"<profiling run>\",\n children= roots,\n )\n root.finalize()\n return root", "title": "" }, { "docid": "4228c797e278f8184863c87ba1bd5acd", "score": "0.6212059", "text": "def get_root_node(self):\n return nuke.toNode(\"root\")", "title": "" }, { "docid": "caadbaa4b41f18f2067f35a20920d089", "score": "0.6205866", "text": "def getMin(self):\n return self.mins.top()", "title": "" }, { "docid": "7cd284c850238b5c19fd7d7d95ee729e", "score": "0.6200292", "text": "def get_start_node(self):\n semester = Semester([], semester=None)\n start_node = Node(parent=None, semester=semester)\n return start_node", "title": "" } ]
d57f0ee2fe3702c783d948681b672191
Get the list of Snapshots
[ { "docid": "20b1f321f397d64a6aecaa6330326504", "score": "0.7944827", "text": "def getSnapshots(self):\n response, body = self.http.get('/snapshots')\n return body", "title": "" } ]
[ { "docid": "49f84d34119227ad50fa52d13d09fb43", "score": "0.82314456", "text": "def get_snapshots_list(self):\n request = self._build_request_url(\n [self.REQUEST_SNAPSHOTS, self.REQUEST_SNAPSHOTS_LIST])\n\n return json.loads(self._process_request(request))", "title": "" }, { "docid": "96203b35b048575d444d9a652f6cc901", "score": "0.8161918", "text": "def get_snapshots(self):\n response = self._get(\"/snapshots\")\n return response.get(\"data\", {}).get(\"snapshots\", [])", "title": "" }, { "docid": "bb7738cd9dc1b6d0380676ebef1de8b4", "score": "0.80565745", "text": "def listSnapshots(self, **kwargs):\n return self.call(\"listSnapshots\", args)", "title": "" }, { "docid": "c0e2e60b4f0abf9de8ba687517ef8280", "score": "0.7844295", "text": "def __get_snapshots(self):\n self.all_snapshots = get_snapshot_data(self.client, self.repository)\n for list_item in self.all_snapshots:\n if 'snapshot' in list_item.keys():\n self.snapshots.append(list_item['snapshot'])\n self.snapshot_info[list_item['snapshot']] = list_item\n self.empty_list_check()", "title": "" }, { "docid": "960005165ffc2f6c0d9b492a6208eefb", "score": "0.77164954", "text": "def all(self):\n snapshots = []\n for p in os.listdir(self.path):\n s = Snapshot(os.path.join(self.path, p))\n if s.is_snapshot:\n snapshots.append(s)\n return snapshots", "title": "" }, { "docid": "ca37d96126db4cc2a06481e6ad32896c", "score": "0.7659085", "text": "def _get_snapshots(self):\n return self.__snapshots", "title": "" }, { "docid": "930bb6dadd7d64517b559e757a70e455", "score": "0.76461846", "text": "def list_snapshots(self):\n return set(self.snapshots_obj.values())", "title": "" }, { "docid": "e130312c740ed52bd8d9c6bf5cb6b58a", "score": "0.7521689", "text": "def list():\n snapshots = Stellar().get_snapshots()\n\n print '\\n'.join(\n '%s: %s' % (\n s.snapshot_name,\n humanize.naturaltime(datetime.utcnow() - s.created_at)\n )\n for s in snapshots\n )", "title": "" }, { "docid": "cf0a40440de60fb3f5a19b3755244589", "score": "0.7424928", "text": "def get_snapshots(self):\n ec2 = self.get_ec2_connection()\n rs = ec2.get_all_snapshots()\n all_vols = [self.volume_id] + self.past_volume_ids\n snaps = []\n for snapshot in rs:\n if snapshot.volume_id in all_vols:\n if snapshot.progress == '100%':\n snapshot.date = boto.utils.parse_ts(snapshot.start_time)\n snapshot.keep = True\n snaps.append(snapshot)\n snaps.sort(cmp=lambda x, y: cmp(x.date, y.date))\n return snaps", "title": "" }, { "docid": "4ce64ddc383991e9b1f622a05b986e36", "score": "0.73450637", "text": "def snapshot_list_uri(self, otype, otypename, ouri):\n (s, h) = common.service_json_request(\n self.ipaddr, self.port,\n \"GET\",\n Snapshot.URI_SNAPSHOT_LIST.format(otype, otypename, ouri), None)\n o = common.json_decode(s)\n return o['snapshot']", "title": "" }, { "docid": "b12e7fb5eff4cb1b8b1fe05da9d6b59a", "score": "0.7342956", "text": "def list_snapshots(self):\n self.state.check('actions', 'install', 'ok')\n return self.machine.snapshots", "title": "" }, { "docid": "fd9972e4ef17fa9fa672919528cb334d", "score": "0.73302245", "text": "def list_snapshots(self):\n if not self.machine:\n raise RuntimeError('machine %s is not found' % self.name)\n\n return self.machine.snapshots", "title": "" }, { "docid": "2b508c0be003529c6e3c47359ea7d13c", "score": "0.7311896", "text": "def snapshot_list(self):\n return [[s[1], 'Epoch #%s' % s[1]] for s in reversed(self.snapshots)]", "title": "" }, { "docid": "865c59b0f7354d3323adad05b35afa50", "score": "0.72764224", "text": "def list_snapshots(self, detailed=False, 
params=None,\n version=LATEST_MICROVERSION):\n uri = 'snapshots/detail' if detailed else 'snapshots'\n uri += '?%s' % urlparse.urlencode(params) if params else ''\n resp, body = self.get(uri, version=version)\n self.expected_success(200, resp.status)\n return self._parse_resp(body)", "title": "" }, { "docid": "892c5216e10b323efdc7c6655eccf9d1", "score": "0.72706896", "text": "def snapshots(self):\n return sub_collection(\n self.get_relation('snapshots', EngineCommandFailed), Snapshot)", "title": "" }, { "docid": "9bc2d144226b40c67d6ba7c344e5e85a", "score": "0.72706145", "text": "def list_snapshots(self, db_instance):\n return sorted(self.client.describe_db_snapshots(DBInstanceIdentifier='{0}'.format(db_instance))['DBSnapshots'],\n key=lambda k: k['SnapshotCreateTime'], reverse=True)", "title": "" }, { "docid": "e8c15675e9d4da710f93823143b50505", "score": "0.72395366", "text": "def list_snapshot_instances(self, detail=False, snapshot_id=None,\n version=LATEST_MICROVERSION):\n uri = \"snapshot-instances%s\" % ('/detail' if detail else '')\n if snapshot_id is not None:\n uri += '?snapshot_id=%s' % snapshot_id\n resp, body = self.get(uri, version=version)\n self.expected_success(200, resp.status)\n return self._parse_resp(body)", "title": "" }, { "docid": "0c4b3e8ae3f2e5293a3a76fe7d4781a2", "score": "0.71224666", "text": "def list_snapshots(\n self,\n request: swas__open20200601_models.ListSnapshotsRequest,\n ) -> swas__open20200601_models.ListSnapshotsResponse:\n runtime = util_models.RuntimeOptions()\n return self.list_snapshots_with_options(request, runtime)", "title": "" }, { "docid": "3ae0d3815ed379865ad9192db7d0ba91", "score": "0.7118854", "text": "def share_snapshot_access_get_all_for_snapshot_instance(\n context, snapshot_instance_id, session=None):\n return IMPL.share_snapshot_access_get_all_for_snapshot_instance(\n context, snapshot_instance_id, session)", "title": "" }, { "docid": "826ae338c777a291356058df218a2a08", "score": "0.71130574", "text": "def list_snapshots(vm, list=1):\n result = vboxmanage(f'snapshot {vm} list --machinereadable')\n if result[0] == 0:\n if list == 1:\n snapshots_list = re.findall(r'^SnapshotName(?:-\\d+)?=\"(\\S+)\"', result[1], flags=re.MULTILINE)\n else:\n snapshots_list = result[1]\n return result[0], snapshots_list, result[2]\n else:\n logging.error(f'Unable to get list of snapshots: {result[2]}')\n return result[0], result[1], result[2]", "title": "" }, { "docid": "7ed179104abb47874b35668e01f487b5", "score": "0.70497626", "text": "def get_snapshots(volume_id=False):\n response = client.describe_snapshots(\n Filters=[\n {\n 'Name': 'volume-id',\n 'Values': [volume_id, ]\n },\n {\n 'Name': 'status',\n 'Values': ['completed', ]\n }\n ],\n )\n\n if \"Snapshots\" not in response or len(response[\"Snapshots\"]) == 0:\n logger.warning(\"Don't found snapshots of the volume %s\", volume_id)\n return False\n\n snapshots = []\n\n for _, snapshot in enumerate(response[\"Snapshots\"]):\n # Ignore snapshots created by AMI\n if snapshot[\"Description\"] != \"\":\n continue\n\n snapshot[\"oSnapshot\"] = ec2.Snapshot(snapshot[\"SnapshotId\"])\n\n snapshots.append(snapshot)\n\n return snapshots", "title": "" }, { "docid": "319881545271df3a9329d4ced74fbe86", "score": "0.7038936", "text": "def ListSnapshots(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "title": "" }, { "docid": "36479a03818349c8f854596138595018", "score": 
"0.70348084", "text": "def list_snapshots(db_instance):\n if not db_instance:\n click.echo(\"Please specify a database using --db-instance option\", err=True)\n return sys.exit(1)\n\n dbcon = DBSnapshot()\n db_snapshots = sorted(dbcon.list_snapshots(db_instance=db_instance), key=lambda k: k['SnapshotCreateTime'], reverse=True)\n\n click.echo(\"Database Snapshosts:\")\n\n for snapshot in db_snapshots:\n print(\"\\t- {0}\\t- {1}\".format(snapshot['DBSnapshotIdentifier'], snapshot['SnapshotCreateTime']))", "title": "" }, { "docid": "9f4bd12135d68a8738e698f90fa684e3", "score": "0.6976659", "text": "def snapshots(self):\n if self.is_snap():\n return []\n else:\n cand= self.get_container().childs()\n cand= [ v for v in cand if v == self ]\n return cand", "title": "" }, { "docid": "eb7abf4f6879cb47eecd486986780b3a", "score": "0.69020253", "text": "async def list_snapshots_async(\n self,\n request: swas__open20200601_models.ListSnapshotsRequest,\n ) -> swas__open20200601_models.ListSnapshotsResponse:\n runtime = util_models.RuntimeOptions()\n return await self.list_snapshots_with_options_async(request, runtime)", "title": "" }, { "docid": "9a1f2569231913ff4f93651ed2080a23", "score": "0.68958515", "text": "def list_snapshots_with_detail(self, params=None,\n version=LATEST_MICROVERSION):\n return self.list_snapshots(detailed=True, params=params,\n version=version)", "title": "" }, { "docid": "1164727abf8f449ee52651afc8756f2c", "score": "0.68828475", "text": "def get_snapshots(self, volume_id_or_uri, start=0, count=-1, filter='', sort=''):\n uri = self.__build_volume_snapshot_uri(volume_id_or_uri)\n return self._client.get_all(start, count, filter=filter, sort=sort, uri=uri)", "title": "" }, { "docid": "4f56d2dae776df81c21b0114f2962c4f", "score": "0.6825569", "text": "def list_snapshots(\n self,\n ) -> Callable[\n [bigtable_table_admin.ListSnapshotsRequest],\n Awaitable[bigtable_table_admin.ListSnapshotsResponse],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_snapshots\" not in self._stubs:\n self._stubs[\"list_snapshots\"] = self.grpc_channel.unary_unary(\n \"/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots\",\n request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize,\n response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize,\n )\n return self._stubs[\"list_snapshots\"]", "title": "" }, { "docid": "98fce202e94345138a70d1c4dbcd7d81", "score": "0.67871803", "text": "def test_snapshot_list_all(self):\n pass", "title": "" }, { "docid": "1ff1a400a397e6a25edbca76b0090b44", "score": "0.6782215", "text": "def get_zfs_snapshots(root, properties):\n result = subprocess.run([\"zfs\", \"list\", \"-H\", \"-t\", \"snapshot\", \"-r\", \"-p\", \"-o\", \",\".join(properties), root], stdout=subprocess.PIPE, encoding=\"utf-8\")\n result.check_returncode()\n return [line.split(\"\\t\") for line in result.stdout.splitlines()]", "title": "" }, { "docid": "e7c6b29a3057a9cdb02d79529a6bbc79", "score": "0.67471665", "text": "def listSnapshots(self, vmxFilePath):\n vmrun = CommandCapture([\"vmrun\", \"-T\", self._hostType, \"listSnapshots\", vmxFilePath],\n copyToStdio=False)\n listWithHeading = vmrun.stdout\n # first line tells number of snapshots, names are in subsequent lines\n snapshots = []\n numberMatch = re.search(r\"^.*\\:\\s*([0-9]+)\", listWithHeading)\n if numberMatch:\n numberOfSnapshots = 
int(numberMatch.group(1))\n # (?m) effects MULTILINE, omit leading and trailing whitespace if any\n snapshots = re.findall(r\"(?m)^\\s*(.*?)\\s*$\", listWithHeading)\n # omit first line, i.e. line with number of snapshots\n snapshots.pop(0)\n # omit empty lines, if any, e.g. after trailing newline\n snapshots = filter(None, snapshots)\n # here an opportunity to see in debugger\n return snapshots", "title": "" }, { "docid": "6db4f2a2986e865e6e5ff50599d00a65", "score": "0.67433274", "text": "def list_volume_snapshots(\n self,\n ) -> Callable[\n [volume_snapshot.ListVolumeSnapshotsRequest],\n volume_snapshot.ListVolumeSnapshotsResponse,\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_volume_snapshots\" not in self._stubs:\n self._stubs[\"list_volume_snapshots\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.baremetalsolution.v2.BareMetalSolution/ListVolumeSnapshots\",\n request_serializer=volume_snapshot.ListVolumeSnapshotsRequest.serialize,\n response_deserializer=volume_snapshot.ListVolumeSnapshotsResponse.deserialize,\n )\n return self._stubs[\"list_volume_snapshots\"]", "title": "" }, { "docid": "d0139bb57e9af098da36d6608804bec8", "score": "0.67007333", "text": "def list_cgsnapshots(self, detailed=False, params=None,\n version=LATEST_MICROVERSION):\n uri = 'cgsnapshots/detail' if detailed else 'cgsnapshots'\n uri += '?%s' % (urlparse.urlencode(params) if params else '')\n resp, body = self.get(uri, headers=EXPERIMENTAL, extra_headers=True,\n version=version)\n self.expected_success(200, resp.status)\n return self._parse_resp(body)", "title": "" }, { "docid": "ec4d6d862195c23a5004e712ebaaf4d3", "score": "0.66795206", "text": "def get_volume_snapshots(self, future, callback):\n pass", "title": "" }, { "docid": "3650564ded77ecb3ff398b6870acea42", "score": "0.6602535", "text": "def get_snapshot(self, shards_to_get: List[int]):\n shards_quoted = tuple(int(shard) for shard in shards_to_get)\n try:\n cursor = self.connection.cursor('snapshot')\n cursor.itersize = 10000\n cursor.execute(\n f'SELECT doc_id, embedding from {self.snapshot_table} '\n f'WHERE shard in %s '\n f'ORDER BY doc_id',\n (shards_quoted,),\n )\n for rec in cursor:\n vec = (\n np.frombuffer(rec[1], dtype=self.dump_dtype)\n if rec[1] is not None\n else None\n )\n yield rec[0], vec\n except (Exception, psycopg2.Error) as error:\n self.logger.error(f'Error importing snapshot: {error}')\n self.connection.rollback()\n self.connection.commit()", "title": "" }, { "docid": "b8792e38445fe93c9e3abb9619eb988b", "score": "0.65234756", "text": "def get_snapshots(response_filter=None): \n if response_filter: \n kwargs = response_filter \n else: \n kwargs = {} \n snapshot_dict = {} \n client = boto3.client('ec2') \n snapshot_generator = custom_paginator(client, 'describe_snapshots', **kwargs) \n for snapshots in snapshot_generator: \n snapshot_id = snapshot_date = snapshot_state = snapshot_volume_id = None \n for snapshot in snapshots['Snapshots']: \n snapshot_id = (snapshot['SnapshotId']) \n snapshot_date = (snapshot['StartTime']) \n snapshot_volume_id = (snapshot['VolumeId']) \n snapshot_state = (snapshot['State']) \n snapshot_dict[snapshot_id] = {} \n snapshot_dict[snapshot_id]['Date'] = snapshot_date \n snapshot_dict[snapshot_id]['VolumeId'] = snapshot_volume_id \n snapshot_dict[snapshot_id]['State'] = snapshot_state \n return snapshot_dict", "title": "" }, { "docid": 
"413df037f2e3453bcbd6d7ad9cb5f805", "score": "0.6508533", "text": "def snapshots_vm(self, vm_id):\n vm = self.conn.lookupByID(vm_id)\n return vm.snapshotListNames()", "title": "" }, { "docid": "407ec2a0e292a51f27f7c2b94f93f17a", "score": "0.64643764", "text": "def _export_snapshot(self, snapshot: ResticSnapshot) -> List[Dict[str, Any]]:\n\n fields = {\n KEY_SNAPSHOT_SHORT_ID: snapshot.key.snapshot_id,\n }\n\n assert snapshot.stats is not None\n fields.update(self._get_fields_from_stats_bundle(snapshot.stats))\n\n point = {\n \"measurement\": MEASUREMENT_SNAPSHOTS,\n \"tags\": self._get_influx_tags_from_key(snapshot.key),\n \"time\": snapshot.snapshot_time,\n \"fields\": fields,\n }\n return [point]", "title": "" }, { "docid": "eeced5424ded46559202101d44735a60", "score": "0.64358866", "text": "def camera_snapshots(self):\n time_now = self.time()\n ts = f\"{time_now.hour:02d}_{time_now.minute:02d}\"\n tic = monotonic()\n md_links, tasks = [], []\n for cam in CAMERA_ENTITIES:\n p = f\"/config/www/snapshot_{cam.split('.')[1]}_{ts}.jpg\"\n p_link = p.replace(\"config/www\", \"local\")\n tasks.append((\"camera/snapshot\", dict(entity_id=cam, filename=p)))\n md_links.append(f\"[snapshot {cam}]({self._base_url}{p_link})\")\n\n self.run_multiple_service_calls(tasks)\n self.log(\n f\"Multi-snapshot done in {monotonic() - tic:.2f}s with ts={ts}\",\n log=LOGGER_SPECIAL,\n )\n return md_links", "title": "" }, { "docid": "dc0844517c2aba6ed0228195aaa278ae", "score": "0.6401723", "text": "def snapshots():\n repo = TwitRepo.from_cwd()\n for snapshot in repo.snapshots:\n oid = repo.rev_parse(snapshot)\n info = repo.commit_info(snapshot)\n click.echo('{} at {}'.format(oid[:6],\n datetime.datetime.fromtimestamp(info.time)))", "title": "" }, { "docid": "3a6adbffcfa5cf8f8455e239fdcf2fe8", "score": "0.6391434", "text": "def share_snapshot_access_get_all_for_share_snapshot(context,\n share_snapshot_id,\n filters):\n return IMPL.share_snapshot_access_get_all_for_share_snapshot(\n context, share_snapshot_id, filters)", "title": "" }, { "docid": "76660303a33eb6e9f8350093eee4d2e6", "score": "0.6372903", "text": "def get_snapshots(self, user_id):\n return self._driver.get_snapshots(user_id)", "title": "" }, { "docid": "2a38f61ac136a326c8346b2164c30a36", "score": "0.6336749", "text": "def snapshots(self):\n pb_snapshot = sphere_pb2.Snapshot()\n with gzip.open(self.path, 'rb') as f:\n f.seek(self._cur)\n size = f.read(4)\n while size:\n size, = struct.unpack('<I', size)\n pb_snapshot.ParseFromString(f.read(size))\n snapshot = pb_to_snapshot(pb_snapshot)\n yield snapshot\n size = f.read(4)", "title": "" }, { "docid": "6cb8ed35eb1e2f3f4369612d04edc18e", "score": "0.6336316", "text": "def get_s3_snapshot(self, snapshot):\n response = self.collection.snapshot.get(repository=self.repository_name,\n snapshot=snapshot,\n ignore_unavailable=True)['snapshots']\n return response", "title": "" }, { "docid": "186d1b5a88e2f75b4a8d279feec31985", "score": "0.63088626", "text": "def share_group_snapshot_get_all(context, detailed=True, filters=None,\n sort_key=None, sort_dir=None):\n return IMPL.share_group_snapshot_get_all(\n context, detailed=detailed, filters=filters, sort_key=sort_key,\n sort_dir=sort_dir)", "title": "" }, { "docid": "79313d7a4fb4e7de874b711d1e2477ee", "score": "0.6303982", "text": "def get_frame(self, criteria):\n\n filenames = []\n if criteria is None:\n # return self.list_snapshots()\n return None\n with self.get_connection() as conn:\n cursor = conn.cursor()\n statement = f\"SELECT id, t, img FROM IMG_SNAPSHOTS 
WHERE {criteria}\"\n cursor.execute(statement)\n for i, c in enumerate(cursor):\n id, t, blob = c\n filenames.append(self.save_frame(id, t, blob))\n\n return filenames", "title": "" }, { "docid": "6f981a3673fdb6443bae1c63a6c18f5b", "score": "0.62687546", "text": "def get_snapshots_by_vps(self, vps_name):\n cookie = self.build_cookie(mode=MODE_RO, method='getSnapshotsByVps', parameters=[vps_name])\n self.update_cookie(cookie)\n return self.soap_client.service.getSnapshotsByVps(vps_name)", "title": "" }, { "docid": "d4b8d82b27eec285a8af81f9041b4444", "score": "0.6254815", "text": "def share_snapshot_get_all_for_share(context, share_id, filters=None,\n sort_key=None, sort_dir=None):\n return IMPL.share_snapshot_get_all_for_share(\n context, share_id, filters=filters, sort_key=sort_key,\n sort_dir=sort_dir,\n )", "title": "" }, { "docid": "98f463f7479c78bc287bd06b6d066e67", "score": "0.62359154", "text": "def working_list(self):\n # Copy by value, rather than reference to prevent list stomping during\n # iterations\n return self.snapshots[:]", "title": "" }, { "docid": "4967f5efe91a9290fee6eacbf332a9e2", "score": "0.62086874", "text": "def get_node_snapshots(self, n):\n snaps = []\n for t in self.temporal_snapshots_ids():\n if self.has_node(n, t):\n snaps.append(t)\n return t", "title": "" }, { "docid": "e42673f4020120b4edce13747b054523", "score": "0.6198232", "text": "def get_db_snapshots_instance(rds_client, instance_identifier=None, snapshot_type=None, snapshot_identifier=None):\n describe_db_snapshot_params = {}\n if instance_identifier:\n describe_db_snapshot_params['DBInstanceIdentifier'] = instance_identifier\n if snapshot_type:\n describe_db_snapshot_params['snapshot_type'] = snapshot_type\n if snapshot_identifier:\n describe_db_snapshot_params['DBSnapshotIdentifier'] = snapshot_identifier\n try:\n print('Getting DB snapshots with the following parameters: ')\n print(json.dumps(describe_db_snapshot_params))\n snapshot_response = rds_client.describe_db_snapshots(\n **describe_db_snapshot_params)\n snapshots = snapshot_response['DBSnapshots']\n while 'Marker' in snapshot_response:\n describe_db_snapshot_params['Marker'] = snapshot_response['Marker']\n snapshot_response = rds_client.describe_db_snapshots(\n **describe_db_snapshot_params)\n snapshots = snapshots + snapshot_response['DBSnapshots']\n except ClientError as err:\n # Check if error code is due to throttling.\n if err.response['Error']['Code'] == 'Throttling':\n print(\"Throttling occurring.\")\n raise MaskopyThrottlingException(err)\n print(f\"Failed to get DB Instance Snapshots: {err}\")\n raise\n return snapshots", "title": "" }, { "docid": "c5cf28e6dfd3181a637654087f38d4d6", "score": "0.61880875", "text": "def get_snapshot_policy_list(self, filter_dict=None):\n\n try:\n LOG.info('Getting snapshot schedules list ')\n if filter_dict:\n snapshot_schedules = \\\n self.powerflex_conn.snapshot_policy.get(\n filter_fields=filter_dict)\n else:\n snapshot_schedules = \\\n self.powerflex_conn.snapshot_policy.get()\n\n return result_list(snapshot_schedules)\n\n except Exception as e:\n msg = 'Get snapshot schedules list from powerflex array failed with' \\\n ' error %s' % (str(e))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "title": "" }, { "docid": "ba19f62b37b9f6b93b7e1edef7d92176", "score": "0.6187128", "text": "def list_snapshots_with_options(\n self,\n request: swas__open20200601_models.ListSnapshotsRequest,\n runtime: util_models.RuntimeOptions,\n ) -> swas__open20200601_models.ListSnapshotsResponse:\n 
UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.disk_id):\n query['DiskId'] = request.disk_id\n if not UtilClient.is_unset(request.instance_id):\n query['InstanceId'] = request.instance_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.snapshot_ids):\n query['SnapshotIds'] = request.snapshot_ids\n if not UtilClient.is_unset(request.source_disk_type):\n query['SourceDiskType'] = request.source_disk_type\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ListSnapshots',\n version='2020-06-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n swas__open20200601_models.ListSnapshotsResponse(),\n self.call_api(params, req, runtime)\n )", "title": "" }, { "docid": "d0707ab3e9b3850a87e486793032bce8", "score": "0.61741096", "text": "def getRecoverySnapshots(self): #$NON-NLS-1$\r", "title": "" }, { "docid": "e599c1231b858c29573553e0378c84c7", "score": "0.6173328", "text": "def share_snapshot_get_all_by_project(context, project_id, filters=None,\n limit=None, offset=None, sort_key=None,\n sort_dir=None):\n return IMPL.share_snapshot_get_all_by_project(\n context, project_id, filters=filters, limit=limit, offset=offset,\n sort_key=sort_key, sort_dir=sort_dir)", "title": "" }, { "docid": "d14dd7b4454c7ffe670baa2f56e4fa8b", "score": "0.6132745", "text": "def snapshot_commits(self):\n return [\n self.rev_parse(snapshot)\n for snapshot in self.snapshots\n ]", "title": "" }, { "docid": "044ae93d38006ad7788da8713c3e0985", "score": "0.612515", "text": "def list(self):\n return self._invoke('list', None)", "title": "" }, { "docid": "1e7be72e2ec80f6a0eb071574a969061", "score": "0.6121538", "text": "def get_snapshot_data(self, tags):\n return self.post_json(\"api/data/v2/snapshot\", json={\"tags\": normalise_tag_map(tags)})", "title": "" }, { "docid": "8b94afad8bb415e40f8a7532168dafaa", "score": "0.60600656", "text": "def _set_snapshots(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"id\",yc_snapshots_vnf_br__outputs_vnfpp_reports_snapshots, yang_name=\"snapshots\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name=\"snapshots\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:ietf:params:xml:ns:yang:vnf-br', defining_module='vnf-br', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"snapshots must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"id\",yc_snapshots_vnf_br__outputs_vnfpp_reports_snapshots, yang_name=\"snapshots\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions=None), is_container='list', yang_name=\"snapshots\", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='urn:ietf:params:xml:ns:yang:vnf-br', defining_module='vnf-br', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__snapshots = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "03793ec9db1953c52a620da2333e900c", "score": "0.6057671", "text": "def snapshot(self, containers, cluster_uuid, num_snapshots):\r\n snapshots = []\r\n for c in containers:\r\n snapshot_name = '%s-%s-%s:SNAPSHOT-%s' % (c.image, \r\n cluster_uuid,\r\n c.host_name,\r\n num_snapshots)\r\n snapshots.append( {'image' : snapshot_name,\r\n 'base' : c.image,\r\n 'type' : c.service_type, \r\n 'name' : c.name, \r\n 'args' : c.args,\r\n 'ports': c.ports} )\r\n self.cli.commit(c, snapshot_name)\r\n return snapshots", "title": "" }, { "docid": "7a4874aadebbcb23cf84d005f97ed6c1", "score": "0.60516065", "text": "def list(self):\n return self.storage_list\n #raise NotImplementedError()", "title": "" }, { "docid": "42c5e310bd3df1bd210fe9ff2ce34eca", "score": "0.59908605", "text": "def _get_snapshots(prefix):\n ret = []\n for name in os.listdir(SNAP_DEST_DIR):\n if name.startswith(prefix):\n path = os.path.join(SNAP_DEST_DIR, name)\n ret.append(path)\n ret.sort(reverse=True)\n return ret", "title": "" }, { "docid": "0f9d684136728937fa37d43e8aa77a10", "score": "0.59882355", "text": "def get_manageable_snapshots(self, cinder_snapshots, marker, limit, offset,\n sort_keys, sort_dirs):\n return self.common.get_manageable_snapshots(marker, limit, offset,\n sort_keys, sort_dirs)", "title": "" }, { "docid": "61d7d6eeb820100221137f2da053160e", "score": "0.5974514", "text": "def list(self):\n self._fetch()\n return self._list", "title": "" }, { "docid": "8599c9c12d16a1893d82c07180afb597", "score": "0.59633225", "text": "def load_snapshots(ctx: Context, label_filters: Dict[str, str]) -> List[Snapshot]:\n raise NotImplementedError()", "title": "" }, { "docid": "a9ac86122f237b3bd8e44b79e2f75117", "score": "0.5938548", "text": "def listSnapshotPolicies(self, **kwargs):\n if not volumeid in kwargs:\n raise RuntimeError(\"Missing required argument volumeid\")\n return self.call(\"listSnapshotPolicies\", args)", "title": "" }, { "docid": "e018bbd1cc0c760be976405e557f2694", "score": "0.59373474", "text": "def share_snapshot_instance_export_locations_get_all(\n context, share_snapshot_instance_id):\n return IMPL.share_snapshot_instance_export_locations_get_all(\n context, share_snapshot_instance_id)", "title": "" }, { "docid": "4b58b51787944b8a055926ac226c157c", "score": "0.5887977", "text": "def get_snapshot(self, snapshot_date, symbol=\"\", fields=\"\"):\n res = self.get(symbol=symbol, start_date=snapshot_date, end_date=snapshot_date, fields=fields)\n if res is None:\n print(\"No data. 
for date={}, fields={}, symbol={}\".format(snapshot_date, fields, symbol))\n return\n\n res = res.stack(level='symbol', dropna=False)\n res.index = res.index.droplevel(level=self.TRADE_DATE_FIELD_NAME)\n\n return res", "title": "" }, { "docid": "a09feff51c98e25675a17b98b59bf7a4", "score": "0.58732855", "text": "def share_snapshot_instance_get_all_with_filters(context, filters,\n with_share_data=False):\n return IMPL.share_snapshot_instance_get_all_with_filters(\n context, filters, with_share_data=with_share_data)", "title": "" }, { "docid": "6d295a38e00a232e0a7670bc068657c9", "score": "0.5865416", "text": "def volumes(self):\n return list(self.__volumes)", "title": "" }, { "docid": "c6acf7aded64e89576e0f11bbc4c245c", "score": "0.585142", "text": "def get_snapshot_data(self):\n return {\n \"robots\": self.robots,\n \"robot_id\": self.robot_id,\n \"start_time\": self.start_time,\n \"last_time\": self.last_time\n }", "title": "" }, { "docid": "c944b342275ece4d8a994e99411331bc", "score": "0.58476126", "text": "async def list_snapshots_with_options_async(\n self,\n request: swas__open20200601_models.ListSnapshotsRequest,\n runtime: util_models.RuntimeOptions,\n ) -> swas__open20200601_models.ListSnapshotsResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.disk_id):\n query['DiskId'] = request.disk_id\n if not UtilClient.is_unset(request.instance_id):\n query['InstanceId'] = request.instance_id\n if not UtilClient.is_unset(request.page_number):\n query['PageNumber'] = request.page_number\n if not UtilClient.is_unset(request.page_size):\n query['PageSize'] = request.page_size\n if not UtilClient.is_unset(request.region_id):\n query['RegionId'] = request.region_id\n if not UtilClient.is_unset(request.snapshot_ids):\n query['SnapshotIds'] = request.snapshot_ids\n if not UtilClient.is_unset(request.source_disk_type):\n query['SourceDiskType'] = request.source_disk_type\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='ListSnapshots',\n version='2020-06-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n swas__open20200601_models.ListSnapshotsResponse(),\n await self.call_api_async(params, req, runtime)\n )", "title": "" }, { "docid": "0ee246c5367b74e90fc69ad17b4bdb17", "score": "0.5842938", "text": "def get_base_snapshots(self, vcenter_id : str, base_vm_id:str ) -> list:\n\n response = requests.get(f'{self.url}/rest/external/v1/base-snapshots?base_vm_id={base_vm_id}&vcenter_id={vcenter_id}', verify=False, headers=self.access_token)\n\n if response.status_code == 400:\n error_message = (response.json())[\"error_message\"]\n raise Exception(f\"Error {response.status_code}: {error_message}\")\n elif response.status_code == 404:\n error_message = (response.json())[\"error_message\"]\n raise Exception(f\"Error {response.status_code}: {response}\")\n elif response.status_code != 200:\n raise Exception(f\"Error {response.status_code}: {response.reason}\")\n else:\n try:\n response.raise_for_status()\n except requests.exceptions.RequestException as e:\n raise \"Error: \" + str(e)\n else:\n return response.json()", "title": "" }, { "docid": "c0c47eab8c0a1ac39140dbe01dc5bfaf", "score": "0.5824583", "text": "def getSnapShots(self):\n self.applyConditions()\n return self.search_helper.getSnapShots()", "title": "" }, { "docid": "889791de45a01cdf8524d42ec1041203", "score": 
"0.58017355", "text": "def getSnapshot(self, snapshot_id):\n response, body = self.http.get('/snapshots/%s' % snapshot_id)\n return body", "title": "" }, { "docid": "bf02f96b42b504a0cd13e5e0a2c7647c", "score": "0.57987124", "text": "def _snapshot(self):\n response = requests.get(self.snapshot_url, auth=self.auth)\n if response.status_code == 200:\n content_type = response.headers['content-type'] if 'content-type' in response.headers else 'application/octet-stream'\n return (response.content, content_type)\n raise SnapshotError('Failed to get snapshot from {}, error {}'.format(self.snapshot_url, response.status_code))", "title": "" }, { "docid": "cd5fbd2f47538d0f7790da23b8a65299", "score": "0.5791695", "text": "def share_group_snapshot_members_get_all(context, share_group_snapshot_id):\n return IMPL.share_group_snapshot_members_get_all(\n context, share_group_snapshot_id)", "title": "" }, { "docid": "18dd8920fd91b0f5b7c29ddd43ce53d0", "score": "0.5782981", "text": "def list(event, context):\n try:\n minio, mongo = setup()\n except Exception as err:\n return json.dumps({\"setup error\": str(err)})\n \n try:\n collection = mongo[mgo_database][MONGODB_COLLECTION]\n photos = [x for x in collection.find({})]\n result = bdumps({\"photos\": photos})\n return result\n except Exception as err:\n return json.dumps({\"error\": str(err)})", "title": "" }, { "docid": "a5e90353f1e7ae31dd134e30b6aa5373", "score": "0.5758358", "text": "def get_db_snapshots(rds_client, engine, rds_identifier=None,\n snapshot_type=None, snapshot_identifier=None):\n if 'aurora' in engine:\n return get_db_snapshots_cluster(rds_client,rds_identifier, snapshot_type, snapshot_identifier)\n else:\n return get_db_snapshots_instance(rds_client,rds_identifier, snapshot_type, snapshot_identifier)", "title": "" }, { "docid": "80fc99d32552913b3f9199eddda949a4", "score": "0.5738421", "text": "def share_snapshot_export_locations_get(context, snapshot_id):\n return IMPL.share_snapshot_export_locations_get(context, snapshot_id)", "title": "" }, { "docid": "47e88737aa5605cc35474a717a6c09fd", "score": "0.5716442", "text": "def get_snapshot(self, snapshot_id):\n\n request = self._build_request_url(\n [self.REQUEST_SNAPSHOTS, snapshot_id])\n\n return self._process_request(request)", "title": "" }, { "docid": "91dc146d8f2d0afaadce834fdf4343cc", "score": "0.56876224", "text": "def test_describe_snapshots(self):\n vol = db.volume_create(self.context, {})\n snap1 = db.snapshot_create(self.context,\n {'volume_id': vol['id'], 'project_id': self.project_id})\n snap2 = db.snapshot_create(self.context,\n {'volume_id': vol['id'], 'project_id': self.project_id})\n result = self.cloud.describe_snapshots(self.context)\n self.assertEqual(len(result['snapshotSet']), 2)\n snapshot_id = ec2utils.id_to_ec2_snap_id(snap2['id'])\n result = self.cloud.describe_snapshots(self.context,\n snapshot_id=[snapshot_id])\n self.assertEqual(len(result['snapshotSet']), 1)\n self.assertEqual(\n ec2utils.ec2_snap_id_to_uuid(\n result['snapshotSet'][0]['snapshotId']),\n snap2['id'])\n db.snapshot_destroy(self.context, snap1['id'])\n db.snapshot_destroy(self.context, snap2['id'])\n db.volume_destroy(self.context, vol['id'])", "title": "" }, { "docid": "d22ed0d118b31f71553d71a9f7f39774", "score": "0.56840587", "text": "def _get_volume_snapshot(self, context, snapshot_id):\n volume_snapshot_name = self.configuration.\\\n volume_snapshot_name_template % snapshot_id\n volume_snapshot_list = self.volume_api.get_all_snapshots(context,\n {'display_name': volume_snapshot_name})\n 
volume_snapshot = None\n if len(volume_snapshot_list) == 1:\n volume_snapshot = volume_snapshot_list[0]\n elif len(volume_snapshot_list) > 1:\n raise exception.ManilaException(\n _('Error. Ambiguous volume snaphots'))\n return volume_snapshot", "title": "" }, { "docid": "9c578feef1ea1137c459f424e01c9da2", "score": "0.56816024", "text": "def get_snapshot(self, slug: str):\n response = self._get(f\"/snapshots/{slug}/info\")\n return response.get(\"data\")", "title": "" }, { "docid": "6a5c1d3e9714f45edff5b7721d66b53e", "score": "0.5681275", "text": "def share_group_snapshot_get_all_by_project(context, project_id, detailed=True,\n filters=None, sort_key=None,\n sort_dir=None):\n return IMPL.share_group_snapshot_get_all_by_project(\n context, project_id, detailed=detailed, filters=filters,\n sort_key=sort_key, sort_dir=sort_dir)", "title": "" }, { "docid": "b6881312b8988aecbf069cefdd4112b1", "score": "0.56761664", "text": "def map_instance_volume_snapshot(): \n #backup_tags = ['HIPAA','IRIS_HIPAA','IRIS_NON_HIPAA','NON_HIPAA'] \n backup_tags = ['NON_HIPAA'] \n kwargs = {'Filters':[{'Name':'tag:DataType','Values': backup_tags}]} \n instances = get_instances(kwargs) \n kwargs = {'Filters':[{'Name':'attachment.instance-id', 'Values': list(instances.keys())}]} \n volumes = get_volumes(kwargs) \n kwargs = {'Filters':[{'Name':'volume-id','Values':list(volumes.keys())}],'OwnerIds':['self']} \n snapshots = get_snapshots(kwargs) \n for volume_id, volume_value in volumes.items(): \n snapshot_list = [] \n for snapshot_id, snapshot_value in snapshots.items(): \n if snapshot_value['VolumeId'] == volume_id: \n snapshot_list.append({ \n 'StartDate': (snapshot_value['Date'].strftime(\"%m/%d/%Y\")), \n 'Status': snapshot_value['State'], \n 'SnapshotId': snapshot_id, \n 'VolumeId': volume_id}) \n volume_value['Snapshots'] = snapshot_list \n for instance, instance_value in instances.items(): \n volume_list = [] \n for volume_id, volume_value in volumes.items(): \n for attachment in volume_value['Attachments']: \n if instance == attachment: \n volume_list.append({ \n 'VolumeId': volume_id, \n 'VolumeDetails': volume_value}) \n instance_value['Volumes'] = volume_list \n logger.info(json.dumps(instances, sort_keys=False, indent=4, default=str))", "title": "" }, { "docid": "8c61d6c7865ab18c5e53b6ac2f23a42b", "score": "0.5643344", "text": "def list_images(self):\n\n response = self.request(\"GET\", \"/image\")\n return [VPSImage(self, i) for i in response]", "title": "" }, { "docid": "2164e6fd465295d8e89651454522ce1c", "score": "0.5611817", "text": "def test_get_snapshot(self):\n pass", "title": "" }, { "docid": "560e6cd7eb2ea5eb2cd9585032cacabd", "score": "0.56116045", "text": "def Snapshot(servers,alias=None):\n\t\treturn(Server._ServerActions(\"Snapshot\",alias,servers))", "title": "" }, { "docid": "cab250409251bb239f366ba29ac851e8", "score": "0.5608895", "text": "def getSnapshotByName(self, name):\n response, body = self.http.get('/snapshots?name=%s' % name)\n return body", "title": "" }, { "docid": "f4dfefde3c08c18cb4649a636d425b05", "score": "0.55902904", "text": "def get_replay_list(query: str) -> List[str]:\n r = requests.get(query)\n return [replay['hash'] for replay in r.json()['data']]", "title": "" }, { "docid": "825e5a0fed611a770add55ce45db17a3", "score": "0.55787206", "text": "def findAll(self):\n cursor = self.collection.find()\n\n images = []\n\n for image in cursor:\n image['_id'] = str(image['_id']) \n images.append(image)\n\n return images", "title": "" }, { "docid": 
"d3d6519c743aa54a8b657100fcc0a03e", "score": "0.5568562", "text": "def print_snapshots(self, snapshots, msg=None, prefix=\"\"):\n if msg:\n self.verbose_print(msg, prefix=prefix)\n\n for snap in snapshots:\n self.verbose_print(\" %s: start_time %s\" % (snap.id, snap.start_time), prefix=prefix)", "title": "" }, { "docid": "e8e40e686b2a572e6d1ebe8534cd53eb", "score": "0.55677235", "text": "def getAllBackupLogs(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('logList')\n\t\treturn deserialize_list_Log_json(payload)", "title": "" }, { "docid": "1941c727fe09044ae1971268164d5463", "score": "0.5565313", "text": "def temporal_snapshots_ids(self):\n return sorted(self.snapshots.keys())", "title": "" }, { "docid": "734c292c60e66f61a8d28c4a26d52234", "score": "0.5563495", "text": "def listStorages(self):\n self.client.get(\"/api/storage?includeProducts=true\", headers = self.headers())\n #group = Group()\n #group.spawn(lambda: self.client.get(\"/some_url\")\n #group.spawn(lambda: self.client.get(\"/some_url\")\n #group.join() # wait for greenlets to finish", "title": "" }, { "docid": "f5509c9eda4104e085247ec867a88a4f", "score": "0.5562884", "text": "def _handle_list_snapshots(self, param):\n\n # Implement the handler here\n self.save_progress(\"In action handler for: {0}\".format(self.get_action_identifier()))\n\n # Add an action result object to self (BaseConnector) to represent the action for this param\n action_result = self.add_action_result(ActionResult(dict(param)))\n\n resource_group_name = param.get('resource_group_name')\n\n if resource_group_name:\n resource_part = VM_RESOURCE_GROUP_VALUE_PART.format(resourceGroupName=resource_group_name)\n endpoint = VM_LIST_SNAPSHOTS_ENDPOINT.format(resourceValue=resource_part)\n else:\n endpoint = VM_LIST_SNAPSHOTS_ENDPOINT.format(resourceValue='')\n\n # make rest call\n ret_val, response = self._make_rest_call_helper(endpoint, action_result, params=None, headers=None)\n\n if phantom.is_fail(ret_val):\n return action_result.get_status()\n\n # Add the response into the data section\n values = response.get('value', [])\n for s in values:\n action_result.add_data(s)\n\n # Add a dictionary that is made up of the most important values from data into the summary\n summary = action_result.update_summary({})\n summary['num_snapshots'] = len(values)\n\n # Return success, no need to set the message, only the status\n # BaseConnector will create a textual message based off of the summary dictionary\n return action_result.set_status(phantom.APP_SUCCESS)", "title": "" }, { "docid": "c13a5a4175caff41f17e1c2cfacdbcf0", "score": "0.55606097", "text": "def list(self):\n return self.api_client.call_api('get', self.base)", "title": "" }, { "docid": "f38b28915433683803ecb332e14e2995", "score": "0.55500245", "text": "def listImages(glance):\n\timages = []\n image_list = list(glance.images.list())\n for imageObj in image_list:\n image = {\n\t\t'name':imageObj.name,\n\t\t'id':imageObj.id\n\t\t}\n images.append(image)\n 
return images", "title": "" } ]
abc5a1f084d1bb4c3fbf0efa308dcc4f
Cook up a fake group.
[ { "docid": "f612d0b8394f81610baedbcb6bd05359", "score": "0.0", "text": "def group_object_factory(group_type_id, **attributes):\n group = {\n 'name': rl_fake().word(),\n 'description': rl_fake().sentences(nb=1)[0],\n 'active': flip(),\n 'groupTypeId': group_type_id,\n **attributes\n }\n return group", "title": "" } ]
[ { "docid": "8cdd45be6df65e77b44660665779a97c", "score": "0.59586596", "text": "def test_create_group(self):\n pass", "title": "" }, { "docid": "2a352fd728637cf85ed4e2752e473ddf", "score": "0.59189045", "text": "def dummy_group_fixture(self):\n try:\n from anuket.models.auth import AuthGroup\n group = AuthGroup()\n group.groupname = u'groupname'\n self.DBSession.add(group)\n self.DBSession.flush()\n return group\n except: # pragma: no cover\n self.DBSession.rollback()\n raise AssertionError", "title": "" }, { "docid": "eed1f1101ec392b9bd7b62f566306e66", "score": "0.5918058", "text": "def group():\n\n return GroupFactory()", "title": "" }, { "docid": "26ddb5c0497b0dd0009c45b1dc6f6ebb", "score": "0.5582809", "text": "def test_get_group(self):\n pass", "title": "" }, { "docid": "88e6dbc25530a053bc87c3eeda8493be", "score": "0.5570652", "text": "def test_update_group(self):\n pass", "title": "" }, { "docid": "3976bfd85a668b9aedfaa205668133e3", "score": "0.5543785", "text": "def test_update_group_cancelled(\n xfs, mockgroupandnode, queue, emptypool, loop_once, mock_serial_io\n):\n\n mockio = mockgroupandnode[0]\n\n # Set up the I/O mock\n mockio.node.before_update.return_value = True\n mockio.group.before_update.return_value = False\n\n xfs.create_file(\"/mocknode/ALPENHORN_NODE\", contents=\"mocknode\")\n\n update.update_loop(queue, emptypool)\n\n # Group update started\n mockio.group.before_update.assert_called_once()\n\n # Idle update didn't happen\n mockio.group.idle_update.assert_not_called()\n\n # After update hook called\n mockio.group.after_update.assert_called_once()", "title": "" }, { "docid": "e61b46aa9a0bd6880beae4acfd5456c3", "score": "0.5517771", "text": "def test_get_cyclos_group_initial_groups(self, mock):\n mock.return_value = self.cyclos_group.id\n\n self.assertEqual(self.profile.get_cyclos_group(), self.cyclos_group)", "title": "" }, { "docid": "d36f7424c1ec5f5c9219965b73288188", "score": "0.549434", "text": "def create_group(self):\n pass", "title": "" }, { "docid": "a5def43010d82ac043c446b8864883fb", "score": "0.54742795", "text": "def setUp(self):\n self.mongo_patcher = patch(\"ibutsu_server.controllers.group_controller.mongo\")\n self.mock_mongo = self.mongo_patcher.start()\n self.mock_mongo.groups = MagicMock()\n self.mock_mongo.groups.count.return_value = 1\n self.mock_mongo.groups.find_one.return_value = MOCK_GROUP\n self.mock_mongo.groups.find.return_value = [MOCK_GROUP]", "title": "" }, { "docid": "a741c2ce889d643f67c2f15c8ac6b620", "score": "0.5473682", "text": "def group():\n pass", "title": "" }, { "docid": "83fed3d3a6326073c22994086dac88e4", "score": "0.5401923", "text": "def _init_group(self, group):\n self.__group = group", "title": "" }, { "docid": "0ee8b450b21b03dc92a36a68360107a0", "score": "0.5366165", "text": "def test_groups_propagates_facets(self):\n class MockWorkList(WorkList):\n\n def featured_works(self, _db, facets):\n self.featured_called_with = facets\n return []\n\n def _groups_for_lanes(self, _db, relevant_children, relevant_lanes, facets):\n self.groups_called_with = facets\n return []\n\n mock = MockWorkList()\n mock.initialize(library=self._default_library)\n facets = object()\n [x for x in mock.groups(self._db, facets=facets)]\n eq_(facets, mock.groups_called_with)\n\n [x for x in mock.groups(self._db, facets=facets, include_sublanes=False)]\n eq_(facets, mock.featured_called_with)", "title": "" }, { "docid": "421f26ea10532adc565815b4dc572f6b", "score": "0.53270596", "text": "def group():", "title": "" }, { "docid": 
"8207b2ecb2f46c45a70ba8d0b619369e", "score": "0.53035206", "text": "def setup_group(sender, **kwargs):\n\n if kwargs['created']:\n group_perms = {}\n instance = kwargs['instance']\n for entry in ['manage', 'moderate', 'bbadmin', 'ic', 'ooc', 'titleself', 'titleother']:\n perm, created = GroupPermissions.objects.get_or_create(name=entry)\n group_perms[entry] = GroupPermissionsLink.objects.create(group=instance, permission=perm)\n\n rank_data = (\n {'rank': 1,\n 'title': 'Leader',\n 'perms': ('manage', 'moderate', 'bbadmin', 'ic', 'ooc', 'titleself', 'titleother')\n },\n {'rank': 2,\n 'title': 'Second in Command',\n 'perms': ('manage', 'moderate', 'bbadmin', 'ic', 'ooc', 'titleself', 'titleother')\n },\n {'rank': 3,\n 'title': 'Officer',\n 'perms': ('moderate', 'manage', 'ic', 'ooc', 'titleself', 'titleother')\n },\n {'rank': 4,\n 'title': 'Member',\n 'perms': ('ic', 'ooc', 'titleself')\n }\n )\n ranks = {}\n for rnk in rank_data:\n rank = instance.ranks.create(num=rnk['rank'], name=rnk['title'])\n ranks[rnk['rank']] = rank\n for perm in rnk['perms']:\n group_perms[perm].ranks.add(rank)\n\n\n instance.start_rank = ranks[4]\n instance.alert_rank = ranks[3]\n locks = 'member:group(##)'\n instance.lock_storage = locks.replace('##', str(instance.id))\n instance.save()\n instance.setup_channels()", "title": "" }, { "docid": "96666c45f1c3e4fc84f8d88e26d76bcf", "score": "0.52913404", "text": "def set_group(self, group):\n self.group = group", "title": "" }, { "docid": "41292dfe8299f2f0b7f7382485f28744", "score": "0.5286453", "text": "def test_bad_server_group(self):\n\n # TODO make this a param in YAML\n setid2 = self.basepath + self.params.get(\"setname\",\n '/run/testparams/setnames/othersetname/')\n\n hostlist1 = self.params.get(\"test_machines1\",'/run/hosts/')\n hostfile1 = WriteHostFile.WriteHostFile(hostlist1, self.tmp)\n\n hostlist2 = self.params.get(\"test_machines2a\",'/run/hosts/')\n hostfile2 = WriteHostFile.WriteHostFile(hostlist2, self.tmp)\n\n\n # TODO make these params in the yaml\n daosctl = self.basepath + '/install/bin/daosctl'\n\n # start 2 different sets of servers,\n ServerUtils.runServer(hostfile1, self.server_group, self.basepath)\n ServerUtils.runServer(hostfile2, setid2, self.basepath)\n\n host = hostlist1[0]\n\n # not sure I need to do this but ... 
give it time to start\n time.sleep(1)\n\n uuid_str = \"\"\n\n try:\n # use the uid/gid of the user running the test, these should\n # be perfectly valid\n uid = os.geteuid()\n gid = os.getegid()\n\n create_cmd = ('{0} create-pool -m {1} -u {2} -g {3} -s {4}'.\n format(daosctl, 0x731, uid, gid,\n self.server_group))\n uuid_str = \"\"\"{0}\"\"\".format(process.system_output(create_cmd))\n print(\"uuid is {0}\\n\".format(uuid_str))\n\n exists = CheckForPool.checkForPool(host, uuid_str)\n if exists != 0:\n self.fail(\"Pool {0} not found on host {1}.\\n\".\n format(uuid_str, host))\n\n # try and delete it using the wrong group\n delete_cmd = ('{0} destroy-pool -i {1} -s {2}'.\n format(daosctl, uuid_str, setid2))\n\n process.system(delete_cmd)\n\n exists = CheckForPool.checkForPool(host, uuid_str)\n if exists != 0:\n self.fail(\"Pool {0} not found on host {1} but delete \"\n \"should have failed.\\n\".format(uuid_str, host))\n\n except Exception as e:\n\n # now issue a good delete command so we clean-up after this test\n delete_cmd = ('{0} destroy-pool -i {1} -s {2}'.\n format(daosctl, uuid_str, self.server_group))\n\n process.system(delete_cmd)\n\n exists = CheckForPool.checkForPool(host, uuid_str)\n if exists == 0:\n self.fail(\"Pool {0} ound on host {1} but delete \"\n \"should have removed it.\\n\".\n format(uuid_str, host))\n\n # no matter what happens shutdown the server\n finally:\n ServerUtils.stopServer()\n os.remove(hostfile1)\n os.remove(hostfile2)", "title": "" }, { "docid": "38b85db2a335a0e2464e532d47bbeb61", "score": "0.51752114", "text": "def get_arcus_group():\n\n @click.group(name=\"arcus\")\n def arcus_group():\n \"\"\" Arcus services \"\"\"\n\n arcus_group.add_command(api.get_api_group())\n arcus_group.add_command(client.get_client_group())\n arcus_group.add_command(mgr.get_mgr_group())\n return arcus_group", "title": "" }, { "docid": "c1e403b39f62472c6b9f705458b6aa66", "score": "0.5174874", "text": "def cmd_grp(ctx, *args, **kwargs):", "title": "" }, { "docid": "3652f14d00bfb1998f04c50466294852", "score": "0.5165663", "text": "def test_update_group_idle(\n xfs, mockgroupandnode, queue, emptypool, loop_once, mock_serial_io\n):\n\n mockio = mockgroupandnode[0]\n\n # Set up the I/O mock\n mockio.node.before_update.return_value = True\n mockio.group.before_update.return_value = True\n\n xfs.create_file(\"/mocknode/ALPENHORN_NODE\", contents=\"mocknode\")\n\n update.update_loop(queue, emptypool)\n\n # Group update started\n mockio.group.before_update.assert_called_once()\n\n # Idle update happened\n mockio.group.idle_update.assert_called_once()\n\n # After update hook called\n mockio.group.after_update.assert_called_once()", "title": "" }, { "docid": "49aa187d669beab470623d16152843e6", "score": "0.51324594", "text": "def set_group(self, group):\n pass", "title": "" }, { "docid": "d06f50175bed00b4780979c9c28ce2c4", "score": "0.5129395", "text": "def test_groups_propagates_facets(self):\n def mock(self, _db, relevant_lanes, queryable_lanes, facets):\n self.called_with = facets\n return []\n old_value = Lane._groups_for_lanes\n Lane._groups_for_lanes = mock\n lane = self._lane()\n facets = FeaturedFacets(0)\n lane.groups(self._db, facets=facets)\n eq_(facets, lane.called_with)\n Lane._groups_for_lanes = old_value", "title": "" }, { "docid": "d0f0974fed00492f5de20f6e3adf2283", "score": "0.51257676", "text": "def test_sync_cyclos_group(self, mock):\n # Now creating a separate 'initial' Cyclos group. 
The testing profile\n # has been initially assigned to another group: ``self.cyclos_group``,\n # but now we are mocking the return value from the Cyclos SOAP API to\n # say that the Cyclos group of that user has changed for the new one.\n # Conclusively, the Cyclos group of the profile in Django DB must be\n # changed according with Cyclos response.\n cyclos_group = CyclosGroupFactory.create(initial=True)\n mock.return_value = cyclos_group.id\n\n cyclos_groupset = CyclosGroupSetFactory.create(\n groups=[cyclos_group])\n community = CC3CommunityFactory.create(groupsets=[cyclos_groupset])\n self.profile.community = community\n self.profile.save()\n\n # The method returns ``True`` because it succeeded.\n self.assertTrue(self.profile.sync_cyclos_group())\n\n # Check that the group has been changed in Django DB.\n self.assertEqual(self.profile.cyclos_group, cyclos_group)", "title": "" }, { "docid": "b65ac158996e5ccd877f5390dcf17bf2", "score": "0.5089821", "text": "def create_toolgroup(name=None):\n if name is None:\n name = \"Test Group %d\" % Static.group_count\n\n Static.group_count += 1\n return ToolGroup.objects.create(name=name)", "title": "" }, { "docid": "6a0ed4b6c22209ecc7e08af2becb7735", "score": "0.5044308", "text": "def default_grouping(instance, **kw):\r\n log(\"Firing signal: default_grouping\")\r\n\r\n hsh = generate_key(instance)\r\n if hsh:\r\n digest = hsh.hexdigest()\r\n try:\r\n created = False\r\n group = Group.all().filter(\"uid = \", digest)[0]\r\n group.count = Error.all().filter(\"group = \", group).count() + 1\r\n group.save()\r\n except IndexError:\r\n created = True\r\n group = Group()\r\n group.uid = digest\r\n group.count = 1\r\n group.save()\r\n\r\n instance.group = group\r\n instance.save()\r\n\r\n if created:\r\n signals.group_assigned.send(sender=group.__class__, instance=group)", "title": "" }, { "docid": "4d48c883a74830f0be0383d8a9f7e962", "score": "0.50282955", "text": "def setUp(self):\n # Pool member\n self.pool = None", "title": "" }, { "docid": "8ace7f579057ec83755b3468839058f4", "score": "0.5025882", "text": "def net_group_preparation(obj):\n\n node_ssh = obj.env.d_env.get_admin_remote()\n\n # preparing commands for gateway setting and private interface updating\n commands = [\n 'fuel network-group --delete --network 5',\n 'fuel network-group --create --name fuel network-group'\n '--create --name private --cidr 10.109.3.0/24 '\n '--gateway 10.109.3.1 --nodegroup 1',\n 'fuel network-group --set --network 6 --meta \"'\n '{\"name\": \"private\", \"notation\": \"cidr\", \"render_type\": null,'\n '\"map_priority\": 2, \"configurable\": true, \"use_gateway\": true,'\n ' \"render_addr_mask\": \"internal\", \"vlan_start\": null, '\n '\"cidr\": \"10.109.3.0/24\"}\"']\n\n for i in commands:\n node_ssh.execute_async(i)\n time.sleep(40)", "title": "" }, { "docid": "0d8631271b3e350c437033b84e09094d", "score": "0.5021436", "text": "def test_groups(self):\n c = usage.CompleteGroups()\n out = c._shellCode('some-option', usage._ZSH)\n self.assertEqual(out, ':group:_groups')\n\n c = usage.CompleteGroups(descr='some action', repeat=True)\n out = c._shellCode('some-option', usage._ZSH)\n self.assertEqual(out, '*:some action:_groups')", "title": "" }, { "docid": "b00de0ad2b7d78227d892980de7fd9ac", "score": "0.5017374", "text": "def default_grouping(instance, **kw):\r\n log(\"Firing signal: default_grouping\")\r\n\r\n hsh = generate_key(instance)\r\n if hsh:\r\n digest = hsh.hexdigest()\r\n try:\r\n created = False\r\n group = Group.objects.get(uid=digest)\r\n group.count = 
F('count')+getattr(instance, 'count', 1)\r\n group.save()\r\n except Group.DoesNotExist:\r\n created = True\r\n group = Group.objects.create(uid=digest, count=getattr(instance, 'count', 1))\r\n\r\n instance.group = group\r\n instance.save()\r\n\r\n if created:\r\n signals.group_assigned.send(sender=group.__class__, instance=group)\r\n signals.error_assigned.send(sender=instance.__class__, instance=instance)", "title": "" }, { "docid": "5ef1d62d4e0a2e972c1fe825e383a710", "score": "0.5016805", "text": "def fake_image_pool(self, num_fakes, fake, fake_pool):\r\n if num_fakes < self._pool_size:\r\n fake_pool[num_fakes] = fake\r\n return fake\r\n else:\r\n if random.random() > 0.5:\r\n random_id = random.randint(0, self._pool_size - 1)\r\n temp = fake_pool[random_id]\r\n fake_pool[random_id] = fake\r\n return temp\r\n else:\r\n return fake", "title": "" }, { "docid": "69a69e5cd65fd5cd9702375867197a9c", "score": "0.50043684", "text": "async def group_add(self, group, channel):\n ...", "title": "" }, { "docid": "a6c7037a645e42c42c1f9fab582ed9e5", "score": "0.49996015", "text": "async def test_get_groups(hass, client):\n network = hass.data[DATA_NETWORK] = MagicMock()\n node = MockNode(node_id=2)\n node.groups.associations = \"assoc\"\n node.groups.associations_instances = \"inst\"\n node.groups.label = \"the label\"\n node.groups.max_associations = \"max\"\n node.groups = {1: node.groups}\n network.nodes = {2: node}\n\n resp = await client.get(\"/api/zwave/groups/2\")\n\n assert resp.status == 200\n result = await resp.json()\n\n assert result == {\n \"1\": {\n \"association_instances\": \"inst\",\n \"associations\": \"assoc\",\n \"label\": \"the label\",\n \"max_associations\": \"max\",\n }\n }", "title": "" }, { "docid": "ca589dea1648b754eb793856ab1e74b1", "score": "0.49892", "text": "def test_bad_server_group(self):\n setid2 = self.basepath + self.params.get(\"setname\",\n '/run/setnames/othersetname/')\n\n self.hostlist_servers1 = self.params.get(\"test_machines1\",\n '/run/hosts/')\n hostfile_servers1 = write_host_file.write_host_file(\n self.hostlist_servers1, self.tmp)\n\n self.hostlist_servers2 = self.params.get(\"test_machines2a\",\n '/run/hosts/')\n hostfile_servers2 = write_host_file.write_host_file(\n self.hostlist_servers2, self.tmp)\n\n\n # TODO make these params in the yaml\n daosctl = self.basepath + '/install/bin/daosctl'\n\n # start 2 different sets of servers,\n self.agent_sessions = agent_utils.run_agent(self.basepath,\n self.hostlist_servers1)\n self.agent_sessions2 = agent_utils.run_agent(self.basepath,\n self.hostlist_servers2)\n server_utils.run_server(hostfile_servers1, self.server_group,\n self.basepath)\n server_utils.run_server(hostfile_servers2, setid2, self.basepath)\n\n host = self.hostlist_servers1[0]\n\n uuid_str = \"\"\n\n try:\n # use the uid/gid of the user running the test, these should\n # be perfectly valid\n uid = os.geteuid()\n gid = os.getegid()\n\n create_cmd = ('{0} create-pool -m {1} -u {2} -g {3} -s {4}'\n .format(daosctl, 0x731, uid, gid,\n self.server_group))\n uuid_str = \"\"\"{0}\"\"\".format(process.system_output(create_cmd))\n print (\"uuid is {0}\\n\".format(uuid_str))\n\n exists = check_for_pool.check_for_pool(host, uuid_str)\n if exists != 0:\n self.fail(\"Pool {0} not found on host {1}.\\n\"\n .format(uuid_str, host))\n\n # try and delete it using the wrong group\n delete_cmd = ('{0} destroy-pool -i {1} -s {2}'\n .format(daosctl, uuid_str, setid2))\n\n process.system(delete_cmd)\n\n exists = check_for_pool.check_for_pool(host, uuid_str)\n if 
exists != 0:\n self.fail(\"Pool {0} not found on host {1} but delete \"\n \"should have failed.\\n\".format(uuid_str, host))\n\n except Exception as _excep:\n # now issue a good delete command so we clean-up after this test\n delete_cmd = ('{0} destroy-pool -i {1} -s {2}'\n .format(daosctl, uuid_str, self.server_group))\n\n process.system(delete_cmd)\n\n exists = check_for_pool.check_for_pool(host, uuid_str)\n if exists == 0:\n self.fail(\"Pool {0} ound on host {1} but delete\"\n \"should have removed it.\\n\"\n .format(uuid_str, host))\n\n # no matter what happens shutdown the server\n finally:\n if self.agent_sessions:\n agent_utils.stop_agent(self.agent_sessions)\n if self.agent_sessions2:\n agent_utils.stop_agent(self.agent_sessions2)\n server_utils.stop_server(hosts=self.hostlist_servers)\n os.remove(hostfile_servers1)\n os.remove(hostfile_servers2)", "title": "" }, { "docid": "aa6f92092b2065d0224c2073f0484594", "score": "0.49700084", "text": "def test_update_group_not_idle_group(\n xfs, mockgroupandnode, queue, emptypool, loop_once, mock_serial_io\n):\n\n mockio, group, node = mockgroupandnode\n\n # This function adds something to the queue so that after the\n # node update, it's not idle\n def node_before_update(idle):\n nonlocal queue, node\n queue.put(None, node.name)\n\n return True\n\n # Set up the I/O mock\n mockio.node.before_update = node_before_update\n mockio.group.before_update.return_value = True\n\n xfs.create_file(\"/mocknode/ALPENHORN_NODE\", contents=\"mocknode\")\n\n update.update_loop(queue, emptypool)\n\n # Idle update didn't happen\n mockio.group.idle_update.assert_not_called()\n\n # After update hook called\n mockio.node.after_update.assert_called_once()", "title": "" }, { "docid": "fd54d664af30e46e542c929a40f6cdb1", "score": "0.49654865", "text": "def group(ctx, name, rediscover):\n\n ctx.ensure_object(dict)\n\n registry = Registry()\n\n if rediscover:\n registry.discover()\n\n try:\n group = registry.groups[name]\n except KeyError:\n logging.error(\n f\"Unknown group '{name}', available groups \"\n f\"are: {list(registry.groups.keys())}\"\n )\n sys.exit(1)\n\n ctx.obj[\"group\"] = group", "title": "" }, { "docid": "da998a6f7a0fac3e2de4a0fac2633477", "score": "0.49496168", "text": "def admin_group_fixture(self):\n try:\n from anuket.models.auth import AuthGroup\n group = AuthGroup()\n group.groupname = u'admins'\n self.DBSession.add(group)\n self.DBSession.flush()\n return group\n except: # pragma: no cover\n self.DBSession.rollback()\n raise AssertionError", "title": "" }, { "docid": "070e76253fb539d63960d2453b106293", "score": "0.49455586", "text": "def test(self):\n self.unpack_group(self.test_group)\n self.which='test'\n return self", "title": "" }, { "docid": "be614ebe7e9c9a4324cce60929f049a7", "score": "0.49385428", "text": "def test_delete_group_when_lbaas_down(self):\n pass", "title": "" }, { "docid": "7b0b93d00fb53014db46e7aa2801c31c", "score": "0.49219748", "text": "def setUp(self):\n super(ScalingUpExecuteWebhookTest, self).setUp()\n self.create_group_response = \\\n self.autoscale_behaviors.create_scaling_group_given(\n gc_min_entities=self.gc_min_entities_alt)\n self.group = self.create_group_response.entity\n self.resources.add(self.group, self.empty_scaling_group)", "title": "" }, { "docid": "e250a8013b7faff631b78f255efc66bf", "score": "0.49092612", "text": "def group(self, group):\n self.__group = group", "title": "" }, { "docid": "11f739abab3760ced39b5bb88584e80d", "score": "0.4902954", "text": "def set_group(self, **kwargs):\n builder = 
getargs('builder', kwargs)\n self.__set_builder(builder, GroupBuilder.__group)", "title": "" }, { "docid": "c26f7173a591877a17c0791d844ef0f0", "score": "0.49014905", "text": "def test_group_many_members(client, logged_in_dummy_user, dummy_group, make_users):\n users = [f\"testuser-{i}\" for i in range(1, 120)]\n make_users(users)\n ipa_admin.group_add_member(a_cn=\"dummy-group\", o_user=users)\n\n result = client.get('/group/dummy-group/')\n assert result.status_code == 200\n page = BeautifulSoup(result.data, 'html.parser')\n\n members = page.select(\"div[data-section='members'] ul li\")\n assert len(members) == len(users)", "title": "" }, { "docid": "9e96eb2692d02258969b375248bc4b05", "score": "0.48866758", "text": "def test_schedule_instance_group(\n self, mock_get_hosts, mock_get_all_states, mock_claim,\n ):\n num_instances = 2\n ig = objects.InstanceGroup(hosts=[])\n spec_obj = objects.RequestSpec(\n num_instances=num_instances,\n flavor=objects.Flavor(memory_mb=512,\n root_gb=512,\n ephemeral_gb=0,\n swap=0,\n vcpus=1,\n disabled=False,\n is_public=True,\n name=\"small_flavor\"),\n project_id=uuids.project_id,\n instance_group=ig,\n instance_uuid=uuids.instance0,\n requested_resources=[],\n )\n # Reset the RequestSpec changes so they don't interfere with the\n # assertion at the end of the test.\n spec_obj.obj_reset_changes(recursive=True)\n\n hs1 = mock.Mock(\n spec=host_manager.HostState,\n host=\"host1\",\n nodename=\"node1\",\n limits={},\n uuid=uuids.cn1,\n cell_uuid=uuids.cell1,\n instances={},\n aggregates=[],\n allocation_candidates=[],\n )\n hs2 = mock.Mock(\n spec=host_manager.HostState,\n host=\"host2\",\n nodename=\"node2\",\n limits={},\n uuid=uuids.cn2,\n cell_uuid=uuids.cell2,\n instances={},\n aggregates=[],\n allocation_candidates=[],\n )\n all_host_states = [hs1, hs2]\n mock_get_all_states.return_value = all_host_states\n mock_claim.return_value = True\n\n alloc_reqs_by_rp_uuid = {\n uuids.cn1: [{\"allocations\": \"fake_cn1_alloc\"}],\n uuids.cn2: [{\"allocations\": \"fake_cn2_alloc\"}],\n }\n\n # Simulate host 1 and host 2 being randomly returned first by\n # _get_sorted_hosts() in the two iterations for each instance in\n # num_instances\n visited_instances = set([])\n get_sorted_hosts_called_with_host_states = []\n\n def fake_get_sorted_hosts(_spec_obj, host_states, index):\n # Keep track of which instances are passed to the filters.\n visited_instances.add(_spec_obj.instance_uuid)\n if index % 2:\n s = list(host_states)\n get_sorted_hosts_called_with_host_states.append(s)\n return s\n s = list(host_states)\n get_sorted_hosts_called_with_host_states.append(s)\n return reversed(s)\n mock_get_hosts.side_effect = fake_get_sorted_hosts\n instance_uuids = [\n getattr(uuids, 'instance%d' % x) for x in range(num_instances)\n ]\n ctx = mock.Mock()\n self.manager._schedule(ctx, spec_obj, instance_uuids,\n alloc_reqs_by_rp_uuid, mock.sentinel.provider_summaries)\n\n # Check that we called claim_resources() for both the first and second\n # host state\n claim_calls = [\n mock.call(ctx.elevated.return_value, self.manager.placement_client,\n spec_obj, uuids.instance0,\n alloc_reqs_by_rp_uuid[uuids.cn2][0],\n allocation_request_version=None),\n mock.call(ctx.elevated.return_value, self.manager.placement_client,\n spec_obj, uuids.instance1,\n alloc_reqs_by_rp_uuid[uuids.cn1][0],\n allocation_request_version=None),\n ]\n mock_claim.assert_has_calls(claim_calls)\n\n # Check that _get_sorted_hosts() is called twice and that the\n # second time, we pass it the hosts that were 
returned from\n # _get_sorted_hosts() the first time\n sorted_host_calls = [\n mock.call(spec_obj, mock.ANY, 0),\n mock.call(spec_obj, mock.ANY, 1),\n ]\n mock_get_hosts.assert_has_calls(sorted_host_calls)\n self.assertEqual(\n all_host_states, get_sorted_hosts_called_with_host_states[0])\n self.assertEqual(\n [hs1], get_sorted_hosts_called_with_host_states[1])\n\n # The instance group object should have both host1 and host2 in its\n # instance group hosts list and there should not be any \"changes\" to\n # save in the instance group object\n self.assertEqual(['host2', 'host1'], ig.hosts)\n self.assertEqual({}, ig.obj_get_changes())\n # Assert that we updated HostState.instances for each host.\n self.assertIn(uuids.instance0, hs2.instances)\n self.assertIn(uuids.instance1, hs1.instances)\n # Make sure that the RequestSpec.instance_uuid is not dirty.\n self.assertEqual(sorted(instance_uuids), sorted(visited_instances))\n self.assertEqual(0, len(spec_obj.obj_what_changed()),\n spec_obj.obj_what_changed())", "title": "" }, { "docid": "026ed8c42250db42a3be4c688907eeac", "score": "0.4882672", "text": "def group_A(db):\n group, _ = Group.objects.get_or_create(name='A')\n return group", "title": "" }, { "docid": "23abaf95542ca12539a6d4702af52b20", "score": "0.4882168", "text": "async def kid():\n while True:\n try:\n print('Can I play?')\n await curio.timeout_after(1, start_evt.wait)\n break\n except curio.TaskTimeout:\n print('Wha!?')\n\n print('Building the Millennium Falcon in Minecraft')\n\n async with curio.TaskGroup() as f:\n\n await f.spawn(friend, 'Max')\n await f.spawn(friend, 'Lillian')\n await f.spawn(friend, 'Thomas')\n try:\n total = 0\n for fib_nbr in range(50):\n total += fib(fib_nbr)\n print(f'Total so far is {total} for {fib_nbr}')\n await curio.sleep(1000)\n except curio.CancelledError as xcp:\n print('Fine. Saving my work.')\n raise", "title": "" }, { "docid": "1adcebc925d7c7b09c21efe22547a93a", "score": "0.48802206", "text": "def prepare_test(self):\n super(MutexAdminTests, self).prepare_test()\n # print \" Preparing phase\",self.testphase,\" for mutex\", self.testmutex\n # username = self.proxy.username()\n self.admingroup = None", "title": "" }, { "docid": "c7d4bf709695dd5b26ca1859328e4494", "score": "0.48797065", "text": "def test_delete_group(self):\n pass", "title": "" }, { "docid": "e4fdb32f92c39fe9c22ac4f63fc220e7", "score": "0.4873824", "text": "def repopulate(self, setup):\n self.setup = setup\n if self.grouptype is '':\n raise Exception(\"Group has never been populated before.\")\n self._channel = None\n self._laddr = None\n self._paddr = None\n self.addr = self.addr.copy()", "title": "" }, { "docid": "d02b16f797260953cc947d26e8e4e820", "score": "0.4869267", "text": "def group_B(db):\n group, _ = Group.objects.get_or_create(name='B')\n return group", "title": "" }, { "docid": "a890a6c150606733f28445217bb3e4b5", "score": "0.48571378", "text": "def test_set_stack_group_fills_scale(default_kwargs):\n new_stack_group = 5\n\n kwargs = dict(default_kwargs)\n del kwargs[\"stack_group_id\"]\n catmaid_url = CatmaidUrl(**default_kwargs)\n catmaid_url.set_stack_group(new_stack_group)\n\n assert \"sg={}&sgs={}\".format(new_stack_group, SCALE) in str(catmaid_url)", "title": "" }, { "docid": "eeda2aba0c9189eecd1dd42b47cbf152", "score": "0.48528478", "text": "def test_get_cyclos_group_full_groups(self, mock):\n # Now creating a separate 'full' Cyclos group. The testing profile will\n # now be a only member of that group. 
The mocked Cyclos SOAP API method\n # ``get_group`` will return that Cyclos group ID, so our tested\n # ``get_cyclos_group`` method must return that group.\n cyclos_group = CyclosGroupFactory.create(full=True)\n mock.return_value = cyclos_group.id\n\n cyclos_groupset = CyclosGroupSetFactory.create(\n groups=[cyclos_group])\n community = CC3CommunityFactory.create(groupsets=[cyclos_groupset])\n self.profile.community = community\n self.profile.cyclos_group = cyclos_group\n self.profile.save()\n\n self.assertEqual(self.profile.get_cyclos_group(), cyclos_group)", "title": "" }, { "docid": "dba86b10c5644b8771931d60c7e7b956", "score": "0.48476613", "text": "def test_permissions(self):\n\n client = APIClient()\n a = client.login(username='user2@localhost.local', password='user2')\n\n # Create group\n response = client.post('/api/v1/groups/', common.valid_group, format='json')\n self.assertEqual(response.status_code, 201)\n content = response.content.decode(\"utf-8\")\n data = json.loads(content)\n cid = data['id']\n\n # Get new group as owner\n response = client.get(f'/api/v1/groups/{cid}', common.valid_group, format='json')\n self.assertEqual(response.status_code, 200)\n content = response.content.decode(\"utf-8\")\n\n self.assertIn(\"firstPlacementChoice\", content)\n self.assertIn(\"priceAdjustments\", content)\n self.assertIn(\"contactName\", content)\n\n client.logout()\n\n response = client.get(f'/api/v1/groups/{cid}', common.valid_group, format='json')\n self.assertEqual(response.status_code, 200)\n content = response.content.decode(\"utf-8\")\n\n self.assertNotIn(\"firstPlacementChoice\", content)\n self.assertIn(\"priceAdjustments\", content)\n self.assertNotIn(\"contactName\", content)\n\n client.logout()\n client.login(username='user1@localhost.local', password='user1')\n response = client.get(f'/api/v1/groups/{cid}', common.valid_group, format='json')\n self.assertEqual(response.status_code, 200)\n content = response.content.decode(\"utf-8\")\n\n self.assertNotIn(\"firstPlacementChoice\", content)\n self.assertIn(\"priceAdjustments\", content)\n self.assertNotIn(\"contactName\", content)\n\n client.logout()\n client.login(username='admin@localhost.local', password='admin')\n response = client.get(f'/api/v1/groups/{cid}', common.valid_group, format='json')\n self.assertEqual(response.status_code, 200)\n content = response.content.decode(\"utf-8\")\n\n self.assertIn(\"firstPlacementChoice\", content)\n self.assertIn(\"priceAdjustments\", content)\n self.assertIn(\"contactName\", content)", "title": "" }, { "docid": "b6b5932aed42cdd93da2c09049c0b4e8", "score": "0.4846273", "text": "def _setup_user_groups(self):\n userdata = [\n ('user_1', True, self.GROUPNAME_1),\n ('user_1_nostaff', False, self.GROUPNAME_1),\n ('user_2', True, self.GROUPNAME_2),\n ('user_2_nostaff', False, self.GROUPNAME_2),\n ('user_3', True, self.GROUPNAME_3),\n ('user_3_nostaff', False, self.GROUPNAME_3),\n ('user_4', True, self.GROUPNAME_4),\n ('user_4_nostaff', False, self.GROUPNAME_4),\n ('user_5', True, self.GROUPNAME_5),\n ('user_5_nostaff', False, self.GROUPNAME_5),\n ('user_staff', True, None),\n ]\n default_users_count = User.objects.all().count()\n for username, is_staff, groupname in userdata:\n user = User.objects.create(username=username,\n email=username + '@domain.com',\n is_active=True,\n is_staff=is_staff)\n user.set_password(username)\n user.save()\n if groupname:\n group, _ = Group.objects.get_or_create(name=groupname)\n group.user_set.add(user)\n group.save()\n self.assertEquals(11, 
User.objects.all().count()-default_users_count)", "title": "" }, { "docid": "b816d0e366cbfc6cd166a8939e92cc7a", "score": "0.48420587", "text": "def main_group(ctx):\n ctx.obj = {}", "title": "" }, { "docid": "96e07b955e750331dc2920d696a6bdc8", "score": "0.48274598", "text": "def group(self, group) :\n\t\ttry :\n\t\t\tself._group = group\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "39cbafb0d2f195db6d3aa1b70953a408", "score": "0.48202485", "text": "def test_delete_group_when_nova_down(self):\n pass", "title": "" }, { "docid": "0cd3463e8a342f661384d15396731503", "score": "0.4818262", "text": "def _group(path, group_id):\n\n _deploy_group(path)\n try:\n yield\n finally:\n remove_group(group_id)", "title": "" }, { "docid": "b6629acfeaf0a86ac6864767cb8ba54f", "score": "0.48135042", "text": "def test_list_groups(self):\n pass", "title": "" }, { "docid": "8e02aef8c16d7f9f622f11be69af00f4", "score": "0.48130164", "text": "def join_group(self, login, group):", "title": "" }, { "docid": "016e775585cdd8c19f8d4a737e30de4c", "score": "0.48032963", "text": "def setUp(self):\n\n self.user1 = UserFactory(username=\"gcostanza\")\n self.user2 = UserFactory(username=\"hsimpson\")\n self.user3 = UserFactory(username=\"gseindfeld\")\n\n # make user 2 an administrator\n adminGrp, created = Group.objects.get_or_create(name=\"admin\")\n self.user2.groups.add(adminGrp)\n\n self.ticket = TicketFactory(submitted_by=self.user1)\n\n self.msg1 = \"This is a public message posted by George\"\n self.comment1 = FollowUpFactory(\n ticket=self.ticket, submitted_by=self.user1, comment=self.msg1\n )\n\n self.msg2 = \"This is a public message posted by Homer\"\n self.comment2 = FollowUpFactory(\n ticket=self.ticket, submitted_by=self.user2, comment=self.msg2\n )\n\n self.msg3 = \"This is a PRIVATE message posted by George\"\n self.comment3 = FollowUpFactory(\n ticket=self.ticket, submitted_by=self.user1, comment=self.msg3, private=True\n )\n\n self.msg4 = \"This is a PRIVATE message posted by Homer\"\n self.comment4 = FollowUpFactory(\n ticket=self.ticket, submitted_by=self.user2, comment=self.msg4, private=True\n )", "title": "" }, { "docid": "9a9afafb5fd4966d166882b03249c41a", "score": "0.47973725", "text": "def test_group_add_member(client, dummy_user_as_group_manager, make_user):\n make_user(\"testuser\")\n with fml_testing.mock_sends(\n MemberSponsorV1(\n {\"msg\": {\"agent\": \"dummy\", \"user\": \"testuser\", \"group\": \"dummy-group\"}}\n )\n ):\n result = client.post(\n '/group/dummy-group/members/', data={\"new_member_username\": \"testuser\"}\n )\n\n expected_message = (\n \"\"\"You got it! 
testuser has been added to dummy-group.\n <span class='ml-auto' id=\"flashed-undo-button\">\n <form action=\"/group/dummy-group/members/remove\" method=\"post\">\"\"\"\n \"\\n \\n \"\n \"\"\" <button type=\"submit\" class=\"btn btn-outline-success btn-sm\"\n name=\"username\" value=\"testuser\">\n Undo\n </button>\n </form>\n </span>\"\"\"\n ) # noqa\n\n assert_redirects_with_flash(\n result,\n expected_url=\"/group/dummy-group/\",\n expected_message=Markup(expected_message),\n expected_category=\"success\",\n )", "title": "" }, { "docid": "62956f4fafe2cada70e34550e79b68d7", "score": "0.47939774", "text": "def test_send_created_emails(self, mock_task):\n Group.objects.create(\n name='Signal test',\n store=self.store,\n email='test@cloock.be'\n )\n self.assertTrue(mock_task.called)", "title": "" }, { "docid": "a9b3d035811cdd42c18428223263f627", "score": "0.4791812", "text": "def _group_create(self, name, pkgs, opts):\n\n srids = self._group_find_sr(pkgs, opts)\n srids = self._group_verify_grouping(srids, opts)\n\n # compose the xml\n xml='<request><action type=\"group\">'\n for r in srids:\n xml += \"<grouped id='\" + str(r) + \"'/>\"\n xml += '</action><description>' + str(name) + '</description></request>'\n\n # sent the request to server\n query = {'cmd': 'create'}\n u = makeurl(opts.apiurl, ['request'], query=query)\n f = http_POST(u, data=xml)\n root = ET.parse(f).getroot().attrib['id']\n\n print('Created GR#{0} with following requests: {1}'.format(str(root), ', '.join(map(str, srids))))", "title": "" }, { "docid": "c12cab9143e938d30500d13d29f61ea6", "score": "0.47788748", "text": "def test_creator(self):\n component = bundy.bundy.special_component.SockCreator(None, self,\n 'needed', None)\n orig_creator = \\\n bundy.bundy.special_component.bundy.bundy.sockcreator.Creator\n # Just ignore the creator call\n bundy.bundy.special_component.bundy.bundy.sockcreator.Creator = \\\n lambda path: self.FakeCreator()\n component.start()\n self.assertTrue(self.__change_user_called)\n # Doesn't do anything, but doesn't crash\n component.stop()\n component.kill()\n component.kill(True)\n component = bundy.bundy.special_component.SockCreator(None, self,\n 'needed', None)", "title": "" }, { "docid": "2033ab73c70caa09a8f3542e21edfb8f", "score": "0.4771558", "text": "def groups(self):\n ...", "title": "" }, { "docid": "9f42e410f82da13e53a00bc18e5cc46a", "score": "0.47633356", "text": "def test_slug_called(\n mocker: MockFixture, tech_group_factory: factories.TechGroupFactory\n) -> None:\n mocked_slugger = mocker.patch(\"domain.utils.generate_unique_slug\")\n mocked_slugger.return_value = \"python\"\n tech_group = tech_group_factory(name=\"Python\", description=\"Best group ever.\")\n assert mocked_slugger.called\n assert tech_group.slug == \"python\"", "title": "" }, { "docid": "d71ce2eaebb9f0de21aa7cc1740ee0a6", "score": "0.4759978", "text": "def setUp(self):\n self.client = Client()\n self.course = Course.objects.create(title='algdat', course_code='tdt4125')\n self.username = 'CC'\n self.password = '123'\n self.user = User.objects.create_user(username=self.username, password=self.password)\n self.client.login(username=self.username, password=self.password)\n self.cc_group = Group.objects.create(name='course_coordinators')\n self.user.groups.add(self.cc_group)\n self.assistant_group = Group.objects.create(name='assistants')\n self.student_group = Group.objects.create(name='students')", "title": "" }, { "docid": "99a242bccde838eb91d11e3c35045fb8", "score": "0.47599667", "text": "def 
test_that_group_category_may_be_set_to_administrative_group():\n\n group = GroupFactory(category=GroupCategory.administrative_group)\n\n assert group.category == GroupCategory.administrative_group", "title": "" }, { "docid": "03ad7e29ee1057e86c16a7400641dde9", "score": "0.47552565", "text": "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "title": "" }, { "docid": "be3ee55e9c22271a71fb5aa89d60bf18", "score": "0.47533455", "text": "def test_airflow_group(Group):\n\n assert Group('airflow').exists is True", "title": "" }, { "docid": "b27e1f3d92c132328457e2cefdf1dfd3", "score": "0.47510412", "text": "def create_group(self, group_name: str, contacts: list):\n\n self.driver.find_element_by_css_selector(\n '#side > header > div._2XP8p > div > span > div:nth-child(3) > div'\n ).click()\n\n self.driver.find_element_by_css_selector(\n '#side > header > div._2XP8p > div > span > div._2n-zq._3zHcq > span > div > ul > li:nth-child(1)'\n ).click()\n\n for contact in contacts:\n box = self.driver.find_element_by_css_selector(\n '#app > div > div > div.Akuo4 > div._1Flk2._2DPZK > span > div > span > div > div > div._3tEPr > div > div > input'\n )\n\n box.clear()\n box.send_keys(contact)\n box.send_keys(Keys.ENTER)\n \n self.driver.find_element_by_css_selector(\n '#app > div > div > div.Akuo4 > div._1Flk2._2DPZK > span > div > span > div > div > span > div'\n ).click()\n\n self.driver.find_element_by_css_selector(\n '#app > div > div > div.Akuo4 > div._1Flk2._2DPZK > span > div > span > div > div > div:nth-child(2) > div > div._3rhi1 > div > div._2_1wd.copyable-text.selectable-text'\n ).send_keys(group_name)\n\n self.driver.find_element_by_css_selector(\n '#app > div > div > div.Akuo4 > div._1Flk2._2DPZK > span > div > span > div > div > span > div > div'\n ).click()\n\n sleep(2)\n\n self.driver.find_element_by_css_selector(\n '#app > div > span:nth-child(2) > div > span > div > div > div > div > div > div._1uJw_ > div._1dwBj._3xWLK'\n ).click()\n\n sleep(0.5)\n\n self.driver.find_element_by_css_selector(\n '#app > div > span:nth-child(2) > div > span > div > div > div > div > div > div > div > span > div'\n ).click()", "title": "" }, { "docid": "bdbe5dbb6275c31b1ed3dfad4f534a09", "score": "0.4744065", "text": "def test_resume_default_group(self, MockSource):\n # Add 2 sources to the 'all' group and 2 to the 'test' group\n mock_all_1 = Mock(state=MockSource.PAUSE)\n mock_all_2 = Mock(state=MockSource.PAUSE)\n mock_test_1 = Mock(state=MockSource.PAUSE)\n mock_test_2 = Mock(state=MockSource.PAUSE)\n\n self.director.add(mock_all_1)\n self.director.add(mock_all_2)\n self.director.add(mock_test_1, group='test')\n self.director.add(mock_test_2, group='test')\n\n # Resuming the default group should only resume sources in 'all'\n self.director.resume()\n\n mock_all_1.play.assert_called_once()\n mock_all_2.play.assert_called_once()\n mock_test_1.play.assert_not_called()\n mock_test_2.play.assert_not_called()", "title": "" }, { "docid": "bb2ca5f19777d050ca46a67937d9acc6", "score": "0.47332528", "text": "def test_set_stack_group(default_url):\n new_stack_group = 5\n new_stack_group_scale = -1\n\n default_url.set_stack_group(new_stack_group, new_stack_group_scale)\n assert \"sg={}&sgs={}\".format(new_stack_group, new_stack_group_scale) in str(\n default_url\n )", "title": "" }, { "docid": 
"1d1f74e36cb0cc08aa4af3064f7016f1", "score": "0.47265986", "text": "def groups_factory(_context, request):\n user_service = request.find_service(name=\"user\")\n return GroupService(session=request.db, user_fetcher=user_service.fetch)", "title": "" }, { "docid": "311b28f53b903e3b845ea76756adb24e", "score": "0.47247353", "text": "def test_create_scaling_group_account_closed(self):\n pass", "title": "" }, { "docid": "d92c06fe727b6cfe3f3d9d61120b9d30", "score": "0.47198117", "text": "def test_group_scale(num_apps, num_instances):\n\n current_test = initalize_test('root', 'apps', 'group', num_apps, num_instances)\n group_test_app(current_test)\n log_current_test(current_test)", "title": "" }, { "docid": "23835d35c13a1804058fc92556496693", "score": "0.47181144", "text": "def setSelfGroups(self):\n # TODO: Accept groups variable. The name of the method sounds more\n # natural if it accepts it.\n for item in self.Groups:\n self.seq_svc.addJointGroup(item[0], item[1])\n for k, v in self.HandGroups.iteritems():\n if self.sc_svc:\n self.sc_svc.addJointGroup(k, v)", "title": "" }, { "docid": "170eac14af3129168139b00444c95147", "score": "0.4716958", "text": "def group_open(ctx, name):\n\n resp: GroupCreationResponse = ctx[\"client\"].create_group(group_name=name)\n write_or_print(\"Opened group with ID {} and name '{}'\".format(resp.group.identifier, resp.group.name))", "title": "" }, { "docid": "cde3ccb1967281a0c166b04071946f89", "score": "0.47060907", "text": "def test_update_create_front_page_groups(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "title": "" }, { "docid": "3ea7e4b29238bccc9b35009bc5527ddd", "score": "0.47020248", "text": "def test_jenkins_group(host):\n assert host.group(\"jenkins\").exists", "title": "" }, { "docid": "ac6177b89d55e08494fbfa2278a274e4", "score": "0.470072", "text": "async def test_get_groups_nogroups(hass, client):\n network = hass.data[DATA_NETWORK] = MagicMock()\n node = MockNode(node_id=2)\n\n network.nodes = {2: node}\n\n resp = await client.get(\"/api/zwave/groups/2\")\n\n assert resp.status == 200\n result = await resp.json()\n\n assert result == {}", "title": "" }, { "docid": "39dba94d8a35f9e1aba92ab6f6222ab9", "score": "0.47001383", "text": "def testInstallGroup(self):\n db = self._ParseSpec(MINIMAL_ACCOUNTS_DB)\n mock_user_db = mock.MagicMock()\n db.InstallGroup(MINIMAL_DB_GROUP.name, mock_user_db)\n installed_group = user_db.Group(\n group=MINIMAL_DB_GROUP.name, password=MINIMAL_DB_GROUP.password,\n gid=MINIMAL_DB_GROUP.gid, users=MINIMAL_DB_GROUP.users)\n self.assertEqual([mock.call.AddGroup(installed_group)],\n mock_user_db.mock_calls)", "title": "" }, { "docid": "3373f379fbe2d47c4fe72b90e818bbae", "score": "0.46738327", "text": "def __init__(self, grabber, mirrors, **kwargs):\r\n MirrorGroup.__init__(self, grabber, mirrors, **kwargs)\r\n random.shuffle(self.mirrors)", "title": "" }, { "docid": "933f5d819ae7921487584f8f682eab21", "score": "0.4669613", "text": "def test_add_defaults_to_all_group(self):\n # Adding a source without a group should default to 'all' group\n mock_source = Mock()\n self.director.add(mock_source)\n\n # Playing the 'all' group should play the added source\n self.director.play()\n mock_source.play.assert_called_once()", "title": "" }, { "docid": "9af7138f75c1194ebab4f63ebd8c7e95", "score": "0.46691155", "text": "def testC(self):\n job = Job(\"TestJob\")\n seeder = AutomaticSeeding()\n\n 
job.addBaggageParameter(\"process.RandomNumberGeneratorService.seed1.initialSeed\", 123445)\n job.addBaggageParameter(\"process.RandomNumberGeneratorService.seed2.initialSeed\", 123445)\n job.addBaggageParameter(\"process.RandomNumberGeneratorService.seed3.initialSeed\", 7464738)\n job.addBaggageParameter(\"process.RandomNumberGeneratorService.seed44.initialSeed\", 98273762)\n\n\n seeder(job)\n\n tweak = PSetTweak()\n for x in job.baggage.process.RandomNumberGeneratorService:\n parameter = \"process.RandomNumberGeneratorService.%s.initialSeed\" % x._internal_name\n tweak.addParameter(parameter, x.initialSeed)\n print(tweak)", "title": "" }, { "docid": "60eca68617829f8385200c313fc1d775", "score": "0.46561834", "text": "def cook(M, shared, cook_id):\n\n while True:\n shared.empty_pot.wait()\n prepare_part_of_meal(cook_id)\n shared.cooks_barier.cooks_wait(cook_id, shared)", "title": "" }, { "docid": "4adc433dc4a4eb052abe47bc02d89bfd", "score": "0.46548897", "text": "def _reconstruct_group(self, grpidx):\n pass", "title": "" }, { "docid": "73dcba4a8456102d8695a3fc35423eb6", "score": "0.46473083", "text": "def test_create_subgroup_global(self):\n # This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.\n pass", "title": "" }, { "docid": "6fae0e9a2cc48372e379b7af3dc223dd", "score": "0.46439537", "text": "def test_get_groups_correct_request(self):\n self.spotinst_client._make_request = MagicMock(return_value={\"response\": {\"items\": []}})\n\n groups = self.spotinst_client.get_groups()\n\n self.assertEqual([], groups)\n\n self.spotinst_client._make_request.assert_called_once_with(method='get', path='aws/ec2/group')", "title": "" }, { "docid": "3ebe87adf6c3a33e6c0f25125aca0f4a", "score": "0.46404207", "text": "def join_group(self, group):\n\t\tleave_group()\n\t\tgroup.add(self)\n\t\tself.group = group", "title": "" }, { "docid": "c6cd6f704d9d55c0f5750bd05df1ce99", "score": "0.4640268", "text": "def test_export_to_enclosing_group(self):\n pass", "title": "" }, { "docid": "504b93bd444d6f53d7261273151b4d8f", "score": "0.46392605", "text": "def group(self, group):\n\n self._group = group", "title": "" }, { "docid": "504b93bd444d6f53d7261273151b4d8f", "score": "0.46392605", "text": "def group(self, group):\n\n self._group = group", "title": "" }, { "docid": "504b93bd444d6f53d7261273151b4d8f", "score": "0.46392605", "text": "def group(self, group):\n\n self._group = group", "title": "" }, { "docid": "73a8c3b5e929cb26b71e33347cb2c72e", "score": "0.46392146", "text": "def test_set_up(self):\n xknx = XKNX(loop=self.loop)\n cover = Cover(\n xknx,\n 'TestCover',\n group_address_long='1/2/1',\n group_address_short='1/2/2',\n group_address_position='1/2/3',\n group_address_position_state='1/2/4')\n self.loop.run_until_complete(asyncio.Task(cover.set_up()))\n self.assertEqual(xknx.telegrams.qsize(), 1)\n telegram = xknx.telegrams.get_nowait()\n self.assertEqual(telegram,\n Telegram(GroupAddress('1/2/1'), payload=DPTBinary(0)))", "title": "" }, { "docid": "ded2c608a21a4a3e8b9876fe819a31b3", "score": "0.4635181", "text": "def _set_group_flag(self):\n self.flag = np.zeros(len(self), dtype=np.uint8)", "title": "" }, { "docid": "7264a8a7bdf29decf58ecf4c2335d76f", "score": "0.46349347", "text": "def test_group_remove_self(client, logged_in_dummy_user, dummy_group):\n ipa_admin.group_add_member(\"dummy-group\", o_user=\"dummy\")\n result = client.get('/group/dummy-group/')\n assert result.status_code == 200\n page = BeautifulSoup(result.data, 
'html.parser')\n leave_btn = page.select_one(\"#leave-group-btn\")\n assert leave_btn.get_text(strip=True) == \"Leave group\"\n\n result = client.post(\n '/group/dummy-group/members/remove', data={\"username\": \"dummy\"}\n )\n\n expected_message = \"\"\"You got it! dummy has been removed from dummy-group.\n <span class='ml-auto' id=\"flashed-undo-button\">\n <form action=\"/group/dummy-group/members/\" method=\"post\">\n <input id=\"username\" name=\"username\" required type=\"hidden\" value=\"dummy\">\n <button type=\"submit\" class=\"btn btn-outline-success btn-sm\"\n name=\"new_member_username\" value=\"dummy\">\n Undo\n </button>\n </form>\n </span>\"\"\"\n\n assert_redirects_with_flash(\n result,\n expected_url=\"/group/dummy-group/\",\n expected_message=expected_message,\n expected_category=\"success\",\n )", "title": "" }, { "docid": "ef894bf1f38dc2ba1daead5764ace43c", "score": "0.4632541", "text": "def start(self, group: Group):\n\n def run_user(user):\n \"\"\"\n Main function for User greenlet. It's important that this function takes the user\n instance as an argument, since we use greenlet_instance.args[0] to retrieve a reference to the\n User instance.\n \"\"\"\n user.run()\n\n self._greenlet = group.spawn(run_user, self)\n self._group = group\n return self._greenlet", "title": "" }, { "docid": "7da6b866829092f648f44be812266083", "score": "0.4632177", "text": "def TestRapiNodeGroups():\n (group1, group2, group3) = qa_utils.GetNonexistentGroups(3)\n\n # Create a group with no attributes\n body = {\n \"name\": group1,\n }\n\n (job_id, ) = _DoTests([\n (\"/2/groups\", _VerifyReturnsJob, \"POST\", body),\n ])\n\n _WaitForRapiJob(job_id)\n\n # Create a group specifying alloc_policy\n body = {\n \"name\": group2,\n \"alloc_policy\": constants.ALLOC_POLICY_UNALLOCABLE,\n }\n\n (job_id, ) = _DoTests([\n (\"/2/groups\", _VerifyReturnsJob, \"POST\", body),\n ])\n\n _WaitForRapiJob(job_id)\n\n # Modify alloc_policy\n body = {\n \"alloc_policy\": constants.ALLOC_POLICY_UNALLOCABLE,\n }\n\n (job_id, ) = _DoTests([\n (\"/2/groups/%s/modify\" % group1, _VerifyReturnsJob, \"PUT\", body),\n ])\n\n _WaitForRapiJob(job_id)\n\n # Rename a group\n body = {\n \"new_name\": group3,\n }\n\n (job_id, ) = _DoTests([\n (\"/2/groups/%s/rename\" % group2, _VerifyReturnsJob, \"PUT\", body),\n ])\n\n _WaitForRapiJob(job_id)\n\n # Test for get/set symmetry\n\n # Identifying the node - RAPI provides these itself\n IDENTIFIERS = [\"group_name\"]\n # As the name states, not exposed yet\n NOT_EXPOSED_YET = [\"hv_state\", \"disk_state\"]\n\n # The parameters we do not want to get and set (as that sets the\n # group-specific params to the filled ones)\n FILLED_PARAMS = [\"ndparams\", \"ipolicy\", \"diskparams\"]\n\n # The aliases that we can use to perform this test with the group-specific\n # params\n CUSTOM_PARAMS = [\"custom_ndparams\", \"custom_ipolicy\", \"custom_diskparams\"]\n\n _DoGetPutTests(\"/2/groups/%s\" % group3, \"/2/groups/%s/modify\" % group3,\n opcodes.OpGroupSetParams.OP_PARAMS,\n rapi_only_aliases=CUSTOM_PARAMS,\n exceptions=(IDENTIFIERS + NOT_EXPOSED_YET),\n set_exceptions=FILLED_PARAMS)\n\n # Delete groups\n for group in [group1, group3]:\n (job_id, ) = _DoTests([\n (\"/2/groups/%s\" % group, _VerifyReturnsJob, \"DELETE\", None),\n ])\n\n _WaitForRapiJob(job_id)", "title": "" }, { "docid": "d0553e34b58dcbf14af1cd04d204ac52", "score": "0.4629675", "text": "def set_group(self, newgroup, **kwargs):\n for p in self.filter(**kwargs):\n p.group = newgroup", "title": "" }, { "docid": 
"63388532856fbd850a92607bd9ed7285", "score": "0.46282", "text": "def setUp(self):\n self.banned_user = self.create_user(is_banned=True, is_superuser=True)\n self.super_user = self.create_superuser()\n self.normal_user = self.create_user()\n self.group = mommy.make('groups.Group')\n self.banned_user.add_to_group(self.group.pk)\n self.super_user.add_to_group(self.group.pk)\n self.normal_user.add_to_group(self.group.pk)\n\n self.client.login(username=self.super_user.email, password='moo')\n\n path = os.path.dirname(os.path.abspath(__file__))\n self.largefile = path + '/1000x500.png'\n self.smallfile = path + '/200x200.png'\n\n self.largeimage = Image()\n self.largeimage.image = File(open(self.largefile))\n self.largeimage.user = self.super_user\n self.largeimage.save()\n\n self.smallimage = Image()\n self.smallimage.image = File(open(self.smallfile))\n self.smallimage.user = self.super_user\n self.smallimage.save()", "title": "" }, { "docid": "0436f190e2847a27714887326bb15be1", "score": "0.46279088", "text": "def enable_grub_cgroups():\n cfg = hookenv.config()\n if cfg.get('enable-cgroups'):\n hookenv.log('Calling enable_grub_cgroups.sh and rebooting machine.')\n check_call(['scripts/enable_grub_cgroups.sh'])\n set_state('cgroups.modified')", "title": "" } ]
c9b7fcef9c8934cafb0d5e9b726af7ef
Filter out include or libpath flags pointing to directories
[ { "docid": "d52e2576ef53b9c6f8090a07257f4f7b", "score": "0.55614215", "text": "def nonexistent_path_flags(cls, flags: AnySet[str]) -> OCDFrozenSet[str]:\n match_func = cls.directory_flag_matcher\n check_func = os.path.exists\n return OCDFrozenSet(\n filter(lambda flag: bool(match_func(flag)) and \\\n (not check_func(flag[1:])), flags))", "title": "" } ]
[ { "docid": "7003e546c3b31a256289940fe0d9f000", "score": "0.67606294", "text": "def include_flags(self):\n return \" \".join([\"-I\" + x for x in self.directories])", "title": "" }, { "docid": "529dbc089802271e42e6a220ae01d54f", "score": "0.6293114", "text": "def filter_toolchain_files(dirname, files):\n split = dirname.split(os.path.sep)\n for ign in IGNORE_LIST:\n if ign in split:\n print('Ignoring dir %s' % dirname)\n return files\n return []", "title": "" }, { "docid": "ad5053ad5627eb1ea197638a47b58a1e", "score": "0.61774784", "text": "def NormalizeIncludePaths(self, include_paths):\r\n normalized = []\r\n for path in include_paths:\r\n if path[0] == '/':\r\n path = gyp.common.RelativePath(path, self.android_top_dir)\r\n\r\n # Filter out the Android standard search path.\r\n if path not in android_standard_include_paths:\r\n normalized.append(path)\r\n return normalized", "title": "" }, { "docid": "d56df4ae782a5055f370b4d6144213cb", "score": "0.61155087", "text": "def IncludeFilter(pathnames):\n if isinstance(pathnames, basestring):\n pathnames = [pathnames]\n else:\n pathnames = list(pathnames)\n\n included_dirs = set()\n def filter_fun(rel_file, abs_file):\n # include rel_file if its a child of an already included directory\n rel_dirname = os.path.dirname(rel_file)\n if rel_dirname in included_dirs:\n if os.path.isdir(abs_file):\n included_dirs.add(rel_file)\n return True\n for pathname in pathnames:\n if fnmatch(rel_file, pathname):\n if os.path.isdir(abs_file):\n included_dirs.add(rel_file)\n return True\n return False\n return IncludeExcludeFilter(filter_fun)", "title": "" }, { "docid": "3de00de598f901f163630e72f4e850ca", "score": "0.60713255", "text": "def search_flags(self):\n return \" \".join([\"-L\" + x for x in self.directories])", "title": "" }, { "docid": "df7725638535ff7e6810409ca1ba6f54", "score": "0.6038492", "text": "def find_library(pattern, path_list, version=''):\n result = []\n for path in path_list:\n filepattern = os.path.join(path, pattern)\n result += glob.glob(filepattern)\n # ignore debug library\n result = [i for i in result if not i.endswith('d.lib')]\n if version:\n result = [i for i in result if version in i]\n return result", "title": "" }, { "docid": "0874c80fed1e2b8b29483f897cd1ac67", "score": "0.5986793", "text": "def exclude_paths(args):\n results = []\n if args.exclude:\n for excl_path in args.exclude + ['.git', '.svn']:\n results.append(os.path.abspath(os.path.join(args.root, excl_path)))\n return results", "title": "" }, { "docid": "971900aa690b3eeac8f0dacacf8b16df", "score": "0.59588826", "text": "def _library_paths():\n return [\n \"\",\n \"lib64\",\n \"lib\",\n \"lib/*-linux-gnu\",\n \"lib/x64\",\n \"extras/CUPTI/*\",\n \"local/cuda/lib64\",\n \"local/cuda/extras/CUPTI/lib64\",\n ]", "title": "" }, { "docid": "9e7d188a5f31211804a4eaba971603e2", "score": "0.5956603", "text": "def filter_out_dir(dirpath):\n return dirpath in dirs", "title": "" }, { "docid": "d664c011a25f663255cb2a9920da0853", "score": "0.59355843", "text": "def include_patterns(*patterns):\n\n def _ignore_patterns(path, names):\n # This is our cuisine\n bname = os.path.basename(path)\n if 'divide' in bname or 'log' in bname:\n keep = []\n else:\n keep = set(name for pattern in patterns\n for name in fnmatch.filter(names, pattern))\n ignore = set(name for name in names\n if name not in keep and not\n os.path.isdir(os.path.join(path, name)))\n return ignore\n\n return _ignore_patterns", "title": "" }, { "docid": "2f2cf03dc2e84ea63c49e63aa16b4137", "score": "0.59305507", "text": 
"def delocate_path(\n tree_path: Text,\n lib_path: Text,\n lib_filt_func: Optional[Union[str, Callable[[Text], bool]]] = None,\n copy_filt_func: Optional[Callable[[Text], bool]] = filter_system_libs,\n executable_path: Optional[Text] = None,\n ignore_missing: bool = False,\n) -> Dict[Text, Dict[Text, Text]]:\n if lib_filt_func == \"dylibs-only\":\n lib_filt_func = _dylibs_only\n elif isinstance(lib_filt_func, str):\n raise TypeError('lib_filt_func string can only be \"dylibs-only\"')\n if lib_filt_func is None:\n lib_filt_func = _allow_all\n if copy_filt_func is None:\n copy_filt_func = _allow_all\n if not exists(lib_path):\n os.makedirs(lib_path)\n # Do not inspect dependencies of libraries that will not be copied.\n filt_func = functools.partial(\n _delocate_filter_function,\n lib_filt_func=lib_filt_func,\n copy_filt_func=copy_filt_func,\n )\n\n lib_dict = tree_libs_from_directory(\n tree_path,\n lib_filt_func=filt_func,\n copy_filt_func=filt_func,\n executable_path=executable_path,\n ignore_missing=ignore_missing,\n )\n\n return delocate_tree_libs(lib_dict, lib_path, tree_path)", "title": "" }, { "docid": "24a6c6c11436e5b964db5e3409f3e749", "score": "0.59018075", "text": "def paths_containing_libs(paths, library_names):\n required_lib_fnames = possible_library_filenames(library_names)\n\n rpaths_to_include = []\n paths = path_to_os_path(*paths)\n for path in paths:\n fnames = set(os.listdir(path))\n if fnames & required_lib_fnames:\n rpaths_to_include.append(path)\n\n return rpaths_to_include", "title": "" }, { "docid": "6c6529a9a3fd18a015a48ac88395a060", "score": "0.58747447", "text": "def ExtractIncludesFromCFlags(self, cflags):\r\n clean_cflags = []\r\n include_paths = []\r\n for flag in cflags:\r\n if flag.startswith('-I'):\r\n include_paths.append(flag[2:])\r\n else:\r\n clean_cflags.append(flag)\r\n\r\n return (clean_cflags, include_paths)", "title": "" }, { "docid": "cc41675b02ac90cad900b3e3ccdbbc93", "score": "0.58399403", "text": "def include_patterns(*patterns):\n def _ignore_patterns(path, names):\n keep = set(name for pattern in patterns\n for name in filter(names, pattern))\n ignore = set(name for name in names\n if name not in keep and not os.path.isdir(os.path.join(path, name)))\n return ignore\n return _ignore_patterns", "title": "" }, { "docid": "c622316ad9ddc84d4bd018eafbb96f05", "score": "0.5828427", "text": "def _header_paths():\n return [\n \"\",\n \"include\",\n \"include/cuda\",\n \"include/*-linux-gnu\",\n \"extras/CUPTI/include\",\n \"include/cuda/CUPTI\",\n \"local/cuda/extras/CUPTI/include\",\n ]", "title": "" }, { "docid": "e7b4272d465d44ef22d18638e20492a9", "score": "0.5822568", "text": "def locate_py_files(root_path: pathlib.Path):\n include = {root_path / dir_path for dir_path in CONFIG[\"include\"]}\n exclude = {root_path / dir_path for dir_path in CONFIG[\"exclude\"]}\n exclude_unrooted = CONFIG[\"exclude_unrooted\"]\n for path in map(str, root_path.rglob(\"*.py\")):\n if (\n any(\n path.startswith(str(root_path / dir_path))\n for dir_path in map(pathlib.Path.absolute, include)\n )\n and all(\n not path.startswith(str(root_path / dir_path))\n for dir_path in map(pathlib.Path.absolute, exclude)\n )\n and all(dir_path not in path for dir_path in exclude_unrooted)\n ):\n print(path)", "title": "" }, { "docid": "bceec6f1e72418779cb4b66e950fe396", "score": "0.58090967", "text": "def _filter_directory_contents(paths=list[Path]) -> list[Path]:\n\n def _path_valid(p: Path) -> bool:\n return (\n (p.suffix == \".py\" or p.is_dir())\n and not p.name == 
\"__pycache__\"\n and not p.name.startswith(\".\")\n )\n\n return [p for p in paths if _path_valid(p)]", "title": "" }, { "docid": "f8cef1a70e2dfb927311ded01a270b24", "score": "0.57744974", "text": "def filter_out_dir(dirpath):\n return dirpath in dirs or os.path.ismount(dirpath)", "title": "" }, { "docid": "38a4a5afd2ad4079a0a527981d18b584", "score": "0.5732457", "text": "def search_paths_for_libraries(*path_hints):\n library_paths = []\n for path in path_hints:\n if not os.path.isdir(path):\n continue\n\n path = os.path.abspath(path)\n library_paths.append(path)\n\n lib_dir = os.path.join(path, \"lib\")\n if os.path.isdir(lib_dir):\n library_paths.append(lib_dir)\n\n lib64_dir = os.path.join(path, \"lib64\")\n if os.path.isdir(lib64_dir):\n library_paths.append(lib64_dir)\n\n return library_paths", "title": "" }, { "docid": "08264111800eeecb07b454552443e5c2", "score": "0.5728944", "text": "def PruneExtraFiles():\n pattern = re.compile(reduce(lambda x,y: '%s|%s' % (x,y),\n map(lambda z: '(%s)' % z, WHITELIST)))\n for (root, dirs, files) in os.walk(IDL_PATH, topdown=False):\n for f in files:\n if not pattern.match(f):\n os.remove(os.path.join(root, f))\n for d in dirs:\n dirpath = os.path.join(root, d)\n if not os.listdir(dirpath):\n shutil.rmtree(dirpath)", "title": "" }, { "docid": "2cbe945c5811803e548d45c51b3c6af9", "score": "0.57279307", "text": "def _extract_library_paths_from_glibc_module_files(self):\n\n library_paths = set()\n for module_file in self._glibc_module_files:\n if module_file.soname:\n library_paths.add(module_file.path.parent.__str__())\n\n return library_paths", "title": "" }, { "docid": "c815682808506a10f228d29c5203a238", "score": "0.571526", "text": "def DefaultIncludePaths( value = True):\n if not value:\n print \"DEFAULT_INCLUDE_PATHS :=\";\n print", "title": "" }, { "docid": "58b3d04f18fe6899c936fba107f95062", "score": "0.57003844", "text": "def get_library_dirs(self):\n opt = FCompiler.get_library_dirs(self)\n flang_dir = dirname(self.executables['compiler_f77'][0])\n opt.append(normpath(join(flang_dir, '..', 'lib')))\n\n return opt", "title": "" }, { "docid": "21f15944684d425d3461ff7fb5435cb2", "score": "0.5674821", "text": "def get_libraries():\n library_path = _get_library_path()\n dirlist = os.listdir(library_path)\n libraryfilter = lambda d: \\\n (not d[:1] == '.') and \\\n os.path.isdir(os.path.join(library_path, d))\n return filter(libraryfilter, dirlist)", "title": "" }, { "docid": "ac1db4c6baf92778ee193518a1ed6dea", "score": "0.5638876", "text": "def _get_include_dirs(self, mod_info):\n inc_dirs = []\n path_or_internal = {True: 'INTERNAL',\n False: 'PATH'}['is_component' in mod_info.keys()]\n try:\n cmakecache_fid = open(os.path.join(mod_info['build_dir'], 'CMakeCache.txt'))\n for line in cmakecache_fid:\n if line.find('GNURADIO_CORE_INCLUDE_DIRS:%s' % path_or_internal) != -1:\n inc_dirs += line.replace('GNURADIO_CORE_INCLUDE_DIRS:%s=' % path_or_internal, '').strip().split(';')\n if line.find('GRUEL_INCLUDE_DIRS:%s' % path_or_internal) != -1:\n inc_dirs += line.replace('GRUEL_INCLUDE_DIRS:%s=' % path_or_internal, '').strip().split(';')\n except IOError:\n pass\n if len(inc_dirs) == 0 and self.options.suggested_dirs is not None:\n inc_dirs = [os.path.normpath(path) for path in self.options.suggested_dirs.split(':') if os.path.isdir(path)]\n return inc_dirs", "title": "" }, { "docid": "658a70a90e9ab0e11864e1c919c23da5", "score": "0.56177175", "text": "def include_directories(self):\n return [os.path.join(self._directory, 'include')]", "title": "" }, { 
"docid": "0e2f33a70d3c6844bcc4a1a84347cb22", "score": "0.5614063", "text": "def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):\r\n config = self._TargetConfig(config)\r\n libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),\r\n config, default=[])\r\n libpaths = [os.path.normpath(\r\n gyp_to_build_path(self.ConvertVSMacros(p, config=config)))\r\n for p in libpaths]\r\n return ['/LIBPATH:\"' + p + '\"' for p in libpaths]", "title": "" }, { "docid": "f4e0f66a3e098e673d001af066887670", "score": "0.55964196", "text": "def exclude():\n exclude = ['*/alignment*', \n '*/trash*',\n '*/_Archived*',\n '*/backup*', \n '*/Commissioning*', \n '*/Test*', \n '*/.AppleDouble*',\n '*LaserX*',\n '*LaserZ*',\n '*Copy*',\n '*._*',\n '*.DS_Store*']\n return exclude", "title": "" }, { "docid": "13d1a0eeb797d48518a3c63ad9ab6839", "score": "0.5560822", "text": "def _get_library_paths(self) -> [str]:\n if self._sys_path is None:\n return None\n\n return [path for path in self._sys_path if \"lib\" in path and path.startswith(\"/\")]", "title": "" }, { "docid": "239734a4bf77e2430bd0cd8510ac3ddb", "score": "0.5555426", "text": "def scrubLibPaths( self ):\r\n \r\n self.LogInfo(\"Scrubbing the LD and DYLD LIBRARY paths\")\r\n \r\n self.scrubLibPath(\"LD_LIBRARY_PATH\")\r\n self.scrubLibPath(\"DYLD_LIBRARY_PATH\")\r\n self.scrubLibPath(\"DYLD_FALLBACK_LIBRARY_PATH\")\r\n self.scrubLibPath(\"DYLD_FRAMEWORK_PATH\")\r\n self.scrubLibPath(\"DYLD_FALLBACK_FRAMEWORK_PATH\")", "title": "" }, { "docid": "50f70afed241d68ed3d0ab2b7c48d090", "score": "0.5548547", "text": "def disable_stdlib_finder():\n\n def matches(finder):\n return getattr(\n finder, '__module__', None\n ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')\n\n for finder in filter(matches, sys.meta_path): # pragma: nocover\n del finder.find_distributions", "title": "" }, { "docid": "1a2da92aeeb03080dc15b12cae833941", "score": "0.55236316", "text": "def collect_python3_paths():\r\n paths = []\r\n import site\r\n packages = site.getsitepackages()\r\n\r\n for path in packages:\r\n paths.append(path)\r\n for include in glob.glob(path + '/*.egg'):\r\n paths.append(include)\r\n for include in glob.glob(path + '/*.egg-link'):\r\n with open(include, 'rb') as data:\r\n for include_path in str(data.read()).split('\\n'):\r\n include_path = os.path.join(include, include_path)\r\n paths.append(include_path)\r\n\r\n return paths", "title": "" }, { "docid": "c8f776e69905ab59fa4e46cce959ba07", "score": "0.55208635", "text": "def _filter_whatispaths(files):\n dirs = {}\n for v in files:\n splitted = os.path.split(v)\n if splitted[0]:\n dirs[splitted[0]] = 1\n return dirs.keys()", "title": "" }, { "docid": "f686401c2ad10b7db345fa20d1f3acf2", "score": "0.5501588", "text": "def _default_directories(self):\n dir_list = super(HeaderList, self).directories\n values = []\n for d in dir_list:\n # If the path contains a subdirectory named 'include' then stop\n # there and don't add anything else to the path.\n m = self.include_regex.match(d)\n value = os.path.join(*m.group(1, 2)) if m else d\n values.append(value)\n return values", "title": "" }, { "docid": "c0959a87d1de6b18f83e42198799c82d", "score": "0.5491064", "text": "def search_order():\n paths = [sys.prefix]\n paths.append(os.path.abspath(os.path.expanduser(\"~\")))\n if under_venv():\n paths.append(real_prefix())\n\n return [os.path.normpath(p) for p in paths]", "title": "" }, { "docid": "81baa06a9a272271ec7068938ce6b929", "score": "0.5483322", "text": "def 
test_noexclude_stdlib(self):\n if version_string in ('2.3', '2.4'): # CTB\n return\n \n figleaf.start(ignore_python_lib=False)\n \n os.path.dirname('/some/weird/path') # use some stdlib code\n \n figleaf.stop()\n\n coverage = figleaf.get_data().gather_files()\n\n print 'sysdir is:', sysdir\n\n found = False\n for k in coverage:\n print 'checking:', k\n if k.startswith(sysdir):\n found = True\n break\n\n assert found", "title": "" }, { "docid": "98926a701cd436eb5e0d1e392165795d", "score": "0.54817516", "text": "def _get_include_dirs(self, mod_info):\r\n inc_dirs = []\r\n try:\r\n cmakecache_fid = open(os.path.join(mod_info['build_dir'], 'CMakeCache.txt'))\r\n for line in cmakecache_fid:\r\n if line.find('GNURADIO_CORE_INCLUDE_DIRS:PATH') != -1:\r\n inc_dirs += line.replace('GNURADIO_CORE_INCLUDE_DIRS:PATH=', '').strip().split(';')\r\n if line.find('GRUEL_INCLUDE_DIRS:PATH') != -1:\r\n inc_dirs += line.replace('GRUEL_INCLUDE_DIRS:PATH=', '').strip().split(';')\r\n except IOError:\r\n pass\r\n return inc_dirs", "title": "" }, { "docid": "4c5133fcf08cc547e4cb57d85a4eaef1", "score": "0.547823", "text": "def ignored(path):\r\n path_ = path[2:]\r\n # Exception: files that are versionned by git but that we want to\r\n # ignore, and special sharebox directory.\r\n if (path_ == '.git-attributes' or\r\n path_.startswith('.git/') or\r\n path_.startswith('.git-annex/') or\r\n path_ == '.command'):\r\n return True\r\n else:\r\n ls_options = \"-c -o -d -m --full-name --exclude-standard\"\r\n considered = subprocess.Popen(\r\n shlex.split('git ls-files %s -- \"%s\"' % (ls_options, path_)),\r\n stdout=subprocess.PIPE).communicate()[0].strip().split('\\n')\r\n return path_ not in considered", "title": "" }, { "docid": "678640a43d89762ec294e23190159dd4", "score": "0.5423602", "text": "def lib_dirs(self) -> List[Path]:\n lib_dirs = [self.path / {\n Host.Darwin: 'lib/gcc/i686-apple-darwin11/4.2.1',\n Host.Linux: 'lib/gcc/x86_64-linux/4.8.3',\n Host.Windows64: 'lib/gcc/x86_64-w64-mingw32/4.8.3',\n }[self.target]]\n if self.target != Host.Darwin:\n lib_dirs.append(self.path / self.triple / 'lib64')\n return lib_dirs", "title": "" }, { "docid": "cc9136a1bac39f76dd854510a593f2e7", "score": "0.54070866", "text": "def _listdir_fullpaths_filtered(root, relpath=True):\n all = []\n def _rejectFile(f):\n if f[-3:] in ('pyc','bak'):\n return True\n if f.endswith('~'):\n return True\n if f.startswith('#') or f.startswith('.#'):\n return True\n return False\n \n for base, dirs, files in os.walk(root):\n if base.endswith('CVS'):\n continue\n for file in files:\n if not _rejectFile(file):\n _path = os.path.join(base, file)\n \n if relpath:\n _path = _path.replace(root,'')\n if _path.startswith('/'):\n _path = _path[1:]\n all.append(_path)\n \n return all", "title": "" }, { "docid": "dca3b87e16988fd681e3dff65f66dd44", "score": "0.539719", "text": "def getIncludePaths(self):\n return self.includePaths", "title": "" }, { "docid": "012e2f70fe61e2c895c700e8c37397d9", "score": "0.53953606", "text": "def get_ld_lib_paths(self):\n ld_lib_paths = []\n if self.source_dist:\n ld_lib_paths = [ os.path.join(self.basedir,\"libdrizzleclient/.libs/\")\n #, os.path.join(self.basedir,\"libdrizzle-2.0/libdrizzle.libs\")\n , os.path.join(self.basedir,\"libdrizzle/.libs\")\n , os.path.join(self.basedir,\"libdrizzle-2.0/libdrizzle/.libs\")\n , os.path.join(self.basedir,\"libdrizzle-1.0/libdrizzle/.libs\")\n , os.path.join(self.basedir,\"mysys/.libs/\")\n , os.path.join(self.basedir,\"mystrings/.libs/\")\n , 
os.path.join(self.basedir,\"drizzled/.libs/\")\n\t\t\t , \"/usr/local/lib\"\n ]\n else:\n ld_lib_paths = [ os.path.join(self.basedir,\"lib\")]\n return ld_lib_paths", "title": "" }, { "docid": "092ce2486f6ab65f1489658384cd4540", "score": "0.53864384", "text": "def GetAllIncludeDirectories(target_list, target_dicts,\r\n shared_intermediate_dirs, config_name, params):\r\n\r\n gyp_includes_set = set()\r\n compiler_includes_list = []\r\n\r\n flavor = gyp.common.GetFlavor(params)\r\n if flavor == 'win':\r\n generator_flags = params.get('generator_flags', {})\r\n for target_name in target_list:\r\n target = target_dicts[target_name]\r\n if config_name in target['configurations']:\r\n config = target['configurations'][config_name]\r\n\r\n # Look for any include dirs that were explicitly added via cflags. This\r\n # may be done in gyp files to force certain includes to come at the end.\r\n # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and\r\n # remove this.\r\n if flavor == 'win':\r\n msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)\r\n cflags = msvs_settings.GetCflags(config_name)\r\n else:\r\n cflags = config['cflags']\r\n for cflag in cflags:\r\n include_dir = ''\r\n if cflag.startswith('-I'):\r\n include_dir = cflag[2:]\r\n if include_dir and not include_dir in compiler_includes_list:\r\n compiler_includes_list.append(include_dir)\r\n\r\n # Find standard gyp include dirs.\r\n if config.has_key('include_dirs'):\r\n include_dirs = config['include_dirs']\r\n for shared_intermediate_dir in shared_intermediate_dirs:\r\n for include_dir in include_dirs:\r\n include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',\r\n shared_intermediate_dir)\r\n if not os.path.isabs(include_dir):\r\n base_dir = os.path.dirname(target_name)\r\n\r\n include_dir = base_dir + '/' + include_dir\r\n include_dir = os.path.abspath(include_dir)\r\n\r\n if not include_dir in gyp_includes_set:\r\n gyp_includes_set.add(include_dir)\r\n\r\n\r\n # Generate a list that has all the include dirs.\r\n all_includes_list = list(gyp_includes_set)\r\n all_includes_list.sort()\r\n for compiler_include in compiler_includes_list:\r\n if not compiler_include in gyp_includes_set:\r\n all_includes_list.append(compiler_include)\r\n\r\n # All done.\r\n return all_includes_list", "title": "" }, { "docid": "0a97da3f44cfc455290e394fea74f5c6", "score": "0.5384708", "text": "def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) -> Set[str]:\n pattern = re.compile(search_pattern)\n found = set()\n\n for fil in path.glob(glob_pattern):\n if not fil.is_file():\n continue\n\n for match in pattern.finditer(fil.read_text()):\n integration = match.groups()[1]\n\n if (\n # If it's importing something from itself\n integration == path.name\n # Platform file\n or (path / f\"{integration}.py\").exists()\n # Dir for platform\n or (path / integration).exists()\n ):\n continue\n\n found.add(match.groups()[1])\n\n return found", "title": "" }, { "docid": "7213a4c0893e347fa00cfb27ddb5c442", "score": "0.5378105", "text": "def getIncludePaths(compiler, lang):\n import io\n\n from ToolBOSCore.Util.FastScript import execProgram\n from subprocess import CalledProcessError\n\n def matchSearchPathStart(l):\n return l.strip() != '#include <...> search starts here:'\n\n def matchSearchPathEnd(l):\n return l.strip() != 'End of search list.'\n\n import itertools\n lines = _preprocessString( compiler, '', lang ).split('\\n')\n\n inp = io.StringIO( '' )\n out = io.StringIO( )\n err = io.StringIO( )\n\n 
try:\n execProgram( '{} -x{} -E -Wp,-v -'.format( compiler, lang ),\n stdin=inp,\n stdout=out,\n stderr=err )\n\n lines = err.getvalue().split( '\\n' )\n\n it = itertools.dropwhile( matchSearchPathStart , lines )\n it = itertools.islice( it, 1, None )\n it = itertools.takewhile( matchSearchPathEnd , it )\n\n return [ p.strip() for p in it ]\n\n except CalledProcessError as e:\n logging.error( 'Unable to run the preprocessor: %s.', e )\n\n return None", "title": "" }, { "docid": "bf785b2d157d31baf818a750ebd795a6", "score": "0.53767055", "text": "def ldflags(self):\n return self._libs", "title": "" }, { "docid": "7830ad4d7a3e0b4d71c9d6019465dcba", "score": "0.53272426", "text": "def __exclude_file(self):\n excludes = r'|'.join([fnmatch.translate(x) for x in self._exclude_ext]) or r'$.'\n return excludes", "title": "" }, { "docid": "9274caa85f8c438d71fa3f58ebde1fcb", "score": "0.53244853", "text": "def ncconfig_includes(self):\n\n incs = self.run_ncconfig('--cflags')[0].split()\n for inc in incs:\n if inc[:2] == '-I':\n self.include_dirs.append(inc[2:])\n elif inc[:2] == '-D':\n macro = inc[2:]\n value = None\n if '=' in macro:\n macro, value = macro.split('=')\n self.define_macros.append((macro, value))\n else:\n warings.warn(\"Unrecognised nc-config cflag %s\" % inc)\n \n # Now detect netcdf_incdir and hdf5_incdir\n for path in self.include_dirs:\n if os.path.exists(os.path.join(path, 'netcdf.h')):\n self.netcdf_incdir = path\n if os.path.exists(os.path.join(path, 'hdf5.h')):\n self.hdf5_incdir = path", "title": "" }, { "docid": "98f5960831c75c4e70469c54140d9f8e", "score": "0.5316095", "text": "def lib_dirs(self):\n ret = []\n c_compiler = self.c_compiler()\n for x in [y.type for y in self.variables] + [\n y.op for y in self.node_order]:\n try:\n try:\n ret += x.c_lib_dirs(c_compiler)\n except TypeError:\n ret += x.c_lib_dirs()\n except utils.MethodNotDefined:\n pass\n return utils.uniq(ret)", "title": "" }, { "docid": "8d65f2698f2ae2aa65b7c2e99017ad7f", "score": "0.5311644", "text": "def get_paths_to_include(config):\n dirpath = config.project.dirpath\n allpaths = set()\n\n # all mandatory files, which must exist (currently only bundles.yaml is mandatory, and\n # it's verified before)\n for fname in MANDATORY_FILES:\n allpaths.add(dirpath / fname)\n\n # the extra files (relative paths)\n for spec in config.parts:\n fpaths = sorted(fpath for fpath in dirpath.glob(spec) if fpath.is_file())\n logger.debug(\"Including per prime config %r: %s.\", spec, fpaths)\n allpaths.update(fpaths)\n\n return sorted(allpaths)", "title": "" }, { "docid": "748e5872329f4964f6677941c64a0e35", "score": "0.530998", "text": "def find_includes(self):\r\n missing_incs = list(self.inc_list)\r\n for incl in missing_incs:\r\n if incl.name in self.header_files:\r\n self.inc_list.remove(incl)", "title": "" }, { "docid": "63e0da25eafb1581cefb603a46006726", "score": "0.5295539", "text": "def AdjustIncludeDirs(self, include_dirs, config):\r\n config = self._TargetConfig(config)\r\n includes = include_dirs + self.msvs_system_include_dirs[config]\r\n includes.extend(self._Setting(\r\n ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))\r\n return [self.ConvertVSMacros(p, config=config) for p in includes]", "title": "" }, { "docid": "86dc1eeb3432a8db6796df6f79815016", "score": "0.52825993", "text": "def getpaths(self,libname):\r\n if os.path.isabs(libname):\r\n yield libname\r\n\r\n else:\r\n for path in self.getplatformpaths(libname):\r\n yield path\r\n\r\n path = ctypes.util.find_library(libname)\r\n 
if path: yield path", "title": "" }, { "docid": "76c54c440c5730204b1074d543fa45eb", "score": "0.5281462", "text": "def get_ldflags(self, ldflags: OCDFrozenSet[str]) -> OCDFrozenSet[str]:\n return ldflags - self.nonexistent_path_flags(ldflags) # type: ignore", "title": "" }, { "docid": "ffecfdb1a26d61e4b03d34278f551a4e", "score": "0.5279856", "text": "def GetLibFlags(self, config, gyp_to_build_path):\r\n config = self._TargetConfig(config)\r\n libflags = []\r\n lib = self._GetWrapper(self, self.msvs_settings[config],\r\n 'VCLibrarianTool', append=libflags)\r\n libflags.extend(self._GetAdditionalLibraryDirectories(\r\n 'VCLibrarianTool', config, gyp_to_build_path))\r\n lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})\r\n lib('TargetMachine', map={'1': 'X86', '17': 'X64'}, prefix='/MACHINE:')\r\n lib('AdditionalOptions')\r\n return libflags", "title": "" }, { "docid": "50716b94e4dfad11382a2a7cc8e3ea99", "score": "0.5272813", "text": "def enable_include_from_sourcetree():\n return os.getenv(\"AIT_ENABLE_INCLUDE_FROM_SOURCETREE\", \"0\") == \"1\"", "title": "" }, { "docid": "698272f5909f61af856b3d2ae95ebeab", "score": "0.5272811", "text": "def get_project_excludes(project):\n\texcludes = []\n\ttestDirectory = get_test_directory()\n\tif project == CODEBLOCKS:\n\t\t# excludes because of %pythoncode\n\t\t# advprops.h is __WXPYTHON__ at line 192\n\t\t# propgrid.cpp is the macro IMPLEMENT_GET_VALUE\n\t\texcludes.append(\"--exclude=wx/wxscintilla.h\")\n\t\texcludes.append(\"--exclude=wx/propgrid/advprops.h\")\n\t\texcludes.append(\"--exclude=wx/propgrid/manager.h\")\n\t\texcludes.append(\"--exclude=wx/propgrid/propgrid.h\")\n\t\texcludes.append(\"--exclude=propgrid/propgrid.cpp\")\n\telif project == DRJAVA:\n\t\tNone\n\telif project == JEDIT:\n\t\tNone\n\telif project == KDEVELOP:\n\t\tNone # excludes.append(\"--exclude=app_templates\")\n\telif project == MONODEVELOP:\n\t\tNone \n\telif project == SCITE:\n\t\texcludes.append(\"--exclude=lua\")\n\telif project == SHARPDEVELOP:\n\t\t# excludes.append(\"--exclude=ExpressionEvaluator_Tests.cs\")\n\t\tNone\n\telif project == TESTPROJECT:\n\t\tNone\n\telse:\n\t\tsystem_exit(\"Bad get_project_excludes() project id: \" + project)\n\treturn excludes", "title": "" }, { "docid": "42d709657767598343567758f44faa1d", "score": "0.5263136", "text": "def in_search_directory(self, directory, root):\n if self.ignore_hidden and (directory[0] == \".\"\n or directory == \"__pycache__\"):\n return False\n path = os.path.join(root, directory)\n if os.path.abspath(path) in self.ignore_paths:\n return False\n if any(fnmatch.fnmatch(path[2:], filter)\n for filter in self.ignore_folders):\n return False\n return True", "title": "" }, { "docid": "3871ea477ca41d187bd2c632dae2cbce", "score": "0.52623516", "text": "def source_code_paths(self):\n paths = set()\n\n for line in self.iter_manifest_lines():\n line = line.strip().replace(' \\\\', '')\n directory = os.path.split(line)[0]\n if line.endswith('.c') and directory in self.LIBRARY_CORE_DIRS:\n paths.add(os.path.join(self._directory, line))\n\n return paths", "title": "" }, { "docid": "833a44de3fdd01bc38b0dc069883a562", "score": "0.5245854", "text": "def _GetIncludeDirs(config):\r\n # TODO(bradnelson): include_dirs should really be flexible enough not to\r\n # require this sort of thing.\r\n include_dirs = (\r\n config.get('include_dirs', []) +\r\n config.get('msvs_system_include_dirs', []))\r\n resource_include_dirs = config.get('resource_include_dirs', include_dirs)\r\n include_dirs = _FixPaths(include_dirs)\r\n 
resource_include_dirs = _FixPaths(resource_include_dirs)\r\n return include_dirs, resource_include_dirs", "title": "" }, { "docid": "a70332ec4f5e362e0d556b1ddde2edb0", "score": "0.52378595", "text": "def extend_include_path(include_path):\n root_path = \"-I{}\".format(include_path)\n ROOT.gInterpreter.AddIncludePath(root_path)\n\n # Retrieve ROOT internal list of include paths and add debug statement\n root_includepath = ROOT.gInterpreter.GetIncludePath()\n logger.debug(\"ROOT include paths:\\n{}\".format(root_includepath))", "title": "" }, { "docid": "6ee11d3d8874b0c3fbcb9ec45fd27a5b", "score": "0.52328384", "text": "def exclude_files():\n return ['cap_press']", "title": "" }, { "docid": "4ced358dd1a55f82c694e8c6bcb481e0", "score": "0.5212651", "text": "def get_java_include_paths(env, javac, version) -> List[str]:\n\n if not javac:\n return []\n\n # on Windows, we have a path to the actual javac, so look locally\n if env['PLATFORM'] == 'win32':\n javac_bin_dir = os.path.dirname(javac)\n java_inc_dir = os.path.normpath(os.path.join(javac_bin_dir, '..', 'include'))\n paths = [java_inc_dir, os.path.join(java_inc_dir, 'win32')]\n\n # for the others, we probably found something which isn't in the JDK dir,\n # so use the predefined patterns to glob for an include directory.\n elif env['PLATFORM'] == 'darwin':\n if not version:\n paths = [java_macos_include_dir_glob]\n else:\n paths = sorted(glob.glob(java_macos_version_include_dir_glob % version))\n else:\n base_paths = []\n if not version:\n for p in java_linux_include_dirs_glob:\n base_paths.extend(glob.glob(p))\n else:\n for p in java_linux_version_include_dirs_glob:\n base_paths.extend(glob.glob(p % version))\n\n paths = []\n for p in base_paths:\n paths.extend([p, os.path.join(p, 'linux')])\n\n return paths", "title": "" }, { "docid": "6d43c9bb8e39ea325ca978bfdc11ee30", "score": "0.5211544", "text": "def should_ignore_path(path):\n if os.path.sep == '\\\\':\n path = path.replace('\\\\', '/')\n for p in zf.config.site.compiled_file_ignore_patterns:\n if p.match(path):\n return True\n return False", "title": "" }, { "docid": "7c84fc181acee97f850df414484aa9bf", "score": "0.5206256", "text": "def IncludesToPaths(path):\n\n includeToPath = dict()\n prog = re.compile(r\"(itk.*\\.h)\")\n for root, dirs, files in os.walk(path):\n for f in files:\n if prog.match(f):\n includeFile = prog.findall(f)[0]\n parts = root.split(\"/\")\n module = parts[len(parts)-3] + parts[len(parts)-2]\n includeToPath[includeFile] = module\n return includeToPath", "title": "" }, { "docid": "5754fb1aa6908e385dd1f457cf113944", "score": "0.5200094", "text": "def skip_rewrite(self):\n return f'or path.endswith(\"lib_pypy{os.sep}__extensions__\") # PyPy2 built-in import marker'", "title": "" }, { "docid": "9ca9c8fba01d7a616a4e6a02c7fb96bf", "score": "0.5176367", "text": "def __populate_flags_source_paths(self):\n def expand_paths_in_flags_if_needed(flags):\n \"\"\"Expand paths in flags if they are present.\"\"\"\n from os import path\n new_flags = []\n for flag in flags:\n if '=' not in flag:\n new_flags.append(flag)\n continue\n split_flag = flag.split('=')\n prefix = split_flag[0].strip()\n value = split_flag[1].strip()\n expanded_values = self.__replace_wildcard_if_needed(\n value)\n if not expanded_values:\n continue\n joined_values = ';'.join(expanded_values)\n if path.isabs(expanded_values[0]):\n new_flags.append(\n prefix + '=\"' + joined_values + '\"')\n else:\n new_flags.append(prefix + '=' + joined_values)\n return new_flags\n\n if not self.flags_sources:\n 
log.critical(\" Cannot update paths of flag sources.\")\n return\n for idx, source_dict in enumerate(self.flags_sources):\n for option in SettingsStorage.FLAG_SOURCES_ENTRIES_WITH_PATHS:\n if option not in source_dict:\n continue\n if not source_dict[option]:\n continue\n if option == SettingsStorage.FLAGS_TAG:\n source_dict[option] = expand_paths_in_flags_if_needed(\n source_dict[option])\n else:\n source_dict[option] = self.__replace_wildcard_if_needed(\n source_dict[option])", "title": "" }, { "docid": "42f42e28991f2d618b0a6a8de7d39350", "score": "0.5174672", "text": "def _ExtendSysPath():\n paths = []\n for dir in SEARCH_DIRS:\n paths.append(dir)\n if _DEBUG:\n test_settings = os.path.join(dir, 'common_settings.py')\n if os.path.exists(test_settings):\n _Debug('%s exists' % test_settings)\n else:\n _Debug('%s does NOT exist' % test_settings)\n\n _AddToSysPath(paths)", "title": "" }, { "docid": "f2a1e787124642b11b2adee90b518b52", "score": "0.5174522", "text": "def freeze_includes():\n import py\n import _pytest\n\n result = list(_iter_all_modules(py))\n result += list(_iter_all_modules(_pytest))\n return result", "title": "" }, { "docid": "5cf86bb3cab5ab8941d31246be1218f7", "score": "0.51731855", "text": "def AdjustLibraries(self, libraries):\r\n libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]\r\n return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]", "title": "" }, { "docid": "2d834474de86083f2b5a357bf2adf231", "score": "0.5171284", "text": "def remove_unwanted_files(workdir, files):\n\n _files = []\n for _file in files:\n if not (workdir == _file or\n \"prmon\" in _file or\n \"/pilot\" in _file or\n \"/pandawnutil\" in _file or\n \"pilotlog\" in _file or\n \".lib.tgz\" in _file or\n \".py\" in _file or\n \"PoolFileCatalog\" in _file or\n \"setup.sh\" in _file or\n \"pandaJob\" in _file or\n \"runjob\" in _file or\n \"memory_\" in _file or\n \"mem.\" in _file or\n \"docs/\" in _file or\n \"DBRelease-\" in _file):\n _files.append(_file)\n\n return _files", "title": "" }, { "docid": "fcb2666edb07c1e44bdacecca911f9c0", "score": "0.5161872", "text": "def filter_paths(prefixes, files):\n\n result = []\n\n for f in files:\n for p in prefixes:\n if f[:len(p)] == p and os.path.isdir(p):\n found_p = True\n break\n else:\n found_p = not prefixes\n\n root, e = os.path.splitext(f)\n if e.lstrip('.').lower() in wanted_extensions:\n found_e = True\n else:\n found_e = not wanted_extensions\n\n if found_p and found_e and os.path.isfile(f):\n result.append(f)\n\n return result", "title": "" }, { "docid": "e0154c4c7dd717bb89cf0f82a30b2f78", "score": "0.51580226", "text": "def __check_file_paths(self, files):\n\n filtered_files = []\n for f in files:\n exclude_count, include_count = 0, 0\n for path in self.exclude_paths:\n if fnmatch.fnmatch(f, path):\n exclude_count += 1\n if len(self.include_paths) == 0:\n include_count = 1\n else:\n for path in self.include_paths:\n if fnmatch.fnmatch(f, path):\n include_count += 1\n if exclude_count == 0 and include_count >= 1:\n filtered_files.append(f)\n\n return filtered_files", "title": "" }, { "docid": "33bdd0acdc2544b2587e9ad43370f8be", "score": "0.5130593", "text": "def getLibraryPaths(self):\n return self.libraryPaths", "title": "" }, { "docid": "6980320b47abe35b30f888174b4b2ad7", "score": "0.5124029", "text": "def DefaultLibraryPaths( value = True):\n if not value:\n print \"DEFAULT_LIBRARY_PATHS :=\"\n print", "title": "" }, { "docid": "502157223c8bd499c70eefe58cc0f8c5", "score": "0.5123074", "text": "def 
copy_recurse(\n lib_path: Text,\n copy_filt_func: Optional[Callable[[Text], bool]] = None,\n copied_libs: Optional[Dict[Text, Dict[Text, Text]]] = None,\n) -> Dict[Text, Dict[Text, Text]]:\n warnings.warn(\n \"copy_recurse is obsolete and should no longer be called.\",\n DeprecationWarning,\n stacklevel=2,\n )\n if copied_libs is None:\n copied_libs = {}\n else:\n copied_libs = dict(copied_libs)\n done = False\n while not done:\n in_len = len(copied_libs)\n _copy_required(lib_path, copy_filt_func, copied_libs)\n done = len(copied_libs) == in_len\n return copied_libs", "title": "" }, { "docid": "fe1d1d2911e82b6d4c9847242e4f7a0c", "score": "0.5122119", "text": "def FindIncludeFiles(file_path):\n try:\n output_list = []\n lines = OpenFileAndDecodeLinesAndRemoveComments(file_path)\n for line in lines:\n # Remove c-style comments.\n if '#include' in line:\n line = re.sub('#include ', '', line).replace('\"', '')\n output_list.append(line)\n return output_list\n except IOError:\n print(' error while reading file ', file_path)", "title": "" }, { "docid": "724efb9e758606cd8fccd202f21565b0", "score": "0.51215273", "text": "def sanitize_build_args(build_args):\n\n safe_args = re.compile('--git-verbose|--git-upstream-tree=.*|--git-no-pristine-tar')\n p = re.compile('--git-.*|--hook-.*|--.*-hook=.*')\n\n gbp_args = [ arg for arg in build_args if safe_args.match(arg) ]\n dpkg_args = [ arg for arg in build_args if not p.match(arg) ]\n\n ignored_args = list(set(build_args) - set(gbp_args + dpkg_args))\n if ignored_args:\n logging.info(\"Ignoring build_args: %s\" % ignored_args)\n\n return gbp_args + dpkg_args", "title": "" }, { "docid": "e8616f8e228b0a03838e04380a1007fa", "score": "0.5120569", "text": "def get_path_extensions():\n module_repos = [\n 'utool',\n 'vtool_ibeis',\n 'guitool_ibeis',\n 'guitool_ibeis.__PYQT__',\n 'plottool_ibeis',\n 'pyrf',\n 'flann/src/python',\n #'pygist',\n 'ibeis',\n 'ibeis_cnn',\n 'pydarknet',\n 'hesaff',\n 'detecttools'\n ]\n pathex = ['.'] + [ join('..', repo) for repo in module_repos ]\n if APPLE:\n # We need to explicitly add the MacPorts and system Python site-packages folders on Mac\n pathex.append('/opt/local/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/')\n pathex.append('/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/')\n return pathex", "title": "" }, { "docid": "2667a5fb6531dfd1309cf88dab37c791", "score": "0.51190746", "text": "def build_dir_skipper(dirpath):\n return os.path.normpath(_BUILD_BASE) == os.path.normpath(dirpath)", "title": "" }, { "docid": "c26a599b6aab80d40cf46ff7daac8f30", "score": "0.51143306", "text": "def filter_subdirectories(self, root, dirs, files):\n return dirs, files", "title": "" }, { "docid": "fd6343adce21dfb486b9ba379646798a", "score": "0.5106337", "text": "def gen_paths_recurse(basepath,inc=[],exc = [],filetype = None):\r\n basepath=Path(basepath)\r\n str_to_contain=inc\r\n #str_to_contain.append(\"AG\") #Required for all mouse-level folders in the project.\r\n str_to_exclude=exc\r\n #str_to_exclude.append(\"exclude\")\r\n exc_allf=['~lock','._']\r\n output=[]\r\n for dirpath, dirnames, files in os.walk(basepath):\r\n cont_all=[i in dirpath for i in str_to_contain] #Boolean list of desired target strings\r\n allhit=sum(cont_all)==len(str_to_contain)\r\n ex_all=[i in dirpath for i in str_to_exclude] #Boolean list of undesired target strings\r\n fa=sum(ex_all) > 0\r\n # pdb.set_trace()\r\n if allhit==True and fa==False:\r\n if filetype != None:\r\n if '*' in filetype: #If wildcard 
present\r\n filetype = filetype.split('*')\r\n elif not isinstance(filetype,list):\r\n filetype=[filetype]\r\n for f in files:\r\n cont_allf=[i in f for i in filetype]\r\n file_hit=sum(cont_allf)==len(filetype)\r\n file_ex=sum([i in f for i in exc_allf]) > 0\r\n if file_hit==True and file_ex==False:\r\n output.append(Path(dirpath,f))\r\n else:\r\n output.append(Path(dirpath))\r\n if len(output)==0:\r\n print('Warning no paths found with those criteria!')\r\n else:\r\n if len(output)==1:\r\n output=output[0]\r\n\r\n return output", "title": "" }, { "docid": "c4291a986850246f67116f3c50e3450f", "score": "0.51041067", "text": "def filter_directories(dirs: t.List[str]) -> t.List[str]:\n\n # currently, we do not filter the directories. but you could filter the directory list like this:\n # return [dir for dir in dirs if not dir.lower().__contains__('atlas')]\n return dirs", "title": "" }, { "docid": "3cb41a2e4c10a45a4ac2afd959545a9e", "score": "0.51030684", "text": "def _get_search_paths():\n path_list = (os.environ.get('PATH') or os.defpath).split(os.pathsep)\n root = [sys.argv[0]]\n if os.path.islink(root[0]):\n # `pynq` is a symlink, so include the actual path\n root.append(os.path.realpath(root[0]))\n for r in root:\n scripts_dir = os.path.dirname(r)\n if os.path.isdir(scripts_dir) and os.access(r, os.X_OK):\n # make sure the `pynq` dir is included\n path_list.insert(0, scripts_dir)\n return path_list", "title": "" }, { "docid": "3c0191a122523e0eb02fa2ac47e38486", "score": "0.5090566", "text": "def visit_path(path: str, visit_cython: bool = False) -> dict:\n visited_modules_by_path = {}\n for (current_path, directory_names, filenames) in walk(path):\n if filenames and os.path.split(current_path)[-1] not in ginit.FOLDERS_TO_IGNORE:\n _set_dict_value_from_keys(visited_modules_by_path,\n current_path.split(os.path.sep),\n visit_files(parent_path=current_path,\n files=filenames,\n is_visiting_cython=visit_cython))\n return visited_modules_by_path", "title": "" }, { "docid": "173491af01b2af90eec1465df21b4c99", "score": "0.50847644", "text": "def get_libs_for_example(example_path):\n found_libs = set()\n found_imports = []\n found_imports = findimports.find_imports(example_path)\n\n for cur_import in found_imports:\n cur_lib = cur_import.name.split(\".\")[0]\n if cur_lib in bundle_data:\n found_libs.add(cur_lib)\n\n return found_libs", "title": "" }, { "docid": "53a0f72f833cceb419afd0fc6a2e2236", "score": "0.50647116", "text": "def add_include_paths(self, paths):\n for path in paths:\n self.add_include_path(path)", "title": "" }, { "docid": "3efc5543c812bd264cd7f36aa7bc9ac9", "score": "0.50597495", "text": "def find_packages(*args, **kwrds):\n from fnmatch import fnmatch\n excludes = kwrds.get('exclude', [])\n inc_base = kwrds.get('inc_base', False)\n pkgs = {}\n for base_path in args:\n for root, _dirs, files in os.walk(base_path):\n if '__init__.py' in files:\n assert root.startswith(base_path)\n pkg = root[len(base_path)+1:].replace(os.sep, '.')\n if inc_base and pkg:\n pkg = os.path.join(base_path, pkg).replace(os.sep, '.')\n elif inc_base:\n pkg = base_path.replace(os.sep, '.')\n pkgs[pkg] = root\n\n result = pkgs.keys()\n for excl in excludes:\n # We exclude packages that *begin* with an exclude pattern.\n result = [pkg for pkg in result if not fnmatch(pkg, excl + \"*\")]\n result.sort()\n return result", "title": "" }, { "docid": "036cd94b69c038c3600ddd580417199a", "score": "0.50529325", "text": "def in_include_directory(self, directory):\n return any(fnmatch.fnmatch(directory, filter)\n 
for filter in self.include_folders)", "title": "" }, { "docid": "7453ae6311f4f07efde988e1520bc077", "score": "0.50506437", "text": "def get_ld_lib_paths(self):\n ld_lib_paths = []\n if self.source_dist:\n ld_lib_paths = [ os.path.join(self.basedir,\"libmysql/.libs/\")\n , os.path.join(self.basedir,\"libmysql_r/.libs\")\n , os.path.join(self.basedir,\"zlib/.libs\")\n ]\n else:\n ld_lib_paths = [ os.path.join(self.basedir,\"lib\")\n , os.path.join(self.basedir,\"lib/mysql\")]\n return ld_lib_paths", "title": "" }, { "docid": "784b0633097c91a44d157d42b56d9558", "score": "0.5037091", "text": "def _hardcoded_includes(self):\r\n\t\tlog(\"_hardcoded_includes()\")\r\n\t\tchanged = False\r\n\t\t# add if not already included\r\n\t\tsrcList = [ \"skin\\\\\", \"screensavers\\\\\", \"scripts\\\\\", \"plugins\\\\video\", \"plugins\\\\pictures\", \\\r\n\t\t\t\t\t\"plugins\\\\music\", \"plugins\\\\programs\", \"system\\\\profiles.xml\" ]\r\n\t\t# ensure hardcoded in includes\r\n\t\tfor src in srcList:\r\n\t\t\tif src not in self.includes:\r\n\t\t\t\tself.includes.append(src)\r\n\t\t\t\tchanged = True\r\n\r\n\t\treturn changed", "title": "" }, { "docid": "85127a3cc09d8877f4f7a38a6c0baac4", "score": "0.50368124", "text": "def _populateFiles( self ):\n self.includeDir( self.details.topLevelDir )\n\n for path in self.excludeDirs:\n self.excludeDir( path )", "title": "" }, { "docid": "35e9219f4a74df46a887fb37ee4a3302", "score": "0.5023457", "text": "def get_includes(self, includes: OCDFrozenSet[str]) -> OCDFrozenSet[str]:\n return includes - self.nonexistent_path_flags(includes) # type: ignore", "title": "" }, { "docid": "91bc9f2b861d4b4177732f666e5d993b", "score": "0.5016718", "text": "def filter_modules(self, opts):\n\n # all default modules out of modules/ dir\n def_mods = [\n x.replace('/','.').replace('src.','') + '.default' \\\n for x in glob.glob('src/modules/**') if not '__' in x\n ]\n tmp = []\n\n # add/remove chosen module by user\n if 'in_modules' in opts['modules'] and opts['modules']['in_modules']:\n # list with default modules (force) + user chosed modules to import\n self.mods = def_mods\n for key, val in opts['modules']['in_modules'].items():\n for v in val:\n self.mods.append(f'modules.{key}.{v}')\n elif 'ex_modules' in opts['modules'] and opts['modules']['ex_modules']:\n for key, val in opts['modules']['ex_modules'].items():\n for v in val:\n if v == 'default':\n self.log('mod_default', _type='err', end='\\n')\n tmp.append(f'modules.{key}.{v}')\n for i in self.mods:\n if i in tmp:\n self.mods.remove(i)\n\n return", "title": "" }, { "docid": "11cd1628292f593b7d1bb2deaa3f2ee3", "score": "0.50114524", "text": "def ExcludeFilter(pathnames):\n if isinstance(pathnames, basestring):\n pathnames = [pathnames]\n else:\n pathnames = list(pathnames)\n\n def filter_fun(rel_file, abs_file):\n for pathname in pathnames:\n if fnmatch(rel_file, pathname):\n return False\n return True\n return IncludeExcludeFilter(filter_fun)", "title": "" }, { "docid": "a7320fc97d16ad0d93f150d91019b881", "score": "0.50009495", "text": "def _restrict_filepaths(cfg, iter_filepaths):\n restriction = cfg['scope']['restriction']\n if not restriction:\n for filepath in iter_filepaths:\n yield filepath\n else:\n logging.info('Build restriction: %s', restriction)\n dirpath_isolated_src = cfg['paths']['dirpath_isolated_src']\n for filepath in iter_filepaths:\n relpath = os.path.relpath(filepath, dirpath_isolated_src)\n if da.bldcfg.bldcfg.is_in_restricted_build(relpath, restriction):\n yield filepath", "title": "" }, { "docid": 
"7617482c0fb7b9993910d7d79440370d", "score": "0.50003964", "text": "def get_includes(self):\n if os.path.exists(self.includes_path):\n return utils.GetFiles(self.includes_path, '.h')\n\n return []", "title": "" }, { "docid": "9008d06ebd64a4d957fbd251c6f1f42c", "score": "0.49988618", "text": "def set_paths(pythonpath=\"\"):\n if not pythonpath:\n return\n paths = pythonpath.split(\":\")\n for p in paths:\n abspath = path.abspath(p)\n if abspath not in sys.path:\n sys.path.append(abspath)", "title": "" }, { "docid": "2ace4220eda1f314b49c094c04ca1d4e", "score": "0.49948108", "text": "def link_dest(self):\n if hasattr(self.pkg, \"libs\") and self.pkg.libs:\n pkg_libs = set(self.pkg.libs.directories)\n else:\n pkg_libs = set((self.pkg.prefix.lib, self.pkg.prefix.lib64))\n\n return pkg_libs | set([self.pkg.prefix.bin]) | self.internal_links", "title": "" }, { "docid": "cca4032d7300deb52e8fe9639cbedecf", "score": "0.4991602", "text": "def extra_paths(paths, start=True):\n\n # allow passing a single path or\n # a list of them\n if isinstance(paths, str):\n paths = paths.split()\n\n # On enter, add paths to both sys.path,\n # and the PYTHONPATH env var, so that subprocesses\n # can see it,\n # either the start or the end depending\n # on the start argument\n for path in paths:\n if start:\n sys.path.insert(0, path)\n else:\n sys.path.append(path)\n\n add_python_path(path, start)\n\n # Return control to caller\n try:\n yield\n # On exit, remove the paths\n finally:\n for path in paths:\n try:\n if start:\n sys.path.remove(path)\n else:\n remove_last(sys.path, path)\n # also remove env var entry\n remove_python_path(path, start)\n # If e.g. user has already done this\n # manually for some reason then just\n # skip\n except ValueError:\n pass", "title": "" } ]
f28a0bf064166e081a6aed00ad1adb70
gets the error description.
[ { "docid": "c92f86333d8f466655e6fadcd3fd7bfb", "score": "0.0", "text": "def description(self):\n\n return self._description", "title": "" } ]
[ { "docid": "767454acd36f8fd0da54896b02fb8955", "score": "0.8973676", "text": "def GetErrorDescription(self):", "title": "" }, { "docid": "de2e22bc4e4f9719fe97e9a1c1742446", "score": "0.8318775", "text": "def getErrorDesc(self):\n return self._error_desc", "title": "" }, { "docid": "483e9e58b3f43942f7c190df2477c3aa", "score": "0.81379443", "text": "def error_description(self):\n\n return self._error_description", "title": "" }, { "docid": "9f3d191b3db88ccb03519b27c47925d3", "score": "0.81085783", "text": "def errdesc(self):\n return self._errdesc", "title": "" }, { "docid": "ef6c3fe741d19083abdc2940071ccc12", "score": "0.7525137", "text": "def get_error_desc(self, code):\n for desc, byte_code in message_utils.error_codes.iteritems():\n if code == byte_code:\n return desc", "title": "" }, { "docid": "e8029d9f7453e3be8c16feb7dc6b10e3", "score": "0.7522826", "text": "def get_message(self):\n return self.error_string", "title": "" }, { "docid": "f917ca4af632632cc3f1f5ec185d6dd7", "score": "0.7468542", "text": "def ErrorText(self) -> str:", "title": "" }, { "docid": "3b201317dfb84bd3d5fc179be9e3976d", "score": "0.7463791", "text": "def get_error_msg(self):\n return self.error_msg", "title": "" }, { "docid": "fffede281b6892b585a85d8a4c827e54", "score": "0.7441207", "text": "def error_path_desc(self):\n return self._error_path_desc", "title": "" }, { "docid": "432559a6050b5f488b8ded9074fe0e6c", "score": "0.7412219", "text": "def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "04070d22f3d418dbe28ce8cb3350fbae", "score": "0.7369774", "text": "def describe_error(self, error):\n msg = {'command': 'describe_error',\n 'error': error}\n self._send_dict(msg)\n return_vals = self._recv_dict()\n return return_vals['msg']", "title": "" }, { "docid": "09500cc70b657ae73c13700e438e4ed0", "score": "0.721358", "text": "def describe_error(self, task, error):\n return '''\nThe ROVER %s (PID %d) task on %s has failed with the error:\n\n %s\n (%s)\n''' % (task, getpid(), gethostname(), error, error.__class__.__name__)", "title": "" }, { "docid": "b54dab6035dd76dcd866a2ff545bbf71", "score": "0.7209201", "text": "def errorString(self):\n return self._errorString", "title": "" }, { "docid": "7eb555225f501f5782be5c6b267f4886", "score": "0.7157053", "text": "def get_error(self):", "title": "" }, { "docid": "c8e5f29072e83007eb428eb55d9aeb1b", "score": "0.7151718", "text": "def error_message(self):\n return self._error_message", "title": "" }, { "docid": "ca27f3da396eb1ac26aa94df932a51f9", "score": "0.71441114", "text": "def error_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "f45bf97aa0a6902d5fa7d93fe5bc8993", "score": "0.70984524", "text": "def getErrorMessage(self) -> unicode:\n ...", "title": "" }, { "docid": "605b78cf4a83ab7c0170a14786642261", "score": "0.7072601", "text": "def __str__(self):\n return 'error name: %s \\nerror code: %s \\nerror description: %s' % (self._name, self._error_code, self._error_desc)", "title": "" }, { "docid": "ecdfe4d12ace448ebf22a56b6d191476", "score": "0.706532", "text": "def error_message(self):\n return self.container['error_message']", "title": "" }, { "docid": "72612f317cf96fa4f7888545d8f31751", "score": "0.7058259", "text": "def __repr__(self):\n return 'error name: %s \\nerror code: %s \\nerror description: %s' % (self._name, self._error_code, self._error_desc)", "title": "" }, { "docid": "4ff86de89b6b5ec76d5b150c56eae1b0", "score": "0.7021829", "text": "def 
ErrorMessage(self) -> str:", "title": "" }, { "docid": "4ff86de89b6b5ec76d5b150c56eae1b0", "score": "0.7021187", "text": "def ErrorMessage(self) -> str:", "title": "" }, { "docid": "d9ed70ace1e46139620b4afd65db473c", "score": "0.7003444", "text": "def error_title(self):\n return self.container['error_title']", "title": "" }, { "docid": "b1671f0fb35e404cfc3ae19108ebb06e", "score": "0.6967113", "text": "def message(self):\n if self.errors:\n return self.errors[0].message", "title": "" }, { "docid": "55ff88cada9505d3fa1781ca1530521f", "score": "0.69348806", "text": "def _format_description(self, msg):\n\n error_data = {'failure_desc': {'msg': msg}}\n return error_data", "title": "" }, { "docid": "7287087bc53df83a1b72dfdfd3de67d1", "score": "0.69183785", "text": "def error_details(self):\n # type: () -> Optional[str]\n return self._error_details", "title": "" }, { "docid": "fa9425c8cec9e95a7430306294f37166", "score": "0.6911611", "text": "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "fa9425c8cec9e95a7430306294f37166", "score": "0.6911611", "text": "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "fa9425c8cec9e95a7430306294f37166", "score": "0.6911611", "text": "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "fa9425c8cec9e95a7430306294f37166", "score": "0.6911611", "text": "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "fa9425c8cec9e95a7430306294f37166", "score": "0.6911611", "text": "def error_message(self) -> Optional[str]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "7b97533708daf921a8d5e8cfe3437bf2", "score": "0.68962425", "text": "def getDescription(self):\n # type: () -> str\n return 'No information available'", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.6877766", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.6877766", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.6877766", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def 
Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "e954c84316b760108eaffdc1cdb2d7db", "score": "0.68777335", "text": "def Description(self) -> str:", "title": "" }, { "docid": "14e177e15c17aa4c06aa0ca1abcfc4f0", "score": "0.68691534", "text": "def get_description(self) -> str:\n pass", "title": "" }, { "docid": "97ae39a2733bcdad8ad69e0b1358ff7d", "score": "0.68620324", "text": "def error(self) -> str:\n return lookup_reference(\"Error\", self.data, self.dishwasher)", "title": "" }, { "docid": "b7d8e05470c7187221833aa4c897be35", "score": "0.68481445", "text": "def description():", "title": "" }, { "docid": "b7d8e05470c7187221833aa4c897be35", "score": "0.68481445", "text": "def description():", "title": "" }, { "docid": "9a99fe9ae10f984413b869389cff1fc1", "score": "0.684602", "text": "def __str__(self):\n return self.errorMessage", "title": "" }, { "docid": "59cfc2942df83155371023b17749c75d", "score": "0.6819935", "text": "def getDescription(self) -> unicode:\n ...", "title": "" }, { "docid": "59cfc2942df83155371023b17749c75d", "score": "0.6819935", "text": "def getDescription(self) -> unicode:\n ...", "title": "" }, { "docid": "5f820317be384654febeec72712f9195", "score": "0.68098056", "text": "def _build_error_message(self):\n # The error message is built from the return value of the criteria\n # matcher so this method is not needed.\n pass", "title": "" }, { "docid": "d8e3ce003d991ad542217030a19cccf6", "score": "0.67945963", "text": "def _error_message(self):\n msg = \"\"\n if hasattr(self, \"_error_log\"):\n msg = \"\\n\".join(line for line in self._error_log if \"[ERROR]\" in line)\n return msg", "title": "" }, { "docid": "75420c11a072d9ac790084172d5b142f", "score": "0.67860466", "text": "def get_description(self):\n pass", "title": "" }, { "docid": "75420c11a072d9ac790084172d5b142f", "score": "0.67860466", "text": "def get_description(self):\n pass", "title": "" }, { "docid": "6f41de318c8930dff7193fd066b20877", "score": "0.677522", "text": "def description(self) -> str:", "title": "" }, { "docid": "e173a21661e42418fdacdc12e4669380", "score": "0.6755933", "text": "def GetDescription(self):", "title": "" }, { "docid": "e173a21661e42418fdacdc12e4669380", "score": "0.6755933", "text": "def GetDescription(self):", "title": "" }, { "docid": "54b96ad8a611e15c57e455b6420b5dbe", "score": "0.6740561", "text": "def error_message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"error_message\")", "title": "" }, { "docid": "3cc8f64deb9a6cdb55a5de0d60505774", "score": "0.6718605", "text": "def error(self) -> Optional[str]:\n return self.__error", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", 
"text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "f85a8712d1165b4c572a50e65a2d6f7f", "score": "0.66819704", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "d5bc05c93104843bf87dd7ea3b5a4d61", "score": "0.6679478", "text": "def error_inner_message(self) -> str:\n return pulumi.get(self, \"error_inner_message\")", "title": "" }, { "docid": "5d248d5295d892970d92315b5cce5680", "score": "0.66734785", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "5d248d5295d892970d92315b5cce5680", "score": "0.66734785", "text": "def _get_description(self):\n return self.__description", "title": "" }, { "docid": "da71bf3d8e63e21cae9cce29c6bb7600", "score": "0.66545165", "text": "def description(self):\n try:\n return self.props['description']\n except KeyError:\n return self.source[:50].strip() + u'...'", "title": "" }, { "docid": "d3bdde09cbb3309a60de977c92dfcbb2", "score": "0.6646677", "text": "def DPxGetErrorString():\n return GetErrorString()", "title": "" }, { "docid": "6f008a66d50abbc5367d2dec3d64158a", "score": "0.6630736", "text": "def get_error(self) -> Union[str, None]:\n return self._error", "title": "" }, { "docid": "537c29117ad8191f3f1b1ac59d927553", "score": "0.6615639", "text": "def get_description(self):\n raise NotImplementedError()", "title": "" }, { "docid": "97f89ac619ca1e2a710272a9099e0fc6", "score": "0.6610755", "text": "def get_description(self):\r\n raise NotImplementedError()", "title": "" }, { "docid": "9011d2e9bbf23d4e9e29c0d1ca028f23", "score": "0.65818226", "text": "def getErrorMessage(self):\r\n return str(self.value)", "title": "" }, { "docid": "65c68628661e916e15c037b2554b3cb8", "score": "0.6581161", "text": "def error_detail(self):\r\n return self._response['errorDetail']", "title": "" }, { "docid": "40a641e90802e737b45a160778a33108", "score": "0.65658283", "text": "def get_description(self):\n return None", "title": "" }, { "docid": "26b718716662c81305a5567269d1651b", "score": "0.6563578", "text": "def get_description(self):\r\n return self.description", "title": "" }, { "docid": "26b718716662c81305a5567269d1651b", "score": "0.6563578", "text": "def get_description(self):\r\n return self.description", "title": "" }, { "docid": "7ad2c75ac9f07c0809670f0453483e56", "score": "0.6557066", "text": "def _getErrorInfo(self):\n tArr=inspect.trace()[-1]\n fileName=tArr[1]\n lineNo=tArr[2]\n exc_obj=sys.exc_info()[1]\n s='%s:%s > %s'%(fileName, lineNo, exc_obj)\n sys.exc_clear()\n return s", "title": "" }, { "docid": "06dc59442eb092e757659297c729ea98", "score": "0.6556619", "text": "def _error_message(self):\n message = \"Command {cmd} returned code {code}.\\n\"\n message += \"############\\nstdout: {stdout}\\n\"\n message += \"############\\nstderr: {stderr}\"\n\n return message.format(\n cmd=self.command, code=self.code, stdout=self.stdout, stderr=self.stderr\n )", "title": "" }, { "docid": "db6a34a13584cd7a68c8606a05e845c2", "score": "0.6537228", "text": "def get_description(self):\n 
return(self.DESCRIPTION)", "title": "" }, { "docid": "d2385ca71f53c336e0e3e76675c70df3", "score": "0.65341395", "text": "def _exc_info_to_string(self, err, test):\n return err", "title": "" }, { "docid": "c263243d9c10704feb2d29fef9775e94", "score": "0.6526362", "text": "def description(self):\n pass", "title": "" }, { "docid": "4234e3868969bcb3053cb5aaf4a69cd4", "score": "0.65223426", "text": "def getErrorResult():", "title": "" }, { "docid": "805c4ab0027cc1254520404385b7ed46", "score": "0.6519948", "text": "def description(self):", "title": "" }, { "docid": "805c4ab0027cc1254520404385b7ed46", "score": "0.6519948", "text": "def description(self):", "title": "" }, { "docid": "a408ca57fa7a281e66f36ed7f1eef274", "score": "0.65197057", "text": "def get_error_message(self):\n try:\n return api.Api.get_error_message(self._session)\n except exceptions.UnknownException:\n return \"<error message irrecoverable>\"", "title": "" }, { "docid": "9adaca1f10a3dbb0eb14085f7c0aca6c", "score": "0.65187854", "text": "def get_error_desc(field, name, value):\n error_desc = 'Bad \\'{0}\\':\\'{1}\\' argument format.'.format(name, value)\n if isinstance(field, fields.Integer):\n try:\n int(value)\n msg_allow_zero = 'or equal to ' if field.validate.min < 1 else ''\n error_desc = 'Param \\'{0}\\':\\'{1}\\' must be greater than {2}0' \\\n .format(name, value, msg_allow_zero)\n except ValueError:\n error_desc = 'Bad \\'{0}\\':\\'{1}\\' argument format. Accepts only integer'.format(name, value)\n if isinstance(field, fields.Boolean):\n allowed_values = ['0', '1', 'true', 'false']\n error_desc = 'Bad \\'{0}\\':\\'{1}\\' argument format. Accepts only one of {2}'\\\n .format(name, value, allowed_values)\n if isinstance(field, fields.String) and isinstance(field.validate, validate.OneOf):\n error_desc = 'Bad \\'{0}\\':\\'{1}\\' argument format. Accepts only one of {2}'\\\n .format(name, value, field.validate.choices)\n if isinstance(field, fields.DateTime):\n dateformat = 'YYYYMMDD' if field.dateformat == '%Y%m%d' else field.dateformat\n error_desc = 'Bad \\'{0}\\':\\'{1}\\' argument format. Date must be in \\'{2}\\' format.'\\\n .format(name, value, dateformat)\n return error_desc", "title": "" }, { "docid": "9496346e4b7c7aa8e93c287845d4460c", "score": "0.6503607", "text": "def Description(self):\n pass", "title": "" }, { "docid": "9496346e4b7c7aa8e93c287845d4460c", "score": "0.6503607", "text": "def Description(self):\n pass", "title": "" }, { "docid": "d43725edfdeccb89e4abbd16cde4ed2f", "score": "0.64958173", "text": "def _get_error(self):\n return self.__error", "title": "" }, { "docid": "d43725edfdeccb89e4abbd16cde4ed2f", "score": "0.64958173", "text": "def _get_error(self):\n return self.__error", "title": "" }, { "docid": "d43725edfdeccb89e4abbd16cde4ed2f", "score": "0.64958173", "text": "def _get_error(self):\n return self.__error", "title": "" } ]
76ea43e2f89fb0d37f713ca3bca57611
run all contrasts through model
[ { "docid": "52d4f53a0f3b5e9acc36a42a2fc4edf0", "score": "0.6126068", "text": "def run(self, contrast, do_pmap = False):\n # Create contrast Volume\n data = self.weights.T.dot(contrast.vector)\n # print data[self._voxels_predicted]\n data[self.voxels_predicted] = npp.rs(data[self.voxels_predicted])\n # print data[self.voxels_predicted]\n # data[np.isnan(data)] = 0\n # print data[self.voxels_predicted]\n # contrast.data[contrast.data==0] = np.nan\n # data = np.nan_to_num(data)\n data[self.voxels_predicted==False] = np.nan\n contrast_data = ContrastData(np.nan_to_num(data),\n # data,\n self.pycortex_surface,\n self.pycortex_transform,\n vmin=-np.abs(np.nan_to_num(data)).max(),\n vmax= np.abs(np.nan_to_num(data)).max(),\n contrast = contrast,\n func_to_mni = self.func_to_mni,\n ref_to_subject = self)\n\n # Run analyses\n\tif isinstance(self.analyses[0], (list)):\n if do_pmap:\n results = [analysis(contrast_data) for analysis in self.analyses[1]]\n else:\n results = [analysis(contrast_data) for analysis in self.analyses[0]]\n else:\n results = [analysis(contrast_data) for analysis in self.analyses]\n return [self.make_output(results),contrast_data]", "title": "" } ]
[ { "docid": "981441a65f23576f435f647a9700a877", "score": "0.61060727", "text": "def contrastive_step(self, data):\n if self.step_id % self.report_interval == 0:\n self.visualize(data)\n\n self.optimizer.zero_grad()\n data = to_device(data, self.device)\n make_differentiable(data)\n args = self.run_networks(data)\n loss_val = self.loss(*args)\n\n self.log_statistics(loss_val, name=\"total loss\")\n\n loss_val.backward()\n self.optimizer.step()", "title": "" }, { "docid": "99f723147c126bf42b38086263e21505", "score": "0.6083566", "text": "def contrast_call(self, *args):\n self.contrast = self.contrast_entry.get()", "title": "" }, { "docid": "e0347b522218bffccc5da2530cd88757", "score": "0.5959876", "text": "def contrast(self, value):", "title": "" }, { "docid": "c8936b3470d74f4a6e3d676dfdf59ddb", "score": "0.589329", "text": "def contrast(self, x):\n # Two independent augmentation --> two views\n view1, view2 = self.augment(x), self.augment(x)\n \n # Get online embeddings\n onlineProjection1 = self.onlineEncoder(view1)\n onlineProjection2 = self.onlineEncoder(view2)\n \n # Get target embeddings\n with torch.no_grad():\n targetProjection1 = self.targetEncoder(view1)\n targetProjection2 = self.targetEncoder(view2)\n\n # Get predictions\n onlinePred1 = self.onlinePredictor(onlineProjection1)\n onlinePred2 = self.onlinePredictor(onlineProjection2)\n \n # Calculate loss terms\n loss1 = partLoss(onlinePred1, targetProjection2.detach())\n loss2 = partLoss(onlinePred2, targetProjection1.detach())\n loss = loss1 + loss2\n \n return loss.mean()", "title": "" }, { "docid": "112952c16414513133cadcef000c8264", "score": "0.5853694", "text": "def transform(self, results):\n if self.random_disable():\n return results\n\n img = results['img']\n img_contrasted = mmcv.auto_contrast(img)\n results['img'] = img_contrasted.astype(img.dtype)\n\n return results", "title": "" }, { "docid": "bd9ae09a676668f50012a2e988e8f45c", "score": "0.5849228", "text": "def eval():\n for model_name in imagenet_benchmark_zoo_model_names():\n print('Model: ' + model_name)\n\n # load the model and data\n model, dataset_normalized, dataset_original, preprocessing, num_classes, bounds = imagenet_benchmark_zoo(model_name)\n\n # wrap the model with EvalDNN\n measure_model = MXNetModel(model)\n\n # evaluate the top-1 and top-5 accuracy of the model\n accuracy = Accuracy()\n measure_model.predict(dataset_normalized, [accuracy.update, accuracy.report])\n\n # evaluate the neuron coverage of the model\n neuron_coverage = NeuronCoverage()\n measure_model.intermediate_layer_outputs(dataset_normalized, [neuron_coverage.update, neuron_coverage.report])\n\n # evaluate the robustness of the model\n robustness = Robustness(bounds)\n robustness.set_descriptions(dataset_original.filenames)\n robustness.set_save_dir(common.user_home_dir() + '/EvalDNN-adversarial-attack/mxnet/imagenet/' + model_name + '/FGSM')\n measure_model.adversarial_attack(dataset_original,\n bounds,\n num_classes,\n [robustness.update, robustness.save, robustness.report],\n attack=foolbox.attacks.FGSM,\n distance=foolbox.distances.Linf,\n preprocessing=preprocessing)\n\n # evaluate the robustness of the model\n robustness = Robustness(bounds)\n robustness.set_descriptions(dataset_original.filenames)\n robustness.set_save_dir(common.user_home_dir() + '/EvalDNN-adversarial-attack/mxnet/imagenet/' + model_name + '/BIM')\n measure_model.adversarial_attack(dataset_original,\n bounds,\n num_classes,\n [robustness.update, robustness.save, robustness.report],\n 
attack=foolbox.attacks.BIM,\n distance=foolbox.distances.Linf,\n preprocessing=preprocessing)\n\n # evaluate the robustness of the model\n robustness = Robustness(bounds)\n robustness.set_descriptions(dataset_original.filenames)\n robustness.set_save_dir(common.user_home_dir() + '/EvalDNN-adversarial-attack/mxnet/imagenet/' + model_name + '/DeepFoolAttack')\n measure_model.adversarial_attack(dataset_original,\n bounds,\n num_classes,\n [robustness.update, robustness.save, robustness.report],\n attack=foolbox.attacks.DeepFoolAttack,\n distance=foolbox.distances.MSE,\n preprocessing=preprocessing)", "title": "" }, { "docid": "c9ea80dab601c0c73f908bb54a55e4d9", "score": "0.5790746", "text": "def train_Contrastive(args, model, model_head, device, train_loader, optimizer, epoch, num_classes):\n\n batch_time = AverageMeter()\n train_loss = AverageMeter()\n\n # switch to train mode\n model.train()\n model_head.train()\n end = time.time()\n\n for ex1, ex2, labels, index in train_loader:\n\n ex1, ex2, labels, index = ex1.to(device), ex2.to(device), labels.to(device), index.to(device)\n # note ex1 and ex2 are batches of examples\n bsz = ex1.shape[0]\n\n # get the encoders' output (h_i, h_j), which feed the projection head\n if args['learn']['pretrained']:\n # model downloaded is less flexible\n embedi = model(ex1)\n embedj = model(ex2)\n else:\n # typical case: we extract embeddings before the fc layer\n _, embedi = model(ex1)\n _, embedj = model(ex2)\n\n # get metric embeddings that feed the loss (z_i, z_j)\n z_i = model_head(embedi)\n z_j = model_head(embedj)\n\n # concatenate the two batches: from N to 2N\n zs = torch.cat([z_i, z_j], dim=0)\n\n # Compute logits (pairwise comparison + temperature normalization)\n # embeddings are L2 normalized within the MLP, hence this dot product yields cosine similarity\n pairwise_comp = torch.div(torch.matmul(zs, zs.t()), args[\"learn\"][\"temp\"])\n logits = pairwise_comp\n\n # Unsupervised: one only positive by augmentation\n mask = torch.eye(bsz, dtype=torch.float32).to(device)\n\n # Positives mask: 3 parallel diagonals marking the same patches AND positives by augment\n mask = mask.repeat(2, 2)\n\n # Mask-out self-contrast cases\n # logits mask (all elements are one, except main diagonal with zeros, i.e. self-contrast)\n logits_mask = (torch.ones_like(mask) - torch.eye(2 * bsz).to(device))\n # Final positives mask after zeroing the diagonal, i.e. self-contrast cases\n mask = mask * logits_mask\n # only left are the positives by augmentation (one per example): 2 parallel non-main diagonals\n\n # Compute log_prob for 2N-1 pairs. This is the denominator\n # in SimCLR: all negs (2N-2) and the only pos by augmentation\n exp_logits_den = torch.exp(logits) * logits_mask # remove diagonal (self-contrast)\n exp_logits_den = torch.log(exp_logits_den.sum(1, keepdim=True) + 1e-10)\n # we sum each slice of exp_logits_den: sim of all the negs in the denominator for a given pair of positives (i,j)\n\n # Numerator: compute log_prob for positives (not yet masked)\n exp_logits_pos = torch.log(torch.exp(logits) + 1e-10)\n\n # log_prob is a subtraction after log(.) 
(for all samples, needs masking)\n log_prob = exp_logits_pos - exp_logits_den\n\n # Compute mean of log-likelihood over positives\n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)\n loss = - mean_log_prob_pos\n loss = loss.view(2, bsz)\n loss = loss.mean()\n\n train_loss.update(loss.item(), 2*bsz)\n\n # compute gradient and do opt step\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n return train_loss.avg, 0.0, 0.0, batch_time.sum", "title": "" }, { "docid": "87f7d5e1b0722451ac5c578b49000e74", "score": "0.5765049", "text": "def exercise_all_models(llobj):\n for model, attr in CML_MODELS.items():\n GC.conf['loglab']['model'] = model\n GC.conf['loglab']['window_size'] = attr['window_size']\n GC.conf['loglab']['weight'] = attr['weight']\n GC.conf['loglab']['feature'] = attr['feature']\n GC.conf['loglab']['cover'] = attr['cover']\n\n if GC.conf['general']['training']:\n llobj.train()\n else:\n llobj.predict()", "title": "" }, { "docid": "564f31b44b94e69844de0151039d280e", "score": "0.5750634", "text": "def run_cv(self):\n # We first setup your model\n self.setup()\n # For each fold\n for i in range(self.nfold):\n # Retrieve data\n train_df = self.train_df_list[i]\n dev_df = self.dev_df_list[i]\n # Separate features and labels in train set\n features,labels = self.separate_data(train_df)\n # We will be training your model here\n # You should update your model in train method\n self.train(features,labels)\n\n # Separate features and labels in dev (test) set\n features,labels = self.separate_data(dev_df)\n # This is where we test your trained model\n # Make sure you return the predictions as pandas.core.series.Series\n guess_labels = self.predict(features)\n\n # Compute test result\n this_result = Stats(labels,guess_labels)\n # Store this result\n self.results.append(this_result)\n\n return self.results", "title": "" }, { "docid": "24122a0324794bc6d85e1fac0740543d", "score": "0.57440543", "text": "def perform_contrast(img):\n enhancer = ImageEnhance.Contrast(img)\n factor = 1.3 # increase contrast\n img = enhancer.enhance(factor)\n return img", "title": "" }, { "docid": "4f978d58ed098f21e4c7d3421afeff75", "score": "0.5702064", "text": "def contrastive_single(phase,inference,dataloaders,model,optimizer,device,weighted_sampling,epoch_count=None,new_task_epochs=None,trial=None,save_path_dir=None): #b/c it is single, models_list contains one model only\n running_loss = 0.0\n outputs_list = []\n labels_list = []\n modality_list = []\n indices_list = []\n task_names_list = []\n pids_list = []\n batch_num = 0\n batch = 0\n for inputs,labels,pids,modality,task_names,indices in tqdm(dataloaders[phase]):\n batch += 1\n \"\"\" Send Data to Device \"\"\"\n inputs = inputs.to(device)\n labels = labels.to(device)\n \n with torch.set_grad_enabled('train1' in phase):# and inference == False): #('train' in phase and inference == False)\n outputs = model(inputs) #(BxHx2) in CPPC, (BxHx12) in CMLC, (BxHx24) in CMSMLC\n\n loss = obtain_contrastive_loss(outputs,pids,trial)\n \n \"\"\" Backpropagation and Update Step \"\"\"\n if phase == 'train1': #only perform backprop for train1 phase \n loss.backward()\n #for param in model.parameters():\n # print(param.grad)\n \n \"\"\" Network Parameters \"\"\"\n if isinstance(optimizer,tuple):\n optimizer[0].step()\n \"\"\" Task-Instance Parameters \"\"\"\n optimizer[1].step() \n optimizer[0].zero_grad()\n optimizer[1].zero_grad()\n else:\n optimizer.step()\n 
optimizer.zero_grad()\n \n \"\"\" Calculate Metrics \"\"\"\n running_loss += loss.item() * inputs.shape[0]\n if labels.data.dtype != torch.long:\n labels.data = labels.data.type(torch.long)\n\n outputs_list.append(outputs.cpu().detach().numpy())\n labels_list.append(labels.cpu().detach().numpy())\n modality_list.append(modality)\n indices_list.append(indices)\n task_names_list.append(task_names)\n pids_list.append(pids)\n batch_num += 1\n \n outputs_list, labels_list, modality_list, indices_list, task_names_list, pids_list = flatten_arrays(outputs_list,labels_list,modality_list,indices_list,task_names_list,pids_list)\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n return epoch_loss, outputs_list, labels_list, modality_list, indices_list, task_names_list, pids_list", "title": "" }, { "docid": "3f0a472597bce332b01bcec169070f97", "score": "0.569577", "text": "def calibrate_data(self, units=\"contrast\"):\n if units == \"contrast\":\n for i in range(self.output.shape[0]):\n self.output[i] *= self.contrast_scaling[:, None, None]", "title": "" }, { "docid": "ad375a1fcb282ba1c8acb57fbd94e689", "score": "0.56145394", "text": "def refresh_contrasts(self) -> None:\n self.executer.submit(\n self.PlotListWidget.refresh_current_plot,\n True\n )", "title": "" }, { "docid": "d7ade6d152070ceb1808276d0befe809", "score": "0.56100166", "text": "def test_contrast(self):\n enhancement_amount = 80.\n noise_filepath = common_utils.get_asset_path('whitenoise.wav')\n E = torchaudio.sox_effects.SoxEffectsChain()\n E.set_input_file(noise_filepath)\n E.append_effect_to_chain(\"contrast\", [enhancement_amount])\n sox_output_waveform, sr = E.sox_build_flow_effects()\n\n waveform, sample_rate = torchaudio.load(noise_filepath, normalization=True)\n output_waveform = F.contrast(waveform, enhancement_amount)\n\n torch.testing.assert_allclose(output_waveform, sox_output_waveform, atol=1e-4, rtol=1e-5)", "title": "" }, { "docid": "62c3f7da9d108af0489c6556dcd02775", "score": "0.56041485", "text": "def predict(self) -> None:\n self.base_model.step()\n if self.filtering:\n for i in range(self.ensemble_size):\n self.models[i].step()\n if self.run_vanilla:\n for i in range(self.vanilla_ensemble_size):\n self.vanilla_models[i].step()", "title": "" }, { "docid": "ad9f0cc4c848ea1a853997fd7ebe2bf1", "score": "0.5597049", "text": "def after_parameter_optimization(self, model, **kwargs):\n for _module in self._modules:\n if 'ReLU' not in str(type(_module)): \n _module.apply_masks_to_data()", "title": "" }, { "docid": "b6ee0ed81860b9e565bacae934bc7af7", "score": "0.55950695", "text": "def conduct_experiment(self):\n self.generate_all_output_files()\n self.train_predict_all_output_files()\n super().conduct_experiment()", "title": "" }, { "docid": "91eb1b11fb80ad02f45b34fd8d8c7e09", "score": "0.5578687", "text": "def _apply_contrast(self, x: Tensor, *args, **kwargs) ->Tensor:\n x_shape = [*x.shape]\n x_shape[1:] = [1] * (len(x_shape) - 1)\n if isinstance(self.contrast, nn.Parameter):\n magnitude = self.contrast\n elif isinstance(self.contrast, _distribution_tuple):\n magnitude = self.contrast(x_shape, device=x.device, data_type=x.dtype)\n else:\n raise NotImplementedError\n return random_contrast(x, magnitude, *args, *kwargs)", "title": "" }, { "docid": "6a65687ddf139ed7a017f2aa27910e70", "score": "0.55525684", "text": "def auto_contrast(img):\r\n\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n #cv2.imshow(\"asdasd\", img)\r\n # create a CLAHE o, bject (Arguments are optional).\r\n clahe = 
cv2.createCLAHE(clipLimit=0.5, tileGridSize=(8,8))\r\n cl1 = clahe.apply(img)\r\n return cl1", "title": "" }, { "docid": "a988c81d541e49420b4cedbf685127aa", "score": "0.55353004", "text": "def test_multi(self):\n # Load trained parameters\n G_path = os.path.join(self.model_save_path, '{}_G.pth'.format(self.test_model))\n self.G.load_state_dict(torch.load(G_path))\n self.G.eval()\n\n for i, (real_x, org_c) in enumerate(self.celebA_loader):\n\n # Prepare input images and target domain labels\n real_x = self.to_var(real_x, volatile=True)\n target_c1_list = self.make_celeb_labels(org_c)\n target_c2_list = []\n for j in range(self.c2_dim):\n target_c = self.one_hot(torch.ones(real_x.size(0)) * j, self.c2_dim)\n target_c2_list.append(self.to_var(target_c, volatile=True))\n\n # Zero vectors and mask vectors\n zero1 = self.to_var(torch.zeros(real_x.size(0), self.c2_dim)) # zero vector for rafd expressions\n mask1 = self.to_var(self.one_hot(torch.zeros(real_x.size(0)), 2)) # mask vector: [1, 0]\n zero2 = self.to_var(torch.zeros(real_x.size(0), self.c_dim)) # zero vector for celebA attributes\n mask2 = self.to_var(self.one_hot(torch.ones(real_x.size(0)), 2)) # mask vector: [0, 1]\n\n # Changing hair color, gender, and age\n fake_image_list = [real_x]\n for j in range(self.c_dim):\n target_c = torch.cat([target_c1_list[j], zero1, mask1], dim=1)\n fake_image_list.append(self.G(real_x, target_c))\n\n # Changing emotional expressions\n for j in range(self.c2_dim):\n target_c = torch.cat([zero2, target_c2_list[j], mask2], dim=1)\n fake_image_list.append(self.G(real_x, target_c))\n fake_images = torch.cat(fake_image_list, dim=3)\n\n # Save the translated images\n save_path = os.path.join(self.result_path, '{}_fake.png'.format(i+1))\n save_image(self.denorm(fake_images.data), save_path, nrow=1, padding=0)\n print('Translated test images and saved into \"{}\"..!'.format(save_path))", "title": "" }, { "docid": "25983ce335c367dcfe1b83da665ea4ac", "score": "0.5524756", "text": "def contrasts(self):\n return [list(c) for c in self.__contrasts]", "title": "" }, { "docid": "2bdb5e8b8ab71fc942ab23fea99ca7a6", "score": "0.5505614", "text": "def eval_model():", "title": "" }, { "docid": "333018894d058fdaca46cab8b7dcd78c", "score": "0.550341", "text": "def get_activations(imgs, nn_model):\n for step,img in enumerate(tqdm(imgs)):\n raw_acts=nn_model.predict(img)\n out_pic_new = scale_out(raw_acts)\n fm_val_new = featuremap_values(out_pic_new)\n if step==0:\n prep_act = fm_val_new.copy()\n else:\n for count, layer in enumerate(prep_act):\n prep_act[count] = (fm_val_new[count] * (1 / (step + 1)) +\n fm_val[count] * (step / (step + 1)))\n fm_val = prep_act.copy()\n return fm_val", "title": "" }, { "docid": "5aec00dd669894ffd3d502dbca132c59", "score": "0.5475609", "text": "def contrast_video(self, frames):\n\n contrasted_frames = []\n for frame in frames:\n enhancer = ImageEnhance.Contrast(frame)\n contrasted_frames.append(enhancer.enhance(self.factor))\n\n return contrasted_frames", "title": "" }, { "docid": "bc61197b1b190b0312f10132b539a9fe", "score": "0.5470721", "text": "def run(self):\n self.cv_multithread_model()", "title": "" }, { "docid": "067d0ef285208b8206ec4eba74b937a3", "score": "0.5466984", "text": "def transform(self, results):\n if self.random_disable():\n return results\n\n if self.magnitude is not None:\n magnitude = self.random_negative(self.magnitude)\n else:\n magnitude = self.random_negative(self.random_magnitude())\n\n img = results['img']\n img_contrasted = mmcv.adjust_contrast(img, factor=1 + 
magnitude)\n results['img'] = img_contrasted.astype(img.dtype)\n\n return results", "title": "" }, { "docid": "36da1c444a71ac6c68b348ccb86e4063", "score": "0.54639375", "text": "def score(self, sample):\n net_input = sample['net_input']\n\n # compute scores for each model in the ensemble\n avg_probs = None\n avg_attn = None\n\n for model in self.models:\n with torch.no_grad():\n model.eval()\n # decoder_out = model.forward(**net_input)\n prev_output_tokens = net_input['prev_output_tokens']\n del net_input['prev_output_tokens']\n encoder_out = model.encoder(**net_input)\n decoder_out = model.decoder(prev_output_tokens, encoder_out)\n # return decoder_out\n if GET_ENCOUT:\n attn = F.softmax(100 * encoder_out['encoder_out'].transpose(0, 1), 1).mean(-1)\n bsz, tk = attn.size()\n tq = prev_output_tokens.size(1)\n attn = attn.unsqueeze_(1).expand(bsz, tq, tk)\n cross_attn = decoder_out[1]['attn']\n assert list(attn.size()) == list(cross_attn.size()), f'{attn.size()} != {cross_attn.size()}, {prev_output_tokens.size()}'\n # attn: [b, tk, C]\n else:\n attn = decoder_out[1]\n\n probs = model.get_normalized_probs(decoder_out, log_probs=len(self.models) == 1, sample=sample).data\n\n if avg_probs is None:\n avg_probs = probs\n else:\n avg_probs.add_(probs)\n print(\"length of attention \", len(attn['attn']))\n if attn is not None:\n # {'attn': attn, 'inner_states': inner_states}\n if not torch.is_tensor(attn):\n if GET_INNER_ATT:\n attn = attn['inner_atts'][INNER_ATT]\n else:\n attn = attn['attn'][2]\n\n assert torch.is_tensor(attn), f'attn: {attn}'\n attn = attn.data\n if avg_attn is None:\n avg_attn = attn\n else:\n avg_attn.add_(attn)\n if len(self.models) > 1:\n avg_probs.div_(len(self.models))\n avg_probs.log_()\n if avg_attn is not None:\n avg_attn.div_(len(self.models))\n avg_probs = avg_probs.gather(\n dim=2,\n index=sample['target'].data.unsqueeze(-1),\n )\n return avg_probs.squeeze(2), avg_attn", "title": "" }, { "docid": "16cb436ff73653cdd0d7c1f4490bd197", "score": "0.5448722", "text": "def contrast_enhancement(im, se):\n \n return im + morphology.white_tophat(im,footprint=se) - morphology.black_tophat(im,footprint=se)", "title": "" }, { "docid": "0243393d930065e58afda17005ffe4d6", "score": "0.5446916", "text": "def loop_models(self):\n self.files = {}\n self.models = {\"Ridge regression\": Ridge(alpha=100), \"Lasso regression\": Lasso(alpha=100), \"Linjär regression\": LinearRegression(), \"PLSR\": PLSRegression(n_components=10), \"PCR\" : make_pipeline(StandardScaler(), PCA(n_components=20), LinearRegression())}\n for key, val in self.models.items():\n self.model = val\n self.fitOurModel()\n self.saveOutdata(model_name=key)\n #self.logErrors()\n print(self.times)", "title": "" }, { "docid": "61667ae606927b5f875a281b893cafbf", "score": "0.5446892", "text": "def eval(self):\n self.training = False\n for _, m in self._models.items():\n m.eval()", "title": "" }, { "docid": "14118fc7b6eea931908148fbc14d02b6", "score": "0.54327387", "text": "def score(self, sample):\n net_input = sample['net_input']\n\n # compute scores for each model in the ensemble\n avg_probs = None\n avg_attn = None\n assert len(self.models) == 1, f'{len(self.models)} not 1'\n for model in self.models:\n with torch.no_grad():\n model.eval()\n # decoder_out = model.forward(**net_input)\n prev_output_tokens = net_input['prev_output_tokens']\n del net_input['prev_output_tokens']\n encoder_out = model.encoder(**net_input)\n decoder_out = model.decoder(prev_output_tokens, encoder_out)\n # return decoder_out\n if GET_ENCOUT:\n # 
attn = F.softmax(100 * encoder_out['encoder_out'].transpose(0, 1), 1).mean(-1)\n # bsz, tk = attn.size()\n # tq = prev_output_tokens.size(1)\n # attn = attn.unsqueeze_(1).expand(bsz, tq, tk)\n # cross_attn = decoder_out[1]['attn']\n # assert list(attn.size()) == list(cross_attn.size()), f'{attn.size()} != {cross_attn.size()}, {prev_output_tokens.size()}'\n # # attn: [b, tk, C]\n raise NotImplementedError\n else:\n attn = decoder_out[1]\n # print([x for x in attn])\n # input()\n\n probs = model.get_normalized_probs(decoder_out, log_probs=len(self.models) == 1, sample=sample).data\n if avg_probs is None:\n avg_probs = probs\n else:\n avg_probs.add_(probs)\n\n # assert 'inner_atts' in attn\n assert 'inner_states' in attn\n # inner_atts = attts' in attn\n inner_atts = attn['inner_states']\n avg_attn = inner_atts[-1]\n # [b, tq, tk]\n inner_att_entropies = [-(x * x.log()).sum(dim=-1) for x in inner_atts]\n # [b, tq]\n print(inner_atts)\n print(inner_att_entropies)\n # input()\n inner_atts = torch.cat([x.unsqueeze_(-1) for x in inner_atts], dim=-1)\n inner_att_entropies = torch.cat([x.unsqueeze_(-1) for x in inner_att_entropies], dim=-1)\n # [b, tq, tk, 6]\n # [b, tq, 6]\n\n # if attn is not None:\n # # {'attn': attn, 'inner_states': inner_states}\n # if not torch.is_tensor(attn):\n # if GET_INNER_ATT:\n # attn = attn['inner_atts'][INNER_ATT]\n # else:\n # attn = attn['attn']\n #\n # assert torch.is_tensor(attn), f'attn: {attn}'\n # attn = attn.data\n # if avg_attn is None:\n # avg_attn = attn\n # else:\n # avg_attn.add_(attn)\n\n if len(self.models) > 1:\n avg_probs.div_(len(self.models))\n avg_probs.log_()\n if avg_attn is not None:\n avg_attn.div_(len(self.models))\n\n avg_probs = avg_probs.gather(\n dim=2,\n index=sample['target'].data.unsqueeze(-1),\n )\n return avg_probs.squeeze(2), avg_attn, inner_atts, inner_att_entropies", "title": "" }, { "docid": "550a9c161e25cbe978be61b5846c3c52", "score": "0.54278785", "text": "def evaluate_models(self):\n \n X = self.augment_state(self.df['Independent Variables'].values)\n y = self.df['Dependent Variable'].values.ravel()\n \n #Lambda functions to act on dataframe\n cross_validate = lambda x: cross_val_predict(x,X,y=y,cv=self.cv)\n mean_error = lambda y_predict: np.mean([y_a - y_p for y_a,y_p in zip(y,y_predict)])\n std_error = lambda y_predict: np.std([y_a - y_p for y_a,y_p in zip(y,y_predict)])\n mse = lambda y_predict: np.mean([(y_a - y_p)**2 for y_a,y_p in zip(y,y_predict)])\n \n \n self.model_df['Predictions'] = self.model_df['Model'].to_frame().applymap(cross_validate)\n self.model_df['Error Mean'] = self.model_df['Predictions'].to_frame().applymap(mean_error)\n self.model_df['Error Standard Deviation'] = self.model_df['Predictions'].to_frame().applymap(std_error)\n self.model_df['MSE'] = self.model_df['Predictions'].to_frame().applymap(mse)\n \n if self.verbose == 2:\n display(self.model_df)", "title": "" }, { "docid": "09ae215079b8b78eab76e5782baa36d7", "score": "0.5423141", "text": "def run_model(self):", "title": "" }, { "docid": "0ae96a6796f093bc7d9c806693780d1d", "score": "0.54136705", "text": "def contrast(self, value):\n assert(0x00 <= value <= 0xFF)\n self._contrast = value\n #self._ws.ws2811_channel_t_brightness_set(self._channel, value)\n self._flush()", "title": "" }, { "docid": "517a82a59a2759d0097fb4a21d647c6e", "score": "0.54042053", "text": "def runEnsemble(self):\n for train, test in self.kf:\n # Extract models\n knn = self.models[\"KNN\"]\n kmeans = self.models[\"KMEANS\"]\n svm = self.models[\"SVM\"]\n gmm = 
self.models[\"GMM\"]\n \n # Set up training and test data\n train_set, train_labels = self.getCurrFoldTrainData(train)\n test_set, test_labels = self.getCurrFoldTestData(test)\n \n if increase:\n train_set, train_labels=self.subsetData(train_set, train_labels)\n \n # Fit the models\n knn.fit(train_set, train_labels)\n kmeans.fit(train_set, train_labels)\n svm.fit(train_set, train_labels)\n gmm.fit(train_set, train_labels)\n\n # Generate predictions by weighting each model using accuracies \n # created from earlier runs\n knn_pred = knn.predict(test_set)\n kmeans_pred = kmeans.predict(test_set)\n svm_pred = svm.predict(test_set)\n gmm_pred = gmm.predict(test_set)\n \n preds = self.weightPredictions(knn_pred, kmeans_pred, \\\n svm_pred, gmm_pred)\n acc = self.getAccuracy(test_labels, preds)\n print \"(ENSEMBLE) Percent correct is\", acc", "title": "" }, { "docid": "6deb93b485a9d739b1ee5e1f066efaac", "score": "0.5390118", "text": "def run(self, model, episodes=1000):\n try:\n for episode in range(episodes):\n self.done, step = False, 0\n state = self.reset()\n while not self.done:\n action = model.model.predict(state)\n state, reward, self.done, _ = self.step(action[0])\n print(\n ' Episode {:2}, Step {:3}, Reward: {:.2f}, State: {}, Action: {:2}'.format(episode, step, reward,\n state[0], action[0]),\n end='\\r')\n self.render()\n step += 1\n except KeyboardInterrupt:\n pass", "title": "" }, { "docid": "e156b2d08c60cbc8782e0c802e832f70", "score": "0.53845716", "text": "def ensemble_2(train_data, val_data, test_data, k_dict):\n print(\"VAL data is_correct = {}\".format(len(val_data[\"is_correct\"])))\n\n num_models = 10\n lr = 0.03\n iters = 10\n two_param_list = []\n\n for i in range(num_models):\n print('Training Model ' + str(i + 1))\n model_params = two_param_item_response(train_data, val_data, lr, iters, k_dict)\n two_param_list.append(model_params)\n two_param_pred = bagged_predict_2(val_data, two_param_list)\n\n val_pred = two_param_pred\n evaluated_model = evaluate(val_data, val_pred)\n print(\"Bagged Validation accuracy: {}\".format(evaluated_model))\n\n two_param_test_pred = bagged_predict_2(test_data, two_param_list)\n test_pred = two_param_test_pred\n evaluated_test_model = evaluate(test_data, test_pred)\n print(\"Bagged Test accuracy: {}\".format(evaluated_test_model))\n\n two_param_train_pred = bagged_predict_2(train_data, two_param_list)\n train_pred = two_param_train_pred\n evaluated_train_model = evaluate(train_data, train_pred)\n print(\"Bagged Train accuracy: {}\".format(evaluated_train_model))", "title": "" }, { "docid": "54716ebc155449d8676dcd685c60fa9b", "score": "0.5373195", "text": "def evals(arch='res18'):\n trainset, valset, testset = build_datasets(dataset='SUNRGBD', base_size=512, crop_size=512)\n\n # load model\n if arch == 'res18':\n model = BiSeNet(37, context_path='resnet18', in_planes=32)\n load_state_dict(model, ckpt_path='runs/SUNRGBD/kd_pi_lr1e-3_Jul28_002404/checkpoint.pth.tar')\n elif arch == 'res101':\n model = BiSeNet(37, context_path='resnet101', in_planes=64)\n load_state_dict(model, ckpt_path='runs/SUNRGBD/res101_inp64_deconv_Jul26_205859/checkpoint.pth.tar')\n else:\n raise NotImplementedError\n\n model.eval()\n model.cuda()\n\n evaluator = Evaluator(testset.num_classes)\n evaluator.reset()\n\n print('imgs:', len(testset))\n for sample in tqdm(testset): # already transfrom\n image, target = sample['img'], sample['target']\n image = image.unsqueeze(0).cuda()\n pred = model(image)\n pred = F.interpolate(pred, size=(512, 512), mode='bilinear', 
align_corners=True)\n pred = torch.argmax(pred, dim=1).squeeze().cpu().numpy()\n target = target.numpy()\n evaluator.add_batch(target, pred)\n\n print('PixelAcc:', evaluator.Pixel_Accuracy())\n\n print('mAcc') # 各类的 acc 均值\n Accs = evaluator.Acc_of_each_class()\n print(np.nanmean(Accs)) # mAcc, mean of non-NaN elements\n approx_print(Accs)\n\n print('mIoU')\n IOUs = evaluator.IOU_of_each_class()\n print(np.nanmean(IOUs)) # mIoU\n approx_print(IOUs)", "title": "" }, { "docid": "80dc4824504864eeb536ea88d501147d", "score": "0.5371631", "text": "def compute_bold_contrast_regression(self):\n trials_max_bold=[]\n trials_max_bold_contrast=[]\n trials_max_exc_bold=[]\n trials_max_exc_bold_contrast=[]\n for trial_summary in self.trial_summaries:\n if not math.isnan(trial_summary.data.summary_data.bold_max):\n trials_max_bold.append(trial_summary.data.summary_data.bold_max)\n trials_max_bold_contrast.append(trial_summary.contrast)\n if not math.isnan(trial_summary.data.summary_data.bold_exc_max):\n trials_max_exc_bold.append(trial_summary.data.summary_data.bold_exc_max)\n trials_max_exc_bold_contrast.append(trial_summary.contrast)\n\n self.max_bold_regression=MaxBOLDContrastRegression(trials_max_bold, trials_max_bold_contrast)\n self.max_exc_bold_regression=MaxBOLDContrastRegression(trials_max_exc_bold, trials_max_exc_bold_contrast)", "title": "" }, { "docid": "26592e6ab7feae514150e9639e212144", "score": "0.5371488", "text": "def test_model_variation(model_file):\n\n \"\"\"\n ['CH_{n}_RETURN',\n 'oat1',\n 'oah',\n 'CW_{n}_RETURN',\n 'CH_{n}/Evaporator Saturation Temp']\n \"\"\"\n model = keras.models.load_model(model_file)\n minx, maxx = zip(*[(20.3, 21.4), (25.3, 32.5), (34.1, 82.1), (20.1, 24.5), (14.9, 22.2)])\n minx = np.asarray(minx)\n maxx = np.asarray(maxx)\n num_samples = 20\n xa = []\n ya = []\n\n fval = 0.3\n x_index = 1\n y_index = 2\n X = []\n for x in range(num_samples):\n for y in range(num_samples):\n v = [fval, fval, fval, fval, fval]\n x_val = x / num_samples\n y_val = y / num_samples\n v[x_index] = x_val\n v[y_index] = y_val\n X.append(v)\n xa.append(x_val)\n ya.append(y_val)\n\n X = np.asarray(X)\n z = model.predict(X, batch_size=1024, verbose=True)\n z = z[:, 0]\n print(X.shape, z.shape)\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.plot_trisurf(xa, ya, z, cmap=plt.cm.jet, linewidth=0.2)\n ax.set_xlabel(DC.chiller_input_params[x_index])\n ax.set_ylabel(DC.chiller_input_params[y_index])\n ax.set_zlabel('Power')\n\n plt.show()", "title": "" }, { "docid": "bbf0e79ce75d6c67435c57a605118bb6", "score": "0.53610396", "text": "def extreme_contrast( image ):\n\n for x, y, (r, g, b) in image: # Reads through each pixel in the chosen image.\n if 0 < r <= 127:\n contrast = create_color(0, g, b)\n set_color(image, x, y, contrast)\n else:\n contrast = create_color(255, g, b)\n set_color(image, x, y, contrast) \n \n if 0 < g <= 127:\n contrast = create_color(r, 0, b)\n set_color(image, x, y, contrast)\n else:\n contrast = create_color(r, 255, b)\n set_color(image, x, y, contrast)\n \n if 0 < b <= 127:\n contrast = create_color(r, g, 0)\n set_color(image, x, y, contrast)\n else:\n contrast = create_color(r, g, 255)\n set_color(image, x, y, contrast) \n \n save_as(image, 'returns/contrast_channel.png')\n show(load_image('returns/contrast_channel.png'))\n print('contrast_channel saved as new image')\n\n return image", "title": "" }, { "docid": "66a71fadb4e065657d3a9fc0c546e82a", "score": "0.5353212", "text": "def step(self) -> None:\n # Check if any of the models are active\n 
self.update_status()\n # Only predict-update if there is at least one active model\n if self.active:\n self.predict()\n self.update_state_ensemble()\n self.update_state_means()\n\n truth = self.base_model.get_state(sensor=self.sensor_type)\n\n # Get statuses by which to filter state vector\n statuses = self.get_state_vector_statuses(vector_mode=self.mode)\n\n # Filter ground truth vector and state mean vector\n filtered_truth = self.filter_vector(truth, statuses)\n filtered_state_mean = self.filter_vector(self.state_mean, statuses)\n\n f = self.error_func(filtered_truth, filtered_state_mean)[0]\n\n forecast_error = {'time': self.time,\n 'forecast': f}\n self.forecast_error.append(forecast_error)\n\n data = None\n\n prior = self.state_mean.copy()\n prior_ensemble = self.state_ensemble.copy()\n\n if self.time % self.assimilation_period == 0:\n # Construct observations\n obs_truth = self.base_model.get_state(sensor='location')\n data = self.make_data(obs_truth, self.R_vector)\n\n # Plot model state\n if self.vis:\n self.plot_model_state('before')\n\n # Update\n self.update(data)\n self.update_state_means()\n self.update_models()\n\n metrics = forecast_error.copy()\n metrics = self.make_metrics(metrics, truth, obs_truth, data)\n self.metrics.append(metrics)\n\n if self.mode == EnsembleKalmanFilterType.DUAL_EXIT:\n exits = self.state_mean[2 * self.population_size]\n self.exits.append(exits)\n\n # Plot posterior\n if self.vis:\n self.plot_model_state('after')\n\n # else:\n # self.update_state_mean()\n self.time += 1\n\n # Collect state information for vis\n if data is not None:\n result = {'time': self.time,\n 'ground_truth': obs_truth,\n 'prior': prior,\n 'posterior': self.state_mean.copy()}\n result['observation'] = data\n result['destination'] = self.make_base_destinations_vector()\n result['origin'] = self.make_base_origins_vector()\n\n for i in range(self.ensemble_size):\n result[f'prior_{i}'] = prior_ensemble[:, i]\n result[f'posterior_{i}'] = self.state_ensemble[:, i].copy()\n\n if self.run_vanilla:\n result['baseline'] = self.vanilla_state_mean\n self.results.append(result)\n\n if self.run_vanilla:\n self.vanilla_results.append(self.vanilla_state_mean)\n\n # self.results.append(self.state_mean)\n\n # print('time: {0}, base: {1}'.format(self.time,\n # self.base_model.pop_active))\n # print('time: {0}, models: {1}'.format(self.time, [m.pop_active for\n # m in self.models]))", "title": "" }, { "docid": "e498f6ad98f34f66833b8fd5aee531e2", "score": "0.53463644", "text": "def test_models():\n train_1 = np.random.rand(200, 20)\n train_2 = np.random.rand(200, 10)\n train_3 = np.random.rand(200, 5)\n test_1 = np.random.rand(50, 20)\n test_2 = np.random.rand(50, 10)\n test_3 = np.random.rand(50, 5)\n\n test_models = MODELS\n module = importlib.import_module(\"multiviewae\")\n for m in test_models:\n print('MODEL CLASS')\n print(m)\n class_ = getattr(module, m)\n if m not in [MODEL_JMVAE, MODEL_VAEBARLOW, MODEL_AEBARLOW]: #JMVAE only designed for 2 views of data\n model1 = class_(input_dim=[20])\n model1.fit(train_1) #fit model with 1 view\n model1.fit(train_1, max_epochs=5, batch_size=10) #fit using user specified max_epochs and batch size\n\n model2 = class_(input_dim=[20, 10, 5])\n model2.fit(train_1, train_2, train_3) #fit model with 3 views\n model2.fit(train_1, train_2, train_3, max_epochs=5, batch_size=5)\n\n print(\"RESULTS: \", m)\n latent = model1.predict_latents(test_1) \n print_results(\"latent\", latent)\n recon = model1.predict_reconstruction(test_1)\n print_results(\"recon\", 
recon)\n\n latent = model1.predict_latents(test_1, batch_size=10) #test user defined batch size\n print_results(\"latent\", latent)\n recon = model1.predict_reconstruction(test_1, batch_size=5)\n print_results(\"recon\", recon)\n\n latent = model2.predict_latents(test_1, test_2, test_3)\n print_results(\"latent\", latent)\n recon = model2.predict_reconstruction(test_1, test_2, test_3)\n print_results(\"recon\", recon)\n\n latent = model2.predict_latents(test_1, test_2, test_3, batch_size=10)\n print_results(\"latent\", latent)\n recon = model2.predict_reconstruction(test_1, test_2, test_3, batch_size=5)\n print_results(\"recon\", recon)\n print(\"\")\n else:\n model1 = class_(input_dim=[20, 10])\n model1.fit(train_1, train_2) #fit model with 2 views\n model1.fit(train_1, train_2, max_epochs=5, batch_size=10)\n\n print(\"RESULTS: \", m)\n latent = model1.predict_latents(test_1, test_2)\n print_results(\"latent\", latent)\n recon = model1.predict_reconstruction(test_1, test_2)\n print_results(\"recon\", recon)\n\n latent = model1.predict_latents(test_1, test_2, batch_size=10)\n print_results(\"latent\", latent)\n recon = model1.predict_reconstruction(test_1, test_2, batch_size=5)\n print_results(\"recon\", recon)", "title": "" }, { "docid": "5603692555dc3e61acd6313b50a3e288", "score": "0.53406435", "text": "def run_model(number_run, number_steps):\n for j in range(number_run):\n # Reinitialize model\n t0 = time.time()\n if j < 30:\n model = ABM_CE_PV(\n seed=j)\n elif j < 60:\n model = ABM_CE_PV(\n seed=(j - 30), w_sn_eol=0)\n elif j < 90:\n model = ABM_CE_PV(\n seed=(j - 60), seeding_recyc={\"Seeding\": True,\n \"Year\": 1, \"number_seed\": 100, \"discount\": 0.35})\n elif j < 120:\n model = ABM_CE_PV(seed=(j - 90), seeding_recyc={\"Seeding\": True,\n \"Year\": 1, \"number_seed\": 200, \"discount\": 0.35})\n elif j < 150:\n model = ABM_CE_PV(seed=(j - 120),\n calibration_n_sensitivity_4=2)\n elif j < 180:\n model = ABM_CE_PV(seed=(j - 150),\n recycling_learning_shape_factor=-0.6)\n elif j < 210:\n model = ABM_CE_PV(seed=(j - 180),\n recycling_learning_shape_factor=-1E-6)\n elif j < 240:\n model = ABM_CE_PV(seed=(j - 210),\n dynamic_lifetime_model={\"Dynamic lifetime\": True,\n \"d_lifetime_intercept\": 15.9,\n \"d_lifetime_reg_coeff\": 0.87,\n \"Seed\": False, \"Year\": 5,\n \"avg_lifetime\": 50})\n elif j < 270:\n model = ABM_CE_PV(seed=(j - 240),\n all_EoL_pathways={\"repair\": True, \"sell\": True,\n \"recycle\": True,\n \"landfill\": False,\n \"hoard\": True})\n elif j < 300:\n model = ABM_CE_PV(seed=(j - 270),\n seeding={\"Seeding\": True,\n \"Year\": 5, \"number_seed\": 50})\n elif j < 330:\n model = ABM_CE_PV(seed=(j - 300),\n repairability=1,\n init_purchase_choice={\"new\": 0, \"used\": 1,\n \"certified\": 0},\n w_sn_eol=0,\n w_pbc_eol=0.44,\n w_a_eol=0,\n w_sn_reuse=0.497,\n w_pbc_reuse=0.382,\n w_a_reuse=0,\n original_repairing_cost=[0.0001, 0.00045,\n 0.00028],\n all_EoL_pathways={\"repair\": False, \"sell\": True,\n \"recycle\": False,\n \"landfill\": True,\n \"hoard\": True})\n else:\n model = ABM_CE_PV(seed=(j - 330),\n calibration_n_sensitivity_3=0.65,\n recovery_fractions={\n \"Product\": np.nan, \"Aluminum\": 0.994, \"Glass\": 0.98,\n \"Copper\": 0.97, \"Insulated cable\": 1., \"Silicon\": 0.97,\n \"Silver\": 0.94})\n for i in range(number_steps):\n model.step()\n # Get results in a pandas DataFrame\n results_model = model.datacollector.get_model_vars_dataframe()\n results_agents = model.datacollector.get_agent_vars_dataframe()\n 
results_model.to_csv(\"results\\\\Results_model_run%s.csv\" % j)\n results_agents.to_csv(\"results\\\\Results_agents.csv\")\n # Draw figures\n draw_graphs(False, False, model, results_agents, results_model)\n print(\"Run\", j+1, \"out of\", number_run)\n t1 = time.time()\n print(t1 - t0)", "title": "" }, { "docid": "422035ff5dba7475deeef97d1ebafac9", "score": "0.5337905", "text": "def evaluateOnTestData(self, model_name):\n self.model = keras.models.load_model(model_name)\n\n print(\"\\nevaluating on test set with 0 edit threshold...\")\n test_encoder_input, test_decoder_input, test_decoder_output = self.processData('test1_cleansed.txt')\n self.results1 = self.model.evaluate(\n x=[test_encoder_input, test_decoder_input],\n y=test_decoder_output\n )\n print(\"test loss, test acc:\", self.results1)\n\n print(\"evaluating on test set with 0.1 edit threshold...\")\n test_encoder_input, test_decoder_input, test_decoder_output = self.processData('test2_cleansed.txt')\n self.results2 = self.model.evaluate(\n x=[test_encoder_input, test_decoder_input],\n y=test_decoder_output\n )\n print(\"test loss, test acc:\", self.results2)\n\n print(\"evaluating on test set with 0.25 edit threshold...\")\n test_encoder_input, test_decoder_input, test_decoder_output = self.processData('test3_cleansed.txt')\n self.results3 = self.model.evaluate(\n x=[test_encoder_input, test_decoder_input],\n y=test_decoder_output\n )\n print(\"test loss, test acc:\", self.results3)", "title": "" }, { "docid": "feba9c147b12d5183b449a78c9891742", "score": "0.53343385", "text": "def runStuhrmann(self, contrasts=[]):\n\n if contrasts == []: \n for contrast in range(11):\n contrasts.append(float(contrast)/10)\n \n\n for contrast in contrasts:\n self.runCryson(contrast)\n print contrast\n\n outfiles = os.listdir('cryson/')\n logfiles = filter(self.filterLogFiles, outfiles)\n \n stuhrvalues = []\n for log in logfiles:\n params = crysox.parseLogFile('cryson/' + log)\n rg = params['rg']\n contrast = params['particle_cont']\n stuhrvalues.append([contrast, rg])\n\n return stuhrvalues", "title": "" }, { "docid": "3ff7e74eb775213f8e493714ce934664", "score": "0.53251517", "text": "def evaluate(trans_configs, model_configs,\n data_configs, save=False, output_dir=None):\n # Load the baseline defense (PGD-ADT model)\n baseline = load_lenet(file=model_configs.get('jsma_trained'), trans_configs=None,\n use_logits=False, wrap=False)\n\n # get the undefended model (UM)\n file = os.path.join(model_configs.get('dir'), model_configs.get('um_file'))\n undefended = load_lenet(file=file,\n trans_configs=trans_configs.get('configs0'),\n wrap=True)\n print(\">>> um:\", type(undefended))\n\n # load weak defenses into a pool\n pool, _ = load_pool(trans_configs=trans_configs,\n model_configs=model_configs,\n active_list=True,\n wrap=True)\n # create an AVEP ensemble from the WD pool\n wds = list(pool.values())\n print(\">>> wds:\", type(wds), type(wds[0]))\n ensemble = Ensemble(classifiers=wds, strategy=ENSEMBLE_STRATEGY.AVEP.value)\n\n # load the benign samples\n bs_file = os.path.join(data_configs.get('dir'), data_configs.get('bs_file'))\n x_bs = np.load(bs_file)\n img_rows, img_cols = x_bs.shape[1], x_bs.shape[2]\n\n # load the corresponding true labels\n label_file = os.path.join(data_configs.get('dir'), data_configs.get('label_file'))\n labels = np.load(label_file)\n\n # get indices of benign samples that are correctly classified by the targeted model\n print(\">>> Evaluating UM on [{}], it may take a while...\".format(bs_file))\n pred_bs = 
undefended.predict(x_bs)\n corrections = get_corrections(y_pred=pred_bs, y_true=labels)\n\n # Evaluate AEs.\n results = {}\n ae_list = data_configs.get('ae_files')\n ae_file = os.path.join(data_configs.get('dir'), ae_list[4])\n x_adv = np.load(ae_file)\n\n # evaluate the undefended model on the AE\n print(\">>> Evaluating UM on [{}], it may take a while...\".format(ae_file))\n pred_adv_um = undefended.predict(x_adv)\n err_um = error_rate(y_pred=pred_adv_um, y_true=labels, correct_on_bs=corrections)\n # track the result\n results['UM'] = err_um\n\n # evaluate the ensemble on the AE\n print(\">>> Evaluating ensemble on [{}], it may take a while...\".format(ae_file))\n pred_adv_ens = ensemble.predict(x_adv)\n err_ens = error_rate(y_pred=pred_adv_ens, y_true=labels, correct_on_bs=corrections)\n # track the result\n results['Ensemble'] = err_ens\n\n # evaluate the baseline on the AE\n print(\">>> Evaluating baseline model on [{}], it may take a while...\".format(ae_file))\n pred_adv_bl = baseline.predict(x_adv)\n err_bl = error_rate(y_pred=pred_adv_bl, y_true=labels, correct_on_bs=corrections)\n # track the result\n results['JSMA-ADT'] = err_bl\n\n # TODO: collect and dump the evaluation results to file(s) such that you can analyze them later.\n print(\">>> Evaluations on [{}]:\\n{}\".format(ae_file, results))\n\n attack_configs = load_from_json(\"../src/configs/demo/at.json\")\n model_configs = load_from_json(\"../src/configs/demo/md.json\")\n data_configs = load_from_json(\"../src/configs/demo/dt.json\")\n\n output_dir = \"../results\"\n\n # evaluate\n evaluate(attack_configs=trans_configs,\n model_configs=model_configs,\n data_configs=data_configs,\n save=False,\n output_dir=output_root)", "title": "" }, { "docid": "8e1efd27a1d80a871f2d2e30cf6b424a", "score": "0.5306629", "text": "def apply_model(self, ner_model, features):\n return None", "title": "" }, { "docid": "43e2e31d32e8d99cc94698cbc31dd662", "score": "0.52944237", "text": "def train(model):", "title": "" }, { "docid": "3683abd2b8a7936f968f4fa42d4d70e4", "score": "0.5293823", "text": "def apply(self):\n for target in self.targets():\n self.action(target)", "title": "" }, { "docid": "dc1236e0e118aa2a318f15a43f77cd8e", "score": "0.52860355", "text": "def contrast_signal_cell(self, cell_i, *args):\n texture_maxi = self.read_texture_analysis()['texture_maxi']\n contrast_signal = self.generatecontrast(texture_maxi[cell_i], *args)\n return contrast_signal", "title": "" }, { "docid": "18036111494b7493df0338f1bbde5a13", "score": "0.5284112", "text": "def _apply_model(args):\n pass", "title": "" }, { "docid": "8f96e938366d50f69dee854b8bcf3913", "score": "0.52780867", "text": "def test(model):\n pg.Surface.lock(config.drawingBoard)\n CV = [] # Color Values\n CVBinary = [] # Color Values but 1s or 0s\n\n for row in range(0, 560, 20):\n for column in range(0, 560, 20):\n new = pg.Surface.get_at(config.drawingBoard, (column, row))[:3]\n CV.append(new)\n\n for i in CV:\n if i == (0, 0, 0): # black\n CVBinary.append(1.)\n else:\n CVBinary.append(0.)\n\n CVBinary = np.array(CVBinary).reshape((1, 28, 28, 1))\n \n print(CVBinary)\n\n predictions = model.predict(CVBinary)\n guess = np.argmax(predictions)\n print('GUESS: ', guess)\n\n pg.Surface.unlock(config.drawingBoard)\n\n return guess", "title": "" }, { "docid": "ea1f0ec85af0b9aaea14db8643decc88", "score": "0.52753633", "text": "def finetune(base_model, model, X_train, y_train, X_val, y_val,\n epochs_1=1000, patience_1=2,\n patience_lr=1, batch_size=32,\n nb_train_samples=41000, 
nb_validation_samples=7611,\n img_width=299, img_height=299, class_imbalance=False,\n inception_h5_1=\"inceptionv3_128_fine_tuned_1.h5\",\n inception_h5_check_point_1=\"inceptionv3_128_fine_tuned_check_point_1.h5\",\n layer_names_file=\"inceptionv3_128_mod_layer_names.txt\", verbose=1):\n\n # let's visualize layer names and layer indices to see how many layers\n # we should freeze:\n with open(layer_names_file, \"w\") as iOF:\n for ix, layer in enumerate(model.layers):\n iOF.write(\"%d, %s\\n\"%(ix, layer.name))\n if verbose >= 4: print(ix, layer.name)\n\n # this is the augmentation configuration we will use for training\n train_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n rotation_range=180,\n fill_mode='reflect',\n p_rotation=0.2,\n rotation_angles=[-90, 0, 90, 180],\n p_zoom=0.4,\n p_shift=0.4)\n\n # this is the augmentation configuration we will use for testing:\n test_datagen = ImageDataGenerator(\n preprocessing_function=preprocess_input,\n horizontal_flip=True,\n vertical_flip=True,\n zoom_range=0.15,\n width_shift_range=0.15,\n height_shift_range=0.15,\n rotation_range=180,\n fill_mode='reflect',\n p_rotation=0.,\n rotation_angles=[-90, 0, 90, 180],\n p_zoom=0.,\n p_shift=0.)\n\n # define train & val data generators\n train_generator = train_datagen.flow(\n X_train,\n y_train,\n batch_size=batch_size,\n shuffle=True)\n\n validation_generator = test_datagen.flow(\n X_val,\n y_val,\n batch_size=batch_size,\n shuffle=True)\n\n # get class weights\n if class_imbalance:\n class_weight = get_class_weights(np.sum(y_train, axis=0), smooth_factor=0.1)\n else:\n class_weight = None\n\n # train the model on the new data for a few epochs on the batches generated by datagen.flow().\n model.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs_1,\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size,\n callbacks=[EarlyStopping(monitor='val_loss', patience=patience_1),\n ModelCheckpoint(filepath=inception_h5_check_point_1, save_best_only=True),\n ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=patience_lr)],\n class_weight=class_weight)\n\n # save weights just in case\n model.save_weights(inception_h5_1)", "title": "" }, { "docid": "cb820b415c87ce0403da37a91dcb7143", "score": "0.52697563", "text": "def train_all_models():\n\n fict_df = load_processed_data()\n\n fem_model = train_single_production_model(fict_df, best_models[0], 'is_fem')\n fict_model = train_single_production_model(fict_df, best_models[1], 'is_fict')\n fim_model = train_single_production_model(fict_df, best_models[2], 'is_fim')\n gef_model = train_single_production_model(fict_df, best_models[3], 'is_gef')\n\n return fem_model, fict_model, fim_model, gef_model", "title": "" }, { "docid": "ae141e3139510b30221c4001d7a0a667", "score": "0.52672505", "text": "def contrast(self, value):\n # TODO this is potentially confusing -- revisit later\n value = value * (self.pedestal + 1)\n ColorMixin.contrast.fset(self, value)\n self._needTextureUpdate = True\n self._needUpdate = True", "title": "" }, { "docid": "2534f8de94f7b612ddebbc4732ae9410", "score": "0.5263024", "text": "def contrast(image_list):\n image = Image.new('L', (IMAGE_SIZE, IMAGE_SIZE)) # L = 8bit grayscale\n image_list = [x * 255 / HIGHEST_PIXEL_VALUE for x in image_list]\n image.putdata(image_list)\n image = 
ImageEnhance.Contrast(image)\n image_list = list(image.enhance(4).getdata())\n image_list = [x * HIGHEST_PIXEL_VALUE / 255 for x in image_list]\n return image_list", "title": "" }, { "docid": "02393c15b3ad00f6b99cf2f8b27e9bff", "score": "0.5262428", "text": "def run(self):\n predict_proba = self.fit_initial_model()\n for i in range(1, len(self.train_pools)):\n predict_proba = self.run_iteration(i, predict_proba)", "title": "" }, { "docid": "017206a9ebc3152255a142cbbe562222", "score": "0.5261577", "text": "def reset_model(self):\n\n def weights_init(m):\n if isinstance(m, torch.nn.Conv2d):\n torch.nn.init.xavier_uniform_(m.weight.data)\n\n self.model.apply(weights_init)", "title": "" }, { "docid": "543f7a4c2522ce1b25da16410522b5a0", "score": "0.5253573", "text": "def _build_momentum_contrast_training_step(model, mo_model, optimizer, buffer, alpha=0.999, tau=0.07, weight_decay=0,\n N=0, s=0, s_prime=0, margin=0):\n # check whether we're in mixed-precision mode\n #mixed = tf.keras.mixed_precision.global_policy().name == 'mixed_float16'\n def training_step(img1, img2):\n print(\"tracing training step\")\n batch_size = img1.shape[0]\n # compute averaged embeddings. tensor is (N,d)\n #k1 = tf.nn.l2_normalize(mo_model(img1, training=True), axis=1)\n k2 = tf.nn.l2_normalize(mo_model(img2, training=True), axis=1)\n with tf.GradientTape() as tape:\n # compute normalized embeddings for each batch of augmented images\n q1 = tf.nn.l2_normalize(model(img1, training=True), axis=1)\n #q2 = tf.nn.l2_normalize(model(img2, training=True), axis=1)\n # compute MoCo and/or MoCHi logits\n all_logits1 = _build_logits(q1, k2, buffer, N, s, s_prime, margin)\n #all_logits2 = _build_logits(q2, k1, buffer, N, s, s_prime, margin)\n # create labels (correct class is the batch index)\n labels = tf.range((batch_size), dtype=tf.int32)\n # compute crossentropy loss\n xent_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels, all_logits1 / tau)) #+ tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(\n #labels, all_logits2 / tau))\n if weight_decay > 0:\n l2_loss = compute_l2_loss(model)\n else:\n l2_loss = 0\n loss = xent_loss + weight_decay*l2_loss\n #if mixed:\n # loss = optimizer.get_scaled_loss(loss)\n\n # update fast model\n variables = model.trainable_variables\n gradients = tape.gradient(loss, variables)\n optimizer.apply_gradients(zip(gradients, variables))\n # update slow model\n weight_diff = exponential_model_update(mo_model, model, alpha)\n\n # update buffer\n _update_queue(k2, buffer)\n\n # also compute the \"accuracy\"; what fraction of the batch has\n # the key as the largest logit. 
from figure 2b of the MoCHi paper\n nce_batch_accuracy = tf.reduce_mean(tf.cast(tf.argmax(all_logits1,\n axis=1) == tf.cast(labels, tf.int64), tf.float32))\n\n return {\"loss\":loss, \"weight_diff\":weight_diff,\n \"l2_loss\":l2_loss, \"nt_xent_loss\":xent_loss,\n \"nce_batch_acc\":nce_batch_accuracy}\n return training_step", "title": "" }, { "docid": "e3cf328b5621a68bdd10c6e387b99afa", "score": "0.52526474", "text": "def eval(self):\n self.model_plus.eval()\n self.model_cross.eval()\n self.likelihood.eval()", "title": "" }, { "docid": "4f5d9c103a0a903527efc5fd9240f646", "score": "0.5249523", "text": "def normalize_images(training_set):\n print 'adjusting images...'\n for source_image in training_set:\n source_image.image = ImageOps.grayscale(source_image.image)\n source_image.image = ImageOps.autocontrast(source_image.image)\n #source_image.image = ImageOps.equalize(source_image.image)", "title": "" }, { "docid": "012f1a476c1f6a91405bc6daddfc9e05", "score": "0.52464724", "text": "def contrast_enhance(img, contrast=60.0):\n img = img * 1.0\n thre = img.mean()\n result = img * 1.0\n if contrast <= -255.0:\n result = (result >= 0) + thre - 1\n elif contrast > -255.0 and contrast < 0:\n result = img + (img - thre) * contrast / 255.0\n elif contrast < 255.0 and contrast > 0:\n new_con = 255.0 * 255.0 / (256.0 - contrast) - 255.0\n result = img + (img - thre) * new_con / 255.0\n else:\n mask_1 = img > thre\n result = mask_1 * 255.0\n result = result / 255.0\n\n mask_1 = result < 0\n mask_2 = result > 1\n result = result * (1 - mask_1)\n result = result * (1 - mask_2) + mask_2\n result = cv2.normalize(result, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\n\n return result", "title": "" }, { "docid": "8e31eaf3cae27e40e7fd46dd2a3e0203", "score": "0.524643", "text": "def evaluate(model: ActorCriticRLModel, env: RecoEnv, num_steps: int = 1000) -> None:\n start_time = dt.now()\n obs = env.reset()\n step_count = 0\n episode_number = 1\n for i in range(num_steps):\n step_count += 1\n action, _states = model.predict(obs)\n obs, reward, done, info = env.step(action)\n if done:\n elapsed = (dt.now() - start_time).seconds\n print(f\"**************EPISODE #{episode_number}****************\")\n print(f\"Total steps = {step_count} | steps/second = {step_count / elapsed}\")\n print(f\"Total correct predictions = {env.total_correct_predictions}\")\n print(f\"Prediction accuracy = {env.total_correct_predictions / step_count}\")\n obs = env.reset()\n step_count = 0\n episode_number += 1\n start_time = dt.now()", "title": "" }, { "docid": "227832a416eb7644025e1fd453c79fc7", "score": "0.5245192", "text": "def run_episode(env, model):\r\n done = False\r\n States = []\r\n Actions = []\r\n Rewards = []\r\n \r\n gamma = 0.99\r\n\r\n s = env.reset()\r\n\r\n total_reward = 0\r\n\r\n while not done:\r\n\r\n a = model.sample_action(s)\r\n\r\n s2, r, done, info = env.step(a)\r\n total_reward += r\r\n\r\n States.append(s)\r\n Actions.append(a)\r\n Rewards.append(r)\r\n\r\n s = s2\r\n\r\n if done:\r\n States = np.array(States)\r\n Actions = np.array(Actions)\r\n Rewards = np.array(Rewards)\r\n \r\n returns = np.zeros_like(Rewards, dtype=np.float32)\r\n G = 0\r\n for t in reversed(range(len(Rewards))):\r\n \r\n G = G * gamma + Rewards[t]\r\n returns[t] = G\r\n\r\n returns -= returns.mean() / returns.std()\r\n \r\n\r\n model.partial_fit(States, Actions, returns)\r\n\r\n return total_reward", "title": "" }, { "docid": "f100782a4b1de5cc5fd753115350694a", "score": "0.5243444", "text": "def run_experiment(self):\n self.collect_data()\n 
self.split_data()\n self.encode_target()\n self.tokenize_data()\n self.encode_data()\n self.perform_embedding()\n self.create_neural_model()\n self.fit_model()", "title": "" }, { "docid": "ef2d557655ca6e171ef56c0ffc63a19e", "score": "0.5237681", "text": "def fit_model(self):", "title": "" }, { "docid": "b6c2f07f7659e50f6f1b9cd2ebf624e7", "score": "0.52306896", "text": "def set_contrast(image, contrast=2):\n\n if contrast == 0:\n out_img = image\n elif contrast == 1:\n out_img = exposure.equalize_hist(image)\n elif contrast == 2:\n out_img = exposure.equalize_adapthist(image)\n elif contrast == 3:\n out_img = exposure.rescale_intensity(image)\n else:\n out_img = None\n print(\"No valid contrast option provided. \\n\"\n \"Please specify one of the numbers 1-4. \\n\")\n\n return out_img", "title": "" }, { "docid": "64d0802657c0536f1711d4658edafa3a", "score": "0.5228685", "text": "def run_model(self):\n\t\tself.model = AdaBoostRegressor(n_estimators=1)\n\t\tself.model.fit(self.x_train, self.y_train)\n\t\t#self.scoring = roc_auc_score(self.y_train, model.predict(self.x_train))", "title": "" }, { "docid": "4567fde18c35ba855f44f819968c7e54", "score": "0.52269405", "text": "def run(self):\n self.trained_model = self.pipeline.fit(self.X, self.y)", "title": "" }, { "docid": "994a319d5e3857187fe7e1cfdfff3d58", "score": "0.52264726", "text": "def extract_contrast_params(self,contrast_dict):\n contrast_factor = 1.0 # 1 gives the original image => NO brightness\n for param_name, param_value in contrast_dict.items():\n if(param_name == 'contrast_factor' and type(param_value) is float and param_value > 0): \n contrast_factor = param_value\n return contrast_factor", "title": "" }, { "docid": "59c74c6cf0eb1c919be095738d845180", "score": "0.5224572", "text": "def test_models_run(self):\n model = GMMDescent(self.k, self.test_data)\n model(self.test_data)", "title": "" }, { "docid": "98ee909478de892a15de819280efef37", "score": "0.5220559", "text": "def run_style_transfer(cnn, normalization_mean, normalization_std,\n content_img, style_img, style_img_masks,\n num_steps=ITERATIONS, update_masks=True,\n color_aware_weight=STYLE_LOSS_WEIGHT,\n content_weight=CONTENT_LOSS_WEIGHT):\n print('Building the style transfer model..')\n model, content_losses, color_aware_losses = ( \n get_style_model_and_losses(cnn, normalization_mean, normalization_std, \n style_img, content_img, style_img_masks))\n \n optimizer = get_input_optimizer(input_img)\n\n mask_generator = CreateMask(color_palette=final_palette)\n\n if not update_masks:\n input_masks = mask_generator(input_img).detach()\n for i in range(len(model) - 1, -1, -1):\n if isinstance(model[i], ColorAwareLoss):\n model[i].set_input_masks(input_masks)\n \n print('Optimizing..')\n run = [0]\n\n while run[0] <= num_steps:\n def closure():\n # correct the values of updated input image\n input_img.data.clamp_(0, 1)\n optimizer.zero_grad()\n\n if update_masks:\n input_masks = mask_generator(input_img).detach()\n\n \n if SHOW_MASKS and run[0] % 50 == 0:\n for i in range(final_palette.shape[0]):\n plt.figure()\n imshow(style_masks[:, i, :, :], \n title=f'Style Mask of Color # {i}')\n plt.figure()\n imshow(input_masks[:, i, :, :], \n title=f'Input Mask of Color # {i}')\n\n if update_masks:\n for i in range(len(model) - 1, -1, -1):\n if isinstance(model[i], ColorAwareLoss):\n model[i].set_input_masks(input_masks)\n\n\n model(input_img)\n\n content_score = 0\n color_aware_score = 0\n \n for cl in content_losses:\n content_score += cl.loss\n for cal in color_aware_losses:\n 
color_aware_score += cal.loss\n\n content_score *= content_weight\n color_aware_score *= color_aware_weight\n \n loss = content_score + color_aware_score \n\n loss.backward()\n\n run[0] += 1\n if run[0] % 50 == 0:\n print(\"run {}:\".format(run)) \n print('Content Loss: {:4f} Color-Aware Loss: {:4f}'.format(\n content_score.item(), color_aware_score.item()))\n \n return content_score + color_aware_score \n \n\n optimizer.step(closure)\n\n input_img.data.clamp_(0, 1)\n\n return input_img", "title": "" }, { "docid": "a46fd998872e8c70f8bc7c082b474b7e", "score": "0.5215146", "text": "def contrastive_loss(self, *args):\n raise NotImplementedError(\"Abstract\")", "title": "" }, { "docid": "29f068bf3a6cb2469e1e4d08de68fc56", "score": "0.5214663", "text": "def multi_model_selection(self, root_dir, experiment_designation, cv=5):\r\n for model in self.list_models:\r\n print(model)\r\n model_call = getattr(self, \"model_selection_\" + model)\r\n model_call(self.X_train.astype(np.float), self.y_train.astype(int), cv)\r\n file_name = model + '_' + experiment_designation\r\n self.print_parameter_values()\r\n scores = self.evaluate_model(self.X_test.astype(np.float), self.y_test.astype(int))\r\n self.write_report(scores, root_dir, file_name)\r\n self.write_cv_results(root_dir, file_name)\r\n self.save_best_model(\"hold_out\")", "title": "" }, { "docid": "26ab4fd5414f678fe52d6a966c342dda", "score": "0.5213472", "text": "def get_contrasts(contrasts,desmat,allcons=None):\n if not contrasts:\n contrasts={}\n \n # Conditions vary by run in this expriment\n contrast_matrix = np.eye(desmat.shape[1])\n basic_contrasts = dict([(column, contrast_matrix[i])\n for i, column in enumerate(desmat.columns)])\n # Make contrasts from sums of matching basic_contrasts\n bc=basic_contrasts.keys()\n \n # Each contrast, odd or even\n for con, conelements in allcons.items():\n if not con in contrasts:\n contrasts[con]=[]\n\n contrasts[con].append(sum([basic_contrasts[x] for x in set(conelements).intersection(set(bc))]))\n\n return contrasts", "title": "" }, { "docid": "30fc3bcca1e00d49d6294808f8227820", "score": "0.5210881", "text": "def eval_all_dataset(model, acc_thresh=0.5, num_actions_list=None, path='/h/sagar/ece496-capstone/datasets/fi', print_stuff=False):\n FI = path\n\n mean_best_ious = []\n mean_rand_ious = []\n mean_ours_ious = []\n\n all_vid_mean_best_iou = 0.0\n all_vid_mean_rand_iou = 0.0\n all_vid_mean_ours_iou = 0.0 # ours: model-chosen output\n \n all_vid_best_acc = 0.0\n all_vid_rand_acc = 0.0\n all_vid_ours_acc = 0.0\n\n vid_count = 0\n \n #eval all num actions\n if num_actions_list is None:\n num_actions_list = [int(num_actions) for num_actions in sorted(os.listdir(FI))]\n \n for num_actions in num_actions_list:\n indices = len(os.listdir(os.path.join(FI, str(num_actions))))\n\n for idx in range(indices):\n mean_best_iou_vid, mean_rand_iou_vid, mean_ours_iou_vid, best_acc, rand_acc, ours_acc = compute_eval_ious(model, num_actions=num_actions, index=idx, root=FI, gt_bbox_all=None, acc_thresh=acc_thresh, print_stuff = print_stuff)\n\n mean_best_ious.append(mean_best_iou_vid)\n mean_rand_ious.append(mean_rand_iou_vid)\n mean_ours_ious.append(mean_ours_iou_vid)\n\n all_vid_mean_best_iou += mean_best_iou_vid\n all_vid_mean_rand_iou += mean_rand_iou_vid\n all_vid_mean_ours_iou += mean_ours_iou_vid\n \n all_vid_best_acc += best_acc\n all_vid_rand_acc += rand_acc\n all_vid_ours_acc += ours_acc\n \n vid_count += 1\n\n all_vid_mean_best_iou /= vid_count\n all_vid_mean_rand_iou /= vid_count\n all_vid_mean_ours_iou /= 
vid_count\n\n all_vid_best_acc /= vid_count\n all_vid_rand_acc /= vid_count\n all_vid_ours_acc /= vid_count\n \n if print_stuff:\n print('--------------------------------------------------')\n print('EVALUATION SUMMARY')\n print('Number of videos: {}'.format(vid_count))\n print('Mean IoU:')\n print('\\tProposal Upper Bound: {}'.format(all_vid_mean_best_iou))\n print('\\tRandom: {}'.format(all_vid_mean_rand_iou))\n print('\\tModel: {}'.format(all_vid_mean_ours_iou))\n\n print('Top-1 accuracy@{}:'.format(acc_thresh))\n print('\\tProposal Upper Bound: {:.1f}%'.format(all_vid_best_acc*100))\n print('\\tRandom: {:.1f}%'.format(all_vid_rand_acc*100))\n print('\\tModel: {:.1f}%'.format(all_vid_ours_acc*100))\n print('--------------------------------------------------')\n \n #mean performance ours, best, rand, and then top-1 acc ours, best, rand\n return all_vid_mean_ours_iou, all_vid_mean_best_iou, all_vid_mean_rand_iou, all_vid_ours_acc, all_vid_best_acc, all_vid_rand_acc", "title": "" }, { "docid": "e536237f6af208d65dfc67470697336e", "score": "0.5210528", "text": "def apply_predict_filters(self, choosers, alternatives):\n return super(MNLDiscreteChoiceModel, self).apply_predict_filters(\n choosers, alternatives)", "title": "" }, { "docid": "d8514c9d7a7cad156bbe3ebdc3a12a54", "score": "0.52066994", "text": "def step(self, data):\n self.contrastive_step(data)\n self.each_step()", "title": "" }, { "docid": "c85cfa2cc561fe6e3d04a96022c703b8", "score": "0.5204742", "text": "def filter_activations(args):\n\n # Creates the model and loads weights\n set_mem_growth()\n\n model = load_model(args.model, custom_objects=get_custom_objects())\n conv_layer_ids = [idx for idx, layer in enumerate(model.layers) if \"Conv1D\" in str(layer)]\n conv_layer_idx = conv_layer_ids[args.inter_layer - 1]\n motif_length = model.get_layer(index=conv_layer_idx).get_weights()[0].shape[0]\n pad_left = (motif_length - 1) // 2\n pad_right = motif_length - 1 - pad_left\n\n print(\"Loading test data (.npy) ...\")\n test_data_set_name = os.path.splitext(os.path.basename(args.test_data))[0]\n samples = np.load(args.test_data, mmap_mode='r')\n total_num_reads = samples.shape[0]\n\n print(\"Loading test data (.fasta) ...\")\n reads = list(SeqIO.parse(args.test_fasta, \"fasta\"))\n assert len(reads) == total_num_reads, \\\n \"Test data in .npy-format and fasta files containing different number of reads!\"\n\n print(\"Padding reads ...\")\n reads = [\"N\" * pad_left + r + \"N\" * pad_right for r in reads]\n # extract genome_id, genomic start and end positions of the reads\n reads_info = []\n for r in reads:\n r_info = re.split(\">|:|\\.\\.\", r.id)\n reads_info.append([r_info[0], int(r_info[1]), int(r_info[2])])\n\n # create output directory\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n # Specify input and output of the network\n\n if tf.executing_eagerly():\n model = tf.keras.Model(model.inputs,\n (model.get_layer(index=conv_layer_idx).get_output_at(0),\n model.get_layer(index=conv_layer_idx).get_output_at(1)))\n iterate_fwd = None\n iterate_rc = None\n else:\n # Specify input and output of the network\n input_img = model.layers[0].input\n\n layer_output_fwd = model.get_layer(index=conv_layer_idx).get_output_at(0)\n iterate_fwd = K.function([input_img, K.learning_phase()],\n [layer_output_fwd])\n\n layer_output_rc = model.get_layer(index=conv_layer_idx).get_output_at(1)\n # index at fwd_output = output size - index at rc_output\n iterate_rc = K.function([input_img, K.learning_phase()],\n [layer_output_rc])\n\n 
print(\"Computing activations ...\")\n chunk_size = args.chunk_size\n n = 0\n all_filter_rows_fwd = None\n all_filter_rows_rc = None\n filter_range = None\n\n while n < total_num_reads:\n print(\"Done \"+str(n)+\" from \"+str(total_num_reads)+\" sequences\")\n samples_chunk = samples[n:n+chunk_size, :, :]\n reads_info_chunk = reads_info[n:n+chunk_size]\n if tf.executing_eagerly():\n activations_fwd, activations_rc = model(samples_chunk, training=False)\n activations_fwd = activations_fwd.numpy()\n activations_rc = activations_rc.numpy()\n else:\n activations_fwd = iterate_fwd([samples_chunk, 0])[0]\n activations_rc = iterate_rc([samples_chunk, 0])[0]\n\n n_filters = activations_fwd.shape[-1]\n if args.inter_neuron is not None:\n filter_range = args.inter_neuron\n else:\n filter_range = range(n_filters)\n if all_filter_rows_fwd is None:\n all_filter_rows_fwd = [[] for f in range(n_filters)]\n all_filter_rows_rc = [[] for f in range(n_filters)]\n get_activation_data(activations_fwd, filter_range, all_filter_rows_fwd, reads_info_chunk,\n pad_left, motif_length, rc=False)\n get_activation_data(activations_rc, filter_range, all_filter_rows_rc, reads_info_chunk,\n pad_left, motif_length, rc=True)\n\n n += chunk_size\n\n print(\"Done \" + str(total_num_reads) + \" sequences. Saving data...\")\n\n for filter_index in filter_range:\n rows_fwd = pd.concat(all_filter_rows_fwd[filter_index], ignore_index=True)\n rows_rc = pd.concat(all_filter_rows_rc[filter_index], ignore_index=True)\n filter_bed_file = args.out_dir + \"/\" + test_data_set_name + \"_filter_\" + str(filter_index) + \".bed\"\n # sort by sequence and filter start position\n rows_fwd = rows_fwd.sort_values(['region', 'start', 'end', 'activation'], ascending=[True, True, True, False])\n rows_rc = rows_rc.sort_values(['region', 'start', 'end', 'activation'], ascending=[True, True, True, False])\n # remove duplicates (due to overlapping reads) or take max of two scores at the same genomic position\n # (can occur if filter motif is recognized at the border of one read)\n rows_fwd = rows_fwd.drop_duplicates(['region', 'start', 'end'])\n rows_rc = rows_rc.drop_duplicates(['region', 'start', 'end'])\n\n all_rows = pd.concat([rows_fwd, rows_rc], ignore_index=True)\n all_rows['activation'] = all_rows['activation'].apply(lambda x: round(x, 3 - int(floor(log10(abs(x))))))\n all_rows.to_csv(filter_bed_file, sep=\"\\t\", index=False, header=False)", "title": "" }, { "docid": "276aedc44f1905c073c89c1be231f745", "score": "0.52022505", "text": "def eval_bald_single(img, models, args):\n for model in models:\n model.eval()\n\n if args.cuda:\n img = img.cuda()\n\n # Sample data for this image.\n ts = sample_eig_data(\n img.unsqueeze(0),\n models,\n args,\n samples_per_batch=100,\n batches_per_model=1,\n importance_sampling=True,\n return_tensors=True,\n )\n caps, caplens, scores, targets, idxs, idxs_onehot, weights = ts\n\n # part 1 - marginal entropy\n mean_scores = -(torch.logsumexp(scores, 1) - np.log(args.ensemble))\n marginal_entropy = mean_scores.mean().item()\n\n # part 2 - mean entropy for individual members of the ensemble\n # E_\\theta H(Y | X, \\theta)\n # -> 1/M \\sum_m H(Y | X, \\theta_m)\n # -> 1/M \\sum_m ( E_Y [ -log p(Y | X, \\theta_m) ] )\n # -> 1/M \\sum_m ( 1/K \\sum_k -log p(Y_k | X, \\theta_m ) )\n entropies = []\n for m in range(args.ensemble):\n scores_m = scores[targets == m, m]\n entropy_m = -(scores_m.mean().item())\n entropies.append(entropy_m)\n mean_entropy = np.mean(entropies)\n\n return marginal_entropy - mean_entropy", 
"title": "" }, { "docid": "a909eb80048c2ceec816844de93db67f", "score": "0.5200462", "text": "def contrast(img):\n\n # image brightness enhancer\n enhancer = ImageEnhance.Contrast(img)\n\n # contrast factor\n factor = 1.4\n img_contrast = enhancer.enhance(factor)\n return img_contrast", "title": "" }, { "docid": "a9293400383b8ae2dfcb68c72ba96cd5", "score": "0.520014", "text": "def do_correction(model):\n\n # Get the meta data values that we need\n nframes = model.meta.exposure.nframes\n frame_divisor = model.meta.exposure.frame_divisor\n if nframes is None or frame_divisor is None:\n log.warning('Necessary meta data not found')\n log.warning('Step will be skipped')\n model.meta.cal_step.group_scale = 'SKIPPED'\n return\n\n log.info('NFRAMES={}, FRMDIVSR={}'.format(nframes, frame_divisor))\n log.info('Rescaling all groups by {}/{}'.format(frame_divisor, nframes))\n\n # Apply the rescaling to the entire data array\n scale = float(frame_divisor) / nframes\n model.data *= scale\n model.meta.cal_step.group_scale = 'COMPLETE'\n\n return", "title": "" }, { "docid": "9cc16c535f280e6d0d62eb924964b46e", "score": "0.5198713", "text": "def get_contrast_lookup():\n contrasts = dict()\n for task in CognitiveAtlasTask.objects.all():\n task_contrasts = CognitiveAtlasContrast.objects.filter(task=task)\n contrasts[task.pk] = [\n {\"name\": contrast.name, \"value\": contrast.pk} for contrast in task_contrasts\n ]\n return contrasts", "title": "" }, { "docid": "c0a0e8ba70618abb8bc4cd532ca4cbf1", "score": "0.51907533", "text": "def do_everything(self):\n\t\tself.build_weights()\n\t\tself.train_per_epo = self.iterative_train(EPOCHS)\n\t\tself.test_score, self.con_mat = self.test()\n\t\tself.report()", "title": "" }, { "docid": "89df50d04f5a2c11bd70b02550f65245", "score": "0.5190434", "text": "def run_model():\r\n classifier_model(df, input1, input2, input3, input4, input5, input6, input7, input8, input9, input10,\r\n input11, input12, input13, input14, input15, input16, input17, input18, input19, input19_0,\r\n input19_1, input19_2, input19_3, input19_4, input20, input20_1, input20_2, input20_3, input20_4)", "title": "" }, { "docid": "8c1a1b15058f58ea65244b4fc5ba80c9", "score": "0.51877135", "text": "def contrast_list(var, images):\n alpha = 1.0 + np.random.uniform(-var, var)\n\n out_images = []\n for image in images:\n img_gray = grayscale(image)\n img_gray.fill(np.mean(img_gray[0]))\n out_images.append(blend(image, img_gray, alpha))\n return out_images", "title": "" }, { "docid": "2987368009533436930f2bf09e701c42", "score": "0.5181369", "text": "def run_model(self, case):", "title": "" }, { "docid": "ad23db58635eaf1b347136ab2050c21a", "score": "0.5180428", "text": "def eval_models(models, interactions, membership=None, n_folds=5):\n test_acc = np.zeros((len(models), n_folds))\n test_auc = np.zeros((len(models), n_folds))\n # iterate over the models\n kfold = KFold(interactions.shape[0], n_folds, shuffle=True)\n for k, (train_idxs, test_idxs) in enumerate(kfold):\n for j, model in enumerate(models):\n print(\"Fold: \" + str(k) + \" Model: \" + str(type(model)))\n # treat IPM differently because it has a different api\n if type(model) == IdealPointModel:\n vb = VB(maxLaps=30)\n # run variational inference\n vb.run(model, interactions[train_idxs, :])\n probs = model.predict_proba(interactions[test_idxs, :2])\n elif type(model) == LCIPM:\n vb = VB(maxLaps=150)\n # run variational inference\n vb.run(model, (interactions[train_idxs, :], membership),\n save=True, outdir=\"cv_estimates\")\n probs = 
model.predict_proba(interactions[test_idxs, :2])\n # if comparing to the all yes model predict 1 for everything\n elif model == \"yes\":\n probs = np.ones(len(test_idxs))\n else:\n X_sp, y = to_sparse_features(interactions)\n # fit the model\n model.fit(X_sp[train_idxs, :], y[train_idxs])\n # evaluate the model\n probs = model.predict_proba(X_sp[test_idxs, :])[:, 1]\n # test accuracy\n acc = np.mean((probs > 0.5) == interactions[test_idxs, 2])\n # test auc\n auc = metrics.roc_auc_score(interactions[test_idxs, 2], probs)\n test_acc[j, k] = acc\n test_auc[j, k] = auc\n # save after each fold just in case\n np.savetxt(\"test_acc.dat\", test_acc)\n np.savetxt(\"test_auc.dat\", test_auc)\n return(test_acc, test_auc)", "title": "" }, { "docid": "c3cc42c83a2284b77bddd5fde3ae57bc", "score": "0.5173501", "text": "def contrast(self, contrast):\n self._contrast = contrast", "title": "" }, { "docid": "33a633ed44af6b94c74f201c43e4b679", "score": "0.51698077", "text": "def test_blend_model_predict(load_setup, load_models):\n exp = load_setup\n models = load_models\n random.seed(42)\n weights = [random.uniform(0, 1) for _ in range(len(models))]\n\n # -------------------------------------------------------------------------#\n # Prediction should be different for different methods\n # -------------------------------------------------------------------------#\n mean_blender = exp.blend_models(models, method=\"mean\")\n gmean_blender = exp.blend_models(models, method=\"gmean\")\n median_blender = exp.blend_models(models, method=\"median\")\n min_blender = exp.blend_models(models, method=\"min\")\n max_blender = exp.blend_models(models, method=\"max\")\n\n mean_blender_w_wts = exp.blend_models(models, method=\"mean\", weights=weights)\n gmean_blender_w_wts = exp.blend_models(models, method=\"gmean\", weights=weights)\n median_blender_w_wts = exp.blend_models(models, method=\"median\", weights=weights)\n min_blender_w_wts = exp.blend_models(models, method=\"min\", weights=weights)\n max_blender_w_wts = exp.blend_models(models, method=\"max\", weights=weights)\n\n mean_blender_pred = exp.predict_model(mean_blender)\n gmean_blender_pred = exp.predict_model(gmean_blender)\n median_blender_pred = exp.predict_model(median_blender)\n min_blender_pred = exp.predict_model(min_blender)\n max_blender_pred = exp.predict_model(max_blender)\n\n mean_blender_w_wts_pred = exp.predict_model(mean_blender_w_wts)\n gmean_blender_w_wts_pred = exp.predict_model(gmean_blender_w_wts)\n median_blender_w_wts_pred = exp.predict_model(median_blender_w_wts)\n min_blender_w_wts_pred = exp.predict_model(min_blender_w_wts)\n max_blender_w_wts_pred = exp.predict_model(max_blender_w_wts)\n\n different_preds = [\n mean_blender_pred,\n gmean_blender_pred,\n median_blender_pred,\n min_blender_pred,\n max_blender_pred,\n mean_blender_w_wts_pred,\n gmean_blender_w_wts_pred,\n median_blender_w_wts_pred,\n ]\n\n for i, _ in enumerate(different_preds):\n for j in range(i + 1, len(different_preds)):\n assert not np.array_equal(different_preds[i], different_preds[j])\n\n # -------------------------------------------------------------------------#\n # Prediction for some methods should not be impacted by weights\n # e.g. 
min, max\n # -------------------------------------------------------------------------#\n assert np.array_equal(\n min_blender_pred, min_blender_w_wts_pred\n ), \"min blender predictions with and without weights are not the same\"\n assert np.array_equal(\n max_blender_pred, max_blender_w_wts_pred\n ), \"max blender predictions with and without weights are not the same\"", "title": "" }, { "docid": "3afdb7373d769783b88b790449f4f52a", "score": "0.51681393", "text": "def test_conditionalVAE():\n train_n = 200\n test_n = 50\n num_cat = 3\n\n module = importlib.import_module(\"multiviewae\")\n\n tests = {\n \"\" : [[10, 12], MODELS], \n \"./user_config/condae.yaml\" : [[10, 12], VARIATIONAL_MODELS], \n }\n\n module = importlib.import_module(\"multiviewae\")\n for cfg, [dim, models] in tests.items():\n train_data = []\n test_data = []\n for d in dim:\n if isinstance(d, int):\n train_data.append(np.random.rand(train_n, d))\n test_data.append(np.random.rand(test_n, d))\n else:\n train_data.append(np.random.rand(train_n, *d))\n test_data.append(np.random.rand(test_n, *d))\n train_y = np.random.randint(num_cat, size=train_n)\n test_y = np.random.randint(num_cat, size=test_n )\n\n for m in models:\n class_ = getattr(module, m)\n if len(cfg) != 0:\n model1 = class_(cfg=abspath(join(dirname( __file__ ), cfg)), input_dim=dim)\n else:\n model1 = class_(input_dim=dim)\n\n model1.fit(*train_data, labels=train_y) \n model1.fit(*train_data, labels=train_y, max_epochs=5, batch_size=10)\n\n print(\"RESULTS: \", m)\n latent = model1.predict_latents(*test_data, labels=test_y)\n print_results(\"latent\", latent)\n recon = model1.predict_reconstruction(*test_data, labels=test_y)\n print_results(\"recon\", recon)\n\n latent = model1.predict_latents(*test_data, labels=test_y, batch_size=10)\n print_results(\"latent\", latent)\n recon = model1.predict_reconstruction(*test_data, labels=test_y, batch_size=5)\n print_results(\"recon\", recon)", "title": "" }, { "docid": "a3608b06de1f8223a7ff807cf4a71265", "score": "0.5167968", "text": "def fuse_model(self):\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()", "title": "" }, { "docid": "a3608b06de1f8223a7ff807cf4a71265", "score": "0.5167968", "text": "def fuse_model(self):\n for m in self.modules():\n if type(m) == QuantizableBasicConv2d:\n m.fuse_model()", "title": "" }, { "docid": "52860b07e5766aaa552446caf8405f4e", "score": "0.5167696", "text": "def train(self):\n for ep in range(int(self.num_ep)):\n state = self.env.reset()\n state = np.reshape(state, [1, self.obs_dim])\n tot_rew = 0\n done = False\n t = 0\n # Iterate over timesteps\n while t < 1000:\n t += 1\n if self.render:\n self.env.render()\n act = self.pick_action(state)\n\n # Get next state and reward from environment\n next_state, rew, done, info = self.env.step(act)\n next_state = np.reshape(next_state, [1, self.obs_dim])\n # Store transition in memory\n transition = deque((state, act, rew, next_state, done))\n self.store_memory(transition)\n \n tot_rew += rew\n state = next_state\n if done:\n print(\"\\nEpisode: {}/{}, score: {}\"\n .format(ep, self.num_ep, t))\n break\n if len(self.memory) > self.batch_size:\n self.replay()\n\n self.rew_list.append(tot_rew)", "title": "" }, { "docid": "8ad59bfe8bcd4ac8e98887f298dae1e6", "score": "0.5153215", "text": "def colorize(config, work_dir):\n config.__setattr__('continue', False)\n config.__setattr__('model_save', False)\n config.__setattr__('model_epochs', 1)\n config.__setattr__('images_output_enable', True)\n 
config.__setattr__('images_output_step', 1)\n Train.run(TrainConfigs(Config(config)), work_dir=work_dir, predict=True)", "title": "" }, { "docid": "544c25594671c3084a04ba45c5e280b1", "score": "0.5136239", "text": "def routine(self):\n\n self.get_ml_algs()\n self.get_train_ratio()\n self.get_cv_check()\n self.get_cv_k()\n self.get_meta_model_check()\n self.get_meta_model()\n self.train()", "title": "" }, { "docid": "348d861c6f51ff7aa9ed710a10aff0df", "score": "0.5133507", "text": "def _augment(self, img):\n return ImageOps.autocontrast(img)", "title": "" }, { "docid": "cc16c759cd884ad47989ecbb9354d514", "score": "0.5131775", "text": "def exercise_all_models(lzobj, sel='all'):\n if sel == 'stc':\n myiter = CML_STC_MODELS.items()\n elif sel == 'inc':\n myiter = CML_INC_MODELS.items()\n else: # Exercise all models\n myiter = dict(CML_STC_MODELS, **CML_INC_MODELS).items()\n\n for model, attr in myiter:\n GC.conf['loglizer']['model'] = model\n GC.conf['loglizer']['window_size'] = attr['window_size']\n GC.conf['loglizer']['window_step'] = attr['window_step']\n\n if GC.conf['general']['training']:\n lzobj.train()\n elif GC.conf['general']['metrics']:\n lzobj.evaluate()\n else:\n lzobj.predict()", "title": "" }, { "docid": "f829e96909fa3f5a168fffbcd36db92f", "score": "0.5126786", "text": "def test_blend_models_tuning():\n data = get_data(\"airline\", verbose=False)\n\n exp = TSForecastingExperiment()\n exp.setup(data=data, fh=12, fold=2, session_id=42)\n model1 = exp.create_model(\"naive\")\n model2 = exp.create_model(\"ets\")\n model3 = exp.create_model(\"lr_cds_dt\")\n blender = exp.blend_models([model1, model2, model3])\n _, tuner = exp.tune_model(blender, return_tuner=True)\n\n assert len(pd.DataFrame(tuner.cv_results_)) > 1", "title": "" } ]
4bd164fe888f4e8dd49dd0473517a0a0
Given a string rewrite the string as a block of text and return the columns of the block.
[ { "docid": "c6c9c634f9c1099d6dda62c15c508850", "score": "0.0", "text": "def encrypt(str):\n length = int(math.floor(len(str) ** 0.5))\n width = int(math.ceil(len(str) ** 0.5))\n while len(str) > length*width:\n length += 1\n str = str.ljust(length*width)\n\n result = []\n for i in range(width):\n result.append(str[i::width].replace(' ', ''))\n\n return result", "title": "" } ]
[ { "docid": "36965b3d2c211f4629000c582719004e", "score": "0.6524146", "text": "def convert(string):\n string = string.upper()\n rows = [\"\", \"\", \"\", \"\", \"\"]\n\n for i in range(len(string)):\n b_char = BLOCK_CHAR[string[i]]\n for i in range(5):\n rows[i] += b_char[i]\n\n return rows", "title": "" }, { "docid": "91fa1cfe27664214c3c92179ee706fdc", "score": "0.59150475", "text": "def get_col(self, s):\n i = 0\n in_parens = 0\n col = \"\"\n while True:\n c = s[i]\n col += c\n if c == \"(\":\n in_parens += 1\n elif c == \")\":\n in_parens -= 1\n if c == \",\" and not in_parens:\n yield col[:-1].strip()\n col = \"\"\n i += 1\n if i >= len(s):\n break\n if col:\n yield col.strip()\n return None", "title": "" }, { "docid": "b708844bfdfd93b5cb59532d61041a7b", "score": "0.5762054", "text": "def find_block_contents(self, blocks, output_string):\n\n block_desire = \"\"\n start_block = False\n delimiter = \"-\"\n\n block_strings = []\n for line in str.splitlines(output_string):\n\n if re.match(r'\\s' + delimiter + r'\\w+', line) and start_block:\n block_strings.append(block_desire)\n block_desire = \"\"\n start_block = False\n\n if not start_block:\n for block in blocks:\n if re.match(r'\\s*' + delimiter + \"{}\".format(block.block_name) + r'(\\s+|:)', line):\n start_block = True\n break\n\n if start_block:\n block_desire += line + \"\\n\"\n\n return block_strings", "title": "" }, { "docid": "72a88ab5cc27014b6658ff01f8b8ab1d", "score": "0.57058614", "text": "def createColumns(script):\n speaker = list()\n text = list()\n data = getTextBlock(script)\n for block in data:\n # reduce cases of empty block of data\n if not len(block):\n continue\n # separate every block to lines\n line = block.split('\\n')\n # reduce spaces\n for i in range(len(line)):\n line[i] = line[i].lstrip()\n line[i] = line[i].rstrip()\n # line[0] = speaker and above that is the text\n speaker.append(line[0])\n text.append(line[1])\n # print(line[1])\n for i in range(2,len(line)):\n text.append(text.pop() + line[i])\n return text, speaker", "title": "" }, { "docid": "f2d2478a52388be657a1c3d5c02f89ab", "score": "0.5692208", "text": "def parse_columns(format_string):\n offset = None\n\n if 'x' in format_string:\n string = format_string.split('x')[0].strip()\n elif ',' in format_string:\n string = format_string.split(',')[0].strip()\n else:\n raise Exception('Cannot parse formatting string {}.'.format(format_string))\n\n if 'o' in string:\n s = string.split('o')\n string = s[0].strip()\n offset = s[1].strip()\n\n final_string = COLUMNS[string]\n\n if offset is not None:\n final_offset = OFFSETS[offset]\n final_string = final_string + ' ' + final_offset\n\n return final_string", "title": "" }, { "docid": "26a17219e394a34feecc2f40cf16c770", "score": "0.5598258", "text": "def find_column(inputstr, token):\n last_cr = inputstr.rfind('\\n', 0, token.lexpos)\n if last_cr < 0:\n last_cr = 0\n column = (token.lexpos - last_cr) + 1\n return column", "title": "" }, { "docid": "9ee8ff0a39201f75882c6309c1f0faa2", "score": "0.54030955", "text": "def split_into_columns(s):\n\ts = re.sub(',,,', ',0,0,', s)\n\ts = re.sub(',,', ',0,', s)\n\treturn s.split(',')", "title": "" }, { "docid": "6818abe5106cacaaa381d888f804c2e1", "score": "0.52669144", "text": "def wrap_text(text, ncolumns):\n def list_wrapped_lines():\n \"\"\"Parses the input text into a stream of lines.\"\"\"\n for line in text.split('\\n'):\n while len(line) > ncolumns:\n yield line[:ncolumns]\n line = line[ncolumns:]\n yield line\n return '\\n'.join(list_wrapped_lines())", "title": 
"" }, { "docid": "ec4ee0e1ea488e1e613e9eec46d754e8", "score": "0.5225541", "text": "def _split_text(text: str, nrows: int, ncols: int,\n strong: str = const.C_HIGHLIGHT) -> list[str]:\n lines = text.splitlines()\n result = []\n for line in lines:\n current_line = ''\n line_words = line.split(' ')\n for word in line_words:\n cl_len = len(current_line.replace(strong, '')\n + word.replace(strong, ''))\n if current_line and cl_len > ncols:\n result.append(current_line.rstrip())\n current_line = word + ' '\n else:\n current_line += word + ' '\n result.append(current_line.rstrip())\n # Check for height.\n if len(result) > nrows:\n result = result[:nrows]\n result[nrows-1] = result[nrows-1][:ncols-3] + '...'\n # Check for width.\n for i, line in enumerate(result):\n if len(line.replace(strong, '')) > ncols:\n result[i] = line[:ncols-3] + '...'\n return result", "title": "" }, { "docid": "196e6d017bc51d5800adce956ad1d1ca", "score": "0.51373565", "text": "def convert_to_text_blocks(dataframe, block_size):\n # initialize output data dictionary\n block_dict = {'No.':[], 'Author':[], 'Text Block':[]}\n #iterate through all documents\n for i in range(dataframe.shape[0]):\n author = dataframe['Author'][i]\n no = dataframe['No.'][i]\n sentence_list = tokenize.sent_tokenize(dataframe['Text'][i])\n sentence_count = len(sentence_list)\n # iterate through sentences in each document and create blocks\n for j in range(0, sentence_count, block_size):\n idx1 = j\n idx2 = j + block_size\n if idx2 <= sentence_count:\n doc_block_list = sentence_list[idx1:idx2]\n doc_block = ' '.join(doc_block_list)\n #collect information\n block_dict['No.'].append(no)\n block_dict['Author'].append(author)\n block_dict['Text Block'].append(doc_block)\n # create output dataframe\n dataframe_text_blocks = pd.DataFrame(block_dict)\n return dataframe_text_blocks", "title": "" }, { "docid": "2942c45294da653d9a736c55920366b6", "score": "0.5116006", "text": "def split_into_rows(s):\n\treturn s.split('\\n')", "title": "" }, { "docid": "0167e5ef7dfcff4431bfbd812bad8473", "score": "0.5090932", "text": "def parse_columns(self, line, offset):\r\n cols = []\r\n end = 0\r\n while 1:\r\n begin = line.find('-', end)\r\n end = line.find(' ', begin)\r\n if begin < 0:\r\n break\r\n if end < 0:\r\n end = len(line)\r\n cols.append((begin, end))\r\n if self.columns:\r\n if cols[-1][1] != self.border_end:\r\n raise TableMarkupError('Column span incomplete at line '\r\n 'offset %s.' 
% offset)\r\n # Allow for an unbounded rightmost column:\r\n cols[-1] = (cols[-1][0], self.columns[-1][1])\r\n return cols", "title": "" }, { "docid": "5859b44e5b34296751455511f8b654f8", "score": "0.5050602", "text": "def code_blocks_from_str(doc):\r\n out = []\r\n for i in _code_blocks_re.finditer(doc):\r\n block = i.group(2)\r\n if \"import vtkplotlib as vpl\" in block:\r\n first_line_no = doc[:i.start(1)].count(\"\\n\")\r\n out.append((first_line_no, block))\r\n return out", "title": "" }, { "docid": "727b36b4dc6610dd238b58cb295d7397", "score": "0.50482917", "text": "def column_words(solution, width):\n for col in range(width):\n yield tuple(row[col] for row in solution)", "title": "" }, { "docid": "107876b721c7e7dc51c2d9f25af26f15", "score": "0.50149155", "text": "def detect_bedcov_columns(text):\n firstline = text[: text.index(\"\\n\")]\n tabcount = firstline.count(\"\\t\")\n if tabcount < 3:\n raise RuntimeError(f\"Bad line from bedcov:\\n{firstline!r}\")\n if tabcount == 3:\n return [\"chromosome\", \"start\", \"end\", \"basecount\"]\n if tabcount == 4:\n return [\"chromosome\", \"start\", \"end\", \"gene\", \"basecount\"]\n # Input BED has arbitrary columns after 'gene' -- ignore them\n fillers = [f\"_{i}\" for i in range(1, tabcount - 3)]\n return [\"chromosome\", \"start\", \"end\", \"gene\"] + fillers + [\"basecount\"]", "title": "" }, { "docid": "f70be4ffbd6466932963bf2e432714f8", "score": "0.5013119", "text": "def column_one(text):\r\n #creates an empty list_of_words\r\n list_of_words = [] \r\n #split string into lines\r\n lines = text.splitlines()\r\n # create in for-loop: takes text in format\r\n for line in lines:\r\n #take each line and split it into its constituent fields\r\n fields = line.split(\"\\t\") #\"\\t\" signifies tab character, instead of comma-separated\r\n #take the first field from each line \r\n item1 = fields[0]\r\n #append it to a list\r\n list_of_words.append(item1)\r\n\r\n return \" \".join(list_of_words)", "title": "" }, { "docid": "cf2d842df656c213ebf4b2ce74972bca", "score": "0.5002569", "text": "def format_blocks(self):\n\n block_text = []\n for el, text in self._block_text.items():\n content = ''.join(text)\n if content:\n block_text.append((content, self.construct_selector(el)))\n return block_text", "title": "" }, { "docid": "d3569078f46246e47b08803e7bfdd6d2", "score": "0.499414", "text": "def block(self, text):\r\n tre = '|'.join(self.btag)\r\n text = text.split('\\n\\n')\r\n\r\n tag = 'p'\r\n atts = cite = graf = ext = ''\r\n\r\n out = []\r\n\r\n anon = False\r\n for line in text:\r\n pattern = r'^(%s)(%s%s)\\.(\\.?)(?::(\\S+))? 
(.*)$' % (tre, self.a, self.c)\r\n match = re.search(pattern, line, re.S)\r\n if match:\r\n if ext:\r\n out.append(out.pop() + c1)\r\n\r\n tag,atts,ext,cite,graf = match.groups()\r\n o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, graf)\r\n # leave off c1 if this block is extended, we'll close it at the start of the next block\r\n if ext:\r\n line = \"%s%s%s%s\" % (o1, o2, content, c2)\r\n else:\r\n line = \"%s%s%s%s%s\" % (o1, o2, content, c2, c1)\r\n\r\n else:\r\n anon = True\r\n if ext or not re.search(r'^\\s', line):\r\n o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, line)\r\n # skip $o1/$c1 because this is part of a continuing extended block\r\n if tag == 'p' and not self.hasRawText(content):\r\n line = content\r\n else:\r\n line = \"%s%s%s\" % (o2, content, c2)\r\n else:\r\n line = self.graf(line)\r\n\r\n line = self.doPBr(line)\r\n line = re.sub(r'<br>', '<br />', line)\r\n\r\n if ext and anon:\r\n out.append(out.pop() + \"\\n\" + line)\r\n else:\r\n out.append(line)\r\n\r\n if not ext:\r\n tag = 'p'\r\n atts = ''\r\n cite = ''\r\n graf = ''\r\n\r\n if ext:\r\n out.append(out.pop() + c1)\r\n return '\\n\\n'.join(out)", "title": "" }, { "docid": "840024d89f0b4b84f3a58cdd7cb3ff98", "score": "0.49741098", "text": "def markblocks(codestr):\r\n \r\n from .se import (\r\n HAS_JUMP,\r\n GETJUMPTGT,\r\n HAVE_ARGUMENT)\r\n \r\n blocks = [0] * len(codestr)\r\n i = 0\r\n codelen = len(codestr)\r\n while i<codelen:\r\n op = codestr[i]\r\n if op in HAS_JUMP:\r\n j = GETJUMPTGT(codestr, i)\r\n blocks[j] = 1\r\n i+=(3 if (op>=HAVE_ARGUMENT) else 1)\r\n i=0\r\n blockcnt=0\r\n while i<codelen:\r\n blockcnt+=blocks[i]\r\n blocks[i] = blockcnt\r\n i+=1\r\n return blocks", "title": "" }, { "docid": "f486ccb5ed521a50d794de7e0de4d38b", "score": "0.49468446", "text": "def split_string(string, record, start):\n\n space = 81 - start\n if len(string) <= space:\n return [\"{}{}\".format(record.ljust(start - 1), string).ljust(80)]\n else:\n words = string.split(\" \")\n lines, line = [], record.ljust(start - 1)\n while words:\n if len(line) + len(words[0]) > 79:\n lines.append(line[:80].ljust(80))\n line = \"{}{:<2}{} \".format(\n record.ljust(start - 2), len(lines) + 1, words.pop(0).lstrip()\n )\n else:\n line += words.pop(0).lstrip() + \" \"\n if len(line.rstrip()) > 10: lines.append(line[:80].ljust(80))\n return lines", "title": "" }, { "docid": "bae7adeba66dcc7b5a2a250abba5e51c", "score": "0.4944153", "text": "def parse(text):\n parser = Parser(SqlLexer, [FunctionBlock])\n return parser.parse(text)", "title": "" }, { "docid": "14e5d52337616b465a42fc9cb798addb", "score": "0.49416783", "text": "def table_data(self, text, delimiter):\n #divide our text into rows and columns and set our row, col, width vars\n lines = text.rsplit(\"\\n\")\n self.row = len(lines)\n self.col = 0\n \n rows = [] #contains rows, each row contains columns\n \n for line in lines:\n rows.append(line.rsplit(delimiter)) #separate rows by column\n if len(rows[-1]) > self.col: #get maximum column count for a row\n self.col = len(rows[-1])\n \n self.width = 0\n \n for row in rows:\n for col in row:\n if len(col) > self.width: #get maximum column width\n self.width = len(col)\n \n self.width += 2 #add cushioning\n \n #construct table around text\n pieces = []\n \n for i in range(self.row): #begin normal construction\n if i != 0:\n pieces.append(self.horizontal(False))\n else:\n if self.has_outer:\n pieces.append(self.horizontal(True))\n \n pieces.append(self.vertical_data(rows[i])) #add col data as vertical\n \n if 
self.height > 1:\n for i in range(self.height - 1): #more empty vertical space\n pieces.append(self.vertical())\n \n if self.has_outer:\n pieces.append(self.horizontal(True))\n \n return '\\n'.join(pieces)", "title": "" }, { "docid": "4f9350e41526d2c5d3905a402d738d69", "score": "0.49346864", "text": "def split_in_columns(message=message):\n split_message = message.split('\\n')\n pipe_message = '|'.join(split_message)\n\n return pipe_message", "title": "" }, { "docid": "6d25a11f2caf7252db5fffaafec2f745", "score": "0.4921651", "text": "def list_wrapped_lines():\n for line in text.split('\\n'):\n while len(line) > ncolumns:\n yield line[:ncolumns]\n line = line[ncolumns:]\n yield line", "title": "" }, { "docid": "e1a176bc1a938d7ed49585caa6f4dd2e", "score": "0.4906604", "text": "def print_block(b_str):\n for line in b_str:\n print line", "title": "" }, { "docid": "7c47ec171bcf73fe39af3647a2fcb88a", "score": "0.487602", "text": "def parse(self, text):\n lines = text.splitlines()\n i = 0\n total_lines = len(lines)\n blocks = list()\n block = None\n while i < total_lines:\n self.current_line_number = i\n self._set_previous_line(total_lines, lines, i)\n self._set_next_line(total_lines, lines, i)\n current_line = str(lines[i]).strip()\n\n if self.is_start_of_block(current_line, i) and self.has_block_ended:\n self.has_block_started = True\n self.has_block_ended = False\n\n if self.has_block_started and not self.has_block_ended:\n # Block has been initialize now we can add lines to block\n block = ('' if block is None else block) + current_line\n else:\n py_logger.info(\"Ignoring line {}\".format(current_line))\n\n if self.is_end_of_block(current_line, i):\n blocks.append(block.strip())\n self.has_block_started = False\n self.has_block_ended = True\n block = None\n if block is not None:\n block = block + self.newline_if_required()\n i += 1\n return blocks", "title": "" }, { "docid": "62ffc1ab1d3144e7ff25f7c80ce87d0c", "score": "0.48750138", "text": "def blockify(text, block_size=BLOCK_SIZE):\r\n \r\n blocks = {}\r\n for i in range(0, len(text), block_size):\r\n blocks[int(i/block_size)] = text[i : i + block_size]\r\n return blocks", "title": "" }, { "docid": "3ff72e540e950a986ee85a5609c17097", "score": "0.4844037", "text": "def formattedComments(textstring):\n if not textstring or len(textstring) > ROWSIZE + 1 or textstring[0].strip() != \"#\": return False\n strippedtext = textstring.strip(\"#\")\n if settings[\"page_guide\"] and settings[\"page_guide\"] > 20: comment_width = settings[\"page_guide\"]\n else: comment_width = ROWSIZE #changed from ROWSIZE - 1\n\n if strippedtext == \"\":\n tempText = \" \" * (comment_width)\n return tempText\n elif len(strippedtext) == 1:\n tempText = strippedtext * (comment_width)\n return tempText\n elif strippedtext.upper() == \"DEBUG\":\n textstring = \"**DEBUG**\"\n tempText = textstring.rjust(comment_width)\n return tempText\n else:\n try:\n if textstring[2] != \"#\" and textstring[-1] != \"#\": commentType = \"LEFT\"\n elif textstring[-1] != \"#\": commentType = \"RIGHT\"\n elif textstring[-1] == \"#\": commentType = \"CENTER\"\n else: commentType = \"LEFT\"\n except:\n commentType = \"LEFT\"\n #New formatting options\n if commentType == \"LEFT\":\n tempText = strippedtext.ljust(comment_width)\n return tempText\n elif commentType == \"CENTER\":\n tempText = strippedtext.center(comment_width)\n return tempText\n elif commentType == \"RIGHT\":\n tempText = strippedtext.rjust(comment_width)\n return tempText", "title": "" }, { "docid": 
"9f7559e26afbda3ee801a7501671f705", "score": "0.48172632", "text": "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2))\n\n return BlockArgs(\n num_repeat=int(options['r']),\n kernel_size=int(options['k']),\n stride=[int(x) for x in list(options['s'])],\n expand_ratio=int(options['e']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n se_ratio=float(options['se']) if 'se' in options else None,\n id_skip=('noskip' not in block_string))", "title": "" }, { "docid": "0cb43a4c07fdb2f9e14a75942db7741e", "score": "0.48095596", "text": "def findblocks(text):\n blocks = [[]]\n lines = text.splitlines()\n for line in lines:\n if line.strip():\n blocks[-1].append(line)\n elif blocks[-1]:\n blocks.append([])\n if not blocks[-1]:\n del blocks[-1]\n\n for i, block in enumerate(blocks):\n indent = min((len(l) - len(l.lstrip())) for l in block)\n blocks[i] = dict(indent=indent, lines=[l[indent:] for l in block])\n return blocks", "title": "" }, { "docid": "254732323f07419fc08b9a43b46b9aef", "score": "0.4801961", "text": "def parse(self, block):\r\n self.setup(block)\r\n self.find_head_body_sep()\r\n self.parse_table()\r\n structure = self.structure_from_cells()\r\n return structure", "title": "" }, { "docid": "3bde80ffcfa84c0b3dc34afbdfc8ad3b", "score": "0.47965056", "text": "def preprocess_string(string, skip_cuda_tests):\n codeblock_pattern = r\"(```(?:python|py)\\s*\\n\\s*>>> )((?:.*?\\n)*?.*?```)\"\n codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)\n is_cuda_found = False\n for i, codeblock in enumerate(codeblocks):\n if \"load_dataset(\" in codeblock and \"# doctest: +IGNORE_RESULT\" not in codeblock:\n codeblocks[i] = re.sub(r\"(>>> .*load_dataset\\(.*)\", r\"\\1 # doctest: +IGNORE_RESULT\", codeblock)\n if (\n (\">>>\" in codeblock or \"...\" in codeblock)\n and re.search(r\"cuda|to\\(0\\)|device=0\", codeblock)\n and skip_cuda_tests\n ):\n is_cuda_found = True\n break\n\n modified_string = \"\"\n if not is_cuda_found:\n modified_string = \"\".join(codeblocks)\n\n return modified_string", "title": "" }, { "docid": "20d8931481231dd8b675355cf7beb7f5", "score": "0.47931948", "text": "def find_column_by_lexpos(lexpos):\n global _data\n # i = lexpos\n # while i > 0:\n # if _data[i] == '\\n':\n # break\n # i -= 1\n # return (lexpos - i)\n last_cr = _data.rfind('\\n', 0, lexpos) + 1\n return (lexpos - last_cr)", "title": "" }, { "docid": "78d156b161171f0cee61073581c20a2d", "score": "0.4791339", "text": "def _decode_block_string(block_string):\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])", "title": "" }, { 
"docid": "c8dd2137abc21f0666d54737a75b3762", "score": "0.47772062", "text": "def getTextFromBlocks(self, blockInts, blockSize):", "title": "" }, { "docid": "db750b58bba4b250f3afd43883babd73", "score": "0.47764218", "text": "def process_string(s, delimiter):\n lines = s.split('\\n')\n return process_lines(lines, delimiter)", "title": "" }, { "docid": "47c7d101014469a1a6171f598e54c901", "score": "0.47669333", "text": "def chunk_string(string, length):\n return (string[0+i:length+i] for i in range(0, len(string), length))", "title": "" }, { "docid": "c9a00f8e5e26bcf55078b886f30427b5", "score": "0.4744739", "text": "def _decode_block_string(self, block_string):\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n if 's' not in options or len(options['s']) != 2:\n raise ValueError('Strides options should be a pair of integers.')\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n strides=[int(options['s'][0]), int(options['s'][1])],\n conv_type=int(options['c']) if 'c' in options else 0)", "title": "" }, { "docid": "cca2c6a1c87cbdd7d5ac288316e8dcc6", "score": "0.4730149", "text": "def pos2d(s):\n column = ord(s[0]) - ord('a')\n row = ord(s[1]) - ord('1')\n return column, row", "title": "" }, { "docid": "dc12ce2ac043c4ebc4436f250125c66f", "score": "0.4723374", "text": "def cloc(string: str) -> int:\n return len(string.split(\"\\n\"))", "title": "" }, { "docid": "014da2166c2cde263fd613e6935a72a3", "score": "0.47139317", "text": "def maze_list(self, maze_str: str) -> list:\n out = []\n row = []\n for col in maze_str:\n if col != '\\n':\n row.append(col)\n elif len(row):\n out.append(row)\n row = []\n return out", "title": "" }, { "docid": "21756e24bbb63e8f35781f608dfd93a8", "score": "0.47084898", "text": "def _decode_block_string(self, block_string):\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n if 's' not in options or len(options['s']) != 2:\n raise ValueError('Strides options should be a pair of integers.')\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])", "title": "" }, { "docid": "2952a364a46336ab5db4fca74776b8e6", "score": "0.46978685", "text": "def stringToListOfBoxes(stringInput):\n initialList = stringInput.split(\"n\")[1:]\n intermidiateList = [initialList[i][9:] for i in range(len(initialList))]\n finalList = [intermidiateList[i].split(\" \")[:4] for i in range(len(intermidiateList))]\n return finalList", "title": "" }, { "docid": "6c405631bc88ae7af4222da819f55bab", "score": "0.46830165", "text": "def unaligned_get_dimension(text):\n\n xmax = 0\n ymax = 0\n ymax = 0\n angle = None\n halign = None\n valign = None\n\n itr = text_iterator(None)\n for line in string.split(str(text), \"\\n\"):\n cur_height = 0\n cur_width = 0\n\titr.reset(line)\n 
while 1:\n elem = itr.next()\n if not elem:\n break\n (font, size, line_height, color, new_h, new_v, new_a, chunk) = elem\n if halign != None and new_h != halign:\n raise Exception, \"Only one /h can appear in string '%s'.\" % str(text)\n if valign != None and new_v != valign:\n raise Exception, \"Only one /v can appear in string '%s'.\" % str(text)\n if angle != None and new_a != angle:\n raise Exception, \"Only one /a can appear in string '%s'.\" % str(text)\n halign = new_h\n valign = new_v\n angle = new_a\n cur_width = cur_width + line_width(font, size, chunk)\n cur_height = max(cur_height, line_height)\n xmax = max(cur_width, xmax)\n ymax = ymax + cur_height\n return (xmax, ymax,\n halign or theme.default_font_halign,\n valign or theme.default_font_valign,\n angle or theme.default_font_angle)", "title": "" }, { "docid": "8aaa2a3e3ed3551e8cdc753f2f9313d2", "score": "0.46737203", "text": "def collatz_read(s):\n a = s.split()\n return [int(a[0]), int(a[1])]", "title": "" }, { "docid": "617104229224c1f78a613f51795bc8a0", "score": "0.46723962", "text": "def split_in_columns(message=message):\n pipe = \"|\"\n message_split = message.split(\"\\n\")\n message_join = pipe.join(message_split)\n return message_join", "title": "" }, { "docid": "0cbfa86ece44eef32ebf6c7102e5fcde", "score": "0.46634212", "text": "def _table_columns(first_table_row):\n positions = []\n start = 1 # there is '+' at 0\n while start < len(first_table_row):\n end = first_table_row.find('+', start)\n if end == -1:\n break\n positions.append((start, end))\n start = end + 1\n return positions", "title": "" }, { "docid": "277100e24266708fa16c45fa007d12a6", "score": "0.4659557", "text": "def parse_lsblk_output(text):\n block_list = []\n\n lines = text.split(os.linesep)\n lines = [x.strip() for x in lines]\n lines = [x for x in lines if x]\n\n length_list = [len(line) for line in lines]\n space_idx_list = [i for i, x in enumerate(lines[0]) if x == \" \"]\n for idx in space_idx_list[:]:\n for i, line in enumerate(lines):\n if idx >= length_list[i]:\n continue\n if line[idx] != \" \":\n space_idx_list.remove(idx)\n break\n\n i = 0\n while i + 1 <= len(space_idx_list) - 1:\n first = space_idx_list[i]\n second = space_idx_list[i + 1]\n if first + 1 == second:\n space_idx_list.remove(first)\n else:\n i = i + 1\n # merge neighbouring space idx\n if 4 != len(space_idx_list):\n raise ValueError(\"space idx of heading error\")\n\n for line in lines:\n block = Block(line)\n block.name = line[:space_idx_list[0]].strip()\n block.fs = FileSystemEnum.from_str(line[space_idx_list[0]:space_idx_list[1]].strip())\n block.size = line[space_idx_list[1]:space_idx_list[2]].strip()\n block.type = line[space_idx_list[2]:space_idx_list[3]].strip()\n block.mount_point = line[space_idx_list[3]:].strip()\n block_list.append(block)\n\n # remove header\n block_list = block_list[1:]\n\n current_disk = None\n for i, block in enumerate(block_list):\n if block.is_disk():\n current_disk = block\n continue\n\n if block.is_partition() and current_disk:\n current_disk.partitions.append(block)\n continue\n\n return block_list", "title": "" }, { "docid": "3fa3e726f65d01964a92ee49b9889bb4", "score": "0.46509847", "text": "def extract_text_blocks(post_edit_pair):\n post_id, post_edit = post_edit_pair\n markdown_content = post_edit['Text']\n post_blocks = []\n\n lines = re.split(newline_regex, markdown_content)\n\n current_post_block = None\n previous_line = None\n code_block_ends_with_next_line = False\n\n in_code_tag_block = False\n in_stack_snippet_code_block = 
False\n in_script_tag_code_block = False\n in_alternative_code_block = False\n\n for line in lines:\n # ignore empty lines\n if not line:\n previous_line = line\n continue\n\n # end code block which contained a code tag in the previous line (see below)\n if code_block_ends_with_next_line:\n in_code_tag_block = False\n code_block_ends_with_next_line = False\n\n # check for indented code blocks (Stack Overflow's standard way)\n # even if tab is not listed here: http://stackoverflow.com/editing-help#code\n # we observed cases where it was important to check for the tab, sometimes preceded by spaces\n in_markdown_code_block = code_block_regex.match(line) is not None # only match beginning of line\n # check if line only contains whitespaces\n # (ignore whitespaces at the beginning of posts and not end blocks with whitespace lines)\n is_whitespace_line = whitespace_line_regex.fullmatch(line) is not None # match whole line\n # e.g. \"<!-- language: lang-js -->\" (see https://stackoverflow.com/editing-help#syntax-highlighting)\n is_snippet_language = snippet_language_regex.fullmatch(line) is not None # match whole line\n # in some posts an empty XML comment (\"<!-- -->\") is used to divide code blocks (see, e.g., post 33058542)\n is_snippet_divider = snippet_divider_regex.fullmatch(line) is not None # match whole line\n # in some cases, there are inline code blocks in a single line (`...`)\n is_inline_code_line = inline_code_line_regex.fullmatch(line) is not None # match whole line\n\n # if line is not part of a regular Stack Overflow code block, try to detect alternative code block styles\n if not in_markdown_code_block and not is_whitespace_line and not is_snippet_language:\n # see https://stackoverflow.blog/2014/09/16/introducing-runnable-javascript-css-and-html-code-snippets/\n # ignore stack snippet begin in post block version\n if stack_snippet_begin_regex.match(line): # only match beginning of line\n in_stack_snippet_code_block = True\n # remove stack snippet info from code block\n line = stack_snippet_begin_regex.sub(\"\", line)\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n # line only contained stack snippet begin\n continue\n\n # ignore stack snippet end in post block version\n if stack_snippet_end_regex.match(line): # only match beginning of line\n in_stack_snippet_code_block = False\n # remove stack snippet info from code block\n line = stack_snippet_end_regex.sub(\"\", line)\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n # line only contained stack snippet begin\n continue\n\n # code block that is marked by <pre><code> ... </pre></code> instead of indention\n if code_tag_begin_regex.match(line): # only match beginning of line\n # remove code tag from line\n line = code_tag_begin_regex.sub(\"\", line)\n in_code_tag_block = True\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n # line only contained opening code tags -> skip\n continue\n\n if code_tag_end_regex.match(line): # only match beginning of line\n # remove code tag from line\n line = code_tag_end_regex.sub(\"\", line)\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n # line only contained closing code tags -> close code block and skip\n in_code_tag_block = False\n continue\n else:\n # line also contained content -> close code block in next line\n code_block_ends_with_next_line = True\n\n # code block that is marked by <script...> ... 
</script> instead of correct indention\n if script_tag_begin_regex.match(line): # only match beginning of line\n # remove opening script tag\n line = script_tag_open_regex.sub(\"\", line, count=1)\n in_script_tag_code_block = True\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n # line only contained opening script tag -> skip\n continue\n\n if script_tag_end_regex.match(line): # only match beginning of line\n # remove closing script tag\n line = script_tag_close_regex.sub(\"\", line)\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n # line only contained closing script tag -> close code block and skip\n in_script_tag_code_block = False\n continue\n else:\n # line also contained content -> close script block in next line\n code_block_ends_with_next_line = True\n\n # see https://meta.stackexchange.com/q/125148\n # example: https://stackoverflow.com/posts/32342082/revisions\n if alternative_code_block_begin_regex.match(line): # only match beginning of line\n # remove first \"```\" from line\n line = alternative_code_block_begin_regex.sub(\"\", line, count=1)\n in_alternative_code_block = True\n # continue if line only contained \"```\"\n if not line.strip(): # if string empty after removing leading and trailing whitespaces\n continue\n else:\n if alternative_code_block_marker_regex.match(line):\n # alternative code block was inline code block (which should be part of a text block)\n line = alternative_code_block_marker_regex.sub(\"\", line)\n in_alternative_code_block = False\n\n if alternative_code_block_end_regex.match(line): # only match beginning of line\n # remove \"```\" from line\n line = alternative_code_block_marker_regex.sub(\"\", line)\n in_alternative_code_block = False\n\n if is_snippet_language:\n # remove snippet language information\n line = snippet_language_regex.sub(\"\", line)\n\n if is_inline_code_line:\n # replace leading and trailing backtick and HTML line break if present\n line = inline_code_line_regex.match(line).group(1)\n\n # decide if the current line is part of a code block\n in_non_markdown_code_block = (is_snippet_language and not line.strip()) or in_stack_snippet_code_block \\\n or in_alternative_code_block or in_code_tag_block or in_script_tag_code_block or is_inline_code_line\n\n if not current_post_block: # first block in post\n # ignore whitespaces at the beginning of a post\n if not is_whitespace_line:\n # first line, block element not created yet\n if in_markdown_code_block or in_non_markdown_code_block:\n current_post_block = CodeBlock(post_id)\n else:\n current_post_block = TextBlock(post_id)\n else:\n # current block has length > 0 => check if current line belongs to this block\n # or if it is first line of next block\n if isinstance(current_post_block, TextBlock):\n # check if line contains letters or digits (heuristic for malformed post blocks)\n previous_line_contains_letters_or_digits = \\\n contains_letter_or_digit_regex.search(previous_line) is not None\n\n if ((in_markdown_code_block\n and (not previous_line.strip() or not previous_line_contains_letters_or_digits))\n or in_non_markdown_code_block) and not is_whitespace_line:\n # End of text block, beginning of code block.\n # Do not end text block if next line is whitespace line\n # see, e.g., second line of PostHistory, Id=97576027\n if not current_post_block.is_empty():\n post_blocks.append(current_post_block)\n current_post_block = CodeBlock(post_id)\n\n elif isinstance(current_post_block, CodeBlock):\n # 
snippet language or snippet divider divide two code blocks ( if first block is not empty)\n if is_snippet_language or is_snippet_divider:\n if not current_post_block.is_empty():\n post_blocks.append(current_post_block)\n current_post_block = CodeBlock(post_id)\n elif (not in_markdown_code_block and not in_non_markdown_code_block) and not is_whitespace_line:\n # In a Stack Snippet, the lines do not have to be indented (see version 12 of answer\n # 26044128 and corresponding test case).\n # Do not close code postBlocks when whitespace line is reached\n # see, e.g., PostHistory, Id=55158265, PostId=20991163 (-> test case).\n # Do not end code block if next line is whitespace line\n # see, e.g., second line of PostHistory, Id=97576027\n if not current_post_block.is_empty():\n post_blocks.append(current_post_block)\n current_post_block = TextBlock(post_id)\n\n # ignore snippet language information (see https://stackoverflow.com/editing-help#syntax-highlighting)\n if current_post_block and not is_snippet_language:\n current_post_block.append(line)\n\n previous_line = line\n\n if current_post_block and not current_post_block.is_empty():\n # last block not added yet\n post_blocks.append(current_post_block)\n\n _revise_post_blocks(post_blocks)\n\n return post_id, list(\n map(lambda block: block.content,\n filter(lambda block: isinstance(block, TextBlock),\n post_blocks)\n )\n )", "title": "" }, { "docid": "0cb59a058568eb2559580edc58c96aac", "score": "0.4633997", "text": "def niceblock(text,newlines=False):\n\n\treturn re.sub('\\n([\\t])+',(' ' if not newlines else '\\n'),re.sub('^\\n([\\t])+','',text))", "title": "" }, { "docid": "e410e57452c2033b7fa49cbf1688a277", "score": "0.462984", "text": "def collatz_read (s) :\n a = s.split()\n return [int(a[0]), int(a[1])]", "title": "" }, { "docid": "e410e57452c2033b7fa49cbf1688a277", "score": "0.462984", "text": "def collatz_read (s) :\n a = s.split()\n return [int(a[0]), int(a[1])]", "title": "" }, { "docid": "48ad5116c6ebf54f0ebac9a0ad2e3995", "score": "0.4622642", "text": "def _do_code_blocks(self, text):\r\n code_block_re = re.compile(r'''\r\n (?:\\n\\n|\\A)\r\n ( # $1 = the code block -- one or more lines, starting with a space/tab\r\n (?:\r\n (?:[ ]{%d} | \\t) # Lines must start with a tab or a tab-width of spaces\r\n .*\\n+\r\n )+\r\n )\r\n ((?=^[ ]{0,%d}\\S)|\\Z) # Lookahead for non-space at line-start, or end of doc\r\n ''' % (self.tab_width, self.tab_width),\r\n re.M | re.X)\r\n\r\n return code_block_re.sub(self._code_block_sub, text)", "title": "" }, { "docid": "23abb54d36ed78ad3c774edd58cd8bc8", "score": "0.46197057", "text": "def read_From_String(self, string):\n board = []\n for i in range(len(string)): \n if i % self.length == 0:\n temp = []\n for j in string[i:i+self.length]:\n temp.append(int(j))\n board.append(temp)\n self.board = board\n return board", "title": "" }, { "docid": "c5cd3ed877bbd26d9b139d6eebcb5db7", "score": "0.46111372", "text": "def get_bib_blocks(content, start_character=\"@\", delim=(\"{\", \"}\")):\n blocks = []\n delimiter_stack = []\n for i, c in enumerate(content):\n if c == \"{\":\n delimiter_stack.append(i)\n elif c == \"}\" and delimiter_stack:\n start = delimiter_stack.pop()\n if len(delimiter_stack) == 0:\n start_index = content.rfind(start_character, 0, start)\n blocks.append((content[start_index:start], content[start + 1 : i]))\n return blocks", "title": "" }, { "docid": "526aec4cb2c8e12b64bd1a59c506c0c4", "score": "0.4607425", "text": "def parse(self, string):\n lines = string.split('\\n')\n\n reader = 
extlib.vimlparser.StringReader(lines)\n parser = extlib.vimlparser.VimLParser()\n ast = parser.parse(reader)\n\n # TOPLEVEL does not have a pos, but we need pos for all nodes\n ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}\n\n for plugin in self.plugins:\n plugin.process(ast)\n\n return ast", "title": "" }, { "docid": "5377a02ac8eafda917ddb8bd99373568", "score": "0.45870957", "text": "def excelSheetColumnNumber(self, s):\n\n s = s.upper()\n\n out = 0\n\n for char in s:\n\n out *= 26\n\n index = self.letterLookup.index(char)\n out += index + 1\n\n return out", "title": "" }, { "docid": "a9dc1aee1f3a190941ebb2fc5dd2d96b", "score": "0.45798004", "text": "def split_by_widths(input_string, widths, maxsplit=None):\n start = 0\n widths = widths[:maxsplit-1] if maxsplit else widths\n for width in widths:\n if width:\n substr = input_string[start:start+width]\n else:\n matches = re.split(r'(\\s*\\S+\\s+)', input_string[start:], maxsplit=1)\n substr = ''.join(matches[:2]) if len(matches) > 2 else ''.join(matches)\n width = len(substr)\n yield substr\n start += width\n\n # finally yield rest of the string, in case all widths were not specified\n if start < len(input_string):\n yield input_string[start:]", "title": "" }, { "docid": "1727c7a564de01f115bed5b9de099457", "score": "0.45796484", "text": "def column(self):\n for i in my_xrange(self._column_query_pos, self.pos):\n if self.text[i] == '\\t':\n self._column += self.tab_size\n self._column -= self._column % self.tab_size\n else:\n self._column += 1\n self._column_query_pos = self.pos\n return self._column", "title": "" }, { "docid": "776211effd07cd498ec9fea433a44a7b", "score": "0.45793006", "text": "def highlightBlock(self, text):\n # Do other syntax formatting\n for expression, nth, format in self.rules:\n index = expression.indexIn(text, 0)\n\n while index >= 0:\n # We actually want the index of the nth match\n index = expression.pos(nth)\n length = len(expression.cap(nth))\n self.setFormat(index, length, format)\n index = expression.indexIn(text, index + length)\n\n self.setCurrentBlockState(0)\n\n # Do multi-line strings\n in_multiline = self.match_multiline(text, *self.tri_single)\n if not in_multiline:\n in_multiline = self.match_multiline(text, *self.tri_double)", "title": "" }, { "docid": "3c9d81131aa65c86fb9b29aee05156b9", "score": "0.45780674", "text": "def get_column(level, column):\n string = \"\"\n for j in range(height):\n string += level[j][column]\n return string", "title": "" }, { "docid": "09b7102c6559af5847d3ccdd55168f26", "score": "0.45702723", "text": "def chunks(text,blocksize):\n num_chunks = len(text) // blocksize\n chu = [] \n for i in range(num_chunks):\n chu.append(text[i*blocksize: (i+1)*blocksize])\n return chu", "title": "" }, { "docid": "1912d6a547cd1ba08150a84f95c4acad", "score": "0.4557113", "text": "def unchunk(string):\n return string.replace(\" \", \"\")", "title": "" }, { "docid": "4b0873b9d8ea2938a36ad396307524c9", "score": "0.45540512", "text": "def _from_text_columns(\n data, *, comment='#', separator=None, joiner='+', codepoint_column=0, unicode_column=1,\n codepoint_base=16, unicode_base=16, inline_comments=True, ignore_errors=False,\n ):\n mapping = {}\n for line in data.decode('utf-8-sig').splitlines():\n # ignore empty lines and comment lines (first char is #)\n if (not line) or (line[0] == comment):\n continue\n if line.startswith('START') or line.startswith('END'):\n # xfonts .enc files - STARTENCODING, STARTMAPPING etc.\n continue\n # strip off comments\n if inline_comments:\n line = 
line.split(comment)[0]\n # split unicodepoint and hex string\n splitline = line.split(separator)\n if len(splitline) > max(codepoint_column, unicode_column):\n cp_str, uni_str = splitline[codepoint_column], splitline[unicode_column]\n cp_str = cp_str.strip()\n uni_str = uni_str.strip()\n # right-to-left marker in mac codepages\n uni_str = uni_str.replace('<RL>+', '').replace('<LR>+', '')\n # reverse-video marker in kreativekorp codepages\n uni_str = uni_str.replace('<RV>+', '')\n # czyborra's codepages have U+ in front\n if uni_str.upper().startswith('U+'):\n uni_str = uni_str[2:]\n # ibm-ugl codepage has U in front\n if uni_str.upper().startswith('U'):\n uni_str = uni_str[1:]\n # czyborra's codepages have = in front\n if cp_str.upper().startswith('='):\n cp_str = cp_str[1:]\n try:\n # allow sequence of codepoints\n # multibyte code points can also be given as single large number\n # note that the page bytewidth of the codepoints is assumed to be 1\n cp_point = b''.join(\n int_to_bytes(int(_substr, codepoint_base))\n for _substr in cp_str.split(joiner)\n )\n if unicode_base == 'char':\n # the character itself is in the column, utf-8 encoded\n char = uni_str\n else:\n # allow sequence of unicode code points separated by 'joiner'\n char = ''.join(\n chr(int(_substr, unicode_base))\n for _substr in uni_str.split(joiner)\n )\n if char != '\\uFFFD':\n # u+FFFD replacement character is used to mark undefined code points\n mapping[cp_point] = char\n except (ValueError, TypeError) as e:\n # ignore malformed lines\n if not ignore_errors:\n logging.warning('Could not parse line in text charmap file: %s [%s]', e, repr(line))\n return mapping", "title": "" }, { "docid": "a62d0a8bba92f433eb1247fbf38b40cf", "score": "0.4553922", "text": "def split_statement(block):\n statement = ''\n while True:\n r = re.match(r'^([^{]*?) *; *(.*)$', block)\n if r:\n return (statement + r.group(1), r.group(2))\n r = re.match('^([^{]*? 
*{ *)(.*)$', block)\n if not r:\n return (block, '')\n statement += r.group(1)\n block = r.group(2)\n level = 1\n while True:\n r = re.search('^([^{}]* *([{}]) *)(.*)$', block)\n if not r:\n raise Exception(\"Failed to parse header: unmatched '{'\")\n statement += r.group(1)\n block = r.group(3)\n if r.group(2) == '}':\n level -= 1\n if level == 0:\n break\n else:\n level += 1", "title": "" }, { "docid": "6943c60aeaf6be6c188eaa3d0d88eb5f", "score": "0.45483446", "text": "def output(string, headerCount):\n i=0\n #print(string)\n output= \"\"\n #print(string)\n #print(string.splitlines(True))\n \n for line in string.splitlines(True):\n # print(i)\n if i > headerCount -1 :\n #print(line)\n output += line\n i=i+1\n #print(\"output: \"+output)\n return output", "title": "" }, { "docid": "389b156fc9662e92fe592e086035ca31", "score": "0.45440918", "text": "def read_current_col_widths(self):\n lines = self.read(\"/tag/sel/index\")\n self.curr_colwidths_px = []\n for line in lines:\n match = re.search(r\"# [^~:]+ \\d+ (\\d+)\", line)\n if match:\n self.curr_colwidths_px.append(int(match.group(1)))\n print self.curr_colwidths_px", "title": "" }, { "docid": "0616b36dda4b39df3193f9f11c3dd92b", "score": "0.45377734", "text": "def get_frame_size(text):\n\n lines = text.splitlines()\n rows = len(lines)\n columns = max([len(line) for line in lines])\n return rows, columns", "title": "" }, { "docid": "86128e8f13cc24497e86cacac944762c", "score": "0.45316845", "text": "def edit_str(str):\n return str.replace('\\n', '').split(' ')", "title": "" }, { "docid": "b00090f44bf8ba29e915c32d65978637", "score": "0.4530453", "text": "def print_vertically(sentence):\n columns = sentence.split()\n rows = zip_longest(*columns, fillvalue=\" \")\n return [\"\".join(r).rstrip() for r in rows]", "title": "" }, { "docid": "fbd9f8d91d8198f30b9825ad9b12078f", "score": "0.45272514", "text": "def _parse_docker_column_output(self, output):\n parsed = []\n output = output.splitlines()\n header = output[0]\n colstarts = [0]\n colidx = 0\n spacect = 0\n if not output:\n return parsed\n for i, c in enumerate(header):\n if c != ' ' and spacect > 1:\n colidx += 1\n colstarts.append(i)\n spacect = 0\n elif c == ' ':\n spacect += 1\n colstarts.append(None)\n colheadings = []\n for i in range(0, len(colstarts) - 1):\n colheadings.append(header[colstarts[i]:colstarts[i + 1]].strip())\n for line in output[1:]:\n row = {}\n for i, key in enumerate(colheadings):\n row[key] = line[colstarts[i]:colstarts[i + 1]].strip()\n parsed.append(row)\n return parsed", "title": "" }, { "docid": "d3028d2f419abbe0e077b28c59f7da33", "score": "0.4520288", "text": "def findfieldlists(blocks):\n i = 0\n while i < len(blocks):\n # Searching for a paragraph that looks like this:\n #\n #\n # +--------------------+----------------------+\n # | \":\" field name \":\" | field body |\n # +-------+------------+ |\n # | (body elements)+ |\n # +-----------------------------------+\n if (blocks[i]['type'] == 'paragraph' and\n _fieldre.match(blocks[i]['lines'][0])):\n indent = blocks[i]['indent']\n fields = []\n for line in blocks[i]['lines']:\n m = _fieldre.match(line)\n if m:\n key, spaces, rest = m.groups()\n width = 2 + len(key) + len(spaces)\n fields.append(dict(type='field', lines=[],\n indent=indent, width=width))\n # Turn \":foo: bar\" into \"foo bar\".\n line = '%s %s%s' % (key, spaces, rest)\n fields[-1]['lines'].append(line)\n blocks[i:i+1] = fields\n i += len(fields) - 1\n i += 1\n return blocks", "title": "" }, { "docid": "a35489116e6f2fcfbdc7a2d41e7ef8a4", 
"score": "0.4515833", "text": "def charlimit_textwrap(string):\n\n list_string = textwrap.wrap(str(string), width=80)\n for line in list_string:\n print(line)\n return", "title": "" }, { "docid": "e99e6eba060bb951f581c47da21a4e76", "score": "0.45109457", "text": "def split(self, s):\n parts = []\n b = 0\n while 1:\n m = self.splitp.match(s, b)\n if m is None:\n break\n b, e = m.span()\n parts.append(s[b:e])\n b = e + 1\n return parts", "title": "" }, { "docid": "7183f7f1505ded3549a43871270c04e6", "score": "0.45061374", "text": "def colspan(text, cs=2):\n text = str(text)\n if '|' not in text[1:]:\n return f\"| colspan='{cs}' | {text}\\n\"\n else:\n parts = text.split('|')\n return f\"|{parts[1]}colspan='{cs}' | {parts[-1]}\"", "title": "" }, { "docid": "adb51eb9808ef3be85341acdd406cbd1", "score": "0.45024046", "text": "def get_column_names(desc):\n widths = get_column_widths(desc)\n container = ''\n for i in range(len(desc)):\n container += desc[i][0] + ' '*(widths[i])\n return container", "title": "" }, { "docid": "a09ecca0aadecc9e3c87b3d2ef30ce6f", "score": "0.44968572", "text": "def scytale(text, rows, cols, textline):\n m = np.array(list(text.ljust(rows*cols, FILL_CHARACTER))).reshape((rows, cols))\n result = ''.join([''.join(row) for row in m.transpose()]).strip()\n if textline!= None:\n textline.append(f'Text to cipher: \"{text}\" ({len(text)})')\n textline.append(np.array2string(m))\n textline.append(f\"Result size: {len(result)}\")\n return result", "title": "" }, { "docid": "411fadf2c8bc5fb2707d22030e9c6776", "score": "0.44901046", "text": "def break_line(one_string, width):\n result = \"\"\n counter = 0\n for c in one_string:\n if c == '\\n':\n result += c\n counter = 0\n elif counter < width:\n result += c\n counter += 1\n else:\n result += \"ϟ\\n ϟ\"\n result += c\n counter = 4\n return result", "title": "" }, { "docid": "45670a5d13761003c7f1c76c8e67139f", "score": "0.44850746", "text": "def ghc_column_to_sublime_column(view, line, column):\n cur_line = view.substr(view.line(view.text_point(line - 1, 0)))\n col = 1\n real_col = 0\n for char in cur_line:\n if col >= column:\n return real_col\n col += (8 if char == '\\t' else 1)\n real_col += 1\n return real_col", "title": "" }, { "docid": "8c7bbccc63762600c087043cd9969a06", "score": "0.44828248", "text": "def _colon_span_indices(string): # helper to _compress_colon_span: \"ff:::::::ff\" to \"ff::ff\"\n if string is None or len(string) < 3:\n return None\n\n colonspan_list_tuples = []\n\n length = len(string)\n beg, end, cur = 0, 0, 0\n\n while (cur < length): # traverse string across\n # find beginning\n have_distance = (length - 1) - cur >= 2 # keeps from out of bounds\n three_colons = False\n if have_distance:\n three_colons = string[cur] == \":\" and string[cur + 1] == \":\" and string[cur + 2] == \":\"\n beg = cur # beg must be first occurance\n\n compressible = have_distance and three_colons # remainder of string is potentially compressible\n\n if compressible:\n for i in range(cur + 2, length): # dns_state_analysis subsequence of \":\"\n if i == length - 1: # stop at end of string\n if string[i] != \":\":\n end = i - 1\n else:\n end = i\n cur = length\n colonspan_list_tuples.append((beg, end))\n break\n elif string[i] != \":\": # not at end and current char not \":\"\n end = i - 1 # previous char is the ending \":\" in the previous sequence\n colonspan_list_tuples.append((beg, end)) # record beg, end sequence\n # now we have [beg, end] sequence; pick up at i on next iteration\n cur = end # will be incremented at bottom of 
while loop\n break\n else:\n continue\n\n cur += 1\n\n if len(colonspan_list_tuples) == 0:\n colonspan_list_tuples = None\n\n return colonspan_list_tuples", "title": "" }, { "docid": "7122f76cebdfc4d3b3dc814746c3665f", "score": "0.44746533", "text": "def parse_sudoku(sudoku_str):\n sudoku = [[0 for x in range(9)] for y in range(9)]\n counter = 0\n for char in sudoku_str:\n if counter > 80:\n break\n\n try:\n val = int(char)\n row = counter // 9\n col = counter % 9\n sudoku[row][col] = val\n counter += 1\n except ValueError:\n continue\n\n if counter < 80:\n raise ValueError(\"Not enough integer values in sudoku string input.\")\n else:\n return sudoku", "title": "" }, { "docid": "f328fb499074042ed482b0be28b03654", "score": "0.4471318", "text": "def chunkify(string, n):\n return (\n string[i:(i + n)]\n for i in range(0, len(string), n)\n )", "title": "" }, { "docid": "bf80ab6133e282f2d162a95e37308c92", "score": "0.44573554", "text": "def parenthetic_contents(string):\n stack = []\n global nodes, temps, tmp_cnt\n cnt = 0\n while True:\n for i, c in enumerate(string):\n if c == '(':\n stack.append(i)\n elif c == ')' and stack:\n start = stack.pop()\n #yield (len(stack), string[start + 1: i])\n if (len(stack) != 0):\n parse = string[start + 1: i].split()\n #print(string, parse, len(stack))\n\n temp_node = summary(parse)\n print(string)\n if temp_node == 1:\n break\n elif temp_node == 2:\n string = string.replace(string[start : i + 1],string[start + 3 : i])\n break\n else: \n temps.append(temp_node)\n string = string.replace(string[start : i + 1],\"temp\"+str(tmp_cnt)+\" \")\n tmp_cnt += 1\n break\n else:\n break\n \n if stack:\n while stack:\n stack.pop()\n else:\n break\n \n return temps[-1]", "title": "" }, { "docid": "a54f9a4618abd4806e9f8ba4e5685fca", "score": "0.44558406", "text": "def divide(self,string,wd_width):\n s = string\n n = wd_width\n subs = []\n length = len(s)\n for i in range(length - n + 1):\n subs.append(s[i:i + n])\n return subs", "title": "" }, { "docid": "cf7018152b2e08fa966caa32ffd61018", "score": "0.4452929", "text": "def check_columns(self, lines, first_line, columns):\r\n # \"Infinite\" value for a dummy last column's beginning, used to\r\n # check for text overflow:\r\n columns.append((sys.maxint, None))\r\n lastcol = len(columns) - 2\r\n for i in range(len(columns) - 1):\r\n start, end = columns[i]\r\n nextstart = columns[i+1][0]\r\n offset = 0\r\n for line in lines:\r\n if i == lastcol and line[end:].strip():\r\n text = line[start:].rstrip()\r\n new_end = start + len(text)\r\n columns[i] = (start, new_end)\r\n main_start, main_end = self.columns[-1]\r\n if new_end > main_end:\r\n self.columns[-1] = (main_start, new_end)\r\n elif line[end:nextstart].strip():\r\n raise TableMarkupError('Text in column margin at line '\r\n 'offset %s.' 
% (first_line + offset))\r\n offset += 1\r\n columns.pop()", "title": "" }, { "docid": "fcf668847f3615a975017d6e4eb0c6e7", "score": "0.4452551", "text": "def stringWidth(*strings0):\n strings = list(strings0)\n for string in strings0:\n matches = re.search(r\"#splitline\\{(.*?)\\}\\{(.*?)\\}\",string) # check splitline\n if matches:\n while string in strings: strings.pop(strings.index(string))\n strings.extend([matches.group(1),matches.group(2)])\n matches = re.search(r\"[_^]\\{(.*?)\\}\",string) # check subscript/superscript\n if matches:\n while string in strings: strings.pop(strings.index(string))\n strings.append(matches.group(1))\n string = string.replace('#','')\n return max([len(s) for s in strings])", "title": "" }, { "docid": "a0c5ddb6713f84715062e3201142ab54", "score": "0.44490004", "text": "def block_parser(part):\n\n block = []\n lines = part.split('\\n')\n N = len(lines)\n i = 0\n decorator = None\n while 1:\n\n if i==N:\n # nothing left to parse -- the last line\n break\n\n line = lines[i]\n i += 1\n line_stripped = line.strip()\n if line_stripped.startswith('#'):\n block.append((COMMENT, line))\n continue\n\n if line_stripped.startswith('@'):\n # we're assuming at most one decorator -- may need to\n # rethink\n decorator = line_stripped\n continue\n\n # does this look like an input line?\n matchin = rgxin.match(line)\n if matchin:\n lineno, inputline = int(matchin.group(1)), matchin.group(2)\n\n # the ....: continuation string\n continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))\n Nc = len(continuation)\n # input lines can continue on for more than one line, if\n # we have a '\\' line continuation char or a function call\n # echo line 'print'. The input line can only be\n # terminated by the end of the block or an output line, so\n # we parse out the rest of the input line if it is\n # multiline as well as any echo text\n\n rest = []\n while i<N:\n\n # look ahead; if the next line is blank, or a comment, or\n # an output line, we're done\n\n nextline = lines[i]\n matchout = rgxout.match(nextline)\n #print \"nextline=%s, continuation=%s, starts=%s\"%(nextline, continuation, nextline.startswith(continuation))\n if matchout or nextline.startswith('#'):\n break\n elif nextline.startswith(continuation):\n inputline += '\\n' + nextline[Nc:]\n else:\n rest.append(nextline)\n i+= 1\n\n block.append((INPUT, (decorator, inputline, '\\n'.join(rest))))\n continue\n\n # if it looks like an output line grab all the text to the end\n # of the block\n matchout = rgxout.match(line)\n if matchout:\n lineno, output = int(matchout.group(1)), matchout.group(2)\n if i<N-1:\n output = '\\n'.join([output] + lines[i:])\n\n block.append((OUTPUT, output))\n break\n\n return block", "title": "" }, { "docid": "cfc8cbd9051bf8b9d4e335902848acd6", "score": "0.44452634", "text": "def get_column_alignments(row):\n alignments = []\n for hyphens in row:\n hyphens = hyphens.strip()\n if hyphens.startswith(':') and hyphens.endswith(':'):\n alignments.append('center')\n elif hyphens.startswith(':'):\n alignments.append('left')\n elif hyphens.endswith(':'):\n alignments.append('right')\n else:\n alignments.append('')\n\n return alignments", "title": "" }, { "docid": "a529cc28d47db2c5181f4bde5789e3a6", "score": "0.44440725", "text": "def get_block_source(template_source, block_name):\n # Find the open block for the given name\n match = re.search(NAMED_BLOCK_RE % (block_name,), template_source)\n if match is None:\n raise ValueError(u'Template block {n} not found'.format(n=block_name))\n end = inner_start = 
start = match.end()\n end_width = 0\n while True:\n # Set ``end`` current end to just out side the previous end block\n end += end_width\n # Find the next end block\n match = re.search(ENDBLOCK_RE, template_source[end:])\n # Set ``end`` to just inside the next end block\n end += match.start()\n # Get the width of the end block, in case of another iteration\n end_width = match.end() - match.start()\n # Search for any open blocks between any previously found open blocks,\n # and the current ``end``\n nested = re.search(BLOCK_RE, template_source[inner_start:end])\n if nested is None:\n # Nothing found, so we have the correct end block\n break\n else:\n # Nested open block found, so set our nested search cursor to just\n # past the inner open block that was found, and continue iteration\n inner_start += nested.end()\n # Return the value between our ``start`` and final ``end`` locations\n return start #template_source[start:end]", "title": "" }, { "docid": "82d40681e38b3d1f6eb3a5928be03713", "score": "0.44397682", "text": "def bytes2matrix(text):\n return [list(text[i:i+4]) for i in range(0, len(text), 4)]", "title": "" }, { "docid": "82d40681e38b3d1f6eb3a5928be03713", "score": "0.44397682", "text": "def bytes2matrix(text):\n return [list(text[i:i+4]) for i in range(0, len(text), 4)]", "title": "" }, { "docid": "c9fad9113fc66e77666197ce555063b2", "score": "0.443938", "text": "def make_columns(self):\n coords_to_tokens = self.make_coords_to_tokens()\n return [[coords_to_tokens.get((x, y), ' ') for y in range(3)]\n for x in range(3)]", "title": "" }, { "docid": "c0cc0c57d5e53ea61529c8af6859ddaf", "score": "0.4436193", "text": "def encode(string):\n\n output_string = \"\"\n\n for ch in string:\n\n if ch.isalpha():\n output_string += ch\n \n output_string = output_string.lower()\n\n columns_count = math.sqrt(len(output_string))\n columns_count = math.ceil(columns_count)\n\n result = [output_string[i::columns_count] for i in range(columns_count)]\n\n return \"\".join(result)", "title": "" }, { "docid": "9d637288084cadc158062d484ceb7a50", "score": "0.44335192", "text": "def extract_words(solution, width):\n for row in solution:\n yield despace(row)\n for col in column_words(solution, width):\n yield despace(col)", "title": "" }, { "docid": "00e2650b806575c23c1f75919c0ccdda", "score": "0.44325006", "text": "def __char(data):\n end = None\n width = 0\n chars = []\n for j in range(0, height):\n line = data.pop(0)\n if end is None:\n end = self.reEndMarker.search(line).group(1)\n end = re.compile(re.escape(end) + r'{1,2}$')\n\n line = end.sub('', line)\n\n if len(line) > width:\n width = len(line)\n chars.append(line)\n return width, chars", "title": "" }, { "docid": "7cfa7905d766630570516a03b3720e0d", "score": "0.44310305", "text": "def from_string(board_string):\n rows = [line.strip() for line in board_string.strip().split(\"\\n\")]\n boardsize = len(rows)\n if any(len(row) != boardsize for row in rows):\n raise ValueError('Board must be square')\n\n board = GoBoard(boardsize)\n rows.reverse()\n for r, row_string in enumerate(rows):\n for c, point in enumerate(row_string):\n if point in ('b', 'w'):\n board.apply_move(point, (r, c))\n return board", "title": "" }, { "docid": "167824604fec5df5e2a4e1f7b1ed1e80", "score": "0.4416733", "text": "def getCol(p):\n\tcols = list(range(8))\n\tfor char in p:\n\t\tmid = int((len(cols)/2))\n\t\tif char == 'L':\n\t\t\tcols = cols[:mid]\n\t\telif char == 'R': \n\t\t\tcols = cols[mid:]\n\treturn cols[0]", "title": "" }, { "docid": "b31523d40191bba4fad62b7b8285ae09", 
"score": "0.44146588", "text": "def read(markdown_string):\n block = reInsights.search(markdown_string)\n insights = []\n if block:\n offset = block.start(0)\n\n while True:\n match = reInsight.search(markdown_string, offset)\n if not match:\n break\n insights += [Insight(match.group(1), match.group(2).strip(), match.start(0), match.end(0))]\n offset = match.end(0) + 1\n\n return insights", "title": "" }, { "docid": "508793fd6c08e67ff79a755ff8e38aae", "score": "0.44120026", "text": "def splitBase64IntoBlocks(string, blocksize=16):\n\n # Converts to hex\n hexString = Conversion.remove_byte_notation(Base64_To.hexadecimal(string))\n\n hexBytes = re.findall(\"..\", hexString)\n\n # Adds padding if the lengths are not equal\n while len(hexBytes) % blocksize != 0:\n hexBytes.append(\"00\")\n\n chunks = []\n for x in range(0, len(hexBytes), blocksize):\n chunk = \"\"\n\n for i in range(x, x + blocksize):\n chunk += hexBytes[i]\n chunks.append(HexTo.base64(chunk))\n\n return chunks", "title": "" }, { "docid": "c5afec8b932be35327041ec0819aee0a", "score": "0.44102556", "text": "def split_col(col, char):\n return col.apply(lambda s: s.split(char) if type(s) == str else [s])", "title": "" }, { "docid": "a673a7f3407520504d635eaafaa22d6e", "score": "0.440667", "text": "def remove_odd_blocks(word: str, block_length: int) -> str:\n blocks = []\n for letter_index in range(0, len(word), block_length): # creates blocks of the given length from the given string\n blocks.append(word[letter_index:letter_index+block_length])\n\n processed_block = []\n for block_index in range(0, len(blocks), 2): # reads every second block\n processed_block.append(blocks[block_index])\n\n return \"\".join(processed_block)", "title": "" } ]
a206da35d1875a5bd983fe30e21ee345
Method for extracting the negative random value with settled minimum and maximum values.
[ { "docid": "3a8e7b4b55cdad8532cfb8d79b6d4e41", "score": "0.0", "text": "def rand(self):\n pass", "title": "" } ]
[ { "docid": "9cd0e7b95dbf464ecf16b47866d60332", "score": "0.6874778", "text": "def random_negative(self, value):\n if np.random.rand() < self.random_negative_prob:\n return -value\n else:\n return value", "title": "" }, { "docid": "6d795f79247e33effd70846ec5b2f594", "score": "0.6796967", "text": "def gen_near_zero(self):\n if self.signed:\n start_value = self.gen_zero_value() - self.highlow_range\n random_offset = self.random.randrange(self.highlow_range * 2)\n return self.rectify_value(start_value + random_offset)\n else:\n return self.gen_low_value()", "title": "" }, { "docid": "527e766c400e1cde0ba4c0cbeff23e47", "score": "0.67855734", "text": "def _posNegRandom(self):\n return random.random() * 2 - 1.0", "title": "" }, { "docid": "b950a9f670416db94cdd5a521d2be27b", "score": "0.675825", "text": "def gen_near_zero(self):\n if self.signed:\n start_value = self.gen_zero_value() - self.highlow_range\n random_offset = self.random.randrange(self.highlow_range * 2)\n return self.rectify_value(self.scale(start_value + random_offset))\n else:\n return self.scale(self.gen_low_value())", "title": "" }, { "docid": "0ad61772deeec3b905cb61f4527e8e1f", "score": "0.6690869", "text": "def gen_low_value(self):\n return self.rectify_value(self.min_value + self.scale(self.random.randrange(self.highlow_range)))", "title": "" }, { "docid": "0f9fdf958514c9dd080b709a16983779", "score": "0.6685387", "text": "def gen_low_value(self):\n return self.rectify_value(self.gen_min_value() + self.random.randrange(self.highlow_range))", "title": "" }, { "docid": "99fdf0e8460580da3ac4b6e6abf3ab14", "score": "0.6569671", "text": "def gen_standard_value(self):\n return self.random.randrange(self.gen_min_value(), self.gen_max_value() + 1)", "title": "" }, { "docid": "f7d182d0238c80a4bc1e8962f7806eec", "score": "0.64634496", "text": "def random(self):\n minimum = self.minimum() or 0\n maximum = self.maximum() or 100\n return random.randint(minimum, maximum)", "title": "" }, { "docid": "3373d901c265c966e50610ce85e6f9bb", "score": "0.64510316", "text": "def gen_high_value(self):\n return self.rectify_value(self.max_value - self.scale(self.random.randrange(self.highlow_range)))", "title": "" }, { "docid": "f6f3aa0b55b42daa2f7c6ce415389b71", "score": "0.64358985", "text": "def __init__(self, max=100, min=0, random=True):\n self.max = max\n self.min = min\n self.random = random\n self.current_value = self.min", "title": "" }, { "docid": "93e8e4f212296c0fa6265326e0a3b58e", "score": "0.6414941", "text": "def gen_high_value(self):\n return self.rectify_value(self.gen_max_value() - self.random.randrange(self.highlow_range))", "title": "" }, { "docid": "640cf8eb65e7a627fb219f003f2fcf06", "score": "0.6393561", "text": "def rng(data):\n return max(data) - min(data)", "title": "" }, { "docid": "1e21f926f4a3635af251489e2dae48bf", "score": "0.63608724", "text": "def rand(max_val, min_val=0):\n return (random.random() * (max_val - min_val)) + min_val", "title": "" }, { "docid": "674061345bd5bf1a40ae11054cbb182d", "score": "0.6270508", "text": "def gen_standard_value(self):\n return self.scale(\n self.random.randrange(\n self.min_unscaled_value,\n self.max_unscaled_value + 1\n )\n )", "title": "" }, { "docid": "a9f2d5d9c1f24e5b47d71837909b2f88", "score": "0.6210007", "text": "def random_value(min, max):\n\n return randint(min, max)", "title": "" }, { "docid": "18f761f9e605e93236de039bbe304486", "score": "0.62065107", "text": "def sample_maxx(minimum, maximum):\n x1 = random.randint(minimum, maximum)\n x2 = random.randint(minimum, maximum)\n 
return max(x1,x2)", "title": "" }, { "docid": "6a6c63e524d4453aa48c370fe8b95111", "score": "0.62059826", "text": "def _get_value_random(self):\n # TODO: add feature to sample from given statistical function\n a = self.value_range[0]\n b = self.value_range[1]\n if self.value_type == ValueType.Integer:\n res = randint(a,b)\n else:\n res = uniform(a,b)\n return res", "title": "" }, { "docid": "4e2c917b2f1cc374b9ef89b5c84d1428", "score": "0.61527157", "text": "def symetric_random(val_max):\n return np.random.rand()*2*val_max - val_max", "title": "" }, { "docid": "3e0897a38195ec2eb0d2255d9bcba4c1", "score": "0.6149663", "text": "def get_random_solution(self, rand_sol_rng):\n x = (min(max(0, rand_sol_rng.normalvariate(150, 50)), 2400),)\n return x", "title": "" }, { "docid": "836d4016014f5b9e012fbd4f43c9795d", "score": "0.61290604", "text": "def get_value(self):\n if self.random:\n value = random.uniform(self.min, self.max) \n else:\n value = self.current_value\n self.current_value += 1\n if self.current_value > self.max:\n self.current_value = self.min \n return value", "title": "" }, { "docid": "4a03aa2ba293c321f76f0ad05e96e077", "score": "0.61143804", "text": "def rand(minv, maxv): \r\n return random.random() * (maxv - minv) + minv", "title": "" }, { "docid": "5451b68b1cef3f8272755a8dc52680c4", "score": "0.60928714", "text": "def generate_random_value(self):\n\n return random.uniform(self.lower_bound, self.upper_bound)", "title": "" }, { "docid": "1543eed31eb18470c51cf75808249fb2", "score": "0.60746706", "text": "def mt_rand(min, max):\n if min is None:\n warn(\"Minimum value not specified, assuming minimum value of 1\")\n min = 1\n if max is None:\n raise TypeError(\"Random requires a maximum boundary. Given: None\")\n return random.randint(min, max)", "title": "" }, { "docid": "29df3204deb7af03028eb01ce49f3a16", "score": "0.6068767", "text": "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "title": "" }, { "docid": "2e0fbaf070ab767e87b04b4c45e351be", "score": "0.60578865", "text": "def _sample_neg(self, assign_result, num_expected, **kwargs):\n neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n if neg_inds.numel() != 0:\n neg_inds = neg_inds.squeeze(1)\n if len(neg_inds) <= num_expected:\n return neg_inds\n else:\n return self.random_choice(neg_inds, num_expected)", "title": "" }, { "docid": "ead5de98afb85b5cbade64c61269d98e", "score": "0.6046818", "text": "def minmax ( self , nshoots = 200000 ) :\n ## try to get minmax directly from pdf/function \n if self.tricks and hasattr ( self.pdf , 'function' ) :\n if hasattr ( self.pdf , 'setPars' ) : self.pdf.setPars() \n f = self.pdf.function()\n if hasattr ( f , 'minmax' ) :\n try :\n mn , mx = f.minmax()\n if 0<= mn and mn <= mx and 0 < mx : \n return mn , mx\n except :\n pass\n if hasattr ( f , 'max' ) :\n try :\n mx = f.max()\n if 0 < mx : return 0 , mx\n except :\n pass\n\n ## check RooAbsReal functionality\n code = self.pdf.getMaxVal( ROOT.RooArgSet ( self.xvar , self.yvar , self.zvar ) )\n if 0 < code :\n mx = self.pdf.maxVal ( code )\n if 0 < mx : return 0 , mx\n \n ## not try to use random\n \n mn , mx = -1 , -10\n if hasattr ( self.pdf , 'min' ) : mn = self.pdf.min()\n if hasattr ( self.pdf , 'max' ) : mx = self.pdf.max()\n if 0 <= mn and mn <= mx and 0 < mx : return mn , mx\n 
\n if not self.xminmax() : return ()\n if not self.yminmax() : return ()\n if not self.zminmax() : return ()\n \n mn , mx = -1 , -10\n xmn , xmx = self.xminmax()\n ymn , ymx = self.yminmax()\n zmn , zmx = self.zminmax()\n for i in range ( nshoots ) : \n xx = random.uniform ( xmn , xmx )\n yy = random.uniform ( ymn , ymx )\n zz = random.uniform ( zmn , zmx )\n with SETVAR ( self.xvar ) :\n with SETVAR ( self.yvar ) :\n with SETVAR ( self.zvar ) :\n self.xvar.setVal ( xx )\n self.yvar.setVal ( yy )\n self.zvar.setVal ( zz )\n vv = self.pdf.getVal()\n if mn < 0 or vv < mn : mn = vv\n if mx < 0 or vv > mx : mx = vv\n \n return mn , mx", "title": "" }, { "docid": "1fed864653daa14e4144c4d6cd1be222", "score": "0.6032778", "text": "def ground(self) -> float:\n min_val = self.values.min()\n self.values = self.values - min_val\n return min_val", "title": "" }, { "docid": "71ffd75bf8ee8a5258d430031da15f5e", "score": "0.59643537", "text": "def interestrate_randomization():\n x= random.uniform(-.1,.1)\n return x", "title": "" }, { "docid": "684230b452a0501b36dce776de23e5d0", "score": "0.5949994", "text": "def randrange(self, min_val, max_val):\n\n return NUM_TO_GUESS", "title": "" }, { "docid": "739c5b38cf11936b4d6153f537d34cac", "score": "0.59302473", "text": "def rand_temp():\n return random.randint(-30, 110) + random.random();", "title": "" }, { "docid": "aa4dfdcd36daa62a82f320f4403eb98f", "score": "0.59152466", "text": "def generate_value(generator):\n return generator.gen_near_zero()", "title": "" }, { "docid": "ecb60ea668ba4595efd1cd6ddca2bd58", "score": "0.5904", "text": "def _get_special_bounded_float(self, low, val, upp):\n pad = self._boundary_padding\n dist = self._maximum_distance\n r = upp - low\n lower = max(low + pad*r, val - dist*r)\n upper = min(upp - pad*r, val + dist*r)\n if lower > upper:\n print(\"No valid solution! Ignoring last sample value\")\n lower = low + pad*r\n upper = upp - pad*r\n return lower + (upper-lower)*random.random()", "title": "" }, { "docid": "44e1ebfffa303c57f0b9a9e1a36cc21f", "score": "0.58966464", "text": "def randomise(self):\n\n if self.is_integer():\n value = random.randint(self.min, self.max)\n else:\n value = random.uniform(self.min, self.max)\n self.value = value", "title": "" }, { "docid": "82844ce7af72e87d7a526d771e93e800", "score": "0.5885664", "text": "def fuzzy(max):\n\n return random_die(max) - random_die(max)", "title": "" }, { "docid": "e1635ff4a25cdfd58ce3513462b15ec8", "score": "0.58729035", "text": "def _get_random_number(self,min_max_array):\n\n # check length of array and caluclate random number ############################################################\n if len(min_max_array) > 1:\n if min_max_array[1] >= min_max_array[0]:\n return random.uniform(min_max_array[0],min_max_array[1])\n else:\n raise Exception(\"[StageBlenderLandscape] Max >=! 
Min!\")\n else:\n return min_max_array[0]\n ##################################################### end of check length of array and caluclate random number #", "title": "" }, { "docid": "fd338ca8247d5543819b5edb61785f38", "score": "0.58670163", "text": "def test_min_2():\n assert harvard.min() == -32768", "title": "" }, { "docid": "655c16f6ab59e1be11a423480e54e3f4", "score": "0.5855973", "text": "def random3DValues(min_values, max_values, not_like_this=[]):\n\tif type(min_values) == int or type(min_values) == float:\n\t\tmin_values = [min_values, min_values, min_values]\n\tif type(max_values) == int or type(max_values) == float:\n\t\tmax_values = [max_values, max_values, max_values]\n\tif hasattr(min_values, \"__len__\") and hasattr(max_values, \"__len__\") and (len(min_values) == len(max_values) == 3):\n\t\tindex = 0\n\t\tnew_values = []\n\t\twhile index < len(min_values):\n\t\t\tdiff = max_values[index]-min_values[index]\n\t\t\t# edge_limit_gauss_value = min_values[index]+diff*(random.gauss(1, 0.5)%1)\n\t\t\t# new_values.append(edge_limit_gauss_value)\n\n\t\t\tnew_values.append(random.randint(min_values[index], max_values[index]))\n\n\t\t\t# middle_gauss_value = min_values[index]+diff*(random.gauss(0.5, 0.5)%1)\n\t\t\t# new_values.append(middle_gauss_value)\n\t\t\tif len(not_like_this) == 3 and abs(new_values[index] - not_like_this[index]) < diff*0.1:\n\t\t\t\t# invert colour strength.\n\t\t\t\tnew_values[index] = min_values[index] + (new_values[index] + diff/2) % diff\n\t\t\tindex += 1\n\t\treturn new_values", "title": "" }, { "docid": "650f604c04c4653d96dbabec10b6828d", "score": "0.5854731", "text": "def random(minimum: Optional[int] = None, maximum: Optional[int] = None) -> int:\n pass", "title": "" }, { "docid": "b3bd88a321faa8df09a31d8429dee23d", "score": "0.58479184", "text": "def clip(self, n, min_value, max_value):\n\n return min(max(n, min_value), max_value)", "title": "" }, { "docid": "5eb5e7b0f68c6ebcc8b2fe313aa174f5", "score": "0.5839442", "text": "def min_val() -> float:\n return 0", "title": "" }, { "docid": "0b0b6481cec957fe06c2c3ab0708fddd", "score": "0.5828483", "text": "def between(min,max):\n return random.random() * (max - min) + min", "title": "" }, { "docid": "2a9ddbc1f7ac6ac0b020ae4e00c33cd5", "score": "0.5827321", "text": "def random_sign(value: int) -> int:\n pass", "title": "" }, { "docid": "130384e1bd7ded2f5332031acea3edd6", "score": "0.582571", "text": "def gen_min_value(self):\n if self.signed:\n return -S2**(self.size - 1)\n else:\n return self.gen_zero_value()", "title": "" }, { "docid": "e9c1a7b5fa4880fac43dae62f63e31bb", "score": "0.5814285", "text": "def _rrv_minmax_ ( s ) :\n return s.getMin(),s.getMax()", "title": "" }, { "docid": "5e013109ca38d3cc18d2d9a7c49d8543", "score": "0.58116156", "text": "def abs_max(self):\n return np.abs([self.min, self.max]).max()", "title": "" }, { "docid": "6294a37cde87ae6b22f0595ded1d5bb5", "score": "0.5808792", "text": "def get_rand_number(min_value, max_value,distribution):\n range = max_value - min_value\n if distribution=='uniform':\n choice = random.uniform(0,1)\n elif distribution=='normal':\n choice=random.normal()\n elif distribution=='lognormal':\n choice=random.lognormal()\n return min_value + range*choice", "title": "" }, { "docid": "bedee1a5ef551ceda39ce837e12f7aab", "score": "0.58084387", "text": "def _non_uniform_negativeitem_sampling3(self, n_samples):\n q = 0\n if q % (len(train_data) * log(len(train_data))) = 0:\n for f in (1: self._rank):\n sortednegatives[f] = sorted(self.H) # this gives the ranking of 
item based on the feature\n mu = numpy.mean(self.H.get_value[f])\n std = numpy.std(self.H[f])\n r = numpy.random.geometric(p=0.35, size=10000)", "title": "" }, { "docid": "c7c0886cec3d4d7f35b964adb89d215d", "score": "0.58072656", "text": "def _sample_from_range(self, range: Tuple[float, float]) -> float:\n min_value, max_value = min(range), max(range)\n value = np.random.random_sample() * (max_value - min_value) + min_value\n return value", "title": "" }, { "docid": "c7c0886cec3d4d7f35b964adb89d215d", "score": "0.58072656", "text": "def _sample_from_range(self, range: Tuple[float, float]) -> float:\n min_value, max_value = min(range), max(range)\n value = np.random.random_sample() * (max_value - min_value) + min_value\n return value", "title": "" }, { "docid": "4f9e08a8c9fadff1ae6b572c8c78af8d", "score": "0.57936627", "text": "def generate_negative_hypernyms(N, max_int):\n return np.random.randint(0, max_int, (N,2))", "title": "" }, { "docid": "a7a7b68841ebeb568c1567272164ff69", "score": "0.57815874", "text": "def choice_var(self , range_ = range(-10 , 10)): #\n assert type(range_) == list\n random.shuffle(range_)\n return range_[0]", "title": "" }, { "docid": "bf269ef0453c7ae11d6e064050804697", "score": "0.5778657", "text": "def random(self):\n while True:\n x = random.gauss(self._mu, self._sigma)\n if self._low <= x <= self._high:\n return x", "title": "" }, { "docid": "50371495d0997b895f042fc70c28cec4", "score": "0.57707673", "text": "def rand_num_exclude(_min: int, _max: int, exclude: list):\n rand = torch.randint(_min, _max, (1,))[0]\n return rand_num_exclude(_min, _max, exclude) if rand in exclude else rand", "title": "" }, { "docid": "317c7d4ba846bcd133cd46592df1a8ba", "score": "0.57604367", "text": "def __neg__(self):\n return -self.value", "title": "" }, { "docid": "d8546b9c0ec7a1362f89f18fec8d5cb7", "score": "0.57561135", "text": "def _clamp_not_neg(x: int) -> int:\n return max(x, 0)", "title": "" }, { "docid": "137a3f01f37166635986ad4f2fe75dd7", "score": "0.57544345", "text": "def chooseR( rmin, rmax):\n r = np.random.uniform(rmin,rmax)\n return(r)", "title": "" }, { "docid": "5e936c43b4825a9cb5c42a7c1fc49780", "score": "0.5743123", "text": "def random_float_range(mn=0, mx=0):\n return random.uniform(mn, mx)", "title": "" }, { "docid": "718b2f0aadf1bf156d77e195bb868fdd", "score": "0.5742174", "text": "def range(self):\n return self.max - self.min", "title": "" }, { "docid": "cf1092220eb1126d65eec4764a98a4ee", "score": "0.5732854", "text": "def potentialRange(self):\n values = []\n for candidate in self.root.output():\n values.append(float(candidate.get(self.attribute)))\n return (min(values), max(values))", "title": "" }, { "docid": "e1c5854e49b9f703bc6fae80fdfe79e6", "score": "0.572257", "text": "def random_vector(self, minmax):\n return [self.rand_in_bounds(minmax[i][0], minmax[i][1]) for i in range(len(minmax))]", "title": "" }, { "docid": "bb25318674707026ecf00c9b24ba90cd", "score": "0.571736", "text": "def __abs__(self):\n return abs(self.value)", "title": "" }, { "docid": "f9f0aeba0b890715e3ad4cf7ae990a4f", "score": "0.569147", "text": "def getmaxminx(self):\n self.loadmaxmin()\n return (self.maxminx.lower, self.maxminx.upper)", "title": "" }, { "docid": "08374fb6819ff6f8666787b7d09e4ad9", "score": "0.5687352", "text": "def _genBias(self):\n\n return random.uniform(*self.BIASRANGE)", "title": "" }, { "docid": "538ab26144904b045833ed3847b8bfef", "score": "0.56840575", "text": "def range_(values: Iterable[float]) -> float:\n return max(values) - min(values)", "title": "" }, 
{ "docid": "4923b7a719a4b4176817472f9b7b6578", "score": "0.56835777", "text": "def __best_value(self, rcd, value):\n return abs(value - self.magic_const), random.choice([x for x in rcd if x != abs(value - self.magic_const)])", "title": "" }, { "docid": "e1735f23e5a4aee79d82011adb225cc6", "score": "0.5677782", "text": "def test_max_floatlist_neg(self):\n self.assertEqual(max_integer([-1.05, -2.2, -3.5, -4.0]), -1.05)", "title": "" }, { "docid": "0bb8b611fefc4fcfed14fb9c54f7683e", "score": "0.567558", "text": "def _sample_roll(self):\n if self.require_negative_roll:\n roll = np.random.uniform(low=self.info['min_roll'], high=self.info['max_roll']) \n else:\n roll = np.random.uniform(low=-180, high=180) \n while (self.info['roll_neg_ubound'] < roll < self.info['roll_pos_lbound']):\n roll = np.random.uniform(low=-180, high=180) \n return roll", "title": "" }, { "docid": "7636086e7413d8a11d2e7fc046bb37ce", "score": "0.56677115", "text": "def __abs__(self):\n\n # take absolute values of the mesh values\n\n absval = df.norm(self.values, 'L2')\n\n # return maximum\n return absval", "title": "" }, { "docid": "89acf3c992f1940fac13336055352e16", "score": "0.56647056", "text": "def get_data(self):\n if randint(1, 100) % 2 == 0:\n return False, -1\n return True, ((choice([-1,1]))*np.random.random())\n # return True, self.yielder()", "title": "" }, { "docid": "efde221021b82f3a06bc1e01f9da8491", "score": "0.5660228", "text": "def _sample_non_uniform_range(self, low, hi):\n hi_points = self._sample_non_uniform(hi)\n if low > 0:\n low_points = np.flip(-1 * self._sample_non_uniform(low))\n points = np.concatenate((low_points, hi_points))\n else:\n points = hi_points\n return np.unique(points)", "title": "" }, { "docid": "9272fbf29fb4d0173622c86b0a09049e", "score": "0.5658568", "text": "def mutate_value(value,low_lim,upp_lim):\n value = format_float(random.gauss(value, (upp_lim-low_lim)*0.1)) # Mutate in the range of 10% SD of the value\n if(value > upp_lim):\n value = upp_lim\n elif(value < low_lim):\n value = low_lim\n return value", "title": "" }, { "docid": "652df80d89719f920ffdac3f3682f9c9", "score": "0.56558424", "text": "def __neg__(self):\n t= self.amult(-1.)\n return t", "title": "" }, { "docid": "652df80d89719f920ffdac3f3682f9c9", "score": "0.56558424", "text": "def __neg__(self):\n t= self.amult(-1.)\n return t", "title": "" }, { "docid": "652df80d89719f920ffdac3f3682f9c9", "score": "0.56558424", "text": "def __neg__(self):\n t= self.amult(-1.)\n return t", "title": "" }, { "docid": "c7826bf55cc98099a37115dbdd18f5fe", "score": "0.5651987", "text": "def minmax(self):\n return self.data.min(), self.data.max()", "title": "" }, { "docid": "8ccb401b9ae4f50a3f7c05dd9e23441b", "score": "0.5649097", "text": "def norm_data_max_min_value(self):\n return (self.data - self.data.min()) / (self.data.max() - self.data.min())", "title": "" }, { "docid": "61f15095d1d2cce82242a460c723848c", "score": "0.5646689", "text": "def random_init(self):\n range_v = np.arange(\n self.param_range[0], self.param_range[1]\n +self.param_range[2], self.param_range[2])\n value = [random.choice(range_v)]\n if self.param_type == 'integer':\n value[0] = int(round(value[0]))\n value.append(0)\n return value", "title": "" }, { "docid": "c3ace059722b7f1ec44867b67520c092", "score": "0.5646254", "text": "def random2(minimum: Optional[int] = None, maximum: Optional[int] = None) -> int:\n pass", "title": "" }, { "docid": "f7faaad2f8f7c3cf55dc849846667a00", "score": "0.5646143", "text": "def rand(self):\n return 
random.uniform(-self.noise,self.noise)", "title": "" }, { "docid": "89c58a73f83eeb1280f4724f796ac4d8", "score": "0.56440884", "text": "def __init__(self, min=0., max=1.):\n super(Uniform, self).__init__()\n self.min = min\n self.max = max", "title": "" }, { "docid": "db84b25201373351d0f15238c6b7df49", "score": "0.56425524", "text": "def randsoliton(parameter_max_int):\n randuniform = random.random()\n if randuniform is 0.0:\n return 1\n else:\n inversevalue = int(math.ceil(1 / randuniform))\n if inversevalue <= parameter_max_int:\n return inversevalue\n else:\n return 1", "title": "" }, { "docid": "b8dd4a64923935eeafc58e64e346c773", "score": "0.5625106", "text": "def randsign(num: int) -> int:\n return random.choice([-1, 1]) * random.randint(0, num)", "title": "" }, { "docid": "eec2a1a8f3041c7938969cf8fc042ced", "score": "0.56117225", "text": "def select_arm(self) -> int:\n result = np.random.randint(self.values.shape[0])\n if np.random.rand() > self.epsilon:\n result = np.argmax(self.values)\n return result", "title": "" }, { "docid": "f9b053b9da5bb6a2f56b55e52830c94c", "score": "0.56116486", "text": "def test_random_output_limits():\n print(\"Careful, this test is run on random numbers, so errors might not reoccur\")\n\n assert max(gt.generate_heights(10,20,2)) <= 2\n assert min(gt.generate_heights(10,20,2)) >= 0\n\n assert min(gt.generate_slope(10,20,2)) >= 0\n # generate_slope needs calculation of slope!\n t = gt.generate_slope(10,20,2)\n max_slope = 0\n for i, height in enumerate(t[1:]):\n slope = abs(height - t[i]) # i will count from 0, even though enumerate starts from 1!\n max_slope = max(slope, max_slope)\n # print some things that may help with debugging\n print(\"index: \" + str(i))\n print(\"calculated slope: \" + str(i))\n print(\"two compared values: \" + str(height) + \" \" + str(t[i]))\n print(t)\n \n assert max_slope <= 2", "title": "" }, { "docid": "ed1b6b174447d10af32f22f5df4f99e2", "score": "0.56068724", "text": "def _flatPrior(self, val):\n if val < self.minval or val > self.maxval:\n return -N.inf\n return 0.", "title": "" }, { "docid": "1a6c8c0e7caef0718f7319afbbf4374f", "score": "0.5605206", "text": "def unnormalize(value, max_value, min_value):\n return (value * (max_value - min_value) + min_value)", "title": "" }, { "docid": "36d106708f0db4a49a8b999dc8a5843b", "score": "0.56045294", "text": "def get_eqp_itm_attr_value(self, min_value, max_value):\n # bereken de range. abs omdat in het geval van bijv min_str de waarden omgedraaid kunnen zijn.\n num_range = abs(max_value - min_value)\n # hoe groot is 1 stapje over de range als er in totaal 10 stapjes zijn.\n step = num_range / 9\n # zet de mec skill om naar de juiste positie in de range\n if max_value < min_value:\n target = min_value - (self.tot * step) + step\n else:\n target = min_value + (self.tot * step) - step\n # pak een willekeurige plek op de range op basis van de target met normaalverdeling.\n result = random.gauss(target, step * 2)\n\n # even de waarden omdraaien voor bijv min_str. 
want daar is laag 'goed' en hoog 'slecht'\n        if max_value < min_value:\n            min_value, max_value = max_value, min_value\n\n        # als hij te hoog of te laag is, zet hem dan gelijk aan de max of min.\n        if result > max_value:\n            result = max_value\n        elif result < min_value:\n            result = min_value\n\n        return round(result)", "title": "" }, { "docid": "aa3d9e7e3fa914e822d5f3574a3d51aa", "score": "0.5601949", "text": "def get_raw_data(x_norm, min_val, max_val):\n    x = x_norm * (max_val - min_val) + min_val\n    \n    return x", "title": "" } ]
be23d7683daca00e373a29768d3ceb1e
Helper Function used to hold information about source.
[ { "docid": "306ee5e70c004413151acaa5a82c7cc8", "score": "0.0", "text": "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['physobs'] = ''", "title": "" } ]
[ { "docid": "07df9a4b8a56ffb16ae2af1c35e2bde8", "score": "0.82223535", "text": "def _source_info(self):\n raise NotImplementedError", "title": "" }, { "docid": "8f1525149c824dd39c528c068e304f2b", "score": "0.80226475", "text": "def gets_source_info(self):\n return self.source_info", "title": "" }, { "docid": "edc5b5d92bad56c7f5af4495eed6058d", "score": "0.79903984", "text": "def _get_source(self):", "title": "" }, { "docid": "f04fdf3fa1914109a208fbe680f519e1", "score": "0.7812419", "text": "def getSource():", "title": "" }, { "docid": "f04fdf3fa1914109a208fbe680f519e1", "score": "0.7812419", "text": "def getSource():", "title": "" }, { "docid": "435be8834278480dc8fe0335e2f58e87", "score": "0.7717055", "text": "def source_info(self) -> str:\n return pulumi.get(self, \"source_info\")", "title": "" }, { "docid": "7992008431ec0a034d3ae66f35abca61", "score": "0.7608224", "text": "def get_source_info(self):\n try:\n source = self._source_info()\n except NotImplementedError:\n source = {}\n return source", "title": "" }, { "docid": "ed1432665798fce43c1387b411d26a1f", "score": "0.7495977", "text": "def _get_source(self):\n return self.__source", "title": "" }, { "docid": "ed1432665798fce43c1387b411d26a1f", "score": "0.7495977", "text": "def _get_source(self):\n return self.__source", "title": "" }, { "docid": "9a751d8085f977491b2730caf4593e12", "score": "0.74949104", "text": "def get_source_metadata(self):\n pass", "title": "" }, { "docid": "40fb62b910ec97406847e2e162adb2e2", "score": "0.7485064", "text": "def source_info(self) -> 'outputs.SourceInfoResponse':\n return pulumi.get(self, \"source_info\")", "title": "" }, { "docid": "3b800c9a43a6dff35204b292e61510a2", "score": "0.7309267", "text": "def getSource(self):\n pass", "title": "" }, { "docid": "d6653d45fb781826c09dbbdddd6429ee", "score": "0.70822144", "text": "def getSource(self):\r\n return (self.source)", "title": "" }, { "docid": "5f11d2fe76d64a753fd4be94523a42a9", "score": "0.70678467", "text": "def source(self):\n\t\treturn self._source", "title": "" }, { "docid": "dfe35a124668c60eaee13fa90032a02b", "score": "0.7062854", "text": "def getSourceInfo(self):\n result = filter(lambda f: (self.source == f['filename']), self.files)\n if result:\n return result[0]\n return None", "title": "" }, { "docid": "390878c9314cdb3e27fc48263f6e2c4c", "score": "0.70608604", "text": "def source(self) -> str:\n return self.observation[\"source\"]", "title": "" }, { "docid": "3f93b2bd2ed6048932f39f560c49aecf", "score": "0.7037", "text": "def get_source(self):\n return self.source", "title": "" }, { "docid": "3f93b2bd2ed6048932f39f560c49aecf", "score": "0.7037", "text": "def get_source(self):\n return self.source", "title": "" }, { "docid": "c9e331b71ee12143b32ee2453845ee57", "score": "0.7028573", "text": "def import_info_source(self, info_source):\n if self.has_attribute(info_source, TOOL):\n tool = self.get_attribute(info_source, TOOL)\n if self.has_attribute(info_source, USER):\n user = self.get_attribute(info_source, USER)\n if self.has_attribute(info_source, FILE):\n f = self.get_attribute(info_source, FILE)\n if self.has_attribute(info_source, TIMESTAMP):\n ts = self.get_attribute(info_source, TIMESTAMP)\n self.update_counter(INFO_SOURCE)", "title": "" }, { "docid": "f2445e7cc803f9b56bada1f3345c148e", "score": "0.7017242", "text": "def source(context):", "title": "" }, { "docid": "581e9ca0cfd0b22bf94c5b48c171ab6f", "score": "0.6931001", "text": "def source(self):\n return self._data.get('source')", "title": "" }, { "docid": 
"beb0716ead7cfa3e0c2f1ef6634202e0", "score": "0.6906154", "text": "def get_source(self):\n raise NotImplementedError()", "title": "" }, { "docid": "919cfb42d7ffaa9b9b9b03a0c4b12803", "score": "0.68476033", "text": "def source(self) -> str:\n return self._data.get('Source')", "title": "" }, { "docid": "de0b442c050fd7f89b32b964f97f025a", "score": "0.68397856", "text": "def get_source(self):\n return self.__source", "title": "" }, { "docid": "7ba55c7671feae63282ad3957e2d0b47", "score": "0.6824753", "text": "def get_source(self) :\n return self.opts.source", "title": "" }, { "docid": "ff8a99209110d3abbd926605196e358d", "score": "0.68091285", "text": "def source_details(self) -> Optional[Sequence['outputs.RuleSourceSourceDetail']]:\n return pulumi.get(self, \"source_details\")", "title": "" }, { "docid": "9bd0b4a364bd4535488ad060b7d657c3", "score": "0.6805226", "text": "def source(self) -> str:\n raise NotImplementedError()", "title": "" }, { "docid": "0f3002fc9e23737bc525bdad58fe8ed4", "score": "0.67816406", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "0f3002fc9e23737bc525bdad58fe8ed4", "score": "0.67816406", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "0f3002fc9e23737bc525bdad58fe8ed4", "score": "0.67816406", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "0f3002fc9e23737bc525bdad58fe8ed4", "score": "0.67816406", "text": "def get_source(cls, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "71c206a9975ce463517e76fc2c216f21", "score": "0.6773298", "text": "def src_info(self):\n if not self._src_info:\n self._src_info = self.SrcGraph.load_src_info()\n\n return self._src_info", "title": "" }, { "docid": "cdbdf78c0145ff69b8581dfd41b73ef1", "score": "0.67519623", "text": "def get_source_description(self) -> str:\n pass", "title": "" }, { "docid": "d506681df2f6e25c262ebcb2d216f6dd", "score": "0.67491776", "text": "def source(self):\n return self.__source", "title": "" }, { "docid": "1cca18c1194d2604b17174ade7a06791", "score": "0.6706362", "text": "def source(self):\n return self.identifier.source", "title": "" }, { "docid": "9a239a731098759e3f556433c2d07e5f", "score": "0.6701506", "text": "def source(self) -> str:\n return SOURCE", "title": "" }, { "docid": "c83bf0cc2826968874a0081084dd9081", "score": "0.6683889", "text": "def getSource(self):\n return self.source", "title": "" }, { "docid": "c83bf0cc2826968874a0081084dd9081", "score": "0.6683889", "text": "def getSource(self):\n return self.source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": 
"def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "47eea1a586face4add1e3109351a6539", "score": "0.6656895", "text": "def source(self):\n return self._source", "title": "" }, { "docid": "1c857a4ddd16c7061ef71523e42886ec", "score": "0.6646954", "text": "def test_infers_source(self):\n # works for string\n source = pathlib.Path(\"path/blah.fasta\")\n aln = make_aligned_seqs(\n {\"A\": \"ACGT\"}, info=dict(source=str(source), random_key=1234)\n )\n gr = generic_result(aln)\n self.assertEqual(gr.source, source.name)\n\n # or Path\n aln.info.source = source\n gr = generic_result(aln)\n self.assertEqual(str(gr.source), source.name)\n\n # or DataMember\n aln.info.source = DataMember(data_store=None, unique_id=source.name)\n gr = generic_result(aln)\n self.assertEqual(str(gr.source), source.name)\n\n aln.info = {}\n with self.assertRaises(ValueError):\n generic_result(aln)", "title": "" }, { "docid": "1cfc40a32a6226c17b5a17950fbcced9", "score": "0.66293186", "text": "def source(self):\n return self.source_object", "title": "" }, { "docid": "9ab3579c94e9f0ffda111ae8cf63ee04", "score": "0.6614819", "text": "def get_src(self) -> dict:\n raise NotImplementedError", "title": "" }, { "docid": "47a80dbcda9e3f15bc349acc22c5ca9a", "score": "0.6594878", "text": "def source(self) -> str:\n return pulumi.get(self, \"source\")", "title": "" }, { "docid": "47a80dbcda9e3f15bc349acc22c5ca9a", "score": "0.6594878", "text": "def source(self) -> str:\n return pulumi.get(self, \"source\")", "title": "" }, { "docid": "47a80dbcda9e3f15bc349acc22c5ca9a", "score": "0.6594878", "text": "def source(self) -> str:\n return pulumi.get(self, \"source\")", "title": "" }, { "docid": "47a80dbcda9e3f15bc349acc22c5ca9a", "score": "0.6594878", "text": "def source(self) -> str:\n return 
pulumi.get(self, \"source\")", "title": "" }, { "docid": "c39c73773f67bc9cee8853292010dbd0", "score": "0.65606713", "text": "def source(self) -> str:\n return self._source", "title": "" }, { "docid": "c39c73773f67bc9cee8853292010dbd0", "score": "0.65606713", "text": "def source(self) -> str:\n return self._source", "title": "" }, { "docid": "c39c73773f67bc9cee8853292010dbd0", "score": "0.65606713", "text": "def source(self) -> str:\n return self._source", "title": "" }, { "docid": "c39c73773f67bc9cee8853292010dbd0", "score": "0.65606713", "text": "def source(self) -> str:\n return self._source", "title": "" }, { "docid": "44f3b84d4d023cb2bbb197617d7462fb", "score": "0.6558622", "text": "def load_source(self, source):\n return source", "title": "" }, { "docid": "842309ea4745d639cc9b0d723ef2485f", "score": "0.6555884", "text": "def source(self):\n return self.config['source']", "title": "" }, { "docid": "bfb50ca3f8b99a8bc8e6668829bc536f", "score": "0.65217596", "text": "def set_source(self, source: str) -> None:", "title": "" }, { "docid": "d04e8941f3e183426618aa93afa43634", "score": "0.65159917", "text": "def get_source(self):\n starts, stops = self._get_field_indices(\"SOURCE\")\n return self.lines[starts[0]][12:].strip()", "title": "" }, { "docid": "d39e92b227490e19f4c6561537177541", "score": "0.6513514", "text": "def source(self):\n return self._current_source", "title": "" }, { "docid": "fd3baa928c16cf102ae97784bac85398", "score": "0.6499168", "text": "def source(self):\n for pretty_name, name in self._source_list.items():\n if self._mediasource == name:\n return pretty_name", "title": "" }, { "docid": "d8b800d4976ed11ba48ca4c5907849e9", "score": "0.6473119", "text": "def source(self):\n try:\n return self.evidence.source\n except AttributeError:\n return None", "title": "" }, { "docid": "def49e2e001efbac0c6a7f6f0a7d346f", "score": "0.6472403", "text": "def __init__(self, source):\n self.source = source", "title": "" }, { "docid": "def49e2e001efbac0c6a7f6f0a7d346f", "score": "0.6472403", "text": "def __init__(self, source):\n self.source = source", "title": "" }, { "docid": "def49e2e001efbac0c6a7f6f0a7d346f", "score": "0.6472403", "text": "def __init__(self, source):\n self.source = source", "title": "" }, { "docid": "6594bd228e3c006c5cb73fb1484093e7", "score": "0.64443946", "text": "def get_sources(self):\n return self.sources", "title": "" }, { "docid": "befd90def06fdce3c2b59011225163e3", "score": "0.64442575", "text": "def get_source_name(self) -> str:\n pass", "title": "" }, { "docid": "6e8c83e963e81f0a4c857fc226fd0324", "score": "0.64432526", "text": "def get_source(self, obj):\n\n data = self._get_data(obj)\n return data['source']", "title": "" }, { "docid": "ad46a4f34aecf69266d563615a7623a0", "score": "0.644053", "text": "def metadata(self):\n return self.source_to_metadata_dict[self.source]", "title": "" }, { "docid": "fe1295cca77e5c3cdd0827e3e88347d6", "score": "0.6417345", "text": "def __init__(self, source):\n \n self.source = source", "title": "" }, { "docid": "44c7e3e78fc3719bdf7ca16c89fa00fa", "score": "0.6399405", "text": "def source(self): # noqa: D401\n return self.__source", "title": "" }, { "docid": "5a51f66b2e558222d0cc9bb604fba0ce", "score": "0.6391858", "text": "def source(self):\n return self.config.get('source', '').lower()", "title": "" }, { "docid": "3f002942936b1841af4b48ec4908b1a4", "score": "0.63630587", "text": "def get_source(self, fullname:str) -> str:\r\n return NotImplementedError", "title": "" }, { "docid": "8e1e5b8f2684c3e102ea0e6c340f48c8", "score": 
"0.6362757", "text": "def source(self):\n return self._status.source", "title": "" }, { "docid": "8a67864fefb2c7d8abc9f2fc662f4f9a", "score": "0.6354617", "text": "def fetch_source_info(self):\n\n if self._cloud_id is None:\n return {}\n\n resource = getattr(self._api, self._source_type)\n try:\n data = resource(self._cloud_id).get()\n except RestHttpBaseException as exc:\n raise CloudError(\"Error calling method on iotile.cloud\", exception=exc, response=exc.response.status_code)\n\n return data", "title": "" }, { "docid": "1ad0f46889969c24d106f6af050bada3", "score": "0.63412017", "text": "def source(self) -> str:\n return self._get_current(\"SI\")", "title": "" }, { "docid": "be0514a4de5cc817dbd4c71e164c53e5", "score": "0.63409966", "text": "def source(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"source\")", "title": "" }, { "docid": "d6e40130f10030f590cfa678cf1f5b8e", "score": "0.63056546", "text": "def _get_source(self):\n if self.type == self.TYPE_VIRTUAL:\n return self._network\n if self.type == self.TYPE_BRIDGE:\n return self._bridge\n if self.type == self.TYPE_DIRECT:\n return self._source_dev\n if self.type == self.TYPE_USER or self.type == self.TYPE_ETHERNET:\n return None\n return self._network or self._bridge or self._source_dev", "title": "" }, { "docid": "76286b5592932b6524590e908dbbc694", "score": "0.629968", "text": "def get_source(self):\n\t\treturn self", "title": "" }, { "docid": "07a4e993b0fc78bb874e179f92952b7c", "score": "0.62915814", "text": "def get_source(self):\n return self._payload.get_source()", "title": "" }, { "docid": "8eb155c3451996d7f2335c0eb8b2db1c", "score": "0.6281648", "text": "def mminfo(source):\n return MMFile.info(source)", "title": "" }, { "docid": "b518bd368d2517b0708a820cc5f97cbc", "score": "0.62770337", "text": "def source(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"source\")", "title": "" }, { "docid": "cd51c4a2b4d58c06844d406503a9a8e1", "score": "0.6274847", "text": "def source_display(self, obj):\n return obj.get_source_display()", "title": "" }, { "docid": "99256c2bf560e0a4ebd8f05bd5d1ad1f", "score": "0.6271238", "text": "def source_reader_info(self) -> Optional[dict]:\n if self.source_buffer:\n return self.source_buffer.source_reader_info\n return None", "title": "" }, { "docid": "5194eca3f5a6f5fb60971fe107bfb93d", "score": "0.6246243", "text": "def source(self):\n return self.station.source", "title": "" }, { "docid": "466a55ff65e31f73a4d2cfc9e46a3e0a", "score": "0.62452996", "text": "def __init__(self, source=None):\r\n self.source = source", "title": "" } ]
a3aefbba4f3fe7499f753e9cc4189344
Return the command prefix used to access this instance. If the access method is 'local_netns', return the prefix to SSH to the instance through the DHCP network namespace.
[ { "docid": "0497a1c5bc2c4f7841917b86195be32c", "score": "0.8331157", "text": "def _get_access_ssh_prefix_command(self):\n if CONF.instance_access == INSTANCE_ACCESS_LOCAL_NETNS:\n server = self.server.manager.get(self.server.id)\n network_name = server.networks.popitem()[0]\n neutron = get_neutron_client(\n project_name=self.project_fixture.name,\n user_name=self.project_fixture.admin_user.name,\n password=self.project_fixture.admin_user_fixture.password)\n network = neutron.list_networks(name=network_name)['networks'][0]\n netns = 'qdhcp-%s' % network['id']\n check_network_namespace(netns)\n return 'sudo ip netns exec %s' % netns\n else:\n return ''", "title": "" } ]
[ { "docid": "d034930ba238d451a6a15ec6fb6787e5", "score": "0.66399455", "text": "def connection_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_prefix\")", "title": "" }, { "docid": "3e04c860ff4fb0d40a0607f04029da14", "score": "0.6254952", "text": "def get_cmd(self):\n return self.prefix_commands", "title": "" }, { "docid": "db42761fac5a5c4db9db276d9b4bd759", "score": "0.61900914", "text": "def connection_string_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_string_prefix\")", "title": "" }, { "docid": "5b1050768aa689af713905fc570c15ff", "score": "0.6158466", "text": "def connection_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_prefix\")", "title": "" }, { "docid": "5b1050768aa689af713905fc570c15ff", "score": "0.6158466", "text": "def connection_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_prefix\")", "title": "" }, { "docid": "3a76c5e4e2379face6e0ccf51e40a967", "score": "0.615106", "text": "def IpPrefix(self):\r\n\t\treturn self._get_attribute('ipPrefix')", "title": "" }, { "docid": "2361a29a42d2eea69a316809e6f827e4", "score": "0.61308765", "text": "async def get_cmdfix(self, msg):\n return await self.get_prop(msg, 'command_prefix')", "title": "" }, { "docid": "76777d0c1fa8e458515f2293acea5bff", "score": "0.60213923", "text": "def _get_vim_network_name(self):\n return self.__vim_network_name", "title": "" }, { "docid": "76777d0c1fa8e458515f2293acea5bff", "score": "0.60213923", "text": "def _get_vim_network_name(self):\n return self.__vim_network_name", "title": "" }, { "docid": "93d91c1957f637bdfd49eb1ef7125fed", "score": "0.5965479", "text": "def natprefix(self) :\n\t\ttry :\n\t\t\treturn self._natprefix\n\t\texcept Exception as e:\n\t\t\traise e", "title": "" }, { "docid": "15430af3694d3e8dcdc24562d7c4a0ee", "score": "0.5906206", "text": "def get_prefix(self):\n return self.prefix", "title": "" }, { "docid": "897c5ade945493b1f1a03be1621b0438", "score": "0.5813015", "text": "def get_prefix(self):\n # pass\n return self._prefix", "title": "" }, { "docid": "439879ff7786c1c3bdb1d838c8eca058", "score": "0.5807457", "text": "def cluster_identifier_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_identifier_prefix\")", "title": "" }, { "docid": "439879ff7786c1c3bdb1d838c8eca058", "score": "0.5807457", "text": "def cluster_identifier_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_identifier_prefix\")", "title": "" }, { "docid": "49bbbf6d0962e874d360fafe361a707a", "score": "0.5802878", "text": "def prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "1fbe5f803ef72710beb6347546b39c94", "score": "0.5800424", "text": "def address_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address_prefix\")", "title": "" }, { "docid": "1fbe5f803ef72710beb6347546b39c94", "score": "0.5800424", "text": "def address_prefix(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"address_prefix\")", "title": "" }, { "docid": "ee7cbef300b9169d4694d1de4db6a801", "score": "0.574716", "text": "def prefix(self) -> Optional[str]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "ee7cbef300b9169d4694d1de4db6a801", "score": "0.574716", "text": "def prefix(self) -> Optional[str]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "ee7cbef300b9169d4694d1de4db6a801", "score": "0.574716", "text": "def prefix(self) -> 
Optional[str]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "3d85e6bc614a01f72ca5bb699e17be14", "score": "0.5694493", "text": "def Prefix(self):\n if self.force_auto_sync:\n self.get('Prefix')\n return self._Prefix", "title": "" }, { "docid": "0d5b30006a7d7f9c76840e0e0278e323", "score": "0.5689165", "text": "def getPrefix(self):\n return self.prefix", "title": "" }, { "docid": "7352cda3f16aa86de0637f19c2013155", "score": "0.56777513", "text": "def prefix(self):\n return self._prefix", "title": "" }, { "docid": "62ed5fa2e927ee88765deff2137dfca9", "score": "0.5677584", "text": "def get_priv_net_name(self):\n priv_net_name = self.get_cluster_name() + \"_priv\"\n return priv_net_name", "title": "" }, { "docid": "8b38023cc618d5bf753e67edcfe29e40", "score": "0.5673416", "text": "def __cmd_for(self, cmd):\n return settings.RP_NSSH + ' \"' + cmd + '\"'", "title": "" }, { "docid": "f0662ee77eed310c45d5c0d7c72a4b32", "score": "0.5671889", "text": "def getPrefix(self):\n return self.__prefix", "title": "" }, { "docid": "5cd293fa3e8ffd1cca14d8e0efc62ad7", "score": "0.56289196", "text": "def _get_prefix_ingress(self):\n return self.__prefix_ingress", "title": "" }, { "docid": "9a00b03ab5537b2dc9f112c4238de304", "score": "0.5612822", "text": "def connection_string_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_string_prefix\")", "title": "" }, { "docid": "9a00b03ab5537b2dc9f112c4238de304", "score": "0.5612822", "text": "def connection_string_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"connection_string_prefix\")", "title": "" }, { "docid": "28ccee436572cc2c853be4c1ec7753bb", "score": "0.5609369", "text": "def _get_if_prefix(self, agent_mode, router):\n if not router.router.get('distributed'):\n return INTERNAL_DEV_PREFIX\n\n if agent_mode == 'dvr_snat':\n return SNAT_INT_DEV_PREFIX\n\n if router.rtr_fip_connect:\n return ROUTER_2_FIP_DEV_PREFIX", "title": "" }, { "docid": "94be7e45202f53da4c69e9461ad28eca", "score": "0.5595559", "text": "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "title": "" }, { "docid": "03ba1a748ca6b3b4ff3ff96965273323", "score": "0.55922174", "text": "async def _prefix_callable(bot, message):\n base = ['<@{}> '.format(bot.user.id)]\n if isinstance(message.channel, discord.abc.PrivateChannel):\n base.append('!')\n else:\n result = bot.db.get_prefix(message.guild.id)\n if result:\n base.append(result.get('prefix'))\n else:\n bot.db.add_guild(message.guild.id)\n base.append('!')\n return base", "title": "" }, { "docid": "7513de195cafeebc595a905fa4c1b795", "score": "0.5581194", "text": "def get_prefix(bot, message):\n return bot.prefix", "title": "" }, { "docid": "72cd5ec863852802a7f85227e0b947c2", "score": "0.55765504", "text": "def management_network(self) -> str:\n return pulumi.get(self, \"management_network\")", "title": "" }, { "docid": "ccac3bbd3d75b944e49471cb7f7b0927", "score": "0.55750346", "text": "def getPrefix(self):\n return self._prefix", "title": "" }, { "docid": "c016b7fb9e608a08051b8b499235b25d", "score": "0.55553174", "text": "def public_ip_prefix(self) -> Optional[pulumi.Input['SubResourceArgs']]:\n return pulumi.get(self, \"public_ip_prefix\")", "title": "" }, { "docid": "c016b7fb9e608a08051b8b499235b25d", "score": "0.55553174", "text": "def public_ip_prefix(self) -> Optional[pulumi.Input['SubResourceArgs']]:\n return pulumi.get(self, \"public_ip_prefix\")", "title": "" }, { "docid": "abaea94c345d661cc0e4cb972b454ae2", "score": 
"0.55089104", "text": "def get_prefix(client, message):\n\n prefix = os.getenv('WHOLESOME_PREFIX')\n\n if not message.guild:\n # no prefix necessary in DMs\n prefix = ''\n\n return prefix", "title": "" }, { "docid": "19666664ba3ded40d8793830a3a635be", "score": "0.54756325", "text": "def command_name(self):\n return self.__class__.__name__", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "fb5e5d2bed860e4da99095e9aaea5454", "score": "0.5475517", "text": "def prefix(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "title": "" }, { "docid": "975a0f90a8934944327e14af9ef6f1de", "score": "0.5465192", "text": "def getPrefix(self, schema):\n if self.ignorePrefix:\n return \"\"\n return schema.__name__", "title": "" }, { "docid": "bb244c9b7af29b20981124c5a13edeab", "score": "0.54492307", "text": "def dns_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dns_prefix\")", "title": "" }, { "docid": "dbf4d441b8a39c1a3e48f7f68bbc1e2b", "score": "0.54385936", "text": "def network_name(self) -> str:\n return self._network_name", "title": "" }, { "docid": "53cc5af229cb9396b9f60da688ace1c8", "score": "0.5429798", "text": "def network_interface_name(self) -> Optional[str]:\n return pulumi.get(self, \"network_interface_name\")", "title": "" }, { "docid": "7b6f63f827f94b49dc70d4d98e68a295", "score": "0.5428814", "text": "def netmask_or_prefix(self):\n return self._netmask_or_prefix", "title": "" }, { "docid": "2f702989b217a228c9989ea5d7f8e031", "score": "0.5422693", "text": "def get_fully_qualifying_key(self):\n res = self.prefix\n if self.config.key is not _ANONYMOUS:\n res += self.prefix and '.' + self.config.key or self.config.key\n return res", "title": "" }, { "docid": "ea314ce0a23b05ae200e15fafc806152", "score": "0.5416722", "text": "def get_prefix():\n\n return TEST_PREFIX if get_test_mode() else config.get(\"ckanext.doi.prefix\")", "title": "" }, { "docid": "2f7a7d79676252749becc30bd5c28dc4", "score": "0.54137355", "text": "def network_connection_name(self) -> str:\n return pulumi.get(self, \"network_connection_name\")", "title": "" }, { "docid": "91b51682263233f218afbb2fc3730ac5", "score": "0.5412391", "text": "def prefix(self):\n return str(os.path.basename(self.prefix_path))", "title": "" }, { "docid": "0c9dacebb6445be0f4fe61dc8dea023e", "score": "0.5411666", "text": "def build_command_string(self):\n cmd = \"\"\n if self.term_mono:\n cmd = \"{} \".format(self.term_mono)\n cmd += \"ssh\"\n if self.port:\n cmd = \"{} -p {}\".format(cmd, self.port)\n if self.login:\n cmd = \"{} -l {}\".format(cmd, self.login)\n if self.options:\n cmd = \"{} {}\".format(cmd, self.options)\n if self.host:\n cmd = \"{} {}\".format(cmd, self.host)\n if self.suffix:\n cmd = \"{} {}\".format(cmd, self.suffix)\n return cmd", "title": "" }, { "docid": "5c1efe81677f88fd08c6c1de3316cf23", "score": "0.5409481", "text": "def clan(self):\n try:\n return minqlx.parse_variables(minqlx.get_configstring(529 + self._id))[\"cn\"]\n except KeyError:\n return \"\"", "title": "" }, { "docid": "6e15bbd972c5d46ee62f7c3e9ed5b454", "score": "0.5391754", "text": "def get_command_name(value):\n if value == 1:\n return 'CONNECT'\n elif value == 2:\n return 'BIND'\n elif value == 3:\n return 'UDP_ASSOCIATE'\n else:\n return None", "title": "" }, { "docid": "54d95c0e3fe5ff13d07a516b9a64cfe0", "score": "0.5390153", "text": "def logPrefix(self):\n return self.logstr", "title": "" }, { "docid": "0b2bad01dcfb13efcbf36e46f47bd31b", "score": "0.53831244", "text": "def getGlobalPrefix(cls):\n\t\treturn cls._DB_PREFIX;", "title": "" }, { "docid": "623d2fd28a57542906801e46d976734d", "score": "0.5375126", "text": "def command(self) -> str:\n return self._command", "title": "" }, { "docid": "623d2fd28a57542906801e46d976734d", "score": "0.5375126", "text": "def command(self) -> str:\n return self._command", "title": "" }, { "docid": "2d18298abc179b0f4aa1e7a1e8f061dd", "score": "0.5373546", "text": "def get_prefix(self):\n return ''", "title": "" }, { "docid": 
"b6223de3c811f144dd1f85d648b00bf3", "score": "0.53649724", "text": "def get_topic_prefix(self):\n return \"/carla/\" + self.prefix", "title": "" }, { "docid": "cc035dd1504d32eb4ae361f9ffc4e9ed", "score": "0.5362462", "text": "def name(self):\n return self.command", "title": "" }, { "docid": "a86103f17b4120962595a04f038bdbbc", "score": "0.5354293", "text": "def cluster_identifier_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier_prefix\")", "title": "" }, { "docid": "a86103f17b4120962595a04f038bdbbc", "score": "0.5354293", "text": "def cluster_identifier_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier_prefix\")", "title": "" }, { "docid": "a86103f17b4120962595a04f038bdbbc", "score": "0.5354293", "text": "def cluster_identifier_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier_prefix\")", "title": "" }, { "docid": "a86103f17b4120962595a04f038bdbbc", "score": "0.5354293", "text": "def cluster_identifier_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier_prefix\")", "title": "" }, { "docid": "5f8d0e23240758feb34c75298cae9d00", "score": "0.5352706", "text": "def address_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"address_prefix\")", "title": "" }, { "docid": "3e02276ad2d9fdc106ec536ccf76e53d", "score": "0.53442353", "text": "def local(self):\n return self.socket.getsockname()", "title": "" }, { "docid": "3742aad381e703da52382e3b97dfd4d5", "score": "0.5343615", "text": "def computer_name_prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"computer_name_prefix\")", "title": "" }, { "docid": "269bf2ea7abc0ac8361152f0a4cd11e0", "score": "0.533519", "text": "def _guild_prefix_key(guild_id: int):\n return 'prefix_' + str(guild_id)", "title": "" }, { "docid": "27b120395c9e5c41d3993d5aa7cad809", "score": "0.5329661", "text": "def __str__(self):\n ssh_alias = getattr(self.context, 'ssh_alias', None)\n return '%s:%s' % (ssh_alias, self.directory) if ssh_alias else self.directory", "title": "" }, { "docid": "ebb2e1521b5609f55accc8946678ee1b", "score": "0.53233165", "text": "def get_command_name(self):\n return self.__module__.rsplit('.', 1)[1]", "title": "" }, { "docid": "83bac798377380c488bb10ed3ed5609a", "score": "0.531666", "text": "def _prefix(self, call):\n return \"{} \".format(call)", "title": "" }, { "docid": "7f691f730c634b351ea1933fbf9ea0a3", "score": "0.529992", "text": "def prefix() -> str:\n if brevitas.NATIVE_STE_BACKEND_LOADED:\n prefix = NATIVE_PREFIX\n else:\n prefix = AUTOGRAD_OPS_PREFIX\n return prefix", "title": "" }, { "docid": "71c37f9feb1b178a998aa16475a0c266", "score": "0.52950835", "text": "def get_public_delegated_prefix_output(project: Optional[pulumi.Input[Optional[str]]] = None,\n public_delegated_prefix: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPublicDelegatedPrefixResult]:\n ...", "title": "" }, { "docid": "ed5e4f5e2c45fddbaab726384b886ad5", "score": "0.52771634", "text": "async def prefix(self, ctx, *_):\n\n prefix = await ctx.bot.get_cog(\"PrefixStore\").get(ctx.message)\n await ctx.send(ctx._(\"current_prefix\").format(prefix=prefix))", "title": "" }, { "docid": "02238e25c0fbe5fcbf6a3743b0df26f8", "score": "0.5263785", "text": "def getCmd(self) -> str:\n raise NotImplementedError", "title": "" }, { "docid": "eb16b9f7df1c50b4548e8d2203e2db8a", "score": 
"0.52506894", "text": "async def prefix_command(self, ctx: commands.Context) -> None:\n model = await GuildModel.get_or_none(guild_id=ctx.guild.id)\n prefix = model.prefix\n response = f\"Hello {ctx.author.name}, My prefix here is {self.bot.user.mention} or `{prefix}`\\nUse the `{prefix}changeprefix <new_prefix>` command to change it.\"\n await ctx.send(response)", "title": "" }, { "docid": "1bcf50bebed094e3fc431991ad900ac8", "score": "0.5248128", "text": "async def prefix(self, ctx, new_prefix=None):\n if new_prefix is not None:\n if ctx.guild is None:\n await ctx.send(embed=embeds.error(\"You can only change prefixes in a server.\"))\n else:\n if ctx.author.permissions_in(ctx.channel).manage_guild:\n user_id = self.bot.user.id\n if new_prefix not in [f'<@{user_id}>', f'<@!{user_id}>']:\n if len(new_prefix) < 20:\n config = await self.bot.get_config()\n prefixes = config[\"prefixes\"]\n prefixes[str(ctx.guild.id)] = new_prefix\n await self.bot.config.find_one_and_update(\n {\"_id\": \"config\"},\n {\"$set\": {\"prefixes\": prefixes}},\n upsert=True,\n )\n embed = embeds.success(f\"Changed prefix to `{new_prefix}`.\")\n embed.set_footer(text=\"You can always mention me!\")\n await ctx.send(embed=embed)\n else:\n await ctx.send(embed=embeds.error(\"Prefix must be under 20 characters.\"))\n else:\n await ctx.send(embed=embeds.error(\"Mentions are already usable.\"))\n else:\n await ctx.send(embed=embeds.error(\"You need the `manage_guild` permission to do this.\"))\n else:\n embed = discord.Embed(title=f\"Command Prefixes\", colour=discord.Colour.dark_magenta())\n\n default_prefix = os.environ.get(\"DEFAULT_PREFIX\", \"c!\")\n embed.add_field(name=\"Default Prefix\", value=code(default_prefix), inline=True)\n\n config = await self.bot.get_config()\n prefixes = config[\"prefixes\"]\n if ctx.guild is not None:\n server_prefix = prefixes.get(str(ctx.guild.id), default_prefix)\n embed.add_field(name=\"Server Prefix\", value=code(server_prefix), inline=True)\n\n dm_prefixes = [code(default_prefix)]\n for guild in self.bot.guilds:\n if str(guild.id) in prefixes and ctx.author in guild.members:\n dm_prefixes.append(code(prefixes[str(guild.id)]))\n embed.add_field(name=ctx.author.name + \"'s DM prefixes.\", value=\", \".join(dm_prefixes))\n\n embed.set_footer(text=\"You can always mention me!\")\n await ctx.send(embed=embed)", "title": "" }, { "docid": "045c8d8b860f87029bf0c70db5b5671b", "score": "0.5244609", "text": "def _prefix_key(self, key):\n if self.prefix:\n prefix_key = '%s:%s' % (self.prefix, key)\n else:\n prefix_key = key\n\n return str(prefix_key)", "title": "" }, { "docid": "d060aed4ae2e79a5265f6cc796ae58db", "score": "0.5241203", "text": "def network(self) -> str:\n return pulumi.get(self, \"network\")", "title": "" }, { "docid": "59ffcfb24d11d6e6ce1f98f0b40f708b", "score": "0.5237652", "text": "def provisioning_network(self) -> str:\n return pulumi.get(self, \"provisioning_network\")", "title": "" }, { "docid": "711d58e016cb400d865fd049fd9c6c56", "score": "0.5237056", "text": "def logPrefix(self):\n return _wrappedLogPrefix(self, self.wrappedProtocol)", "title": "" }, { "docid": "7c0c2ddaf4d9eeec3bc01e26b9ce2651", "score": "0.523059", "text": "def netloc(self):\n if self.port:\n return '%s:%d' % (self.host, self.port)\n return self.host", "title": "" }, { "docid": "4a912d9f7b81764bd434954b9a51147a", "score": "0.5218234", "text": "async def _get_prefix(self, message):\n server = message.guild\n default_prefixes = self.config['prefix']\n try:\n server_info = await 
self.prefix_settings.find_one({\"server_id\":str(server.id)})\n prefixes = server_info[\"prefix\"]\n except:\n prefixes = None\n\n if not prefixes or prefixes == []:\n prefix = default_prefixes\n else:\n prefix = prefixes\n return prefix", "title": "" }, { "docid": "3ea8ad02259b46baa00d55bb36875f29", "score": "0.5214068", "text": "def get_extn_prefix(self):\n return str(self.admin_model.portal_prefix) + str(self.extn_prefix)", "title": "" } ]
8ab59f3b21a98b54621cd7170c19f9f1
list events from calendar, where start date >= start
[ { "docid": "80a4a8c3b7a3fab3c8d915a327a2f432", "score": "0.767218", "text": "def list_events_from(self, start: datetime) -> EventList:\n fields: str = \"nextPageToken,items(id,iCalUID,updated)\"\n events: EventList = []\n page_token: Optional[str] = None\n time_min: str = (\n utc.normalize(start.astimezone(utc)).replace(tzinfo=None).isoformat() + \"Z\"\n )\n while True:\n response = (\n self.service.events()\n .list(\n calendarId=self.calendar_id,\n pageToken=page_token,\n singleEvents=True,\n timeMin=time_min,\n fields=fields,\n )\n .execute()\n )\n if \"items\" in response:\n events.extend(response[\"items\"])\n page_token = response.get(\"nextPageToken\")\n if not page_token:\n break\n self.logger.info(\"%d events listed\", len(events))\n return events", "title": "" } ]
[ { "docid": "ce00275f45df9e6338bd946652e6d651", "score": "0.72512436", "text": "def events(self, start, end):\n events = Event.query.filter(\n Event.group_id == self.id,\n Event.is_active == True,\n Event.parent_id == None # TODO: remove ugly hack - prevent dups\n ).filter(\n ((Event.start <= start) & (Event.until > start)) |\n ((Event.start > start) & (Event.start < end))\n ).order_by(asc(Event.start)).all()\n\n week_start = start.floor('week')\n displayed_events = []\n for event in events:\n if event.start > start:\n displayed_events.append(event)\n else:\n for i, day in enumerate(event.days_of_the_week_booleans):\n if day:\n displayed_events.append(Event.from_parent(\n event,\n week_start.replace(days=+i)))\n return displayed_events", "title": "" }, { "docid": "b314142bd4e667ab7cf3c99b315df088", "score": "0.7003589", "text": "async def async_get_events(self, hass, start_date, end_date):\n collections = []\n for a in self._scraper.get_upcoming(include_today=True):\n if a.date >= start_date.date() and a.date <= end_date.date():\n collections.append(self._convert(a))\n return collections", "title": "" }, { "docid": "2482bc7b5daf32a7643e567783a09259", "score": "0.6770718", "text": "def get_events(calendar_id, start_time, end_time, auth=None):\n if not auth:\n auth = authorization.auth_headers()\n\n try:\n resp = requests.get(\n CALENDAR_API_URL + '/calendars/{}/events'\n '?timeMin={}&timeMax={}&singleEvents=true'.format(\n calendar_id, start_time, end_time),\n headers=auth)\n\n if resp.status_code != 200:\n print 'Error retrieving event from calendar ' + calendar_id\n print resp.content\n return []\n\n events = []\n for i in resp.json().get('items', []):\n events.append({\n 'summary': i.get('summary'),\n 'startTime': i.get('start').get('dateTime'),\n 'endTime': i.get('end').get('dateTime')})\n\n return events\n\n except Exception as e:\n print e.message", "title": "" }, { "docid": "c2f9b688848b683aace7808caa66d9cd", "score": "0.6759751", "text": "async def _async_calendar_schedule_get_events(\n self, hass, calendar_schedule, start_date, end_date\n ):\n query = calendar_schedule.new_query(\"start\").greater_equal(start_date)\n query.chain(\"and\").on_attribute(\"end\").less_equal(end_date)\n if self._search is not None:\n query.chain(\"and\").on_attribute(\"subject\").contains(self._search)\n # _LOGGER.debug(\"get events: %s\", self._calendar_id)\n try:\n return await hass.async_add_executor_job(\n ft.partial(\n calendar_schedule.get_events,\n limit=self._limit,\n query=query,\n include_recurring=True,\n )\n )\n except RetryError:\n _LOGGER.warning(\"Retry error getting calendar events\")\n return None", "title": "" }, { "docid": "b20c7818e4d92897855a5911d0378ec8", "score": "0.6759192", "text": "def getEvents(cal, weeks):\n events = []\n margin = datetime.timedelta(days = (7 * weeks))\n for e in cal.walk('vevent'):\n start = e.get('DTSTART')\n # Get now date relative to timezone of event\n now = datetime.datetime.now(start.dt.tzinfo)\n # If event started within margin (or within the last day)\n if (now - datetime.timedelta(days = 1) <= start.dt <= now + margin):\n events.append(e)\n return events", "title": "" }, { "docid": "1c5615e0ec984db12e019835fd04ccc3", "score": "0.6753033", "text": "async def async_get_events(self, hass, start_date, end_date):\n results = await self.async_o365_get_events(hass, start_date, end_date)\n if not results:\n return\n vevent_list = list(results)\n vevent_list.sort(key=attrgetter(\"start\"))\n event_list = []\n for vevent in vevent_list:\n event = 
CalendarEvent(\n self.get_hass_date(vevent.start, vevent.is_all_day),\n self.get_hass_date(self.get_end_date(vevent), vevent.is_all_day),\n vevent.subject,\n clean_html(vevent.body),\n vevent.location[\"displayName\"],\n )\n event_list.append(event)\n\n return event_list", "title": "" }, { "docid": "5b0430350ba393a67cdcad4489bfb9f5", "score": "0.6743291", "text": "def calendar(request):\n events = paginated_list(request, Event, 20, 'from_date',\n from_date__gte=datetime.date.today,\n organization__is_active=True)\n \n past = paginated_list(request, Event, 20, 'from_date',\n from_date__lt=datetime.date.today,\n organization__is_active=True)\n return render(request, 'site/calendar.html', {'events': events, 'past': past})", "title": "" }, { "docid": "bbd76a805aa6ec12bfaa142d79c1fdae", "score": "0.67083585", "text": "async def events(self, ctx):\n # Check if environment variable exists\n if not os.environ.get(\"GOOGLE_CALENDAR_ENDPOINT\"):\n return await ctx.send(\"No Google Calendar endpoint specified!\")\n\n # Get upcoming events data from calendar\n current_time = datetime.datetime.utcnow()\n formatted_time = current_time.isoformat(\"T\") + \"Z\"\n calendar = os.environ.get(\"GOOGLE_CALENDAR_ENDPOINT\") + \"&timeMin=\" + formatted_time\n data = requests.get(calendar).json()\n\n # Check if there are any events\n if not data[\"items\"]:\n return await ctx.send(\"There are no upcoming events!\")\n\n # Get all upcoming events as a list\n list_of_events = data[\"items\"]\n embeds = []\n\n for event in list_of_events:\n # Create data set\n title = event[\"summary\"]\n start_time = event[\"start\"][\"dateTime\"]\n description = \"No description.\"\n if \"description\" in event:\n description = event[\"description\"]\n formatted_start_time = datetime.datetime.strftime(parse(start_time), format=\"%B %d, %Y\")\n\n # Create embed for single event and add to embeds list\n embed = discord.Embed(color=ctx.author.color, title=title, description=description)\n embed.add_field(name=\"Starts On\", value=formatted_start_time, inline=True)\n embeds.append(embed)\n\n # Create paginator\n paginator = DiscordUtils.Pagination.CustomEmbedPaginator(ctx)\n paginator.add_reaction('⏮️', \"first\")\n paginator.add_reaction('⏪', \"back\")\n paginator.add_reaction('⏩', \"next\")\n paginator.add_reaction('⏭️', \"last\")\n await paginator.run(embeds)", "title": "" }, { "docid": "b0f7a3485a8b53fd63489212fb8a871d", "score": "0.6632418", "text": "def EventList(service):\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n endTm = event['end'].get('dateTime', event['end'].get('date'))\n strtTm = EventListGetTm(start, False) + ' - ' + EventListGetTm(endTm, True)\n print(strtTm, event['summary'])", "title": "" }, { "docid": "59441fcb989c2e749f0d79e357a6a572", "score": "0.6626888", "text": "def get_queryset(self):\n return Event.objects.filter(\n start_date__gte=timezone.now()\n ).order_by('start_date')[:self.limit]", "title": "" }, { "docid": "59441fcb989c2e749f0d79e357a6a572", "score": "0.6626888", "text": "def get_queryset(self):\n return Event.objects.filter(\n start_date__gte=timezone.now()\n ).order_by('start_date')[:self.limit]", 
"title": "" }, { "docid": "45813e3b3ce3ae0c2a27dab8e23b65d0", "score": "0.6623031", "text": "def get_events(cls, start, stop=None):\n import hashlib\n\n from . import scrape\n\n tstart = DateTime(start).secs\n tstop = DateTime(stop).secs\n\n events = scrape.get_fot_major_events() + scrape.get_fdb_major_events()\n\n # Select events within time range and sort by tstart key\n events = sorted(\n (x for x in events if tstart <= x[\"tstart\"] <= tstop),\n key=lambda x: x[\"tstart\"],\n )\n\n # Manually generate a unique key for event since date is not unique\n for event in events:\n key = \"\".join(event[x] for x in (\"start\", \"descr\", \"note\", \"source\"))\n key = key.encode(\n \"ascii\", \"replace\"\n ) # Should already be clean ASCII but make sure\n event[\"key\"] = event[\"start\"] + \":\" + hashlib.sha1(key).hexdigest()[:6]\n\n return events", "title": "" }, { "docid": "27c8936ca8d92c8cc7cd33f639cbaa7f", "score": "0.65967554", "text": "async def async_get_events(\n self, hass: HomeAssistant, start_date: datetime, end_date: datetime\n ) -> list[CalendarEvent]:\n return await self.hass.data[DOMAIN][CALENDAR_PLATFORM].async_get_events(\n hass, start_date, end_date\n )", "title": "" }, { "docid": "0cd7fc8553d808891fda9ed32597e0aa", "score": "0.65851176", "text": "def get_queryset(self):\n return Event.objects.filter(\n start_date__lte=timezone.now()\n ).order_by('-start_date')[:self.limit]", "title": "" }, { "docid": "01ba8c1994323ecbaa666ea89f3ae9f6", "score": "0.6583573", "text": "def show_calendar_entries():\n\n user_id = session['current_user_id']\n user = User.query.get(user_id)\n\n dates = []\n titles = []\n links = []\n event = {}\n events = []\n\n for entry in user.entries:\n date = str(entry.date)[:10]\n dates.append(date)\n title = str(entry.title)\n titles.append(title)\n link = \"/entry_details/\" + str(entry.entry_id) \n links.append(link)\n\n for i in range(0, len(dates)):\n event['title'] = titles[i]\n event['start'] = dates[i]\n event['url'] = links[i]\n event['sort'] = '-1'\n\n events.append(event.copy())\n \n return jsonify(events)", "title": "" }, { "docid": "4eeff41b9216940c2fa938c497482ef5", "score": "0.65772134", "text": "def events(context: CallContext, pretty: bool = True) -> None:\n\n if context.target_calendar_id is None:\n search_cals = [calendar for calendar in context.gcal.client.calendars()]\n else:\n search_cals = [context.target_calendar_id]\n\n load_opts = {\"span\": context.gcal.span}\n # TODO - progress bar? 
See _make_progress_iter.\n\n for cal_id in search_cals:\n cal_data = context.gcal.client.calendar_info(cal_id)\n if pretty:\n click.secho(f\"{cal_data['summary']} [{cal_id}]\", bold=True)\n timespan = context.gcal.client.load_timespan(cal_id, **load_opts)\n if pretty:\n click.secho(f\"Found {len(timespan)} events.\")\n for event in timespan.iter_tags():\n indent = \"\\t\" if pretty else \"\"\n category = f\" ({event.category.fullpath})\" if pretty else \"\"\n click.secho(\n f\"{indent}{event.name} <{event.valid_from.isoformat()}, {event.valid_to.isoformat()}>{category}\"\n )", "title": "" }, { "docid": "1b83798778fc7acaf13a102088896a61", "score": "0.65616274", "text": "def _get_events(service, calendar_id):\n now = datetime.datetime.utcnow().isoformat() + 'Z'\n events = service.events().list(\n calendarId=calendar_id,\n timeMin=now,\n maxResults=10,\n singleEvents=True,\n orderBy='startTime'\n ).execute()\n events = events.get('items', [])\n return events", "title": "" }, { "docid": "e3164516e29e98e22c4add941e87ff8b", "score": "0.6560984", "text": "def find_events(events, start, end, at_words=None):\n things = set()\n for event in events:\n things.update(event.get_dates(start, end, at_words))\n\n return things", "title": "" }, { "docid": "257016cb8a2f83034251ddc842ec8bbd", "score": "0.6446632", "text": "def by_date_range(events, start, end):\n local_start = start.astimezone(tz.local)\n local_end = end.astimezone(tz.local)\n log.debug('filtering between %s (%s) and %s (%s)', start, local_start, end, local_end)\n for event in events:\n # Handle events in other timezones\n if event.dtstart.params.get('X-VOBJ-ORIGINAL-TZID') != None:\n event_tz = pytz.timezone(event.dtstart.params['X-VOBJ-ORIGINAL-TZID'][0])\n # http://stackoverflow.com/questions/12659108/converting-an-ambigious-time-to-a-correct-local-time-with-python\n local_tz = dateutil.tz.tzlocal() \n event_tz_aware = event_tz.localize(event.dtstart.value)\n event.dtstart.value = event_tz_aware.astimezone(local_tz)\n event_tz_aware = event_tz.localize(event.dtend.value)\n event.dtend.value = event_tz_aware.astimezone(local_tz)\n log.debug('checking %s - %s == %s for %s',\n event.dtstart.value,\n event.dtend.value,\n event.summary.value,\n getattr(event, 'uid', 'unknown UID'),\n )\n\n # Add time zones to date objects so we can do proper date\n # range comparisons.\n event_start = event.dtstart.value\n event_end = event.dtend.value\n strip_times = False\n if not isinstance(event_start, datetime.datetime):\n # Convert date to datetime.\n\n # Remember that we need to strip the times\n # back out of the event before yielding.\n strip_times = True\n \n # Start at beginning of the start day.\n event_start = datetime.datetime.combine(event.dtstart.value,\n datetime.time.min,\n )\n\n # Because the DTEND is the non-inclusive end of the\n # event, we use the earliest time on that day when\n # there's a day or less span. 
From\n # http://www.ietf.org/rfc/rfc2445.txt, section 4.6.1.\n event_end = datetime.datetime.combine(event.dtend.value,\n datetime.time.min,\n )\n\n # Convert all times to UTC for comparison\n event_start = tz.assign_tz(event_start)\n event_end = tz.assign_tz(event_end)\n\n # Replace the dates in case we updated the timezone\n event.dtstart.value = event_start\n event.dtend.value = event_end\n\n # More detailed report\n# event.prettyPrint()\n# sys.stdout.flush()\n\n # Look for a recurrance rule\n event_rrule = getattr(event, 'rrule', None)\n \n if event_rrule is not None:\n # Repeating event, check for occurances within the\n # time range.\n duration = event.dtend.value - event.dtstart.value\n rruleset = event.getrruleset(False)\n\n # Clean up timezone values in rrules.\n # Based on ShootQ calendarparser module.\n for rrule in rruleset._rrule:\n # normalize start and stop dates for each recurrance\n if rrule._dtstart:\n rrule._dtstart = tz.assign_tz(rrule._dtstart)\n if hasattr(rrule, '_dtend') and rrule._dtend:\n rrule._dtend = tz.assign_tz(rrule._dtend)\n if rrule._until:\n rrule._until = tz.assign_tz(rrule._until)\n if rruleset._exdate:\n # normalize any exclusion dates\n exdates = []\n for exdate in rruleset._exdate:\n exdate = tz.assign_tz(exdate)\n exdates.append(exdate)\n rruleset._exdate = exdates\n if hasattr(rruleset, '_tzinfo') and rruleset._tzinfo is None:\n # if the ruleset doesn't have a time zone, give it\n # the local zone\n rruleset._tzinfo = tz.local\n\n # Explode the event into repeats\n for recurrance in rruleset.between(local_start, local_end, inc=True):\n log.debug('Including recurrance %s', recurrance)\n dupe = event.__class__.duplicate(event)\n dupe.dtstart.value = recurrance\n dupe.dtend.value = recurrance + duration\n\n# print '\\nYIELDING'\n# dupe.prettyPrint()\n# sys.stdout.flush()\n# event.serialize(sys.stdout)\n# sys.stdout.flush()\n\n if strip_times:\n dupe.dtstart.value = dupe.dtstart.value.date()\n dupe.dtend.value = dupe.dtend.value.date()\n yield dupe\n \n elif event_start >= start and event_end <= end:\n # Single occurance event.\n if strip_times:\n event.dtstart.value = event.dtstart.value.date()\n event.dtend.value = event.dtend.value.date()\n yield event\n\n else:\n log.debug('skipping')", "title": "" }, { "docid": "ebf98132b68bd85f948dccc5ab44af30", "score": "0.6421156", "text": "def search_date_range():\n print(\"Beginning date:\")\n start_date = get_date()\n print(\"End date:\")\n end_date = get_date()\n entries = Entry.select()\n entries = entries.where(\n (Entry.date >= start_date) &\n (Entry.date <= end_date))\n if entries:\n view_entries(entries)\n else:\n no_results()\n return None", "title": "" }, { "docid": "dfc593fed334c84bc830fdcf130f3a1b", "score": "0.6402166", "text": "def main():\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('calendar', 'v3', http=creds.authorize(Http()))\n\n #DATE\n \n dt = datetime.now()\n start = dt - timedelta(days=dt.weekday()) \n end = start + timedelta(days=6)\n start = start - timedelta(hours=dt.hour)\n print('start: ', start)\n print('end: ', end)\n \n print('Getting the upcoming 10 events')\n events_result = service.events().list(calendarId='lbjlpmmfi6bvvp3j5lc450a3rk@group.calendar.google.com', # Work OKE \n timeMin=str(start.isoformat()) +\"Z\",\n timeMax=str(end.isoformat()) +\"Z\",\n maxResults=100, singleEvents=True,\n 
orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n\n dates = {}\n if not events:\n print('No upcoming events found.')\n for event in events:\n try:\n if( \"You\" in event['summary']):\n print(event['summary'])\n print()\n #print(date.year, date.month, date.day, date.hour, date.minute )\n date = parse(event['start']['dateTime'])\n \n if(date.day not in dates):\n dates[date.day] = []\n \n if(date.day in dates):\n if( \"entered\" in event['summary']):\n dates[date.day].append( ('S', date.hour, date.minute) )\n else:\n dates[date.day].append( ('E', date.hour, date.minute) )\n except:\n pass\n \n print(dates)\n days = []\n for date, value in dates.items():\n try:\n start = [x for x in value if x[0] == 'S'][0]\n end = [x for x in value if x[0] == 'E'][0]\n span = timedelta(hours = end[1], minutes = end[2]) - timedelta(hours = start[1], minutes = start[2])\n days.append(span)\n print( date, span )\n except:\n pass\n \n worktime = timedelta()\n for each in days:\n worktime += each\n \n worked = worktime.days * 24 + worktime.seconds /3600\n print(worked)", "title": "" }, { "docid": "361628908510abc1780a4973cb202f43", "score": "0.63483584", "text": "def get_events_by_date(self):\r\n # Getting all actual events\r\n cursor = self.db['events'].find()\r\n events = list()\r\n for row in cursor:\r\n row[\"_id\"] = str(row[\"_id\"])\r\n events.append(row)\r\n\r\n # Sorting by date\r\n self.logger_rpc.log(self.name, self.get_events_by_date.__name__, None, \"Info\", \"Getting all events by date\")\r\n return sorted(events, key=lambda event: self._date_key(event['startDate']))", "title": "" }, { "docid": "6c8dba7cd334cf5dc9389a54bd3d806d", "score": "0.6293812", "text": "def get_appointments():\n # This code was from week 8 lab\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('calendar', 'v3', http=creds.authorize(Http()))\n\n # Call the Calendar API\n now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n events_result = service.events().list(calendarId='primary', timeMin=now,\n maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "title": "" }, { "docid": "afa447fe2fe5a6d2ef41289b92b345bf", "score": "0.6272645", "text": "def demo(UserCal):\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n \n print('Getting the upcoming 10 events')\n eventsResult = UserCal.service.events().list(\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "title": "" }, { "docid": "de0fb1ccaed43263b32a74b63a884896", "score": "0.62645775", "text": "def academic_event_set(self):\n\t\tnow = datetime.datetime.now()\n\t\tcurrent_year = now.year\n\t\tstart_date = datetime.date(current_year, 7, 1)\n\t\tend_date = datetime.date((current_year + 1), 6, 30)\n\t\treturn self.event_set.filter(status=3).filter(event_date__range=(start_date, end_date))", "title": "" }, { 
"docid": "e83431dbd544cf69c87a53dc7c20e1ef", "score": "0.6254189", "text": "def events(self, msg, args):\n\n # Get cal object from google\n res = requests.get(\"https://calendar.google.com/calendar/ical/q3n3pce86072n9knt3pt65fhio%40group.calendar.google.com/public/basic.ics\")\n cal = Calendar.from_ical(res.text)\n\n dateFormat = \"%b %d %H:%M\"\n weeks = 0\n full = False\n events = []\n eventStr = \"\"\n\n # Check arg 1 (weeks/full)\n arguments = args.split(\" \")\n if len(arguments) == 1 and arguments[0] == '':\n weeks = 2\n elif arguments[0] == \"full\":\n full = True\n else:\n try:\n weeks = int(arguments[0])\n except ValueError:\n return \"Invalid parameters\"\n\n # Process full/weeks\n if full:\n # Let's say there's not gonna be events planned ahead more than a year\n events = self.getEvents(cal, 52)\n eventStr = \"All upcoming events:\\n\"\n else:\n events = self.getEvents(cal, weeks)\n eventStr = \"Events in the **next \" + str(weeks) + \" weeks:**\\n\\n\"\n \n # Iterate through our chosen events and format them\n for e in events:\n summary = str(e.get('SUMMARY'))\n start = e.get('DTSTART').dt\n end = e.get('DTEND').dt\n location = e.get('LOCATION')\n \n if (location == \"\"):\n location = \"TBA\"\n\n # This is sorta messy (note it's markdown formatting not slack!)\n eventStr += (\"**\" + str(start.strftime(dateFormat))\n + \" - \"\n + str(start.strftime(dateFormat)) + \"**\"\n + \" - \"\n + \"*\" + location + \"*\"\n + \": \"\n + \"`\"\n + summary\n + \"`\\n\\n\")\n return eventStr", "title": "" }, { "docid": "be542abff5c6231be843513b37d4c78e", "score": "0.6229359", "text": "def get_calendar_data(credentials):\n http = httplib2.Http()\n http = credentials.authorize(http)\n service = build('calendar', 'v3', http=http)\n min_time = re.sub(r'\\.[0-9]*$', '+08:00', datetime.isoformat(datetime.now()))\n print('>> Min-time: %s' % min_time)\n http_request = service.events().list(\n calendarId='primary',\n maxResults=2,\n timeMin=min_time,\n orderBy='startTime',\n singleEvents=True)\n\n res = { 'status': 'OK', 'events': [] }\n\n http_response = http_request.execute()\n for event in http_response.get('items', []):\n item = {\n 'summary': event['summary'],\n 'organizer': event['organizer']['displayName'],\n 'location': 'location' in event and event['location'] or 'N/A',\n 'start': 'dateTime' in event['start'] and event['start']['dateTime'] or event['start']['date']+'T00:00:00+08:00',\n 'end': 'dateTime' in event['end'] and event['end']['dateTime'] or event['end']['date']+'T23:59:59+08:00'\n }\n res['events'].append(item)\n\n return res", "title": "" }, { "docid": "e62e9deeb83179d1a76d13ccdc578572", "score": "0.6210195", "text": "def get_jobs_for_calendar(self, _from, _to):\n _from = JobManager.timestamp_to_datetime(_from)\n _to = JobManager.timestamp_to_datetime(_to)\n jobs = self.getJobsInRange(_from, _to)\n URL = getattr(settings, \"URL\", \"\")\n events=[]\n for job in jobs:\n start, end = self.__get_start_end_datetime(job)\n handymen = ', '.join([item.get('name') for item in job.handyman.values('name')])\n event = {\n \"id\":job.id,\n \"title\":\"{0} : {1} - {2} ~~ {3}\".format(job.id, job.customer, job.remarks, handymen),\n \"url\": URL + \"/jobs/{0}\".format(job.jobref) if URL else \"\",\n \"class\": EVENT_CLASSES[job.status],\n \"start\": JobManager.datetime_to_timestamp(start),\n \"end\": JobManager.datetime_to_timestamp(end)\n }\n events.append(event)\n return events", "title": "" }, { "docid": "23469e01cbefdfbb12951af6af575aa6", "score": "0.6192027", "text": "async def 
eventlist(self, ctx, *, timezone: str=\"UTC\"):\n\t\tserver = ctx.message.server\n\t\tevents = []\n\t\tfor event in self.events[server.id]:\n\t\t\tif not event[\"has_started\"]:\n\t\t\t\temb = discord.Embed(title=event[\"event_name\"],\n\t\t\t\t\t\t\t\t\tdescription=event[\"description\"],\n\t\t\t\t\t\t\t\t\turl=\"https://time.is/UTC\")\n\t\t\t\temb.add_field(name=\"Organiser\",\n\t\t\t\t\t\t\t value=discord.utils.get(\n\t\t\t\t\t\t\t\t self.bot.get_all_members(),\n\t\t\t\t\t\t\t\t id=event[\"creator\"]))\n\t\t\t\temb.add_field(name=\"Event ID\", value=str(event[\"id\"]))\n\t\t\t\temb.add_field(\n\t\t\t\t\tname=\"Participant count\", value=str(\n\t\t\t\t\t\tlen(event[\"participants\"])))\n\t\t\t\temb.add_field(\n\t\t\t\t\tname=\"Start time (UTC)\", value=datetime.utcfromtimestamp(\n\t\t\t\t\t\tevent[\"event_start_time\"]))\n\t\t\t\tevents.append(emb)\n\t\tif len(events) == 0:\n\t\t\tawait self.bot.say(\"No events available to join!\")\n\t\telse:\n\t\t\tawait self.event_menu(ctx, events, message=None, page=0, timeout=30)", "title": "" }, { "docid": "6632edd42f3dc777853c2eecdcb3c3f2", "score": "0.61591876", "text": "def main():\r\n credentials = get_credentials()\r\n http = credentials.authorize(httplib2.Http())\r\n service = discovery.build('calendar', 'v3', http=http)\r\n\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n #print('Getting the upcoming 10 events')\r\n eventsResult = service.events().list(\r\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = eventsResult.get('items', [])\r\n \r\n if not events:\r\n p1='No upcoming events found.'\r\n mes.showinfo(\"Event List\",p1)\r\n p3=[]\r\n i=0\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n p3.append(start+ \" <---> \"+event['summary']+'\\n')\r\n #blank= Entry(top1,width=60).pack(ipady=3)\r\n mes.showinfo(\"Event list\",p3)", "title": "" }, { "docid": "7fe35007400ee6cdd361d82d2c984d3c", "score": "0.6158645", "text": "def request_calendar(jsessionid: str, start: str, end: str) -> List[dict]:\n session = Session()\n\n response = session.post(\n url=AJAX_REQUEST_URL.format(start, end),\n headers={\n \"Cookie\": \"JSESSIONID={};\".format(jsessionid),\n \"Accept\": \"*/*\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n \"Content-Type\": \"application/json; charset=uft-8\",\n \"Host\": \"gestioacademica.upf.edu\",\n \"Referer\": \"https://gestioacademica.upf.edu/pds/control/PubliHoraAlumCalendario?rnd=176.0\",\n \"Sec-Fetch-Dest\": \"empty\",\n \"Sec-Fetch-Mode\": \"cors\",\n \"Sec-Fetch-Site\": \"same-origin\",\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n )\n\n return json.loads(response.text)", "title": "" }, { "docid": "86c2a83307bf0aae32200918765eb34a", "score": "0.61525625", "text": "def get_queryset(self):\n return Event.objects.exclude(\n attendees__id__exact=self.request.user.id\n ).filter(\n start_date__gte=timezone.now()\n ).order_by('start_date')[:self.limit]", "title": "" }, { "docid": "f4ac7e6a092e25dd2b7facba7cbe5bf6", "score": "0.61283916", "text": "def list():\n\tif g.is_default_school:\n\t\tevents = Event.objects.filter().order_by('-start')\n\telse:\n\t\tevents = Event.objects.filter(schools=g.school).order_by('-start')\n\treturn render_template('event/list.html',\n\t\ttitle=_('Events'),\n\t\tevents=events, current_user=current_user)", "title": "" }, { "docid": "a09e6a28338106aa9d26e25d3548c896", "score": "0.61207885", "text": "def 
event_dates(obj):\n if obj.start is None or obj.end is None:\n return None\n\n event_days = set()\n occurences = expand_events([obj], RET_MODE_ACCESSORS)\n for occurence in occurences:\n start = occurence.start\n event_days.add(start.date())\n end = occurence.end\n duration = (end.date() - start.date()).days\n for idx in range(1, duration + 1):\n day = start + timedelta(days=idx)\n event_days.add(day.date())\n\n return tuple(event_days)", "title": "" }, { "docid": "d493f41ff2fd4182241f2d5345653435", "score": "0.61038333", "text": "def calendar_events(self):\n return CalendarEvents(self)", "title": "" }, { "docid": "27e3997b65eb3abc29b4026476bbe836", "score": "0.61035824", "text": "def _generate_events(count, start_datetime):\n result = []\n description = 'all the same'\n\n while count > 0:\n result.append({\n \"description\": description,\n \"start\": (start_datetime + timedelta(days=count)).strftime('%Y-%m-%dT%H:%M:%SZ'),\n \"end\": (start_datetime + timedelta(days=count + 1)).strftime('%Y-%m-%dT%H:%M:%SZ')\n })\n count -= 1\n\n return result", "title": "" }, { "docid": "ce8be3505bb96e9ad348a923e364f90a", "score": "0.6078324", "text": "def list_events():\n events = db_helper.get_all_events()\n return jsonify({\"events\": events})", "title": "" }, { "docid": "c6c296970c2f462b276b677a0f1f6afc", "score": "0.6062814", "text": "def tasks_on_date(self, date):\n tasks = list(filter(lambda t: t.start_date == date, self.lst))\n return tasks", "title": "" }, { "docid": "ca7709fb956a643670913eb45c410a41", "score": "0.6060582", "text": "def get_camp_events():\n start = request.args.get('start') # get events on/after start\n end = request.args.get('end') # get events before/on end\n print(start, end)\n\n event_schema = CampEventSchema(many=True)\n events = CampEvent.query.all() # get all data for now\n\n for event in events:\n event.add_color_attr()\n\n result = event_schema.dump(events).data\n\n return jsonify(result)", "title": "" }, { "docid": "81316daf355f25d6e875a0cf67203cfa", "score": "0.6058321", "text": "def dayEvents(self):\n start = datetime.datetime.combine(self.default_day, datetime.time(0,0))\n months = self._getEventList(start=start,\n stop=start + datetime.timedelta(1))\n return self.eventlist(months=months, show_past=True)", "title": "" }, { "docid": "3c4ed9e95d2f52682e1f1fed2bc6ea53", "score": "0.6055746", "text": "def get_events(cal_id, url, message_req):\n # REST request\n URI = f'https://content.googleapis.com/calendar/v3/calendars/' \\\n f'{urllib.parse.quote(cal_id)}/events' \\\n f'?key={KEY}' \\\n f'&maxResults=10' \\\n f'&singleEvents=true' \\\n f'&fields=items(summary%2Cstart)' \\\n f'&timeMin={datetime.datetime.utcnow().isoformat()}Z'\n print(URI)\n response = requests.get(URI).json()[\"items\"] # retrieving events\n scheduler = Scheduler(\"randomseed\", connection=Redis()) # constructs\n # scheduler\n # with cal_id as identifier\n for event in response:\n try:\n st_time = datetime.datetime.strptime(f'{event[\"start\"][\"date\"]}Z',\n '%Y-%m-%dT%H:%M:%SZ')\n except ValueError:\n st_time = datetime.datetime.strptime(f'{event[\"start\"][\"date\"]}Z',\n '%Y-%m-%dZ')\n message_req[\"message\"][\"text\"] = f'Are you trying to fail this ' \\\n f'class!? 
SMH - {event[\"summary\"]} is ' \\\n f'coming up!'\n scheduler.enqueue_at(st_time, \"send_message(url, message_req)\")", "title": "" }, { "docid": "a3685a112a891bcceb4973ae359aebff", "score": "0.6055119", "text": "def show_events():\n\n query = 'women+technology'\n location = 'san francisco'\n distance = '75mi'\n sort = 'date'\n category_id ='101,102'\n payload = {'q': query,\n 'location.address': location,\n 'location.within': distance,\n 'categories': category_id,\n 'sort_by': sort,}\n\n headers = {'Authorization': 'Bearer ' + EVENTBRITE_TOKEN}\n\n response = requests.get(EVENTBRITE_URL + 'events/search/',\n params=payload,\n headers=headers)\n\n data = response.json()\n\n if response.ok:\n events = data['events']\n\n else:\n flash(\"Nothing found: {}\".format(data['error_description']))\n events = [] \n\n\n return render_template(\"events.html\", data=pformat(data),\n results=events)", "title": "" }, { "docid": "5d18ee7e000bd34cde3b3c2b166d73e0", "score": "0.6051703", "text": "def find_events():", "title": "" }, { "docid": "4ac17ae03095f4e2e2a1f0a7eb778ed7", "score": "0.6051639", "text": "def list(request):\n events = Event.objects.all()\n\n return render_to_response(\"events/list.html\", {\n \"events\": events,\n }, context_instance=RequestContext(request))", "title": "" }, { "docid": "205ee34ab80453a97994e7b9784ff0ad", "score": "0.6028907", "text": "def events_view(request, category_name):\n events = paginated_list(request, Event, 20, 'from_date',\n from_date__gte=datetime.date.today,\n categories__name=category_name)\n past = paginated_list(request, Event, 20, 'from_date',\n from_date__lt=datetime.date.today,\n organization__is_active=True)\n return render(request, 'site/list.html',\n {'events' : events, 'past': past})", "title": "" }, { "docid": "0f03da0504e83f765a01af3a0981e92d", "score": "0.6027374", "text": "def events_between (cls, env, stamp1, stamp2, user):\n\n import time\n\n db = env.get_db_cnx()\n cursor = db.cursor()\n if user == \"admin\":\n selector = \"\"\n else:\n selector = \"(type=0 OR author=\\'%s\\') AND\" % user\n\n sql = \"\"\"\n SELECT *\n FROM azcalendar_event\n WHERE %(selector)s\n (time_end >= %(time_beg)s AND time_begin < %(time_end)s)\n OR (time_begin < %(time_beg)s AND time_end >= %(time_end)s )\n \"\"\" % {'time_beg' : int(stamp1),\n 'time_end' : int(stamp2),\n 'selector' : selector}\n cursor.execute(sql)\n rows = cursor.fetchall()\n return [Event.parse_row (row) for row in rows]", "title": "" }, { "docid": "4a0d1160b8fd5a885b912b3b176e8577", "score": "0.6027012", "text": "async def async_get_events(\n self, hass: HomeAssistant, start_datetime: datetime, end_datetime: datetime\n ) -> list[CalendarEvent]:\n events: list[CalendarEvent] = []\n _LOGGER.debug(\"Anniversaries Calendar - Get Events\")\n if SENSOR_PLATFORM not in hass.data[DOMAIN]:\n return events\n start_date = start_datetime.date()\n end_date = end_datetime.date()\n for ent in self.entities:\n _LOGGER.debug(\"Get Events: Entity Name: \" + str(ent))\n if (ent not in hass.data[DOMAIN][SENSOR_PLATFORM]):\n continue\n entity = self._hass.data[DOMAIN][SENSOR_PLATFORM][ent]\n if (\n entity\n and entity.name\n and entity._next_date.date()\n and start_date <= entity._next_date.date() <= end_date\n ):\n event = CalendarEvent(\n summary=entity.name,\n start=entity._next_date.date(),\n end=entity._next_date.date() + timedelta(days=1),\n description=entity.extra_state_attributes[\"description\"]\n if \"description\" in entity.extra_state_attributes\n else None,\n )\n events.append(event)\n return events", 
"title": "" }, { "docid": "2e22d65109f902da043a5447400d64d8", "score": "0.6021999", "text": "def event_dates(self):\n return self._collect_dates('EVENTS')", "title": "" }, { "docid": "c453e049093fb45e58cb7bcef221ecbb", "score": "0.6006142", "text": "def main():\n credentials = get_credentials()\n service = build('calendar', 'v3', http=credentials.authorize(Http()))\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('Getting the upcoming 10 events')\n eventsResult = service.events().list(\n calendarId='primary', timeMin=now, maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n print(start, event['summary'])", "title": "" }, { "docid": "c9f4d07539fec6b7a8da24698737e2ce", "score": "0.5991699", "text": "def main():\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n store = file.Storage('token.json')\r\n creds = store.get()\r\n if not creds or creds.invalid:\r\n flow = client.flow_from_clientsecrets('credentials.json', SCOPES)\r\n creds = tools.run_flow(flow, store)\r\n service = build('calendar', 'v3', http=creds.authorize(Http()))\r\n page_token = None\r\n calendar_ids = []\r\n\r\n while True:\r\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\r\n for calendar_list_entry in calendar_list['items']:\r\n calendar_ids.append(calendar_list_entry['id'])\r\n page_token = calendar_list.get('nextPageToken')\r\n if not page_token:\r\n break\r\n\r\n\r\n # This code is to look for all-day events in each calendar for the month of September\r\n # Src: https://developers.google.com/google-apps/calendar/v3/reference/events/list\r\n # You need to get this from command line\r\n # # Bother about it later!\r\n # now = datetime.datetime.utcnow() + timedelta(days = 7)\r\n # now = now.isoformat() + 'Z' # 'Z' indicates UTC time\r\n start_date = datetime.datetime(\r\n 2016, 01, 01, 00, 00, 00, 0).isoformat() + 'Z'\r\n end_date = datetime.datetime(2018, 12, 01, 23, 59, 59, 0).isoformat() + 'Z'\r\n eventID = []\r\n Dates = []\r\n Days = []\r\n CategoryID = []\r\n for calendar_id in calendar_ids:\r\n count = 0\r\n print('\\n----%s:\\n' % calendar_id)\r\n eventsResult = service.events().list(\r\n calendarId=calendar_id,\r\n timeMin=start_date,\r\n timeMax=end_date,\r\n singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = eventsResult.get('items', [])\r\n for event in events:\r\n eventDate = event['start'].get('dateTime', event['start'].get('date'))\r\n eventTitle = event['summary']\r\n print(type(eventDate), eventTitle)\r\n weekDays = datetime.datetime.strptime(eventDate,\"%Y-%m-%d\").weekday()\r\n Dates.append(eventDate)\r\n eventID.append(eventTitle)\r\n Days.append(numToDays(weekDays))\r\n CategoryID.append('Special')\r\n toFile = pd.DataFrame(np.column_stack([eventID, CategoryID, Dates, Days]), columns = ['Events', 'CategoryID', 'Date', 'Days'])\r\n fileName = '{}{}.csv'.format(\"C:\\\\Users\\\\krata\\\\Documents\\\\BIT 5524\\\\Scrapping\\\\\", \"events_2018-12-3\")\r\n toFile.to_csv(fileName, index = False)", "title": "" }, { "docid": "53ee2d0b690a35ad3dad66c380069a93", "score": "0.59521115", "text": "def list_calendar_events_for_user(self, **kwargs):\n return PaginatedList(\n CalendarEvent,\n self._requester,\n 
'GET',\n 'users/%s/calendar_events' % (self.id),\n **combine_kwargs(**kwargs)\n )", "title": "" }, { "docid": "7bdb4e828fda64586615e9ecb6b78269", "score": "0.59358376", "text": "def test_events_list(self):\n\n pco = self.pco\n\n events = [event for event in pco.check_ins.events.list()]\n\n self.assertEqual(len(events), 1)\n self.assertEqual(events[0].name, 'Weekly Worship Services')", "title": "" }, { "docid": "a2b1ce220114c3411b6e455c8adcf205", "score": "0.5933739", "text": "async def search_events(\n form: event_models.EventSearchForm) -> event_models.ListOfEvents:\n events = events_collection().find()\n result_events = []\n\n event_matches_query = lambda event: form.keyword in {\n event[\"title\"], event[\"description\"]\n }\n for event in events:\n if event_matches_query(event):\n event_data = event_models.EventQueryResponse(**event,\n event_id=event[\"_id\"])\n result_events.append(event_data)\n\n return event_models.ListOfEvents(events=result_events)", "title": "" }, { "docid": "c11c3fcf14410264c0e6b44a2070460c", "score": "0.5928687", "text": "def get_events(self, instance):\r\n if instance.type == \"for_person\":\r\n all_events = instance.person.event_featuring.all().order_by('date', 'start_time')\r\n elif instance.type == \"for_place\":\r\n all_events = instance.place.event_set.all().order_by('date', 'start_time')\r\n # most likely, we're getting events related to an entity\r\n elif MULTIPLE_ENTITY_MODE and instance.entity:\r\n all_events = self.model.objects.filter(Q(hosted_by=instance.entity) | \\\r\n Q(publish_to=instance.entity)).distinct().order_by('date', 'start_time')\r\n else:\r\n all_events = self.model.objects.all().order_by('date', 'start_time')\r\n\r\n all_events = all_events.filter(published=True, in_lists=True)\r\n\r\n actual_events = all_events.filter(\r\n # if it's (not a series and not a child) - series events are excluded, children too unless:\r\n # the child's parent is a series and its children can be advertised\r\n # tough luck if it's the child of a series and can't be advertised\r\n Q(series = False, parent = None) | \\\r\n Q(parent__series = True),\r\n )\r\n\r\n instance.forthcoming_events = actual_events.filter(\r\n # ... and it's (a single-day event starting after today) or (not a single-day event and ends after today)\r\n Q(single_day_event = True, date__gte = datetime.now()) | \\\r\n Q(single_day_event = False, end_date__gte = datetime.now())\r\n )\r\n\r\n instance.previous_events = actual_events.exclude(\r\n # ... 
and it's (a single-day event starting after today) or (not a single-day event and ends after today)\r\n Q(single_day_event = True, date__gte = datetime.now()) | \\\r\n Q(single_day_event = False, end_date__gte = datetime.now())\r\n ).order_by('-date', '-start_time')\r\n\r\n instance.series_events = all_events.filter(series = True)", "title": "" }, { "docid": "4a051c1c8557c52e711a4ea699c8a398", "score": "0.59101015", "text": "def get_events_from_min_max(technician,start,end):\n\n creds = None\n if technician.token_pickle:\n with open(technician.token_pickle.path, 'rb') as token:\n\n creds = pickle.load(token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n events_result = service.events().list(calendarId='primary', timeMin=start.isoformat(),\n maxResults=1000, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n\n\n # start_times = []\n # for event in events:\n # start = dateutil.parser.parse(event['start'].get('dateTime', event['start'].get('date')))\n # start_times.append(start)\n # for event in events:\n # start = dateutil.parser.parse(event['start'].get('dateTime', event['start'].get('date')))\n # end = dateutil.parser.parse(event['end'].get('dateTime', event['end'].get('date')))\n #\n # TechnicianAppointments.objects.get_or_create(technician=technician,start_datetime=start,end=end)\n return events", "title": "" }, { "docid": "b64965e47d79c2dcfa7fd64ba07fe00f", "score": "0.5897783", "text": "def event_date_range():\n event_type = request.args.get('eventType', '')\n if not event_type:\n return \"error: must provide a eventType field\", 400\n place = request.args.get('place', '')\n if not place:\n return \"error: must provide a place field\", 400\n use_cache = request.args.get('useCache', '')\n result = {'minDate': \"\", 'maxDate': \"\"}\n date_list = []\n if use_cache == '1':\n date_list = fetch.event_collection_date(event_type,\n place).get('eventCollectionDate',\n {}).get('dates', [])\n else:\n disaster_data = current_app.config['DISASTER_DASHBOARD_DATA']\n date_list = sorted(list(disaster_data.get(event_type, {}).keys()))\n if len(date_list) > 0:\n result = {'minDate': date_list[0], 'maxDate': date_list[-1]}\n return Response(json.dumps(result), 200, mimetype='application/json')", "title": "" }, { "docid": "7df091ad74d717c02aebcdc66189c7f7", "score": "0.5896381", "text": "def get_upcoming_events(self):\n if self.event.repeats('NEVER'):\n has_ended = False\n now_gt_start = self.now > self.event.l_start_date\n now_gt_end = self.now > self.event.end_date\n if now_gt_end or now_gt_start:\n has_ended = True\n has_not_started = self.event.l_start_date > self.finish\n if has_ended or has_not_started:\n return self.events\n self.events.append((self.event.l_start_date, self.event))\n return self.events\n if self.event.repeats('WEEKDAY'):\n self._weekday()\n elif self.event.repeats('MONTHLY'):\n self._monthly()\n elif self.event.repeats('YEARLY'):\n self._yearly()\n else:\n self._others()\n return self.events", "title": "" }, { "docid": "0ebcf94829c5fd7e8944a92c4607ae58", "score": "0.5879512", "text": "def resource_show_my_events(request, item_container):\n app_name = get_app_name()\n my_user_id = get_user(request.user.username).id\n my_events_0 = get_my_events(my_user_id)\n my_events = []\n for e in my_events_0:\n dummy, my_typedescr = get_type_of_resource(e.resource_id)\n my_resdescr = get_resource(e.resource_id).description\n datetime_start = e.datetime_start.strftime(\"%d.%m.%Y %H:%M\")\n my_event = 
{'id':e.id, 'typedescr':my_typedescr, 'resdescr':my_resdescr, 'dtstart':datetime_start}\n date_start = e.datetime_start.strftime(\"%d.%m.%Y\")\n date_end = e.datetime_end.strftime(\"%d.%m.%Y\")\n if date_start!=date_end:\n datetime_end = e.datetime_end.strftime(\"%d.%m.%Y %H:%M\")\n else:\n datetime_end = e.datetime_end.strftime(\"%H:%M\")\n my_event['dtend'] = datetime_end\n if mx.DateTime.Parser.DateTimeFromString(datetime_start)<mx.DateTime.now():\n my_event['in_use'] = 1\n my_events += [ my_event ]\n vars = get_folderish_vars_show(request, item_container, app_name, '', \n False)\n vars['title'] = _(u\"Ressourcenverwaltung: Übersicht\")\n vars['next'] = item_container.get_absolute_url() + '/show_my_events/'\n if len(my_events)>0:\n vars['my_events'] = my_events\n vars['next_del'] = item_container.get_absolute_url() + \"/del_event/\"\n vars['action_block'] = show_link(item_container.get_absolute_url(), _(u'Reservieren'))\n #'<a href=\"index.html\">Reservieren</a>'\n return render_to_response('app/resource/my_reservations.html', vars)", "title": "" }, { "docid": "b12f8fa2bcc4cb8add4040b07ba0f03a", "score": "0.5872256", "text": "def get_events(cls, start, stop=None):\n events = []\n # Get the event telemetry MSID objects\n event_msidset = fetch.Msidset(cls.event_msids, start, stop)\n obsid = event_msidset[\"cobsrqid\"]\n\n if len(obsid) < 2:\n # Not enough telemetry for state_intervals, return no events\n return events\n\n states = obsid.state_intervals()\n # Skip the first and last states as they are likely incomplete\n for state in states[1:-1]:\n event = dict(\n start=state[\"datestart\"],\n stop=state[\"datestop\"],\n tstart=state[\"tstart\"],\n tstop=state[\"tstop\"],\n dur=state[\"tstop\"] - state[\"tstart\"],\n obsid=state[\"val\"],\n )\n events.append(event)\n return events", "title": "" }, { "docid": "20f85fa68ea68ffcb4b9ad6f418f60ae", "score": "0.5872194", "text": "def callist(context: CallContext) -> None:\n click.secho(\"Google Calendars:\", bold=True)\n for cal_id in context.gcal.client.calendars():\n cal = context.gcal.client.calendar_info(cal_id)\n click.secho(f\"{cal['summary']} [{cal['id']}]\")", "title": "" }, { "docid": "665efce75e79e1cafabdc9d9847b667b", "score": "0.5868549", "text": "def do_event_list(self, arg):\n print(\"Event Code Date Description\")\n print(\"---------- ---------- --------------------------------------------------\")\n print(\"UC1 03/25/2016 Chicago Blackhawks vs St. 
Louis Blues at the United Center\")\n print(\"UC2 03/27/2016 Chicago Blackhawks vs Minnesota Wild at the United Center\")\n print(\"H1 04/11/2016 Hamilton at Chicago Theater\")\n print(\"H2 04/12/2016 Hamilton at Chicago Theater\")\n print(\"H3 04/13/2016 Hamilton at Chicago Theater\")\n print(\"U2 07/06/2016 U2 at Soldier Field\")\n print()\n print(\"To view available tickets, enter: 'event [event code]'\")", "title": "" }, { "docid": "8f48d24d31e57abaa8835e18acad8d2e", "score": "0.5864984", "text": "def get_group_events(bot, calcodes, tzinfo, count, days, now=None): # pylint: disable=too-many-arguments,too-many-locals\n\n calendar_view = bot.multibot.multical.view(calcodes)\n if now is None:\n now = time.time()\n nowdt = datetime.datetime.fromtimestamp(now, tzinfo)\n midnight = nowdt.replace(hour=0, minute=0, second=0, microsecond=0)\n period = (midnight + datetime.timedelta(days=days + 1) - nowdt).total_seconds()\n all_events = list(calendar_view.get_overlap(now, now + max(period, 60 * 60 * 24 * 100)))\n alerts = {}\n for event in all_events:\n if event['location']:\n try:\n alertlist = geoutil.weatheralerts(event['location'])\n except Exception: # pylint: disable=broad-except\n logging.exception('Ignoring:')\n else:\n for alert in alertlist or ():\n alerts[alert['id']] = alert\n end = now + period\n return ([event for event in all_events[:count] if event['start'] <= end],\n [alert for alertid, alert in sorted(alerts.items())])", "title": "" }, { "docid": "8fbe6653f479568d411e490b658e515d", "score": "0.58639276", "text": "def test_filter_course_by_start_date(self):\n params = {'startCourseFrom': timestamp_today,\n 'startCourseTo': timestamp_week}\n response = requests.get(url=course_url, params=params)\n\n assert response.ok, response.text\n\n for course in response.json():\n assert timestamp_today <= int(course[\"start_date\"]) <= timestamp_week, \"Filtered course by start_date \" \\\n \"is not found\"\n course_found = False\n for course in response.json():\n if post_payload['title'] == course['title']:\n course_found = True\n break\n if not course_found:\n pytest.fail(f\"Just created course with start date - {post_payload['start_date']} not in response\")", "title": "" }, { "docid": "cf06778416b8f727d5f44af669ba6e14", "score": "0.58601046", "text": "def get_all_events(self):\n\n i = 0\n for index in range(0, self.recurrences):\n # extracts time interval in the format [start week, end week]\n week = self.get_week_entities(index)\n i += 1\n for week_number in range(week[0], week[1]):\n print(week_number)\n print(\"Event from A: \" + str(i))\n print(\n Event.week_string_to_date(2017, week_number, self.day, self.start_time[0:2], self.start_time[3:5]))\n date = str(Event.week_string_to_date(2017, week_number, self.day, self.start_time[0:2],\n self.start_time[3:5]))\n DatabaseInserter.add_course_event(date, self.course_code, self.room, self.type)\n\n if week[0] == week[1]:\n print(week_number)\n print(\"Event from B: \" + str(i))\n print(\n Event.week_string_to_date(2017, week_number, self.day, self.start_time[0:2], self.start_time[3:5]))\n date = str(Event.week_string_to_date(2017, week_number, self.day, self.start_time[0:2],\n self.start_time[3:5]))\n DatabaseInserter.add_course_event(date, self.course_code, self.room, self.type)", "title": "" }, { "docid": "062e85830263722b3955906a33d16984", "score": "0.5840578", "text": "def get_dates(self):\n return [ed.date for ed in EventDate.objects.filter(event=self)]", "title": "" }, { "docid": "6e663e4d824f0cd16e1faa24b23e4abf", "score": 
"0.58386356", "text": "def test_get_day_events():\n print(\"get_day_events\")\n c = CalendarModel(1,2019)\n print(c.get_day_events(12))", "title": "" }, { "docid": "ec80345dd111cf68736a3a08b30ee275", "score": "0.5835974", "text": "def getEvents(self, date):\n for event_id in self._date_index.get(date, []):\n yield self[event_id]", "title": "" }, { "docid": "6ab3b19db5aa9ed41aae0cae77dd5734", "score": "0.5830275", "text": "def start_date(start):\n \"\"\"When given the start only, calculate `TMIN`, `TAVG`, and `TMAX` for all dates greater than and equal to the start date\"\"\"\n \"\"\"Range of Dates: 2010-01-01 to 2017-08-23\"\"\"\n \n canonicalized = start.replace(\" \", \"-\")\n \n #Check if input is in date range\n if (canonicalized > '2017-08-23') or (canonicalized < '2010-01-01'):\n return jsonify({\"error\": f\"{canonicalized} is out of range.\"}), 404\n \n session = Session(engine)\n \n t_calcs = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= canonicalized).all()\n \n session.close()\n \n t_calcs_list = list(t_calcs)\n \n return jsonify(t_calcs_list)", "title": "" }, { "docid": "19594bcc268c5f95a92441a25afa59fb", "score": "0.5819049", "text": "def index(request):\n events_list = Event.objects.filter(is_published=True)\n context = {'events_list': events_list, 'active':'events'}\n\n context[\"current_events\"] = events_list.filter(start_datetime__gte=now()).order_by('start_datetime')\n\n ## Get past events\n past_events = events_list.filter(start_datetime__lt=now())\n\n # Get sorted list of months from pre-defined dict; get sorted list of years in descending order from Event model\n month_dict = {1:\"jan\", 2:\"feb\", 3:\"mar\", 4:\"apr\", 5:\"may\", 6:\"jun\", 7:\"jul\", 8:\"aug\", 9:\"sep\", 10:\"oct\", 11:\"nov\", 12:\"dec\"}\n month_list = list(set(month_dict.keys()))\n years_list = list(set([e.start_datetime.year for e in events_list]))\n years_list.sort()\n years_list.reverse()\n\n # Return list to template, e.g.\n # ((2014, (\"jan\":(<event>,<event2>,...)), ((2013, (...))) )\n yearly_events = []\n for y in years_list:\n monthly_events = []\n for m in month_list:\n m_events = events_list.filter(start_datetime__lte=now()).filter(start_datetime__year=y).filter(start_datetime__month=m).order_by(\"start_datetime\")\n if len(m_events) > 0:\n monthly_events.append( (month_dict[m], m_events ) )\n yearly_events.append( (y, monthly_events) )\n\n context[\"past_events\"] = yearly_events\n\n return render(request, 'events/index.html', context)", "title": "" }, { "docid": "3e6c39a29a10a4273a1cb503cbe967db", "score": "0.58024913", "text": "def current_events(self):\n return Event.query.filter(\n Event.start <= arrow.now(config['tz'] or 'local').replace(hours=2),\n Event.end >= arrow.now(config['tz'] or 'local').replace(hours=-2),\n Event.group_id==self.id,\n Event.is_active==True\n ).all()", "title": "" }, { "docid": "28a8c329c3f02e9dc4a1f7fba0e7f349", "score": "0.58017546", "text": "def events(self, from_dt=None, to_dt=None):\n self.refresh_client(from_dt, to_dt)\n return self.response['Event']", "title": "" }, { "docid": "57b96aae36ce259f7fdf221f413a41d8", "score": "0.58008385", "text": "def get_events_in_time_range(start_time, end_time):\n events = Event.objects.filter(start_time__gte=start_time).filter(\n start_time__lte=end_time\n )\n return events", "title": "" }, { "docid": "29c89d6e357c98d55c707ebaeddd0faa", "score": "0.57896334", "text": "def get_reminders():\n reminders_list = []\n request = 
requests.Session()\n url = 'https://www.googleapis.com/calendar/v3/calendars/' + \\\n 'fsdet2017@gmail.com/events?alwaysIncludeEmail=true&key=' + \\\n 'AIzaSyAu4LrLeEWF_3TWg4Mg2VNp5W0pmWVtW1w'\n response = request.get(url)\n data = response.json()\n now = str(datetime.datetime.now())\n for item in data[\"items\"]:\n reminder_start = item[\"start\"][\"dateTime\"]\n date_start = reminder_start[0:10]\n date_time = date_start + \" \" + reminder_start[11:16]\n if now > date_time:\n continue\n year_start = reminder_start[0:4]\n month_start = calendar.month_name[int(reminder_start[5:7])]\n day_start = reminder_start[8:10]\n day_number = weekday(int(year_start), int(reminder_start[5:7]),\n int(day_start))\n day_name_start = day_name[day_number]\n time_start = reminder_start[11:16]\n reminder_end = item[\"end\"][\"dateTime\"]\n date_end = reminder_end[0:10]\n year_end = reminder_end[0:4]\n month_end = calendar.month_name[int(reminder_end[5:7])]\n day_end = reminder_end[8:10]\n day_number = weekday(int(year_end), int(reminder_end[5:7]),\n int(day_end))\n day_name_end = calendar.day_name[day_number]\n time_end = change_time(reminder_end[11:16])\n reminder = [item[\"summary\"], date_start,\n day_name_start, day_start, month_start, year_start,\n time_start, date_end, day_name_end, day_end, month_end,\n year_end, time_end]\n reminders_list.append(reminder)\n # sort by time\n reminders_list.sort(key=lambda reminders_list: reminders_list[6])\n # sort by date\n reminders_list.sort(key=lambda reminders_list: datetime.datetime.strptime(\n reminders_list[1], '%Y-%m-%d'))\n for i in range(len(reminders_list)):\n reminders_list[i][6] = change_time(reminders_list[i][6])\n return reminders_list", "title": "" }, { "docid": "9ad4e750cb3aaae250a700db6c906f86", "score": "0.5779887", "text": "def get_all_events_with_time_range(self, **kwargs):\n\n all_params = ['earliest_start_time_epoch_millis', 'latest_start_time_epoch_millis', 'cursor', 'limit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_all_events_with_time_range\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v2/event'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'earliest_start_time_epoch_millis' in params:\n query_params['earliestStartTimeEpochMillis'] = params['earliest_start_time_epoch_millis']\n if 'latest_start_time_epoch_millis' in params:\n query_params['latestStartTimeEpochMillis'] = params['latest_start_time_epoch_millis']\n if 'cursor' in params:\n query_params['cursor'] = params['cursor']\n if 'limit' in params:\n query_params['limit'] = params['limit']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ResponseContainerPagedEvent',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "title": "" }, { 
"docid": "29d66729862051e37483cac0160b91a8", "score": "0.57712936", "text": "def get_events():\n ds = DataStore()\n with ds.open():\n res = ds.get_all()\n doc = []\n for r in res:\n doc.append(json.loads(r))\n doc.sort(cmp_events)\n response.set_content_type('application/json')\n return json.dumps(doc)", "title": "" }, { "docid": "b27c86ace726754850264fd56206c523", "score": "0.5769291", "text": "def getnextevent():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build(\"calendar\", \"v3\", http=http)\n\n # 'Z' needed for calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n #now = datetime.datetime.now().isoformat() + 'Z' # 'Z' indicates UTC time\n today = datetime.date.today().isoformat()\n today = datetime.datetime.strptime(today, \"%Y-%m-%d\")\n\n location = 'Australia/Perth'\n tz = timezone(location)\n # Need to change times to non-naive\n event_title_low = None\n page_token = None\n\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\n # Get next event in each calendar\n timed_events =[]\n allday_events = []\n for calendar_list_entry in calendar_list['items']:\n eventsResult = (\n service.events()\n .list(\n calendarId=calendar_list_entry['id'],\n timeMin=now,\n singleEvents=True,\n orderBy=\"startTime\",\n maxResults=2,\n )\n .execute()\n )\n event = eventsResult.get(\"items\", [])\n if event != []:\n #Determine start time\n try:\n event_time = {event[0][\"start\"][\"dateTime\"]}\n for start_time in event_time:\n if start_time[-1] == 'Z':\n start_time = datetime.datetime.strptime(start_time, \"%Y-%m-%dT%H:%M:%SZ\")\n start_time = start_time + datetime.timedelta(hours = 8)\n start_time = tz.localize(start_time)\n else:\n start_time = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S%z')\n #calendar_entry = [start_time, end_time, event[0][\"summary\"]]\n #timed_events.append(calendar_entry)\n except:\n event_time = {event[0][\"start\"][\"date\"]}\n for start_time in event_time:\n start_time = start_time\n #calendar_entry = [start_time, end_time, event[0][\"summary\"]]\n #allday_events.append(calendar_entry)\n\n #Determine end time\n try:\n event_time = {event[0][\"end\"][\"dateTime\"]}\n for end_time in event_time:\n if end_time[-1] == 'Z':\n end_time = datetime.datetime.strptime(end_time, \"%Y-%m-%dT%H:%M:%SZ\")\n end_time = end_time + datetime.timedelta(hours = 8)\n end_time = tz.localize(end_time)\n else:\n end_time = datetime.datetime.strptime(end_time, '%Y-%m-%dT%H:%M:%S%z')\n duration = end_time - start_time\n duration = duration.seconds\n calendar_entry = [start_time, end_time, event[0][\"summary\"], duration]\n timed_events.append(calendar_entry)\n except:\n event_time = {event[0][\"end\"][\"date\"]}\n for end_time in event_time:\n end_time = end_time\n calendar_entry = [start_time, end_time, event[0][\"summary\"]]\n allday_events.append(calendar_entry)\n\n event_start_low = allday_events[0][0]\n for i in allday_events:\n #print(f'{i[0]} - {i[1]}: {i[2]}')\n if i[0] <= event_start_low:\n event_start_low = i[0]\n lowest_allday = [i[0],i[1], i[2]]\n event_start_low = timed_events[0][0]\n for i in timed_events:\n #Only want events shorter than 11hrs\n #print(f'{i[0]} - {i[1]}: {i[2]}')\n if i[0] <= event_start_low and 1 < i[3] < 39601:\n event_start_low = i[0]\n lowest_timed = [i[0], i[1], i[2], i[3]]\n\n #Determine which is lower\n #If event occurs less than 24hrs after allday event starts its in the same day\n if (lowest_timed[0] - 
tz.localize(datetime.datetime.strptime(lowest_allday[0], \"%Y-%m-%d\"))).seconds < 86400:\n start_time_print = humanize.naturalday(lowest_timed[0]).title() + \"-\" + datetime.datetime.strftime(lowest_timed[0], \"%H:%M\")\n #start_time_print = datetime.datetime.strftime(lowest_timed[0], \"%H:%M\")\n print(f'{start_time_print}-{lowest_timed[2]}')\n #print(lowest_timed)\n elif str(datetime.datetime.strftime(datetime.date.today(), \"%Y-%m-%d %H:%M:%S\")) == str(datetime.datetime.strptime(lowest_allday[0], \"%Y-%m-%d\")):\n print(f'Today: {lowest_allday[2]}')", "title": "" }, { "docid": "222ceb68ba74f95e60eb8d3bc62d1c5a", "score": "0.57661164", "text": "def main():\n store = file.Storage('token.json')\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets('client_secret_google_calendar.json', SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('calendar', 'v3', http=creds.authorize(Http()))\n\n start_date = datetime.datetime(\n 2018, 10, 01, 00, 00) \n start_date_sec = time.mktime(start_date.timetuple())\n end_date = datetime.datetime(2019, 10, 01, 00, 10 ) \n end_date_sec = time.mktime(end_date.timetuple())\n\n print(end_date_sec - start_date_sec)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n print('----- Getting the upcoming 10 events -----')\n events_result = service.events().list(calendarId='primary', timeMin=now,\n maxResults=10, singleEvents=True,\n orderBy='startTime').execute()\n events = events_result.get('items', [])\n delta = 0\n if not events:\n print('No upcoming events found.')\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n #ds = dateutil.parser.parse(start).strftime('%Y-%m-%d %I:%M:%S')\n ds = dateutil.parser.parse(start)\n ds_s = time.mktime(ds.timetuple())\n current = datetime.datetime.now();\n current_s = time.mktime(current.timetuple())\n delta = current_s - ds_s\n print(\"delta = \", delta)\n print(start, event['summary'])\n print(\"----------------------------------\")\n if delta > 0:\n run_vlc()", "title": "" }, { "docid": "c65d733ae80c023e18cba87fbe3f84a9", "score": "0.57598037", "text": "def get_entries_with_dates(dates, entries):\n for entry in entries:\n if (entry.start.date() in dates or\n entry.end and entry.end.date() in dates):\n yield entry", "title": "" }, { "docid": "d064a69069850831f71e0baac6cdce88", "score": "0.57586646", "text": "def events(self, uid, from_datetime, to_datetime):\n info = {\n 'uid': uid,\n 'from_datetime': from_datetime,\n 'to_datetime': to_datetime\n }\n r = requests.get(url=EVENTS_URL, params=info)\n return r.content", "title": "" }, { "docid": "5ada2af6966dfa48ff4f4154e1809e90", "score": "0.57581127", "text": "def get_events(cls, start, stop=None):\n states, event_msidset = cls.get_msids_states(start, stop)\n\n # Assemble a list of dicts corresponding to events in this tlm interval\n events = []\n for state in states:\n tstart = state[\"tstart\"]\n tstop = state[\"tstop\"]\n event = dict(\n tstart=tstart,\n tstop=tstop,\n dur=tstop - tstart,\n start=DateTime(tstart).date,\n stop=DateTime(tstop).date,\n )\n\n # Reject events that are shorter than the minimum duration\n if hasattr(cls, \"event_min_dur\") and event[\"dur\"] < cls.event_min_dur:\n continue\n\n # Custom processing defined by subclasses to add more attrs to event\n event.update(cls.get_extras(event, event_msidset))\n\n events.append(event)\n\n return events", "title": "" }, { "docid": "0e32e54bb9305190950ccd61cfb3c476", 
"score": "0.5751959", "text": "def get_logs_by_date_range(start=\"2014/01/01/00\", end=\"2017/08/20/05\"):\n db = MongoClient(settings.MONGO_HOST, settings.MONGO_PORT).slogs\n\n matches = db.access_raw.find({\"$and\": [\n {\"name\": {\"$lte\": end}},\n {\"name\": {\"$gte\": start}},\n ]})\n return list(matches)", "title": "" }, { "docid": "2fd25e217662fac30d9fa87f8f963b40", "score": "0.57434577", "text": "def get_events(self):\n data = self._request('GET', '/events')\n return data.json()", "title": "" }, { "docid": "987d58ac7b5280f82e64bbac4cbf68e4", "score": "0.5742999", "text": "def get_events_by_date_handler(self, request):\r\n events = self.get_events_by_date()\r\n return 200, {\"Content-Type\": \"application/json\"}, json.dumps(events, ensure_ascii=False)", "title": "" }, { "docid": "3976385cc7a62f74dfe1de52fabaf4e6", "score": "0.5742321", "text": "def search_events(self, keyword: str):\n\n events = self.get_past_events()\n events += self.get_future_events()\n resultList = []\n for event in events:\n if keyword.lower() in event['summary'].lower():\n result = 'Event:' + event['summary'] + ' at ' + event['start'].get('dateTime',\n event['start'].get('date'))\n\n if event['reminders']['useDefault']:\n result += '\\nReminder in 10 minutes before event'\n else:\n for reminder in event['reminders']['overrides']:\n result += '\\nReminder in ' + str(reminder['minutes']) + ' minutes before event as ' + reminder[\n 'method']\n resultList.append(result)\n if len(resultList) < 1:\n resultList.append(\"Nothing showed up in your search\")\n return resultList", "title": "" }, { "docid": "1b7f42e3103b57db0208681c3f805a2d", "score": "0.57289684", "text": "def get_events(cls, start, stop=None):\n from . import orbit_funcs\n\n datestart = DateTime(start).date\n datestop = DateTime(stop).date\n years = sorted(set(x[:4] for x in (datestart, datestop)))\n file_dates = []\n for year in years:\n file_dates.extend(orbit_funcs.get_tlr_files(year))\n tlr_files = [\n x[\"name\"] for x in file_dates if datestart <= x[\"date\"] <= datestop\n ]\n\n # Get all orbit points from the tlr files as a list of tuples\n orbit_points = orbit_funcs.get_orbit_points(tlr_files)\n\n # Process the points, doing various cleanup and return as a np.array\n orbit_points = orbit_funcs.process_orbit_points(orbit_points)\n\n # Get the orbits from the orbit points\n orbits = orbit_funcs.get_orbits(orbit_points)\n\n events = []\n for orbit in orbits:\n ok = orbit_points[\"orbit_num\"] == orbit[\"orbit_num\"]\n event = {key: orbit[key] for key in orbit.dtype.names}\n event[\"foreign\"] = {\n \"OrbitPoint\": orbit_points[ok],\n \"RadZone\": [orbit_funcs.get_radzone_from_orbit(orbit)],\n }\n events.append(event)\n\n return events", "title": "" }, { "docid": "28c519a9190ebfbf2245201252c916e0", "score": "0.5721897", "text": "def get_calendar_view(self, start_dt, end_dt):\n return_type = EntityCollection(self.context, Event, ResourcePath(\"calendarView\", self.resource_path))\n qry = ServiceOperationQuery(self, \"calendarView\", None, None, None, return_type)\n self.context.add_query(qry)\n\n def _construct_request(request):\n \"\"\"\n :type request: office365.runtime.http.request_options.RequestOptions\n \"\"\"\n request.method = HttpMethod.Get\n request.url += \"?startDateTime={0}&endDateTime={1}\".format(start_dt.isoformat(), end_dt.isoformat())\n\n self.context.before_execute(_construct_request)\n return return_type", "title": "" }, { "docid": "5307e7a66a22d9763b708bd0d4717f58", "score": "0.5721119", "text": "def 
get_existing_events(service):\n events_result = service.events().list(calendarId=CAL_ID).execute()\n existing_events = events_result.get('items', [])\n return existing_events", "title": "" }, { "docid": "ad1681942733bfd219625fe7219dbd29", "score": "0.5719279", "text": "def main(argv):\n parser = argparse.ArgumentParser(\n description='Retrieve Google Calendar events.')\n parser.add_argument('--no-of-days', type=str, default=\"7\",\n help='number of days to include')\n parser.add_argument('--calendar', type=str, default=['*'], nargs='*')\n parser.add_argument(\"--list-calendars\", action='store_true')\n parser.add_argument(\"--client_id\", type=str, help='the Google client id')\n parser.add_argument(\"--client_secret\", type=str,\n help='the Google client secret')\n args = parser.parse_args()\n\n # Extract arguments\n no_of_days = int(args.no_of_days)\n client_id = args.client_id\n client_secret = args.client_secret\n selected_calendars = [x.lower() for x in args.calendar]\n all_calendars = '*' in selected_calendars\n\n current_time = datetime.now(timezone.utc).astimezone()\n time_zone = str(current_time.tzinfo)\n start_time = str(current_time.isoformat())\n end_time = str((current_time + relativedelta(days=no_of_days)).isoformat())\n\n if not client_id or not client_secret:\n client_id = DEFAULT_CLIENT_ID\n client_secret = DEFAULT_CLIENT_SECRET\n\n # Authenticate and construct service.\n service = create_service(client_id, client_secret)\n\n calendar_events = []\n try:\n page_token = None\n while True:\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\n for calendar_list_entry in calendar_list['items']:\n if args.list_calendars:\n print(calendar_list_entry['summary'])\n continue\n if all_calendars or (calendar_list_entry['summary'].lower() in selected_calendars):\n events = retrieve_events(\n service, calendar_list_entry['id'], calendar_list_entry['backgroundColor'], start_time, end_time, time_zone)\n if events == -1:\n break\n elif events:\n calendar_events.extend(events)\n page_token = calendar_list.get('nextPageToken')\n if not page_token:\n break\n\n if calendar_events:\n calendar_events = sorted(\n calendar_events, key=lambda k: k['start_date'] + k['start_time'])\n print(json.dumps(calendar_events))\n elif not args.list_calendars:\n print('[{\"calendar_color\": \"#ffffff\", \"summary\": \"NO_EVENTS_FOUND_GOOGLE_CALENDAR\", \"start_date\": \"%s\", \"start_time\": \"00:00\", \"end_date\": \"%s\", \"end_time\": \"00:00\", \"location\": \"\"}]' % (\n current_time.date(), (current_time + relativedelta(days=1)).date()))\n except client.AccessTokenRefreshError:\n # The credentials have been revoked or expired, please re-run the application to re-authorize.\n return -1\n except BaseException:\n return -2", "title": "" }, { "docid": "621c1ba4af81290d374e7c8c96b8cde8", "score": "0.5704777", "text": "def get_day_calendar(email, date):\n calendar = []\n\n conn = sqlite3.connect(DATABASE)\n conn.text_factory = sqlite3.OptimizedUnicode\n cursor = conn.cursor()\n\n sql = \"SELECT code, ora, message, priority FROM CALENDAR WHERE email=? AND data=? 
ORDER BY ora ASC\"\n\n cursor.execute(sql, (email, date))\n calendar = cursor.fetchall()\n\n conn.close()\n return calendar", "title": "" }, { "docid": "72da84739555ac6cbcba41327629ee41", "score": "0.56931174", "text": "def getEvents(self):\n return [GoogleEvent(le) for le in self.base.get(\"items\", [])]", "title": "" }, { "docid": "4f6e7fe492e73368a5edd60b31bcc292", "score": "0.56899977", "text": "def datelist_regular_coverage(root, start_date, start, cur_date):\n\n #print start\n tmp_date=datetime(start.year,cur_date.month,cur_date.day)\n if tmp_date > start :\n start=(tmp_date-datetime(1601,1,1)).days\n else: start=(datetime(start.year+1,cur_date.month,cur_date.day)-datetime(1601,1,1)).days\n datelist=range(start+1,end_date-1,365)\n print datelist\n\n #find the position of the requested date in the datelist\n cur_epoch=(cur_date-datetime(1601,1,1)).days\n cur_pos=min(range(len(datelist)),key=lambda x:abs(datelist[x]-cur_epoch))\n print ('Current position:',cur_pos) \n \n return datelist, cur_pos", "title": "" }, { "docid": "1d1647170ce087085250fef1a167ca51", "score": "0.5687179", "text": "def get_daily_games_gte(self, start):\n pass # TODO", "title": "" }, { "docid": "2bf88d5843de4cabe8f9f75b152a4a9d", "score": "0.5671056", "text": "def get_event_dates(self):\n\t\tdate_list = []\n\t\tfor ev in self.events:\n\t\t\tev_id = ev[\"id\"]\n\t\t\tev_date = datetime(day=int(ev[\"day\"]), month=int(ev[\"month\"]), year=int(ev[\"year\"]),\n\t\t\t\t\t\t\t hour=23, minute=59, second=59)\n\t\t\tdate_list.append((ev_id, ev_date))\n\t\treturn date_list", "title": "" }, { "docid": "605cdd0888a0824e281f57ed1d96e72d", "score": "0.5662322", "text": "def get_upcoming_events(self, user: User) -> [Event]:\n service = self._get_service(user)\n if service is None:\n return []\n\n # Call the Calendar API\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n self._logger.info('Getting the upcoming 10 events', 1)\n try:\n events_result = service.events().list(\n calendarId='primary',\n timeMin=now,\n maxResults=10,\n singleEvents=True,\n orderBy='startTime',\n showDeleted=True\n ).execute()\n events = events_result.get('items', [])\n except (\n ConnectionResetError, ConnectionAbortedError, google.auth.exceptions.TransportError,\n ConnectionError, ConnectionRefusedError, http.client.RemoteDisconnected, socket.gaierror\n ) as exc:\n self._logger.error('A network-related error occurred while fetching upcoming events: %s' % repr(exc), -1)\n return []\n self._logger.untab()\n\n formatted_events = []\n if not events:\n return []\n for event in events:\n is_declined = False\n if 'attendees' in event:\n # Eventually, this should be moved to Event itself and taken into account when handling\n # updated events (changed time or acceptance status).\n self_attendees = list(\n filter(lambda attendee: 'self' in attendee and attendee['self'], event['attendees']))\n declined_self_attendees = list(filter(\n lambda attendee: 'responseStatus' in attendee and 'declined' == attendee['responseStatus'],\n self_attendees\n ))\n if len(declined_self_attendees) > 0 and len(declined_self_attendees) == len(self_attendees):\n is_declined = True\n\n is_cancelled = ('status' in event and 'cancelled' == event['status'])\n\n formatted_events.append(\n Event(event['id'], event['start'], event['end'], event['summary'], user, is_cancelled, is_declined)\n )\n\n return formatted_events", "title": "" }, { "docid": "5fdccc9beb5cfbfdb0cb976f0c09725b", "score": "0.5645892", "text": "def test_event_list(self):\n 
self.client.login(username=self.user5.username, password=\"test\")\n\n e2_start = timezone.now() - timezone.timedelta(days=3)\n e2_end = timezone.now() - timezone.timedelta(days=2)\n Event.objects.create(\n code=\"test-event-2\",\n club=self.club1,\n name=\"Past Test Event 2\",\n start_time=e2_start,\n end_time=e2_end,\n type=Event.FAIR,\n )\n Event.objects.create(\n code=\"test-event-3\",\n club=self.club1,\n name=\"Present Test Event 3\",\n start_time=timezone.now() - timezone.timedelta(days=3),\n end_time=timezone.now() + timezone.timedelta(days=2),\n type=Event.FAIR,\n )\n self.event1.type = Event.FAIR\n self.event1.save()\n\n # test global event\n Event.objects.create(\n code=\"test-event-4\",\n club=None,\n name=\"Test Global Event\",\n start_time=timezone.now() + timezone.timedelta(days=2),\n end_time=timezone.now() + timezone.timedelta(days=3),\n type=Event.OTHER,\n )\n\n # list events without a club to namespace.\n now = timezone.now()\n resp = self.client.get(\n reverse(\"events-list\"),\n {\"end_time__gte\": now},\n content_type=\"application/json\",\n )\n self.assertIn(resp.status_code, [200], resp.content)\n self.assertEquals(len(resp.data), 3, resp.content)\n\n # list events with a filter\n resp = self.client.get(\n reverse(\"events-list\"),\n {\n \"start_time__gte\": e2_start.isoformat(),\n \"end_time__lte\": e2_end.isoformat(),\n },\n content_type=\"application/json\",\n )\n self.assertIn(resp.status_code, [200], resp.content)\n self.assertEquals(1, len(resp.data), resp.data)", "title": "" }, { "docid": "1ddbec8668db271cffad7d2923feee45", "score": "0.5640538", "text": "def get_date_range_where(start_date, end_date):\n where = {}\n if start_date:\n where[\"event_date__gt\"] = start_date.date()\n if end_date:\n where[\"event_date__lt\"] = end_date.date() + timedelta(days=1)\n return where", "title": "" }, { "docid": "e4aa628de52b169ff85e02a178535d50", "score": "0.56391954", "text": "def events():\n ts_start = request.args.get(\"ts_start\", \"\").strip()\n ts_end = request.args.get(\"ts_end\", \"\").strip()\n event_types = request.args.get(\"event_types\", \"\").strip()\n zipcode = extract_postal_code(request.args.get(\"zipcode\", \"\").strip())\n max_dist = request.args.get(\"max_dist\", \"50\").strip()\n utm_source = request.args.get(\"utm_source\", \"\").strip()\n\n if not (ts_start and ts_end and event_types and zipcode):\n return (\"Events lookup requires ts_start, ts_end, event_types, zipcode\", 400)\n\n # We compose URL manually (without using a params hash) because some\n # of the params (like event_types) can be specified more than once,\n # and that can be tricky to produce as intended.\n url = EVENTS_URL_BASE\n url += f\"&zipcode={zipcode}&timeslot_start=gte_{ts_start}&timeslot_start=lte_{ts_end}&max_dist={max_dist}\"\n for event_type in event_types.split(\",\"):\n url += f\"&event_types={event_type.strip()}\"\n\n result = requests.get(url).json()\n\n if result.get(\"data\"):\n result[\"data\"] = [\n massage_event(e, utm_source) for e in result[\"data\"] if should_keep_event(e)\n ]\n\n data = result.get(\"data\", [])\n result[\"count\"] = len(data) if data else 0\n\n return jsonify(result)", "title": "" }, { "docid": "8161fdf0242f495290cfa1e65d3c9900", "score": "0.56213886", "text": "def current_event_set(self):\n\t\treturn self.event_set.filter(event_date__gte=datetime.date.today()).filter(status=3)", "title": "" }, { "docid": "06abff8a269458177c427d9ffabb3d9e", "score": "0.5614046", "text": "def search(self, start_ts, end_ts):\r\n ret_list = []\r\n for stored_doc 
in self.doc_dict.values():\r\n time_stamp = stored_doc['_ts']\r\n if time_stamp <= end_ts or time_stamp >= start_ts:\r\n ret_list.append(stored_doc)\r\n\r\n return ret_list", "title": "" }, { "docid": "633ca378519793bc31d6ecff46ccf3a3", "score": "0.56127065", "text": "def __init__(self, events):\n\n self.events = [\n {\n 'start': event['start'],\n 'end': event['end'],\n 'name': event['name']\n } for event in events\n ]\n self.events.sort(key=lambda e: e['start'])", "title": "" } ]
9d7e1f7f5df28310d15c9647baa93f57
Print statistics of identified and unidentified in the dialog_occurrences
[ { "docid": "e959cdd5a0b58a5b01a40a7b18d2ba2b", "score": "0.759004", "text": "def compute_statistics(dialog_occurrences): \n cUnk = 0\n cOth = 0\n for d in dialog_occurrences:\n if d['from'] == []:\n cUnk += 1\n else:\n cOth += 1\n percentID = (1.0*cOth)/(cOth+cUnk)*100\n print(\"Statistics : \"+str(percentID)+ \"% of recognized [UNKNOWN, Identified] = [\"+str(cUnk)+\",\"+str(cOth)+\"]\")\n return percentID", "title": "" } ]
[ { "docid": "1a3c67914860fa14b0b9023ba7674167", "score": "0.67646086", "text": "def compute_statistics_CID(dialog_occurrences): \n cUnk = 0\n cOth = 0\n for d in dialog_occurrences:\n if len(d['from']) == 0:\n cUnk += 1\n else:\n cOth += 1\n percentID = (1.0*cOth)/(cOth+cUnk)*100\n print(\"Statistics CID: \"+str(percentID)+ \"% of recognized [UNKNOWN, Identified] = [\"+str(cUnk)+\",\"+str(cOth)+\"]\")\n return percentID", "title": "" }, { "docid": "ffc38e4ea7bfef9ea88a34313e744040", "score": "0.6208024", "text": "def print_stats(self):\n print 'Number of primaries:', self.primaries\n print 'Normalisation:', self.normalisation\n count = 0\n if not self.cosmonuc is None:\n print 'Cosmogenic nuclide histogram present'\n count += 1 \n tmpcount = 0\n for particle in self.flux2d_up:\n for detector in self.flux2d_up[particle]:\n tmpcount += len(self.flux2d_up[particle][detector])\n print 'Upwards 2D flux histograms:', tmpcount\n count += tmpcount\n tmpcount = 0\n for particle in self.flux2d_down:\n for detector in self.flux2d_down[particle]:\n tmpcount += len(self.flux2d_down[particle][detector])\n print 'Downwards 2D flux histograms:', tmpcount\n count += tmpcount\n print 'Other 2D histograms:', len(self.hists2d)\n count += len(self.hists2d)\n tmpcount = 0\n for particle in self.primhists:\n tmpcount += len(self.primhists[particle])\n print 'Primaries histograms:', tmpcount\n count += tmpcount\n print 'Atmosphere energy deposition histograms:', len(self.edep_atmo)\n count += len(self.edep_atmo)\n print 'Soil energy deposition histograms:', len(self.edep_soil)\n count += len(self.edep_soil)\n if not len(self.flux_up) == 0:\n print 'Upward flux histograms:', len(self.flux_up)*\\\n len(self.flux_up[self.flux_up.keys()[0]])\n count += len(self.flux_up)*\\\n len(self.flux_up[self.flux_up.keys()[0]])\n else:\n print 'No upward flux histograms'\n if not len(self.flux_down) == 0:\n print 'Downward flux histograms:', len(self.flux_down)*\\\n len(self.flux_down[self.flux_down.keys()[0]])\n count += len(self.flux_down)*\\\n len(self.flux_down[self.flux_down.keys()[0]])\n else:\n print 'No downward flux histograms'\n if not len(self.flux_angular) == 0:\n print 'Angular flux histograms:', len(self.flux_angular)*\\\n len(self.flux_angular[self.flux_angular.keys()[0]])\n count += len(self.flux_angular)*\\\n len(self.flux_angular[self.flux_angular.keys()[0]])\n else:\n print 'No angular flux histograms'\n print 'Other 1D histograms:', len(self.hists1d)\n count += len(self.hists1d)\n print\n print 'Total:', count, 'histograms'\n return", "title": "" }, { "docid": "7dc362a350ca70114ac6fecbb72028c1", "score": "0.60279405", "text": "def report(self):\n myprint(' Found %d %s entries of %s on segment %s' \\\n % (self.enum, self.catname, str(self.pkeys), str(self.segids)))", "title": "" }, { "docid": "ea25ac4d8b449755b3cf4b5d8d6b5bad", "score": "0.59622633", "text": "def collect_show_statistics(self):\n epocs = 0\n examples = 0\n channels = 0\n learning_rate = 0.0\n self.show_statistics(epocs=epocs,\n examples=examples, \n channels=channels,\n learning_rate=learning_rate)", "title": "" }, { "docid": "b61ca244a78fcdf65ef5c8be3b59434b", "score": "0.5949486", "text": "def report_messages_stats(sect, stats, _):\r\n if not stats['by_msg']:\r\n # don't print this report when we didn't detected any errors\r\n raise EmptyReport()\r\n in_order = sorted([(value, msg_id)\r\n for msg_id, value in stats['by_msg'].iteritems()\r\n if not msg_id.startswith('I')])\r\n in_order.reverse()\r\n lines = ('message id', 'occurrences')\r\n 
for value, msg_id in in_order:\r\n lines += (msg_id, str(value))\r\n sect.append(Table(children=lines, cols=2, rheaders=1))", "title": "" }, { "docid": "b055713e46f3b7fc67757985cddaac0c", "score": "0.5820985", "text": "def remote_display_summary(self, period_content):\n logger.info(u\"{} Summary\".format(self._le2mclt.uid))\n self.histo.append([period_content.get(k) for k in self.histo_vars])\n if self._le2mclt.simulation:\n return 1\n else:\n defered = defer.Deferred()\n ecran_recap = GuiRecapitulatif(\n defered, self._le2mclt.automatique, self._le2mclt.screen,\n self.currentperiod, self.histo,\n texts_EXPERIENCE_NOM_COURT.get_text_summary(period_content))\n ecran_recap.show()\n return defered", "title": "" }, { "docid": "ccb0f4485990ae8ac0a4817a9364785b", "score": "0.5733359", "text": "def print_summary_metrics(associations: dict) -> None:\n for filter_type, results in associations.items():\n print(f'Summary results for {filter_type}:')\n if results is None:\n print('No filter was specified for this query.')\n elif len(results) == 0:\n print('The query did not return any results.')\n else:\n print(f'Minimum = {results.score_overall.min():.3f}')\n print(f'Maximum = {results.score_overall.max():.3f}')\n print(f'Mean = {results.score_overall.mean():.3f}')\n print(f'Standard deviation = {results.score_overall.std():.3f}')\n print()", "title": "" }, { "docid": "18d98cef1f0aa25fb3629884e5fd2504", "score": "0.57328266", "text": "def logs_analysis():\n popular_article()\n popular_author()\n error_days()", "title": "" }, { "docid": "fcd0f22b89a0e1eb26ec92f31d42cbb2", "score": "0.5711998", "text": "def psummary(self):\n histo = dict()\n llen = 0\n for item in self:\n t = type(item)\n if t in histo:\n histo[t] += 1\n else:\n histo[t] = 1\n t_str = str(t)\n if len(t_str) > llen:\n llen = len(t_str)\n f_str = '{:>' + str(llen) + '}'\n for k in histo:\n print(f_str.format(str(k)) + '\\t' + str(histo[k]))", "title": "" }, { "docid": "6dab4eb50b404bfdf1f84cbacf3eeacb", "score": "0.5695393", "text": "def showStatistics(self):\r\n print('%s won %d and lost %d games.' 
%(self.getName(),\r\n self.__gamesWon,\r\n self.__gamesLost))", "title": "" }, { "docid": "b2bc98e7c7c73d73760992215375719a", "score": "0.5594234", "text": "def search_summaries():\n records = db.recent_n_searches(10)\n # # FOR DEMO HARD CODED\n # records = []\n # records.append(db.recent_n_searches(1)[0])\n # for desired_search_id in [109, 110, 129, 140, 128, \n # 132, 138, 130, 100]:\n # records.append(db.demo_search_record_by_id(desired_search_id))\n return build_summary_info(records)", "title": "" }, { "docid": "f200e668c00d1666d7684c3f9b200e61", "score": "0.5573008", "text": "def summaries(self):\n ...", "title": "" }, { "docid": "0ed10a9c4528f16f68361546657fe2bb", "score": "0.55666584", "text": "def diagnostics():\n data = get_validation_data()\n for i in range(38):\n print(ATTACK_TYPES_RMAP[i], np.sum(data[:,-1] == i))", "title": "" }, { "docid": "437c2db1540efb7830726318f52bc80b", "score": "0.55572665", "text": "def report(word_dict):\n # report on various statistics based on the given word count dictionary\n with open('out.txt', 'w', encoding='utf-8') as file:\n for k, v in sorted(word_dict.items()):\n print(k, ': ', v, file=file)\n return", "title": "" }, { "docid": "daac39d4ebc054b93e2c00db108d6c7d", "score": "0.5549232", "text": "def display_count(self):\r\n\t\tfor state in self.state_report_count:\r\n\t\t\tprint(state, self.state_report_count[state])", "title": "" }, { "docid": "e3b8a976a63097dfdb677815f271d7d0", "score": "0.55311227", "text": "def print_full_results(self):\n for n in self.combined_tweet_count.keys():\n print 'Processing stock: ', n\n for date, count in self.combined_tweet_count[n]:\n print date,' ', count", "title": "" }, { "docid": "44930674f440cdd5001b9a744011d742", "score": "0.5529274", "text": "def print_empty(self):\n def parse_title(hist):\n \"\"\"Parses and returns title of histogram.\"\"\"\n if 'Title' in hist.params:\n titleparse = re.match('(.*)\\s*\\[(.*)\\]',\n hist.params['Title'])\n res = titleparse.group(1)\n else:\n res = 'Unknown histogram title'\n return res\n message = ''\n count = 0\n #cosmonuc:\n if self.cosmonuc.isempty():\n message += '\\tCosmogenic nuclides histogram\\n'\n count += 1\n #flux2d_down:\n for particle in self.flux2d_down:\n for detector in self.flux2d_down[particle]:\n for hist in self.flux2d_down[particle][detector]:\n if hist.isempty():\n message += '\\tflux2d_down, particle ' + particle +\\\n ', detector ' + str(detector) + ':' +\\\n parse_title(hist) + '\\n'\n count += 1\n #flux2d_up:\n for particle in self.flux2d_up:\n for detector in self.flux2d_up[particle]:\n for hist in self.flux2d_up[particle][detector]:\n if hist.isempty():\n message += '\\tflux2d_up, particle ' + particle +\\\n ', detector ' + str(detector) + ':' +\\\n parse_title(hist) + '\\n'\n count += 1\n #hists2d:\n for hist in self.hists2d:\n if hist.isempty():\n message += '\\thists2d: ' + parse_title(hist) + '\\n'\n count += 1\n #edep_atmo:\n for hist in self.edep_atmo:\n if hist.isempty():\n message += '\\tedep_atmo: ' + parse_title(hist) + '\\n'\n count += 1\n #edep_soil:\n for hist in self.edep_soil:\n if hist.isempty():\n message += '\\tedep_soil: ' + parse_title(hist) + '\\n'\n count += 1\n #primaries:\n for particle in self.primhists:\n for hist in self.primhists[particle]:\n if hist.isempty():\n message += '\\tprimhists, particle ' + particle + '\\n'\n count += 1\n #flux_down:\n for particle in self.flux_down:\n for detector in self.flux_down[particle]:\n if self.flux_down[particle][detector].isempty():\n message += '\\tflux_down, particle ' + 
particle +\\\n ', detector ' + str(detector) + '\\n'\n count += 1\n #flux_up:\n for particle in self.flux_up:\n for detector in self.flux_up[particle]:\n if self.flux_down[particle][detector].isempty():\n message += '\\tflux_up, particle ' + particle +\\\n ', detector ' + str(detector) + '\\n'\n count += 1\n #flux_angular:\n for particle in self.flux_angular:\n for detector in self.flux_angular[particle]:\n if self.flux_angular[particle][detector].isempty():\n message += '\\tflux_angular, particle ' + particle +\\\n ', detector ' + str(detector) + '\\n'\n count += 1\n #hists1d:\n for hist in self.hists2d:\n if hist.isempty():\n message += '\\thists1d: ' + parse_title(hist) + '\\n'\n count += 1\n #finalize and print message:\n if message == '':\n message = 'No all-zero histograms detected.'\n else:\n message = 'The following all-zero histograms \\\n have been detected:\\n' + message\n message += '\\nTotal count: ' + str(count)\n print message\n return", "title": "" }, { "docid": "6e78d84cb837e2e7344a67d88cdb23c1", "score": "0.55106425", "text": "def report_statistics_after_training(self):\n\n print \"REPORTING CORPUS STATISTICS\"\n print \"NUMBER OF DOCUMENTS IN POSITIVE CLASS:\", self.class_total_doc_counts[POS_LABEL]\n print \"NUMBER OF DOCUMENTS IN NEGATIVE CLASS:\", self.class_total_doc_counts[NEG_LABEL]\n print \"NUMBER OF TOKENS IN POSITIVE CLASS:\", self.class_total_word_counts[POS_LABEL]\n print \"NUMBER OF TOKENS IN NEGATIVE CLASS:\", self.class_total_word_counts[NEG_LABEL]\n print \"VOCABULARY SIZE: NUMBER OF UNIQUE WORDTYPES IN TRAINING CORPUS:\", len(self.vocab)", "title": "" }, { "docid": "bcce0851d9b282c0413716da897ff75a", "score": "0.5504341", "text": "def summary_stats(self, data):", "title": "" }, { "docid": "c3954b7352f546853f3849a6916f00c8", "score": "0.54819834", "text": "def analyze():\n print('[STATS]')\n stats = {\n 'All species': len(db.all()),\n 'Largest genome': _largest_genome(),\n }\n\n _print_dictionary(stats.items())\n\n # @TODO rewrite into single entire db iteration\n print('Groups')\n _print_dictionary(_count_groups().items())", "title": "" }, { "docid": "8a9f5c7b425c9833492f5fcebf4ba7ce", "score": "0.546097", "text": "def report (self):\n\t\tself.sort()\n\t\tfor spec in self:\n\t\t\tprint \"%s %s (%s)\" % (spec.recId, spec.term, spec.pubType)", "title": "" }, { "docid": "67325b3bbad076ebd1402624e5cadfc4", "score": "0.5458686", "text": "def print_stats(self):\n reports = stats.items()\n reports.sort(key=lambda x: -1 * x[1])\n for row in reports:\n print(\"%s: %s\" % (row[0], row[1]))", "title": "" }, { "docid": "06d0528f12938a4d2171a4f2c2f24f7e", "score": "0.5449861", "text": "def matching_summary(search_1,search_2):\n print ('Common words in the summary of', search_1,'&', search_2)\n\n text_1 = text_to_word(wikipedia.summary(search_1))\n text_2 = text_to_word(wikipedia.summary(search_2))\n histogram_1 = histogram(text_1)\n histogram_2 = histogram(text_2)\n\n preposition = ['as','by','in','at','on','with','of','for','to','through','after','from','over','until','during','under','all']\n pronoun = ['i','me','my','he','his','him','she','her','hers','we','our','ours','it','its','they','them','their']\n number = ['one','two','three','four','five','first','second','third']\n article = ['also','an','a','the','non','most']\n commonverb = ['is','are','was','were','be','been','have','has','had','get','got','gotten','can','could','make','made']\n alphabet = 
['a','b','c','d','e','g','f','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','–','&']\n conjunction = ['and','but','or','yet','that','which','where','what','when','why','how']\n\n common_count = dict()\n for word in text_1:\n geometric_mean = math.sqrt(histogram_1.get(word, 0)*histogram_2.get(word, 0))\n if geometric_mean != 0:\n if word not in preposition and word not in pronoun and word not in article and word not in commonverb and word not in alphabet and word not in conjunction and word not in number:\n common_count[word] = geometric_mean\n\n return common_count", "title": "" }, { "docid": "75f7928e53d3a9f2b3078e921e9f1118", "score": "0.5442338", "text": "def report_statistics_after_training(self):\n\n\t\tprint \"REPORTING CORPUS STATISTICS\"\n\t\tprint \"NUMBER OF DOCUMENTS IN POSITIVE CLASS:\", self.class_total_doc_counts[POS_LABEL]\n\t\tprint \"NUMBER OF DOCUMENTS IN NEGATIVE CLASS:\", self.class_total_doc_counts[NEG_LABEL]\n\t\tprint \"NUMBER OF TOKENS IN POSITIVE CLASS:\", self.class_total_word_counts[POS_LABEL]\n\t\tprint \"NUMBER OF TOKENS IN NEGATIVE CLASS:\", self.class_total_word_counts[NEG_LABEL]\n\t\tprint \"VOCABULARY SIZE: NUMBER OF UNIQUE WORDTYPES IN TRAINING CORPUS:\", len(self.vocab)", "title": "" }, { "docid": "b926252f98c367205c87173b3f430669", "score": "0.54411966", "text": "def command_summary(gamefile):\n with open(gamefile, 'rb') as f:\n rec = janissary.RecordedGame(f)\n body = janissary.BodyParser(janissary.BinReader(rec.body_bytes()))\n \n command_counts = {}\n for op in body:\n if isinstance(op, janissary.Sync):\n print(\"Sync %d\" % op.time)\n if isinstance(op, janissary.Command):\n if op.type not in command_counts:\n command_counts[op.type] = 0\n command_counts[op.type] += 1\n if op.type == 0x81:\n print(\"Command %x\" % op.type)\n print_hex(op.data)\n\n\n print(\"Cmd | Count\")\n print(\"------------\")\n\n rows = []\n for k, v in command_counts.items():\n rows.append((\"0x%02x\" % k, command_name(k), v))\n rows = sorted(rows, key=lambda x: x[0])\n \n print(tabulate(rows, headers=[\"CMD ID\", \"CMD Name\", \"Count\"]))", "title": "" }, { "docid": "b5190884beddbc6afaf6786e8abf08ad", "score": "0.5435347", "text": "def print_stats(self):\n if self.optimalAssignment:\n print \"Found %d optimal assignments with weight %f in %d operations\" % \\\n (self.numOptimalAssignments, self.optimalWeight, self.numOperations)\n print \"First assignment took %d operations\" % self.firstAssignmentNumOperations\n else:\n print \"No solution was found.\"", "title": "" }, { "docid": "8c29196f390cbc3f27093877a672ff87", "score": "0.5421965", "text": "def _print_stats(self, stats):\r\n print('[%(name)s] low: %(low)d, high: %(high)d, avg: '\r\n '%(average).1f, total: %(total)d, '\r\n 'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '\r\n 'reported: %(reported)d' % stats)", "title": "" }, { "docid": "8b2489f5fc3194f3482be4c9a380f154", "score": "0.5411929", "text": "def print_stats(ccd, cnam, x, y, hsbox, warn=True):\n\n wnam = ccd.inside(x,y,0)\n if wnam is not None:\n wind = ccd[wnam]\n ix = int(round(wind.x_pixel(x)))\n iy = int(round(wind.y_pixel(y)))\n ix1 = max(0, ix - hsbox)\n ix2 = min(wind.nx, ix + hsbox + 1)\n iy1 = max(0, iy - hsbox)\n iy2 = min(wind.ny, iy + hsbox + 1)\n\n print('\\nClicked on x,y = {:.2f},{:.2f} in CCD {:s}, window {:s}'.format(\n x,y,cnam,wnam)\n )\n\n print(' Stats box in window pixels, X,Y = [{:d}:{:d},{:d}:{:d}] ({:d}x{:d}), central pixel = [{:d},{:d}], value = {:.2f}'.format(\n 
ix1,ix2,iy1,iy2,ix2-ix1,iy2-iy1,ix,iy,wind.data[iy,ix])\n )\n\n box = wind.data[iy1:iy2,ix1:ix2]\n print(\n ' Mean = {:.4g}, RMS = {:.4g}, min = {:.4g}, max = {:.4g}, median = {:.4g}'.format(\n box.mean(),box.std(),box.min(),box.max(),np.median(box)\n )\n )\n\n else:\n wind = None\n if warn:\n print('\\n *** selected position ({:.1f},{:.1f}) not in any window'.format(x,y))\n\n return (wnam, wind)", "title": "" }, { "docid": "056b1a43c90bce3e0bfb4f81f3e8a985", "score": "0.54004383", "text": "def summary(self) :", "title": "" }, { "docid": "271f01e660a16cdb2bbc60fcbf941a5d", "score": "0.53723013", "text": "def compute_built_in_summary_results(self):", "title": "" }, { "docid": "8a6764fc59fbb2939d39b9c0cf95f45a", "score": "0.5365417", "text": "def collect_stats(term: str, data):\n stats = {\n \"num_courses\": {\n \"b\": 0,\n \"g\": 0,\n },\n \"num_instructors\": {\n \"b\": 0,\n \"g\": 0,\n },\n \"num_sections\": {\n \"b\": 0,\n \"g\": 0,\n \"mw\": {\n \"b\": 0,\n \"g\": 0,\n },\n \"tu\": {\n \"b\": 0,\n \"g\": 0,\n },\n },\n \"num_seats_available\": {\n \"b\": 0,\n \"g\": 0,\n },\n \"num_seats_taken\": {\n \"b\": 0,\n \"g\": 0,\n },\n \"total_credit_hours\": {\n \"b\": 0,\n \"g\": 0,\n },\n }\n\n courses = {\n \"b\": set(),\n \"g\": set(),\n }\n\n instructors = {\n \"b\": set(),\n \"g\": set(),\n }\n\n for section in data:\n class_info = [s[\"meetingTime\"] for s in section[\"meetingsFaculty\"]\n if s[\"meetingTime\"][\"meetingType\"] == \"CLAS\"]\n if not class_info or not class_info[0] or not class_info[0][\"campus\"]:\n continue\n\n class_info = class_info[0]\n\n if \"B\" in class_info[\"campus\"]:\n campus = \"b\"\n else:\n campus = \"g\"\n\n subject, number = section[\"subject\"], section[\"courseNumber\"]\n course_code = f\"{subject} {number}\".upper()\n courses[campus].add(course_code)\n\n for f in section[\"faculty\"]:\n instructors[campus].add(f[\"displayName\"])\n\n stats[\"num_sections\"][campus] += 1\n\n if class_info[\"monday\"] and class_info[\"wednesday\"]:\n stats[\"num_sections\"][\"mw\"][campus] += 1\n elif class_info[\"sunday\"] and class_info[\"tuesday\"]:\n stats[\"num_sections\"][\"tu\"][campus] += 1\n\n stats[\"num_seats_available\"][campus] += section[\"maximumEnrollment\"]\n stats[\"num_seats_taken\"][campus] += (\n section[\"maximumEnrollment\"] - section[\"seatsAvailable\"]\n )\n\n if class_info[\"creditHourSession\"]:\n stats[\"total_credit_hours\"][campus] += class_info[\"creditHourSession\"]\n\n for c in [\"b\", \"g\"]:\n stats[\"num_courses\"][c] = len(courses[c])\n stats[\"num_instructors\"][c] = len(instructors[c])\n\n stats[\"timestamp\"] = time.time() * 1000\n\n return stats", "title": "" }, { "docid": "896c96693b6cb4b1cfd7f9bd34a8b8a6", "score": "0.5362439", "text": "def _print_statistics(self) -> None:\n statistics = self._model.get_statistics()\n self._window_statistics.addstr(0, 1, \"Level: \" + str(statistics.get_level()))\n self._window_statistics.addstr(1, 1, \"Score: \" + str(statistics.get_score()))\n self._window_statistics.addstr(2, 1, \"Lines: \" + str(statistics.get_lines_completed()))\n self._window_statistics.addstr(3, 1, \"Time: \" + str(statistics.get_duration()))\n\n self._window_statistics.refresh()", "title": "" }, { "docid": "846ec8bec821710b5e49b1f193962166", "score": "0.5359234", "text": "def print_stats(self):\n\t\tif self.condition == 'CNV':\n\t\t\tprint('Condition: ', self.condition)\n\t\t\tprint('Total images: ', self.total_imgs)\n\n\t\telif self.condition == 'DME':\n\t\t\tprint('Condition: ', self.condition)\n\t\t\tprint('Total 
images: ', self.total_imgs)\n\n\t\telif self.condition == 'Drusen':\n\t\t\tprint('Condition: ', self.condition)\n\t\t\tprint('Total images: ', self.total_imgs)\n\t\tpass", "title": "" }, { "docid": "a74d86dcf69ea71d6e4c681eb8166f34", "score": "0.5347264", "text": "def log_stats(self):\n scores = [t[1] for t in self.score_population()] #.sort(reverse=True)\n if len(scores) == 0:\n stats = (0.0, 0.0, 0.0)\n else:\n stats = (scores[0], sum(scores) / len(scores), scores[-1])\n\n self.stats_logger.info(\n \"%s,%i,%f,%f,%f\",\n self.id,\n self.iteration,\n stats[0],\n stats[1],\n stats[2]\n )", "title": "" }, { "docid": "88d0af30e99aee5315e173115c06d80f", "score": "0.53462976", "text": "def output_values(self):\n print('\\n==========================\\n' + f'Просмотренно вакансий: {self.count}')\n for key, value in self.dict_word_count.items():\n print(f'Упоминаний \\'{key}\\': {value}')\n print('==========================')\n for key, value in self.dict_avg_count.items():\n print(f'AVG {key}: {value}')", "title": "" }, { "docid": "a72d1777b372b524d1ac5d65ee6aa9f4", "score": "0.5344813", "text": "def show_summary(status: GameCode) -> None:\n print(GAME_OUTPUT_MESSAGES[status])", "title": "" }, { "docid": "71b82bef285d01534a45556f43f62fbc", "score": "0.5313436", "text": "def analyse(results: dict):\n for desc, results_list in results.items(): # desc = key, results_list = item\n mode = statistics.mode(results_list)\n print(f\"{desc}: Mode = {mode}\")\n average = statistics.mean(results_list)\n print(f\"{desc}: Average = {average}\")\n standard_deviation = statistics.stdev(results_list)\n print(f\"{desc}: Standard deviation = {standard_deviation}\")\n quartiles = statistics.quantiles(results_list, method='inclusive')\n print(f\"{desc}: Quartiles = {quartiles}\")\n iqr = quartiles[2] - quartiles[0]\n if standard_deviation:\n skewness_mode = (average - mode) / standard_deviation\n skewness_median = 3 * (average - quartiles[1]) / standard_deviation\n print(f\"{desc}: Pearson skewness (mode) = {skewness_mode}\")\n print(f\"{desc}: Pearson skewness (median) = {skewness_median}\")\n else:\n print(\"Skewness = 0\")\n fig = plt.figure()\n grid = GridSpec(5, 1, figure=fig)\n ax_box = fig.add_subplot(grid[0, 0])\n ax_hist = fig.add_subplot(grid[1:, 0])\n ax_box.axis('off')\n ax_hist.grid()\n ax_box.get_shared_x_axes().join(ax_box, ax_hist)\n fig.suptitle(f\"{desc}: transmissions\")\n ax_hist.set_xlabel(f\"Number of packets sent: {desc}\")\n ax_hist.set_ylabel(\"Occurences\")\n q0 = quartiles[0] - 1.5 * iqr\n q4 = quartiles[2] + 1.5 * iqr\n # boxplot and histogram\n ax_box.boxplot([q0, quartiles[0], quartiles[1], quartiles[2], q4], vert=False)\n hist_bins = np.arange(min(results_list), max(results_list) + 1, 1)\n if len(hist_bins) > 20:\n hist_bins = 20\n counts, bins, bars = ax_hist.hist(results_list, bins=hist_bins)\n x_data = []\n for i in range(len(bins) - 1):\n x_data.append((bins[i] + bins[i + 1]) / 2)\n y_data = counts\n try:\n params,params_cov=opt.curve_fit(gauss_function, x_data, y_data, p0=[max(y_data), quartiles[1], iqr / 1.349])\n print(f\"{desc}: Gauss parameters (a, mu, sigma): {params}\")\n ax_hist.plot(x_data, gauss_function(x_data, params[0], params[1], params[2]))\n except Exception as e:\n print(\"Couldn't estimate Gauss function\")\n plt.waitforbuttonpress()", "title": "" }, { "docid": "49175ce98ea4a4f0f1abb54c508167f9", "score": "0.53106225", "text": "def print_stats(self):\n if self.optimalAssignment:\n print (\"Found %d optimal assignments with weight %f in %d operations\" % \\\n 
(self.numOptimalAssignments, self.optimalWeight, self.numOperations))\n else:\n print (\"No solution was found.\")", "title": "" }, { "docid": "47a6fd5e27f3e4bd51b5ecddf62cc7df", "score": "0.52981615", "text": "def print_summary(ug_data, wp_data, cmp_data, cmp_data_fuzzy=None, cmp_data_hist=None):\n\n vals_total = len(ug_data.index) * len(ug_data.columns)\n vals_nan = 0 # nan vals in orig data, e.g. there's no 'Capital' for 'Antarctica'\n vals_nowp = 0 # nan vals in Wikipedia data where value does exist in UG data\n vals_matching_fuzzy = 0\n vals_mismatch = 0\n vals_mismatch_en = 0\n vals_mismatch_de = 0\n vals_mismatch_es = 0\n vals_mismatch_fr = 0\n vals_mismatch_nb = 0\n\n print('\\n\\n\\nFields for which UG data exists, but Wikipedia data could not be found:')\n for country in cmp_data.index:\n for col in cmp_data.columns:\n if not cmp_data[col][country]:\n if pandas.isna(wp_data[col][country]):\n vals_nowp += 1\n print(country + ':' + col + ' (UG=' +\n ug_data[col][country] + '), ', end='')\n else:\n if pandas.isna(ug_data[col][country]):\n vals_nan += 1\n\n if cmp_data_fuzzy is not None:\n print('\\n\\n\\nFields which probably/mostly match, but should get human',\n 'verification for missing accents, incorrect capitalization,',\n 'inconsistent abbreviations (St. vs Saint), extra or missing',\n 'capitals for countries with more than one, etc.:')\n for country in cmp_data.index:\n for col in cmp_data.columns:\n if cmp_data_fuzzy[col][country] and not cmp_data[col][country]:\n if pandas.notna(wp_data[col][country]):\n vals_matching_fuzzy += 1\n print(one_result_comparison(\n ug_data, wp_data, country, col))\n\n print('\\n\\n\\nMismatches beween UG and Wikipedia:')\n for country in cmp_data.index:\n for col in cmp_data.columns:\n if not (cmp_data_fuzzy[col][country] or cmp_data[col][country]):\n if pandas.notna(wp_data[col][country]):\n vals_mismatch += 1\n print(one_result_comparison(ug_data, wp_data, country, col))\n if col.endswith(':de'):\n vals_mismatch_de += 1\n elif col.endswith(':es'):\n vals_mismatch_es += 1\n elif col.endswith(':fr'):\n vals_mismatch_fr += 1\n elif col.endswith(':nb'):\n vals_mismatch_nb += 1\n else:\n vals_mismatch_en += 1\n\n vals_total_notna = vals_total - vals_nan\n vals_matching = (vals_total_notna\n - vals_matching_fuzzy\n - vals_nowp\n - vals_mismatch)\n\n print('Total values (incl. 
UG=NaN): ' + str(vals_total))\n print(' Total values (w/o UG=NaN): ' + str(vals_total_notna))\n print(' Values matching: ' + str(vals_matching))\n print(' Values w/o Wikipedia data: ' + str(vals_nowp))\n print(' Values with fuzzy matching: ' + str(vals_matching_fuzzy))\n print(' Values not matching: ' + str(vals_mismatch))\n print(' Values not matching (en): ' + str(vals_mismatch_en))\n print(' Values not matching (de): ' + str(vals_mismatch_de))\n print(' Values not matching (es): ' + str(vals_mismatch_es))\n print(' Values not matching (fr): ' + str(vals_mismatch_fr))\n print(' Values not matching (nb): ' + str(vals_mismatch_nb))", "title": "" }, { "docid": "7401969781abed272c9efe47ef5c20d8", "score": "0.52895474", "text": "def display_counts_update(self):\n # NOTE: current all counts are the same\n # so fetching val[0] is representative of the pot\n r1str = str(self.r1.vals[0])\n r2str = str(self.r2.vals[1])\n if self.k1.get(): r1str+='*'\n if self.k2.get(): r2str+='*'\n self.disp.fill(0)\n self.disp.text(\"X1=\", self.disp_tabs[0], self.disp_rows[0])\n self.disp.text(r1str, self.disp_tabs[1], self.disp_rows[0])\n self.disp.text(\"X2=\", self.disp_tabs[0], self.disp_rows[1])\n self.disp.text(r2str, self.disp_tabs[1], self.disp_rows[1])\n self.disp.show()", "title": "" }, { "docid": "1565b62fa522065054abd6dc5852de77", "score": "0.5288691", "text": "def _DisplayResults(self):\r\n print\r\n print '=' * 78\r\n print 'DIAGNOSTIC RESULTS'.center(78)\r\n print '=' * 78\r\n\r\n if 'latency' in self.results:\r\n print\r\n print '-' * 78\r\n print 'Latency'.center(78)\r\n print '-' * 78\r\n print ('Operation Size Trials Mean (ms) Std Dev (ms) '\r\n 'Median (ms) 90th % (ms)')\r\n print ('========= ========= ====== ========= ============ '\r\n '=========== ===========')\r\n for key in sorted(self.results['latency']):\r\n trials = sorted(self.results['latency'][key])\r\n op, numbytes = key.split('_')\r\n numbytes = int(numbytes)\r\n if op == 'METADATA':\r\n print 'Metadata'.rjust(9), '',\r\n print MakeHumanReadable(numbytes).rjust(9), '',\r\n self._DisplayStats(trials)\r\n if op == 'DOWNLOAD':\r\n print 'Download'.rjust(9), '',\r\n print MakeHumanReadable(numbytes).rjust(9), '',\r\n self._DisplayStats(trials)\r\n if op == 'UPLOAD':\r\n print 'Upload'.rjust(9), '',\r\n print MakeHumanReadable(numbytes).rjust(9), '',\r\n self._DisplayStats(trials)\r\n if op == 'DELETE':\r\n print 'Delete'.rjust(9), '',\r\n print MakeHumanReadable(numbytes).rjust(9), '',\r\n self._DisplayStats(trials)\r\n\r\n if 'write_throughput' in self.results:\r\n print\r\n print '-' * 78\r\n print 'Write Throughput'.center(78)\r\n print '-' * 78\r\n write_thru = self.results['write_throughput']\r\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\r\n MakeHumanReadable(write_thru['file_size']),\r\n write_thru['num_copies'],\r\n MakeHumanReadable(write_thru['total_bytes_copied']))\r\n print 'Write throughput: %s/s.' % (\r\n MakeBitsHumanReadable(write_thru['bytes_per_second'] * 8))\r\n\r\n if 'read_throughput' in self.results:\r\n print\r\n print '-' * 78\r\n print 'Read Throughput'.center(78)\r\n print '-' * 78\r\n read_thru = self.results['read_throughput']\r\n print 'Copied a %s file %d times for a total transfer size of %s.' % (\r\n MakeHumanReadable(read_thru['file_size']),\r\n read_thru['num_times'],\r\n MakeHumanReadable(read_thru['total_bytes_copied']))\r\n print 'Read throughput: %s/s.' 
% (\r\n MakeBitsHumanReadable(read_thru['bytes_per_second'] * 8))\r\n\r\n if 'listing' in self.results:\r\n print\r\n print '-' * 78\r\n print 'Listing'.center(78)\r\n print '-' * 78\r\n\r\n listing = self.results['listing']\r\n insert = listing['insert']\r\n delete = listing['delete']\r\n print 'After inserting %s objects:' % listing['num_files']\r\n print (' Total time for objects to appear: %.2g seconds' %\r\n insert['time_took'])\r\n print ' Number of listing calls made: %s' % insert['num_listing_calls']\r\n print (' Individual listing call latencies: [%s]' %\r\n ', '.join('%.2gs' % lat for lat in insert['list_latencies']))\r\n print (' Files reflected after each call: [%s]' %\r\n ', '.join(map(str, insert['files_seen_after_listing'])))\r\n\r\n print 'After deleting %s objects:' % listing['num_files']\r\n print (' Total time for objects to appear: %.2g seconds' %\r\n delete['time_took'])\r\n print ' Number of listing calls made: %s' % delete['num_listing_calls']\r\n print (' Individual listing call latencies: [%s]' %\r\n ', '.join('%.2gs' % lat for lat in delete['list_latencies']))\r\n print (' Files reflected after each call: [%s]' %\r\n ', '.join(map(str, delete['files_seen_after_listing'])))\r\n\r\n if 'sysinfo' in self.results:\r\n print\r\n print '-' * 78\r\n print 'System Information'.center(78)\r\n print '-' * 78\r\n info = self.results['sysinfo']\r\n print 'IP Address: \\n %s' % info['ip_address']\r\n print 'Temporary Directory: \\n %s' % info['tempdir']\r\n print 'Bucket URI: \\n %s' % self.results['bucket_uri']\r\n print 'gsutil Version: \\n %s' % self.results.get('gsutil_version',\r\n 'Unknown')\r\n print 'boto Version: \\n %s' % self.results.get('boto_version', 'Unknown')\r\n\r\n if 'gmt_timestamp' in info:\r\n ts_string = info['gmt_timestamp']\r\n timetuple = None\r\n try:\r\n # Convert RFC 2822 string to Linux timestamp.\r\n timetuple = time.strptime(ts_string, '%a, %d %b %Y %H:%M:%S +0000')\r\n except ValueError:\r\n pass\r\n\r\n if timetuple:\r\n # Converts the GMT time tuple to local Linux timestamp.\r\n localtime = calendar.timegm(timetuple)\r\n localdt = datetime.datetime.fromtimestamp(localtime)\r\n print 'Measurement time: \\n %s' % localdt.strftime(\r\n '%Y-%m-%d %I-%M-%S %p %Z')\r\n\r\n print 'Google Server: \\n %s' % info['googserv_route']\r\n print ('Google Server IP Addresses: \\n %s' %\r\n ('\\n '.join(info['googserv_ips'])))\r\n print ('Google Server Hostnames: \\n %s' %\r\n ('\\n '.join(info['googserv_hostnames'])))\r\n print 'Google DNS thinks your IP is: \\n %s' % info['dns_o-o_ip']\r\n print 'CPU Count: \\n %s' % info['cpu_count']\r\n print 'CPU Load Average: \\n %s' % info['load_avg']\r\n try:\r\n print ('Total Memory: \\n %s' %\r\n MakeHumanReadable(info['meminfo']['mem_total']))\r\n # Free memory is really MemFree + Buffers + Cached.\r\n print 'Free Memory: \\n %s' % MakeHumanReadable(\r\n info['meminfo']['mem_free'] +\r\n info['meminfo']['mem_buffers'] +\r\n info['meminfo']['mem_cached'])\r\n except TypeError:\r\n pass\r\n\r\n netstat_after = info['netstat_end']\r\n netstat_before = info['netstat_start']\r\n for tcp_type in ('sent', 'received', 'retransmit'):\r\n try:\r\n delta = (netstat_after['tcp_%s' % tcp_type] -\r\n netstat_before['tcp_%s' % tcp_type])\r\n print 'TCP segments %s during test:\\n %d' % (tcp_type, delta)\r\n except TypeError:\r\n pass\r\n\r\n if 'disk_counters_end' in info and 'disk_counters_start' in info:\r\n print 'Disk Counter Deltas:\\n',\r\n disk_after = info['disk_counters_end']\r\n disk_before = 
info['disk_counters_start']\r\n print '', 'disk'.rjust(6),\r\n for colname in ['reads', 'writes', 'rbytes', 'wbytes', 'rtime',\r\n 'wtime']:\r\n print colname.rjust(8),\r\n print\r\n for diskname in sorted(disk_after):\r\n before = disk_before[diskname]\r\n after = disk_after[diskname]\r\n (reads1, writes1, rbytes1, wbytes1, rtime1, wtime1) = before\r\n (reads2, writes2, rbytes2, wbytes2, rtime2, wtime2) = after\r\n print '', diskname.rjust(6),\r\n deltas = [reads2-reads1, writes2-writes1, rbytes2-rbytes1,\r\n wbytes2-wbytes1, rtime2-rtime1, wtime2-wtime1]\r\n for delta in deltas:\r\n print str(delta).rjust(8),\r\n print\r\n\r\n if 'tcp_proc_values' in info:\r\n print 'TCP /proc values:\\n',\r\n for item in info['tcp_proc_values'].iteritems():\r\n print ' %s = %s' % item\r\n\r\n if 'boto_https_enabled' in info:\r\n print 'Boto HTTPS Enabled: \\n %s' % info['boto_https_enabled']\r\n\r\n if 'request_errors' in self.results and 'total_requests' in self.results:\r\n print\r\n print '-' * 78\r\n print 'In-Process HTTP Statistics'.center(78)\r\n print '-' * 78\r\n total = int(self.results['total_requests'])\r\n numerrors = int(self.results['request_errors'])\r\n numbreaks = int(self.results['connection_breaks'])\r\n availability = (((total - numerrors) / float(total)) * 100\r\n if total > 0 else 100)\r\n print 'Total HTTP requests made: %d' % total\r\n print 'HTTP 5xx errors: %d' % numerrors\r\n print 'HTTP connections broken: %d' % numbreaks\r\n print 'Availability: %.7g%%' % availability\r\n if 'error_responses_by_code' in self.results:\r\n sorted_codes = sorted(\r\n self.results['error_responses_by_code'].iteritems())\r\n if sorted_codes:\r\n print 'Error responses by code:'\r\n print '\\n'.join(' %s: %s' % c for c in sorted_codes)\r\n\r\n if self.output_file:\r\n with open(self.output_file, 'w') as f:\r\n json.dump(self.results, f, indent=2)\r\n print\r\n print \"Output file written to '%s'.\" % self.output_file\r\n\r\n print", "title": "" }, { "docid": "7e09c2c342838f2c0bfe10722b9d4559", "score": "0.5282836", "text": "def print_word_counts(self):\n for word in self.word_cound:\n count = self.word_cound[word]\n print('{0} {1}'.format(word, count))", "title": "" }, { "docid": "22a9f061159d191f9aa46b8c841cebcb", "score": "0.5274358", "text": "def _print_obstacle_information():\n\n max_window = max(f_factory.hw, f_factory.cw, f_factory.gradient_w)\n\n df_list = [df[df['Time'] > max_window] for df in sd.df_list]\n df = pd.concat(df_list, ignore_index=True)\n\n grouped2 = df.groupby(['physDifficulty', 'Logtype']).size()\n\n print('\\nOn physDifficulty=HIGH, there were ' + str(grouped2[2]+grouped2[1]) +\n ' obstacles, out of which the user crashed ' + str(grouped2[1]) +\n ', i.e. ' + str(round(grouped2[1] / grouped2[2], 2) * 100) + '%.')\n\n print('On physDifficulty=MEDIUM, there were ' + str(grouped2[10] + grouped2[9]) +\n ' obstacles, out of which the user crashed ' + str(grouped2[9]) +\n ', i.e. ' + str(round(grouped2[9] / grouped2[10], 2) * 100) + '%.')\n\n print('On physDifficulty=LOW, there were ' + str(grouped2[6]+grouped2[5]) +\n ' obstacles, out of which the user crashed ' + str(grouped2[5]) +\n ', i.e. 
' + str(round(grouped2[5] / grouped2[6], 2) * 100) + '%.')", "title": "" }, { "docid": "d4e60dcc0560e5d4b6d469b65f8ab750", "score": "0.5273248", "text": "def log_statistics(self):\n counts = {}\n for g in self.batch_games:\n m = g.max()\n if m in counts:\n counts[m] += 1\n else:\n counts[m] = 1\n\n print(counts)\n\n file_name = \"game2048_statistics_{}.txt\".format(utils.miscellaneous.get_pretty_time())\n with open(file_name, \"w\") as f:\n f.write(\"--GAME 2048 STATISTICS--\")\n f.write(os.linesep)\n f.write(\"Model: {}\".format(self.model.get_name()))\n f.write(os.linesep)\n f.write(\"Total games: {}, Average score: {}, Average moves: {}\".format(self.game_batch_size,\n np.mean([s.score for s in\n self.batch_games]),\n np.mean([s.total_moves for s in\n self.batch_games])))\n f.write(os.linesep)\n f.write(\"Reached tiles:\")\n f.write(os.linesep)\n\n width = 5\n for key in sorted(counts):\n f.write(\"{}: {} = {}%\".format(str(key).rjust(width), str(counts[key]).rjust(width),\n str(100 * counts[key] / self.game_batch_size).rjust(width)))\n f.write(os.linesep)", "title": "" }, { "docid": "3684d4a5d923fe6e32b117384bb24125", "score": "0.52466255", "text": "def view_stats(fil, text_restriction):\n stats = pstats.Stats(fil)\n # Remove the long directory paths\n stats.strip_dirs()\n # Sort the stats by the total time (internal time)\n sorted_stats = stats.sort_stats('tottime')\n # Only show stats that have \"goldhunt\" in their 'name column'\n sorted_stats.print_stats(\"goldhunt\")", "title": "" }, { "docid": "eebd25ec75cd7d74d81c8ea1f1aadb53", "score": "0.523595", "text": "def printMeasResults(self):\n #elf.measResults[chName][amp][itr] = {\"wf\":vals,\"psd_x\":psd_x,\"psd_y\":psd,\"enob\":enob,\"sinad\":sinad,\"sfdr\":sfdr,\"snr\":snr}", "title": "" }, { "docid": "ddd21dc53d28fd624e8918d724f584a8", "score": "0.5229816", "text": "def print_stats(self):\n headers = []\n headers.append(\"IMAGES\")\n if self.prediction_type == \"r\":\n lbl_tmp = self.df[self.label_col].unique()\n hist, bin_edges = np.histogram(lbl_tmp, bins='auto')\n p = bin_edges[0]\n for s in bin_edges[1:]:\n headers.append(str(np.round(p, 2)) + \" - \" + str(np.round(s, 2)))\n p = s\n else:\n headers += [cls for cls in self.classes]\n headers.append(\"PATIENTS\")\n if self.prediction_type == \"r\":\n hist, bin_edges = np.histogram(self.df[self.label_col].unique(), bins='auto')\n p = bin_edges[0]\n for s in bin_edges[1:]:\n headers.append(str(np.round(p, 2)) + \" - \" + str(np.round(s, 2)))\n p = s\n else:\n headers += [cls for cls in self.classes]\n\n if self.hold_out_size is not None:\n stats = [\n [\"Train\"] + self._get_stats(self.df_trn, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Val\"] + self._get_stats(self.df_val, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Hold\"] + self._get_stats(self.df_ho, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Total\"] + self._get_stats(self.df, self.prediction_type, self.label_col, self.ptid_col, self.classes),\n ]\n else:\n stats = [\n [\"Train\"] + self._get_stats(self.df_trn, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Val\"] + self._get_stats(self.df_val, self.prediction_type, self.label_col, self.ptid_col,\n self.classes),\n [\"Total\"] + self._get_stats(self.df, self.prediction_type, self.label_col, self.ptid_col, self.classes),\n ]\n print(tabulate(stats, headers=headers))\n print()\n print(\"Data shape: {}\".format(self.train_ds.shape))\n if self.z_factor is not 
None:\n print(\n \"NOTE: data have been downsized by a factor of {}\".format(self.z_factor)\n )", "title": "" }, { "docid": "bbcf48e8be675e8f1b268f5726f97b7a", "score": "0.5228298", "text": "def summary(self, log=sys.stdout):\n hnu = self.get_energies()\n intensities = self.get_absolute_intensities()\n te = int(np.log10(intensities.max())) - 2\n scale = 10**(-te)\n\n if not te:\n ts = ''\n elif te > -2 and te < 3:\n ts = str(10**te)\n else:\n ts = '10^{0}'.format(te)\n\n if isinstance(log, str):\n log = paropen(log, 'a')\n\n parprint('-------------------------------------', file=log)\n parprint(' Mode Frequency Intensity', file=log)\n parprint(' # meV cm^-1 [{0}A^4/amu]'.format(ts), file=log)\n parprint('-------------------------------------', file=log)\n for n, e in enumerate(hnu):\n if e.imag != 0:\n c = 'i'\n e = e.imag\n else:\n c = ' '\n e = e.real\n parprint('%3d %6.1f%s %7.1f%s %9.2f' %\n (n, 1000 * e, c, e / u.invcm, c, intensities[n] * scale),\n file=log)\n parprint('-------------------------------------', file=log)\n # XXX enable this in phonons\n # parprint('Zero-point energy: %.3f eV' %\n # self.vibrations.get_zero_point_energy(),\n # file=log)", "title": "" }, { "docid": "6c6f2654c0103e79183d9cdccf9ef85f", "score": "0.5216034", "text": "def report(word_dict):\n # report on various statistics based on the given word count dictionary\n sorted_based_on_size = sorted(word_dict, key=len,reverse=True)\n largestWord = sorted_based_on_size[0]\n is_first = True\n for e in sorted_based_on_size:\n if(len(e)!=len(largestWord)):\n break\n else:\n if not is_first:\n print('or:')\n else:\n is_first=not is_first\n\n print('The longest word is:',e)\n\n\n five_common = sorted(word_dict,key=word_dict.get,reverse=True)[:5]\n print('The five most common words are:')\n for e in five_common:\n print(e, ':', str(word_dict.get(e)))\n\n with open('out.txt','w',encoding='UTF-8') as output:\n for word in sorted(word_dict):\n output.write(word+\":\"+str(word_dict.get(word))+'\\n')", "title": "" }, { "docid": "6cac164cfb55e88111a5f1c663750051", "score": "0.5204059", "text": "def _get_term_stats(self, term):\n p = subprocess.Popen([self.cmdpath, self.path, 'xcount', term],\n stdout=subprocess.PIPE)\n output = p.communicate()[0]\n cf = int(output.strip().split(':')[1])\n\n p = subprocess.Popen([self.cmdpath, self.path, 'dxcount', term],\n stdout=subprocess.PIPE)\n output = p.communicate()[0]\n df = int(output.strip().split(':')[1])\n return {'cf': cf, 'df': df}", "title": "" }, { "docid": "f2d36d43d7337e9b98ac39f36ff1f976", "score": "0.52022815", "text": "def print_all_stats():\n global time_dict\n if time_dict:\n if None in time_dict: del time_dict[None] # Remove None-instance first\n print(\"\\n\\n\\n---------> OVERVIEW OF CALCULATION TIME <---------\\n\")\n keys_space = max(map(lambda x: len(x), time_dict.keys()))\n line = ' {0:^' + str(keys_space) + 's} - {1:^s}'\n line = line.format('Keys', 'Total time')\n print(line)\n print(\"-\" * (len(line) + 3))\n line = '>{0:^' + str(keys_space) + 's} - {1:^s}'\n t = 0\n for k, v in sorted(time_dict.items()):\n t += v['sum']\n print(line.format(k, get_fancy_time(v['sum'])))\n end_line = line.format('Total time', get_fancy_time(t))\n print(\"-\" * (len(end_line)))\n print(end_line)", "title": "" }, { "docid": "1fdd6ffe0e01b48850051b48b0477fdc", "score": "0.5202046", "text": "def summary(self):\n return {\n 'mismatch_count': len(self.misses),\n 'mismatches': [data.to_dict()\n for data in sorted(list(self.misses))],\n 'total_count': self.total\n }", "title": "" }, { 
"docid": "579a0a29edab1e5587468a4ca7f62254", "score": "0.5200447", "text": "def print_stats(self):\r\n stats = \"STR: \" + str(self.special[\"STR\"]) + \"\\r\\n\" + \\\r\n \"PER: \" + str(self.special[\"PER\"]) + \"\\r\\n\" + \\\r\n \"END: \" + str(self.special[\"END\"]) + \"\\r\\n\" + \\\r\n \"CHA: \" + str(self.special[\"CHA\"]) + \"\\r\\n\" + \\\r\n \"INT: \" + str(self.special[\"INT\"]) + \"\\r\\n\" + \\\r\n \"AGL: \" + str(self.special[\"AGL\"]) + \"\\r\\n\" + \\\r\n \"LUCK: \" + str(self.special[\"LUCK\"])\r\n return stats", "title": "" }, { "docid": "5ca75ec188dff252374f9f5b513c73d1", "score": "0.51981705", "text": "def printStats(self):\n aHumans = []\n dHumans = []\n aInfected = []\n dInfected = []\n for char in range(len(self._charList)):\n character = self._charList[char]\n name = character.getName()\n if character.isAlive() == True and character.getInfectedStatus() == False:\n aHumans.append(name)\n if character.isAlive() == True and character.getInfectedStatus() == True:\n aInfected.append(name)\n if character.isAlive() == False and character.getInfectedStatus() == False:\n dHumans.append(name)\n if character.isAlive() == False and character.getInfectedStatus() == True:\n dInfected.append(name)\n score = (50*len(aHumans))-(25*len(dHumans))-(.3*(self.getTurnCount()))\n if len(aInfected) == 0:\n score += 100\n print(\"Surviving humans: \")\n for human in range(len(aHumans)):\n print(\" -\"+aHumans[human])\n print()\n print(\"Dead humans: \")\n for dHuman in range(len(dHumans)):\n print(\" -\"+dHumans[dHuman])\n print()\n print(\"Surviving Things: \")\n for thing in range(len(aInfected)):\n print(\" -\"+aInfected[thing])\n print()\n print(\"Dead Things: \")\n for dthing in range(len(dInfected)):\n print(\" -\"+dInfected[dthing])\n print()\n if score > 0 and self.checkPlayer()==True:\n print(\"Score: \"+str(round(score)))\n if score < 0 or self.checkPlayer()==False:\n print(\"Score: 0\")", "title": "" }, { "docid": "1c816be29ad6e77c82e8c33284334865", "score": "0.51893264", "text": "def print_stats(word, word_inserted, slot_list):\n\n text = \"checking {}\\n\" \\\n \"word inserted {}\\n\" \\\n \"current slot : {}\\n\".format(word['word'], word_inserted, slot_list[word_inserted])\n print(text)", "title": "" }, { "docid": "c9c56db64d19b5163db4f074c5e6de2b", "score": "0.5181254", "text": "def summary(self):\n pass", "title": "" }, { "docid": "08d3ea4f95709532069440921e0451c9", "score": "0.5179661", "text": "def summarize():\n manager = _get_manager()\n out = StringIO()\n\n print(\"Summary for '{}' ('{}').\\n\".format(manager.name, manager.path),\n file=out)\n\n m = len(manager.get_phenotypes_list(dummy=False))\n print(\"{:,d} samples; {:,d} variables:\\n\".format(manager.n, m), file=out,\n end=\"\")\n\n print(\n \"+====================+===========+\\n\"\n \"| Variable type | Count |\\n\"\n \"+--------------------+-----------+\",\n file=out\n )\n\n manager.cur.execute(\n \"SELECT count(*), variable_type \"\n \"FROM phenotypes p LEFT OUTER JOIN \"\n \" dummy_phenotypes d \"\n \" on p.name=d.name \"\n \"WHERE d.name IS NULL \"\n \"GROUP BY p.variable_type\"\n )\n for count, t in manager.cur:\n print(\"| {:<18} | {:>9} |\".format(t, count), file=out)\n print(\"+--------------------+-----------+\\n\", file=out)\n\n counts = {}\n counts[\"entries\"] = manager.cur.execute(\n \"SELECT count(*) FROM drug_users;\"\n ).fetchone()[0]\n\n counts[\"drugs\"] = manager.cur.execute(\n \"SELECT count(DISTINCT drug_id) FROM drug_users;\"\n ).fetchone()[0]\n\n counts[\"samples\"] = manager.cur.execute(\n 
\"SELECT count(DISTINCT sample_id) FROM drug_users;\"\n ).fetchone()[0]\n\n print(\n \"Drug data contains {entries:,d} entries on {drugs:,d} drugs for \"\n \"{samples:,d} samples.\".format(**counts), file=out\n )\n\n return {\"success\": True, \"message\": out.getvalue()}", "title": "" }, { "docid": "6401eb07353b16ed98348f6d09c097de", "score": "0.5172243", "text": "def printStatistics(self):\n if self.pbmsAsked:\n rightPercentage = ' (' + str(self.noRight*100//self.pbmsAsked) + ' % )'\n wrongPercentage = ' (' + str(100 - (self.noRight*100//self.pbmsAsked) ) + ' % )'\n else: rightPercentage = wrongPercentage = ''\n return _('Number of problems in database: ') + str(self.noOfPbms) +'\\n' \\\n + _('Number of questions asked : ') + str(self.pbmsAsked) +'\\n' \\\n + _('Number of right answers : ') + str(self.noRight) \\\n + rightPercentage + '\\n' \\\n + _('Number of wrong answers : ') + str(self.noWrong) \\\n + wrongPercentage + '\\n'", "title": "" }, { "docid": "a82bf36a56de2a4d5c5e16cd614a759c", "score": "0.51690066", "text": "def get_overview(self):\r\n \r\n levels_label = np.unique(self.data[self.label_col])\r\n \r\n if self.values_norm==None:\r\n self.values_norm = [val for val in levels_label \r\n if val not in self.values_anom]\r\n if self.values_anom==None:\r\n self.values_anom = [val for val in levels_label \r\n if val not in self.values_norm]\r\n \r\n label = self.data[self.label_col]\r\n \r\n self.overview = ( '= Summary Dataset =\\n' +\r\n '===================\\n' )\r\n \r\n \r\n ### get indices of anomalies\r\n self.overview = self.overview + ('Number of values for each anomaly'+ \r\n 'level:\\n')\r\n indicator_anom = label == \"\"\r\n # s = 0\r\n for val in self.values_anom:\r\n indicator_anom = np.logical_or( indicator_anom, (label == val) )\r\n self.overview = (self.overview + ' ' + val + ': ' \r\n + repr(np.sum(label==val)) + ' values\\n')\r\n \r\n (IDX_anom,) = np.where(indicator_anom.values)\r\n \r\n ### get indices of normal events\r\n self.overview = (self.overview \r\n + '\\nNumber of values for each normal level:\\n')\r\n indicator_norm = label == \"\"\r\n for val in self.values_norm:\r\n indicator_norm = np.logical_or( indicator_norm, (label == val) )\r\n self.overview = (self.overview + ' ' + val + ': ' \r\n + repr(np.sum(label==val)) + ' values\\n')\r\n \r\n (IDX_norm,) = np.where(indicator_norm.values)\r\n \r\n ratio = float(len(IDX_anom)) / float(len(IDX_norm))\r\n self.overview = self.overview + ( '\\nOverview:\\n' +\r\n ' normal samples: ' + repr(len(IDX_norm)) + '\\n' +\r\n ' anomalous samples: ' + repr(len(IDX_anom)) + '\\n' +\r\n ' anom/norm ratio: ' + repr(ratio) + '\\n' +\r\n '====================\\n' )\r\n \r\n print self.overview\r\n return self.overview;", "title": "" }, { "docid": "f603b8d800ee7eeca45cd104c670cfe0", "score": "0.5160805", "text": "def imprime(self):\n print u\"\\n ******** {0} ********* \\n\".format(self.report_type)\n for container in self._containers:\n print u\"(L{0} {1}_{2}) {3} [{5}-{6}] ({4}):\".format(\n container.tree_level,\n container.concept.schema,\n container.concept.value,\n container.concept.meaning.values(),\n len(container.attributes),\n container.properties.max_cardinality,\n container.properties.min_cardinality).encode('utf-8')\n for attr in container.attributes:\n print u\" - {0} ({1}) ({2})\".format(\n attr.concept.meaning,\n attr.type,\n attr.properties).encode('utf-8')\n print", "title": "" }, { "docid": "24c364423ae0f06898990920762fbeb7", "score": "0.51607424", "text": "def do_stats(self, arg):\n user_count = 
self.hist.get_user_count()\n url_count = self.hist.get_url_count()\n visit_count = self.hist.get_visit_count()\n table = AsciiTable([\n [\"Users\", user_count],\n [\"Urls\", url_count],\n [\"Visits\", visit_count]\n ], \"Stats\")\n table.inner_heading_row_border = False\n print(table.table)", "title": "" }, { "docid": "9d4b8daccd609a2f614ccd6fee14404f", "score": "0.51601684", "text": "def main():\n # print(histogram_new(get_pledge_list()))\n print(print_hist_new(histogram_new(get_pledge_list())))", "title": "" }, { "docid": "05c1844e6b6a75dad00fb2eec499f0d1", "score": "0.5159052", "text": "def carStatus(): \r\n for car in carsDetected:\r\n print('Car ID: {0}, detected {1} times. Counted: {2}, last detection {3}, active {4}.'.format(car.ID, \\\r\n car.timesDetected, car.counted, round(car.lastDetection,1), car.isActive))", "title": "" }, { "docid": "2ad154e0e2e63a94abfa408609e33a40", "score": "0.51589197", "text": "def _print_results(self, out=sys.stdout):\n self._results['SUCCESS'] = []\n self._results['WARNING'] = []\n self._results['DANGER'] = []\n\n for metric, score in self._metric_averages.items():\n if pd.isna(score):\n continue\n if score >= 0.9:\n self._results['SUCCESS'].append(\n DIAGNOSTIC_REPORT_RESULT_DETAILS[metric]['SUCCESS'])\n elif score >= 0.5:\n self._results['WARNING'].append(\n DIAGNOSTIC_REPORT_RESULT_DETAILS[metric]['WARNING'])\n else:\n self._results['DANGER'].append(\n DIAGNOSTIC_REPORT_RESULT_DETAILS[metric]['DANGER'])\n\n out.write('\\nDiagnosticResults:\\n')\n print_results_for_level(out, self._results, 'SUCCESS')\n print_results_for_level(out, self._results, 'WARNING')\n print_results_for_level(out, self._results, 'DANGER')", "title": "" }, { "docid": "382a6952c5b7de9e70ded5672df86c91", "score": "0.5157718", "text": "def _print_summary_counts(\n self, categories, result_events_by_status, extra_rows):\n\n # Get max length for category printed name\n category_with_max_printed_name = max(\n categories, key=lambda x: len(x[1]))\n max_category_name_length = len(category_with_max_printed_name[1])\n\n # If we are provided with extra rows, consider these row name lengths.\n if extra_rows is not None:\n for row in extra_rows:\n name_length = len(row[0])\n if name_length > max_category_name_length:\n max_category_name_length = name_length\n\n banner_text = \"Test Result Summary\"\n banner_separator = \"\".ljust(len(banner_text), \"=\")\n\n self.out_file.write(\"\\n{}\\n{}\\n{}\\n\".format(\n banner_separator,\n banner_text,\n banner_separator))\n\n # Prepend extra rows\n if extra_rows is not None:\n for row in extra_rows:\n extra_label = \"{}:\".format(row[0]).ljust(\n max_category_name_length + 1)\n self.out_file.write(\"{} {:4}\\n\".format(extra_label, row[1]))\n\n for category in categories:\n result_status_id = category[0]\n result_label = \"{}:\".format(category[1]).ljust(\n max_category_name_length + 1)\n count = len(result_events_by_status[result_status_id])\n self.out_file.write(\"{} {:4}\\n\".format(\n result_label,\n count))", "title": "" }, { "docid": "0f5b0ee7b6bf425d4b2e4708cbb8beaf", "score": "0.5157704", "text": "def extract_findings_statistics(findings: list) -> dict:\n logger.info(\"Extracting audit statistics...\")\n stats = {}\n total_pass = 0\n total_fail = 0\n resources = set()\n findings_count = 0\n\n for finding in findings:\n # Save the resource_id\n resources.add(finding.resource_id)\n if finding.status == \"PASS\":\n total_pass += 1\n findings_count += 1\n if finding.status == \"FAIL\":\n total_fail += 1\n findings_count += 1\n\n 
stats[\"total_pass\"] = total_pass\n stats[\"total_fail\"] = total_fail\n stats[\"resources_count\"] = len(resources)\n stats[\"findings_count\"] = findings_count\n\n return stats", "title": "" }, { "docid": "0942963755d6290d7f7a9fd352b84ada", "score": "0.51499844", "text": "def summary(self):\n\n print(\"Anxiety Disorder:\\nWhat is it? :\\n\"\n \"Anxiety is a feeling of nervousness, worry, or unease that is a normal human experience. \"\n \"It is also present in a wide range of psychiatric disorders, \"\n \"including generalized anxiety disorder, panic disorder, and phobias. \"\n \"Although each of these disorders is different, they all feature distress and dysfunction \"\n \"specifically related to anxiety and fear.\")", "title": "" }, { "docid": "f8872559796f4d5c18193a3b1fb3f268", "score": "0.5143147", "text": "def describe(self):\n def format_int(name, n):\n if n == 0 or n == 1:\n return \"%i %s\" % (n, name[:-1])\n else:\n return \"%i %s\" % (n, name)\n\n empty_box = Colors.OKGREEN + '[ ]' + Colors.ENDC + ' '\n filled_box = Colors.OKGREEN + '[x]' + Colors.ENDC + ' '\n\n if self.start_time is None:\n print(empty_box + \"No records stored\")\n else:\n print((filled_box + format_int(\"records\", len(self.records)) +\n \" from %s to %s\" % (self.start_time, self.end_time)))\n\n nb_contacts = bc.individual.number_of_contacts(\n self, interaction='callandtext', groupby=None)\n nb_contacts = nb_contacts['allweek']['allday']['callandtext']\n if nb_contacts:\n print(filled_box + format_int(\"contacts\", nb_contacts))\n else:\n print(empty_box + \"No contacts\")\n\n if self.has_attributes:\n print(filled_box + format_int(\"attributes\", len(self.attributes)))\n else:\n print(empty_box + \"No attribute stored\")\n\n if len(self.antennas) == 0:\n print(empty_box + \"No antenna stored\")\n else:\n print(filled_box + format_int(\"antennas\", len(self.antennas)))\n\n if self.has_recharges:\n print(filled_box + format_int(\"recharges\", len(self.recharges)))\n else:\n print(empty_box + \"No recharges\")\n\n if self.has_home:\n print(filled_box + \"Has home\")\n else:\n print(empty_box + \"No home\")\n\n if self.has_text:\n print(filled_box + \"Has texts\")\n else:\n print(empty_box + \"No texts\")\n\n if self.has_call:\n print(filled_box + \"Has calls\")\n else:\n print(empty_box + \"No calls\")\n\n if self.has_network:\n print(filled_box + \"Has network\")\n else:\n print(empty_box + \"No network\")", "title": "" }, { "docid": "91672819e2d8844a12c8aa7595d55130", "score": "0.51371145", "text": "def report(self):\n\n print(\"Average number of episodes: {:.2f}\".format(np.mean(self.episodes_to_complete)))", "title": "" }, { "docid": "fd7463568adcf00c343caf855fa17d50", "score": "0.51342547", "text": "def print_statistics(self, fixed_total_errors):\n single_edit_errors = self.total_errors\n multi_edit_errors = 0.0\n if fixed_total_errors > self.total_errors:\n multi_edit_errors = fixed_total_errors - self.total_errors\n self.total_errors = fixed_total_errors\n\n print '*****************************************************'\n print 'Single-edit/multi-edit errors percentage'\n print '*****************************************************'\n print 'Single-edit errors'.ljust(30) + '\\t:\\t' + repr(single_edit_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * single_edit_errors / self.total_errors)+'%'\n print 'Multi-edit errors'.ljust(30) + '\\t:\\t' + repr(multi_edit_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * multi_edit_errors / self.total_errors)+'%'\n print 'Total errors'.ljust(30) + 
'\\t:\\t' + repr(self.total_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * (single_edit_errors + multi_edit_errors) / self.total_errors)+'%'\n print '\\n'\n\n print '*****************************************************'\n print 'Error types and their frequencies in the error corpus'\n print '*****************************************************'\n print 'Substitution errors'.ljust(30) + '\\t:\\t' + repr(self.num_sub_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * self.num_sub_errors / self.total_errors)+'%'\n print ' - Case errors'.ljust(30) + '\\t:\\t' + repr(self.case_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * self.case_errors / self.num_sub_errors)+'% (of sub errors)'\n print ' - Diacritic errors'.ljust(30) + '\\t:\\t' + repr(self.diac_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * self.diac_errors / self.num_sub_errors)+'% (of sub errors)'\n print 'Insertion errors'.ljust(30) + '\\t:\\t' + repr(self.num_add_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * self.num_add_errors / self.total_errors)+'%'\n print 'Deletion errors'.ljust(30) + '\\t:\\t' + repr(self.num_del_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * self.num_del_errors / self.total_errors)+'%'\n print 'Swap errors'.ljust(30) + '\\t:\\t' + repr(self.num_swap_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * self.num_swap_errors / self.total_errors)+'%'\n print 'Multi-edit errors'.ljust(30) + '\\t:\\t' + repr(multi_edit_errors).rjust(10) + '\\t:\\t' + '{:5.2f}'.format(100.0 * multi_edit_errors / self.total_errors)+'%'\n print '\\n'\n print 'Total errors'.ljust(30) + '\\t:\\t' + repr(self.total_errors).rjust(10)\n print '\\n'\n\n print '*****************************************************'\n print 'Substitution errors'\n print '*****************************************************'\n sorted_sub = sorted(self.cm_sub.items(), key=operator.itemgetter(1), reverse=True)\n for edit_key, v in sorted_sub:\n print 's_'+ edit_key.encode('utf-8') + '\\t' + repr(v)\n print '\\n'\n\n print '*****************************************************'\n print 'Deletion errors'\n print '*****************************************************'\n sorted_del = sorted(self.cm_del.items(), key=operator.itemgetter(1), reverse=True)\n for edit_key, v in sorted_del:\n print 'd_'+ edit_key.encode('utf-8') + '\\t' + repr(v)\n print '\\n'\n\n print '*****************************************************'\n print 'Insertion errors'\n print '*****************************************************'\n sorted_add = sorted(self.cm_add.items(), key=operator.itemgetter(1), reverse=True)\n for edit_key, v in sorted_add:\n print 'i_'+ edit_key.encode('utf-8') + '\\t' + repr(v)\n print '\\n'\n\n print '*****************************************************'\n print 'Swap errors'\n print '*****************************************************'\n sorted_swap = sorted(self.cm_rev.items(), key=operator.itemgetter(1), reverse=True)\n for edit_key, v in sorted_swap:\n print 'swap_'+ edit_key.encode('utf-8') + '\\t' + repr(v)\n print '\\n'\n return", "title": "" }, { "docid": "89453a37811e57b919c29dd1f14cdd32", "score": "0.5120818", "text": "def print_info(self, outf=''):\n\t\tif outf != '':\n\t\t\tbackup = sys.stdout\n\t\t\tsys.stdout = open(outf, 'wb')\n\n\t\tfor SVC_CTRY,kdict in self.statD.iteritems():\n\t\t\tprint \"SVC_CTRY:,\", SVC_CTRY\n\t\t\t#print \"searches(top 1500) , \", kdict.sum_of_tops(1500)\n\t\t\tN = 1500\n\t\t\tprint \"# Top %s\" % (N)\n\t\t\tkdict.prn(N)\n\n\t\tif outf != '':\tsys.stdout = 
backup", "title": "" }, { "docid": "45068dee9958578d578fbb81ac69004e", "score": "0.51201975", "text": "def list_veh_counts(self):\n logger.info('Total Number Vehicles %s (Equipped Vehicles %s)' % (str(self.veh_count), str(self.equipped_veh_count)) )\n logger.info('PDM only %s (DCRC %s, Celluar %s, Dual %s) '\n % (str(self.PDM_count), str(self.PDM_DSRC_count), str(self.PDM_cell_count), str(self.PDM_both_count) ))\n logger.info('BSM only %s (DCRC %s, Celluar %s, Dual %s) '\n % (str(self.BSM_count), str(self.BSM_DSRC_count), str(self.BSM_cell_count), str(self.BSM_both_count) ))\n logger.info('PDM and BSM %s (DCRC %s, Celluar %s, Dual %s) '\n % (str(self.BSM_PDM_count), str(self.BSM_PDM_DSRC_count), str(self.BSM_PDM_cell_count), str(self.BSM_PDM_both_count) ))\n\n logger.info('CAM only %s (DCRC %s, Celluar %s, Dual %s) '\n % (str(self.CAM_count), str(self.CAM_DSRC_count), str(self.CAM_cell_count), str(self.CAM_both_count) ))\n logger.info('SPOT only %s (DCRC %s) '\n % (str(self.SPOT_count), str(self.SPOT_count) ))", "title": "" }, { "docid": "0e0c2eecdfb25995249c48ae212cbf1c", "score": "0.5119752", "text": "def output(cls) :\n print( '\\n *** results of Event selection *** < Number of selects : ',\\\n '{:4d}'.format( EventFlags.ndef() ), '>' )\n for flag in EventFlags.__dict_of_flags.values() :\n print( f'{flag.__accum:10.0f} : ', flag.__key )\n print( '' )", "title": "" }, { "docid": "310d67aa4496d7cf96cf1217865b9caa", "score": "0.511035", "text": "def game_summary(chat_id):\n\tcurr_game = get_curr_game(chat_id)\n\ttext = \"Current Mission progress: \\n \\n\"\n\ttext += \"Mission 1: \" \n\tif curr_game.msn1 == \"\":\n\t\tfor i in range(missionDict[curr_game.num_player][1]):\n\t\t\ttext += emoman\n\telse:\n\t\tnumfail1 = missionDict[curr_game.num_player][1] - curr_game.msn1numsucc\n\t\tfor i in range(curr_game.msn1numsucc):\n\t\t\ttext += emotick\n\t\tfor i in range(numfail1):\n\t\t\ttext += emocross\n\t\tif curr_game.msn1 == \"Success\":\n\t\t\ttext += \" Mission SUCCESS\"\n\t\telse:\n\t\t\ttext += \" Mission FAILURE\"\n\ttext += \"\\nMission 2: \"\n\tif curr_game.msn2 == \"\":\n\t\tfor i in range(missionDict[curr_game.num_player][2]):\n\t\t\ttext += emoman\n\telse:\n\t\tnumfail2 = missionDict[curr_game.num_player][2] - curr_game.msn2numsucc\n\t\tfor i in range(curr_game.msn2numsucc):\n\t\t\ttext += emotick\n\t\tfor i in range(numfail2):\n\t\t\ttext += emocross\n\t\tif curr_game.msn2 == \"Success\":\n\t\t\ttext += \" Mission SUCCESS\"\n\t\telse:\n\t\t\ttext += \" Mission FAILURE\"\n\ttext += \"\\nMission 3: \"\n\tif curr_game.msn3 == \"\":\n\t\tfor i in range(missionDict[curr_game.num_player][3]):\n\t\t\ttext += emoman\n\telse:\n\t\tnumfail3 = missionDict[curr_game.num_player][3] - curr_game.msn3numsucc\n\t\tfor i in range(curr_game.msn3numsucc):\n\t\t\ttext += emotick\n\t\tfor i in range(numfail3):\n\t\t\ttext += emocross\n\t\tif curr_game.msn3 == \"Success\":\n\t\t\ttext += \" Mission SUCCESS\"\n\t\telse:\n\t\t\ttext += \" Mission FAILURE\"\n\ttext += \"\\nMission 4: \"\n\tif curr_game.msn4 == \"\":\n\t\tfor i in range(missionDict[curr_game.num_player][4]):\n\t\t\ttext += emoman\n\telse:\n\t\tnumfail4 = missionDict[curr_game.num_player][4] - curr_game.msn4numsucc\n\t\tfor i in range(curr_game.msn4numsucc):\n\t\t\ttext += emotick\n\t\tfor i in range(numfail4):\n\t\t\ttext += emocross\n\t\tif curr_game.msn4 == \"Success\":\n\t\t\ttext += \" Mission SUCCESS\"\n\t\telse:\n\t\t\ttext += \" Mission FAILURE\"\n\ttext += \"\\nMission 5: \"\n\tif curr_game.msn5 == \"\":\n\t\tfor i in 
range(missionDict[curr_game.num_player][5]):\n\t\t\ttext += emoman\n\telse:\n\t\tnumfail5 = missionDict[curr_game.num_player][5] - curr_game.msn5numsucc\n\t\tfor i in range(curr_game.msn5numsucc):\n\t\t\ttext += emotick\n\t\tfor i in range(numfail5):\n\t\t\ttext += emocross\n\t\tif curr_game.msn5 == \"Success\":\n\t\t\ttext += \" Mission SUCCESS\"\n\t\telse:\n\t\t\ttext += \" Mission FAILURE\"\n\ttext += \"\\n\\nConsecutive nomination failures: \" + str(curr_game.conesequetive_fail_votes_num) + \"/5\\n \\n\"\n\treturn text", "title": "" }, { "docid": "a2e9f645b2e894e64b60642227ad2ad4", "score": "0.51009935", "text": "def occ_report_poisson(self) -> None:\n\n for ward in self.hospital.wards:\n self.occupancy_poisson[ward.index, ward.beds.count] += 1", "title": "" }, { "docid": "110f69c17a3174206ca0ba2586551c62", "score": "0.5099582", "text": "def _get_diagnostics(self, instance):\n vm_ref = vm_util.get_vm_ref(self._session, instance)\n lst_properties = [\"summary.config\",\n \"summary.quickStats\",\n \"summary.runtime\"]\n vm_props = self._session._call_method(vim_util,\n \"get_object_properties\", None, vm_ref, \"VirtualMachine\",\n lst_properties)\n query = vm_util.get_values_from_object_properties(self._session,\n vm_props)\n data = {}\n # All of values received are objects. Convert them to dictionaries\n for value in query.values():\n prop_dict = vim_util.object_to_dict(value, list_depth=1)\n data.update(prop_dict)\n return data", "title": "" }, { "docid": "81c52c16f4560ea164f3110d73882461", "score": "0.5098633", "text": "def describe_data(self):\n\n out_string = \"\"\"\n Simulation holds {} photons of which {} have escaped. This corresponds to {:2.2f} % escaped\n \"\"\"\n\n num_of_photons = len(self.photon_status)\n num_of_escaped = sum(invert(self.photon_status))\n print(out_string.format(num_of_photons, num_of_escaped, num_of_escaped/num_of_photons*1e2))", "title": "" }, { "docid": "9d7b4f41199a5090fe58ac72122ffbd1", "score": "0.5097145", "text": "def get_statistics():\n return 'Total: ' + str(total_amount) + ' ' +\\\n 'Good: ' + str(amount_of_good) + ' ' +\\\n 'Bad: ' + str(amount_of_bad)", "title": "" }, { "docid": "70b8241bfd050382a69453dbdfe16004", "score": "0.50853854", "text": "def print_counts_and_priors(self):\n line_to_string = lambda target, count: \"%s %.2f\" % (target, count)\n print \"\\n\".join([line_to_string(target, count) for target, count in self.priors.iteritems()])\n\n format_line = lambda target, (sensor, value), count: \"%s %s %s %.2f\" % (target, sensor, value, count)\n output_for_target = lambda target: \"\\n\".join([format_line(target, setting, count)\n for setting, count in sorted(self.counts.loc[target].iteritems())])\n print \"\\n\".join([output_for_target(target) for target in sorted(self.counts.index)])", "title": "" }, { "docid": "e5a073b63540c4b04b1014a4ed7b4824", "score": "0.50838655", "text": "def showData():\n counter = 0\n for key,dbclass in myglobal._ltre_.dbclassTable.items():\n for datum in dbclass.facts:\n counter += 1\n print(\"Fact #\", counter, \"=>\", datum)\n return counter", "title": "" }, { "docid": "2c374aca93d8c663a1cd08acc015d86a", "score": "0.50750613", "text": "def command_incidence(self):\n incidences = np.sum(self.counts, axis = 1)\n incidence_by_name = {}\n for name, count in zip(self.names, incidences):\n incidence_by_name[name] = count\n return incidence_by_name", "title": "" }, { "docid": "f4785a59de7e72b24b72ea9f0900ff9f", "score": "0.5070662", "text": "def hit_count_analysis(self):\n self.initialise_category_hit_counts()\n 
all_allowed_non_verbs = [non_verb.title for non_verb in NonVerb.objects.all()]\n lo_text = self.get_learning_outcome().text\n tokenised_LO = nlp(lo_text)\n for token in tokenised_LO:\n if token.pos_ == \"VERB\":\n self.handle_detected_verb(token.lemma_)\n elif token.text in all_allowed_non_verbs:\n self.handle_detected_allowed_non_verb(token.text)", "title": "" }, { "docid": "1e6a774096aac53790fb0d9f663a202e", "score": "0.5070321", "text": "def report(self):\n print(jc.bold(jc.yellow('\\n////// Report /////')))\n for x in self.actions:\n self.set_result(x[1], x[0], norecord=True)", "title": "" }, { "docid": "b71ed3aa19c46f7a02a52885567505dc", "score": "0.506864", "text": "def time_stats(df):\n\n\n print('\\nCalculating the most frequent times of travel...\\n')\n\n # display the most common month\n print('\\tMonth:', df['Month'].mode()[0])\n\n # display the most common day of week\n print('\\tDay:', df['Day'].mode()[0])\n\n # display the most common start hour\n print('\\tHour:', df['Hour'].mode()[0])\n\n print('\\033[0;046m-\\033[1;m'*80)", "title": "" }, { "docid": "56beb73537f78b72e83871d186a3ec05", "score": "0.50666785", "text": "def display_reported_traits(id,scores):\n\n reported_traits = set()\n for score in scores:\n rt = score.trait_reported\n reported_traits.add(rt)\n\n reported_trait_html = ''\n for rt in sorted(reported_traits):\n reported_trait_html += '<li>{}</li>'.format(rt)\n\n score_html = '<span class=\"pgs_result_button\"> <a class=\"toggle_btn pgs_btn_plus\" id=\"{}_scores\">Show Reported Traits</a></span>'.format(id)\n score_html += '<div class=\"toggle_content\" id=\"list_{}_scores\" style=\"display:none\">'.format(id)\n score_html += '<div class=\"pgs_result_reported_traits\"><ul>{}</ul></div>'.format(reported_trait_html)\n score_html += '</div>'\n\n return score_html", "title": "" }, { "docid": "2a6fd162fe3a46555861a745cf543630", "score": "0.5065322", "text": "def print_general_results(result, label):\n pass", "title": "" }, { "docid": "2a6fd162fe3a46555861a745cf543630", "score": "0.5065322", "text": "def print_general_results(result, label):\n pass", "title": "" }, { "docid": "0f5ce19ca1d6523365d77e6056336c78", "score": "0.5061222", "text": "def report_total_messages_stats(sect, stats, previous_stats):\r\n lines = ['type', 'number', 'previous', 'difference']\r\n lines += table_lines_from_stats(stats, previous_stats,\r\n ('convention', 'refactor',\r\n 'warning', 'error'))\r\n sect.append(Table(children=lines, cols=4, rheaders=1))", "title": "" }, { "docid": "bfe8238a379f51ffe93d9fe13a17bb3e", "score": "0.50600034", "text": "def printStatistics(self, showWindow=1):\n\n if showWindow and self.statisticsWindow.state() in ['withdrawn', 'iconic']:\n self.statisticsWindow.deiconify()\n\n if self.pbmRec:\n s = self.pbmRec.printStatistics()\n else: s=''\n if self.currentFile:\n cf = os.path.basename(self.currentFile)\n else:\n cf = 'None'\n s = _('Database: ')+ cf + '\\n\\n' + s\n self.statisticsText.set(s)", "title": "" }, { "docid": "201b378fee4758c18dcb06aa39b177ce", "score": "0.50545186", "text": "def print_status(self):\n # print(self.state_q_values_full)\n if self.episodes < self.supervised_episodes:\n pass\n else:\n print(self.feature_screen)", "title": "" }, { "docid": "5fb493c0a268f9ee88054f969e3da7ca", "score": "0.50535417", "text": "def summarize(self):\n for_print = super().summarize()\n if self.routine in (\"Running\", \"Bicycling\"):\n for_print += (\"Distance - Time - Avg. 
Speed - Calories Burned]:\\n\")\n for_print += (\" \"*8 + \"{:.2f}\".format(self.distance)+\" \"+self.units+\"\\n\")\n for_print += (\" \"*8 + \"{:.0f}\".format(self.duration)+\" minutes\\n\")\n for_print += (\" \"*8 + \"{:.1f}\".format(self.speed)+\" \"+self.units+\" per hour (avg.)\\n\")\n \n elif self.routine == \"Sprints\":\n for_print += (\"Distance - Rounds - Times - Avg. Speed - Calories Burned]:\\n\")\n for_print += (\" \"*8 + \"{:.0f}\".format(self.distance)+\" meters - \"+\"{:.0f}\".format(self.rounds)+\" rounds\\n\")\n for i in range(self.rounds):\n for_print += (\" \"*8 + \"{:.2f}\".format(self.sprint_list[i]) + \"s, \" + \n \"{:.1f}\".format(self.split_list[i]) + \"mph (avg.)\\n\")\n elif self.routine == \"Pick-up Basketball\":\n for_print += (\"Duration - Calories Burned]:\\n\")\n for_print += (\" \"*8 + \"{:.1f}\".format(self.duration)+\" minutes\\n\")\n else:\n for_print += \"PRINT ERROR\"\n for_print += (\" \"*8 + \"{:.0f}\".format(self.calories)+\" calories burned (est.)\\n\")\n return for_print", "title": "" }, { "docid": "68f779a21bccfc1536f145e0f9911e91", "score": "0.50529295", "text": "def _get_term_stats(self, term):\n p = subprocess.Popen([self.cmdpath, 'dump-key-value',\n os.path.join(self.path, self.part), term],\n stdout=subprocess.PIPE)\n output = p.communicate()[0]\n freqs = [line.count(',') - 1 for line in output.splitlines()[1:]]\n return {'cf': sum(freqs), 'df': len(freqs)}", "title": "" }, { "docid": "b18dcccd0e91b129c868e4c7fd23d568", "score": "0.5050712", "text": "def summarize(self):\n self.control_count = len(self.experiments)\n self.enabled_experiments = [experiment for experiment in self.experiments if experiment.is_enabled]\n self.enabled_count = len(self.enabled_experiments)\n self.contrary_experiments = [experiment for experiment in self.enabled_experiments\n if experiment.status == 'contrite']\n self.contrary_results = len(self.contrary_experiments)\n\n def elapsed(start, end):\n return end - start\n\n self.control_elapse_times = [elapsed(experiment.control.start_time, experiment.control.end_time) for experiment\n in self.enabled_experiments]\n self.control_avg_time = sum(self.control_elapse_times) / float(len(self.control_elapse_times))\n\n self.trial_elapse_times = [elapsed(experiment.trial.start_time, experiment.trial.end_time) for experiment in\n self.enabled_experiments]\n self.trial_avg_time = sum(self.trial_elapse_times) / float(len(self.trial_elapse_times))\n self.statuses = {}\n for experiment in self.experiments:\n if experiment.status not in self.statuses:\n self.statuses[experiment.status] = 0\n self.statuses[experiment.status] += 1", "title": "" }, { "docid": "1010714b87c9308f49d15e90ec7df67c", "score": "0.50500107", "text": "def report(query, label, refresh=True):\n # Header\n print('*** Report for {}\\n'.format(label))\n print('#+attr_latex: :placement [H] :center nil')\n\n # Perform query\n s = ScopusSearch(query, refresh=refresh)\n journal_res = [p for p in s.results if p.aggregationType == \"Journal\"]\n\n # Parse results\n doc_types = Counter([p.aggregationType for p in s.results])\n paper_cites = {(p.title, p.doi): int(p.citedby_count) for p in journal_res}\n Ncites = sum(paper_cites.values())\n papers = len(journal_res)\n author_count = [len(p.authid.split(\";\")) for p in journal_res]\n au_counts = defaultdict(lambda: 0)\n j_counts = defaultdict(lambda: 0)\n for p in journal_res:\n for auth in zip(p.authname.split(\";\"), p.authid.split(\";\")):\n key = (auth[0], auth[1])\n au_counts[key] += 1\n jkey = (p.publicationName, 
p.source_id, p.issn)\n j_counts[jkey] += 1\n\n # Document information\n print('#+caption: Types of documents found for {}.'.format(label))\n print('| Document type | count |\\n|-')\n for key, value in doc_types.items():\n print('| {} | {} |'.format(key, value))\n\n print('\\n\\n{} articles ({} citations) '\n 'found by {} authors'.format(papers, Ncites, len(au_counts)))\n\n # Author counts {(name, scopus-id): count}\n auth_url = \"[[https://www.scopus.com/authid/detail.uri?authorId={}][{}]]\"\n view = [(auth_url.format(k[1], k[0]), v, k[1])\n for k, v in au_counts.items()]\n view.sort(reverse=True, key=itemgetter(1))\n print('\\n#+attr_latex: :placement [H] :center nil')\n print('#+caption: Author publication counts for {0}.'.format(label))\n print('| name | count | categories |\\n|-')\n for name, count, identifier in view[:20]:\n cats = ', '.join(['{} ({})'.format(cat[0], cat[1])\n for cat in get_subject_docs(identifier, refresh)[0:3]])\n print('| {} | {} | {} |'.format(name, count, cats))\n\n # Journal information\n jour_url = '[[https://www.scopus.com/source/sourceInfo.url?sourceId={}][{}]]'\n jview = [(jour_url.format(k[1], k[0][0:50]), k[1], k[2], v)\n for k, v in j_counts.items()]\n jview.sort(reverse=True, key=itemgetter(3))\n print('\\n\\n#+attr_latex: :placement [H] :center nil')\n print('#+caption: Journal publication counts for {}.'.format(label))\n print('| Journal | count |\\n|-')\n for journal, sid, issn, count in jview[0:12]:\n print('| {} | {} |'.format(journal, count))\n\n # Top cited papers\n pview = [('[[{}][{}]]'.format(k[1], k[0][0:60]), int(v))\n for k, v in paper_cites.items()]\n pview.sort(reverse=True, key=itemgetter(1))\n h_index = hindex([p[1] for p in pview])\n print('\\n\\n#+attr_latex: :placement [H] :center nil')\n print('#+caption: Top cited publication'\n ' counts for {}. h-index = {}.'.format(label, h_index))\n print('| title | cite count |\\n|-')\n for title, count in pview[0:10]:\n print('| {} | {} |'.format(title, count))\n\n # Plot authors per publication\n plt.figure()\n plt.hist(author_count, 20)\n plt.xlabel('# authors')\n plt.ylabel('frequency')\n plt.savefig('{}-nauthors-per-publication.png'.format(label))\n\n # Bibliography\n print('\\n\\n#+caption: Number of authors '\n 'on each publication for {}.'.format(label))\n print('[[./{}-nauthors-per-publication.png]]'.format(label))\n print('''**** Bibliography :noexport:\n :PROPERTIES:\n :VISIBILITY: folded\n :END:''')\n for i, p in enumerate(journal_res):\n abstract = AbstractRetrieval(p.eid)\n print('{}. {}\\n'.format(i + 1, abstract))", "title": "" }, { "docid": "403273e41d9b9efbbcf3bab1696925aa", "score": "0.50449145", "text": "def summary(self):\r\n set_counts = {}\r\n print('There are %d classes.' 
% (len(self.word_to_index)))\r\n print(\"1%% <-> %d samples in 'training'\" % int(\r\n self.set_size('training') / 100))\r\n for set_index in ['training', 'validation', 'testing']:\r\n counts = {k: 0 for k in sorted(self.word_to_index.keys())}\r\n num_total = self.set_size(set_index)\r\n for data_point in self.data_index[set_index]:\r\n counts[data_point['label']] += (1.0 / num_total) * 100.0\r\n set_counts[set_index] = counts\r\n\r\n print('%-13s%-6s%-6s%-6s' % ('', 'Train', 'Val', 'Test'))\r\n for label_name in sorted(\r\n self.word_to_index.keys(), key=self.word_to_index.get):\r\n line = '%02d %-12s: ' % (self.word_to_index[label_name], label_name)\r\n for set_index in ['training', 'validation', 'testing']:\r\n line += '%.1f%% ' % (set_counts[set_index][label_name])\r\n print(line)", "title": "" }, { "docid": "153f531e48187d3828b05553d574a2da", "score": "0.50386024", "text": "def search_summary(self):\n # add tuning check\n invalidOperationError(self.use_hpo, \"No search summary when HPO is disabled.\")\n return self.trainer.search_summary()", "title": "" }, { "docid": "4b9ccf282fea596fde7965adc2c64af2", "score": "0.5029602", "text": "def printHits(self):\n for hit in self.hits:\n hit.printHit()", "title": "" }, { "docid": "624c2e4325ad46904ebb06b4d2481567", "score": "0.50231826", "text": "def get_report(self): \n t = ' '.join(['File: ', self.fn, ' # of atoms:', str(len(self.atoms)), '\\n'])\n for r,a in zip(self.residues, self.atoms ):\n t += ' '.join(['resi: ', str(r) ,' atom: ', str(a) , '\\n' ])\n return t", "title": "" }, { "docid": "e315fe881e5fd3fe02eee98ec3f10c38", "score": "0.50157064", "text": "def print_stats(stats):\n for key, item in stats.items():\n item['avg_len'] = int(item['lines'] / item['num'])\n item['oldest'] = item['oldest'].ctime()\n item['newest'] = item['newest'].ctime()\n if key != \"#global#\":\n print(OUTPUT_FMT.format(**item))\n else:\n item['name'] = '-GLOBAL-'\n print(OUTPUT_FMT.format(**stats['#global#']))", "title": "" }, { "docid": "9650d43ebaaee0ec9687f1ea6df42a7a", "score": "0.5012829", "text": "def report_similarities(sect, stats, old_stats):\r\n lines = ['', 'now', 'previous', 'difference']\r\n lines += table_lines_from_stats(stats, old_stats,\r\n ('nb_duplicated_lines',\r\n 'percent_duplicated_lines'))\r\n sect.append(Table(children=lines, cols=4, rheaders=1, cheaders=1))", "title": "" } ]
c0fd0ca17a9d16fbd8a9de5c0dacd58d
Loads a saved sklearn regression model or train new model
[ { "docid": "bf8e4a74820266704bb2e2f077c513fc", "score": "0.689888", "text": "def load_model(model_path):\n path_n = model_path\n if os.path.isfile(path_n):\n fil = open(path_n, \"rb\")\n prediction_model = pickle.load(fil)\n print(\"model found\")\n else:\n prediction_model = model_train()\n print(\"new model is created\")\n return prediction_model", "title": "" } ]
[ { "docid": "0be94cf408d24cc8ffed8a3fa6a308d2", "score": "0.705837", "text": "def model(self):\n filePath1 = self.config['model_data1']['train_data']\n data = self.loadCSV(filePath1)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n filepath2 = self.config['model_pkl_1']['model_path']\n self.linearReg(X, y, filepath2)", "title": "" }, { "docid": "bb68faa4ddfc44fc7780aaa4f2d4dd90", "score": "0.6907461", "text": "def test_model_loading(self):\n # case 1\n regressor = XLNetRegressor(pretrained_model_name=\"xlnet-base-cased\")\n _ = regressor(self.inputs)\n\n # case 2\n regressor = XLNetRegressor(pretrained_model_name=\"xlnet-large-cased\")\n _ = regressor(self.inputs)", "title": "" }, { "docid": "7bb7d24365a012d5bc48132ceb2b189e", "score": "0.6894906", "text": "def model_load():\n\n if not os.path.exists(SAVED_MODEL):\n raise Exception(\"Model '{}' cannot be found did you train the model?\".format(SAVED_MODEL))\n \n model = joblib.load(SAVED_MODEL)\n return(model)", "title": "" }, { "docid": "1a22047c82adfe2155462643ecef6118", "score": "0.6743368", "text": "def load_model():\n # print(\"LOADING THE MODEL...\")\n with open(MODEL_PATH, \"rb\") as model_file:\n saved_model = pickle.load(model_file)\n return saved_model", "title": "" }, { "docid": "bcb8346692cef1d33c2c6242075f0622", "score": "0.66996634", "text": "def load_model(self, train = False,fileName = 'best_predictor'):\n \n if not train:\n self.predictor = joblib.load(fileName + '.pkl')\n else:\n self.train_model()\n return", "title": "" }, { "docid": "91da7947d491328b36b0a0ae0e1a28d5", "score": "0.6628513", "text": "def load_clf(trained_mod):\n\n # save model with open(wb) + pickle.dump.\n with open(trained_mod, 'rb') as file:\n model = pickle.load(file)\n\n return model", "title": "" }, { "docid": "25085609557deea7caf7854f05c1ce6a", "score": "0.66048074", "text": "def train_model_regression(\n X,\n X_test,\n y,\n params,\n model_name,\n models_dir,\n folds=KFold(n_splits=5, shuffle=True, random_state=11),\n model_type=\"lgb\",\n eval_metric=\"mae\",\n columns=None,\n plot_feature_importance=False,\n model=None,\n verbose=10000,\n early_stopping_rounds=200,\n n_estimators=50000,\n n=5,\n):\n columns = X.columns if columns is None else columns\n X_test = X_test[columns]\n\n if model_name + \".sav\" in os.listdir(models_dir):\n regressor = pickle.load(open(models_dir + \"/\" + model_name + \".sav\", \"rb\"))\n result_dict = load_obj(\"result_dict_\" + model_name)\n return regressor, result_dict\n\n # to set up scoring parameters\n metrics_dict = {\n \"mae\": {\n \"lgb_metric_name\": \"mae\",\n \"sklearn_scoring_function\": metrics.mean_absolute_error,\n },\n \"group_mae\": {\n \"lgb_metric_name\": \"mae\",\n \"scoring_function\": group_mean_log_mae,\n },\n \"mse\": {\n \"lgb_metric_name\": \"mse\",\n \"sklearn_scoring_function\": metrics.mean_squared_error,\n },\n }\n\n result_dict = {}\n\n # out-of-fold predictions on train data\n oof = np.zeros(len(X))\n\n # averaged predictions on train data\n prediction = np.zeros(len(X_test))\n\n # list of scores on folds\n scores = []\n feature_importance = pd.DataFrame()\n\n # split and train on folds\n for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):\n print(f\"Fold {fold_n + 1} started at {time.ctime()}\")\n if type(X) == np.ndarray:\n X_train, X_valid = X[columns][train_index], X[columns][valid_index]\n y_train, y_valid = y[train_index], y[valid_index]\n else:\n X_train, X_valid = (\n X[columns].iloc[train_index],\n X[columns].iloc[valid_index],\n )\n 
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]\n\n if model_type == \"lgb\":\n model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1)\n model.fit(\n X_train,\n y_train,\n eval_set=[(X_train, y_train), (X_valid, y_valid)],\n eval_metric=metrics_dict[eval_metric][\"lgb_metric_name\"],\n verbose=verbose,\n early_stopping_rounds=early_stopping_rounds,\n )\n\n y_pred_valid = model.predict(X_valid)\n y_pred = model.predict(X_test, num_iteration=model.best_iteration_)\n\n if model_type == \"sklearn\":\n model = model\n model.fit(X_train, y_train)\n\n y_pred_valid = model.predict(X_valid).reshape(-1,)\n score = metrics_dict[eval_metric][\"sklearn_scoring_function\"](\n y_valid, y_pred_valid\n )\n print(f\"Fold {fold_n}. {eval_metric}: {score:.4f}.\")\n print(\"\")\n\n y_pred = model.predict(X_test).reshape(-1,)\n\n\n oof[valid_index] = y_pred_valid.reshape(-1,)\n if eval_metric != \"group_mae\":\n scores.append(\n metrics_dict[eval_metric][\"sklearn_scoring_function\"](\n y_valid, y_pred_valid\n )\n )\n else:\n scores.append(\n metrics_dict[eval_metric][\"scoring_function\"](\n y_valid, y_pred_valid, X_valid[\"type\"]\n )\n )\n\n prediction += y_pred\n\n if model_type == \"lgb\" and plot_feature_importance:\n # feature importance\n fold_importance = pd.DataFrame()\n fold_importance[\"feature\"] = columns\n fold_importance[\"importance\"] = model.feature_importances_\n fold_importance[\"fold\"] = fold_n + 1\n feature_importance = pd.concat(\n [feature_importance, fold_importance], axis=0\n )\n\n prediction /= folds.n_splits\n\n print(\n \"CV mean score: {0:.4f}, std: {1:.4f}.\".format(np.mean(scores), np.std(scores))\n )\n\n result_dict[\"oof\"] = oof\n result_dict[\"prediction\"] = prediction\n result_dict[\"scores\"] = scores\n\n if model_type == \"lgb\":\n if plot_feature_importance:\n feature_importance[\"importance\"] /= folds.n_splits\n cols = (\n feature_importance[[\"feature\", \"importance\"]]\n .groupby(\"feature\")\n .mean()\n .sort_values(by=\"importance\", ascending=False)[:50]\n .index\n )\n\n best_features = feature_importance.loc[\n feature_importance.feature.isin(cols)\n ]\n\n plt.figure(figsize=(16, 12))\n sns.barplot(\n x=\"importance\",\n y=\"feature\",\n data=best_features.sort_values(by=\"importance\", ascending=False),\n )\n plt.title(\"LGB Features (avg over folds)\")\n\n result_dict[\"feature_importance\"] = feature_importance\n\n filename = models_dir + \"/\" + model_name + \".sav\"\n pickle.dump(model, open(filename, \"wb\"))\n save_obj(result_dict, \"result_dict_\" + model_name)\n\n return model, result_dict", "title": "" }, { "docid": "b8612985af6b4f59f2925a67e75975d3", "score": "0.6602088", "text": "def loadModel():\r\n global clf\r\n pickle_in = open(\"dict.pickle\", \"rb\")\r\n clf = pickle.load(pickle_in)", "title": "" }, { "docid": "9857f3a89b1713f1cac07a034e043498", "score": "0.65956074", "text": "def load_model():\n return joblib.load(MODEL_LOCAL_PATH)", "title": "" }, { "docid": "69b831fbddbdfa405ac531b106fb0439", "score": "0.65739834", "text": "def train_model(train_set_path, model_out_file):\n\n assert '.csv' in train_set_path, f'Received {train_set_path}! 
' \\\n f'Please provide a .csv file'\n hp = {'colsample_bytree': 0.3,\n 'gamma': 0.1,\n 'learning_rate': 0.1,\n 'max_depth': 12,\n 'min_child_weight': 7}\n train_set = pd.read_csv(train_set_path)\n train_y = train_set[['points']]\n train_x = train_set.drop(columns=['points'])\n\n logger.info(f'XGBoost Regression with parameters: {hp}')\n model = XGBRegressor(random_state=42,\n colsample_bytree=hp['colsample_bytree'],\n learning_rate=hp['learning_rate'],\n max_depth=hp['max_depth'],\n min_child_weight=hp['min_child_weight'],\n gamma=hp['gamma'])\n\n logger.info('Training model...')\n started = time()\n model.fit(train_x, train_y)\n\n logger.info(f'Model trained in {time() - started} seconds')\n os.makedirs(os.path.dirname(model_out_file), exist_ok=True)\n model.save_model(Path(model_out_file))\n logger.info(f'Models saved to {model_out_file}')", "title": "" }, { "docid": "4fa5a89963ac44a32eaa42f6aaab7596", "score": "0.6555823", "text": "def load_model(model):\n pass \n # TODO", "title": "" }, { "docid": "1b12e76c703b3acbd57d15744a8514c7", "score": "0.6552335", "text": "def _fit_model(self, X, y):\n from sklearn.linear_model import LinearRegression\n linreg = LinearRegression(fit_intercept=True)\n model = linreg.fit(X, y)\n return model", "title": "" }, { "docid": "9c7eb6574038ba09afdf9488cfc4b77e", "score": "0.6518384", "text": "def train_model(self):\n self.check_task()\n\n self.train_dataset = self.__load_and_cache_examples(\"train\")\n self.__train()\n\n # Takes care of distributed/parallel training\n model_to_save = self.model.module if hasattr(self.model, 'module') else self.model\n\n self.model = model_to_save # new\n del self.train_dataset", "title": "" }, { "docid": "18ad33d60e09bfae3e842f06f0a67318", "score": "0.6513541", "text": "def load_model(self, model_file=None):", "title": "" }, { "docid": "d9af42e72f4c1de5d8cb3fd5064ca6f6", "score": "0.6487402", "text": "def load_model(self):\n #Load the model from disk\n self.loaded_model = pickle.load(open(self.model_file_path, 'rb' ))", "title": "" }, { "docid": "00241f7a77999596deadb18f971d59f5", "score": "0.6481353", "text": "def load_clf(filename):\n from sklearn.externals import joblib\n loaded_model = joblib.load(filename)\n return loaded_model", "title": "" }, { "docid": "5019e03587b3f861c20cdea75b6ba8da", "score": "0.6477127", "text": "def init_linearModel(training_path):\n from sklearn.linear_model import LinearRegression\n training = pandas.DataFrame.from_csv(training_path)\n training = training.as_matrix()\n X = training[:, 0:5]\n Y = training[:,5]\n lr = LinearRegression()\n lr.fit(X,Y)\n return lr", "title": "" }, { "docid": "45e7cf7958953d4b2b478ec581485f4a", "score": "0.6462413", "text": "def load_skl_model(models_path):\n with open(models_path, \"rb\") as model:\n model = pickle.load(model)\n print(\"Model loaded successfully.\")\n return model", "title": "" }, { "docid": "157289a05dfaca4d6fb08eaa3ff78a31", "score": "0.64510167", "text": "def train_model():\n train_X, train_Y = get_training_data()\n model = RandomForestClassifier(random_state=0)\n model.fit(train_X, train_Y)\n return model", "title": "" }, { "docid": "9235f38c8c7bd625362ab2320aa7d460", "score": "0.6447", "text": "def load_trained_model(self, save_path):\n self._train_saver.restore(self._session, save_path)\n self.__print(\"Trained model found in '\"+save_path+\"' restored!\")", "title": "" }, { "docid": "7c4bc7dabb01d24556f5e1d94dce6169", "score": "0.6430248", "text": "def train(*, data='data/formulas.pkl', out_model='models/model.pkl'):\n 
log.info('Loading dataset in {}...'.format(data))\n dataset = load_dataset(data)\n data = dataset['data']\n units = dataset['units']\n symbols = dataset['symbols']\n data_str = list(map(as_str, data))\n model = _fit_model(data_str)\n log.info('Save model to {}'.format(out_model))\n _save_model(model, out_model)", "title": "" }, { "docid": "a5c5a097a876e345e40a872e1a87cd44", "score": "0.641881", "text": "def linear1():\r\n #1.obatin dataset\r\n boston=load_boston()\r\n\r\n #2.split dataset\r\n x_train,x_test,y_train,y_test=train_test_split(boston.data, boston.target,random_state=22)\r\n\r\n #3.feature: normalization\r\n transfer=StandardScaler()\r\n x_train=transfer.fit_transform(x_train)\r\n x_test=transfer.transform(x_test)\r\n\r\n #4.estimator\r\n #fit()model\r\n estimator= LinearRegression()\r\n estimator.fit(x_train,y_train)\r\n #coef_intercept\r\n print(\"coef:\\n\",estimator.coef_)\r\n print(\"intercept:\\n\",estimator.intercept_)\r\n\r\n\r\n #save model\r\n joblib.dump(estimator,\"my_LR.pkl\")\r\n #load model\r\n estimator=joblib.load(\"my_LR.pkl\")\r\n\r\n #5.model evaluation\r\n y_predict = estimator.predict(x_test)\r\n print(\"y_predict:\\n\", y_predict)\r\n error = mean_squared_error(y_test, y_predict)\r\n print(\"LR error:\\n\", error)\r\n\r\n return None", "title": "" }, { "docid": "0f2394a7decefae77967aac9319e91cb", "score": "0.6416527", "text": "def load_model(path_to_model : str):\n return pickle.load(open(path_to_model, 'rb'))", "title": "" }, { "docid": "a1f24e5c6bbf50e247edceb58a56942f", "score": "0.6406823", "text": "def loadmodel(filename=\"Request\"):\r\n pickle.load(open(path+\"/model/\"+filename+\".pkl\", 'rb'))", "title": "" }, { "docid": "e588d24840feae26856b88d083322962", "score": "0.63980186", "text": "def load_model(save_folder, filename):\n logger.info(\"Warning: Make sure older models with this name have been trained on the same features! 
Otherwise,\"\n \"if the lengths of the features the model has been trained on, differ, an error will occur!\")\n import pickle\n _path = os.path.join(save_folder, filename)\n with open(_path, 'rb') as handle:\n return pickle.load(handle)", "title": "" }, { "docid": "e628363dac74846d0c9d6b51f63363c7", "score": "0.6397186", "text": "def myModel(features, target):\r\n X_train, X_test, y_train, y_test = train_test_split(features, target,test_size=0.25)\r\n model = LinearRegression()\r\n model.fit(X_train, y_train)\r\n y_actual = y_test\r\n y_pred = model.predict(X_test)\r\n mse = mean_squared_error(y_actual, y_pred)\r\n r2 = r2_score(y_actual, y_pred)\r\n rmse = np.sqrt(mse)\r\n return model.coef_, model.intercept_, rmse, r2, model", "title": "" }, { "docid": "72c5438f059c11a4538e42312b9a9ed0", "score": "0.6390317", "text": "def load_model(name: str):\n if not os.path.exists('{}{}'.format(paths.model_path, name)):\n raise FileNotFoundError('A model with this name does not exist yet.')\n\n # Load model\n model = joblib.load(open('{}{}/model.pkl'.format(paths.model_path, name), 'rb'))\n\n # Load list of prepared features from file\n with open('{}{}/prepared_features.txt'.format(paths.model_path, name)) as f:\n prepared_features = f.read().splitlines()\n\n # Load dict of actual features\n with open('{}{}/features.json'.format(paths.model_path, name)) as handle:\n features = json.loads(handle.read())\n\n return model, prepared_features, features", "title": "" }, { "docid": "052840928f5f2ff0f8038bb16e82f188", "score": "0.6383983", "text": "def load_model():\n model = keras.models.load_model(paths.MODEL)\n return model", "title": "" }, { "docid": "78c663796c2885da9cfe6346352ca8a4", "score": "0.63830286", "text": "def load_model(self, saved_model):\n self.total_model.load_weights(saved_model)", "title": "" }, { "docid": "d06dd0fa7436abc5d1cb1d83a163f0fd", "score": "0.63822037", "text": "def load_model(path_to_model:str):\n return pickle.load(open(path_to_model, 'rb'))", "title": "" }, { "docid": "d97fe121b9d11ae7224652699d6982d6", "score": "0.63785917", "text": "def load_model(path_to_model):\n model_path = os.path.join(path_to_model)\n model = pickle.load(open(model_path, 'rb'))\n return model", "title": "" }, { "docid": "f46b7e02df9cc508ae80b5271380d5c9", "score": "0.63764745", "text": "def train_regressors(all_data, yaml_filename=None, hyper_parameters_search=False):\n p = os.path.abspath(__file__)\n d = os.path.dirname(p)\n if yaml_filename is None:\n yaml_filename = os.path.join(d,'modeling_data','scalers_and_models_regression.yml')\n else:\n yaml_filename = os.path.join(d,'modeling_data',yaml_filename)\n\n accuracy_txt = os.path.join(d,'modeling_data','accuracy_regression.txt')\n current_version = list(map(int,sklearn.__version__.split('.')))\n\n scalers = {}\n models = {}\n accuracy = {}\n scalers_and_models = OrderedDict(\n version=current_version,\n scalers=scalers,\n models=models,\n accuracy=accuracy)\n\n possible_models = check_labels_regression(all_data)\n\n # r0_sphere model\n if possible_models['r0_sphere'] == True:\n features = saxs_math.profile_keys['unidentified']\n\n scaler, reg, acc = train(all_data, features, 'r0_sphere', hyper_parameters_search)\n\n scalers['r0_sphere'] = scaler.__dict__\n models['r0_sphere'] = reg.__dict__\n accuracy['r0_sphere'] = acc\n else:\n scalers['r0_sphere'] = None\n models['r0_sphere'] = None\n accuracy['r0_sphere'] = None\n\n\n # sigma_shpere model\n if possible_models['sigma_sphere'] == True:\n features = saxs_math.profile_keys['unidentified']\n 
features.extend(saxs_math.profile_keys['spherical_normal'])\n\n scaler, reg, acc = train(all_data, features, 'sigma_sphere', hyper_parameters_search)\n\n scalers['sigma_sphere'] = scaler.__dict__\n models['sigma_sphere'] = reg.__dict__\n accuracy['sigma_sphere'] = acc\n else:\n scalers['sigma_sphere'] = None\n models['sigma_sphere'] = None\n accuracy['sigma_sphere'] = None\n\n # rg_gp model\n if possible_models['rg_gp'] == True:\n features = saxs_math.profile_keys['unidentified']\n features.extend(saxs_math.profile_keys['guinier_porod'])\n\n scaler, reg, acc = train(all_data, features, 'rg_gp', hyper_parameters_search)\n\n scalers['rg_gp'] = scaler.__dict__\n models['rg_gp'] = reg.__dict__\n accuracy['rg_gp'] = acc\n else:\n scalers['rg_gp'] = None\n models['rg_gp'] = None\n accuracy['rg_gp'] = None\n\n # save scalers and models\n with open(yaml_filename, 'w') as yaml_file:\n yaml.dump(scalers_and_models, yaml_file)\n\n # save accuracy\n with open (accuracy_txt, 'w') as txt_file:\n txt_file.write(str(accuracy))", "title": "" }, { "docid": "1ffabcbf5009957ae58b3c5b7b922b24", "score": "0.6365097", "text": "def load_model():\n with open('./app/assets/iris_svc.pkl', 'rb') as model_file:\n loaded_model = pickle.load(model_file)\n\n return loaded_model", "title": "" }, { "docid": "a1467aff81365b675d7c6e0f248c3043", "score": "0.6361938", "text": "def load_model(self):\n\n # load old model if exists\n try:\n\n self.agent.load_model()\n print(\"Loaded model\")\n\n except:\n\n print(\"No model found\")\n pass", "title": "" }, { "docid": "edf94992d0f4fcc41e7894c966180039", "score": "0.63537616", "text": "def load_model(data, labels, filename, params, num_samples = 16):\n\n\t#reconstruct model without initializing parameters \n\tmodel = Model.get_model(data, labels, params) \n\n\t#Get sample data \n\tenc = OneHotEncoder(sparse = False) \n\tOHElabels = enc.fit_transform(labels.reshape(-1,1)) \n\tdata = data.reshape([-1]+params['image_shape']) \n\tselections = np.random.randint(len(data), size = num_samples) \n\ttdata, tlabels = data[selections], OHElabels[selections]\n\n\tsaver = tf.train.Saver() \n\twith tf.Session() as sess: \n\t\t#restore parameters \n\t\tsaver.restore(sess, filename) \n\n\t\tfeed_dict = {} \n\t\tfeed_dict[model.x] = tdata \n\t\tfeed_dict[model.y] = tlabels \n\t\tfeed_dict[model.training] = False \n\t\tpreds = sess.run(model.preds, feed_dict = feed_dict) \n\t\tprint(\"Predictions:\") \n\t\tprint(preds) \n\t\tprint(\"Labels:\") \n\t\tprint(labels) \n\t\tprint(\"Bad Predictions:\") \n\t\tprint(preds[preds != labels[selections]]) \n\t\tprint(\"Correct Labels:\") \n\t\tprint(labels[selections][preds != labels[selections]])", "title": "" }, { "docid": "967b5aec858ad3bc095a45258d98cfe0", "score": "0.6342597", "text": "def load_model():\n global bs_model\n\n model = loadbert(pretrained=True)\n model.eval()\n if use_gpu:\n model.cuda()", "title": "" }, { "docid": "a9632f53d825da0272464c88ab17f17d", "score": "0.63419557", "text": "def load_model():\n global model\n model = Net(n_feature=6, n_hidden=8, n_output=2)\n model.load_state_dict(torch.load('./trainedModel.pt'))\n model.eval()", "title": "" }, { "docid": "78db36cb88a4daeb38b0c4d05c3f316f", "score": "0.6333419", "text": "def load_model(model_path):\n with open(model_path, 'rb') as model_f:\n estimator = joblib.load(model_path)\n return estimator", "title": "" }, { "docid": "e2cfab7a41c549420df2e79c87fbc22a", "score": "0.6332758", "text": "def model_load(self, in_path_model):\n\t\tout_model = 
RandomForestClassificationModel.load(in_path_model)\n\t\treturn out_model", "title": "" }, { "docid": "205c0e25fdb992bf4a1a24de8deed4ff", "score": "0.63295394", "text": "def __import_model(self,mlInstance):\r\n self.model = ALSModel.load(mlInstance)\r\n predictions = self.model.transform(self.test)\r\n evaluator = RegressionEvaluator(\r\n metricName = 'rmse', labelCol = 'product_rating', predictionCol = 'prediction')\r\n self.rmse = evaluator.evaluate(predictions)", "title": "" }, { "docid": "49a4e89ca45a55f1afa3c0c25e1f40f2", "score": "0.63239783", "text": "def load_model(self, model_path=None):\n act_path = self.hparams.load_saved_model\n if model_path is not None:\n act_path = model_path\n\n try:\n self.saver.restore(self.sess, act_path)\n except:\n raise IOError(\"Failed to find any matching files for {0}\".format(act_path))", "title": "" }, { "docid": "1b12ed244f1a56f653aaf75c1a90c087", "score": "0.6315077", "text": "def loadModel(self, saveFile=\"model.h5\"):\n pass", "title": "" }, { "docid": "2399e4e4dbe36285722d171df9ab9768", "score": "0.63137925", "text": "def load_model(model_path):\n model = joblib.load(model_path)\n return model", "title": "" }, { "docid": "c7ed89bcc2d33d50befe8b77fa73b129", "score": "0.6313769", "text": "def load_model(self, path):\n\n print(colored(f'\\nLoading keras model from {path}\\n', \"green\"))\n self.model = load_model(path)", "title": "" }, { "docid": "9ade0fe4f0c359f555f61c57fabb9ace", "score": "0.6310141", "text": "def train():\n model = train_model()\n is_model_valid, metrics = validate_model(model)\n if is_model_valid is False:\n raise Exception(\"Invalid model.\")\n else:\n save_model(model)\n return metrics", "title": "" }, { "docid": "2cab4c4c48e3d4248586f2e5f9e4b748", "score": "0.6301592", "text": "def load_model(from_file):\n\n raise NotImplementedError", "title": "" }, { "docid": "58ce021ae435fbb2be05804918660bd1", "score": "0.6298538", "text": "def load_model(self, filepath=None):\n filepath = filepath or self.model_base_path + '.m'\n try:\n if self.is_training():\n self.tmodel = keras.models.load_model(filepath)\n sclog('Loaded model \"{0}.\"'.format(filepath))\n except IOError:\n raise ScNetError('Error reading model file \"{0}\"'.format(filepath))", "title": "" }, { "docid": "032b25874e1885a3e8cc9c538929060d", "score": "0.6289668", "text": "def train_model(dataset_filename):\n pass", "title": "" }, { "docid": "ba5eba60abce8b8e004f5525e8cca95a", "score": "0.62829787", "text": "def load_model(self, model_save_folder, train_config, dataset):\r\n self.preprocessor = simple_transform.SimplePreprocessModel()\r\n self.dataspec = self.preprocessor.load(os.path.join(model_save_folder, \"sklearn/preprocessor.pickle\"))\r\n self._build_model(train_config)\r\n self.config = train_config\r\n \r\n #Reason for below line https://stackoverflow.com/questions/63658086/tensorflow-2-0-valueerror-while-loading-weights-from-h5-file\r\n self.simple_predict(dataset, train_config)\r\n self.load_weights(os.path.join(model_save_folder, \"keras\", \"weights\"))", "title": "" }, { "docid": "5925451b057c9f55f466d119537ecb58", "score": "0.62736124", "text": "def train_model(in_csv, filename, out_dir):\n log = logging.getLogger('train-model')\n\n df = pd.read_csv(in_csv, index_col=\"Unnamed: 0\")\n X_train = df.drop('points', axis=1)\n y_train = df['points']\n\n log.info(\"Data read\")\n\n regression = Ridge()\n\n regression.fit(X_train, y_train)\n log.info(\"Model learned\")\n\n out_path = Path(out_dir) / f'{filename}.pckl'\n pickle.dump(regression, open(out_path, 
'wb'))\n log.info(\"Model saved\")", "title": "" }, { "docid": "44c37ed227c1103c444f5ae1e7722a10", "score": "0.62718546", "text": "def load_model(filename):\n\n from keras.models import load_model\n return load_model(filename)", "title": "" }, { "docid": "2a097a36c4e0e34a2ac3bce3e3c3897d", "score": "0.6271669", "text": "def load(self, model_path, *args, **kwargs):\n import pickle\n with open(model_path, 'rb') as file:\n self.clf = pickle.load(model_path)\n print(f'Loaded pretrained model from {model_path}.')", "title": "" }, { "docid": "16e4d97fed1bef7a4c374fc8a2a85f73", "score": "0.62704396", "text": "def create_linear_regression_model(self):\n\n model = LinearRegression()\n model.fit(self.X_train, self.y_train)\n score = model.score(self.X_test, self.y_test)\n print('Linear regression model:') \n print('score:\\t\\t{}'.format(score))", "title": "" }, { "docid": "835059ea04e48fe8715700771b935b3a", "score": "0.6266662", "text": "def test_models_are_restored():\n sh = SuccessiveHalvingRegressor([\"Tree\", \"RF\", \"AdaB\", \"LGB\"], random_state=1)\n sh.run(reg_train, reg_test)\n assert \"Tree\" not in sh._models # Original model is deleted\n assert all(m in sh.models for m in (\"Tree4\", \"RF2\", \"AdaB1\"))", "title": "" }, { "docid": "5b66a93a771fafe3e8e0dc6b581dfa31", "score": "0.6266607", "text": "def try_scikit_model(X_train, X_test, y_train, y_test, model_kwargs,\n eval_func=model_evaluation):\n start = time.time()\n model = model_kwargs['model'](**model_kwargs.get('model_params', {}))\n fitted = model.fit(X_train.values, y_train.values,\n **model_kwargs.get('fit_params', {}))\n training_time = time.time() - start\n print(\n f\"{model_kwargs['model_name']} trained in {training_time:.2f} seconds\"\n )\n return eval_func(y_test, X_test, fitted, training_time, start)", "title": "" }, { "docid": "c61c7e6a552575f3304b1db300b34d96", "score": "0.6265605", "text": "def pickleModel(self):\n print 'Saving model to file...'\n logit = LogisticRegression(C=self.C, penalty='l1')\n logit.fit(self.X_mapped,self.y)\n \n with open('model','w') as myFile:\n pickle.dump({'logit':logit,'degree':self.degree,'useInverse':self.useInverse,'mean':self.mean,'stdev':self.stdev,'n':self.n,'m':self.m},myFile)", "title": "" }, { "docid": "dd20bbb67c42cd2b0ac47508f8d52a91", "score": "0.62630767", "text": "def load_model(filename):\r\n return K.models.load_model(filename)", "title": "" }, { "docid": "fc344fbb1f15c96cbed92b52cf1841d9", "score": "0.6260273", "text": "def load_model(self, model):\n self.brain.load_model(model)", "title": "" }, { "docid": "937985b1975ddcf45aea46691efb0f93", "score": "0.62586033", "text": "def load_model():\n # TODO: INSERT CODE\n # return model", "title": "" }, { "docid": "e5cc9afc8f4498687ccd2a8e123d696d", "score": "0.6257939", "text": "def load_model(self):\n file = open(self.config.MODEL_PATH, \"rb\")\n self.model = pickle.load(file, encoding=\"ASCII\")", "title": "" }, { "docid": "6d24f9644735a8843ce5e9db1acec9e6", "score": "0.62560064", "text": "def load_model(file_path):\n model = HabrHubRatingRegressor('')\n model.load(file_path)\n return model", "title": "" }, { "docid": "75682d9dbc92bf56ea09edb77d7eb83b", "score": "0.62514", "text": "def get_model(model):\n clf = joblib.load(model)\n\n return clf", "title": "" }, { "docid": "0a721dd86e030d762e278ac52c95a178", "score": "0.62435913", "text": "def linreg(training_data, target_data):\n\n model = linear_model.LinearRegression()\n model.fit(training_data, target_data)\n\n return model", "title": "" }, { "docid": 
"fc5780cfd914af83c4b540b45effc860", "score": "0.6242975", "text": "def load_model(model_file_name):\n model = liblinear.load_model(_cstr(model_file_name))\n if not model:\n print(\"can't open model file %s\" % model_file_name)\n return None\n model = toPyModel(model)\n return model", "title": "" }, { "docid": "e09875a4fe0538a521694ea484521b85", "score": "0.6232657", "text": "def un_pickle_model():\n with open(\"src/models/RF_model.pkl\", \"rb\") as model_file:\n loaded_model = pickle.load(model_file)\n return loaded_model", "title": "" }, { "docid": "661842d8986f95c4507020860835d5e0", "score": "0.62270707", "text": "def load_model(model_name=None):\n if model_name is None:\n model_filepath = find_latest_model_filepath()\n else:\n model_filepath = get_model_filepath(model_name)\n model = load(model_filepath)\n return model", "title": "" }, { "docid": "ad8b9ded9110abb8bad883e6923468db", "score": "0.62256867", "text": "def load_model(self):\n print(\"Loading vectorizer and model...\")\n self.vectorizer = utils.load_binary(self.vectorizer_path)\n self.clf = utils.load_binary(self.model_path)\n print(\"Finished loading vectorizer and model.\\n\")", "title": "" }, { "docid": "009b60d657c8ed667248c840b531c873", "score": "0.6221139", "text": "def load_and_restore_model(self, session):\n if self.settings_object.model_path:\n pt(\"Restoring model...\", self.settings_object.model_path)\n try:\n # TODO (@gabvaztor) Do Generic possibles models\n model_possible_1 = self.settings_object.model_path + \"model\" + Dictionary.string_ckpt_extension\n model_possible_2 = model_possible_1 + Dictionary.string_meta_extension\n model_possible_3 = model_possible_1 + Dictionary.string_ckpt_extension\n model_possible_4 = model_possible_3 + Dictionary.string_meta_extension\n possibles_models = [model_possible_1, model_possible_2, model_possible_3, model_possible_4]\n model = [x for x in possibles_models if file_exists_in_path_or_create_path(x)]\n if model:\n saver = tf.train.import_meta_graph(model[0])\n # Restore variables from disk.\n saver.restore(session, model_possible_1)\n pt(\"Model restored without problems\")\n else:\n if self.ask_to_continue_creating_model_without_exist:\n response = recurrent_ask_to_continue_without_load_model()\n if not response:\n raise Exception()\n else:\n pt(\"The model won't load because it doesn't exist\",\n \"You chose 'continue_creating_model_without_exist\")\n except Exception as e:\n pt(Errors.error, e)\n raise Exception(Errors.error + \" \" + Errors.can_not_restore_model)", "title": "" }, { "docid": "9d58251dbd8eecc072d9d29e50756d89", "score": "0.6216922", "text": "def test_load_model():\n assert model != None\n assert model.predict(X_prepared) is not None", "title": "" }, { "docid": "e076cb972d90f5a3127f5161c35880c8", "score": "0.6216277", "text": "def loadSavedModel(self, fileName=None):\n fileName = fileName + '.sav'\n if fileName is not None:\n self.fileName = fileName\n #return pickle.load(open(self.fileName, 'rb')) \n print(\"Loading saved model from: %s\\n\" % (self.fileName))\n return joblib.load(self.fileName)", "title": "" }, { "docid": "54adf78ebd14a639c536419cda4ddea7", "score": "0.6214564", "text": "def upload_model(self, model_for_upload, new_model_name=None):\r\n model = pickle.load(open(model_for_upload, 'rb'))\r\n self.model = sklearn.base.clone(model)\r\n if new_model_name is not None:\r\n self.model_name = new_model_name", "title": "" }, { "docid": "4dfcbbe0444eace94decad2bd91075a1", "score": "0.6212893", "text": "def load_model(self, 
name_addition=None):\n\t\tname = self.model_name\n\t\tif name_addition is not None:\n\t\t\tname += name_addition\n\n\t\tjson_file = open(name+'.json', 'r')\n\t\tmodel_json = json_file.read()\n\t\tjson_file.close()\n\t\tself.model = model_from_json(model_json)\n\t\tself.model.load_weights(name+'_weights.h5')\n\t\tprint('Model %s loaded from disk'%(name))", "title": "" }, { "docid": "705b77c7604a7b7521b0f4f4d540d9de", "score": "0.6203827", "text": "def train_model(X,y,saved_model):\n\n ## Perform a train-test split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\n\n ## Specify parameters and model\n params = {'C':1.0,'kernel':'linear','gamma':0.5}\n clf = svm.SVC(**params,probability=True)\n\n ## fit model on training data\n clf = clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n print(classification_report(y_test,y_pred))\n\n ## retrain using all data\n clf.fit(X, y)\n print(\"... saving model: {}\".format(saved_model))\n joblib.dump(clf,saved_model)\n\n print(y_test[:5])\n print(X_test[:5,:])", "title": "" }, { "docid": "21e70d2a2d8114f76e61ef8f92f84fd9", "score": "0.6196922", "text": "def train_model(self):\n \n self.predictor = LogisticRegression().fit(self.x_train, self.y_train)\n return", "title": "" }, { "docid": "693704ffefa06355b9f6424c9ef860c2", "score": "0.6196806", "text": "def load_model(self, filepath):\n self.model = load_model(filepath)", "title": "" }, { "docid": "063735b4b053c5a80b5b9dd9073ffdff", "score": "0.61812955", "text": "def load_model(filename):\n return K.models.load_model(filename)", "title": "" }, { "docid": "41b856448960dee7061352511105590f", "score": "0.61775666", "text": "def load(self, path='model/'):\r\n if path[-1] != '/':\r\n path = path + '/'\r\n\r\n self.model = tf.keras.models.load_model(path + 'model.h5')\r\n with open(path+'dataset.pickle', 'rb') as f:\r\n self.dataset = pickle.load(f)", "title": "" }, { "docid": "d68ab7a8900181cb43fb4e1053ea70c1", "score": "0.6173084", "text": "def restore_model(self, saved_model):\n print(\"loading trained model\", saved_model)\n if torch.cuda.is_available() is False:\n map_location = 'cpu'\n else:\n map_location = None\n self.current_net.load_state_dict(torch.load(saved_model,map_location=map_location))\n self.target_net.load_state_dict(self.current_net.state_dict())", "title": "" }, { "docid": "d68ab7a8900181cb43fb4e1053ea70c1", "score": "0.6173084", "text": "def restore_model(self, saved_model):\n print(\"loading trained model\", saved_model)\n if torch.cuda.is_available() is False:\n map_location = 'cpu'\n else:\n map_location = None\n self.current_net.load_state_dict(torch.load(saved_model,map_location=map_location))\n self.target_net.load_state_dict(self.current_net.state_dict())", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.6173011", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.6173011", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.6173011", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.6173011", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "8aca508aa9581be29cbbc872b7941372", "score": "0.6173011", "text": "def load_model(self):\n pass", "title": "" }, { "docid": "e0beafa546cad31a7e619dd6ff8632df", "score": "0.61723953", "text": "def load(*args, **kwargs) -> keras.Model:\n pass", "title": "" }, { 
"docid": "01c0028d3a9f73e38e4a090c2f0876f5", "score": "0.61681783", "text": "def _set_regression_model(self, ridge: float=None, sklearn_model: Callable=None):\n if ridge is not None and sklearn_model is not None:\n raise ValueError(\"ridge and sklearn_model can't be defined at the same time.\")\n \n elif ridge is not None:\n self.ridge = ridge\n return ridge_linear_model(self.ridge)\n \n elif sklearn_model is not None:\n self.sklearn_model = sklearn_model\n return sklearn_linear_model(self.sklearn_model)\n \n else:\n return pseudo_inverse_linear_model()", "title": "" }, { "docid": "900eaa63631a1cc34305469c8d1fba41", "score": "0.6161496", "text": "def train_model(df, method = None, save_model = None, **kwargs):\n #Assum method defined, in this case, logistic regression\n assert method in methods.keys()\n \n #get predictors from get_features method\n if \"get_features\" in kwargs:\n X = get_features(df, **kwargs[\"get_features\"])\n else:\n X = df\n \n X = normalize_features(X)\n\n #get predicted value from get_target method\n if \"get_target\" in kwargs:\n y = get_target(df, **kwargs[\"get_target\"])\n df = df.drop(labels = [kwargs[\"get_target\"][\"target\"]],axis = 1)\n else:\n y = None\n\n #Split train set and test set\n\n X,y = train_test_split(X, y, **kwargs[\"train_test_split\"])\n\n #Specified the method. In this case, logistic regression.\n model = methods[method]()\n\n #Fit model\n\n model.fit(X[\"train\"], y[\"train\"])\n\n #Save model if specified\n if save_model is not None:\n with open(save_model, \"wb\" ) as f: #write and binary\n pickle.dump(model,f)\n logger.info(\"Trained model save to %s\", save_model)\n return model", "title": "" }, { "docid": "49d7a31d26d517adcdbc28517149ffff", "score": "0.61566347", "text": "def load_pretrained_lm(vocab) :\n lm = get_language_model(AWD_LSTM, len(vocab))\n model_path = untar_data('https://s3.amazonaws.com/fast-ai-modelzoo/wt103-1', data=False)\n fnames = [list(model_path.glob(f'*.{ext}'))[0] for ext in ['pth', 'pkl']]\n old_itos = pickle.load(open(fnames[1], 'rb'))\n old_stoi = {v:k for k,v in enumerate(old_itos)}\n wgts = torch.load(fnames[0], map_location=lambda storage, loc: storage)\n wgts = convert_weights(wgts, old_stoi, vocab)\n lm.load_state_dict(wgts)\n return lm", "title": "" }, { "docid": "6cce719b7f452097a88fa0ce77d83f63", "score": "0.61552626", "text": "def load_model(is_perturbed):\n model_path = f\"model_best_weights{'_pert' if is_perturbed else ''}.h5\"\n return tf.keras.models.load_model(model_path)", "title": "" }, { "docid": "39060def6169e8405a133cc176b35e83", "score": "0.61543083", "text": "def load_saved_model(model_path):\n l_bert, model_ckpt = fetch_bert_layer()\n model = load_model(model_path,\n custom_objects={\n \"BertModelLayer\": l_bert,\n \"argument_candidate_acc\": class_acc(3)\n })\n return model", "title": "" }, { "docid": "7f15036174476db35093111cfe3b875d", "score": "0.6152933", "text": "def load_model(model_name, environment_name):\n # Build the appropriate flags here.\n flags = Namespace(\n alg=model_name,\n env=environment_name\n )\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args(vars(flags))\n extra_args = parse_cmdline_kwargs(unknown_args)\n\n model, env = train(args, extra_args)", "title": "" }, { "docid": "a8ad50792c682c0c84e97e70c8cb3aa1", "score": "0.61487067", "text": "def load(self):\n model_file, _ = self.get_model('.pt')\n device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\n model = 
BertSentimentClassifier(n_classes=3)\n model.load_state_dict(torch.load(model_file, map_location=device))\n model.eval()\n self.model = model", "title": "" }, { "docid": "0489e4546100cda9f6653f63310f2472", "score": "0.61423963", "text": "def _load_model(self):\n pass", "title": "" }, { "docid": "9c11bdd02480955231f12d862d2320cf", "score": "0.6139674", "text": "def model_train():\n logging.info(\"model training.....\")\n housing_prepared, housing_labels = data_preprocessing.data_preprocess()\n linear = linear_model_(housing_prepared, housing_labels)\n dt = dtreg(housing_prepared, housing_labels)\n rnd = rnd_forest(housing_prepared, housing_labels)\n return linear, dt, rnd", "title": "" }, { "docid": "d60be20e4b746c3f03437bcb58dfb4eb", "score": "0.6134067", "text": "def train_model(self):\n pass", "title": "" }, { "docid": "461e58d3c148f0a7e98f17b3ef005b09", "score": "0.61315686", "text": "def load_model():\n options = {\"model\": \"cfg/yolo.cfg\",\n \"load\": \"bin/yolo.weights\",\n \"threshold\": 0.1}\n\n tfnet = TFNet(options)\n return tfnet", "title": "" }, { "docid": "3dd65fb5382c4ee880e84ce9185c989f", "score": "0.61308837", "text": "def load_model(filename):\n # https://github.com/tensorflow/docs/blob/r1.12/site/en/api_docs/python\n # /tf/keras/models/load_model.md\n return K.models.load_model(\n filepath=filename\n )", "title": "" }, { "docid": "c37811897f738402f426a77f5a09bb08", "score": "0.61285853", "text": "def load_model(self, filename):\n model = np.load(f\"models/{filename}\", allow_pickle=True)\n self.beta = model[\"beta\"].reshape(-1, 1)", "title": "" }, { "docid": "1a6a92fc6ecf946be7a8027b2a5983a3", "score": "0.61280817", "text": "def fit_model(X_train_enc, y_train, random_state):\n try:\n # fit model\n gb = GradientBoostingClassifier(random_state=random_state)\n gb_model = gb.fit(X_train_enc, y_train)\n logger.info(\"Model fit\")\n return gb_model\n except Exception as e:\n logger.error(e)", "title": "" }, { "docid": "a63a57563f8e85b2149a4d18d15b396f", "score": "0.6127886", "text": "def load_model(self, trial):\n raise NotImplementedError", "title": "" } ]
39bfd9139fd023c1319d792d2f52643d
Test case for update_entry_membership
[ { "docid": "8d360ca07a627cb720c473ff7dd388d7", "score": "0.947138", "text": "def test_update_entry_membership(self):\n pass", "title": "" } ]
[ { "docid": "7ef61fa44c05bc75b72ab5b0b5efd3dd", "score": "0.77704763", "text": "def test_update_entry(self):\n pass", "title": "" }, { "docid": "7ca7fab9168ea8c5b33959eaa2673f7f", "score": "0.76735425", "text": "def test_update_membership_type(self):\n pass", "title": "" }, { "docid": "cf1c9473498649dc08db9f682ac35bd6", "score": "0.7479196", "text": "def test_update_entry_enrollment(self):\n pass", "title": "" }, { "docid": "25c3fa0e697454494a0417da34f36345", "score": "0.7412441", "text": "def test_update_entry_invitation(self):\n pass", "title": "" }, { "docid": "b454c5583c1084fefcde14bdfc425324", "score": "0.73654145", "text": "def test_update_entry_group(self):\n pass", "title": "" }, { "docid": "63571dd758021c1ad02b7e2092470cf8", "score": "0.7363401", "text": "def test_update_entry_profile(self):\n pass", "title": "" }, { "docid": "ef4b0b870a694584d17c2356e5e22382", "score": "0.73386544", "text": "def test_update_entry_correspondence(self):\n pass", "title": "" }, { "docid": "23ca60fb7bed0cda71993dccb2b12881", "score": "0.73232466", "text": "def test_update_entry_result(self):\n pass", "title": "" }, { "docid": "eab857ea29448c4ed78c8fc0a3c9c8a5", "score": "0.73228717", "text": "def test_update_entry_visitor(self):\n pass", "title": "" }, { "docid": "32aa490be29c0fbb856a5a0e48cf2f8a", "score": "0.72906846", "text": "def test_update_entry_detail(self):\n pass", "title": "" }, { "docid": "ba082666f72629f47456c4a31778bd2e", "score": "0.72857547", "text": "def test_update_group_contact_entry(self):\n pass", "title": "" }, { "docid": "2a18aeb74422131e816a3f93926e835b", "score": "0.71369493", "text": "def test_update_entry_portal_activity(self):\n pass", "title": "" }, { "docid": "ecc758219db016dc510cdf395939b452", "score": "0.7085049", "text": "def test_update_entry_alumni(self):\n pass", "title": "" }, { "docid": "4bf2a345a4cc63233167e6b089fa075a", "score": "0.7066863", "text": "def test_api_can_update_entry(self):\n self.create_authenticated_entry()\n entry = Entry.objects.get()\n\n self.login()\n response = self.update_entry(entry.id)\n self.logout()\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "783217ffcdfbbf4a07d89701d3c40a40", "score": "0.70589787", "text": "def test_to_update_entry(self):\n tester = self.app.test_client(self)\n response = tester.put('/API/v1/entries/1', data=TestBase.update_entry,\n headers=self.access_header,\n content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "e0e15fcc65d94ad3715844cf41d0c871", "score": "0.70536184", "text": "def test_update_incident_entry(self):\n pass", "title": "" }, { "docid": "016943724a23da5ac8ea85fec7ce73d3", "score": "0.70002866", "text": "def test_update_event_contact_entry(self):\n pass", "title": "" }, { "docid": "cf710e7570ee534f41aadddf8b907605", "score": "0.6986772", "text": "def test_search_entry_membership(self):\n pass", "title": "" }, { "docid": "2606363c3f29b5cc6d14375bb057f364", "score": "0.69772655", "text": "def test_update_concern_entry(self):\n pass", "title": "" }, { "docid": "326f2cbf464e5335111c5dbcb6907bc4", "score": "0.6957985", "text": "def test_update_entry_event(self):\n pass", "title": "" }, { "docid": "5001871a3587772e79d01825d8dcd611", "score": "0.6924267", "text": "def test_update_entry_school(self):\n pass", "title": "" }, { "docid": "433af8c00f2700e127127841e4f26c3c", "score": "0.68796986", "text": "def test_update_entry_custom_field(self):\n pass", "title": "" }, { "docid": 
"190767cba670bac923d0f4ac17c0f46d", "score": "0.6847269", "text": "def test_update_entry_scholarship(self):\n pass", "title": "" }, { "docid": "73cc82b582ee3182cd66e895dd46f75a", "score": "0.6837821", "text": "def test_update_entry_family(self):\n pass", "title": "" }, { "docid": "41c2ed0d6ba78c7679fed1c8ac3d6575", "score": "0.6825599", "text": "def test_update_contribution_entry(self):\n pass", "title": "" }, { "docid": "c808004c25a9d734a1b5435bf7883373", "score": "0.67992204", "text": "def test_update_entry_address(self):\n pass", "title": "" }, { "docid": "326f836b631b1655bb85305f9da2f853", "score": "0.67796427", "text": "def test_update_account(self):\n pass", "title": "" }, { "docid": "81ef3d8e975e4e3e3e1abf7f5cec69e8", "score": "0.67780596", "text": "def test_update_incident_entry_sanction(self):\n pass", "title": "" }, { "docid": "9c41b7d3c39c3e04ef086d1a25400cd3", "score": "0.6758902", "text": "def test_update_entry_parcel(self):\n pass", "title": "" }, { "docid": "6b5f67c51fba3edff5b2988844fd1161", "score": "0.675328", "text": "def test_update_incident_entry_violation(self):\n pass", "title": "" }, { "docid": "4e800655f12f2d8ad5d20f0bf8a178cc", "score": "0.67324245", "text": "def test_update_incident_action_entry(self):\n pass", "title": "" }, { "docid": "4f62b8040b95a0f1588e0214aa2cfac2", "score": "0.6722867", "text": "def test_update_incident_entry_appeal(self):\n pass", "title": "" }, { "docid": "d26711b8b9b50c7b3a3348706585fd2e", "score": "0.6630314", "text": "def test_update_entry_alumni_status(self):\n pass", "title": "" }, { "docid": "af2a40a463e876f08a93cf7c5e950baa", "score": "0.6584624", "text": "def test_update_incident_entry_correspondence(self):\n pass", "title": "" }, { "docid": "09c5012600c187953b751b373b33d4ce", "score": "0.65495694", "text": "def test_function_update_member_record_returns_updated_record_with_username(self):\n properties_to_update = {\n \"username\": \"Yule Msee\",\n \"password\": \"ati-what?\"\n }\n expected_output = {\n \"status\": 200,\n \"data\" : {\n \"id\":1,\n # plus input member record attributes\n # and plus properties to update w/o password\n }\n }\n expected_output['data'].update(self.input_member_record)\n expected_output['data'].update({\"username\" : properties_to_update[\"username\"]})\n with self.app.test_request_context():\n # 1. create new membership_class record\n obj_membership_class = MembershipClass(class_name=self.input_member_record['class_name'], monthly_contrib_amount=1450.00)\n obj_membership_class.save()\n # 2. create new member record\n member_record = self.save_new_member(self.input_member_record)\n # 3. 
test the function\n output = self.update_member_record(properties_to_update, member_record['data']['public_id'])\n \n # make the assertions\n self.assertIn('status', output, 'status key missing in output')\n self.assertIn('data', output, 'data key is missing in output')\n self.assertIn('public_id', output['data'], 'public_id key missing in output.data')\n self.assertIn('username', output['data'], 'username key missing in output.data')\n\n self.assertEqual(output['status'], expected_output['status'], 'status returned Not status expected ')\n self.assertEqual(output['data']['first_name'], expected_output['data']['first_name'], 'first_name returned Not first_name supplied ')\n self.assertEqual(output['data']['middle_name'], expected_output['data']['middle_name'], 'middle_name supplied Not middle_name returned')\n self.assertEqual(output['data']['last_name'], expected_output['data']['last_name'], 'last_name returned Not last_name supplied')\n self.assertEqual(output['data']['email'], expected_output['data']['email'], 'email returned Not email supplied')\n self.assertEqual(output['data']['phone_number'], expected_output['data']['phone_number'], 'monthly_contrib_amount returned Not monthly_contrib_amount supplied')\n self.assertEqual(output['data']['class_name'], expected_output['data']['class_name'], 'class_name returned Not class_name supplied')\n self.assertEqual(output['data']['username'], expected_output['data']['username'], 'username returned Not username supplied')", "title": "" }, { "docid": "13c771fe7a99c715579aaa852c5feae8", "score": "0.6520161", "text": "def test_update_entry_meal(self):\n pass", "title": "" }, { "docid": "c8accfc29d9f82c690ddfdffc8e21a2e", "score": "0.6509985", "text": "def test_update_entry_application(self):\n pass", "title": "" }, { "docid": "d32cf09b13497e3691433f288c818aa6", "score": "0.65050185", "text": "def test_update(self):\n pass", "title": "" }, { "docid": "d32cf09b13497e3691433f288c818aa6", "score": "0.65050185", "text": "def test_update(self):\n pass", "title": "" }, { "docid": "da0c323cc0ae1cdc199ff243fc6a82ba", "score": "0.64895195", "text": "def test_update_entry_application_room_mate(self):\n pass", "title": "" }, { "docid": "29eed723f90719a0239d5285222b784a", "score": "0.6479994", "text": "def test_update_entry_sdas(self):\n pass", "title": "" }, { "docid": "a83a34c14fda88c3978c2ac1a195be3e", "score": "0.6384394", "text": "def test_update_entry_application_portal_section(self):\n pass", "title": "" }, { "docid": "26f969bba45aa5d889b178428c15a300", "score": "0.63759816", "text": "def test_update_profile_item(self):\n pass", "title": "" }, { "docid": "dbce2f3395ce1f47fdeccb9fa1f14b76", "score": "0.6363525", "text": "def test_update_program_entry(self):\n pass", "title": "" }, { "docid": "16323db507a535228024206dc18470f3", "score": "0.6353474", "text": "def test_function_update_member_record_returns_record_if_new_username_for_existing_record_set(self):\n \n existing_username = {\"username\":\"Niko\"}\n new_username = {\"username\":\"NikoNewMsee\"}\n expected_output = {\n \"status\": 200,\n \"data\" : {\n # plus input member record attributes\n # and plus the username\n }\n }\n expected_output['data'].update(self.input_member_record)\n expected_output['data'].update(new_username)\n with self.app.test_request_context():\n # 1. create new membership_class record\n obj_membership_class = MembershipClass(class_name=self.input_member_record['class_name'], monthly_contrib_amount=1450.00)\n obj_membership_class.save()\n # 2. 
create new member\n existing_member = self.save_new_member(self.input_member_record)\n # 3. update username for new member\n output_0 = self.update_member_record(existing_username, existing_member['data']['public_id'])\n # 4. update username for new member with new username\n output = self.update_member_record(new_username, existing_member['data']['public_id'])\n \n # Make the assertions\n assert type(output) == dict\n self.assertIn('status', output, 'status key missing in output')\n self.assertIn('data', output, 'data key is missing in output')\n self.assertIn('public_id', output['data'], 'public_id key missing in output.data')\n self.assertIn('username', output['data'], 'username key missing in output.data')\n\n self.assertEqual(output['status'], expected_output['status'], 'status returned Not status expected ')\n self.assertEqual(output['data']['first_name'], expected_output['data']['first_name'], 'first_name returned Not first_name supplied ')\n self.assertEqual(output['data']['middle_name'], expected_output['data']['middle_name'], 'middle_name supplied Not middle_name returned')\n self.assertEqual(output['data']['last_name'], expected_output['data']['last_name'], 'last_name returned Not last_name supplied')\n self.assertEqual(output['data']['email'], expected_output['data']['email'], 'email returned Not email supplied')\n self.assertEqual(output['data']['phone_number'], expected_output['data']['phone_number'], 'monthly_contrib_amount returned Not monthly_contrib_amount supplied')\n self.assertEqual(output['data']['class_name'], expected_output['data']['class_name'], 'class_name returned Not class_name supplied')\n self.assertEqual(output['data']['username'], expected_output['data']['username'], 'username returned Not username supplied')", "title": "" }, { "docid": "180918a9bd47a4d606756badfb998af1", "score": "0.63419366", "text": "def test_function_update_member_record_returns_record_if_email_for_existing_record(self):\n \n existing_email = {\"email\":self.input_member_record['email']}\n expected_output = {\n \"status\": 200,\n \"data\" : {\n # plus input member record attributes\n }\n }\n expected_output['data'].update(self.input_member_record)\n with self.app.test_request_context():\n # 1. create new membership_class record\n obj_membership_class = MembershipClass(class_name=self.input_member_record['class_name'], monthly_contrib_amount=1450.00)\n obj_membership_class.save()\n # 2. create new member\n existing_member = self.save_new_member(self.input_member_record)\n # 3. 
update email for new member with member's own existing email\n output = self.update_member_record(existing_email, existing_member['data']['public_id'])\n \n # Make the assertions\n assert type(output) == dict\n self.assertIn('status', output, 'status key missing in output')\n self.assertIn('data', output, 'data key is missing in output')\n self.assertIn('public_id', output['data'], 'public_id key missing in output.data')\n self.assertIn('username', output['data'], 'username key missing in output.data')\n\n self.assertEqual(output['status'], expected_output['status'], 'status returned Not status expected ')\n self.assertEqual(output['data']['first_name'], expected_output['data']['first_name'], 'first_name returned Not first_name supplied ')\n self.assertEqual(output['data']['middle_name'], expected_output['data']['middle_name'], 'middle_name supplied Not middle_name returned')\n self.assertEqual(output['data']['last_name'], expected_output['data']['last_name'], 'last_name returned Not last_name supplied')\n self.assertEqual(output['data']['email'], expected_output['data']['email'], 'email returned Not email supplied')\n self.assertEqual(output['data']['phone_number'], expected_output['data']['phone_number'], 'monthly_contrib_amount returned Not monthly_contrib_amount supplied')\n self.assertEqual(output['data']['class_name'], expected_output['data']['class_name'], 'class_name returned Not class_name supplied')", "title": "" }, { "docid": "d19b4b213e4dd782033f4117ae59a380", "score": "0.63313615", "text": "def test_update_concern(self):\n pass", "title": "" }, { "docid": "6993c2534d08a255b356be425a4dda23", "score": "0.63249975", "text": "def test_update_entry_schedule_transaction(self):\n pass", "title": "" }, { "docid": "2a7872530481b3d8867584ad3de1f24c", "score": "0.6303837", "text": "def test_update_permission_set(self):\n pass", "title": "" }, { "docid": "88e6dbc25530a053bc87c3eeda8493be", "score": "0.6300661", "text": "def test_update_group(self):\n pass", "title": "" }, { "docid": "3370458d088fbc83253ed59215889dfc", "score": "0.6279379", "text": "def test_function_update_member_record_returns_record_if_username_for_existing_record(self):\n \n existing_username = {\"username\":\"Niko\"}\n expected_output = {\n \"status\": 200,\n \"data\" : {\n # plus input member record attributes\n # and plus the username\n }\n }\n expected_output['data'].update(self.input_member_record)\n expected_output['data'].update(existing_username)\n with self.app.test_request_context():\n # 1. create new membership_class record\n obj_membership_class = MembershipClass(class_name=self.input_member_record['class_name'], monthly_contrib_amount=1450.00)\n obj_membership_class.save()\n # 2. create new member\n existing_member = self.save_new_member(self.input_member_record)\n # 3. update username for new member\n output_0 = self.update_member_record(existing_username, existing_member['data']['public_id'])\n # 4. 
update username for new member with same username\n output = self.update_member_record(existing_username, existing_member['data']['public_id'])\n \n # Make the assertions\n assert type(output) == dict\n self.assertIn('status', output, 'status key missing in output')\n self.assertIn('data', output, 'data key is missing in output')\n self.assertIn('public_id', output['data'], 'public_id key missing in output.data')\n self.assertIn('username', output['data'], 'username key missing in output.data')\n\n self.assertEqual(output['status'], expected_output['status'], 'status returned Not status expected ')\n self.assertEqual(output['data']['first_name'], expected_output['data']['first_name'], 'first_name returned Not first_name supplied ')\n self.assertEqual(output['data']['middle_name'], expected_output['data']['middle_name'], 'middle_name supplied Not middle_name returned')\n self.assertEqual(output['data']['last_name'], expected_output['data']['last_name'], 'last_name returned Not last_name supplied')\n self.assertEqual(output['data']['email'], expected_output['data']['email'], 'email returned Not email supplied')\n self.assertEqual(output['data']['phone_number'], expected_output['data']['phone_number'], 'monthly_contrib_amount returned Not monthly_contrib_amount supplied')\n self.assertEqual(output['data']['class_name'], expected_output['data']['class_name'], 'class_name returned Not class_name supplied')", "title": "" }, { "docid": "8d6ae01bc0054ea79e12d327e40d1ac2", "score": "0.6259937", "text": "def test_update_entry_note(self):\n pass", "title": "" }, { "docid": "c96f8efaab4ff430c899cddabbde7895", "score": "0.6254981", "text": "def test_update_contact(self):\n pass", "title": "" }, { "docid": "139a64fabd12a1f600a638279c6bf9fd", "score": "0.6252445", "text": "def test_update_entry_electronic_identity(self):\n pass", "title": "" }, { "docid": "b72c71b13ff63945a021b18f8dfcf141", "score": "0.6241784", "text": "def test_update_instance(self):\n pass", "title": "" }, { "docid": "785e8fb3ac23d9ab998fde90bf532077", "score": "0.62298536", "text": "def test_update_collection(self):\n pass", "title": "" }, { "docid": "10b81e1d2668e587cbfec5bf572d785c", "score": "0.6225732", "text": "def test_updating_group_can_update_members(self):\n self.client.force_login(self.user)\n\n third = get_user_model().objects.create_user(username='third', email='third@example.com')\n\n response = self.client.post(\n f'/users/{self.user.username}/groups/',\n data=json.dumps({\n 'name': 'Some Group',\n 'members': [self.other.username]\n }),\n content_type=\"application/json\"\n )\n group_id = response.json()['id']\n\n response = self.client.put(\n f'/users/{self.user.username}/groups/{group_id}/',\n data=json.dumps({\n 'name': 'Some Group',\n 'members': [third.username]\n }),\n content_type=\"application/json\"\n )\n\n body = response.json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(body['members'], [{'username': third.username}])", "title": "" }, { "docid": "fe532e15a513c7a42fff07c754e00aa3", "score": "0.6223809", "text": "def test_update_1(self):", "title": "" }, { "docid": "7548644df1c6f9fc7fb722afd14146cb", "score": "0.6201462", "text": "def test_update_catering(self):\n pass", "title": "" }, { "docid": "9254abf6b3a363431a1975e279b8c1d5", "score": "0.6199807", "text": "def test_update_lookup(self):\n pass", "title": "" }, { "docid": "29d5ca13ed16358e775e50723ae040a0", "score": "0.61893064", "text": "def test_function_update_member_record_returns_error_if_email_not_unique(self):\n \n 
new_member_info = {\n \"first_name\": \"LaKwanza\",\n \"middle_name\": \"ZaKati\",\n \"last_name\": \"Mwishowe\",\n \"email\": \"newmember@domain.com\",\n \"phone_number\": \"0700987654\",\n \"class_name\": \"Test Class A B C\"\n }\n existing_email = {\"email\":self.input_member_record['email']}\n expected_output = {\n \"status\": 400,\n \"error\": \"Cannot set email: '{}' already exists.\".format(existing_email['email'])\n }\n with self.app.test_request_context():\n # 1. create new membership_class record\n obj_membership_class = MembershipClass(class_name=self.input_member_record['class_name'], monthly_contrib_amount=1450.00)\n obj_membership_class.save()\n # 2. create new member 1\n existing_member = self.save_new_member(self.input_member_record)\n # 3. create second new member\n new_member = self.save_new_member(new_member_info)\n # 4. test the update function - set email of new member = existing member's email\n output = self.update_member_record({\"email\":existing_member['data']['email']}, new_member['data']['public_id'])\n\n # Make the assertions\n assert type(output) == dict\n self.assertIn('status', output, \"status_code key is missing!\")\n self.assertIn('error', output, \"error key is missing!\")\n \n self.assertNotEqual(output['status'], \"\", \"No status_code information provided\")\n self.assertNotEqual(output['error'], \"\", \"No error information provided\")\n\n self.assertIsInstance(output['error'], str, \"Error info not string data\")\n \n self.assertEqual(output['status'], expected_output['status'], \"Output received does not match expected\")\n self.assertEqual(output['error'], expected_output['error'], \"Output received does not match expected\")", "title": "" }, { "docid": "8cf324f2839f116fe4282b51bf8724bf", "score": "0.6171484", "text": "def test_update_roommate_group_request(self):\n pass", "title": "" }, { "docid": "73a4b79340f11723213e5039d5f8de02", "score": "0.6159024", "text": "def test_update_housekeeping(self):\n pass", "title": "" }, { "docid": "345b9c68e26b666e061d846f1bd00b87", "score": "0.6156566", "text": "def test_user_update(self):\n announcement = Announcement.objects.create(\n title=self.title,\n content=self.content,\n creator=self.staff,\n site_wide=False\n )\n new_title = \"Bigger Announcement\"\n post_args = dict(\n title=new_title,\n )\n url = reverse(self.update_urlname, kwargs=dict(pk=announcement.pk))\n request = self.factory.post(url)\n request.user = self.user\n request.POST = post_args\n response = AnnouncementUpdateView.as_view()(request, pk=announcement.pk)\n self.assertRedirectsToLogin(response, url)", "title": "" }, { "docid": "f753f3bdca8192864902e9df67891a7a", "score": "0.61478084", "text": "def test_update_incident_entry_note(self):\n pass", "title": "" }, { "docid": "ef9f66e1a56f53e25c1cc8ec6eb51600", "score": "0.61326194", "text": "def test_unit_update(self):\n pass", "title": "" }, { "docid": "6a32791ededeb3f965e0286e260ed6a1", "score": "0.6130556", "text": "def test_function_update_member_record_returns_error_if_username_not_unique(self):\n \n new_member = {\n \"first_name\": \"LaKwanza\",\n \"middle_name\": \"ZaKati\",\n \"last_name\": \"Mwishowe\",\n \"email\": \"newmember@domain.com\",\n \"phone_number\": \"0700987654\",\n \"class_name\": \"Test Class A B C\"\n }\n existing_username = {\"username\":\"Niko\"}\n expected_output = {\n \"status\": 400,\n \"error\": \"Cannot set username: '{}' already exists.\".format(existing_username['username'])\n }\n with self.app.test_request_context():\n # 1. 
create new membership_class record\n obj_membership_class = MembershipClass(class_name=self.input_member_record['class_name'], monthly_contrib_amount=1450.00)\n obj_membership_class.save()\n # 2. create new member 1\n existing_member = self.save_new_member(self.input_member_record)\n # 3. set username for new member\n output = self.update_member_record(existing_username, existing_member['data']['public_id'])\n # 4. create second new member\n new_member = self.save_new_member(new_member)\n # 5. test the update function - set username of new member 2 = new member 1's username\n output = self.update_member_record(existing_username, new_member['data']['public_id'])\n\n # Make the assertions\n assert type(output) == dict\n self.assertIn('status', output, \"status_code key is missing!\")\n self.assertIn('error', output, \"error key is missing!\")\n \n self.assertNotEqual(output['status'], \"\", \"No status_code information provided\")\n self.assertNotEqual(output['error'], \"\", \"No error information provided\")\n\n self.assertIsInstance(output['error'], str, \"Error info not string data\")\n \n self.assertEqual(output['status'], expected_output['status'], \"Output received does not match expected\")\n self.assertEqual(output['error'], expected_output['error'], \"Output received does not match expected\")", "title": "" }, { "docid": "bc44f1d06df21a8a029111903a784826", "score": "0.6123192", "text": "def test_update_log_add_in(self):\n pass", "title": "" }, { "docid": "4896e26f583064bcb2168acae3000f22", "score": "0.6122115", "text": "def test_update_roommate_group(self):\n pass", "title": "" }, { "docid": "81a04ee463970826c37669a926ab32b0", "score": "0.6119605", "text": "def test_update_entry_application_custom_field(self):\n pass", "title": "" }, { "docid": "3787d8d7acc84978e9412f68fd50c5ad", "score": "0.61129117", "text": "def test_cases_update(self):\n pass", "title": "" }, { "docid": "8a95efe75b7e8296cd1ad86ba154af76", "score": "0.6104554", "text": "def test_update_access_resource(self):\n pass", "title": "" }, { "docid": "5e3fab11476cda819ddbfbb0a17c4a58", "score": "0.60886747", "text": "def test_update_team(self):\n pass", "title": "" }, { "docid": "ec5ec12738fe8cf294d4c539bed5acd1", "score": "0.60757256", "text": "def test_to_update_invalid_entry(self):\n tester = self.app.test_client(self)\n response = tester.put('/API/v1/entries/11111', data=TestBase.update_entry,\n headers=self.access_header,\n content_type=\"application/json\")\n self.assertEqual(response.status_code, 404)\n self.assertIn(b\"The Page cannot be found\", response.data)", "title": "" }, { "docid": "a9591a73fdbd03f767fb29be29f601c2", "score": "0.6073524", "text": "def test_update_function_booking_attendee(self):\n pass", "title": "" }, { "docid": "0266b7dedeaef08c3fa653044ba27b05", "score": "0.6070361", "text": "def test_update_process(self):\n pass", "title": "" }, { "docid": "bcfb4154adfea0222011ca4066c668ef", "score": "0.6057843", "text": "def test_moderations_partial_update(self):\n pass", "title": "" }, { "docid": "e8689966e13b79f343fae4fc2476fd48", "score": "0.6045094", "text": "def test_update_result(self):\n pass", "title": "" }, { "docid": "8c29fe841cac75477ebdf6ea3d9bd047", "score": "0.6031739", "text": "def test_api_team_membership_list_view_put(self):\n results = self.client.post(\"/teams/team/\", {'name': \"test1\"})\n create_url = str(results)\n create_url_split_array = create_url.split(\"/\")\n url_object_number = create_url_split_array[4]\n response = self.client.put(\"/teams/api/team/\" + url_object_number 
+\n \"/membership/\")\n self.assertContains(response, \"id\", 0, 405)", "title": "" }, { "docid": "f41473b84601eae436b76f51e863dcf4", "score": "0.6015919", "text": "def test_update_entry_position(self):\n pass", "title": "" }, { "docid": "2c4e5a0c29b29866cab4f4cc96449fdb", "score": "0.6015651", "text": "def test_update_role(self):\n pass", "title": "" }, { "docid": "de449f37c9162c8f78bff481c2a1c946", "score": "0.5999903", "text": "def test_update_incident(self):\n pass", "title": "" }, { "docid": "e6c991e0a98d98ca6490509f8cfa7f76", "score": "0.59976757", "text": "def test_moderations_update(self):\n pass", "title": "" }, { "docid": "97d55225abd8b8fd6c43b3713d719877", "score": "0.59970623", "text": "def test_update_auth_user(self):\n pass", "title": "" }, { "docid": "ff142dfcfa7973744b689c5cd649ffed", "score": "0.59894776", "text": "def test_update_account(self):\n account = core_models.User.objects.get(username=\"user@test.com\")\n url = reverse(\"v1:account-detail\", args=[account.pk])\n data = {\n \"username\": \"fromapi@test.com\",\n \"role\": account.role,\n \"password\": \"Toto1234\",\n \"mailbox\": {\n \"full_address\": \"fromapi@test.com\",\n \"quota\": account.mailbox.quota\n }\n }\n response = self.client.put(url, data, format=\"json\")\n self.assertEqual(response.status_code, 200)\n account.refresh_from_db()\n account.mailbox.refresh_from_db()\n self.assertEqual(account.email, account.mailbox.full_address)\n self.assertTrue(account.check_password(\"Toto1234\"))\n\n del data[\"password\"]\n response = self.client.put(url, data, format=\"json\")\n self.assertEqual(response.status_code, 200)\n account.refresh_from_db()\n self.assertTrue(account.check_password(\"Toto1234\"))", "title": "" }, { "docid": "6645c61a816cd9c24b43a18b413f62e6", "score": "0.5974461", "text": "def test_update_entry_meal_plan_detail(self):\n pass", "title": "" }, { "docid": "3a00543a8114263f2ba11e642b47fdda", "score": "0.59737664", "text": "def test_update_entry_application_proxy(self):\n pass", "title": "" }, { "docid": "4ab87a12cf3eb0f083a1056698090acc", "score": "0.59732395", "text": "def test_update_incident_action(self):\n pass", "title": "" }, { "docid": "b98ad5b6f77fdbbe2fa45c3eb0c52fd5", "score": "0.596913", "text": "def test_update_auth_group(self):\n pass", "title": "" }, { "docid": "0d4c7a1d3656306b9f419b6812acc699", "score": "0.5957788", "text": "def test_cases_partial_update(self):\n pass", "title": "" }, { "docid": "190795409a46d18ab8dda645f81079ab", "score": "0.59506047", "text": "def test_update_group_custom_field(self):\n pass", "title": "" }, { "docid": "866618fa9d51eabdf02d47323ace209f", "score": "0.5940877", "text": "def test_update_providers_ldap_by_id(self):\n pass", "title": "" }, { "docid": "f3003067288317c55e10c013f8cb057d", "score": "0.59401137", "text": "def test_update_portal_step(self):\n pass", "title": "" }, { "docid": "0df9a782f1eca75987d8cc77e8fd7b07", "score": "0.59349877", "text": "def test_update_contribution(self):\n pass", "title": "" }, { "docid": "978352b5cfd3e6fed6fbaccb13995f1f", "score": "0.59269863", "text": "def test_update_resource(self):\n pass", "title": "" }, { "docid": "67291aab8664bd1c06aeae7c0a4c7fb1", "score": "0.59210765", "text": "def test_update_catering_item(self):\n pass", "title": "" }, { "docid": "ae77e08161feded18e9928d94bd954cf", "score": "0.5909698", "text": "def test_update_mapping_users_rules(self):\n pass", "title": "" }, { "docid": "d35581219ec15a0fec49a7811577b088", "score": "0.59091175", "text": "def test_update_transaction(self):\n pass", 
"title": "" }, { "docid": "c55e1eb4df85a7f5e7a9780e37e5f18d", "score": "0.58928734", "text": "def test_user_can_update_their_personnal_info(self):\n self.login_the_user()\n self.go_to_url_name('updateUserInfoPage')\n self.write_in_id('id_username_update', 'testusername3')\n self.write_in_id('id_first_name_update', 'testfirstname3')\n self.write_in_id('id_email_update', 'testusername3@mail.com')\n self.click_on_id('button-user-info-update-submit')\n confirmation_messages = self.get_html_in('messages')\n self.assertTrue(\n \"Votre nom d'utilisateur a été modifié\" in confirmation_messages\n and \"Votre prénom a été modifié\" in confirmation_messages\n and \"Votre adresse email a été modifié\" in confirmation_messages\n )", "title": "" }, { "docid": "d1954c6e3cfdb021469f0558e392c65f", "score": "0.5892307", "text": "def test_update_workflow_permission(self):\n pass", "title": "" } ]
abe9377762dc8ca7dae12be903171755
Download bilingual file and parse it as [models.MxliffUnit]
[ { "docid": "6567f5aabc1fc5107c73953c4230f147", "score": "0.5796927", "text": "def get_bilingual_as_mxliff_units(\n self,\n project_id: int,\n job_uids: List[str]\n ) -> models.MxliffUnit:\n return mxliff.MxliffParser().parse(self.get_bilingual_file_xml(project_id, job_uids))", "title": "" } ]
[ { "docid": "e408405ca2df7c6e47a2b616631da004", "score": "0.6113033", "text": "def retrieve_text(file_name):\n\n # Initialization\n he_rev_id = []\n en_rev_id = []\n he_page_name = []\n en_page_name = []\n # translated = []\n\n with open(file_name, 'r') as info:\n info.readline()\n for line in info.readlines():\n clean_line = line.split()\n he_rev_id.append(int(clean_line[1]))\n he_page_name.append(clean_line[0])\n en_rev_id.append(int(clean_line[3]))\n en_page_name.append(clean_line[2])\n # translated.append(clean_line[4])\n\n # English revisions\n en_site = pywikibot.Site(\"en\", \"wikipedia\") # The site we want to run our bot on\n en_rev_text = [pywikibot.Page(en_site, en_page_name[i]).getOldVersion(en_rev_id[i])\n for i in range(0, len(en_rev_id))]\n\n # Hebrew revisions\n he_site = pywikibot.Site(\"he\", \"wikipedia\") # The site we want to run our bot on\n # he_rev_text = [page.getOldVersion(he_id) for he_id in small_he_rev_id]\n he_rev_text = [pywikibot.Page(he_site, he_page_name[i]).getOldVersion(he_rev_id[i])\n for i in range(0, len(he_rev_id))]\n\n # print(en_rev_text[4])\n # print(he_rev_text[4])\n output = open('en_he.pkl', 'wb')\n pickle.dump((he_rev_text, en_rev_text), output)\n output.close()", "title": "" }, { "docid": "c3bdd47f8a89d05672da069b829f2dd1", "score": "0.5578788", "text": "def loadUnit( filename ):\n filename += '.lre'\n with open( filename, 'r' ) as file:\n unit = pickle.load( file )\n return unit", "title": "" }, { "docid": "0550af80771ece192cb78f91a16b3e31", "score": "0.5474136", "text": "def loadFileURL(self, url=\"http://rapdb.dna.affrc.go.jp/download/archive/RAP-MSU_2017-04-14.txt.gz\"):\n\n self.url = url\n\n # Fetch the file by the url and decompress it\n r = requests.get(self.url)\n\n # Create the file .txt\n with open(self.pathToFile, \"wb\") as f:\n f.write(r.content)\n f.close()", "title": "" }, { "docid": "d62526f87e4b1d3100859ab43eb94c79", "score": "0.5453151", "text": "def download_files(self, word, base, ruby, split):\n self.downloads_list = []\n if split:\n # Avoid double downloads\n return\n self.set_names(word, base, ruby)\n if not self.language.lower().startswith('sv'):\n return\n if not word:\n return\n # Replace special characters with ISO-8859-1 oct codes\n m_word = munge_word(word)\n extras = dict(Source=\"Lexin\")\n self.maybe_get_icon()\n audio_url = self.url + m_word + self.file_extension\n word_data = self.get_data_from_url(audio_url)\n word_file_path, word_file_name = self.get_file_name()\n with open(word_file_path, 'wb') as word_file:\n word_file.write(word_data)\n self.downloads_list.append(\n (word_file_path, word_file_name, extras))", "title": "" }, { "docid": "bce10934f07f99776fd38e91eff2129e", "score": "0.53691167", "text": "def loadFileURL(self):\n\n # URL of the web page to analyze\n html_page = requests.get(\"http://rapdb.dna.affrc.go.jp/download/irgsp1.html\")\n soup = BeautifulSoup(html_page.content, \"lxml\")\n\n # Find links\n for link in soup.findAll('a'):\n linkfound = link.get('href')\n\n # Choose the file which begun by RAP-MSU\n if (\"./archive/RAP-MSU_\" in linkfound):\n # Format the URL\n self.url = \"http://rapdb.dna.affrc.go.jp/download\" + linkfound[1:]\n\n #Give the name of the file with the extension .gz\n filename = self.url.split(\"/\")[-1]\n\n # Give the name of the file without .gz\n uncompressName = filename[:-3]\n\n # Fetch the file by the url and decompress it\n r = requests.get(self.url)\n decompressedFile = gzip.decompress(r.content)\n\n # Create the file .txt\n with open(uncompressName, \"wb\") as 
f:\n f.write(decompressedFile)\n f.close()\n\n # Store the name of the created file\n self.nameFile = uncompressName\n\n print(\"File created\")", "title": "" }, { "docid": "69346e6e7329b027462d19bfb7ee3b38", "score": "0.5363386", "text": "def download(lang=\"en\", use_cached=True):\n # remove oold localfile\n old_local_file = os.path.join(CACHE_FOLDER, \"wikidata-latest-all.json.bz2\")\n if os.path.exists(old_local_file):\n os.remove(old_local_file);\n \n # remote_file = 'https://dumps.wikimedia.org/wikidatawiki/entities/latest-all.json.bz2'\n # latest-all.json.bz2 - is not stable link. sometime locate to absent file. 404 - error\n remote_file = 'https://dumps.wikimedia.org/wikidatawiki/entities/20190701/wikidata-20190701-all.json.bz2.not'\n\n create_storage(CACHE_FOLDER)\n local_file = os.path.join(CACHE_FOLDER, \"wikidata-20190701-all.json.bz2\")\n\n # check cache\n if use_cached and os.path.exists(local_file):\n return local_file\n\n # download\n log.info(\"Downloading (%s)....\", remote_file)\n if downloader.download_with_resume(remote_file, local_file):\n log.info(\"Downloaded... [ OK ]\")\n else:\n log.error(\"Downloading... [ FAIL ]\")\n raise Exception(\"Downloading... [ FAIL ]\")\n\n return local_file", "title": "" }, { "docid": "38871c81cfbee20fe721a94786af4eec", "score": "0.52317595", "text": "def download():", "title": "" }, { "docid": "4e0949e928be0c70a8663de4d7071195", "score": "0.5227431", "text": "def do_GET_TRANS(self):\n try:\n self.do_HEAD(headers={'Content-type': 'text/plain; charset=utf-8'})\n text = urllib.unquote(self.path.split(\"/translate/\")[-1].decode('utf-8'))\n proxy = self.get_xmlrpc_proxy()\n response = proxy.translate({'text' : text})\n self.wfile.write(response['text'].encode('utf-8'))\n except Exception as e:\n print(e)", "title": "" }, { "docid": "aa48b64c4fcd7219c55d73cb0c129616", "score": "0.5192178", "text": "def download_data():\r\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\r\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\r\n zfile = zipfile.ZipFile('ml-latest-small.zip')\r\n zfile.extractall()\r\n zfile.close()", "title": "" }, { "docid": "f3816f5d18c29c9f3b84006f43fe024b", "score": "0.51792496", "text": "def test_function2():\n filename = os.path.join(os.getcwd(), \n r\"MusicXML_files/Antonio Carlos Jobim - The Girl From Ipanema.mxl\")\n m = MusicXMLExtractor(filename)\n m.read_xml_from_zip()\n print(m.parse_melody_with_harmony())", "title": "" }, { "docid": "603de25b09ee7b7d02af13e4841313d0", "score": "0.51595587", "text": "def _download_model(self, lang_code, version):\n \n model_name = '{}-{}'.format(lang_code, version)\n model_path_cloud = os.path.join(self.cloud_path, '{}.zip'.format(model_name))\n model_path_local = os.path.join(self.disk_path, '{}.zip'.format(model_name))\n \n # Download and extract models for provided language. 
\n self._download_and_extract_lang_model(model_path_cloud, model_path_local) \n self.metadata.read(os.path.join(self.disk_path,lang_code+\"-\"+str(version),\"metadata.json\"))\n \n # Download Facebook embeddings based on the metadata read from the model\n self._download_embeddings(self.metadata.embeddings_remote_link, self.metadata.embeddings_file_name)\n sys.stdout.write(\"\\n\")", "title": "" }, { "docid": "f09695fcd95d3357125a51340776f15c", "score": "0.5153687", "text": "def load_results(filename):\n with bz2.open(filename, \"rb\") as fp:\n save_object = pickle.load(fp)\n\n # Rehydrate the Ignis/external models\n model_type = save_object[\"model_type\"]\n save_model = save_object[\"save_model\"]\n external_model_bytes = save_object[\"external_model_bytes\"]\n\n if model_type[:3] == \"tp_\":\n # Tomotopy model\n external_model = _load_tomotopy_model(model_type, external_model_bytes)\n else:\n raise ValueError(f\"Unknown model type: '{model_type}'\")\n\n save_model.model = external_model\n\n # Rehydrate the Aurum object\n aurum = Aurum(save_model)\n\n aurum.vis_type = save_object[\"vis_type\"]\n aurum.vis_options = save_object[\"vis_options\"]\n aurum.vis_data = save_object[\"vis_data\"]\n\n return aurum", "title": "" }, { "docid": "5db3b7427929af334ea54fb901d6589b", "score": "0.5132078", "text": "def manage_export_xliff(self, export_all, language, REQUEST, RESPONSE):\n fname = ('%s_%s.xlf'\n % (self.get_message_catalog()._default_language, language))\n # Generate the XLIFF file header\n RESPONSE.setHeader('Content-Type', 'text/xml; charset=UTF-8')\n RESPONSE.setHeader('Content-Disposition',\n 'attachment; filename=\"%s\"' % fname)\n export_tool = self.get_importexport_tool()\n return export_tool.export_xliff(language,\n export_all=bool(int(export_all)))", "title": "" }, { "docid": "74c6283a6da7fe0f98c9a93c63a9b755", "score": "0.51019067", "text": "def import_wiki_file(store, filename='wiki', bagname='wiki'):\n wikifile = codecs.open(filename, encoding='utf-8', errors='replace')\n wikitext = wikifile.read()\n wikifile.close()\n return import_wiki(store, wikitext, bagname)", "title": "" }, { "docid": "05cabcd6b7bca2f6cebb4fed03822d2e", "score": "0.5100825", "text": "def read_noaa_mbl_url(noaa_mbl_url, dest):\n import re\n\n from pathlib import Path\n\n import numpy as np\n import pandas as pd\n import pooch\n\n # save to temporary location with pooch\n print(\n f\"[SeaFlux] Downloading {noaa_mbl_url} to {dest} and reading in as pd.DataFrame\"\n )\n\n dest = Path(dest)\n fname = pooch.retrieve(\n url=noaa_mbl_url,\n known_hash=None,\n path=str(dest.parent),\n fname=str(dest.name),\n )\n\n # find start line\n is_mbl_surface = False\n for start_line, line in enumerate(open(fname)):\n if re.findall(\"MBL.*SURFACE\", line):\n is_mbl_surface = True\n if not line.startswith(\"#\"):\n break\n if not is_mbl_surface:\n raise Exception(\n \"The file at the provided url is not an MBL SURFACE file. \"\n \"Please check that you have provided the surface url. 
\"\n )\n\n # read fixed width file CO2\n df = pd.read_fwf(fname, skiprows=start_line, header=None, index_col=0)\n df.index.name = \"date\"\n # every second line is uncertainty\n df = df.iloc[:, ::2]\n # latitude is given as sin(lat)\n df.columns = np.rad2deg(np.arcsin(np.linspace(-1, 1, 41)))\n\n # resolve time properly\n year = (df.index.values - (df.index.values % 1)).astype(int)\n day_of_year = ((df.index.values - year) * 365 + 1).astype(int)\n date_strings = [\"{}-{:03d}\".format(*a) for a in zip(year, day_of_year)]\n date = pd.to_datetime(date_strings, format=\"%Y-%j\")\n df = df.set_index(date)\n df = df.iloc[:-1] # remove the last value that is for 2020-01-01\n\n # renaming indexes (have to stack for that)\n df = df.stack()\n index = df.index.set_names([\"time\", \"lat\"])\n df = df.set_axis(index)\n\n df.source = noaa_mbl_url\n\n return df", "title": "" }, { "docid": "286eb86bdeaa9a2eeaf50b574f2f61ac", "score": "0.5094112", "text": "def get_bilingual_file(\n self,\n project_id: int,\n job_uids: List[int],\n dest_file_path: str\n ) -> None:\n with open(dest_file_path, \"wb\") as f:\n for chunk in self._get_bilingual_stream(project_id, job_uids):\n f.write(chunk)", "title": "" }, { "docid": "279e799e0e8f37a072a0d56af5afe1f7", "score": "0.5079618", "text": "def fetch_glove() -> None:\n if not os.path.exists(\"res/glove.6B.50D.txt\"):\n if not os.path.exists(\"res/glove.zip\"):\n urllib.request.urlretrieve(\"http://nlp.stanford.edu/data/glove.6B.zip\", \"res/glove.zip\", _reporthook)\n\n with zipfile.ZipFile(\"res/glove.zip\") as zip_file:\n for member in zip_file.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n\n # copy file (taken from zipfile's extract)\n if member.endswith('6B.50d.txt'):\n source = zip_file.open(member)\n target = open(os.path.join(\"res\", filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)", "title": "" }, { "docid": "ab62c53cdd786a450147914cac07caa0", "score": "0.5052364", "text": "def download_html(target_directory: str, celex: str):\n celex_part = urllib.parse.quote_plus(celex)\n print(f\"Downloading HTML for celex {celex}\")\n file_name = celex.replace(\"/\", \"_\")\n file_name = file_name.replace(\"(\", \"_\")\n file_name = file_name.replace(\")\", \"_\")\n url = f\"http://publications.europa.eu/resource/celex/{celex_part}\"\n content_types = {\n \"text/html\": \".html\",\n \"application/xhtml+xml\": \".xhtml\",\n \"application/xml;type=fmx4\": \".xml\"\n }\n for content_type in content_types.keys():\n extension = content_types[content_type]\n # Generating file name\n file_path = path.join(target_directory, f\"{file_name}_full_text{extension}\")\n if path.exists(file_path):\n print(f\"File exists, skipping\")\n return\n # Building headers\n headers = {\n \"Accept\": content_type,\n \"Accept-Language\": \"en\"\n }\n response = requests.get(url, headers=headers)\n # If the content exists, it's ok\n if response.status_code == 200:\n with open(file_path, \"w\", encoding=\"utf-8\") as target:\n target.write(response.text)\n return\n elif response.status_code == 404:\n # Nothing to do\n print(f\"No content for {celex}\")\n elif response.status_code == 406:\n # No content of the request type\n print(f\"No content of {content_type} type for celex {celex}\")\n else:\n print(f\"Other status {response.status_code}\")", "title": "" }, { "docid": "4512c5912664013894176d2d1c51a6ab", "score": "0.5050754", "text": "def load(self, path, *args, **kwargs):\n\n\n self.MONTHMAP={\"January\":1, 
\"januari\": 1, \"February\":2, \"februari\":2,\"March\":3,\"maart\":3,\n \"April\":4, \"april\":4, \"mei\":5, \"May\":5, \"June\":6,\"juni\":6, \"July\":7, \"juli\":7,\n \"augustus\": 8, \"August\":8,\"september\":9,\"September\":9, \"oktober\":10,\"October\":10,\n \"November\":11,\"november\":11,\"December\":12,\"december\":12}\n self.SOURCENAMEMAP={'ad/algemeen dagblad (print)':'ad (print)',\n 'de telegraaf (print)': 'telegraaf (print)',\n 'de volkskrant (print)': 'volkskrant (print)',\n 'nrc handelsblad (print)': 'nrc (print)'}\n\n forced_encoding = kwargs.pop('encoding',False)\n exists = os.path.exists(path)\n if not exists:\n logger.warning(\"Unable to open {path} : DOES NOT EXIST\".format(path=path))\n else:\n is_dir = os.path.isdir(path)\n if not is_dir:\n list_of_files = glob(path)\n else:\n if path[-1]!='/': path+='/'\n list_of_files = glob(path + \"*.txt\") + glob(path + \"*.TXT\")\n if len(list_of_files) == 0:\n logger.error('There are no files to be process. Please use a valid, non-empty directory')\n article = 0\n for item in list_of_files:\n logger.info(\"Now processing file {}\".format(item))\n if not forced_encoding:\n encoding = _detect_encoding(item)\n else:\n encoding = forced_encoding\n with open(item, \"r\", encoding=encoding, errors=\"replace\") as f:\n if _detect_has_header(item, encoding):\n for skiplines in range(22):\n next(f)\n i = 0\n for line in f:\n i = i + 1\n line = line.replace(\"\\r\", \" \")\n if line == \"\\n\":\n continue\n matchObj = re.match(r\"\\s+(\\d+) of (\\d+) DOCUMENTS\", line) # beginning of a new article\n # dealing with different date notations\n matchObj2 = re.match(r\"\\s+(\\d{1,2}) (januari|februari|maart|april|mei|juni|juli|augustus|september|oktober|november|december) (\\d{4}) (maandag|dinsdag|woensdag|donderdag|vrijdag|zaterdag|zondag)\", line)\n matchObj2a = re.match(r\"\\s+(\\d{1,2}) ([jJ]anuari|[fF]ebruari|[mM]aart|[aA]pril|[mM]ei|[jJ]uni|[jJ]uli|[aA]ugustus|[sS]eptember|[Oo]ktober|[nN]ovember|[dD]ecember) (\\d{4}).*\", line)\n matchObj3 = re.match(r\"\\s+(January|February|March|April|May|June|July|August|September|October|November|December) (\\d{1,2}),? (\\d{4})\", line)\n matchObj4 = re.match(r\"\\s+(\\d{1,2}) (January|February|March|April|May|June|July|August|September|October|November|December) (\\d{4}) (Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)\", line)\n matchObj4a = re.match(r\"\\s+(\\d{1,2}) (January|February|March|April|May|June|July|August|September|October|November|December) (\\d{4}).*\", line)\n if matchObj:\n # new article starts\n if article > 0:\n # yield article, but not before we processed the first one\n formattedsource = \"{} (print)\".format(journal2.lower())\n formattedsource = self.SOURCENAMEMAP.get(formattedsource, formattedsource) # rename source if necessary\n # minimal fields to be returned. These really need to be present\n try:\n art = {\n \"title\":title.strip(),\n \"doctype\": formattedsource,\n \"text\":text,\n \"publication_date\":datetime.datetime(int(pubdate_year),int(pubdate_month),int(pubdate_day)),\n \"suspicious\":check_suspicious(text)\n }\n except Exception as e:\n logger.error('Error processing article number {}. 
Yielding an empty article'.format(article))\n # add fields where it is okay if they are absent\n if len(section)>0:\n art[\"category\"] = section.lower()\n if len(byline)>0:\n art[\"byline\"] = byline\n \n yield art\n\n article += 1\n if article%50 == 0:\n logger.info('{} articles processed so far'.format(article))\n # logger.info('Now processing article {}'.format(article))\n\n istitle=True #to make sure that text before mentioning of SECTION is regarded as title, not as body\n firstdate=True # flag to make sure that only the first time a date is mentioned it is regarded as _the_ date\n text = \"\"\n title = \"\"\n byline = \"\"\n section = \"\"\n length = \"\"\n loaddate = \"\"\n language = \"\"\n pubtype = \"\"\n journal = \"\"\n journal2=\"\"\n pubdate_day = \"\"\n pubdate_month = \"\"\n pubdate_year = \"\"\n pubdate_dayofweek = \"\"\n suspicious=True\n \n while True:\n nextline=next(f)\n if nextline.strip()!=\"\":\n journal2=nextline.strip()\n break\n continue\n \n if line.startswith(\"BYLINE\"):\n byline = line.replace(\"BYLINE: \", \"\").rstrip(\"\\n\")\n elif line.startswith(\"SECTION\"):\n istitle=False # everything that follows will be main text rather than title if no other keyword is mentioned\n section = line.replace(\"SECTION: \", \"\").rstrip(\"\\n\")\n elif line.startswith(\"LENGTH\"):\n length = line.replace(\"LENGTH: \", \"\").rstrip(\"\\n\").rstrip(\" woorden\")\n elif line.startswith(\"LOAD-DATE\"):\n loaddate = line.replace(\"LOAD-DATE: \", \"\").rstrip(\"\\n\")\n elif matchObj2 and firstdate==True:\n # print matchObj2.string\n pubdate_day=matchObj2.group(1)\n pubdate_month=str(self.MONTHMAP[matchObj2.group(2)])\n pubdate_year=matchObj2.group(3)\n pubdate_dayofweek=matchObj2.group(4)\n firstdate=False\n elif matchObj2a and firstdate==True:\n # print matchObj2.string\n pubdate_day=matchObj2a.group(1)\n pubdate_month=str(self.MONTHMAP[matchObj2a.group(2).lower()])\n pubdate_year=matchObj2a.group(3)\n firstdate=False\n elif matchObj3 and firstdate==True:\n pubdate_day=matchObj3.group(2)\n pubdate_month=str(self.MONTHMAP[matchObj3.group(1)])\n pubdate_year=matchObj3.group(3)\n pubdate_dayofweek=\"NA\"\n firstdate=False\n elif matchObj4 and firstdate==True:\n pubdate_day=matchObj4.group(1)\n pubdate_month=str(self.MONTHMAP[matchObj4.group(2)])\n pubdate_year=matchObj4.group(3)\n pubdate_dayofweek=matchObj4.group(4)\n firstdate=False\n elif matchObj4a and firstdate==True:\n pubdate_day=matchObj4a.group(1)\n pubdate_month=str(self.MONTHMAP[matchObj4a.group(2)])\n pubdate_year=matchObj4a.group(3)\n firstdate=False\n\n elif (matchObj2 or matchObj2a or matchObj3 or matchObj4 or matchObj4a) and firstdate==False:\n # if there is a line starting with a date later in the article, treat it as normal text\n text = text + \" \" + line.rstrip(\"\\n\")\n elif line.startswith(\"LANGUAGE\"):\n language = line.replace(\"LANGUAGE: \", \"\").rstrip(\"\\n\")\n elif line.startswith(\"PUBLICATION-TYPE\"):\n pubtype = line.replace(\"PUBLICATION-TYPE: \", \"\").rstrip(\"\\n\")\n elif line.startswith(\"JOURNAL-CODE\"):\n journal = line.replace(\"JOURNAL-CODE: \", \"\").rstrip(\"\\n\")\n elif line.lstrip().startswith(\"Copyright \") or line.lstrip().startswith(\"All Rights Reserved\"):\n pass\n elif line.lstrip().startswith(\"AD/Algemeen Dagblad\") or line.lstrip().startswith(\n \"De Telegraaf\") or line.lstrip().startswith(\"Trouw\") or line.lstrip().startswith(\n \"de Volkskrant\") or line.lstrip().startswith(\"NRC Handelsblad\") or line.lstrip().startswith(\n \"Metro\") or 
line.lstrip().startswith(\"Spits\"):\n pass\n else:\n if istitle:\n title = title + \" \" + line.rstrip(\"\\n\")\n else:\n text = text + \" \" + line.rstrip(\"\\n\")\n\n # yield the very last article of the whole set \n formattedsource = \"{} (print)\".format(journal2.lower())\n formattedsource = self.SOURCENAMEMAP.get(formattedsource, formattedsource) # rename source if necessary\n # minimal fields to be returned. These really need to be present\n art = {\n \"title\":title.strip(),\n \"doctype\": formattedsource,\n \"text\":text,\n \"publication_date\":datetime.datetime(int(pubdate_year),int(pubdate_month),int(pubdate_day)),\n }\n # add fields where it is okay if they are absent\n if len(section)>0:\n art[\"category\"] = section.lower()\n if len(byline)>0:\n art[\"byline\"] = byline\n yield art", "title": "" }, { "docid": "da049539cb3741029844b13d3a37247c", "score": "0.5045592", "text": "def getfile(self):\n license = self.registryValue(\"licenseKey\")\n u = \"https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key={}&suffix=tar.gz\".format(\n license\n )\n f = \"%s%sgeo%sGeoLite2-City.tar.gz\" % (\n conf.supybot.directories.data(),\n os.sep,\n os.sep,\n )\n f2 = \"%s%sgeo%sGeoLite2-City.tar\" % (\n conf.supybot.directories.data(),\n os.sep,\n os.sep,\n )\n self.make_sure_path_exists(\n r\"%s%sgeo\" % (conf.supybot.directories.data(), os.sep)\n )\n path = \"%s%sgeo\" % (conf.supybot.directories.data(), os.sep)\n self.log.info(\"Starting download: %s\" % f)\n h = utils.web.getUrl(u)\n if h:\n tempfile = open(f, \"w+b\")\n if tempfile:\n tempfile.write(h)\n tempfile.close()\n self.log.info(\"Completed: %s\" % f)\n self.log.info(\"Unzipping: %s\" % f)\n f_in = gzip.open(f, \"rb\")\n f_out = open(f2, \"wb\")\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n self.log.info(\"Finished Unzipping: %s\" % f)\n self.log.info(\"Untarring: %s\" % f2)\n tar = tarfile.open(f2)\n tar.getmembers()\n for member in tar.getmembers():\n if \"GeoLite2-City.mmdb\" in member.name:\n member.name = \"GeoLite2-City.mmdb\"\n self.log.info(member.name)\n tar.extract(member, path=path)\n self.log.info(\"Finished Untarring: %s\" % f2)\n tar.close()\n os.remove(f)\n os.remove(f2)\n else:\n self.log.info(\"Could not download: %s\" % f)\n return", "title": "" }, { "docid": "5c262e6e927f9f59933ab77d0b266f77", "score": "0.50408816", "text": "def _get_bilingual_stream(self, project_id: int, job_uids: List[str]) -> Iterator[bytes]:\n return self._post_stream(\n path=\"v1/projects/{}/jobs/bilingualFile\".format(project_id),\n data={\"jobs\": [{\"uid\": job_uid} for job_uid in job_uids]},\n ).iter_content(constants.CHUNK_SIZE)", "title": "" }, { "docid": "28fa00f3231fe845b6b273f4f39145e5", "score": "0.50342464", "text": "def _download(args):\n base = \"http://lyrics.wikia.com/api.php?\"\n args = urllib.urlencode(args)\n text= _json_formatting(urllib.urlopen(base + args).read())\n return text", "title": "" }, { "docid": "b1b81c76bb9ef250bf2477deac2c84a8", "score": "0.50238836", "text": "def english_xml_parser(language,shorts,infile,outfile):\n language_capitalized = language[0].upper() + language[1:] # for matching in text\n tree = ET.parse(infile)\n root = tree.getroot() \n for child in root:\n for child2 in child:\n if child.tag == '{http://www.mediawiki.org/xml/export-0.10/}page':\n for grandchild in child2: # the \"page\" part of the xml file\n if grandchild.tag == \"{http://www.mediawiki.org/xml/export-0.10/}title\":\n title = grandchild.text # this is the title of the entry -> the word, 
this happens in the first iteration\n grandchild.clear()\n elif grandchild.tag == \"{http://www.mediawiki.org/xml/export-0.10/}revision\":\n for element in grandchild.findall(\"{http://www.mediawiki.org/xml/export-0.10/}text\"): # this is the case for every iteration after the first one\n # we are talking about the text-part containing the languages, not the one containing information on flection\n text_wiki = element.text\n if text_wiki: # when there is any text in this part of the tree\n for textbit in text_wiki.split('----'): \n if \"==\" + language_capitalized + \"==\" in textbit: # find the section for the current language (English Spanish German)\n if \"===Etymology\" not in textbit: # when there is no etymology\n for cat in textbit.split(\"\\n==\"): # find the different categories in this subtree\n if title != None:\n write_file(language,shorts,title,outfile,cat)\n else:\n segments = textbit.split('===Etymology') # find etymology\n for segment in segments: # for each part of the etymology\n if segment.startswith(\"===\\n\"): # find the categories\n for cat in segment.split(\"\\n==\"): # for each category\n if title != None:\n write_file(language,shorts,title,outfile,cat)\n elif re.match(\"\\s*\\d+===\",segment): # find other kinds of categories\n for cat in segment.split(\"\\n==\"): # for each category\n if title != None:\n write_file(language,shorts,title,outfile,cat)\n else:\n element.clear()\n else:\n grandchild.clear()", "title": "" }, { "docid": "4458530fab643296393e2397292f9cfb", "score": "0.5019753", "text": "def load(self, file):", "title": "" }, { "docid": "2e0805df7482332bd8e0dbb35c1e20aa", "score": "0.49949676", "text": "def get_bilingual_file_xml(self, project_id: int, job_uids: List[str]) -> bytes:\n buffer = io.BytesIO()\n\n for chunk in self._get_bilingual_stream(project_id, job_uids):\n buffer.write(chunk)\n\n return buffer.getvalue()", "title": "" }, { "docid": "121660394d34d0f7bce070d2f34ef20e", "score": "0.4988048", "text": "def _custom_download_file(self, url, path):\n\n test_folder = \"/tmp/test_Relationships_Parser/\"\n if not os.path.exists(test_folder):\n os.makedirs(test_folder)\n test_path = test_folder + \"1.decompressed\"\n test_file = [\"1|11537|0|bgp\\n\",\n \"1|21616|-1|bgp\\n\",\n \"1|34732|-1|bgp\\n\",\n \"1|41387|-1|bgp\\n\",\n \"1|44222|0|bgp\"]\n with open(test_path, \"w\") as test:\n test.writelines(test_file)", "title": "" }, { "docid": "4f3c152a891f02c45cb5a0369b41f093", "score": "0.49755517", "text": "def unpack(lang_completed):\n os.path.isdir(DEST_DIR) and shutil.rmtree(DEST_DIR)\n zipfile.ZipFile(FILENAME).extractall(DEST_DIR)\n for root, dirs, files in os.walk(DEST_DIR):\n for name in files:\n if lang_completed[name.split('.')[0]] != 0:\n shutil.move(os.path.join(root, name), DEST_DIR)\n # Prepend all JSONs with Ember declaration.\n with open(os.path.join(DEST_DIR, name), 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write('/* eslint-disable */\\nynabToolKit.l10nData = ' + content)\n for root, dirs, files in os.walk(DEST_DIR):\n for name in dirs:\n shutil.rmtree(os.path.join(root, name))\n os.remove(FILENAME)", "title": "" }, { "docid": "e003fee578c3f3836ac1d953feb183f6", "score": "0.4966552", "text": "def testDownload1MBFile(self):\n download_url = 'http://172.22.12.98:8080/1024.lf'\n self._ConnectToRouterAndVerify('Nfiniti')\n self._DownloadAndVerifyFile(download_url)\n self.DisconnectFromWifiNetwork()", "title": "" }, { "docid": "d95fd1b760f25b58335fa0df6178b934", "score": "0.49641407", "text": "def extract(self, 
targetfile):\n\t\tself.log('* Downloading file {0} of {1}'.format(self.file_no, self.no_files))\n\n\n\t\tf_download = 'http://oceandata.sci.gsfc.nasa.gov/cgi/getfile/{}.nc'.format(os.path.basename(targetfile))\n\t\tf_compress = '{}.nc'.format(targetfile)\n\t\tf_uncompress = targetfile\n\n\t\tself.log(\"* Dataset: \" + os.path.basename(targetfile))\n\n\t\ttry:\n\t\t\tthefile = urllib2.urlopen(f_download)\n\t\t\tself.log(\"File size: \" + str(thefile.info().getheader('Content-Length').strip()) )\n\t\t\tself.chunk_read(f_compress, thefile) # Instead of the open/write/close trio\n\n\t\t\treturn f_compress\n\n\t\texcept urllib2.URLError, e: \t\t\n\t\t\tself.log(\"URLError - could not download file\\n\")\n\t\t\treturn 1", "title": "" }, { "docid": "ece6685127908376ae6220c5c937e275", "score": "0.49604794", "text": "def download_notice(target_directory: str, celex: str):\n celex_part = urllib.parse.quote_plus(celex)\n url = f\"http://publications.europa.eu/resource/celex/{celex_part}\"\n headers = {\n \"Accept\": \"application/xml;notice=branch\",\n \"Accept-Language\": \"en\"\n }\n print(f\"Downloading notice for celex {celex}\")\n file_name = celex.replace(\"/\", \"_\")\n file_name = file_name.replace(\"(\", \"_\")\n file_name = file_name.replace(\")\", \"_\")\n file_path = path.join(target_directory, f\"{file_name}_notice.xml\")\n if path.exists(file_path):\n print(f\"File exists, skipping\")\n return\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n with open(file_path, \"w\", encoding=\"utf-8\") as target:\n target.write(response.text)", "title": "" }, { "docid": "10eb971af73949ae9a6cec3f57bd42cf", "score": "0.4959707", "text": "def download_data(self):\r\n\t\tdownload_path = os.getcwd()\r\n\t\tpath = tf.keras.utils.get_file('tiny-imagenet-200.zip', extract=True, cache_subdir=download_path, \r\n\t\t\t\torigin='http://cs231n.stanford.edu/tiny-imagenet-200.zip')", "title": "" }, { "docid": "996061d67329f057aaa8acb8b693a458", "score": "0.49466616", "text": "def fetch_difumo(dimension=64, resolution_mm=2, data_dir=None):\r\n dic = {64: 'pqu9r',\r\n 128: 'wjvd5',\r\n 256: '3vrct',\r\n 512: '9b76y',\r\n 1024: '34792',\r\n }\r\n valid_dimensions = [64, 128, 256, 512, 1024]\r\n valid_resolution_mm = [2, 3]\r\n if dimension not in valid_dimensions:\r\n raise ValueError(\"Requested dimension={} is not available. Valid \"\r\n \"options: {}\".format(dimension, valid_dimensions))\r\n if resolution_mm not in valid_resolution_mm:\r\n raise ValueError(\"Requested resolution_mm={} is not available. 
Valid \"\r\n \"options: {}\".format(resolution_mm,\r\n valid_resolution_mm))\r\n url = 'https://osf.io/{}/download'.format(dic[dimension])\r\n opts = {'uncompress': True}\r\n\r\n csv_file = os.path.join('{0}', 'labels_{0}_dictionary.csv')\r\n if resolution_mm != 3:\r\n nifti_file = os.path.join('{0}', '2mm', 'maps.nii.gz')\r\n else:\r\n nifti_file = os.path.join('{0}', '3mm', 'maps.nii.gz')\r\n\r\n files = [(csv_file.format(dimension), url, opts),\r\n (nifti_file.format(dimension), url, opts)]\r\n\r\n dataset_name = 'difumo_atlases'\r\n\r\n data_dir = _get_dataset_dir(data_dir=data_dir, dataset_name=dataset_name,\r\n verbose=1)\r\n\r\n # Download the zip file, first\r\n files = _fetch_files(data_dir, files, verbose=2)\r\n labels = pd.read_csv(files[0])\r\n\r\n # README\r\n readme_files = [('README.md', 'https://osf.io/4k9bf/download',\r\n {'move': 'README.md'})]\r\n if not os.path.exists(os.path.join(data_dir, 'README.md')):\r\n _fetch_files(data_dir, readme_files, verbose=2)\r\n\r\n return Bunch(maps=files[1], labels=labels)", "title": "" }, { "docid": "94a8935fc2e4599ed818fdf7366549a6", "score": "0.4945124", "text": "def from_gutenberg(file_id,threshold): \n url = 'http://www.gutenberg.org/files/'+str(file_id)+'/'+str(file_id)+'.txt'\n response = request.urlopen(url)\n raw = response.read().decode('utf8')\n txt = word_tokenize(raw)\n L = [word.lower() for word in txt]\n \n generate_net(L,threshold)", "title": "" }, { "docid": "9e29c0250820d4636ec11d9a770a95d1", "score": "0.49375397", "text": "def download_mitie_fe_file(fe_file): # pragma: no cover\n # type: (Text) -> None\n\n logger.info(\"Downloading MITIE feature extractor files\")\n _fe_file_url = \"https://s3-eu-west-1.amazonaws.com/mitie/total_word_feature_extractor.dat\"\n logger.info(\"Downloading from {}\".format(_fe_file_url))\n response = requests.get(_fe_file_url, stream=True)\n\n with io.open(fe_file, \"wb\") as output:\n for data in tqdm(response.iter_content(chunk_size=1024*1024), unit='MB', unit_scale=True):\n output.write(data)\n logger.debug(\"file written! {0}, {1}\".format(fe_file, os.path.exists(fe_file)))", "title": "" }, { "docid": "fa9dc5c9fdd7e4aeda382970e01156ce", "score": "0.4937418", "text": "def fetch_translation(featurepage_id, locale):\n fp = FeaturePage.objects.get(id=featurepage_id)\n if fp.status in (fp.STATUS_PARSING, fp.STATUS_PARSED, fp.STATUS_NO_DATA):\n # Already fetched\n t = TranslatedContent.objects.get(page=fp, locale=locale)\n assert t.status == t.STATUS_FETCHED, t.get_status_display()\n return\n assert fp.status == fp.STATUS_PAGES, fp.get_status_display()\n t = TranslatedContent.objects.get(page=fp, locale=locale)\n assert t.status == t.STATUS_STARTING, t.get_status_display()\n\n # Avoid double fetching\n t.status = t.STATUS_FETCHING\n t.save(update_fields=['status'])\n\n # Request the translation\n url = t.url() + '?raw'\n r = requests.get(t.url() + '?raw', headers={'Cache-Control': 'no-cache'})\n t.raw = r.text\n if r.status_code == requests.codes.ok:\n t.status = t.STATUS_FETCHED\n else:\n t.status = t.STATUS_ERROR\n issue = ((\n 'failed_download', 0, 0,\n {'url': url, 'status': r.status_code, 'content': r.text[:100]}))\n fp.add_issue(issue)\n fp.save()\n t.save()\n\n fetch_all_translations.delay(fp.id)", "title": "" }, { "docid": "acdcd55546e72f4549f55b1c6750b259", "score": "0.49291393", "text": "def download_model(lang_id, if_exists='strict', dimension=None):\n if lang_id not in valid_lang_ids:\n raise Exception(\"Invalid lang id. 
Please select among %s\" %\n repr(valid_lang_ids))\n\n file_name = \"cc.%s.300.bin\" % lang_id\n gz_file_name = \"%s.gz\" % file_name\n\n if os.path.isfile(file_name):\n if if_exists == 'ignore':\n return file_name\n elif if_exists == 'strict':\n print(\"File exists. Use --overwrite to download anyway.\")\n return\n elif if_exists == 'overwrite':\n pass\n\n if _download_gz_model(gz_file_name, if_exists):\n with gzip.open(gz_file_name, 'rb') as f:\n with open(file_name, 'wb') as f_out:\n shutil.copyfileobj(f, f_out)\n\n return file_name", "title": "" }, { "docid": "753fb2078991e7dcd96ab214580c2084", "score": "0.4919247", "text": "def testDownload100MBFile(self):\n download_url = 'http://172.22.12.98:8080/102400.lf'\n self._ConnectToRouterAndVerify('Trendnet_639gr')\n self._DownloadAndVerifyFile(download_url)\n self.DisconnectFromWifiNetwork()", "title": "" }, { "docid": "2874f2aa91bd789c259643d37658f95a", "score": "0.4911394", "text": "def donwload_l10ns():\n url = API_PREFIX + 'download/' + FILENAME + KEY_SUFFIX\n l10ns_file = urllib2.urlopen(url)\n with open('all.zip','wb') as f:\n f.write(l10ns_file.read())\n return True", "title": "" }, { "docid": "6b72eedb67c52cbabb3ef42d66907cde", "score": "0.49093226", "text": "def DownstreamBinaryLoadUnit(self):\r\n\t\treturn self._get_attribute('downstreamBinaryLoadUnit')", "title": "" }, { "docid": "f16f2451f7573f9c9ef4607e5db2dc85", "score": "0.4906064", "text": "def get_file_bytes_to_recognize(self, file_url):\n \n response = requests.get(file_url, stream=True)\n buff = response.raw.read()\n del response\n \n return buff", "title": "" }, { "docid": "c85198f01254e3e6dcbdae425ff52ae8", "score": "0.48963046", "text": "def download_model(url):\n url = hb_util.correct_url(url)\n resp = requests.get(url)\n return BytesIO(resp.content)", "title": "" }, { "docid": "a83554048204e57079229c2f37b6f4e5", "score": "0.4894903", "text": "def _file_downloaded(self, work_message, url, status='ok'):\n work_message['url'] = url\n if work_message['autoext']:\n fname = ext_decorator(self.magicf, work_message['output'])[1]\n work_message['output'] = fname\n if work_message['hash']:\n work_message['hash'] = hashfile(work_message['output'])\n work_message['size'] = os.path.getsize(work_message['output'])\n if work_message['size'] == 0:\n if self.empty_error:\n work_message['status'] = 'error'\n work_message['error'] = 'Empty file'\n os.remove(work_message['output'])\n _LOGGER.debug(\"%s empty file removed\", work_message['output'])\n else:\n work_message['status'] = status\n _LOGGER.debug('Empty file: %s', work_message['output'])\n else:\n work_message['status'] = status\n self.results.append(work_message)\n _LOGGER.info(\"[%s] downloaded from %s to %s\",\n work_message['status'],\n work_message['url'],\n work_message['output'])", "title": "" }, { "docid": "89acf9bce862f2f0ead7b73422ec7bbb", "score": "0.48790172", "text": "def download_model():\n with requests.get(DOWNLOAD_LINK, stream=True) as r:\n r.raise_for_status()\n with open(\"model-best.zip\", \"wb\") as f:\n for chunk in r.iter_content(chunk_size=8192):\n f.write(chunk)\n\n with zipfile.ZipFile(\"model-best.zip\", \"r\") as f:\n f.extractall()", "title": "" }, { "docid": "3ca6a1b3fad2935f0a5d30dc0675aa77", "score": "0.48750618", "text": "def download_one(self, d):\n # this will keep this method usable for any arbitrary date.\n d = mkdate(d)\n # ex_url = \"https://www.nseindia.com/content/historical/EQUITIES/2011/NOV/cm08NOV2011bhav.csv.zip\"\n url = self.get_bhavcopy_url(d)\n print(url)\n filename = 
self.get_bhavcopy_filename(d)\n # response = requests.get(url, headers=self.headers)\n response = self.nse.opener.open(Request(url, None, self.nse.headers))\n zip_file_handle = io.BytesIO(response.read())\n zf = zipfile.ZipFile(zip_file_handle)\n return zf.read(filename).decode(\"utf-8\")", "title": "" }, { "docid": "6e74683d733c7e66b515bab3a322fa09", "score": "0.48679146", "text": "def getTestFile(self, fname):\n try: open(fname,'r')\n except: return ''\n\n with open(fname, 'rb') as handle:\n return xmlrpclib.Binary(handle.read())", "title": "" }, { "docid": "a8480d4b8d84c9fccc5f6f399ea3005b", "score": "0.48656306", "text": "def load_results(self, file=None):\n file_flag = False\n\n if not file:\n file = self.zip_file\n try:\n m = self.unzip_file(file)\n if m:\n return self.msg\n except OSError:\n raise\n\n # look for .HY3 file\n for file in os.listdir(self.zip_extract_dir):\n if file.endswith('.HY3') or file.endswith('.hy3'):\n file_flag = True\n file = os.path.join(self.zip_extract_dir, file)\n with open(os.path.abspath(file), 'r') as f_hy3:\n self.parse_results(f_hy3)\n\n if not file_flag:\n # return error message if no .HY3 file exists\n self.msg.append(('error', 'Couldn\\'t find .HY3 file in zip file'))\n\n return self.msg", "title": "" }, { "docid": "d4500c166be7310b718e2d37a26463d3", "score": "0.4854234", "text": "def Download(self, url):\n print 'Downloading: %s' % url\n leaf = url.split('/')[-1]\n tmpdir = tempfile.mkdtemp('.buildman')\n response = urllib2.urlopen(url)\n fname = os.path.join(tmpdir, leaf)\n fd = open(fname, 'wb')\n meta = response.info()\n size = int(meta.getheaders('Content-Length')[0])\n done = 0\n block_size = 1 << 16\n status = ''\n\n # Read the file in chunks and show progress as we go\n while True:\n buffer = response.read(block_size)\n if not buffer:\n print chr(8) * (len(status) + 1), '\\r',\n break\n\n done += len(buffer)\n fd.write(buffer)\n status = r'%10d MiB [%3d%%]' % (done / 1024 / 1024,\n done * 100 / size)\n status = status + chr(8) * (len(status) + 1)\n print status,\n sys.stdout.flush()\n fd.close()\n if done != size:\n print 'Error, failed to download'\n os.remove(fname)\n fname = None\n return tmpdir, fname", "title": "" }, { "docid": "c68435e49f0168406a4e7acdea7fb0ab", "score": "0.48541513", "text": "def leer_texto(texto):\n with open(texto, 'r') as text:\n return text.read()\n # Descargando copus de wikipedia", "title": "" }, { "docid": "6c8d6072de30d3331c1272bc56a81265", "score": "0.48482054", "text": "def load_liwc(filename):\n fh = open(filename)\n percent_mark_count = 0\n for line in fh:\n if line.strip() == '%':\n percent_mark_count += 1 \n continue\n if percent_mark_count < 2:\n #We are in an area of the file describing the categories and their ids\n cat = line.strip().split()\n cat_id_map[cat[1]] = cat[0]\n categories[cat[0]] = set() #initialize the second structure now\n elif percent_mark_count >= 2:\n #We are in the bottom half of the file\n items = line.strip().split()\n cats = items[1:]\n word = items[0]\n word = word.replace('*', '.*')\n word = '^' + word + '$' #make it a real regex\n for cat_id in cats:\n categories[cat_id].add(word)\n s = set()\n for cat in depression_categories:\n s = s.union(categories[cat_id_map[cat]])\n depression_words = list(s)\n q = set()\n for cat in cognitive_categories:\n q = q.union(categories[cat_id_map[cat]])\n cognitive_words = list(q)\n print \"Loaded emotion.categories and emotion.cat_id_map structures...\"\n return (depression_words, cognitive_words)", "title": "" }, { "docid": 
"5601c367b654788c53073d292faecbf3", "score": "0.48468494", "text": "def download_word_embeddings_nl() -> None:\n print('--- Beginning word embedding file download ---')\n url = 'https://www.clips.uantwerpen.be/dutchembeddings/combined-320.tar.gz'\n with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,\n desc=url.split('/')[-1]) as t:\n file_tmp = urlretrieve(url, filename=None, reporthook=t.update_to)[0]\n t.total = t.n\n\n base_name = os.path.basename(url)\n file_name, file_extension = os.path.splitext(base_name)\n tar = tarfile.open(file_tmp)\n tar.extractall(ROOT_DIR+'/resources/word_embeddings/'+file_name)\n return None", "title": "" }, { "docid": "88c22b5405c3278e7b110f97921758e6", "score": "0.484456", "text": "def test1b_readfromfile(self):\n global FOLIAEXAMPLE\n #write example to file\n f = bz2.BZ2File(os.path.join(TMPDIR,'foliatest.xml.bz2'),'w')\n f.write(FOLIAEXAMPLE.encode('utf-8'))\n f.close()\n\n doc = folia.Document(file=os.path.join(TMPDIR,'foliatest.xml.bz2'))\n self.assertTrue(isinstance(doc,folia.Document))\n\n #sanity check: reading from file must yield the exact same data as reading from string\n doc2 = folia.Document(string=FOLIAEXAMPLE)\n self.assertEqual( doc, doc2)", "title": "" }, { "docid": "d9aca3900025d53f20f111b45524f5c4", "score": "0.4837986", "text": "def testDownload10MBFile(self):\n download_url = 'http://172.22.12.98:8080/10240.lf'\n self._ConnectToRouterAndVerify('Belkin_N+')\n self._DownloadAndVerifyFile(download_url)\n self.DisconnectFromWifiNetwork()", "title": "" }, { "docid": "b5233d34ec0ad86c16d365ae41b52180", "score": "0.48310938", "text": "def maybe_download():\n\n print(\"Downloading Inception v3 Model ...\")\n download.maybe_download_and_extract(url=data_url, download_dir=data_dir)", "title": "" }, { "docid": "b40bc191a69a7c0f8387e2e11e34a3d3", "score": "0.48303777", "text": "def download_models(self, lang_code):\n sys.stdout.write('TODO: Downloading models for ' + lang_code + \"\\n\")", "title": "" }, { "docid": "b56076a8b8171fd2d4719b66fece27b2", "score": "0.48273864", "text": "def load_unit(self):\n\n pass", "title": "" }, { "docid": "08fdf4a28e8aaf2d04d56f3cca888891", "score": "0.48273352", "text": "def load_unit(self):\n # TODO: verify variables, import simson's CEF_UNIT class, textFile might only work with single file not directory. 
needs testing.\n spark = SparkSession.builder.getOrCreate()\n return spark.sparkContext.textFile(self.unit_path).map(CEF_UNIT).map(unit_keyed_by_oidtb)", "title": "" }, { "docid": "508f3875a5613e8a9d614a0b9224999d", "score": "0.4817055", "text": "def downloader(inegi_csv,folder):\n df=pandas.read_csv(inegi_csv,\n na_values=[\"\"],\n usecols=[\"upc\", \"titulo\",\"formatos\",\"escala\",\"url\"])\n \n df=df[df['formatos'].str.contains('grid')]\n urls=[\"https://www.inegi.org.mx/\"+str(url) for url in df['url'].to_list()]\n session=requests_html.HTMLSession()\n \n rexp=re.compile(\".*gr.zip*\")\n \n for url in urls:\n r=session.get(url)\n r.html.render()\n links=list(filter(rexp.match,r.html.absolute_links))\n \n try:\n logging.info(\"Downloading %s\", links[0]) \n wget.download(links[0],folder)\n except Exception as e:\n logging.info(\"Download of %s failed.\", links)\n \n logging.info(\"Done running downloader\")", "title": "" }, { "docid": "9fb634a4f05d27a3b56c589eef8e12a0", "score": "0.48104805", "text": "def load_from_url(self,url,save_as_file=None):\n\n #The accept incoding part is to disable gzip\n fp=requests.get(url,steam=True, headers={'Accept-Encoding': None}).raw\n if save_as_file is None:\n return self.load_from_file(fp)\n else:\n with open(save_as_file,\"wb+\") as file:\n while True:\n buffer=fp.read(81920)\n if not buffer:\n break\n file.write(buffer)\n with open(save_as_file,\"r\") as fp:\n return self.load_from_file(fp)", "title": "" }, { "docid": "a1e52ea167422bb0585797e85640a73a", "score": "0.48079133", "text": "def parse(self, filename):\n with open(filename, 'r') as file:\n data = file.read()\n\n return unicode(data.decode('utf-8', 'replace'))", "title": "" }, { "docid": "c8fe9f7b89bf4eeaa30d2b5e45b5bf2d", "score": "0.48052207", "text": "def test_valid_download_file(eld):\n file = eld.get_data(\"little-text-file\")\n assert os.path.isfile(file)", "title": "" }, { "docid": "e38ec6b0406ab72cdec9a6289deef4fe", "score": "0.48007974", "text": "def load_wiktionary(configuration, verbose=0):\n\n df = pandas.read_csv(configuration['wiktionary_translations_path'],\n sep='\\t', usecols=['ID', 'Concept_ID', 'Concept', 'Languoid', 'Language_name', 'Form'])\n\n\n if verbose:\n print()\n print('number of available languages', len(set(df.Language_name)))\n print('language that have Dutch in the name')\n for language in set(df.Language_name):\n if 'Dutch' in language:\n print(language)\n print('we use: Dutch; Flemish')\n\n df = df[df.Language_name == 'Dutch; Flemish']\n\n english_lemmas = []\n english_definitions = []\n\n for index, row in df.iterrows():\n concept = row['Concept']\n lemma, *definitions = concept.split('/')\n english_lemmas.append(lemma)\n english_definitions.append('/'.join(definitions))\n\n df['English_lemma'] = english_lemmas\n\n dutch2english = defaultdict(set)\n english2dutch = defaultdict(set)\n\n for index, row in df.iterrows():\n english_lemma = row['English_lemma']\n dutch_lemma = row['Form']\n dutch2english[dutch_lemma].add(english_lemma)\n english2dutch[english_lemma].add(dutch_lemma)\n\n if verbose:\n print(f'Dutch lemmas with English translations: {len(dutch2english)}')\n print(f'English lemmas with Dutch translations: {len(english2dutch)}')\n\n return dutch2english, english2dutch", "title": "" }, { "docid": "eb697cb2ca249f145677b015eb11af7c", "score": "0.47891998", "text": "def test_get_extract(self):\n with requests_mock.Mocker() as mocker:\n mt_st_michel = WikiRequest(48.636063, -1.511457)\n result = 
'{\"batchcomplete\":\"\",\"warnings\":{\"extracts\":{\"*\":\"\"}},' \\\n '\"query\":{\"pages\":{\"1187468\":{\"pageid\":1187468,\"ns\":0,\"title\":\"Le Mont-Saint-Michel\",' \\\n '\"extract\":\"Le Mont-Saint-Michel est une commune française située dans le département de la ' \\\n 'Manche en Normandie. Elle tire son nom de l\\'îlot rocheux consacré à saint Michel où ' \\\n 's’élève aujourd’hui l’abbaye du Mont-Saint-Michel. L’architecture du Mont-Saint-Michel et ' \\\n 'sa baie en font le site touristique le plus fréquenté de Normandie et l\\'un des dix plus ' \\\n 'fréquentés en France — premier site après ceux d\\'Île-de-France — avec près de deux millions ' \\\n 'et demi de visiteurs chaque année (3 250 000 en 2006, 2 300 000 en 2014).\"}}}}'\n url_extract = \"https://fr.wikipedia.org/w/api.php?action=query&pageids=1187468\" \\\n \"&prop=extracts&explaintext=true&exsectionformat=plain&exsentences=3&format=json\"\n page_id_result = '{\"query\": {\"geosearch\": [{\"pageid\": 1187468}]}}'\n mocker.get(mt_st_michel.url_geoloc, text=page_id_result)\n mocker.get(url_extract, text=result)\n assert mt_st_michel.get_extract() == \"Le Mont-Saint-Michel est une commune française située \" \\\n \"dans le département de la Manche en Normandie. Elle tire \" \\\n \"son nom de l'îlot rocheux consacré à saint Michel où s’élève \" \\\n \"aujourd’hui l’abbaye du Mont-Saint-Michel. L’architecture \" \\\n \"du Mont-Saint-Michel et sa baie en font le site touristique\" \\\n \" le plus fréquenté de Normandie et l'un des dix plus \" \\\n \"fréquentés en France — premier site après ceux \" \\\n \"d'Île-de-France — avec près de deux millions et \" \\\n \"demi de visiteurs chaque année (3 250 000 en 2006, \" \\\n \"2 300 000 en 2014).\"", "title": "" }, { "docid": "903b39ff42ec130e7c102de2fa7efd27", "score": "0.47874", "text": "def download_data():\r\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\r\n urllib.request.urlretrieve(url, 'imdb.tgz')\r\n tar = tarfile.open(\"imdb.tgz\")\r\n tar.extractall()\r\n tar.close()", "title": "" }, { "docid": "9ba8ed89a06c6750fe86255eea8aa645", "score": "0.47829717", "text": "def _download_data(url):\n data = data_utils.download_data(url)\n magic_number = int.from_bytes(data[:4], byteorder=\"big\", signed=False)\n\n if \"labels\" in url:\n if magic_number != 2049:\n raise ValueError(\"Wrong magic number ({}) in file {}!\"\n .format(magic_number, url))\n data = np.frombuffer(data[8:], dtype=\"uint8\")\n else:\n if magic_number != 2051:\n raise ValueError(\"Wrong magic number ({}) in file {}!\"\n .format(magic_number, url))\n data = np.frombuffer(data[16:], dtype=\"uint8\").reshape(-1, 28, 28)\n\n return data", "title": "" }, { "docid": "e2c87258f5281d6085d226696fd8b257", "score": "0.47754803", "text": "def get_data(locale_name):\n file_path = path.join(PATH + '/' + locale_name, file)\n # Needs explicit encoding for Windows\n with open(file_path, 'r', encoding='utf8') as f:\n return json.load(f)", "title": "" }, { "docid": "3c5dc81ccdbf09ec85690451b15075c4", "score": "0.4775441", "text": "def locale_file_out(dictionary, filename, source, lang):\r\n\r\n def create_locale_tree():\r\n \"\"\"Creates a new bs4 tree.\r\n Caution: outed strings are deleted from dictionary.\r\n \"\"\"\r\n\r\n def append_new_tag(tree, string, name):\r\n \"\"\"Append a tag to bs4 tree\"\"\"\r\n new_tag = tree.new_tag('text')\r\n new_tag.string = string\r\n new_tag['name'] = name\r\n if lang is not None:\r\n new_tag['language'] = lang\r\n new_tree_root.append(new_tag)\r\n\r\n new_tree = 
BeautifulSoup(EMPTY_XML, \"lxml-xml\")\r\n new_tree_root = new_tree.find('FTL')\r\n dict_for_delete = {}\r\n for entry in dictionary:\r\n if (source is None) or (source in dictionary[entry][1]):\r\n append_new_tag(new_tree, entry, dictionary[entry][0])\r\n dict_for_delete[entry] = []\r\n for entry in dict_for_delete:\r\n del dictionary[entry]\r\n return new_tree\r\n\r\n locale_file = open(os.path.join(outputdir, filename), 'w+', encoding='utf-8')\r\n locale_file.write(pretty_xml(create_locale_tree()))\r\n print(f'Finished creating result file {filename}...')", "title": "" }, { "docid": "ca82b2c81f9bbe96d45c7fa58957a27e", "score": "0.47741997", "text": "def load(self):", "title": "" }, { "docid": "ca82b2c81f9bbe96d45c7fa58957a27e", "score": "0.47741997", "text": "def load(self):", "title": "" }, { "docid": "ca82b2c81f9bbe96d45c7fa58957a27e", "score": "0.47741997", "text": "def load(self):", "title": "" }, { "docid": "df8ca96ce0bf2041233f57ac0d24245d", "score": "0.47738507", "text": "def load_data(**kwargs):\n\n file_name = kwargs.get('file_name','b3test1')\n #TODO:change f ile path\n file_path = kwargs.get('file_path',None)\n if file_path is None:\n # file_path=os.path.expanduser('~')+'/media/yxy/E4E8BAE1E8BAB0E2/englishPathBags/processed_data/'\n file_path='/media/yxy/E4E8BAE1E8BAB0E2/englishPathBags/processed_data/'\n\n f=file_path+file_name\n\n try: \n import cPickle as pickle\n except ImportError:\n import pickle\n \n file1 = f+'rss.p'\n file2 = f+'poses.p'\n file3 = f+'odom.p'\n \n with open(file1, 'rb') as f:\n rss = pickle.load(f)\n with open(file2, 'rb') as f:\n poses = pickle.load(f)\n with open(file3, 'rb') as f:\n odom = pickle.load(f)\n \n return (rss,poses,odom)", "title": "" }, { "docid": "2f507cb392e4e6271f5b4e3e7235863e", "score": "0.47690478", "text": "def download_po_files(urls):\n langs = {}\n\n for url in urls:\n ret = re.search('-([a-z]{2,3}(_[A-Z]{2})?).po$', url, re.I)\n lang_id = ret.groups(0)[0]\n if not os.path.exists('%s.po' % lang_id):\n raise RuntimeError('file %s.po does not exist' % lang_id)\n\n for url in urls:\n doc = read_http(url)\n ret = re.search('-([a-z]{2,3}(_[A-Z]{2})?).po$', url, re.I)\n lang_id = ret.groups(0)[0]\n with open(lang_id + '_new.po', 'w') as f:\n f.write(doc)\n cmd = process_po(lang_id)\n if cmd:\n langs[lang_id] = cmd\n\n for lang_id, cmd in langs.items():\n print('mv %s_new.po %s.po' % (lang_id, lang_id))\n os.rename('%s_new.po' % lang_id, '%s.po' % lang_id)\n print(cmd)\n # cmd is a Unicode, so encode to bytestring to avoid\n # UnicodeEncodeError: 'ascii' codec can't encode character u'\\xf6' in position 49: ordinal not in range(128)\n os.system(cmd.encode('utf-8'))", "title": "" }, { "docid": "8fc47ef43e191d718f7928d2e259d61d", "score": "0.4766635", "text": "def download_pml(url=\"https://github.com/brorfred/oceandata/raw/master/data/\",\n filename=\"GLOBAL_PE_W_LOV_2019.csv\", params={}):\n download(url=f\"{url}/{filename}\", filename=filename, params=params)", "title": "" }, { "docid": "1e3ee3ed83c92fdef601902127af4814", "score": "0.47660166", "text": "def __init__(self, model_dir_path, lang):\n self.lang = lang\n self.dictionary = corpora.Dictionary.load(model_dir_path + file_name[self.lang]['dict_file_name'])\n self.corpus = corpora.MmCorpus(model_dir_path + file_name[self.lang]['corpus_file_name'])\n self.model = models.LdaMulticore.load(model_dir_path + file_name[self.lang]['model_file_name'])\n\n if self.lang == 'HINDI':\n self.lang_obj = Hindi()", "title": "" }, { "docid": "bf44cc83e0162985f2a5fe71377fd27f", "score": 
"0.4764464", "text": "def load(self) -> None:", "title": "" }, { "docid": "bf44cc83e0162985f2a5fe71377fd27f", "score": "0.4764464", "text": "def load(self) -> None:", "title": "" }, { "docid": "c91da918791caa3b1bcc076c7cc40c12", "score": "0.47573432", "text": "def download_txt_file(url,file_name=\"downlod.txt\"):\n\n f1 = open(file_name, \"w\")\n\n \n for i in url:\n f1.write(i.decode())\n f1.close()", "title": "" }, { "docid": "fd818e213668bd85f0b288a5419cdae7", "score": "0.4755667", "text": "def _get_content_from_unicode_file(file_path):\n with open(file_path, 'r') as fp:\n return fp.read()", "title": "" }, { "docid": "a7eede209a744474af29e927c0379c09", "score": "0.47551325", "text": "def load_binary(self, path_to_binary):\n saved_model = torch.load(path_to_binary)\n self.num_user = saved_model['num_user']\n self.num_movie = saved_model['num_movie']\n self.emb_size = saved_model['emb_size']\n\n self.user_encoding = saved_model['user_encoding']\n self.user_decoding = saved_model['user_decoding']\n self.item_encoding = saved_model['item_encoding']\n self.item_decoding = saved_model['item_decoding']\n\n self.model = MFBias(self.num_user, self.num_movie, self.emb_size)\n self.model.load_state_dict(saved_model['model_state_dict'])\n self.model.eval()", "title": "" }, { "docid": "3b1a7342a79bb8707a11161bbfbe7017", "score": "0.47536463", "text": "def get_mapbox_mbtiles(self):\n mapbox_mbtiles = 'http://a.tiles.mapbox.com/v3/%s.mbtiles'\n local_mbtiles = '%s.mbtiles'\n remote_file = mapbox_mbtiles % (self.source)\n local_file = local_mbtiles % (self.source)\n\n # Check if file exists already\n if os.path.exists(local_file) and os.path.isfile(local_file):\n self.out('- Local file, %s, already exists; using this file.\\n' % (local_file))\n else:\n self.out('- Downloading file from Mapbox ...\\n')\n urllib.urlretrieve (remote_file, local_file)\n\n self.source = local_file", "title": "" }, { "docid": "2307f3efcc70c95d650a18de448f04a3", "score": "0.4749928", "text": "def loadArticle(self, url):\n cmd = (downloader_common.XIDEL_CMD.format(url) +\n ' --xpath \\'//article[@class=\"material\"]//div[@class=\"header\"]/div[@class=\"date\"]/time\\'' #datetime in format dd month yyyy, hh:mi\n ' --xpath \\'//article[@class=\"material\"]//div[@class=\"header\"]/h1\\'' #title\n ' --xpath \\'//article[@class=\"material\"]//div[@class=\"header\"]/h2\\'' #summary\n ' --xpath \\'//article[@class=\"material\"]//div[@itemprop=\"articleBody\"]/*[self::p or self::h4]\\'' #article body\n ' --xpath \\'//article[@class=\"material\"]//div[@class=\"header\"]/div[@class=\"authors\"]\\'' #authors\n ' --output-format=json-wrapped') #output as json\n #print('cmd: '+cmd)\n #xidel http://ukr.lb.ua/blog/vitaliy_skotsyk/355817_persha_dekada_roku_kudi_pryamuie.html -q --xpath '//div[@class=\"article-text\"]//span[@itemprop=\"articleBody\"]//p'\n # http://ukr.lb.ua/world/2012/03/27/142868_aeroportah_germanii_zavtra_proydut.html\n\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n result = p.communicate()[0].decode('utf-8')\n #print(result)\n jsonArt = json.loads(result)\n\n article = None\n try:\n if len(jsonArt) > 0 :\n article = Article(url, jsonArt)\n else:\n #logging.warning(\"Nothing can be load from: \"+url)\n print(\"Nothing can be load from: \"+url)\n return None\n\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n print (\"Unexpected error: \", exc_type, \"In article \", result)\n traceback.print_exception(exc_type, exc_value, exc_traceback)\n\n return article", "title": "" }, { "docid": 
"d177cdcb91f480b5ba7f38d50c27481f", "score": "0.4748764", "text": "def __init__(self, url, filename, startByte = 0, listener = None): #$NON-NLS-1$\r\n self.startByte = startByte\r\n customHeaders = {}\r\n if startByte > 0:\r\n rangeHeader = u\"bytes=%d-\" % startByte #$NON-NLS-1$\r\n customHeaders[u\"Range\"] = rangeHeader #$NON-NLS-1$\r\n ZHttpBinaryFileDownload.__init__(self, url, filename, listener, customHeaders)", "title": "" }, { "docid": "9b561b49014d7d291bdb094299e6eda2", "score": "0.47484434", "text": "def read_file(self):", "title": "" }, { "docid": "610dd3df5365d43551c771c53cc9f809", "score": "0.47476226", "text": "def test_LM(in_file, out_file, LM):\n print(\"testing language models...\")\n # This is an empty method\n # Pls implement your code in below\n \n results = []\n text = []\n # read text input and compute estimate\n with open(in_file, mode=\"r\", encoding=\"utf-8\") as f:\n textList = f.readlines()\n \n for line in textList:\n text.append( preprocess_line(line) )\n grams = convert_line_to_4gramList( text[-1] )\n resulting_lang = estimate_sentence(grams, LM)\n results.append(resulting_lang + ' ' + text[-1])\n \n # write the prediction\n with open(out_file, mode=\"w\", encoding=\"utf-8\") as f:\n for result in results:\n f.write(result + '\\n')", "title": "" }, { "docid": "49e379e18d9631dc743af2f483b5392f", "score": "0.47467664", "text": "def download_aemo_data(data_path = 'D:/Data/AEMO/Dispatch',\n dir='http://nemweb.com.au/Reports/Archive/Dispatch_SCADA/',\n zip_levels=2,startdate='20190801',enddate='20190802'):\n\n # data_path = 'D:/Data/AEMO/PreDispatch'\n # dir = 'https://nemweb.com.au/Reports/Current/Next_Day_PreDispatch/'\n # zip_levels = 1\n # startdate = '20190801'\n # enddate = '20190802'\n data_path = Path(data_path)\n # data_path = Path('D:/Data/AEMO/Dispatch')\n # dispatch_dir = 'http://nemweb.com.au/Reports/Archive/Dispatch_SCADA/'\n parser = 'html.parser'\n\n (data_path / 'zip').mkdir(exist_ok=True, parents=True)\n (data_path / 'zip2').mkdir(exist_ok=True, parents=True)\n resp = urllib.request.urlopen(dir)\n soup = BeautifulSoup(resp, parser, from_encoding=resp.info().get_param('charset'))\n #\n all_files = [l.text for l in soup.find_all('a', href=True) if '.zip' in l.text]\n print(all_files)\n\n # %%\n # find index of date\n\n split_str = [s.strip('.zip') for s in all_files[0].split('_')]\n print(split_str)\n idx = 0\n while True:\n try:\n int(split_str[idx])\n break\n except:\n idx += 1\n print(idx)\n\n dl_files = [f for f in all_files if f.split('_')[idx] >= startdate and f.split('_')[idx] <= enddate]\n print(dl_files)\n\n for idx, fname in enumerate(dl_files):\n print(f'{fname}')\n file = requests.get(dir + fname, allow_redirects=True)\n with open(data_path / 'zip' / fname, 'wb') as f:\n f.write(file.content)\n\n # %% unzip files (creates many zip files)\n if zip_levels == 2: # zip files contain other zip files which contain csv files\n zipfiles = [f for f in os.listdir(data_path / 'zip') if '.zip' in f]\n for f in zipfiles:\n with zipfile.ZipFile(data_path / 'zip' / f, \"r\") as zip_ref:\n zip_ref.extractall(path=data_path / 'zip2')\n\n # unzip files (to csv files)\n zipfiles = [f for f in os.listdir(data_path / 'zip2') if '.zip' in f]\n for f in zipfiles:\n with zipfile.ZipFile(data_path / 'zip2' / f, \"r\") as zip_ref:\n zip_ref.extractall(path=data_path)\n else: # zip files only contain csv files\n zipfiles = [f for f in os.listdir(data_path / 'zip') if '.zip' in f]\n for f in zipfiles:\n with zipfile.ZipFile(data_path / 'zip' / f, \"r\") as 
zip_ref:\n zip_ref.extractall(path=data_path)", "title": "" }, { "docid": "51437055a3fcefeb594b4b2b57a9e83d", "score": "0.4740623", "text": "def german_xml_parser(language,shorts,infile,outfile):\n tree = ET.parse(infile)\n root = tree.getroot()\n plurals = set()\n entries = dict()\n for child in root:\n for child2 in child:\n # find page\n if child2.tag == \"{http://www.mediawiki.org/xml/export-0.10/}page\":\n for child3 in child2:\n # find word/title\n if child3.tag == \"{http://www.mediawiki.org/xml/export-0.10/}title\":\n if child3.text != None:\n word = child3.text # this is the title of the entry -> the word, this happens in the first iteration\n if language == \"German\" and word[-1] == \"s\" or word[-1] == \"n\" or word[-2:] == \"en\" or word[-1] == \"e\" or word[-2:] == \"er\": # when the word contains a typical ending for regular plural\n plurals.add(word) # add word into set of all words that could be a regular plural of some other word\n # this will be iterated over later on to see which words need to be added to the dictionary\n elif child3.tag == \"{http://www.mediawiki.org/xml/export-0.10/}revision\":\n for child4 in child3:\n if child4.tag == \"{http://www.mediawiki.org/xml/export-0.10/}text\":\n text_wiki = child4.text\n if text_wiki:\n # add textbit to dictionary\n for textbit in text_wiki.split(\"---\"):\n if language == \"german\" and \"Substantiv|Deutsch\" in textbit: # find the German-section for nouns \n entries[word] = {}\n entries[word][\"textbit\"] = textbit\n elif language == \"spanish\" and \"Substantiv|Spanisch\" in textbit:\n entries[word] = {}\n entries[word][\"textbit\"] = textbit\n elif language == \"english\" and \"Substantiv|Englisch\" in textbit:\n entries[word] = {}\n entries[word][\"textbit\"] = textbit\n for title in entries:\n if \"textbit\" in entries[title].keys():\n write_dict(entries,title,language)\n del entries[title][\"textbit\"]\n # find words that look like a plural of another word, but are not actually\n if language == \"german\":\n word = plurals.pop()\n for item in entries: # title\n if word[:-1] == item or word[:-2] == item: # if the word looks like a pluralversion of the title\n for subitem in entries[item]: # i\n if word not in entries[item][subitem][\"plural\"]: # when the word is not already noted as a possible plural\n entries[item][subitem][\"plural\"].append(word)\n # write the file\n for el in entries:\n outfile.write(\"title: \" + str(el) + \"\\n\")\n for el2 in entries[el]:\n if \"flection\" in entries[el][el2] and entries[el][el2][\"flection\"] != set():\n outfile.write(\"\\tinflection: \" + str(entries[el][el2][\"flection\"]) + \"\\n\")\n if \"gender\" in entries[el][el2]:\n outfile.write(\"\\tgender: \" + str(entries[el][el2][\"gender\"]) + \"\\n\")\n if \"plural\" in entries[el][el2]:\n outfile.write(\"\\tplural: \" + str(entries[el][el2][\"plural\"]) + \"\\n\")\n if \"senses\" in entries[el][el2]:\n for number in entries[el][el2][\"senses\"]:\n outfile.write(\"\\t\\tsense\" + str(number) + \": \" + str(entries[el][el2][\"senses\"][number]) + \"\\n\")\n if \"examples\" in entries[el][el2] and number in entries[el][el2][\"examples\"]:\n outfile.write(\"\\t\\t\\texample(s)\" + str(number) + \": \" + str(entries[el][el2][\"examples\"][number]) + \"\\n\")\n outfile.write(\"\\n\")", "title": "" }, { "docid": "a8576a3786deab8822562461e7e3d7a3", "score": "0.47318223", "text": "def load_data(filename):", "title": "" }, { "docid": "9009d9d15dbe0b8655e11e9b7f9d2abd", "score": "0.47303358", "text": "def load():", "title": "" }, { 
"docid": "ba72d74309e1805ad1647c7a4dd7a55d", "score": "0.4722163", "text": "def ReadUnitInfo(basefilename):\n\n\tfilename = basefilename + \".units\"\n\n\tif (os.path.isfile(filename)==False):\n\t\tprint(\"file not found\")\n\t\treturn []\n\n\tunitdata = {}\n\tunitsfile = open(filename,\"r\")\n\tline = unitsfile.readline().strip().split(\" : \")\n\twhile(line[0]!=\"\"):\n\t\tunitdata[line[0]] = float(line[1])\n\t\tline = unitsfile.readline().strip().split(\" : \")\n\tunitsfile.close()\n\treturn unitdata", "title": "" }, { "docid": "034cb1cb9f0370fea0c5a495298d7663", "score": "0.47203737", "text": "def load_lexicon(babel_code):\n\n lang_path = Path(f\"/export/babel/data/{babel_code}-{babel2name[babel_code]}/release-current/conversational/reference_materials/\")\n if babel_code == \"202\":\n lang_path = Path(f\"/export/babel/data/{babel_code}-{babel2name[babel_code]}/IARPA-babel{babel_code}b-v1.0d-build/BABEL_OP2_202/conversational/reference_materials/\")\n elif babel_code in [\"205\", \"302\", \"303\"]:\n lang_path = Path(f\"/export/babel/data/{babel_code}-{babel2name[babel_code]}/IARPA-babel{babel_code}b-v1.0a-build/BABEL_OP2_{babel_code}/conversational/reference_materials/\")\n elif babel_code in [\"304\"]:\n lang_path = Path(f\"/export/babel/data/{babel_code}-{babel2name[babel_code]}/IARPA-babel{babel_code}b-v1.0b-build/BABEL_OP2_{babel_code}/conversational/reference_materials/\")\n elif babel_code in [\"404\"]:\n lang_path = Path(f\"/export/corpora/LDC/LDC2016S12/IARPA_BABEL_OP3_{babel_code}/conversational/reference_materials/\")\n lexicon_subtrain_path = lang_path / \"lexicon.sub-train.txt\"\n lexicon_path = lang_path / \"lexicon.txt\"\n\n types = set()\n with open(lexicon_path) as f:\n for line in f:\n types.add(line.split(\"\\t\")[0])\n #subtrain_types = set()\n #with open(lexicon_subtrain_path) as f:\n # for line in f:\n # subtrain_types.add(line.split(\"\\t\")[0])\n\n # Let's assess coverage of the train/dev data with these lexicons.\n dev_toks = load_dev_toks(babel_code)\n print(dev_toks[:100])\n print(len(dev_toks))\n dev_types = set(dev_toks)\n\n print(len(dev_types.intersection(types))/len(dev_types))\n return types", "title": "" }, { "docid": "207f8738483d20c9e48cf1cb4e14c4b2", "score": "0.4718527", "text": "def decode(args,model):\n\n print(\"load test source sentences from [{}]\".format(args.test_data_dir))\n test_data_src,test_data_tgt = read_corpus(args.test_data_dir)\n\n print(\"load model from {}\".format(args.model_path))\n model.load_state_dict(torch.load(args.model_path))\n model.to(args.device)\n\n hypotheses = beam_search(model, test_data_src,\n beam_size=int(args.beam_size),\n max_decoding_time_step=args.max_decoding_time_step)\n\n top_hypotheses = [hyps[0] for hyps in hypotheses]\n bleu_score = compute_corpus_level_bleu_score(test_data_tgt, top_hypotheses)\n print('Corpus BLEU: {}'.format(bleu_score * 100))\n\n with open(args.args.output_file, 'w') as f:\n for src_sent, hyps in zip(test_data_src, hypotheses):\n top_hyp = hyps[0]\n hyp_sent = ' '.join(top_hyp.value)\n f.write(hyp_sent + '\\n')", "title": "" }, { "docid": "b006ba6a8f3e12efd374edf9239361ba", "score": "0.47166666", "text": "def manage_export_po(self, language, REQUEST, RESPONSE):\n export_tool = self.get_importexport_tool()\n if language == 'locale.pot':\n filename = language\n else:\n filename = '%s.po' % language\n RESPONSE.setHeader('Content-type','application/data')\n RESPONSE.setHeader('Content-Disposition',\n 'inline;filename=%s' % filename)\n return export_tool.export_po(language)", "title": "" 
}, { "docid": "bf5a593f73794c65f03a14daa8dc2806", "score": "0.47135404", "text": "def download_data():\n url = 'https://www.dropbox.com/s/xk4glpk61q3qrg2/imdb.tgz?dl=1'\n urllib.request.urlretrieve(url, 'imdb.tgz')\n tar = tarfile.open(\"imdb.tgz\")\n tar.extractall()\n tar.close()", "title": "" }, { "docid": "f26ffb4da10e781e848899af5c9ab30d", "score": "0.47123384", "text": "def extract_text_php(file):\n file = file + '.zip'\n with zipp.ZipFile(file) as zfile:\n docs = list(map(lambda name: zfile.read(name).decode(\"latin1\"), zfile.namelist()[:-1]))\n \n\n asci_docs = []\n for doc in docs:\n s = \"\"\n #s = doc.encode('ascii', errors='ignore').decode()\n s = re.sub(r'[^\\x41-\\x7f]',r' ',doc)\n asci_docs.append(s)\n\n return asci_docs", "title": "" }, { "docid": "8de7ee1811a90078393b9413a6ba3b5b", "score": "0.47058287", "text": "def download(self):\n self.__fileDownloaded = self.downloadFile()\n if self.__fileExt not in self.BINARY_EXT:\n tmpExtractedPath = self.extractFile(self.__fileDownloaded)\n self.__extractedPath = os.path.join(tmpExtractedPath, os.listdir(tmpExtractedPath)[0])", "title": "" }, { "docid": "2bddc5dc889af2a1616b949c76a6e2aa", "score": "0.46981782", "text": "def test_decode_modified_utf8():\n sample_path = os.path.join(\n os.path.dirname(__file__),\n 'data',\n 'ModifiedUTF8.class'\n )\n\n with open(sample_path, 'rb') as fin:\n # We simply need to make sure no encoding exceptions are raised\n # when we parse the ClassFile.\n ClassFile(fin)", "title": "" }, { "docid": "e5f9c5f88890ffbe3b0e6b724ff8719f", "score": "0.46964416", "text": "def loadCachedResponse(baseCurrency):\n cacheFile = Utility.createFileName(baseCurrency)\n with open(cacheFile, 'rb') as file:\n return pickle.load(file)", "title": "" }, { "docid": "db5d552ca439af79bfe2b139617722af", "score": "0.4695692", "text": "def load(self, data):", "title": "" }, { "docid": "70f13235f1226a6b6e8f311a4873f9ad", "score": "0.46950617", "text": "def _load_from_binary_file(self):\r\n binary_file = open(self._file_name, 'rb') # read text\r\n try:\r\n self.chronological_list = pickle.load(binary_file)\r\n binary_file.close()\r\n except EOFError:\r\n return\r\n except Exception as exception_message:\r\n raise ActivityBinaryFileRepositoryException(str(exception_message))", "title": "" }, { "docid": "80df2424d35c31922b47202e0f544ba8", "score": "0.46939635", "text": "def text_download(request, title, file_format, juan=0):\n import pinyin\n\n pinyin_title = pinyin.get(title)\n\n qs = QuerySet(using=ExistDB(), xpath='/tei:TEI',\n collection='docker/texts/', model=RocheTEI)\n\n qs = qs.filter(title=title)\n if juan:\n qs = qs.filter(chapter=juan)\n\n\n result = \"\"\n for q in qs:\n for d in q.body.div:\n result += d.text.replace(\" \", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \"\").replace(u\"。\", u\"。\\n\\n\")\n\n if file_format == 'txt':\n response = HttpResponse(content_type='text/plain')\n response['Content-Disposition'] = 'attachment; filename=\"{}.txt\"'.format(pinyin_title)\n response.write(result)\n else:\n from fpdf import FPDF\n\n pdf = FPDF(unit='mm', format='A4')\n pdf.add_page()\n pdf.add_font('Droid', '', 'DroidSansFallbackFull.ttf', uni=True)\n pdf.set_font('Droid', '', 12)\n pdf.write(5, unicode(result))\n response = HttpResponse(pdf.output(dest='S'), content_type='application/pdf') \n response['Content-Disposition'] = 'attachment; filename=\"{}.pdf\"'.format(pinyin_title)\n\n return response", "title": "" }, { "docid": "2c8557954fa2bdaaced693943b99aca4", "score": "0.4693787", "text": "def 
download_model(self, url):\n self.driver.get(url)\n\n title = self.driver.find_element_by_class_name('title-bar').find_element_by_tag_name('h1').text\n logger.info('title: %s' % title)\n\n # get tags\n elem_tags = self.driver.find_elements_by_class_name('indiv-tag')\n tags = [t.text for t in elem_tags]\n logger.info('tags: %s' % tags)\n\n # get download button\n dl_btn = self.driver.find_element_by_id(\"download-prod\")\n\n # description\n desc = self.driver.find_element_by_class_name('desc').text\n logger.info('desc: %s' % desc)\n\n views = self.driver.find_element_by_class_name('p-stat-left').find_element_by_class_name('num').text\n logger.info('views: %s' % views)\n\n downloads = self.driver.find_element_by_class_name('p-stat-right').find_element_by_class_name('num').text\n logger.info('downloads: %s' % downloads)\n\n img_elems = self.driver.find_elements_by_class_name('rsImg')\n\n img_links = [i.get_attribute('src') for i in img_elems]\n logger.info('images: %s' % img_links)\n\n # get download folder before starting download!\n paths, dirs, files = os.walk(self.dldir).__next__()\n file_set = set(files)\n\n dl_btn.click()\n _rand_sleep(4, 14)\n dl_link = self.driver.find_element_by_class_name('file')\n dl_link.click()\n\n zip_size_kb, zip_name, files = self.save_download(file_set)\n\n views = int(views.replace(',', ''))\n downloads = int(downloads.replace(',', ''))\n\n model = {\n 'uri': url,\n 'name': title,\n 'description': desc,\n 'tags': tags,\n 'views': views,\n 'downloads': downloads,\n 'filename': zip_name,\n 'archive': {\n 'files': files,\n 'name': zip_name,\n 'bytes': zip_size_kb\n },\n 'images': img_links,\n 'source': 'free3d'\n }\n\n logger.info('model: %s' % model)\n\n r = requests.post(asset_uri('/api/v2/crawl/asset/'), json=model)\n logger.info('status code: %s' % r.status_code)\n return model\n # if r.status_code != requests.codes.ok or r.status_code != requests.codes.created:\n # raise Exception('Failed to add asset!')", "title": "" } ]
1891419710c609da67c3b8d25c49a07a
Look into portal_catalog and return first story found
[ { "docid": "88b2634f12ffb71e518cf9db737fe470", "score": "0.6264447", "text": "def get_story(project_id, story_id):", "title": "" } ]
[ { "docid": "6dead8b4a9a802a90f7dd6f84bdf331f", "score": "0.59180546", "text": "def get_object(self, catalog, portal_type, title=None, **kwargs):\n if not title and not kwargs:\n return None\n contentFilter = {\"portal_type\": portal_type}\n if title:\n contentFilter['title'] = to_unicode(title)\n contentFilter.update(kwargs)\n brains = catalog(contentFilter)\n if len(brains) > 1:\n logger.info(\"More than one object found for %s\" % contentFilter)\n return None\n elif len(brains) == 0:\n logger.info(\"No objects found for %s\" % contentFilter)\n return None\n else:\n return brains[0].getObject()", "title": "" }, { "docid": "7fbf1a99e49909fd7097f50ab35d4032", "score": "0.55094033", "text": "def get_story(story_id):\n Logger(__name__).info('Looking for story {}.'.format(story_id))\n story = Story._get_one_by_id(story_id)\n if story is None:\n raise StoryNotFoundException\n return Story._serialize_story(story)", "title": "" }, { "docid": "08c6138169ea4b970e5659ef7b870969", "score": "0.5496543", "text": "def extract_catalog(dictio):\n if 'url' in dictio:\n result = [ e.split('.xml')[0]+'.xml' for e in dictio['url'] if e[-7:] == 'Catalog']\n if result==[]:\n result = [ e.split('.xml')[0]+'.xml' for e in dictio['url'] if e[-7:] == 'THREDDS']\n if result and len(result) == 1:\n return result[0]\n else:\n raise Exception(\"Can't find unique catalog\")", "title": "" }, { "docid": "935a9fdc1aab996a8b5de4fea7f70b0a", "score": "0.5345682", "text": "def scrape_story():\n d = max_id()\n if d:\n lid = d\n mid = db['items'].find_one(sort=[(\"id\", -1)])\n if mid:\n while int(lid) > int(mid['id']):\n get_item.delay(lid)\n lid -= 1\n else:\n get_item.delay(lid)\n return", "title": "" }, { "docid": "d06d01e20dedfc4aaa09cfb00897a196", "score": "0.5293464", "text": "def find_story_from_story_page(story_page: object):\n print(f\"Finding story from story page\")\n\n story = {}\n\n content_box = story_page.body \\\n .find(\"div\", {\"id\": \"wrapper\"}) \\\n .find(\"div\", {\"id\": \"main\"}) \\\n .find(\"div\", {\"id\": \"content\"}) \\\n .find(\"div\", {\"class\": \"content_box\"}) \\\n\n story_type = content_box.find(\"h3\").contents[0]\n\n author = content_box \\\n .find(\"div\", {\"class\": \"about_author\"}) \\\n .find(\"h2\") \\\n .find(\"span\") \\\n .contents[0]\n\n post_div = content_box \\\n .find(\"div\", {\"id\": re.compile(\"^post-\")})\n\n title = post_div \\\n .find(\"h1\", {\"class\": \"posttitle\"}) \\\n .contents[0]\n\n issue_paragraph = post_div \\\n .find(\"p\", {\"class\": re.compile(\"postmetadata date\")})\n\n issue = issue_paragraph.find(\"a\").contents[0]\n issue_url = issue_paragraph.find(\"a\")[\"href\"]\n word_count = re.search(r'\\b\\d+\\b', issue_paragraph.contents[2]).group(0)\n\n story_div = post_div \\\n .find(\"div\", {\"class\": \"entry\"})\n\n story_div_elements = list(story_div.children)\n\n content = \"\"\n for element in story_div_elements:\n # ignore paragraphs containing images or links\n if element.find(\"img\") or element.find(\"a\"):\n continue\n if element.name in [\"p\"]:\n content += f\"{element.get_text()}\\n\\n\"\n if element.name in [\"ol\", \"ul\"]:\n text = element.get_text('\\n')\n content += f\"{text}\"\n if element.name == \"div\" and element[\"class\"] == \"divider\":\n content += f\". . . 
.\\n\\n\"\n\n story.update(\n { \"author\": str(author)\n , \"title\" : str(title)\n , \"issue\": str(issue)\n , \"issue_url\": str(issue_url)\n , \"word_count\": str(word_count)\n , \"type\": str(story_type)\n , \"content\": content\n })\n\n return story", "title": "" }, { "docid": "e4240c5f15bb45619eca13cdc579794d", "score": "0.5250441", "text": "def pick_story():\n return render_template(\"pickAStory.html\", stories = stories.values())", "title": "" }, { "docid": "5450776e3fa1239abdf828481109fb7d", "score": "0.52082795", "text": "def GetFirstSubstance(doc: BaseDocument) -> None:\n ...", "title": "" }, { "docid": "87ae16c9d713efd690ef4d10737ea946", "score": "0.5207635", "text": "def lookup(cls, page_id):\n\n # parse out the parts of the id\n m = re.match(r'/lccn/(.+)/(.+)/ed-(\\d+)/seq-(\\d+)/?', page_id)\n if not m:\n return None\n lccn, date, edition, sequence = m.groups()\n\n # unfortunately there can be more than one\n # default to the latest one\n q = Page.objects.filter(issue__title__lccn=lccn,\n issue__date_issued=date,\n issue__edition=edition,\n sequence=sequence)\n pages = q.order_by('-issue__date_issued').all()\n if len(pages) == 0:\n return None\n return pages[0]", "title": "" }, { "docid": "ef69512a24e52f8702c02a28d252ad66", "score": "0.5161017", "text": "def find_subscene(self, name):\n for ss in self.subscenes:\n if ss.name == name:\n return ss\n return None", "title": "" }, { "docid": "50f5e86e5e23bc797b7010315ebccf5f", "score": "0.51230115", "text": "def first(self, **kwargs):\n result = result = self._get(limit=1, **kwargs)\n if result:\n return result[0]\n else:\n raise self.source_class.DoesNotExist(repr(kwargs))", "title": "" }, { "docid": "90beb4edeee8e877cfd7a404365ff1e1", "score": "0.50778246", "text": "def find_one_if(predicate, coll):\n results = filter(predicate, coll)\n if len(results) != 1:\n return None\n else:\n print results\n return results[0]", "title": "" }, { "docid": "c9a4702faf6d23bfd032afdae5bb31c3", "score": "0.505469", "text": "def getOneNews():\n LOGGER.debug(\"getOneNews is called\")\n return operations.getOneNews()", "title": "" }, { "docid": "dc9e98b57170875af4ebb025316906c6", "score": "0.5040889", "text": "def get_portal(pk=1):\n # CACHE\n cache_key = \"%s-portal-%s\" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, pk)\n portal = cache.get(cache_key)\n if portal:\n return portal\n\n # At the moment the default portal should always exist.\n try:\n portal = lfc.models.Portal.objects.get(pk=pk)\n except lfc.models.Portal.DoesNotExist:\n portal = lfc.models.Portal.objects.filter()[0]\n\n cache.set(cache_key, portal)\n return portal", "title": "" }, { "docid": "b2486caf2f0f97e93bb9c57fc666ada0", "score": "0.5025098", "text": "def get_first_wikipedia_para(title):\n link = \"https://en.wikipedia.org/w/api.php?format=json&action=query&prop=extracts&exintro&explaintext&redirects=1&titles={}\" \n link = link.format(title)\n print(title)\n data = requests.get(link).json()\n print(link)\n pages = data['query']['pages']\n page_names = list(pages.keys())\n para = pages[page_names[0]]['extract']\n return para", "title": "" }, { "docid": "205b2a3dab96d2f770fd9cff3b7f29fc", "score": "0.5022011", "text": "def recall_story(self, title, author=None, is_module=False):\n cached = self.find_cached(title, author, is_module)\n if cached:\n return cached\n\n table = \"modules\" if is_module else \"stories\"\n cur = self.connection.cursor()\n if author is None:\n cur.execute(\n \"SELECT package FROM {} WHERE title = ?;\".format(table),\n ( title.title(), )\n )\n else:\n 
cur.execute(\n \"SELECT package FROM {} WHERE title = ? AND author = ?;\".format(table),\n ( title.title(), author.title() )\n )\n rows = cur.fetchall()\n if len(rows) < 1:\n return None\n\n if len(rows) > 1: # Don't cache anything in this case\n result = [ unpack(json.loads(r[\"package\"]), Story) for r in rows ]\n result.set_module_finder(self.find_module)\n else:\n result = unpack(json.loads(rows[0][\"package\"]), Story)\n result.set_module_finder(self.find_module)\n\n if is_module:\n self.cache_module(result)\n else:\n self.cache_story(result)\n\n return result", "title": "" }, { "docid": "f5a5636659e4edcd6f00a6bb18d60c21", "score": "0.49941188", "text": "def first(self):\n first = Article.objects.first()\n return first", "title": "" }, { "docid": "9c5fa847a06337ca6877a5c1e33ce1fc", "score": "0.49638855", "text": "def lookupNewsSource(conn, nsid):\n curs = conn.cursor(MySQLdb.cursors.DictCursor)\n curs.execute('''select * from newsSource where nsid = %s''', [nsid])\n return curs.fetchone()", "title": "" }, { "docid": "6f4c282bdfd565c45d7420e63add18d6", "score": "0.49626344", "text": "def object_detail(request, slug):\n # First see if a project exists for this slug\n project_response = project_detail(request, slug)\n if project_response:\n return project_response\n else:\n return document_detail(request, slug)", "title": "" }, { "docid": "c96e2c9176926470d4ee97f070fd9230", "score": "0.4940228", "text": "def category(request,slug):\n category = get_object_or_404(Category,slug=slug)\n story_list = Story.objects.filter(category=category)\n heading = \"Category: %s\" % category.label\n return render_to_response(\"cms/story_list.html\",locals())", "title": "" }, { "docid": "ecacd60ab4a93cadd2023df15f3a2796", "score": "0.4938153", "text": "def find_subscene(self, anchor_name):\n subscenes = [ss_name for ss_name, val in self.subscenes.iteritems() if val[\"anchor\"].name == anchor_name]\n \n subscene = subscenes[0] if subscenes else None\n return subscene", "title": "" }, { "docid": "43989501a13928a5bd097ea1c55ff355", "score": "0.4920585", "text": "def first(location):\n\t\treturn getitem(location, 0) if isinstance(location, list) else None", "title": "" }, { "docid": "81e81d84ab80335a94de5a2dada89f0b", "score": "0.4910496", "text": "def findOrCreate(self, target):\n worldServer = (target.getWorld()).getHandle()\n found = self.findPortal(target)\n if found == None:\n if self.getCanCreatePortal() and self.createPortal(target):\n found = self.findPortal(target)\n else:\n found = target\n # fallback to original if unable to find or create\n return found", "title": "" }, { "docid": "aca48c49b5ee76bc6b271c0c4c7184ad", "score": "0.4901526", "text": "def article_sport_one(url_page, title, desc, web, cat):\n context = ''\n source = requests.get(url_page).text\n soup = BeautifulSoup(source, features=\"html.parser\")\n text_p = soup.findAll('p')\n for line in list(text_p):\n context += \"\".join(line.text)\n context += \"\".join(\"\\n\")\n\n try:\n post = soup.find('div', class_=\"article-credit\").find('span').text\n split_post = post.split(\" \")\n date = split_post[len(split_post) - 2]\n time = split_post[len(split_post) - 1]\n insert_new_article(web, cat, title, desc, url_page, context, date, time)\n except:\n now = datetime.datetime.now()\n date = str(now.date().strftime(\"%d/%m/%Y\"))\n time = str(now.time())[:5]\n insert_new_article(web, cat, title, desc, url_page, context, date, time)", "title": "" }, { "docid": "8f251d932c0b7311df95b9a050b5ec07", "score": "0.490151", "text": "def 
first_or_404(self):\n rv = self.first()\n if rv is None:\n abort(404)\n return rv", "title": "" }, { "docid": "660f310e04c4da612523289e78019cf6", "score": "0.49014023", "text": "def search_first(self, external_id: str, look_for: str) -> Item:\n items_found = self.search(external_id, look_for)\n if len(items_found) < 1:\n return None\n else:\n return items_found[0]", "title": "" }, { "docid": "f5f20cccff36005a86bb32665ee5b611", "score": "0.49013013", "text": "def _find_specific_document(docker_documents, docker_resource_id):\n for document in docker_documents:\n if document[global_constants.ID] == docker_resource_id:\n return document\n return {}", "title": "" }, { "docid": "8c36e1e5a9a70e86ae93d55ea1108824", "score": "0.4898499", "text": "def get_first_log(log_items, author_id):\n sorted_log_items = sorted(log_items, reverse=False, key=lambda item: item[CREATED_INDEX])\n\n for log_item in sorted_log_items:\n if log_item[AUTHOR_INDEX] == author_id:\n return log_item\n\n return None", "title": "" }, { "docid": "2d1084cead73a9e3b967ee11fdb0911d", "score": "0.4870358", "text": "def _get_mainCategory(self):\n\n if self.category.count() > 0:\n return self.category.all()[0]\n else:\n return None", "title": "" }, { "docid": "62ae83de519318dc6a803c3d79d721ff", "score": "0.48238096", "text": "def lookup_scout_by_name(name):\n try:\n return scouts_by_name[name]\n except:\n # Any exception just passthrough\n pass\n return None", "title": "" }, { "docid": "17b247d23942eccd74a5b09e3e24f917", "score": "0.48184395", "text": "def _fetch_element(news_site_uid, host, link, element):\r\n if element == 'article':\r\n logger.info('Start fetching article at {}'.format(link))\r\n elif element == 'category':\r\n logger.info('Start fetching category at {}'.format(link))\r\n\r\n article = None\r\n category_homepage = None\r\n\r\n try:\r\n \tif element == 'article': \r\n \t article = news.ArticlePage(_build_link(host, link), news_site_uid)\r\n \telif element == 'category':\r\n \t category_homepage = news.CategoryHomePage(_build_link(host, link), news_site_uid)\r\n \r\n except (HTTPError, MaxRetryError) as e:\r\n logging.warning('Error while fetching the article or category', exc_info=False)\r\n\r\n if article and not article.body:\r\n logging.warning('Invalid article, there is no body')\r\n return None\r\n\r\n if element == 'article':\r\n return article \r\n elif element == 'category':\r\n return category_homepage", "title": "" }, { "docid": "df733c10b5138e8a32904bed320a5bc4", "score": "0.48177895", "text": "def fetch_page(self, page_title):\n sanitized_page_title = helpers.get_sanitized_page_title(page_title)\n\n query = 'SELECT * FROM pages WHERE title = ? COLLATE NOCASE;'\n query_bindings = (sanitized_page_title,)\n self.sdow_cursor.execute(query, query_bindings)\n\n # Because the above query is case-insensitive (due to the COLLATE NOCASE), multiple articles\n # can be matched.\n results = self.sdow_cursor.fetchall()\n\n if not results:\n raise ValueError(\n 'Invalid page title {0} provided. 
Page title does not exist.'.format(page_title))\n\n # First, look for a non-redirect page which has exact match with the page title.\n for current_page_id, current_page_title, current_page_is_redirect in results:\n if current_page_title == sanitized_page_title and not current_page_is_redirect:\n return (current_page_id, helpers.get_readable_page_title(current_page_title), False)\n\n # Next, look for a match with a non-redirect page.\n for current_page_id, current_page_title, current_page_is_redirect in results:\n if not current_page_is_redirect:\n return (current_page_id, helpers.get_readable_page_title(current_page_title), False)\n\n # If all the results are redirects, use the page to which the first result redirects.\n query = 'SELECT target_id, title FROM redirects INNER JOIN pages ON pages.id = target_id WHERE source_id = ?;'\n query_bindings = (results[0][0],)\n self.sdow_cursor.execute(query, query_bindings)\n\n result = self.sdow_cursor.fetchone()\n\n # TODO: This will no longer be required once the April 2018 database dump occurs since this\n # scenario is prevented by the prune_pages_file.py Python script during the database creation.\n if not result:\n raise ValueError(\n 'Invalid page title {0} provided. Page title does not exist.'.format(page_title))\n\n return (result[0], helpers.get_readable_page_title(result[1]), True)", "title": "" }, { "docid": "6cacec10ee218a6187a1e4806f5b673e", "score": "0.48134065", "text": "def get_first_comic_link(cls):\n return get_soup_at_url(cls.url).find(\"div\", id=\"st\").parent", "title": "" }, { "docid": "68033169d1fe1d17a50297c4eaa18f3e", "score": "0.48121727", "text": "def getStageTitle(self, name):\n i = 0\n for r in self.liststore:\n if (r[2] == name):\n return self.liststore[i][0]\n i += 1\n return None", "title": "" }, { "docid": "f782df7c70d383f4912f692af77c63b5", "score": "0.4804012", "text": "def get_by_id(self, id):\n with self.lock:\n for slide in self.slides:\n if slide.id() == id:\n return slide\n return None", "title": "" }, { "docid": "4135cd511c20213161fe29061be067aa", "score": "0.47806898", "text": "def _lookup(self, title, *remainder):\n # The TG request extension machinery will strip off the end of\n # a dotted wiki page name if it matches a known file extension. 
Here,\n # we reassemble the original page name.\n title, remainder = get_page_title_from_request()\n title = unquote(really_unicode(title))\n page_model = c.app.artifacts['page']['model']\n controller = c.app.artifacts['page']['controller']\n page = page_model.query.get(\n app_config_id=c.app.config._id, title=title)\n return controller(page, title), remainder", "title": "" }, { "docid": "193b54224454487d9f76056c8edf9353", "score": "0.47726232", "text": "def get_first_real_section(self) -> Section:\n for section in self:\n if section.real:\n return section", "title": "" }, { "docid": "ee8bfe5ab95001ce2678744eb36a087b", "score": "0.47717077", "text": "def get_first_comic_link(cls):\n return get_soup_at_url(cls.url).find('a', title=\"First\")", "title": "" }, { "docid": "733e090a6244674afca2bd5f4a4f37a9", "score": "0.47709438", "text": "def get_topic_by_title(self, title):\n\n all_topics = models.storage.all(Topic)\n for value in all_topics.values():\n if (value.title == title):\n return value\n return None", "title": "" }, { "docid": "3b293af8ef1dc5b7b749566a2086bc53", "score": "0.47615185", "text": "def search_item(name, collection):\n name = name.lower()\n items = [i for i in collection if i.name == name]\n if not items:\n # try the aliases or titles\n items = [i for i in collection if name in i.aliases or i.title.lower() == name]\n return items[0] if items else None", "title": "" }, { "docid": "c02b17a69f99c4d5c5e27a038444805f", "score": "0.47519848", "text": "def getFirstPage(self):\n for pn, pnPages in sorted(self.pages.items()):\n for index, page in enumerate(pnPages):\n return page\n return None", "title": "" }, { "docid": "ef94054d75e3a922819c61ed98bde5fe", "score": "0.47329152", "text": "def first(self):\r\n if len(self.__result_list) > 0:\r\n return self.__result_list[0]\r\n return None", "title": "" }, { "docid": "46eb5e1c588e3719c869961424093e65", "score": "0.47322023", "text": "def first(self, session=None):\n if self.first_url:\n matches = self._matchmaker(self.first_url, session)\n return matches\n else:\n raise VGPaginationError('This is the first page')", "title": "" }, { "docid": "8ad9e963612d3c5b5748ea9a59d36dc0", "score": "0.4731192", "text": "def project_detail(request, slug):\n cache_key = 'project_detail:%s' % slug\n cached_response = get_cached_response(request, cache_key)\n if cached_response:\n return cached_response\n else:\n # Pull the object\n obj = Project.get_by_key_name(slug)\n if not obj:\n # Return None so it can move along and look for matching document\n return None\n context = {\n 'object': obj,\n 'document_list': obj.document_set.filter(\"is_published =\", True).order(\"-publication_date\").order(\"order_in_project\"),\n }\n response = direct_to_template(request, 'project_detail.html', context)\n memcache.add(cache_key, response, 60)\n return response", "title": "" }, { "docid": "99332177c09ebc65414f14a1e7438833", "score": "0.4709649", "text": "def _find_title(self):\n if self.page:\n title = self.page.find(\"a\", title=self.book_title)\n if title:\n return self._find_price_and_stock(title)\n else:\n return self._crawl()\n return None", "title": "" }, { "docid": "a1b2902983d1c1b7e928b0eb75135ce9", "score": "0.47078508", "text": "def test_detail_page_exists(testapp):\n response = testapp.get(\"/journal/1\", status=200)\n html = response.html\n assert \"The what and the how of the day.\" in html.find('header').text", "title": "" }, { "docid": "8596ed1ea428b068619d5bdbfcd6ce5d", "score": "0.4703334", "text": "def _find_blog_post(title_part):\n try:\n 
return BlogPost.objects.filter(title__icontains=title_part)[0]\n except IndexError: # nothing matched\n return None", "title": "" }, { "docid": "b4a79238c3636b3cbfd3acb302d5537d", "score": "0.47003013", "text": "def select_template(templates):\n for tpl in templates:\n if os.path.exists(tpl):\n return tpl\n return None", "title": "" }, { "docid": "b25fec18d426ee72ab7ca4477bad0736", "score": "0.46912655", "text": "async def first(self, session=None):\n if self.first_url:\n matches = await self._matchmaker(self.first_url, session)\n return matches\n else:\n raise VGPaginationError('This is the first page')", "title": "" }, { "docid": "6acafeaf2737bef3938ec1982fe005e3", "score": "0.46899286", "text": "def main_title(content, soup, web, cat):\n try:\n first_url = url + content.find('a')['href'][1:]\n first_title = soup.find('div', id=\"LeagueTopArticle\")\n title = first_title.find('h2', class_=\"marpad0\").text.replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(\" \", \"\")\n if new_title(title):\n desc = first_title.find('h3', class_=\"marpad0 f12\").text.replace(\"\\n\", \"\").replace(\"\\r\", \"\").replace(\" \", \"\")\n print(\"Found new article in \" + web + \" category : \" + cat)\n print(\"Main : \" + title)\n article_sport_one(first_url, title, desc, web, cat)\n return 1\n else:\n return 0\n except:\n print(\"Error in first article\")\n return 0", "title": "" }, { "docid": "5ce6ea577ebce65f0780a497ef208340", "score": "0.4688169", "text": "def get_stories(story_type):\n return requests.get(\n \"https://hacker-news.firebaseio.com/v0/{0}stories.json\".format(story_type)\n ).json()[0:30]", "title": "" }, { "docid": "7b1f6e875b0e50bac893cd45bc6fc0c8", "score": "0.46841758", "text": "def get(self, id):\n for cat in CATS:\n if cat[\"id\"] == id:\n return cat\n api.abort(404)", "title": "" }, { "docid": "b7c02eecaaf4f35325d81746a4dbe022", "score": "0.46678528", "text": "def test_catalogs_generic(self):\n response = self.client.get(r('blog', category=Blogs.CATALOG, page='1'))\n self.assertEqual(200, response.status_code)", "title": "" }, { "docid": "3e80c275323beb667d476721485ad7c4", "score": "0.4666564", "text": "def get_category(url):\n categories = CACHE.get('categories')\n if categories is None:\n data = get_categories()\n category = next(filter(lambda x: x.url == url, data))\n return category\n category = next(filter(lambda x: x.url == url, categories))\n return category", "title": "" }, { "docid": "628f2b8308f973f6254a4e7e34c950b3", "score": "0.4666083", "text": "def first_or_none(self, **kwargs):\n try:\n return self.first(**kwargs)\n except self.source_class.DoesNotExist:\n pass", "title": "" }, { "docid": "295509a5791e47cef5b47bbb839cb01e", "score": "0.46609786", "text": "def get_section(self, identifier: str) -> Optional[Section]:\n\n if not self.sections:\n return None\n for s in self.sections:\n if s.identifier == identifier:\n return s\n raise SectionNotFoundError", "title": "" }, { "docid": "a93bf8e00419e5b9c06fbd13c8914c40", "score": "0.4658106", "text": "def find_object(self, scene, obj_uuid):\n for obj in scene.objects:\n if obj.get('uuid') == obj_uuid:\n return obj", "title": "" }, { "docid": "28873fcff8f88fa33c854fffa99c831d", "score": "0.46575695", "text": "def firstProduct():\n result = products.find_one()\n return result[\"name\"]", "title": "" }, { "docid": "1e81f36107620032574b15c95478b890", "score": "0.46530527", "text": "def test_learn_detail_view(self):\n url = reverse('learn-detail',\n kwargs={'slug': self.published_comm.slug})\n response = self.client.get(url)\n 
self.assertContains(response, 'published-comm', status_code=200)\n\n url = reverse('learn-detail',\n kwargs={'slug': self.unpublished_comm.slug})\n response_404 = self.client.get(url)\n self.assertEqual(response_404.status_code, 404)", "title": "" }, { "docid": "791e6d56118029580e1c75ec641bfa4f", "score": "0.46478397", "text": "def get_object(self):\n lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field\n queryset = self.get_queryset().filter(\n \"match_phrase\", **{\"_id\": self.kwargs[lookup_url_kwarg]}\n )\n hits = queryset.execute().hits\n count = len(hits)\n if count != 1:\n message = (\n \"No object matches the given query.\"\n if count == 0\n else \"Multiple results matches the given query. Expected a single result.\"\n )\n raise Http404(message)\n else:\n hits[0].offset = self.get_offset(hits[0])\n return hits[0]", "title": "" }, { "docid": "abf97edd90daf6368a30689690e1e2ed", "score": "0.4640465", "text": "def next_dictionary_gloss(self, staff=False):\n if self.sn == None:\n return None\n elif staff:\n set = Gloss.objects.filter(sn__gt=self.sn).order_by('sn')\n else:\n set = Gloss.objects.filter(sn__gt=self.sn, inWeb__exact=True).order_by('sn')\n if set:\n return set[0]\n else:\n return None", "title": "" }, { "docid": "abf97edd90daf6368a30689690e1e2ed", "score": "0.4640465", "text": "def next_dictionary_gloss(self, staff=False):\n if self.sn == None:\n return None\n elif staff:\n set = Gloss.objects.filter(sn__gt=self.sn).order_by('sn')\n else:\n set = Gloss.objects.filter(sn__gt=self.sn, inWeb__exact=True).order_by('sn')\n if set:\n return set[0]\n else:\n return None", "title": "" }, { "docid": "e38d34d7a8cae85ccacdaf945493330b", "score": "0.46382415", "text": "def test_ensure_fiction_category_is_displayed_for_short_story(self):\n\n add_cat('Short Story','short-story')\n\n\n user_prof = create_user_prof(username='Short')\n\n add_art(title='Short Story',author = user_prof,category='Short Story')\n\n response = self.client.get(reverse('show_category',kwargs={'category_name_slug' : 'short-story'}))\n self.assertEqual(response.status_code,200)\n self.assertContains(response,'Short Story')", "title": "" }, { "docid": "c222eba34a96b2a86c47799d62904df8", "score": "0.4625425", "text": "def get_source(institution_id, source_id):\n for _, s in yield_sources(institution_id):\n if s.canonical_name == source_id:\n return s", "title": "" }, { "docid": "29e7b44693871db4eb5a0cb4156e5564", "score": "0.4623392", "text": "def test_first(self):\n with vcr.use_cassette(\"species.yaml\"):\n self.api.species()\n with vcr.use_cassette(\"first.yaml\"):\n self.assertIsNotNone(self.api.first())", "title": "" }, { "docid": "29e7b44693871db4eb5a0cb4156e5564", "score": "0.4623392", "text": "def test_first(self):\n with vcr.use_cassette(\"species.yaml\"):\n self.api.species()\n with vcr.use_cassette(\"first.yaml\"):\n self.assertIsNotNone(self.api.first())", "title": "" }, { "docid": "f8a71963396c20e7d5f73b3ad1ced8cb", "score": "0.46224007", "text": "def entry_by_title(self, entry_title: str) -> Optional[Entry]:\n entries = [entry for entry in self.entries if entry.title == entry_title]\n if entries == []:\n Logging.print('No such entry found.')\n return\n return entries[0]", "title": "" }, { "docid": "280bd20bb92fbb5172b0725ef9d688ae", "score": "0.46167815", "text": "def first_existing(paths):\n for p in paths:\n if os.path.exists(expand(p)):\n return p", "title": "" }, { "docid": "bbded08a4e495af8983e424878fea5ed", "score": "0.46106037", "text": "def _pull_team_page(self, url):\n try:\n return 
pq(url)\n except HTTPError:\n return None", "title": "" }, { "docid": "6847c2ca0d5fdf9d603a09f51b9d2d48", "score": "0.4609604", "text": "def get_campaign_or_404(request, key):\n campaign = get_object_or_404(Campaign, key=key)\n return campaign", "title": "" }, { "docid": "8f60bfe62edb20f88ddbb93316a96e9e", "score": "0.4606986", "text": "def getDocByName(docName,doc_collection):\n for doc in doc_collection:\n if doc.name == docName:\n return doc", "title": "" }, { "docid": "8bcbd2c3dc4c563b597286fe1d7bdc77", "score": "0.46021008", "text": "def get_first_movie(self) -> Movie:\n raise NotImplementedError", "title": "" }, { "docid": "8bcbd2c3dc4c563b597286fe1d7bdc77", "score": "0.46021008", "text": "def get_first_movie(self) -> Movie:\n raise NotImplementedError", "title": "" }, { "docid": "2f5da204434b540c1ab1a99d5fba264b", "score": "0.45949915", "text": "def get_item(self, layer_properties):\n if 'title' not in layer_properties:\n raise Exception('layer_properties must define title')\n\n if 'type' in layer_properties:\n search_results = self.gis.content.search(query=\"title:%s\" % layer_properties['title'],\n item_type=layer_properties['type'])\n else:\n search_results = self.gis.content.search(query=\"title:%s\" % layer_properties['title'])\n\n for search_result in search_results:\n if search_result.title == layer_properties['title']:\n return search_result\n return None", "title": "" }, { "docid": "3e7d96f97932b36ec94bdbaa5e157b51", "score": "0.45856324", "text": "def getScene(fullPath=False):\r\n\t#if os.environ['IC_ENV'] == 'MAYA':\r\n\tif os.environ['PREVIEW_APPCONNECT'] == 'maya':\r\n\t\tscene = mc.file(q=True, sceneName=True)\r\n\r\n\t\tif fullPath:\r\n\t\t\treturn scene\r\n\t\telse:\r\n\t\t\tsceneName = os.path.splitext(os.path.basename(scene))[0]\r\n\r\n\t\t\tif sceneName:\r\n\t\t\t\treturn sceneName\r\n\t\t\telse:\r\n\t\t\t\treturn \"untitled\"", "title": "" }, { "docid": "5f7e4c334f490e852ea9bfbb3a48e475", "score": "0.4583832", "text": "def load_study(\n request,\n pk=None,\n slug=None,\n permission_type=models.StudyPermission.CAN_VIEW,\n):\n permission = Q()\n if not request.user.is_superuser:\n permission = models.Study.access_filter(request.user, access=permission_type)\n if pk is not None:\n return get_object_or_404(models.Study.objects.distinct(), permission, Q(pk=pk))\n elif slug is not None:\n return get_object_or_404(\n models.Study.objects.distinct(), permission, Q(slug=slug)\n )\n raise Http404()", "title": "" }, { "docid": "487d704fb962675f44bf800079660b3d", "score": "0.45809343", "text": "def show(slug):\n article = None\n\n # Searching articles ..\n for file_ in listdir(ARTICLE_DIR):\n if file_.endswith(EXTENSIONS):\n with open(os.path.join(ARTICLE_DIR, file_), 'r') as f:\n if slug == slugify(f.readline()):\n article = os.path.join(ARTICLE_DIR, file_)\n break\n\n # If we didn't find the article, it doesn't exist.\n if not article:\n article = os.path.join(PAGES_DIR, 'article-404.md')\n\n with open(article, 'r') as f:\n lines = f.read().split('\\n')\n # Title should be the first line of the file. 
\n title = lines.pop(0).strip().decode('utf8')\n # Category should be second.\n category = lines.pop(0).strip().decode('utf8')\n # The rest is the article itself.\n source = '\\n'.join(lines).decode('utf8')\n \n return render_template('show.html', article=dict(title=title, source=source))", "title": "" }, { "docid": "bdc112ccedeecdb4e8250004c48b29a0", "score": "0.4575177", "text": "def get_new_stories(db=None) -> list:\n\n api = AssociatedPressAPI()\n feed_data = api.feed(page_size=50)\n items = [] # list of dict items to return\n\n for obj in feed_data['items']:\n\n item = obj['item']\n guid = item['altids']['itemid']\n version = item['version']\n\n # If DB handle was passed, check if this story has previously been retrieved (to avoid unnecessary API calls to content endpoint)\n if db:\n guid_exists = db.query(\"select 1 from stories s join media m using (media_id) where m.name = 'AP' and s.guid = %(a)s\", {'a': guid}).hash()\n if guid_exists:\n logger.info('Story with guid: {} is already in the database -- skipping.')\n continue\n\n logger.info('Found new story (guid: {}, version: {})'.format(guid,version))\n\n # Get item content\n content_uri = item['uri']\n parsed_content_uri = urlparse.urlparse(content_uri)\n content_path = parsed_content_uri.path.rsplit('/',1)[-1]\n content_params = {k:v[0] for k,v in urlparse.parse_qs(parsed_content_uri.query).items()}\n logger.info(\"Fetching content for story (guid: {})\".format(guid))\n content = json.loads(api.content(content_path,**content_params))['data']['item']\n try:\n story_url = content['links'][0]['href'] # This is held in an array which suggests more than one link for a story is possible?\n except:\n logger.warning('No URL link found for guid {}. Skipping story.'.format(guid))\n continue\n publish_date = content['firstcreated'] # There is a first created date and a version created date (last edit datetime?)\n\n # Get nitf rendition for story\n nitf_href = content['renditions']['nitf']['href']\n parsed_nitf_uri = urlparse.urlparse(nitf_href)\n nitf_params = {k:v[0] for k,v in urlparse.parse_qs(parsed_nitf_uri.query).items()}\n nitf_path = \"{guid}.{version}/download\".format(guid=guid,version=version)\n logger.info(\"Fetching story text using nitf rendition (guid: {})\".format(guid))\n nitf_content = api.content(nitf_path,**nitf_params).decode()\n\n # Extract story text from nitf XML (body.content) and create story_data object\n soup = BeautifulSoup(nitf_content,features=\"html.parser\")\n story_data = {}\n story_data['guid'] = guid\n story_data['publish_date'] = publish_date\n story_data['url'] = story_url\n story_data['text'] = soup.find('body.content').text\n story_data['title'] = content['headline']\n try:\n story_data['description'] = content['headline_extended']\n except:\n logger.warning(\"No extended headline present for guid: {}. 
Setting description to an empty string.\".format(guid))\n story_data['description'] = ''\n story_data['content'] = nitf_content\n items.append(story_data)\n\n logger.info(\"Returning {} new stories.\".format(len(items)))\n return items", "title": "" }, { "docid": "2ce54654e5a8d70adfadb70c50b3f683", "score": "0.4573632", "text": "def getSkinlessView(self):\n \n if self.context.Type() in ['Page', 'Division']:\n title = self.context.Title()\n content = self.context.getText()\n else:\n #Get Full Course Title\n portal_catalog = self.context.portal_catalog\n brain = portal_catalog(Title=self.context.Title())[0]\n self.ecutil = getUtility(IECUtility)\n title = self.ecutil.getFullCourseTitle(brain)\n content = self.context.getText()\n\n return title, content", "title": "" }, { "docid": "783bf59017da625b950df4425039d046", "score": "0.45712623", "text": "def find_one(content_type):", "title": "" }, { "docid": "4df14164db9a83186142f453e7a10f58", "score": "0.45697963", "text": "def fromUrl(self, url):\n for e in self.entities.itervalues():\n if e.url == url:\n return e\n return None", "title": "" }, { "docid": "254acce3cfdd348aa3df784e16093dcb", "score": "0.45661914", "text": "def test_can_access_by_url_name(self):\n url = reverse('TELLING:show_story')\n self.client.force_login(self.user)\n\n stories = Story.objects.filter(author=self.user)\n response = self.client.get(url, data={'user_stories': stories})\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "b96739b45abae2835c4aee3284c9f835", "score": "0.45647758", "text": "def is_first_course_on_page_of_sport_category(self):\r\n try:\r\n self.find_element_by_xpath(cpl.SPORT_COURSE_CATEGORY)\r\n return True\r\n except TimeoutException:\r\n return False", "title": "" }, { "docid": "defe6a7f582fa0017d9c50148517832f", "score": "0.45627", "text": "def retrieve_catalog(self, catalog_hash):\n if catalog_hash in self._opened_catalogs:\n return self._opened_catalogs[catalog_hash]\n return self._retrieve_and_open_catalog(catalog_hash)", "title": "" }, { "docid": "134548b27e1ce470de086564646cf10f", "score": "0.45568076", "text": "def first(self):\n return self.limit(1).one()", "title": "" }, { "docid": "56b30b44d795b23e837f84c2899be19e", "score": "0.4554371", "text": "def test_sources_category(self):\n source = get_sources(\"health\")[0]\n self.assertEqual(source.category,\"health\")", "title": "" }, { "docid": "0d2cf40a4157f0f8684b189c14647211", "score": "0.45531976", "text": "def _get_target_portal_for_host(self, host_id, host_address):\n request = self.client.factory.create('Request')\n request.Name = 'iscsi-portal-list-info'\n response = self.client.service.ApiProxy(Target=host_id,\n Request=request)\n self._check_fail(request, response)\n portal = {}\n portals = response.Results['iscsi-portal-list-entries']\n if self._api_elem_is_empty(portals):\n return portal\n portal_infos = portals[0]['iscsi-portal-list-entry-info']\n for portal_info in portal_infos:\n portal['address'] = portal_info['ip-address'][0]\n portal['port'] = portal_info['ip-port'][0]\n portal['portal'] = portal_info['tpgroup-tag'][0]\n if host_address == portal['address']:\n break\n return portal", "title": "" }, { "docid": "9d0293719c9eba58923f1f2c91ca2126", "score": "0.45521924", "text": "def starter(self):\n archive = self.url + 'archive.html'\n data = self.getPage(archive)\n search = compile(tagre(\"a\", \"href\", r\"(\\d\\d\\d\\d-\\d\\d.html)\"))\n archivepages = self.fetchUrls(archive, data, search)\n return archivepages[-1]", "title": "" }, { "docid": 
"e512dfc3381a337fe65ff7cd0b69afb6", "score": "0.45457092", "text": "def first(self, **kwargs):\n return self.find(**kwargs).first()", "title": "" }, { "docid": "3d948f024f4e96f052b9bac3f36dae11", "score": "0.45454302", "text": "def find(self, _id):\n for i in self._objects:\n if i.id == _id:\n return i\n\n return None", "title": "" }, { "docid": "77f212ad18dfbb58bca0cc89074484a8", "score": "0.45413658", "text": "def get_complete_story(self, id):\n from story_app.services import sentences, users\n story_obj = self.get(id)\n if not story_obj:\n return []\n sentence_objs = sentences.get_story(id)\n story = []\n for obj in sentence_objs:\n user_id = obj.user_id\n user_obj = users.get(user_id)\n username = user_obj.user_name\n sentence = {\"sentence_number\": obj.sentence_number,\n \"data\": obj.data,\n \"user_name\": username\n }\n story.append(sentence)\n return story", "title": "" }, { "docid": "519b60cfb1320ff90e8fababdb8c926c", "score": "0.45399186", "text": "def identify_climate_scenario_run(scen_info, target_scen):\n for scen in scen_info:\n # run_id = scen.split('_')[0]\n if target_scen in scen_info[scen]['climate_scenario']:\n return scen\n # End for\n\n return None", "title": "" }, { "docid": "25c99a32d8d545cb8bda6fb1a2fe2ea2", "score": "0.4528281", "text": "def find_one(self, *args, **kwargs):\n doc = self._collection_with_options(kwargs).find_one(*args, **kwargs)\n if doc is None:\n return None\n\n return doc", "title": "" }, { "docid": "6e2e0e6862c0824102944f1bcf223a71", "score": "0.4524397", "text": "def search_article_content(self, url):\n data = read_json_file(self.article_log)\n try:\n for stock_name in data.keys():\n if data[stock_name].get(url) is not None:\n return data[stock_name].get(url).get(\"content\")\n except KeyError:\n return None", "title": "" }, { "docid": "125277086c3e9896ff1aeb1053d6fd2a", "score": "0.4524182", "text": "def test_load_latest_single_article(self):\n # user can access a article\n response = self.client.get(\"/modules/latest/1\")\n self.assertEqual(response.status_code, HTTPStatus.OK)", "title": "" }, { "docid": "d726ffc6c2a4ae860fd34ba7b37cb4e0", "score": "0.45211664", "text": "def get_stage(self, stage_name: str) -> Stage:\n return next((stage for stage in self.stages if stage.name == stage_name), None)", "title": "" }, { "docid": "828ee6e7b596fe36a5dca5399170186a", "score": "0.4515446", "text": "def find_one(self, collection, query):\n collection = self.get_collection(collection)\n result = collection.find_one(query)\n if result is None:\n return None\n return self._transformer.transform_outgoing(result)", "title": "" }, { "docid": "b03b3c6231641ef440ab8c6ed55fb64c", "score": "0.4506865", "text": "def _extract_service_catalog(self, url, resp, body, extract_token=True):\r\n\r\n # content must always present\r\n if resp.status_code == 200 or resp.status_code == 201:\r\n try:\r\n self.auth_url = url\r\n self.service_catalog = \\\r\n service_catalog.ServiceCatalog(body)\r\n if extract_token:\r\n self.auth_token = self.service_catalog.get_token()\r\n self.tenant_id = self.service_catalog.get_tenant_id()\r\n\r\n management_url = self.service_catalog.url_for(\r\n attr='region',\r\n filter_value=self.region_name,\r\n endpoint_type=self.endpoint_type,\r\n service_type=self.service_type,\r\n service_name=self.service_name,\r\n volume_service_name=self.volume_service_name,)\r\n self.management_url = management_url.rstrip('/')\r\n return None\r\n except exceptions.AmbiguousEndpoints:\r\n print(_(\"Found more than one valid endpoint. 
Use a more \"\r\n \"restrictive filter\"))\r\n raise\r\n except KeyError:\r\n raise exceptions.AuthorizationFailure()\r\n except exceptions.EndpointNotFound:\r\n print(_(\"Could not find any suitable endpoint. Correct \"\r\n \"region?\"))\r\n raise\r\n\r\n elif resp.status_code == 305:\r\n return resp.headers['location']\r\n else:\r\n raise exceptions.from_response(resp, body, url)", "title": "" }, { "docid": "febc59d0874e55a88e59bdf66878d66d", "score": "0.45060018", "text": "def get_by_plan(cls, request, plan):\n for stack in Stack.list(request):\n if stack.plan and (stack.plan.id == plan.id):\n return stack", "title": "" }, { "docid": "e7574c3eae047deec3be2726233ecb43", "score": "0.45050728", "text": "def findone(self, collect_name, condition={}):\n logging.info('Enter database findone ...')\n check_objectId(condition)\n logging.info(\"collect_name=%s, condition=%s\" %\n (collect_name, condition)\n )\n\n try:\n ret = self._database[collect_name].find_one(condition)\n logging.info(\"Leaving findonet...\")\n return ret\n except Exception,e:\n logging.error(\"findone in collection '%s' error: %s\" % (collect_name, e))\n logging.info(\"Leaving findone_collect...\")\n return None", "title": "" }, { "docid": "d0d6821c47b39fa33d1b7aba99d58de3", "score": "0.44996914", "text": "def find_sale(cls, sale_id):\n sale = cls.query.filter_by(\n sale_id=sale_id\n ).first() # SELECT * FOM sales WHERE sale_id = $sale_id LIMIT 1\n\n if sale:\n return sale\n return None", "title": "" } ]
b1b503812f818fd617459f0a4601f2fe
iterator doing a breadth first expansion of args
[ { "docid": "45e4eae1349b0aa1782a10647a84baa1", "score": "0.6627926", "text": "def breadth(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n deq = _deque((x, 0) for x in iterable)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg", "title": "" } ]
[ { "docid": "acefc46a9b3b6448986eaaa7135d2b45", "score": "0.7489818", "text": "def breadthIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n deq = _deque((x, 0) for x in args)\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n yield arg", "title": "" }, { "docid": "32784426e836ae401f0dedad24d69265", "score": "0.66983086", "text": "def preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n if limit:\n for arg in args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg", "title": "" }, { "docid": "6ac439a6769be452565ea3370ad9d49a", "score": "0.66308075", "text": "def breadthArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n deq = _deque((x, 0) for x in args)\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n for a in arg:\n deq.append((a, level + 1))\n else:\n result.append(arg)\n\n return tuple(result)", "title": "" }, { "docid": "d715fcc4aff6172155fe79ea63fe3e6f", "score": "0.66057295", "text": "def _expand_args(arglst):\n if not isinstance(arglst, Iterable):\n arglst = [arglst]\n elif isinstance(arglst, dict):\n arglst = [arglst]\n elif ('theano' in sys.modules\n and isinstance(arglst, _gettheano().graph.basic.Variable)):\n arglst = [arglst]\n elif isinstance(arglst, cf.TerminatingTypes):\n arglst = [arglst]\n for arg in arglst:\n if 'theano' in sys.modules and isinstance(arg, _gettheano().graph.basic.Variable):\n # Theano variables aren't iterable\n yield arg\n elif isinstance(arg, cf.TerminatingTypes):\n yield arg\n elif isinstance(arg, slice):\n yield arg.start\n yield arg.stop\n yield arg.step\n elif isinstance(arg, dict):\n for key in arg.keys():\n yield key\n for val in arg.values():\n #yield from nwlst.extend(_expand_args(val))\n yield from _expand_args(val)\n elif isinstance(arg, np.ndarray):\n if arg.ndim == 0:\n yield arg # can't iterate over a 0-dim array\n else:\n yield from _expand_args(arg)\n elif isinstance(arg, Iterable):\n try:\n yield from _expand_args(arg)\n except TypeError:\n # Pint objects with scalars report 'iterable' but then fail\n # on __iter__. 
Might be the case with other objects as well.\n # For Pint, see https://github.com/hgrecco/pint-pandas/issues/33#issuecomment-647198749\n # Should be fixed by this currently open PR https://github.com/hgrecco/pint/pull/1125\n yield arg\n else:\n yield arg", "title": "" }, { "docid": "f79051d6d43b7709516269787b9aa1a5", "score": "0.6265375", "text": "def postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg", "title": "" }, { "docid": "55548fd4d9389b37a27cf17a3b05d7d6", "score": "0.6155338", "text": "def preorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n if limit:\n for arg in iterable:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in iterable:\n yield arg", "title": "" }, { "docid": "6ffb9fd34ca0b20f6b8c5e4cf14d81dc", "score": "0.6097698", "text": "def recursive_iter(depth=1, *args):\r\n if depth <= 1:\r\n for i in range(*args):\r\n yield (i,)\r\n else:\r\n for i in range(*args):\r\n for j in recursive_iter(depth - 1, *args):\r\n yield (i,) + j", "title": "" }, { "docid": "96320b1e059c1b069640b7321ca7df88", "score": "0.6033857", "text": "def flatten(*args):\n for arg in args:\n for item in arg:\n yield item", "title": "" }, { "docid": "f706950d2a631d1460e6dba3409d2711", "score": "0.5951667", "text": "def with_outer(*args):\n def generator():\n for i in args:\n yield i\n return generator", "title": "" }, { "docid": "4e8d4d34c7b026cdb66c8cdb166409c8", "score": "0.5902121", "text": "def iterate_item_bfs(tree_item):\n if not tree_item.is_construct():\n return\n yield tree_item\n for index, arg in enumerate(tree_item.construct.args):\n if isinstance(arg, syntax.Construct):\n yield from iterate_item_bfs(TreeItem(arg, tree_item, index))\n elif isinstance(arg, list):\n for i, litem in enumerate(arg):\n yield from iterate_item_bfs(TreeItem(litem, tree_item, index, i))", "title": "" }, { "docid": "8236e492898893bc17020c51873ce761", "score": "0.58894", "text": "def preorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n stack = [(x, 0) for x in args]\n result = _deque()\n while stack:\n arg, level = stack.pop()\n if testFn(arg) and level < limit:\n stack += [(x, level + 1) for x in arg]\n else:\n result.appendleft(arg)\n\n return tuple(result)", "title": "" }, { "docid": "2af82b1b7c40ea15cf01f9a8af77b038", "score": "0.5804342", "text": "def __iter__(self):\n root = self.root()\n queue = Queue()\n queue.enqueue(root)\n return self._breadth_first(queue)", "title": "" }, { "docid": "fc0fe20d557c6f6f019a41a65f7665b8", "score": "0.5784455", "text": "def walk(query):\n stack = [[query, 0]]\n while len(stack) != 0:\n query, index = stack[-1]\n if isinstance(query, queries.QueryCombination):\n if index < len(query.subqs):\n stack[-1][1] = index + 1\n stack.append([query.subqs[index], None])\n continue\n yield len(stack) - 1, query\n del stack[-1]", "title": "" }, { "docid": "779857bfde1b70b762517a73905688d3", "score": "0.5733016", "text": "def postorder(iterable, testFn=isIterable, limit=sys.getrecursionlimit()):\n if limit:\n last = None\n for arg in iterable:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in iterable:\n 
yield arg", "title": "" }, { "docid": "63c88beb39580ca51e69564bc266fe75", "score": "0.57201535", "text": "def __iter__(self):\n # return self.options[:self.idx] + self.options[self.idx:]\n for op in self.queue():\n yield op", "title": "" }, { "docid": "9cb5507f6890f51bb7cbfb53b7e0933c", "score": "0.56360745", "text": "def callmany(self, allargs):\n seqs = []\n # add all inputs to queue\n for args, kw in allargs:\n t = time.time()\n seqs.append(t)\n self.inq.put((t, args, kw))\n # read all outputs\n outs = {}\n while len(seqs) > 0:\n t, ret = self.outq.get()\n # if we don't know this seq number, put it back on the queue\n if t not in seqs:\n self.outq.put((t, ret))\n time.sleep(0.01)\n continue\n # if this is the first item, yield it\n if not seqs: break\n if t == seqs[0]:\n seqs.pop(0)\n yield ret\n # also check if we have the next item(s) done\n while seqs and seqs[0] in outs:\n t = seqs.pop(0)\n ret = outs.pop(t)\n yield ret\n continue\n # else, save it for future use\n outs[t] = ret", "title": "" }, { "docid": "a8b6fef85cd94dd6345a914a2b3a6b8f", "score": "0.556806", "text": "def _breadth_first(self, queue, elements=True):\n while not queue.is_empty():\n node = queue.dequeue()\n if elements:\n yield node.element()\n else:\n yield node\n for child in self.children(node):\n queue.enqueue(child)", "title": "" }, { "docid": "aeeb53cc56b9e6b6d22611c3a30ab43f", "score": "0.5565615", "text": "def iterate_layers(self, *args):\n for layer in range(self.num_layers):\n yield layer, (\n (\n direction,\n tuple(arg[self.num_directions * layer + direction] for arg in args),\n )\n for direction in range(self.num_directions)\n )", "title": "" }, { "docid": "4021c9495e041285cc00640dd0614c3b", "score": "0.55517286", "text": "def explore(self, *args):", "title": "" }, { "docid": "d760a9101fb0f853966c6474b6a58c03", "score": "0.55238575", "text": "def __iter__(self):\n for b in self.x:\n yield b", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.54597753", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.54597753", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.54597753", "text": "def __iter__():", "title": "" }, { "docid": "6ba30e3a03707e367a0a71e5632e052e", "score": "0.54597753", "text": "def __iter__():", "title": "" }, { "docid": "bc8917ceb84452f2c80b2b561e085563", "score": "0.5441532", "text": "def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)", "title": "" }, { "docid": "71061d4077336e4efccefb438fe8dc7d", "score": "0.5421093", "text": "def __iter__(self):\n\n if not self.left and not self.right:\n raise StopIteration\n return self.children().__iter__()\n\n # def SP_traverse(self):\n \"\"\" Return a string of series-parallel partial order.\n\n A recursion way to implement in-order traversal.\n\n return\n -------\n A simple formula of series-parallel partial order\n\n \"\"\"\n # if self.left != None and self.right == None:\n # return str(self.left.SP_traverse()) + \" \" + str(self.data)\n #\n # if self.right != None and self.left == None:\n # return str(self.data) + \" \" + str(self.right.SP_traverse())\n #\n # if self.left != None and self.right != None:\n # return str(self.left.SP_traverse()) + \" \" + str(self.data) + \" \" + 
str(self.right.SP_traverse())\n #\n # if self.left == None and self.right == None:\n # return str(self.data)", "title": "" }, { "docid": "afed744898d1e8bed5c7ddc58e04eae7", "score": "0.5392897", "text": "def fold_args(self, n, op):\n ret = _CollectionBuilder()\n args = self._pop_args(n)\n if args is None:\n self.push(None)\n return None\n\n for elt in args:\n ret.add(elt)\n elt.op.folded = op\n return ret.build()", "title": "" }, { "docid": "dcfb42b873d66808eb410cf4c0e27323", "score": "0.5384898", "text": "def _batching_call(self, *args, **kw):\n b_start = kw.pop('b_start', None)\n b_size = kw.pop('b_size', None)\n results = list(self._original_call(*args, **kw))\n\n if b_start is None:\n return results\n\n if b_size is None:\n b_size = len(results)\n\n return results[b_start:b_start+b_size]", "title": "" }, { "docid": "7889fba777a244ea84e66c0907213c43", "score": "0.53720236", "text": "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "title": "" }, { "docid": "d49d2c6deb361fdfffe998c9b00ab95f", "score": "0.53628534", "text": "def postorderArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n if len(args) == 1:\n return (args[0],)\n else:\n deq = _deque((x, 0) for x in args)\n stack = []\n result = []\n while deq:\n arg, level = deq.popleft()\n if testFn(arg) and level < limit:\n deq = _deque([(x, level + 1) for x in arg] + list(deq))\n else:\n if stack:\n while stack and level <= stack[-1][1]:\n result.append(stack.pop()[0])\n stack.append((arg, level))\n else:\n stack.append((arg, level))\n while stack:\n result.append(stack.pop()[0])\n\n return tuple(result)", "title": "" }, { "docid": "0c12685e05ce4e97babb06d30f08bc6e", "score": "0.533388", "text": "def __iter__(self):\n\n # collector will fetch chunksize array for each 'get' call\n collector = FIFOArray(self.chunksize, self.axis)\n\n # make tmp array to hold generated subarrs\n tmp = []\n tmp_size = 0\n for subarr in self.data(**self.kwargs):\n\n tmp.append(subarr)\n tmp_size += subarr.shape[self.axis]\n\n # if tmp exceeds chunksize put in collector\n if tmp_size >= self.chunksize:\n arr = np.concatenate(tmp, axis=self.axis)\n collector.put(arr)\n\n # fetch chunksize till not full\n while collector.full():\n yield collector.get()\n\n # place leftover back into tmp and empty collector\n tmp = [collector.queue]\n tmp_size = collector.qsize()\n collector.queue = np.array([])\n\n else:\n\n # append to tmp again\n continue\n\n # else runs after normal loop exit -- required here\n else: #pylint: disable=useless-else-on-loop\n\n # yield whatever is left in tmp (its below chunksize)\n remaining = np.concatenate(tmp, axis=self.axis)\n if remaining.size > 0:\n yield remaining", "title": "" }, { "docid": "1fac93e7c17092e7f835ac32e6b80a28", "score": "0.5326577", "text": "def __iter__(self) -> Iterable[Node]:", "title": "" }, { "docid": "51925000646c460f46e7924838e68712", "score": "0.5323976", "text": "def 
__iter__(self):\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right", "title": "" }, { "docid": "59d580bda5f7a6a291cce2a20bffcd65", "score": "0.53183365", "text": "def breadth_first(self):\n import queue\n keeper = queue.Queue()\n keeper.enqueue(self)\n while(keeper.size() != 0):\n temp = keeper.dequeue()\n if temp.val is not None:\n yield temp.val\n if temp.left is not None:\n keeper.enqueue(temp.left)\n if temp.right is not None:\n keeper.enqueue(temp.right)", "title": "" }, { "docid": "9b9540995841fd049f45463a3148aaf2", "score": "0.5316405", "text": "def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue", "title": "" }, { "docid": "83d92b530f30457a1f663f5cd91b3e2a", "score": "0.5313513", "text": "def __iter__(self):\n\n yield from self._traverse_forward(self.root)", "title": "" }, { "docid": "b4258147ea75f1b83eac480f8574f045", "score": "0.53021395", "text": "def run_stacker(self):\n \tself.iterator()\n \tself.stack()", "title": "" }, { "docid": "dffe2d1b176a7f91c72aef0f8eea2091", "score": "0.52829593", "text": "def iterate_item(tree_item):\n if not tree_item.is_construct():\n return\n for index, arg in enumerate(tree_item.construct.args):\n if isinstance(arg, syntax.Construct):\n yield from iterate_item(TreeItem(arg, tree_item, index))\n elif isinstance(arg, list):\n for i, litem in enumerate(arg):\n yield from iterate_item(TreeItem(litem, tree_item, index, i))\n yield tree_item", "title": "" }, { "docid": "9200a4da8d6691496357fc14b0e50d4c", "score": "0.5266088", "text": "def __iter__(self):\n for x in self._order:\n yield x", "title": "" }, { "docid": "9f24c6029f3e4f23ffa3d5cb3a143ea0", "score": "0.52658486", "text": "def _build_iterable(self):", "title": "" }, { "docid": "61cd9f4267b1f85ce3a52ff68a7851a5", "score": "0.52440053", "text": "def tamari_inversions_iter(self):\n n1 = self.size() + 1\n for a in range(1, self.size()): # a == n will never work\n ipa = self.increasing_parent(a)\n if ipa is None:\n max_b_1 = n1\n else:\n max_b_1 = ipa\n for b in range(a + 1, max_b_1):\n dpb = self.decreasing_parent(b)\n if dpb is None or dpb < a:\n yield (a, b)", "title": "" }, { "docid": "9c343adbc2b68b030fbec92d45fd808b", "score": "0.5241337", "text": "def iterate_steps(steps):\n pop = None\n while steps:\n for step, depends in steps.items():\n if depends == []:\n pop = step\n if not pop:\n return\n pop_step(pop, steps)\n yield pop", "title": "" }, { "docid": "d1090fa4f8f9222af1f59042946a67cc", "score": "0.52357286", "text": "def __iter__(self):\n for x in self.seq: yield x", "title": "" }, { "docid": "955372311489d1376f45b1f97d95952e", "score": "0.5225762", "text": "def traverse(self, traverser, **kwargs):\n atr = []\n for arg in self.args:\n atr.append(arg.traverse(traverser, **kwargs))\n return traverser.function(self, atr, **kwargs)", "title": "" }, { "docid": "6551e36f7775c600961aa3a1ad8ca136", "score": "0.52159953", "text": "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not 
lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "title": "" }, { "docid": "f3743287cf1a2e951fe13c0e05871d5f", "score": "0.52123964", "text": "def __iter__(self):\n leaf_paths, leaf_vals = self._find_combinatorial_leaves()\n return self._combinations_generator(leaf_paths, leaf_vals)", "title": "" }, { "docid": "3df5f311d83ee4b2b0c372c4ae428337", "score": "0.5204798", "text": "def chunkerator(obj: Iterable, stepsize: int = 10) -> Iterator:\n\n if obj:\n chunk, obj = obj[0:stepsize], obj[stepsize:]\n\n try:\n yield chunk\n yield from chunkerator(obj, stepsize=stepsize)\n except (RuntimeError, StopIteration, UnboundLocalError):\n pass", "title": "" }, { "docid": "b8f630a117122ef6d946c2662dd53fe6", "score": "0.51914257", "text": "def preorder_iterator(node):\n yield node\n for child in node.children:\n yield from preorder_iterator(child)", "title": "" }, { "docid": "54ee81230fe8a0e53c28a0c3bbc14bc8", "score": "0.51825345", "text": "def __iter__(self):\n handle = self.parent.handle\n cur = getattr(gv, \"first%s\" % self.type)(handle)\n nextitem = getattr(gv, \"next%s\" % self.type)\n while gv.ok(cur):\n yield self.get(gv.nameof(cur))\n cur = nextitem(handle, cur)", "title": "" }, { "docid": "129f27daf185fb16faae6777612dc3b8", "score": "0.5180216", "text": "def __iter__(self):\n yield from self.calls", "title": "" }, { "docid": "6191a21099b55013c00ab79e1ad9f702", "score": "0.5174856", "text": "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "title": "" }, { "docid": "7feed55b7a1cc555b920fc94b1891c63", "score": "0.5169323", "text": "def __iter__(self):\n for p in self.positions(): # use same order as positions()\n yield p.element() # but yield each element", "title": "" }, { "docid": "dfcc0523f00941f1e4f944d0dc733a9e", "score": "0.5167814", "text": "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "title": "" }, { "docid": "bb6509e17c37de417c6dc92b01e0067c", "score": "0.51666784", "text": "def __iter__(self):\n # set current node to front node\n current = self.front\n # while current != None\n while current:\n # send out current node's data\n yield current.data\n # move to next node\n current = current.prior", "title": "" }, { "docid": "c5c5585464a101e512565f4f7c2ed5bb", "score": "0.51604784", "text": "def test_breath_first_traversal(our_bsts):\n bft = []\n for i in our_bsts[0].breadth_first_traversal():\n bft.append(i)\n assert bft == our_bsts[3]", "title": "" }, { "docid": "cedd9bd1c00d638c31a0197b74ae6d69", "score": "0.5157327", "text": "def __opExpand1(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n axA = list(map(lambda x:A.v.index(x) if x in A.v else -1 ,vall))\n axB = list(map(lambda x:B.v.index(x) if x in B.v else -1 ,vall))\n if ( (not (out is None)) and (out.v == vall) ):\n f = out\n else:\n f = Factor(vall) # TODO: should also change \"out\" if specified!\n it = np.nditer([A.t, B.t, f.t], \n op_axes = [ axA, axB, None ], \n op_flags=[['readonly'], ['readonly'], ['writeonly']])\n for (i,j,k) in it:\n op(i,j,out=k)\n return f", "title": "" }, { "docid": "041b14c686367b2640f5e388b04a0385", "score": "0.51517135", "text": "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "title": "" }, { "docid": "322f3cd769ff093136773cfedb9bfe88", "score": "0.51445544", "text": "def inner(items, r):\n items = set(items)\n if not len(items):\n yield ()\n return\n first = next(iter(items))\n 
remainder = items.difference((first, ))\n for combination in combinations(remainder, r-1):\n first_subset = (first, ) + combination\n for partition in inner(remainder.difference(combination), r):\n yield (first_subset, ) + partition", "title": "" }, { "docid": "b5790d84724405f17a04a7e62b450a61", "score": "0.5144055", "text": "def iterate(self):\n yield self\n for x in self:\n for y in x.iterate():\n yield y", "title": "" }, { "docid": "04bfb6b3f90cd9b06e35bd78c4cf3dcc", "score": "0.51426584", "text": "def args_generator(args, num_exps, grid_search):\n od = OrderedDict(args)\n\n if grid_search:\n keys = [param for param in od]\n\n for i, vals in enumerate(itertools.product(*[value for param, value in od.items()])):\n yield zip(keys + ['-exp-id'], [str(val) for val in vals] + [str(i)])\n else:\n for _ in range(num_exps):\n args_setting = [(pname, str(next(pvalue))) for pname, pvalue in od.items()]\n\n yield args_setting", "title": "" }, { "docid": "6d434755fb09bf24d1a83d944b929c66", "score": "0.5137433", "text": "def scanl(f, base, l):\n yield base\n for x in l:\n base = f(base, x)\n yield base", "title": "" }, { "docid": "09b4c724eae2e6c2586024bf18fe850c", "score": "0.51343894", "text": "def inorder_generator(self):\n\n for val in self.inorder_helper_generator(self.root):\n yield val", "title": "" }, { "docid": "d7cb2a7cb0d59bcacc4d05d36c25568b", "score": "0.5129452", "text": "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "title": "" }, { "docid": "e777981bcb5c318534e23221898fd665", "score": "0.51204425", "text": "def expand_all(start, end, tags):\n if len(tags) == 1:\n for branch in expand(start, end, tags[0]):\n yield [branch]\n else:\n first, rest = tags[0], tags[1:]\n for middle in range(start+1, end+1-len(rest)): \n for first_branch in expand(start, middle, first):\n for rest_branches in expand_all(middle, end, rest):\n yield [first_branch] + rest_branches", "title": "" }, { "docid": "6cd205d449ac7a7796a6120a394311fd", "score": "0.5117666", "text": "def flat_names(args):\n for arg in args:\n if isinstance(arg, basestring):\n yield arg\n else:\n for arg in flat_names(arg):\n yield arg", "title": "" }, { "docid": "04607bc86155e93ef3be3ef3d6ff9d9b", "score": "0.5117191", "text": "def item_iter(self, *a):\r\n raise NotImplementedError", "title": "" }, { "docid": "0ab621859f15691dc1e04982c599a6b5", "score": "0.5111271", "text": "def item_iter(self, a):\r\n for i in a[0]:\r\n yield i", "title": "" }, { "docid": "e5af6f74cfdf8a5d4c98b4b89edd5894", "score": "0.5100592", "text": "def __iter__(self):\n n = self.head\n for _ in range(len(self)):\n if n == self.capacity:\n n = 0\n yield self.lst[n]\n n += 1", "title": "" }, { "docid": "52dcb21271e61201f1e5d7e382916967", "score": "0.5098948", "text": "def test_nested_gen(n):\n for a in range(n):\n yield (b for b in range(a))", "title": "" }, { "docid": "15e3582487e21bb3bc417c9e55dd436a", "score": "0.5098149", "text": "def _fa(items, s):\n if atomp(items):\n return items\n if len(s) == 0:\n return items[0]\n lst = [0] * s[0]\n stride = s[0]\n for i in range(s[0]):\n lst[i] = _fa(items[i::stride], s[1:])\n return lst", "title": "" }, { "docid": "15e3582487e21bb3bc417c9e55dd436a", "score": "0.5098149", "text": "def _fa(items, s):\n if atomp(items):\n return items\n if len(s) == 0:\n return items[0]\n lst = [0] * s[0]\n stride = s[0]\n for i in range(s[0]):\n lst[i] = _fa(items[i::stride], s[1:])\n return lst", "title": "" }, { "docid": 
"78b85c709198aac2c91fcf5af29676ca", "score": "0.5097105", "text": "def makeloop(keyword, G, *args):\n if not args:\n return []\n Nargs = len(args)\n lis = []\n for arg in args:\n lis.append(makeiter(G(\"%s%s\" % (keyword, arg))))\n try:\n Nlis = lis[0].count()\n except TypeError:\n Nlis = len(lis[0])\n olist = [[] for i in range(Nargs)]\n for i in range(Nlis):\n for k in range(Nargs):\n try:\n olist[k].append(lis[k][i])\n except Exception:\n olist[k].append(\"\")\n return olist", "title": "" }, { "docid": "5ccc4f29bbf0bd1a6ed67122802dff0a", "score": "0.5081077", "text": "def __iter__(self):\n for (_,_,path) in self.frontierpq:\n yield path", "title": "" }, { "docid": "13ce60b4d2552372c61f14e7a1173950", "score": "0.5078501", "text": "def fibonacci_inner_generator() -> Iterator[int]:\n yield 0\n yield 1\n fib1 = fibonacci_inner_generator()\n next(iter(fib1))\n yield from (f2 + f1 for f2, f1 in zip(fibonacci_inner_generator(), fib1))", "title": "" }, { "docid": "209b4cc92806eb59c8b55dd8b3b64753", "score": "0.50782406", "text": "def iterate():\n # States are of the form (coordinates, word so far, used spots)\n # Load the initial states into the stack\n global theStack\n for r,layer in enumerate(honeycomb):\n for e,el in enumerate(layer):\n theStack.append( ((e,r), [el],set([(e,r)])) )\n \n while (len(theStack) != 0):\n #pop the next run\n (e,r),soFar,used=theStack[-1]\n theStack=theStack[:-1]\n #run it!\n step((e,r),soFar,used)", "title": "" }, { "docid": "3db8691461eee30eb49d71a731244645", "score": "0.5073934", "text": "def _prog_nodes(self):\n\n self.arbor._grow_tree(self)\n my_node = self\n while my_node is not None:\n yield my_node\n ancestors = list(my_node.ancestors)\n if ancestors:\n my_node = my_node.arbor.selector(ancestors)\n else:\n my_node = None", "title": "" }, { "docid": "660f20148e021e2e3db3da20f3c0e1eb", "score": "0.50662196", "text": "def permutations(iterable):\n pass", "title": "" }, { "docid": "1351eb6b9bcfcd63d66095dcc8ca7fff", "score": "0.5066016", "text": "def breadth_first(self):\n q = Queue()\n q.enqueue(self)\n while q.size() > 0:\n node = q.dequeue()\n yield node.val\n if node.left:\n q.enqueue(node.left)\n if node.right:\n q.enqueue(node.right)", "title": "" }, { "docid": "157c2d22a46d18b0fc9ad2771eb1b0ba", "score": "0.50659364", "text": "def next(self) -> Iterable[RecordBatch]:\n for batch in self._parent_operator.next():\n args = self._process_arguments(self._arguments, batch=batch)\n yield self._kernel(batch, args)", "title": "" }, { "docid": "cdc6fb5c11b460e95c462e2507be5690", "score": "0.5058996", "text": "def __iter__(self):\n\n def inner(stack, node):\n if node.value is not None and stack != '' and stack != ():\n yield (stack, node.value)\n\n for key, child in node.children.items():\n for char, value in inner(stack + key, child):\n yield (char, value)\n\n init_stack = ''\n\n if self.type == tuple:\n init_stack = ()\n\n return inner(init_stack, self)", "title": "" }, { "docid": "d77193f0801a14eaacf308f8361fdef7", "score": "0.50577474", "text": "def merge_args ( self , num , *args ) :\n assert isinstance ( num , integer_types ) and 1 <= num ,\\\n \"merge_args: invalid chunk size ``%s''\" % num\n\n ## no action \n if len ( args ) < num : return args\n\n from ostap.utils.utils import chunked\n\n lst = flat_args ( *args )\n \n self.aux_keep.append ( lst )\n \n while num < len ( lst ) : \n \n nlst = chunked ( lst , 4 )\n ll = [ ROOT.RooFit.MultiArg ( *l ) if 1 < len ( l ) else l [ 0 ] for l in nlst ]\n \n self.aux_keep.append ( ll ) \n lst = tuple ( ll )\n \n 
return lst", "title": "" }, { "docid": "a0d90b3db90629898ed774f984cdc14b", "score": "0.505768", "text": "def recursive_unpack(self):\n\n def _genflatten(lst):\n if not lst:\n return []\n ##\n if isinstance(lst[0], Assembly):\n lst = lst[0].unpack()\n ##\n for elem in lst:\n if isinstance(elem, Assembly):\n apos = elem.GetPosition()\n asum = np.sum(apos)\n for x in elem.unpack():\n if asum:\n yield x.clone().shift(apos)\n else:\n yield x\n else:\n yield elem\n\n return list(_genflatten([self]))", "title": "" }, { "docid": "1783bf45c01298794b4040b6ff543cb4", "score": "0.5056925", "text": "def item_iter(self, a):\n for i in a[0]:\n yield i", "title": "" }, { "docid": "6f313f9798f20d0a042606b34bf8a0e8", "score": "0.5055435", "text": "def __init__(self, nums):\n self.args = nums", "title": "" }, { "docid": "c2a69bf149cfd9e3b23bc3f156e77059", "score": "0.5053057", "text": "def step(tree):\n if type(tree) == list and type(tree[0]) == tuple:#This basically looks for any applications it can do directly. These applications are the ones where the function is already defined through abstraction. That's why it checks whether the first element of the list (for application) is an abstraction\n func = tree[0]#The whole function with parameter and body\n name = func[0][1]#Only the parameter\n func = func[1]#Only the body\n arg = tree[1]\n nfunc = replace(name, arg, func)#The replacement of all occurences of the parameter in the body with the argument\n return nfunc\n elif type(tree) == list:\n return [step(tree[0]), step(tree[1])]#recursive checking, again\n elif type(tree) == tuple:\n return (tree[0], step(tree[1]))\n else:\n return tree", "title": "" }, { "docid": "8fd075fb5c67f0d81cff95ea5e262671", "score": "0.5047858", "text": "def bfs(self):\r\n Q = [self.head()]\r\n visited = []\r\n while Q != []:\r\n cur = Q[0]\r\n visited.append(cur)\r\n Q = Q[1:]\r\n Q.extend([ch for ch in self.get_deps(cur.get_field('index'))])\r\n for x in reversed(visited):\r\n yield x", "title": "" }, { "docid": "6806887efccbda1d7fdf14ed4dba0af3", "score": "0.50403017", "text": "def FastaM10Iterator(handle, seq_count=...):\n ...", "title": "" }, { "docid": "812d001bab64d5fef3dd6208afbf953b", "score": "0.503798", "text": "def next(self, initial):", "title": "" }, { "docid": "d3bd242c12e2195d2e0e0c1a64ce80ea", "score": "0.5035488", "text": "def simple():\n yield 1\n yield 2\n yield 3", "title": "" }, { "docid": "0a709990c1fde8ba580e2b936408bfed", "score": "0.5034933", "text": "def _iterator_codegen(resty):\n\n def codegen(context, builder, sig, args):\n [d] = args\n [td] = sig.args\n iterhelper = context.make_helper(builder, resty)\n iterhelper.parent = d\n iterhelper.state = iterhelper.state.type(None)\n return impl_ret_borrowed(\n context,\n builder,\n resty,\n iterhelper._getvalue(),\n )\n\n return codegen", "title": "" }, { "docid": "166e228fc22c95aee619ad010c9f038f", "score": "0.5032005", "text": "def batch(size, iterable):\r\n return list(xbatch(size, iterable))", "title": "" }, { "docid": "16332b7108c0d07aba0c20e85d142581", "score": "0.503174", "text": "def HierarchyIterator(obj):\n while obj:\n yield obj\n for opChild in SplineInputGeneratorHelper.HierarchyIterator(obj.GetDown()):\n yield opChild\n obj = obj.GetNext()", "title": "" }, { "docid": "20744b2f2772dcfb73ea03d51d273438", "score": "0.50275594", "text": "def traverse(self, visit, *args, **kwargs):\n if not self.__visited:\n visit(self, *args, **kwargs)\n self.__visited = True\n for c in self.parameters:\n c.traverse(visit, *args, **kwargs)\n self.__visited = 
False", "title": "" }, { "docid": "bab01a785b0b0de155f1aa96cb170404", "score": "0.5021064", "text": "def breadth_first_iter(root_elem):\r\n\t# Guard agains an empty tree\r\n\tresult = []\r\n\tif root_elem is None:\r\n\t\treturn result\r\n\tqueue = deque([root_elem])\r\n\twhile queue:\r\n\t\tcurrent = queue.popleft()\r\n\t\tresult.append(current.value)\r\n\t\tif current.left:\r\n\t\t\tqueue.append(current.left)\r\n\t\tif current.right:\r\n\t\t\tqueue.append(current.right)\r\n\treturn result", "title": "" }, { "docid": "f33e3d47f1e44c8379fa22a97116cf7a", "score": "0.50204086", "text": "def __iter__(self):\n n = self.getRoot()\n while n.left is not None:\n n = n.left\n while True:\n yield n.value\n n = n._successor()\n if n is None:\n break", "title": "" }, { "docid": "2729daeea222e22ac303244d24c9d019", "score": "0.5017816", "text": "def __iter__(self) -> Generator:\r\n yield from self.sequence", "title": "" }, { "docid": "a35c843ba878e2ec71e630565edc043f", "score": "0.501039", "text": "def __iter__(self):\n for tree in self._tree.subTrees():\n yield self.__class__(tree)", "title": "" }, { "docid": "525fa95461e1858dfa28206d94781915", "score": "0.5010081", "text": "def test_breadth_first(depth_one_tree):\n testlist = []\n depth_one_tree.breadth_first(lambda x: testlist.append(x))\n assert str(testlist) == str([0, 1, 2, 3, 4])", "title": "" }, { "docid": "30f3fa5947cc1fb916ee0ac8bc757350", "score": "0.5009786", "text": "def traverse_breadth_first(self, fn):\n queue = deque([self.root])\n while len(queue) > 0:\n node = queue.popleft()\n fn(node)\n queue.extend(node.children)", "title": "" }, { "docid": "c827ec19e0104449aeeea116918b31b2", "score": "0.50025415", "text": "def __iter__(self):\n for p in self.positions(): # use same order as positons()\n yield p.element() # but yield each element", "title": "" } ]
8ab13c10f751fee03b02fbd3306888af
Tally a user's vote on his favorite hint.
[ { "docid": "fec81930c6623627cc1a009a140be9e0", "score": "0.70770836", "text": "def tally_vote(self, data):\r\n if self.user_voted:\r\n return {'error': 'Sorry, but you have already voted!'}\r\n ans = data['answer']\r\n if not self.validate_answer(ans):\r\n # Uh oh. Invalid answer.\r\n log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))\r\n return {'error': 'Failure in voting!'}\r\n hint_pk = str(data['hint'])\r\n # We use temp_dict because we need to do a direct write for the database to update.\r\n temp_dict = self.hints\r\n try:\r\n temp_dict[ans][hint_pk][1] += 1\r\n except KeyError:\r\n log.exception('''Failure in hinter tally_vote: User voted for non-existant hint:\r\n Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))\r\n return {'error': 'Failure in voting!'}\r\n self.hints = temp_dict\r\n # Don't let the user vote again!\r\n self.user_voted = True\r\n\r\n # Return a list of how many votes each hint got.\r\n pk_list = json.loads(data['pk_list'])\r\n hint_and_votes = []\r\n for answer, vote_pk in pk_list:\r\n if not self.validate_answer(answer):\r\n log.exception('In hinter tally_vote, couldn\\'t parse {ans}'.format(ans=answer))\r\n continue\r\n try:\r\n hint_and_votes.append(temp_dict[answer][str(vote_pk)])\r\n except KeyError:\r\n log.exception('In hinter tally_vote, couldn\\'t find: {ans}, {vote_pk}'.format(\r\n ans=answer, vote_pk=str(vote_pk)))\r\n\r\n hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)\r\n # Reset self.previous_answers and user_submissions.\r\n self.previous_answers = []\r\n self.user_submissions = []\r\n return {'hint_and_votes': hint_and_votes}", "title": "" } ]
[ { "docid": "032db2842605f6bc0bfef2feb4c8d36b", "score": "0.6199063", "text": "def t(p, vote_count):\n return vote_count[p]", "title": "" }, { "docid": "360a0e38b8979ef8f0598a1dbf058b32", "score": "0.59225565", "text": "def auto_fav(q, count=5, result_type=\"recent\"):\n\n result = search_tweets(q, count, result_type)\n\n for tweet in result[\"statuses\"]:\n try:\n # don't favorite your own tweets\n if tweet[\"user\"][\"screen_name\"] == TWITTER_HANDLE:\n continue\n\n result = t.favorites.create(_id=tweet[\"id\"])\n print(\"favorited: %s\" % (result[\"text\"].encode(\"utf-8\")))\n\n # when you have already favorited a tweet, this error is thrown\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))", "title": "" }, { "docid": "ceca38f4913bf367ceae655bae55b8fe", "score": "0.5919938", "text": "def up_vote(cls, user, message):\r\n pass", "title": "" }, { "docid": "75a795aa7b40b8adbf0c31372cba6b1b", "score": "0.5910275", "text": "def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()", "title": "" }, { "docid": "fd3fb373210efd653633f9e4c6981d94", "score": "0.5808141", "text": "def test_vote_withpermission(self):\r\n mock_module = CHModuleFactory.create(\r\n previous_answers=[['24.0', [0, 3, None]]])\r\n json_in = {'answer': '24.0', 'hint': 3, 'pk_list': json.dumps([['24.0', 0], ['24.0', 3]])}\r\n dict_out = mock_module.tally_vote(json_in)\r\n self.assertTrue(mock_module.hints['24.0']['0'][1] == 40)\r\n self.assertTrue(mock_module.hints['24.0']['3'][1] == 31)\r\n self.assertTrue(['Best hint', 40] in dict_out['hint_and_votes'])\r\n self.assertTrue(['Another hint', 31] in dict_out['hint_and_votes'])", "title": "" }, { "docid": "6199bb2a8ccb55be98a41f8575880905", "score": "0.57856137", "text": "def up_vote(cls, user, message):\n pass", "title": "" }, { "docid": "0cd8cf55aabc3c2a192d13913d481e74", "score": "0.57797664", "text": "async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)", "title": "" }, { "docid": "36246490a6c1a38ceef328a4eaeb6d17", "score": "0.57708484", "text": "def like_tweet(tweet_id):\n twitter.create_favorite(id=tweet_id)", "title": "" }, { "docid": "127037501b0b1a4b3ad1ff4a20d507c8", "score": "0.57653683", "text": "def count_favorite(self, obj):\n\n return obj.recipe_fav.count()", "title": "" }, { "docid": "a1076523e52e397ae694d5dd34221b6b", "score": "0.57623374", "text": "def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()", "title": "" }, { "docid": "5d82e28f2969460fe8c96fc889aa2e21", "score": "0.5743391", "text": "def toggle_vote(self, 
user, value):\n try:\n v = Vote.objects.get(user=user, content=self)\n except Vote.DoesNotExist:\n Vote.objects.create(user=user, content=self, value=value)\n else:\n if v.value == value:\n v.delete()\n else:\n v.value = value\n v.save(update_fields=['value'])\n\n self.up = self.votes.count_upvotes()\n self.down = self.votes.count_downvotes()\n self.set_points()\n self.set_timepoints()\n self.save(update_fields=['up', 'down', 'points', 'timepoints'])", "title": "" }, { "docid": "c83e6025c1ba506f855b61c62d082347", "score": "0.57198995", "text": "def favourite(self, favourite):\n\n self._favourite = favourite", "title": "" }, { "docid": "5d5e3f4fec89f7e65489cf91f8941288", "score": "0.56923956", "text": "def _vote(self, team):\r\n return True", "title": "" }, { "docid": "4e2cc02a68eeaaf05d2f1f789b8345e4", "score": "0.5668003", "text": "def opinion_vote(mode, verbose, revision):\n judge = VotingJudge(mode, revision)\n flags = judge.vote()\n if verbose is True:\n click.echo(\"Vote resulted in %i flags:\" % len(flags))\n for f in flags:\n format_flag(f)", "title": "" }, { "docid": "dd564b50725bc2bab881e2c3c8686142", "score": "0.5648516", "text": "def get_vote_tally(self):\r\n voters = []\r\n tally = {}\r\n for b in reversed(self.blocks):\r\n if b.user_id not in voters and type(b) == VoteBlock:\r\n voters.append(b.user_id)\r\n if b.choice in tally.keys():\r\n tally[b.choice] += 1\r\n else:\r\n tally[b.choice] = 1\r\n result = []\r\n for key in tally:\r\n d = {}\r\n d['name'] = key\r\n d['count'] = tally[key]\r\n result.append(d)\r\n return result", "title": "" }, { "docid": "2d45b981832c0fb705284c62e62f5d17", "score": "0.56234056", "text": "def upvote_feature(request, pk):\n if request.method == \"POST\":\n feature = get_object_or_404(Feature, pk=pk)\n feature.upvotes += 1\n feature.save()\n return redirect('get_feature')", "title": "" }, { "docid": "d3a6cbbe2b05f0cf347e227e78a8a5e2", "score": "0.5620895", "text": "def is_liked(value, user: User):\n return value.is_liked(user)", "title": "" }, { "docid": "cf53aa83df0275c0ed9fa338a27493b8", "score": "0.5615888", "text": "def rate_review_for_user():\n values = flask.request.values\n review_id = values.get('review_id')\n voted_helpful = values.get('voted_helpful')\n review_type = values.get('review_type')\n\n uc_review = None\n filtered_courses = m.UserCourse.objects(id=review_id)\n if len(filtered_courses) > 0:\n uc = filtered_courses[0]\n if review_type == 'course':\n uc_review = uc.course_review\n else:\n uc_review = uc.professor_review\n else:\n filtered_courses = m.MenloCourse.objects(id=review_id)\n if len(filtered_courses) > 0:\n uc = filtered_courses[0]\n uc_review = uc.professor_review\n\n vote_added_response = api_util.jsonify({\n 'success': True\n })\n voted_already_response = api_util.jsonify({\n 'already_voted': True\n })\n\n user = _get_user_require_auth()\n if review_type == 'course':\n if review_id in user.voted_course_review_ids:\n return voted_already_response\n user.voted_course_review_ids.append(review_id)\n elif review_type == 'prof':\n if review_id in user.voted_prof_review_ids:\n return voted_already_response\n user.voted_prof_review_ids.append(review_id)\n user.save()\n\n if uc_review:\n if voted_helpful == 'true':\n uc_review.num_voted_helpful += 1\n else:\n uc_review.num_voted_not_helpful += 1\n uc.save()\n\n return vote_added_response", "title": "" }, { "docid": "d48f610992e3ee4077efc303b6cd1bf8", "score": "0.5610389", "text": "def meal_liked(request, meal_pk):\n meal_pk = int(meal_pk)\n meal = 
Meal.objects.get(pk=meal_pk)\n like = True\n member = request.user\n\n try:\n rating = Rating.objects.get(member=member, meal=meal)\n r_percent = meal.percent()\n except ObjectDoesNotExist:\n Rating.objects.create_rating(member, meal, like)\n r_percent = meal.percent()\n return HttpResponse(r_percent)\n\n rating.like = like\n rating.save()\n r_percent = meal.percent()\n return HttpResponse(r_percent)", "title": "" }, { "docid": "732ca3edcf4d7e3406d943b7938e5442", "score": "0.5586133", "text": "def tally_answer(self, answer_123_type, answer_selected_int, answer_weight_int):\n\n if answer_selected_int <= 3:\n type_for_answer = answer_123_type\n else:\n type_for_answer = self.opposite_type[answer_123_type]\n\n # print('Score.tally_answer - answer_123_type:', answer_123_type)\n # print('Score.tally_answer - answer_selected_int:', answer_selected_int)\n print('Score.tally_answer - answer_weight_int:', answer_weight_int)\n print('Score.tally_answer - type_for_answer:', type_for_answer)\n\n if type_for_answer is \"E\":\n self.e_score += answer_weight_int\n elif type_for_answer is \"I\":\n self.i_score += answer_weight_int\n elif type_for_answer is \"N\":\n self.n_score += answer_weight_int\n elif type_for_answer is \"S\":\n self.s_score += answer_weight_int\n elif type_for_answer is \"F\":\n self.f_score += answer_weight_int\n elif type_for_answer is \"T\":\n self.t_score += answer_weight_int\n elif type_for_answer is \"J\":\n self.j_score += answer_weight_int\n elif type_for_answer is \"P\":\n self.p_score += answer_weight_int\n\n print('Score.tally_answer - self.__str__():', self.__str__())", "title": "" }, { "docid": "7ea89e212b8867c14b0ed3e24028b5c8", "score": "0.5558744", "text": "def get_voters():", "title": "" }, { "docid": "7ea89e212b8867c14b0ed3e24028b5c8", "score": "0.5558744", "text": "def get_voters():", "title": "" }, { "docid": "9a8215a4bf66aa929726b7393b827641", "score": "0.5554487", "text": "def test_vote_twice(self):\n idea = models.Idea(creator=random_user(), title='Transit subsidy to Mars', \n text='Aliens need assistance.', state=self.state)\n idea.save()\n\n self.client.login(username='testuser', password='password')\n resp = self.client.post(reverse('upvote_idea'), {'idea_id':idea.id, 'next':reverse('idea_detail', args=(idea.id,))})\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(idea.vote_set.all()), 1)\n\n resp = self.client.post(reverse('upvote_idea'), {'idea_id':idea.id, 'next':reverse('idea_detail', args=(idea.id,))})\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(len(idea.vote_set.all()), 1)", "title": "" }, { "docid": "efa222bdc61c1c66a198a751c37fe7d4", "score": "0.5545086", "text": "def vote(self, data, suffix=''): # pylint: disable=unused-argument\n # Here is where we would prevent a student from voting twice, but then\n # we couldn't click more than once in the demo!\n #\n # if self.voted:\n # log.error(\"cheater!\")\n # return\n\n votes = json.load(self.fs.open(u\"thumbsvotes.json\"))\n self.upvotes = votes['up']\n self.downvotes = votes['down']\n\n if data['voteType'] not in ('up', 'down'):\n log.error('error!')\n return\n\n if data['voteType'] == 'up':\n self.upvotes += 1\n else:\n self.downvotes += 1\n\n with self.fs.open(u'thumbsvotes.json', 'wb') as file_output:\n file_output.write(\n json.dumps({'up': self.upvotes, 'down': self.downvotes}).encode()\n )\n\n self.voted = True\n\n return {'up': self.upvotes, 'down': self.downvotes}", "title": "" }, { "docid": "31c690b609ed69f42cdb428c1aa46917", "score": "0.55253595", "text": "def 
upvote(id,vote_type):\n # Query for user\n votes = Votes.query.filter_by(user_id=current_user.id).all()\n to_str=f'{vote_type}:{current_user.id}:{id}'\n\n if not votes:\n new_vote = Votes(vote=vote_type, user_id=current_user.id, posts_id=id)\n new_vote.save_vote()\n flash('YOU HAVE VOTED', 'success')\n\n for vote in votes:\n if f'{vote}' == to_str:\n\n break\n else: \n new_vote = Votes(vote=vote_type, user_id=current_user.id, posts_id=id)\n new_vote.save_vote()\n \n break\n\n return redirect(url_for('.view_post', id=id))", "title": "" }, { "docid": "fd649a293d033b926fa6aaacb3052901", "score": "0.5522253", "text": "def vote(self, request, **kwargs):\n context = self.get_context_data(**kwargs)\n _logger.info(\"%s is trying to vote on %s\", request.user, context['song'])\n vote_dict = get_vote_dict(request.user)\n can_vote = context['song'].id not in vote_dict[request.user.id] and context['song'].ready\n if can_vote:\n vote = Vote()\n vote.user = request.user\n vote.song = context['song']\n vote.save()\n vote_dict[request.user.id].append(context['song'].id)\n cache.set('vote_dict', vote_dict)\n logging.info('%s voted on %s.', request.user, context['song'])\n return HttpResponse('Vote registered on %s.' % context['song'])\n else:\n logging.info('%s tried to vote more than once on %s.', request.user.username, context['song'])\n return HttpResponse(\"Du har allerede stemt på denne sangen i dag!\", content_type='text/plain', status=403)", "title": "" }, { "docid": "b1aaa24e4624dda87c0aec53492466ae", "score": "0.550889", "text": "def show_likes(user_id):\n\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n # define user whose favorites are being viewed\n profuser = User.query.get_or_404(user_id)\n # define logged-in user for navbar details\n user = User.query.get(session[CURRENT_USER_KEY])\n if session[CURRENT_USER_KEY] == user_id:\n like_active = 'active'\n else:\n like_active = ''\n\n return render_template('likes.html', user=user, profuser=profuser, likes=profuser.likes, like_active=like_active)", "title": "" }, { "docid": "e9ff6c4adb9c330a3db3d89b051807dd", "score": "0.5487508", "text": "def toggle_vote(self):\n\n self.vote = 1 - self.vote", "title": "" }, { "docid": "90f3c6f1293f8adbcb0b4b7528751094", "score": "0.5486011", "text": "def test_vote_nopermission(self):\r\n mock_module = CHModuleFactory.create(user_voted=True)\r\n json_in = {'answer': '24.0', 'hint': 1, 'pk_list': json.dumps([['24.0', 1], ['24.0', 3]])}\r\n old_hints = copy.deepcopy(mock_module.hints)\r\n mock_module.tally_vote(json_in)\r\n self.assertTrue(mock_module.hints == old_hints)", "title": "" }, { "docid": "fab5a14d05bba6a74642bd1a8768c130", "score": "0.5472311", "text": "def displayAlsoLike(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"11925205\", result.data)", "title": "" }, { "docid": "206440aaca144ca04ad8bb7277e5a7c2", "score": "0.54215324", "text": "def cast_vote(\n self, user_id: int, election_id: int, ranked_candidate_list: List[int]\n ) -> int:", "title": "" }, { "docid": "77cbaf95acf11823189e02778a5a7bce", "score": "0.54092944", "text": "def show_likes(user_id):\n\n\n user = User.query.get_or_404(user_id)\n\n return render_template('users/likes.html', user=user)", "title": "" }, { "docid": "e83ebd03ef124a6df98a15d29a0ef593", "score": "0.5390602", "text": "def tpc_vote(self, transaction):\n raise NotImplementedError", "title": "" }, { "docid": "1d2d06f9ac580397c9a96620199a1a51", "score": "0.5388158", "text": "def get_user_liked(user, status):\n return 
models.Favorite.objects.filter(user=user, status=status).exists()", "title": "" }, { "docid": "ea15edadd888b9251cf3b64ce12adc74", "score": "0.53774065", "text": "def set_user_suggestion(user_id: int):\n session = Session()\n\n user = get_user_by_id(user_id)\n\n user.suggestion_expiry = properties.vote_expiration()\n\n session.merge(user)\n session.commit()\n session.close()", "title": "" }, { "docid": "a1a1ab08749634f99cab674ade5092ad", "score": "0.53709024", "text": "def twitter_votes(contest):\n import application_only_auth\n import re\n\n #TODO(robnagler) Add ability to count tweets for non-matching\n # that is assign the tweet to a contestant manually\n c = biv.load_obj(contest)\n assert type(c) == pem.E15Contest\n cfg = ppc.app().config['PUBLICPRIZE']['TWITTER']\n ignore_list = ppc.app().config['PUBLICPRIZE']['TWEETS']['ignore_list']\n client = application_only_auth.Client(**cfg)\n res = client.request(\n 'https://api.twitter.com/1.1/search/tweets.json?q=%40BoulderChamber%20%23EspritVentureChallenge&result_type=recent&count=1000',\n )\n strip_re = re.compile(r'[^a-z]')\n def _strip(name):\n return strip_re.sub('', name.lower())[0:5]\n\n tweet_re = re.compile(r'for (.+) in')\n tweet_re2 = re.compile(r'2pp.us/(\\w+)')\n nominees = {}\n nominees_by_id = {}\n for nominee in c.public_nominees():\n nominees[_strip(nominee.display_name)] = nominee.biv_id\n nominees_by_id[nominee.biv_id] = nominee.display_name\n all_votes = pcm.Vote.query.filter(\n pcm.Vote.nominee_biv_id.in_(list(nominees.values())),\n ).all()\n votes_map = dict([(v.biv_id, v) for v in all_votes if v.twitter_handle])\n vote_not_found = {}\n #print(all_votes)\n events = {}\n ignore_handles = set()\n for s in reversed(res['statuses']):\n sn = pcm.Vote.strip_twitter_handle(s['user']['screen_name'])\n if sn in ignore_list:\n continue\n dt = s['created_at'][4:].replace('+0000 ', '')\n dt = datetime.datetime.strptime(dt, '%b %d %H:%M:%S %Y')\n err = None\n m = tweet_re2.search(s['text'])\n nominee_id = None\n if m:\n guess = m.group(1)\n try:\n nominee_id = nominees_by_id[biv.URI(guess).biv_id]\n except Exception:\n m = None\n #print('https://twitter.com/{}/status/{}'.format(sn, s['id']))\n if not nominee_id:\n m2 = tweet_re.search(s['text'])\n if m2:\n guess = _strip(m2.group(1))\n nominee_id = nominees.get(guess)\n m = m2\n else:\n err = 'tweet did not match regexes'\n if nominee_id:\n votes = pcm.Vote.query.filter_by(\n nominee_biv_id=nominee_id,\n twitter_handle=sn,\n ).all()\n if len(votes) == 1:\n if votes[0].biv_id in votes_map:\n if votes[0].vote_status != '2x':\n votes[0].vote_status = '2x'\n _add_model(votes[0])\n del votes_map[votes[0].biv_id]\n ignore_handles.add(sn)\n #print('{}: updated'.format(votes[0]))\n continue\n else:\n err = '{}: duplicate vote'.format(votes[0])\n continue\n elif len(votes) > 1:\n err = '{}: strange vote count, votes='.format(len(votes), votes)\n else:\n err = 'vote not found'\n vote_not_found[dt.replace(microsecond=0)] = dict(nominee_id=nominee_id, sn=sn)\n elif m:\n err = '{}: guess={} not found in nominees'.format(m.group(1), guess)\n else:\n err = '{}: does not match regexes'.format(s['text'])\n if not s['text'].startswith('RT '):\n events[dt] = '{}\\n {} => {}\\n https://twitter.com/{}/status/{}\\n {}'.format(\n err, sn, m and m.group(1), sn, s['id'], s['text'])\n\n sec = datetime.timedelta(seconds=1)\n # print('\\nVotes not found')\n for v in votes_map.values():\n # Ignore invalidated handles and already counted votes\n if not ('!' 
in v.twitter_handle or v.vote_status == '2x' or v.twitter_handle in ignore_list):\n u = biv.load_obj(biv.Id(v.user).to_biv_uri())\n events[v.creation_date_time] = '{} {} {} {} {}: no tweet'.format(\n v.twitter_handle,\n u.display_name,\n u.user_email,\n v.nominee_biv_id,\n nominees_by_id[v.nominee_biv_id],\n )\n vdt = v.creation_date_time.replace(microsecond=0)\n vnf = None\n for i in range(120):\n vnf = vote_not_found.get(vdt)\n if vnf and vnf['nominee_id'] == v.nominee_biv_id:\n events[v.creation_date_time] += '\\npython manage.py twitter_handle_update -c {} -o {} -n {}'.format(\n biv.Id(c.biv_id).to_biv_uri(), v.twitter_handle, vnf['sn'])\n break\n vdt += sec\n for k in reversed(sorted(events.keys())):\n print('{} {}'.format(k.strftime('%d %H:%M:%S'), events[k]))", "title": "" }, { "docid": "0b7204836645adb488e8a75d6426da2b", "score": "0.53693354", "text": "def update_weight(user, user_favs, rec_user, rec_user_favs):\n counter = 0\n for photo in user_favs:\n if photo in rec_user_favs:\n counter += 1\n weight, created = Weight.objects.get_or_create(against=user, to=rec_user)\n weight.weight = float(counter)/len(rec_user_favs)\n weight.save()", "title": "" }, { "docid": "3e385b971e30fd7e11c0778b096d6833", "score": "0.5363055", "text": "def vote(self, upvote, id, remote_addr):\n\n rv = self.db.fetchone(\n 'SELECT likes, dislikes, voters FROM comments WHERE id=%s', (id, ))\n\n if rv is None:\n return None\n\n likes, dislikes, votersPickle = rv\n voters = pickle.loads(votersPickle)\n if likes + dislikes >= 142:\n return {'likes': likes, 'dislikes': dislikes}\n\n bf = Bloomfilter(voters.array, likes + dislikes)\n if remote_addr in bf:\n return {'likes': likes, 'dislikes': dislikes}\n\n bf.add(remote_addr)\n self.db.commit([\n 'UPDATE comments SET',\n ' likes = likes + 1,' if upvote else 'dislikes = dislikes + 1,',\n ' voters = %s'\n 'WHERE id=%s;'], (pickle.dumps(bf.array), id))\n\n if upvote:\n return {'likes': likes + 1, 'dislikes': dislikes}\n return {'likes': likes, 'dislikes': dislikes + 1}", "title": "" }, { "docid": "a3ab3312a45c37e84e2de5bfe1c8ea88", "score": "0.53441626", "text": "def has_voted(self, user):\n return user.choice_set.filter(vote=self).exists()", "title": "" }, { "docid": "9d974d27579acf8715ffd68a8d378861", "score": "0.534172", "text": "def vote(request, question_id):\n\n user = request.user\n if not user.is_authenticated:\n return redirect('login')\n\n question = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n context = {\n 'question' : question,\n 'error_message' : \"You didn't select a choice.\"\n }\n return render(request, 'polls/detail.html',context)\n else:\n selected_choice.votes += 1\n selected_choice.save()\n return HttpResponseRedirect(reverse('results',args=(question.id,)))", "title": "" }, { "docid": "cfbf3c2099ad68c997d8b90b695c05b4", "score": "0.5302716", "text": "def update_vote(self):\n if not self.answer_id:\n return False\n try:\n con = psycopg2.connect(**self.config)\n cur = con.cursor(cursor_factory=RealDictCursor)\n query = \"UPDATE votes SET vote=%s WHERE answer_id=%s AND user_id=%s\"\n cur.execute(query, (self.vote_value, self.answer_id, self.user_id))\n con.commit()\n except Exception as e:\n print(e)\n con.close()\n return False\n return True", "title": "" }, { "docid": "3197f0b60790932cc8b7869f9dc79a61", "score": "0.5299154", "text": "def add(self, score, user, ip_address, cookies={}, commit=True):\n try:\n score = int(score)\n 
except (ValueError, TypeError):\n raise InvalidRating(\"%s is not a valid choice for %s\" % (score, self.field.name))\n \n delete = (score == 0)\n if delete and not self.field.allow_delete:\n raise CannotDeleteVote(\"you are not allowed to delete votes for %s\" % (self.field.name,))\n # ... you're also can't delete your vote if you haven't permissions to change it. I leave this case for CannotChangeVote\n \n if score < 0 or score > self.field.range:\n raise InvalidRating(\"%s is not a valid choice for %s\" % (score, self.field.name))\n\n is_anonymous = (user is None or not user.is_authenticated())\n if is_anonymous and not self.field.allow_anonymous:\n raise AuthRequired(\"user must be a user, not '%r'\" % (user,))\n \n if is_anonymous:\n user = None\n \n defaults = dict(\n score = score,\n ip_address = ip_address,\n )\n \n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n user = user,\n )\n if not user:\n kwargs['ip_address'] = ip_address\n \n use_cookies = (self.field.allow_anonymous and self.field.use_cookies)\n if use_cookies:\n defaults['cookie'] = now().strftime('%Y%m%d%H%M%S%f') # -> md5_hexdigest?\n # TODO: move 'vote-%d.%d.%s' to settings or something\n cookie_name = 'vote-%d.%d.%s' % (kwargs['content_type'].pk, kwargs['object_id'], kwargs['key'][:6],) # -> md5_hexdigest?\n cookie = cookies.get(cookie_name) # try to get existent cookie value\n if not cookie:\n kwargs['cookie__isnull'] = True\n kwargs['cookie'] = cookie\n\n try:\n rating, created = Vote.objects.get(**kwargs), False\n except Vote.DoesNotExist:\n if delete:\n raise CannotDeleteVote(\"attempt to find and delete your vote for %s is failed\" % (self.field.name,))\n # print \"RATINGS_VOTES_PER_IP: \"\n # print getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP)\n if getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP):\n num_votes = Vote.objects.filter(\n content_type=kwargs['content_type'],\n object_id=kwargs['object_id'],\n key=kwargs['key'],\n ip_address=ip_address,\n ).count()\n if num_votes >= getattr(settings, 'RATINGS_VOTES_PER_IP', RATINGS_VOTES_PER_IP):\n raise Exception(\"Numero Maximo de votos por ip\")\n kwargs.update(defaults)\n if use_cookies:\n # record with specified cookie was not found ...\n cookie = defaults['cookie'] # ... thus we need to replace old cookie (if presented) with new one\n kwargs.pop('cookie__isnull', '') # ... 
and remove 'cookie__isnull' (if presented) from .create()'s **kwargs\n rating, created = Vote.objects.create(**kwargs), True\n \n has_changed = False\n if not created:\n if self.field.can_change_vote:\n has_changed = True\n self.score -= rating.score\n # you can delete your vote only if you have permission to change your vote\n if not delete:\n rating.score = score\n rating.save()\n else:\n self.votes -= 1\n rating.delete()\n else:\n raise CannotChangeVote()\n else:\n has_changed = True\n self.votes += 1\n if has_changed:\n if not delete:\n self.score += rating.score\n if commit:\n self.instance.save()\n #setattr(self.instance, self.field.name, Rating(score=self.score, votes=self.votes))\n \n defaults = dict(\n score = self.score,\n votes = self.votes,\n )\n \n kwargs = dict(\n content_type = self.get_content_type(),\n object_id = self.instance.pk,\n key = self.field.key,\n )\n \n try:\n score, created = Score.objects.get(**kwargs), False\n except Score.DoesNotExist:\n kwargs.update(defaults)\n score, created = Score.objects.create(**kwargs), True\n \n if not created:\n score.__dict__.update(defaults)\n score.save()\n \n # return value\n adds = {}\n if use_cookies:\n adds['cookie_name'] = cookie_name\n adds['cookie'] = cookie\n if delete:\n adds['deleted'] = True\n return adds", "title": "" }, { "docid": "05d5bcf8f654ba5e91b1918f89b9ac0a", "score": "0.52912796", "text": "def _force_vote(self, user, value):\n previous = 0\n if value == 0:\n # Delete any previous vote object\n for v in Vote.objects.filter(user=user, content=self):\n previous = v.value\n v.delete()\n else:\n # Create or change vote object\n v, created = Vote.objects.get_or_create(user=user, content=self)\n previous = v.value\n v.value = value\n v.save(update_fields=['value'])\n return (previous-value)*(-1)", "title": "" }, { "docid": "b88dfcb090c9dc639d0a79d41e8eecbd", "score": "0.5289296", "text": "def submit_hint(self, data):\r\n # Do html escaping. Perhaps in the future do profanity filtering, etc. as well.\r\n hint = escape(data['hint'])\r\n answer = data['answer']\r\n if not self.validate_answer(answer):\r\n log.exception('Failure in hinter submit_hint: Unable to parse answer: {ans}'.format(\r\n ans=answer))\r\n return {'error': 'Could not submit answer'}\r\n # Only allow a student to vote or submit a hint once.\r\n if self.user_voted:\r\n return {'message': 'Sorry, but you have already voted!'}\r\n # Add the new hint to self.hints or self.mod_queue. 
(Awkward because a direct write\r\n # is necessary.)\r\n if self.moderate == 'True':\r\n temp_dict = self.mod_queue\r\n else:\r\n temp_dict = self.hints\r\n if answer in temp_dict:\r\n temp_dict[answer][str(self.hint_pk)] = [hint, 1] # With one vote (the user himself).\r\n else:\r\n temp_dict[answer] = {str(self.hint_pk): [hint, 1]}\r\n self.hint_pk += 1\r\n if self.moderate == 'True':\r\n self.mod_queue = temp_dict\r\n else:\r\n self.hints = temp_dict\r\n # Mark the user has having voted; reset previous_answers\r\n self.user_voted = True\r\n self.previous_answers = []\r\n self.user_submissions = []\r\n return {'message': 'Thank you for your hint!'}", "title": "" }, { "docid": "75ebe076946118266da86672f6dc0bff", "score": "0.528754", "text": "def can_vote(age):\n return age >= 18", "title": "" }, { "docid": "5d5d27ff8b49fd0912c9ab45aec46065", "score": "0.52854156", "text": "def test_vote_nohint(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'answer': '24.0', 'hint': '25', 'pk_list': '[]'}\r\n dict_out = mock_module.tally_vote(json_in)\r\n self.assertTrue(dict_out == {'error': 'Failure in voting!'})", "title": "" }, { "docid": "9eb5ab1babf4a1fd425dc34d295b3662", "score": "0.52813494", "text": "def user_interaction_score(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n return pui", "title": "" }, { "docid": "dfd1e2ac89593c970e6a6998dd638ec5", "score": "0.5278905", "text": "def insert_favorite_food(self, user_answer_id_food, name_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n save_favorite_food = \"\"\"INSERT INTO Favorite\n (id_food, id_substitute_chooses)\n VALUES({0}, \n (SELECT id FROM Food WHERE name_food = {1}))\"\"\" \\\n .format(int(user_answer_id_food),\n \"\\'\" + name_substitute + \"\\'\")\n self.cursor.execute(save_favorite_food)\n self.data_base.commit()", "title": "" }, { "docid": "cfe30384d63e6d28b1008836020ad684", "score": "0.52743244", "text": "def upvote(self) -> Response:\n self.force_authenticate_user()\n response = self.upvote_question()\n return response", "title": "" }, { "docid": "e8b1e4124cb61f82963ec6b7e589c6b9", "score": "0.52702326", "text": "def vote(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'votes')\r\n request = http.Request('POST', url, {'to': '1'})\r\n\r\n return request, parsers.parse_json", "title": "" }, { "docid": "ffe563c633be2fc5390e7e02aa28cd4d", "score": "0.5267819", "text": "def get_vote_weight(var, target : users.User) -> int:\n # ensure we don't return negative numbers here;\n # we still track them for stacking purposes but anything below 0 counts as 0\n return max(WEIGHT.get(target, 1), 0)", "title": "" }, { "docid": "f53ab80caa0b6ca48b728509ecc4295b", "score": "0.5255498", "text": "def record_vote(self, obj, user, vote):\r\n if vote not in (+1, 0, -1):\r\n raise ValueError('Invalid vote (must be +1/0/-1)')\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n v = self.get(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val())\r\n if vote == 0:\r\n v.delete()\r\n else:\r\n v.vote = vote\r\n v.save()\r\n except models.ObjectDoesNotExist:\r\n if vote != 0:\r\n self.create(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val(), vote=vote)", "title": "" }, { "docid": "c94c86dc6205110b81d70c76c12ba0c0", "score": 
"0.52486813", "text": "def update_vote(name: str, party: str, votes: dict, vote_count: dict) -> bool:\r\n if auth_vote(name, party, votes):\r\n vote_count[party] += 1\r\n return True\r\n return False", "title": "" }, { "docid": "f38184b2dbd5dbecb48f2ad4dab91df0", "score": "0.52304107", "text": "def Vote(i, j, budget, count):\r\n if(count < budget):\r\n if(random.uniform(0, i+j) < i):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if(random.uniform(0, 1) < 0.5):\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "63db8892c17739f4945ff1988eec06e0", "score": "0.52117664", "text": "def add_vote_weight(var, target : users.User, amount : int = 1) -> None:\n if target not in get_players():\n return\n\n WEIGHT[target] = WEIGHT.get(target, 1) + amount\n if WEIGHT[target] == 1:\n del WEIGHT[target]", "title": "" }, { "docid": "d2f4da7cd4c2ecb0360fd979ec0cee6f", "score": "0.52054137", "text": "def result_poll(votes):\n return sum(votes) >= 2 / 3 * len(votes)", "title": "" }, { "docid": "e3cb84d67c9d9d0c0eb71593cde30e1d", "score": "0.5202772", "text": "async def love(ctx, user: discord.Member):\r\n author = ctx.message.author\r\n if user.id == ctx.bot.user.id:\r\n await ctx.send(\"I am not capable of loving like you can. I'm sorry.\" )\r\n else:\r\n await ctx.send(author.mention + \" is capable of loving \" + user.mention + \" a whopping \" +\r\n str(randint(0, 100)) + \"%!\")\r\n ctx.counter(n)", "title": "" }, { "docid": "a9eeb307575eaeb89456a54aba542479", "score": "0.5196707", "text": "def tagme_vote(self, e1, candidate_set):\n try:\n return sum([self.entity_relatedness(e1, e2) * score\n for score, e2 in candidate_set]) / len(candidate_set)\n except ZeroDivisionError:\n return 0.0", "title": "" }, { "docid": "bcdccb383e4c0da624cc14aa43c419e8", "score": "0.5195632", "text": "def vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n # Redisplay the question voting form.\n return render(request, 'polls/detail.html', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n user = request.user\n Vote.objects.update_or_create(user=user, question=question, defaults={'choice': selected_choice})\n for choice in question.choice_set.all():\n choice.votes = Vote.objects.filter(question=question).filter(choice=choice).count()\n choice.save()\n # Always return an HttpResponseRedirect after successfully dealing\n # with POST data. 
This prevents data from being posted twice if a\n # user hits the Back button.\n date = datetime.now()\n log.info(\"User: %s, Poll's ID: %d, Date: %s.\", user, question_id, str(date))\n return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))", "title": "" }, { "docid": "10f6c7f6c20f8467fa33422a9e272f74", "score": "0.51915884", "text": "def user_interaction(uv, recommended_News, ranked=True):\n\n iv = recommended_News[\"topical_vector\"]\n\n product = simple_doct_product(uv, iv)\n\n epsilon = 10e-5\n\n if (product + epsilon) > 1.0:\n vui = 0.99\n else:\n vui = beta_distribution(product)\n\n # Awared preference\n ita = beta_distribution(0.98)\n pui = vui * ita\n\n rand_num = np.random.random()\n\n if rand_num < pui:\n return True\n else:\n return False", "title": "" }, { "docid": "d7bd311e07e7b1d47b8918eb82992600", "score": "0.51799345", "text": "def vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n configure()\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n configure()\n # Redisplay the question voting form.\n return render(request, 'polls/detail.html', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n if Vote.objects.filter(pk=question_id, user_id=request.user.id).exists():\n configure()\n user_vote = question.vote_set.get(user=request.user)\n user_vote.choice = selected_choice\n user_vote.choice.votes += 1\n user_vote.choice.save()\n user_vote.save()\n else:\n configure()\n selected_choice.vote_set.create(user=request.user, question=question)\n\n return HttpResponseRedirect(reverse('polls:results', args=(question_id,)))", "title": "" }, { "docid": "7fc8871f3bbf9b063224a7cccbd606ef", "score": "0.5178587", "text": "def count_votes(self):\n return self.annotate(sum=Sum('value'))", "title": "" }, { "docid": "b41b1774c575df3e4cdb033139025fa8", "score": "0.5176195", "text": "def GetUserInterest(user_id, current_context, suggestees):\n global db\n if db == None:\n db = MySQLdb.connect(\"localhost\", \"neva\", \"\", \"neva\")\n sql = \"SELECT `suggestee_id`, `timestamp`, `feedback` FROM \" + \\\n \"`user_recommendation_feedback` WHERE `user_id` = %s\"\n interest = {}\n with db.cursor() as cur:\n cur.execute(sql, (user_id, ))\n for raw_features in cur:\n features = np.array(raw_features).astype(np.float)\n for idx, field in enumerate(fields):\n features[idx] = ParseFeature(features[idx], field)\n suggestee_id, timestamp, feedback = features\n feedback = 1 if feedback == kLIKE else -1\n if GetDist([timestamp], current_context) > kMaxDistThreshold:\n continue\n if suggestee_id not in interest:\n interest[suggestee_id] = feedback\n else:\n interest[suggestee_id] += feedback\n\n if len(interest) < kColdStartThreshold:\n suggestees_shuffled = list(suggestees)\n shuffle(suggestees_shuffled)\n delta = kColdStartThreshold - len(interest) + 5\n if delta > len(suggestees_shuffled):\n delta = len(suggestees_shuffled)\n for suggestee_id in suggestees_shuffled[:delta]:\n if suggestee_id in interest:\n continue\n interest[suggestee_id] = 0\n if len(interest) >= kColdStartThreshold:\n break\n\n return interest", "title": "" }, { "docid": "b6f8d66826eb6192b7112bf3298c19d3", "score": "0.5169103", "text": "def vote(self, agents):\n\n # If the impostors have a set target, vote that\n if self.target != -1:\n vote = self.target\n else: # Vote a random living agents\n vote = random.sample([a.agent_id for a in agents if not a.agent_id == 
self.agent_id and a.alive and not a.is_impostor()], 1)[0]\n\n self.target = -1\n self.logger.log(f\"Impostor {self.agent_id} votes for {vote}\", Logger.LOG | Logger.PRINT_VISUAL)\n return vote", "title": "" }, { "docid": "5c1d78b6a847468b9a155f206e1c12ff", "score": "0.5159917", "text": "def vote_count(self):\n return QuestionVotes.objects.filter(question=self).count()", "title": "" }, { "docid": "6a3464b10527f4e74d08861bc9a0dfde", "score": "0.5158851", "text": "def like(self, n: int) -> None:\n\n # YOUR CODE HERE\n self.likes += 1", "title": "" }, { "docid": "3fb5e144eaac9492f687392c2ea7987b", "score": "0.51557314", "text": "async def applyVote(self, votes):\n voteCount = {vote: 0 for vote in self.getMembersName()}\n voteCount[None] = 0\n for vote in votes.values():\n voteCount[vote] += 1\n\n if voteCount[None] != 0:\n await self.textChannel.send(\n \"Attention, des joueurs n'ont pas voté / ont mal écrit, les votes peuvent être faussés.\")\n del voteCount[None]\n\n playerOrder = sorted(voteCount.items(), key=lambda x: x[1], reverse=True)\n print(\"playerOrder\", playerOrder)\n if playerOrder[0][1] == 0: # Nobody vote\n await self.textChannel.send(\"`Partie non valide`, personne n'a voté.\")\n\n elif playerOrder[0][1] == 1: # People think nobody is a werewolf\n await self.textChannel.send(\"Le village pense qu'il n'y a pas de loups-garou ? Vérification ...\")\n werewolves = self.getWolves()\n if len(werewolves) == 0:\n await self.textChannel.send(\"Le village a raison, il n'y a pas de loups-garous parmis eux.\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"Malheuresement, il y avait```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # Classic vote\n werewolves = self.getWolves()\n deaths = []\n for i in range(len(playerOrder)):\n player = self.getMemberFromName(name=playerOrder[i][0])\n isDead = await player.isDead(channel=self.textChannel)\n if isDead:\n deaths += await player.death(channel=self.textChannel, members=self.players)\n print(\"voteCount :\", voteCount)\n\n # Get player name with same number of vote against them\n playerEqualVote = []\n for p in playerOrder:\n if p[1] == playerOrder[i][1] and p[0] != playerOrder[i][0]:\n playerEqualVote.append(self.getMemberFromName(name=p[0]))\n print(\"Other players with equals number of vote :\", playerEqualVote)\n for otherPlayer in playerEqualVote:\n isDead = await otherPlayer.isDead(channel=self.textChannel)\n if isDead:\n deaths += await otherPlayer.death(channel=self.textChannel, members=self.players)\n break\n\n for i in range(len(deaths)):\n if deaths[i] is None:\n del deaths[i]\n\n if len(deaths) == 0: # No one die\n if len(werewolves) == 0: # No Werewolves\n await self.textChannel.send(\"Il n'ya pas eu de mort et il n'y a aucun Loup-Garou !\")\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else: # Werewolves among players\n await self.textChannel.send(\n \"Il n'y a pas eu de mort mais```\" + \", \".join(werewolves) + \"```\")\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n elif len(deaths) == 1:\n if deaths[0].lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]: # Werewolf die\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n elif deaths[0].lastRole in [\"Tanneur\"]: # Tanner died\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n if len(werewolves) > 0: # 
Wolves in game\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT ÉGALEMENT GAGNÉ```\")\n else: # Villager died\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")\n\n else: # more than 2 deaths\n rolesDead = []\n for dead in deaths:\n if dead.lastRole in [\"Loup-Garou\", \"Loup Alpha\", \"Loup Shamane\", \"Loup rêveur\"]:\n rolesDead.append(\"Loup-Garou\")\n elif dead.lastRole in [\"Tanneur\"]:\n await self.textChannel.send(\"```Fix\\n#LE TANNEUR A GAGNÉ#```\")\n else:\n rolesDead.append(\"Villageois\")\n print(\"rolesDead :\", rolesDead)\n rolesDead = list(dict.fromkeys(rolesDead))\n print(\"rolesDead unique :\", rolesDead)\n if \"Loup-Garou\" in rolesDead:\n await self.textChannel.send(\"```css\\nLES VILLAGEOIS ONT GAGNÉ```\")\n else:\n await self.textChannel.send(\"```diff\\n-LES LOUPS-GAROUS ONT GAGNÉ-```\")", "title": "" }, { "docid": "daf58b4eb0b98fdc9b608bd766392960", "score": "0.5155113", "text": "def do_vote(self, stats, vote_val):\n vote = self.get_vote(stats)\n if vote is None:\n vote = Vote()\n vote.user = self\n vote.stats = stats\n vote.value = vote_val\n return vote", "title": "" }, { "docid": "ec8b99ae72c114f8468639d297ff22c1", "score": "0.5153152", "text": "def step(self, user: torch.tensor, item: torch.tensor,\n rating: torch.tensor, preference: torch.tensor):\n pass", "title": "" }, { "docid": "a999456c6da2c0086a0421abbf357f30", "score": "0.5125639", "text": "def votes(self):\n vote_list = [\"%s (%s)\" % (n.name, n.vote) for n in self.name_set.all()]\n return \" / \".join(vote_list)", "title": "" }, { "docid": "7e6ed8a253351eba82fd5aaa82400a7d", "score": "0.5121645", "text": "def upvote_comment():\n x = upvoteBot.main(number=1, submissionid='7xbrcw')\n return x", "title": "" }, { "docid": "24d1d6ecc73fd14e4eb80b0c61c18911", "score": "0.5118665", "text": "async def create_random_vote(self):\n \"\"\"This is done for prevent automatic or robotic voting.\"\"\"\n random_number = random.randrange(0, 100000)\n current_time = int(time.time())\n result = await self.db.fetchrow(\n # 'select * from stickers order by random() limit 1;'\n 'SELECT * FROM stickers TABLESAMPLE SYSTEM_ROWS(1);'\n )\n random_sticker = json.loads(result[1])\n token = await self.db.fetchval(\n \"select md5('{}');\".format(random_number))\n await self.db.fetch(\n \"insert into secret (data) values\"\n \"('{}')\".format(json.dumps([\n token,\n current_time,\n random_sticker[0],\n random_sticker[2]\n ])))\n return (random_sticker[2], token)", "title": "" }, { "docid": "db3d29b9737fbee3ea338397d641da9b", "score": "0.5114699", "text": "def fans(self, request, pk=None):\n\n obj = self.get_object()\n users_list = like_func.get_liked_users(obj)\n serializer = UserSerializer(users_list, context={'request': request}, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "2cc58b7c9bd8268c2a9fe75d30bf2bfc", "score": "0.5104717", "text": "async def stats(self, ctx, user: discord.Member=None):\n user = user or ctx.author\n\n if optout.find_one({\"_id\": user.id}) or not collection.find_one({\"id_\":user.id}):\n await ctx.send(f\"Sorry, that user has either opted out or is not in our system!\")\n else:\n embed = discord.Embed(title=f\"{user}'s Soundness Score\",\n color=random.randint(0, 0xFFFFFF))\n \n embed.set_thumbnail(url=user.avatar_url)\n embed.add_field(name=\"Streak\", value=f'{collection.find_one({\"id_\":user.id})[\"streak\"]} songs')\n embed.add_field(name=\"Wellness Score\", value=collection.find_one({\"id_\":user.id})[\"wellness\"])\n\n 
embed.set_footer(text=f\"Requested by @{ctx.message.author}\", icon_url=ctx.message.author.avatar_url)\n\n await ctx.send('', embed=embed)", "title": "" }, { "docid": "e1444827aab5590bc5ff93b6075dd1ef", "score": "0.51034474", "text": "def relation_upvote(request, pk):\n try:\n relation = Relation.objects.get(pk=pk)\n except Relation.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'PUT':\n relation.positive_reaction_count += 1\n relation.save()\n serializer = RelationSerializer(relation)\n return Response(serializer.data)", "title": "" }, { "docid": "a7c251eaac29349d2d9a26862ce35223", "score": "0.510009", "text": "def votePost(votePostEvent):\n userID = votePostEvent[\"data\"][\"user_id\"]\n postID = votePostEvent[\"data\"][\"post_id\"]\n vote = int(votePostEvent[\"data\"][\"vote\"])\n query = ('SELECT * FROM vote WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(postID, userID))\n with conn.cursor() as cur:\n affectedRow = cur.execute(query)\n if affectedRow > 0:\n row = cur.fetchone()\n if vote > 0 and not row[2]:\n query = (\n 'UPDATE vote SET upvote = true, downvote = false WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(\n postID, userID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote+1, downvote = downvote-1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n elif vote < 0 and not row[3]:\n query = (\n 'UPDATE vote SET upvote = false, downvote = true WHERE post_id = \\\"{}\\\" AND user_id = \\\"{}\\\"'.format(\n postID, userID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote-1, downvote = downvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n else:\n if vote > 0:\n query = (\n 'INSERT INTO vote (user_id, post_id, upvote, downvote) VALUES ( \\\"{}\\\", \\\"{}\\\", true, false)'.format(\n userID, postID))\n cur.execute(query)\n query = 'UPDATE post SET upvote = upvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n else:\n query = (\n 'INSERT INTO vote (user_id, post_id, upvote, downvote) VALUES ( \\\"{}\\\", \\\"{}\\\", false, true)'.format(\n userID, postID))\n cur.execute(query)\n query = 'UPDATE post SET downvote = downvote+1 WHERE post_id = \\\"{}\\\"'.format(postID)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "5615ad11f7e5d9fa0bdbaeaca3d52898", "score": "0.5097896", "text": "def upvote(self, request, pk=None):\n post = self.get_object()\n post.upvotes += 1\n post.save()\n serializer = self.get_serializer(post)\n return Response(serializer.data, status.HTTP_200_OK)", "title": "" }, { "docid": "6a6dfb08bcf3608cfe8a43460f690778", "score": "0.50879854", "text": "def recommend_for_user(self, R, user, n=10, filter_previously_seen=False,\n return_scores=True, **kwargs):", "title": "" }, { "docid": "f04205752476859bd743f50642469d15", "score": "0.5087689", "text": "def record_vote_simple(self, obj, user, vote):#renamed from original record_vote\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n ctype = ContentType.objects.get_for_model(obj)\n try:\n v = self.get(user=user, content_type=ctype,\n object_id=obj._get_pk_val())\n if vote == 0:\n v.delete()\n else:\n v.vote = vote\n v.save()\n except models.ObjectDoesNotExist:\n if vote != 0:\n self.create(user=user, content_type=ctype,\n object_id=obj._get_pk_val(), vote=vote)", "title": "" }, { "docid": "6c66fcff0e0aeb6b6eb6300a1c843a00", "score": "0.5082745", "text": "def get_user_likes(self) -> int:\n return -1", "title": "" }, { 
"docid": "6461adea490fc8687193e193d55e90d1", "score": "0.5081986", "text": "def evaluateVoteCount(toCompare):\n\n #weight = 0\n\n if int(toCompare['vote_count']) >= 5000:\n weight = 100\n elif 3000 <= int(toCompare['vote_count']) < 5000:\n weight = 80\n elif 2000 <= int(toCompare['vote_count']) < 3000:\n weight = 60\n elif 1000 <= int(toCompare['vote_count']) < 2000:\n weight = 40\n elif 500 <= int(toCompare['vote_count']) < 1000:\n weight = 20\n else:\n weight = 0\n return weight", "title": "" }, { "docid": "22537041538c7ce4e1036106137a8f94", "score": "0.5080221", "text": "def _get_recommend(self, user):\n return self.user_cf.calculate(target_user_id=user, user_n=self.user_n,\n item_n=self.item_n, type=2)", "title": "" }, { "docid": "281a108e7a5cc026bb67816576da71d5", "score": "0.506795", "text": "def process_vote(self, comment_id, username, value):\n raise NotImplementedError()", "title": "" }, { "docid": "119ebba90236b1a33aa9384ac3a83b5a", "score": "0.5065115", "text": "def get_vote(self, int_img):\n score = self.get_score(int_img)\n return self.weight * (1 if score < self.polarity * self.threshold else -1)", "title": "" }, { "docid": "207220e27234089e743467ff4182c7d6", "score": "0.5059269", "text": "def test_changevotes(self):\r\n request = RequestFactory()\r\n post = request.post(self.url, {'field': 'hints',\r\n 'op': 'change votes',\r\n 1: [self.problem_id.to_deprecated_string(), '1.0', '1', 5]})\r\n view.change_votes(post, self.course_id, 'hints')\r\n problem_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=self.problem_id).value\r\n # hints[answer][hint_pk (string)] = [hint text, vote count]\r\n print json.loads(problem_hints)['1.0']['1']\r\n self.assertTrue(json.loads(problem_hints)['1.0']['1'][1] == 5)", "title": "" }, { "docid": "059946b9207a6a5492d5395bb327e6bb", "score": "0.50519586", "text": "def get_hint(self, data):\r\n # First, validate our inputs.\r\n try:\r\n answer = self.answer_to_str(data)\r\n except (ValueError, AttributeError):\r\n # Sometimes, we get an answer that's just not parsable. Do nothing.\r\n log.exception('Answer not parsable: ' + str(data))\r\n return\r\n if not self.validate_answer(answer):\r\n # Answer is not in the right form.\r\n log.exception('Answer not valid: ' + str(answer))\r\n return\r\n if answer not in self.user_submissions:\r\n self.user_submissions += [answer]\r\n\r\n # For all answers similar enough to our own, accumulate all hints together.\r\n # Also track the original answer of each hint.\r\n matching_answers = self.get_matching_answers(answer)\r\n matching_hints = {}\r\n for matching_answer in matching_answers:\r\n temp_dict = copy.deepcopy(self.hints[matching_answer])\r\n for key, value in temp_dict.items():\r\n # Each value now has hint, votes, matching_answer.\r\n temp_dict[key] = value + [matching_answer]\r\n matching_hints.update(temp_dict)\r\n # matching_hints now maps pk's to lists of [hint, votes, matching_answer]\r\n\r\n # Finally, randomly choose a subset of matching_hints to actually show.\r\n if not matching_hints:\r\n # No hints to give. 
Return.\r\n return\r\n # Get the top hint, plus two random hints.\r\n n_hints = len(matching_hints)\r\n hints = []\r\n # max(dict) returns the maximum key in dict.\r\n # The key function takes each pk, and returns the number of votes for the\r\n # hint with that pk.\r\n best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])\r\n hints.append(matching_hints[best_hint_index][0])\r\n best_hint_answer = matching_hints[best_hint_index][2]\r\n # The brackets surrounding the index are for backwards compatability purposes.\r\n # (It used to be that each answer was paired with multiple hints in a list.)\r\n self.previous_answers += [[best_hint_answer, [best_hint_index]]]\r\n for _ in xrange(min(2, n_hints - 1)):\r\n # Keep making random hints until we hit a target, or run out.\r\n while True:\r\n # random.choice randomly chooses an element from its input list.\r\n # (We then unpack the item, in this case data for a hint.)\r\n (hint_index, (rand_hint, _, hint_answer)) =\\\r\n random.choice(matching_hints.items())\r\n if rand_hint not in hints:\r\n break\r\n hints.append(rand_hint)\r\n self.previous_answers += [[hint_answer, [hint_index]]]\r\n return {'hints': hints,\r\n 'answer': answer}", "title": "" }, { "docid": "09fe3b9df874d2ad14ba6931be2bb702", "score": "0.5036828", "text": "def recall(self, user_list):\n hit = 0\n like_item = 0\n print('\\nCalculate recall: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n recom_item = set([data[0] for data in recom_data])\n user_item = set(\n self.test[self.test['userId'] == user]['movieId'].values)\n overlap = recom_item & user_item\n hit += len(overlap)\n like_item += len(user_item)\n print('\\nrecall is: ', hit / (like_item * 1.0))\n return hit / (like_item * 1.0)", "title": "" }, { "docid": "ba8d723ccea3f66a6317e203da6287f4", "score": "0.5036744", "text": "def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite", "title": "" }, { "docid": "92450f80b06331f3f65a94fa58f952a3", "score": "0.503564", "text": "async def favor(self, ctx):\n east = ctx.guild.get_member(339119069066297355)\n if not east or east.status != discord.Status.online:\n await ctx.send(f\"I'm afraid I can't do that, {ctx.author.display_name}.\")\n return\n await ctx.send(\"&East, could I ask you for a favor? I need someone to verify my code.\")\n await asyncio.sleep(2)\n async with ctx.typing():\n await asyncio.sleep(1)\n await ctx.send(\"Oh my. 
Well, if you insist ;)\")", "title": "" }, { "docid": "2643a26dfeec55da3e3236497cdd3996", "score": "0.5023393", "text": "def set_favorite(self, favorite):\n\n\t\tif favorite is not None and not isinstance(favorite, int):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: favorite EXPECTED TYPE: int', None, None)\n\t\t\n\t\tself.__favorite = favorite\n\t\tself.__key_modified['favorite'] = 1", "title": "" }, { "docid": "79ab39478a287446909c828b2a124408", "score": "0.5019926", "text": "def favorited(self: Article, request: Request):\n if not request.user:\n return False\n\n if self in request.user.favorites:\n return True\n\n return False", "title": "" }, { "docid": "5c445539b40306204cb47d5e147b5104", "score": "0.5017774", "text": "def favorite(request, movie_id):\n\n movie = get_object_or_404(Movie, pk=movie_id)\n try:\n if movie.liked:\n movie.liked = False\n else:\n movie.liked = True\n movie.save()\n except (KeyError, Movie.DoesNotExist):\n return JsonResponse({'success': False})\n else:\n return JsonResponse({'success': True})", "title": "" }, { "docid": "e95414c9b84980f45f6e70b268ef91c9", "score": "0.5017013", "text": "def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)", "title": "" }, { "docid": "480175aa32c0acc9bf8c9ff22df80626", "score": "0.5011808", "text": "def get_user_votes(user_id: int) -> int:\n session = Session()\n\n # get user by id to ensure user exists\n get_user_by_id(user_id)\n # count votes for the user that haven't expired\n user_votes: int = session.query(Vote)\\\n .filter(Vote.user_id == user_id)\\\n .filter(Vote.vote_expiry > datetime.datetime.now()).count()\n\n session.close()\n\n return user_votes", "title": "" }, { "docid": "e2508c0ab870b724fe56176d0d9c6ff3", "score": "0.50109863", "text": "def favorite(self, favorite: bool):\n if favorite is None:\n raise ValueError(\"Invalid value for `favorite`, must not be `None`\")\n\n self._favorite = favorite", "title": "" }, { "docid": "91360a2b3fd424ddfd79e342e70de0eb", "score": "0.50049436", "text": "def voteCheck(number):\n\n if number >= MIN_VOTES and number <= MAX_VOTES:\n return True\n else:\n return False\n number = input(\"\\n\\tEnter votes: \")", "title": "" }, { "docid": "5baf779ece7cdb3d673d9d97ccdd70ce", "score": "0.5003415", "text": "def assign_vote(from_userid, to_userid, group):", "title": "" }, { "docid": "a08281fef8fc26b8ef7cf3a0c97b88f5", "score": "0.50023836", "text": "def like_tweets(pos_tweets):\n\n for tweet in pos_tweets:\n twitter.CreateFavorite(status_id=tweet.id)\n\n return", "title": "" }, { "docid": "6799f31b617fa7c8cf56655d3d3d6094", "score": "0.49979877", "text": "def like(request, post_id):\n if request.method == \"PUT\":\n liked = None\n user = request.user\n post = Post.objects.get(id=post_id)\n # If user already liked, decrement the like count and remove as 'liker'\n if user in post.liked_by.all():\n post.liked_by.remove(user)\n post.likes -= 1\n post.save()\n liked = False\n # Else increase like count and add user\n else:\n post.liked_by.add(user)\n post.likes += 1\n post.save()\n liked = True\n # Return data for updating dynamically with javascript\n return JsonResponse({\"total_likes\": post.likes, \"liked\": liked})", "title": "" }, { "docid": "8a318bb9253b192f98b805f9e1a4a29c", "score": "0.49915192", "text": "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = 
Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "title": "" } ]
193289eca65ad1de4c8f26be05866866
Save the trajectory to VTK file. sols: list of return values of odeint. outputFile
[ { "docid": "2cbebc2b84e1f507d8560b59c16a3750", "score": "0.6825749", "text": "def saveTrajectory(sols, outputFile):\r\n\r\n # number of contours\r\n nContours = len(sols)\r\n\r\n # number of points for each contour\r\n nptsContour = [sol.shape[0] for sol in sols]\r\n\r\n # total number of points\r\n npts = functools.reduce(lambda x, y: x + y, nptsContour)\r\n\r\n # total number of segments\r\n nSegs = functools.reduce(lambda x, y: x + y, [nps - 1 for nps in nptsContour])\r\n\r\n # number of space dimensions\r\n ndims = 3\r\n\r\n pvals = numpy.zeros((npts, 3), numpy.float64)\r\n tarr = vtk.vtkDoubleArray()\r\n tpts = vtk.vtkPoints()\r\n tgrid = vtk.vtkUnstructuredGrid()\r\n\r\n tarr.SetNumberOfComponents(ndims)\r\n tarr.SetNumberOfTuples(npts)\r\n tpts.SetNumberOfPoints(npts)\r\n\r\n ptIds = vtk.vtkIdList()\r\n ptIds.SetNumberOfIds(2)\r\n\r\n tgrid.Allocate(nSegs, 1)\r\n\r\n # create the points and the unstructured grid that goes with it\r\n offset1 = 0\r\n offset2 = 0\r\n for iContour in range(nContours):\r\n\r\n ns = nptsContour[iContour]\r\n\r\n # store points\r\n for i in range(ns):\r\n pvals[i + offset1, :] = sols[iContour][i]\r\n pvals[i + offset1, 0] = max(0., min(360., pvals[i + offset1, 0]))\r\n pvals[i + offset1, 1] = max(-90., min(90., pvals[i + offset1, 1]))\r\n offset1 += ns\r\n\r\n # create new cells/segments\r\n for i in range(ns - 1):\r\n ptIds.SetId(0, i + offset2)\r\n ptIds.SetId(1, i + 1 + offset2)\r\n tgrid.InsertNextCell(vtk.VTK_LINE, ptIds)\r\n offset2 += ns\r\n\r\n # connect\r\n tpts.SetData(tarr)\r\n tgrid.SetPoints(tpts)\r\n tarr.SetVoidArray(pvals, npts*3, 1)\r\n\r\n # save\r\n writer = vtk.vtkUnstructuredGridWriter()\r\n writer.SetFileName(outputFile)\r\n writer.SetInputData(tgrid)\r\n writer.Update()", "title": "" } ]
[ { "docid": "dd27b61786d1a199f83439837f88fbea", "score": "0.6813716", "text": "def _writeDemVtk(self):\n zDim = 1\n v = open(self.vtkOutputFile, 'w')\n v.write('# vtk DataFile Version 2.0\\n')\n v.write('Resampled DEM\\n')\n v.write('ASCII\\n')\n v.write('DATASET RECTILINEAR_GRID\\n')\n dimString = 'DIMENSIONS ' + str(self.numXOut) + ' ' + str(self.numYOut) + \\\n ' ' + str(zDim) + '\\n'\n v.write(dimString)\n\n xString = 'X_COORDINATES ' + str(self.numXOut) + ' double\\n'\n v.write(xString)\n for point in range(self.numXOut):\n v.write(\"%15.11e \" % self.xOut[point])\n if ((point + 1) % 5 == 0):\n v.write(\"\\n\")\n\n yString = '\\nY_COORDINATES ' + str(self.numYOut) + ' double\\n'\n v.write(yString)\n for point in range(self.numYOut):\n v.write(\"%15.11e \" % self.yOut[point])\n if ((point + 1) % 5 == 0):\n v.write(\"\\n\")\n\n zString = '\\nZ_COORDINATES ' + str(zDim) + ' double\\n'\n v.write(zString)\n v.write('0.0\\n')\n\n zString1 = 'POINT_DATA ' + str(self.numZOut) + '\\n'\n v.write(zString1)\n zString2 = 'SCALARS elevation double 1\\n'\n v.write(zString2)\n zString3 = 'LOOKUP_TABLE default\\n'\n v.write(zString3)\n for yPoint in range(self.numYOut):\n for xPoint in range(self.numXOut):\n v.write(\"%15.11e \" % self.zOut[yPoint, xPoint])\n if ((xPoint + 1) % 5 == 0):\n v.write(\"\\n\")\n\n v.close()\n return", "title": "" }, { "docid": "95eccdb85d9dfdfdd95d3da808393ddf", "score": "0.6648889", "text": "def SaveTheoryCurves(self,filename):\n\t\t#print len(self.y_arrays)\n\t\t#print self.y_arrays\t\t\n\t\t\t\t\t\n\t\txy_data = list(zip(self.x_array,self.y_arrays[0].real,self.y_arrays[1].real,self.y_arrays[2].real,self.y_arrays[3].real,\\\n\t\t\tself.y_arrays[4][0].real, self.y_arrays[4][1].real,\\\n\t\t\tself.y_arrays[5][0].real, self.y_arrays[5][1].real, \\\n\t\t\tself.y_arrays[6][0].real, self.y_arrays[6][1].real,\\\n\t\t\tself.y_arrays[7][0].real, self.y_arrays[7][1].real, self.y_arrays[7][2].real))\n\t\tprint('Here fine1')\n\t\tsuccess = write_CSV(xy_data, filename, titles=['Detuning']+OutputTypes)\n\t\tif not success:\n\t\t\tproblem_dlg = wx.MessageDialog(self, \"There was an error saving the data...\", \"Error saving\", wx.OK|wx.ICON_ERROR)\n\t\t\tproblem_dlg.ShowModal()", "title": "" }, { "docid": "830a1009bb1b8b77405d74818b8f6b38", "score": "0.64904755", "text": "def save(self, fileName):\n with open(fileName, 'w') as f:\n f.write(\n ('# vtk DataFile Version 3.0\\n%s\\n' % self.name)+\n 'ASCII\\nDATASET POLYDATA\\n'\n )\n \n f.write('POINTS %i double\\n' % len(self.points))\n #self.points.tofile(f, sep=' ', format = '%f')\n for p in self.points:\n p.tofile(f, sep=' ', format = '%g')\n f.write('\\n')\n\n size = len(self.bonds)\n f.write('LINES %i %i\\n' % (size, 3*size))\n for b in self.bonds:\n f.write('2 ')\n b.tofile(f, sep=' ', format = '%i')\n f.write('\\n')\n #np.hstack((\n # np.ones((size,1), dtype=int)*2,\n # self.bonds\n # )).tofile(f, sep=' ', format = '%i')\n\n f.write('POINT_DATA %i\\n' % len(self.points))\n\n for name, field in self.scalars:\n if field.dtype.kind=='i':\n f.write('SCALARS %s int\\n'%name)\n f.write('LOOKUP_TABLE default\\n')\n field.tofile(f, sep='\\n', format = '%i')\n else:\n f.write('SCALARS %s double\\n'%name)\n f.write('LOOKUP_TABLE default\\n')\n field.tofile(f, sep='\\n', format = '%g')\n f.write('\\n')\n for name, field in self.vectors:\n f.write('VECTORS %s double\\n'%name)\n #field.tofile(f, sep=' ', format = '%f')\n for v in field:\n v.tofile(f, sep=' ', format = '%g')\n f.write('\\n')\n\n if len(self.bondsScalars)>0:\n 
f.write('CELL_DATA %i\\n' % len(self.bonds))\n for name, field in self.bondsScalars:\n if field.dtype.kind=='i':\n f.write('SCALARS %s int\\n'%name)\n f.write('LOOKUP_TABLE default\\n')\n field.tofile(f, sep='\\n', format = '%i')\n else:\n f.write('SCALARS %s double\\n'%name)\n f.write('LOOKUP_TABLE default\\n')\n field.tofile(f, sep='\\n', format = '%g')\n f.write('\\n')", "title": "" }, { "docid": "d2ffec054183594f84162f2a7f553652", "score": "0.6422889", "text": "def save_vtk(self):\n self.VTK.reset_data()\n\n # Here we save both Ms and spins as cell data\n self.VTK.save_scalar(self._Ms, name='M_s')\n self.VTK.save_vector(self.spin.reshape(-1, 3), name='spins')\n\n self.VTK.write_file(step=self.step)", "title": "" }, { "docid": "657c74296af6b2f57fd6d957594c4bfb", "score": "0.62348306", "text": "def writeRotatingTargetOssimTrajFile(filename, trajType, distance, xTargPos,\n yTargPos, zTargPos, xVel, yVel, zVel, engine, deltaTime ):\n\n if trajType == 'Rotate':\n (x, y, z, roll, pitch, yaw, vertices, triangles) = \\\n getRotateFromOffFile(filename, xTargPos, yTargPos, zTargPos)\n elif trajType == 'Orbit':\n (x, y, z, roll, pitch, yaw,vertices, triangles) = \\\n getOrbitFromOffFile(filename, xTargPos, yTargPos, zTargPos, distance)\n else:\n print('Unkown trajectory type')\n return\n\n zerov = numpy.zeros(yaw.shape).reshape(-1, 1)\n onesv = numpy.ones(yaw.shape).reshape(-1, 1)\n\n time = numpy.array([deltaTime * i for i in range(0,zerov.shape[0])]).reshape(-1, 1)\n #time = numpy.around(time,2) # rounding does not help. internal representation!!\n\n outp = time\n outp = numpy.hstack((outp, x))\n outp = numpy.hstack((outp, y))\n outp = numpy.hstack((outp, z))\n outp = numpy.hstack((outp, roll))\n outp = numpy.hstack((outp, yaw))\n outp = numpy.hstack((outp, pitch))\n outp = numpy.hstack((outp, xVel * onesv)) # x-velocity\n outp = numpy.hstack((outp, yVel * onesv)) # y-velocity\n outp = numpy.hstack((outp, zVel * onesv)) # z-velocity\n outp = numpy.hstack((outp, engine * onesv)) # engine setting\n\n outfile = os.path.basename(filename)\n idx=outfile.find('.')\n outfile = outfile[:idx]\n\n fid = open('Trajectory{0}{1}.txt'.format(trajType,outfile), 'w' )\n fid.write( 'Time x y z rol yaw pit vx vy vz engine \\n' )\n fid.write( '0.0 infty infty infty infty infty infty infty infty infty infty \\n' )\n fid.write( '0.0 infty infty infty infty infty infty infty infty infty infty\\n' )\n numpy.savetxt(fid , outp)\n fid.close()\n\n fid = open('triangles{0}.txt'.format(outfile), 'w' )\n numpy.savetxt( fid , triangles )\n fid.close()\n\n fid = open('vertex{0}.txt'.format(outfile), 'w' )\n numpy.savetxt( fid , vertices )\n fid.close()\n\n print('Set OSSIM clock to {0} increments and max time {1}\\n'.\\\n format(deltaTime, deltaTime * yaw.shape[0]))", "title": "" }, { "docid": "d5d4e7fd129378b67424937daf16ec42", "score": "0.6179806", "text": "def savetofile(self):\n outbuffer = self.outbuffer\n xyzstring = ''\n for currpos in outbuffer:\n rawtext = [' '.join('%0.3f' %x for x in y) for y in currpos.T]\n xyzstring += self.mattoxyz(rawtext)\n self.outputobject.write(xyzstring)\n self.outbuffer = []", "title": "" }, { "docid": "84f8805848ca3d5c92746680b29ed4e4", "score": "0.6171008", "text": "def getOutputVTKwithPointDataFromFile(fileName):\n\n # function display \n print '\\n---- DAEPy::getOutputVTKwithPointDataFromFile ----'\n \n # test if the file exists\n print '\\n--> Reading', fileName\n if not os.path.isfile(fileName):\n raise ValueError(\"Error : file does not exists\")\n\n extension = 
os.path.splitext(fileName)[-1]\n if extension == '.vtu':\n reader = vtk.vtkXMLUnstructuredGridReader()\n elif extension == '.vtk':\n reader = vtk.vtkStructuredGridReader()\n elif extension == '.pvtu':\n reader = vtk.vtkXMLPUnstructuredGridReader()\n elif extension == '.vtp':\n reader = vtk.vtkXMLPolyDataReader()\n elif extension == '.vtm':\n # TODO : To check\n reader = vtk.vtkXMLMultiBlockDataReader()\n reader = vtk.MergeBlocks(reader)\n else:\n raise ValueError(\"Error: unknown extension of file \" + fileName)\n\n reader.SetFileName(fileName)\n\n reader.ReadAllScalarsOn()\n reader.ReadAllVectorsOn()\n\n reader.Update()\n data_outVTK = reader.GetOutput()\n\n# # All the data are transfered to the nodes\n# c2p = vtk.vtkCellDataToPointData()\n# c2p.SetInputData(data_outVTK)\n# c2p.Update()\n# data_outVTK = c2p.GetOutput()\n\n # list the fields available\n\n n_fields = data_outVTK.GetPointData().GetNumberOfArrays()\n\n print '\\n--> Available:', n_fields, 'fields\\n'\n\n for i in range(n_fields):\n print ' -', data_outVTK.GetPointData().GetArrayName(i)\n\n\n print ''\n return data_outVTK", "title": "" }, { "docid": "812d40b69e0e78251b785e46c6b9a1ef", "score": "0.607724", "text": "def saveTrajectory(self):\n filepath = 'output/'\n if path.exists(filepath):\n filename = 'traj.npy'\n filepath += filename \n np.save(filepath, self.camera_traj)\n print(\"Trajectory saved to: \" + filepath)\n else:\n print(\"Writing path does not exist!\")", "title": "" }, { "docid": "5aa0db15261cede24b2a0a916f54be10", "score": "0.6060274", "text": "def onStructureTensorView(self, event):\r\n \r\n file = open(\"c:\\\\temp\\\\output.tab\", \"w\")\r\n \r\n volume = numpy.zeros(volumes['Original'].shape)\r\n #selected x, y, and z\r\n sx = (getNode(self.settingsTree, ('imageControls','xIndex'))).get()\r\n sy = (getNode(self.settingsTree, ('imageControls','yIndex'))).get()\r\n sz = (getNode(self.settingsTree, ('imageControls','zIndex'))).get()\r\n \r\n xG = volumes['xGradient'][sx,sy,sz]\r\n yG = volumes['yGradient'][sx,sy,sz]\r\n zG = volumes['zGradient'][sx,sy,sz]\r\n \r\n stAtSelectedPoint = structureTensor(xG,yG,zG)\r\n eigAtSelectedPoint = numpy.linalg.eigvals(stAtSelectedPoint)\r\n \r\n sh = volumes['Original'].shape\r\n \r\n file.write(\"eig1\\teig2\\teig3\\tis_membrane\\n\")\r\n file.write(\"c\\tc\\tc\\tdiscrete\\n\")\r\n file.write(\"\\t\\t\\tclass\\n\")\r\n \r\n for x in range(0,sh[0]):\r\n print x\r\n for y in range(0,sh[1]):\r\n for z in range(0,sh[2]):\r\n \r\n xG = volumes['xGradient'][x,y,z]\r\n yG = volumes['yGradient'][x,y,z]\r\n zG = volumes['zGradient'][x,y,z]\r\n \r\n st = structureTensor(xG,yG,zG)\r\n eigenValues = numpy.linalg.eigvals(st)\r\n \r\n for value in eigenValues:\r\n file.write(\"%f\\t\" % value)\r\n file.write(\"%s\" % particleGroup.containsIntegerPoint((x,y,z)))\r\n file.write(\"\\n\")\r\n \r\n volume[x,y,z] = distance(eigenValues, eigAtSelectedPoint)\r\n \r\n self.addVolumeAndRefreshDataTree(volume, 'eigenValueStructureTensorView')\r\n file.close()", "title": "" }, { "docid": "3c33e220bc6929a566905cef63b6a3f0", "score": "0.60137236", "text": "def write(gas, thermo_fileName_new, plot):\n with open(thermo_fileName_new, 'w+') as f:\n #f.write('THERMO ALL' + '\\n' +\n # ' 300.000 1000.000 5000.000' +'\\n')\n\n Tlow = 300\n Tcommon = 1000\n Thigh = 5000\n deltaT = 100\n T_low = np.arange(Tlow,Tcommon+deltaT,deltaT)\n T_high = np.arange(Tcommon,Thigh+deltaT,deltaT)\n temp_rangeAll = '{0:10.3f}'.format(Tlow) + '{0:10.3f}'.format(Tcommon) + '{0:10.3f}'.format(Thigh)\n f.write('THERMO ALL' + 
'\\n' + temp_rangeAll + '\\n')\n\n\n\n #write data for each species in the Solution object\n for sp_index in range(len(gas.species_names)):\n species = gas.species(sp_index)\n species_name = gas.species_name(sp_index)\n molecular_weight = gas.molecular_weights[sp_index]\n nasa_coeffs = species.thermo.coeffs # orininal coeffs, size = 15, [0]=Tcommon, [1-7] is high coeffs, [8-14] is low coeffs\n temp_range = '{:<10}'.format(Tlow) + '{:<10}'.format(Thigh) + '{:<8}'.format(Tcommon)\n\n if (nasa_coeffs[0] != Tcommon):\n print(\"fitting species: %s\"%(species_name), \"from \",nasa_coeffs[0], \"to \",Tcommon)\n cp_low = [species.thermo.cp(T)/molecular_weight for T in T_low] # [J/(kmol·K)] / [kg/kmol] = [J/(kg·K)]\n cp_high = [species.thermo.cp(T)/molecular_weight for T in T_high] # [J/(kmol·K)] / [kg/kmol] = [J/(kg·K)]\n\n cp_low_polynomials = np.polyfit(T_low, cp_low, 4)\n cp_high_polynomials = np.polyfit(T_high, cp_high, 4)\n\n R = ct.gas_constant/molecular_weight # [J/(kmol·K)] / [kg/kmol] = [J/(kg·K)]\n\n #reverse order and divide by R (np.ployfit has calculated coeffs*R so we must divide by R to get just the coeffs)\n a1_a5_low = np.flip(cp_low_polynomials,0)/R\n a1_a5_high = np.flip(cp_high_polynomials,0)/R\n\n T_ref = 298.15\n h = species.thermo.h(T_ref)/molecular_weight # [J/kmol] / [kg/kmol] = [J/kg]\n s = species.thermo.s(T_ref)/molecular_weight # [J/kmol/K] / [kg/kmol] = [J/(kg·K)]\n\n a6_low = h/R - np.sum(np.array([a1_a5_low[i]*T_ref**(i+1)/(i+1) for i in range(5)]))\n a6_high = h/R - np.sum(np.array([a1_a5_high[i]*T_ref**(i+1)/(i+1) for i in range(5)]))\n\n a7_low = s/R - np.sum(np.array([a1_a5_low[i]*T_ref**(i)/(i) for i in range(1,5)]))-a1_a5_low[0]*math.log(T_ref)\n a7_high = s/R - np.sum(np.array([a1_a5_high[i]*T_ref**(i)/(i) for i in range(1,5)]))-a1_a5_high[0]*math.log(T_ref)\n\n nasa_coeffs = np.concatenate([[Tcommon], a1_a5_high, [a6_high], [a7_high], a1_a5_low, [a6_low], [a7_low]])\n\n\n if plot:\n Tlist = np.concatenate([T_low, T_high])\n cp = [species.thermo.cp(T)/molecular_weight for T in Tlist] # [J/(kg·K)]\n cp_fit_low = [ np.sum(np.array([a1_a5_low[i]*T**(i) for i in range(5)])) for T in T_low] # cp/R\n cp_fit_high = [ np.sum(np.array([a1_a5_high[i]*T**(i) for i in range(5)])) for T in T_high] # cp/R\n cp_fit = np.concatenate([cp_fit_low, cp_fit_high])*R\n\n a1_a5_org_low = species.thermo.coeffs[8:13]\n a1_a5_org_high = species.thermo.coeffs[1:6]\n cp_bad_low = [ np.sum(np.array([a1_a5_org_low[i]*T**(i) for i in range(5)])) for T in T_low] # cp/R\n cp_bad_high = [ np.sum(np.array([a1_a5_org_high[i]*T**(i) for i in range(5)])) for T in T_high] # cp/R\n cp_bad = np.concatenate([cp_bad_low, cp_bad_high])*R\n\n\n enthalpy = [species.thermo.h(T)/molecular_weight for T in Tlist] # J/kg\n enthalpy_fit_low = [ a6_low + np.sum(np.array([a1_a5_low[i]*T**(i+1)/(i+1) for i in range(5)])) for T in T_low] # h/R\n enthalpy_fit_high = [ a6_high + np.sum(np.array([a1_a5_high[i]*T**(i+1)/(i+1) for i in range(5)])) for T in T_high] # h/R\n enthalpy_fit = np.concatenate([enthalpy_fit_low, enthalpy_fit_high])*R\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot()\n ax1.scatter(Tlist, cp, c='blue', label=\"cp_org, Tcommon=%d\"%(species.thermo.coeffs[0]))\n ax1.plot(Tlist, cp_fit, c='red', label=\"cp_fit, Tcommon=1000\")\n ax1.plot(Tlist, cp_bad, c='purple', label=\"cp_bad\")\n ax1.set_xlabel(\"T [K]\")\n ax1.set_ylabel(\"cp [J/kg]\")\n\n ax2 = plt.twinx()\n ax2.scatter(Tlist, enthalpy, c='black', label=\"h_org, Tcommon=%d\"%(species.thermo.coeffs[0]))\n ax2.plot(Tlist, enthalpy_fit, 
c='green', label=\"h_fit, Tcommon=1000\")\n ax2.set_ylabel(\"h [J/kg]\")\n\n fig.legend(loc=\"lower right\", bbox_to_anchor=(1,0), bbox_transform=ax1.transAxes)\n plt.savefig(\"%s.png\"%(species_name), dpi=500, bbox_inches='tight', pad_inches=0.1)\n plt.close()\n\n\n\n species_comp = ''\n for atom in species.composition:\n species_comp += '{:<3}'.format(atom.upper())\n species_comp += '{:<2}'.format(str(int(species.composition[atom])))\n\n species_phase = 'G'\n\n # Ref: Table 3. Summary of the Rules for Thermo Data\n # https://shepherd.caltech.edu/EDL/PublicResources/sdt/formats/chemkin.html#:~:text=The%20Chemkin%20thermo%20file%20format,%2C%20Sandia%20Report%20SAND89%2D8009.\n\n line_1 = (\n '{:<18}'.format(species_name) +\n '{:<6}'.format(' ') +\n '{:<20}'.format(species_comp) +\n '{:<1}'.format(species_phase) +\n '{:<28}'.format(temp_range) +\n '{:<5}'.format(' ') +\n '{:<1}'.format(' ') +\n '{:<1}'.format('1') +\n '\\n')\n f.write(line_1)\n\n line_2_coeffs = build_nasa(nasa_coeffs, 2)\n line_2 = line_2_coeffs + ' 2\\n'\n f.write(line_2)\n\n line_3_coeffs = build_nasa(nasa_coeffs, 3)\n line_3 = line_3_coeffs + ' 3\\n'\n f.write(line_3)\n\n line_4_coeffs = build_nasa(nasa_coeffs, 4)\n line_4 = line_4_coeffs + ' 4\\n'\n f.write(line_4)\n\n f.write('END\\n')", "title": "" }, { "docid": "95c7320506baff1183daed481bde4482", "score": "0.594346", "text": "def SaveAsVTK(self, filename):\n # place holder\n print('Run grid = archive.ParseVTK() and then\\ngrid.WriteGrid(filename, binary)')", "title": "" }, { "docid": "9eb4043d2461f43a5f3470f5f9c48421", "score": "0.5899905", "text": "def output(path_input,fin):\n\n for type in inputs.cOutputType:\n print(\"Writing output type: {} at {:6.1f}h\".\\\n format(type,(s.runTime-inputs.startTime).total_seconds()/3600.))\n days, remainder = \\\n divmod((s.runTime-inputs.startTime).total_seconds(),\\\n s.nSecondsInDay)\n hours, remainder = divmod(remainder, s.nSecoundsInHour)\n minutes, seconds = divmod(remainder, s.nSecondsInMinute)\n cTime = \\\n \"{:05d}_{:02d}{:02d}{:02d}\".format(int(days),\\\n int(hours),int(minutes),int(seconds))\n if len(fin)>0:\n file = path_input+\"/final.dat\".format(type,cTime)\n else:\n file = path_input+\"/{}_{}.dat\".format(type,cTime)\n try:\n outfile = open(file,'w')\n except:\n print(\"----Error: Issue opening file {}\".format(file))\n print(\"----Does directory and file exist?\")\n exit(1)\n\n outfile.write(\"#PlanetDistance: {:6.3f}\\n\".\\\n format(s.orbitalDistance))\n\n if type.lower() == \"all\":\n outfile.write(\"#Alt\\t[O]\\t[O2]\\t[O3]\\tOvmr\\tO2vmr\\tO3vmr\\n\")\n\n for iAlt in range(s.nLayers):\n\n outfile.write('{:05.2f}\\t'.format(s.Altitude[iAlt]/1e5))\n outfile.write('\\t'.join(\"{:09.7e}\".format(d) for d \\\n in s.density[:,iAlt]))\n outfile.write('\\t')\n outfile.write('\\t'.join(\"{:09.7e}\".format(d) for d \\\n in s.density[:,iAlt]/s.N[iAlt]))\n outfile.write('\\n')\n\n\n if type.lower() == \"photo\":\n\n outfile.write(\"#Alt\\tJ(O3)\\tJ(O2)\\n\")\n\n for iAlt in range(s.nLayers):\n\n outfile.write('{:05.2f}\\t'.format(s.Altitude[iAlt]/1e5))\n outfile.write('\\t'.join(\"{:09.7e}\".format(d) for d \\\n in s.PhotoDissRate_Alt[:,iAlt]))\n outfile.write('\\n')\n\n if type.lower() == \"user\":\n s.userdata[0,:] = s.Altitude/1.0e5\n iError = user.outputuser(outfile,s.userdata)\n\n outfile.close()\n return 0", "title": "" }, { "docid": "068e5b7cf457632832f8b04d981c8315", "score": "0.5882888", "text": "def SavePlot(workPath, inoutName, dataX, Fits, S_curves, best_shots, steptable, demo):\n # create header for 
csv file\n header = [\n \"index\",\n \"level before\",\n \"level after\",\n \"step\",\n \"dwell before\",\n \"dwell after\",\n \"predicted error\",\n \"measured error\",\n ]\n\n # create outpath\n if workPath != \"nopath\":\n outPath = workPath + str(\"\\\\steppy_results\\\\\")\n if not Path(outPath).is_dir():\n Path(outPath).mkdir()\n else:\n outPath = \"\"\n\n with open(\n outPath + inoutName + str(\"_steps\") + str(\".csv\"), \"w\"\n ) as csv_f: # will overwrite existing\n # create the csv writer\n writer = csv.writer(csv_f, delimiter=\"\\t\", lineterminator=\"\\n\")\n writer.writerow(header)\n\n # save characteristic variables to csv file in outpath\n with open(\n outPath + inoutName + str(\"_steps\") + str(\".csv\"), \"a\"\n ) as csv_f: # will overwrite existing # will append to file\n # create the csv writer\n writer = csv.writer(csv_f, delimiter=\"\\t\", lineterminator=\"\\n\")\n # write data to the csv file\n sz = np.shape(steptable)\n for sti in range(sz[0]):\n step_row = steptable[sti]\n writer.writerow(step_row)\n # show final result\n if demo - np.floor(demo) >= 0.1:\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.plot(dataX)\n ax1.plot(np.transpose(Fits))\n ax1.set_title(\"intermediate & final result\")\n ax1.set_xlabel(\"time, a.u.\")\n ax1.set_ylabel(\"position\")\n ax1.set_box_aspect(0.5)\n ax2.plot(0 * S_curves[0])\n ax2.plot(np.transpose(S_curves))\n if len(best_shots) == 1:\n ax2.plot(best_shots[0] - 1, S_curves[best_shots[0] - 1], \"ro\")\n else:\n for ii in range(len(best_shots)):\n ax2.plot(best_shots[ii] - 1, S_curves[ii, best_shots[ii] - 1], \"ro\")\n ax2.set_title(\"S-curve evaluation\")\n ax2.set_xlabel(\"no. of steps in fit\")\n ax2.set_ylabel(\"S-value, a.u.\")\n ax2.set_box_aspect(0.7)\n plt.show(block=False)\n png_nm = outPath + inoutName + str(\"_fit\") + str(\".png\")\n fig.savefig(png_nm, dpi=500)\n if demo < 1.5: # if not in batch mode\n input(\"Press Enter to continue...\")\n else:\n plt.pause(0.1)\n plt.close()", "title": "" }, { "docid": "990f6ee8695e74e37ba07faa4434bff2", "score": "0.5868906", "text": "def save_vtk(self, outfile, inflated=False):\n # Import here since vtk is not required by the package\n import vtk\n\n # Check that the inflated vertices are specified if requested\n if inflated and self.inflated_vertices is None:\n raise ValueError(\"Can't save inflated volume '{0}' since it has \"\n \"not been specified.\".format(outfile))\n\n # Create the desired polydata\n polydata = self._polydata(inflated=inflated)\n\n # Write the polydata\n writer = vtk.vtkPolyDataWriter()\n # writer.SetDataModeToAscii()\n writer.SetFileTypeToASCII()\n writer.SetFileName(outfile)\n if vtk.VTK_MAJOR_VERSION <= 5:\n writer.SetInput(polydata)\n else:\n writer.SetInputData(polydata)\n writer.Write()", "title": "" }, { "docid": "190c5016f732a16cfefe17660e45d8c9", "score": "0.5867323", "text": "def save_case(solver_obj, filename):\r\n filehandling.write_solver(solver_obj, filename+'.pk1')\r\n if hasattr(solver_obj, '_euler_path'):\r\n filehandling.write_plot(plot.plot2d(solver_obj),\r\n filename+'_plot2d.pdf')\r\n filehandling.write_plot(plot.plot3d(solver_obj),\r\n filename+'_plot3d.pdf')\r\n else:\r\n print('warning save_case: lorenz_solver object contains no euler_path'+\r\n ' attribute, plots have not been generated.')", "title": "" }, { "docid": "b9aa2cf3200b8ed03b928032f18b4579", "score": "0.58333474", "text": "def vtk_write_output(results_root, name, nodes, elements, output, overwrite = False):\n\n # Create result folder\n folder_path = 
__vtk_create_result_folder(results_root, name, overwrite)\n\n # Create grouping for time dependant data\n grp_name = name + \"-group\"\n grp_path = path.join(folder_path, grp_name)\n grp = VtkGroup(grp_path)\n\n # Create mappings\n enm = [e.enm for e in elements]\n elem_types = [type(e) for e in elements]\n nds = calculate_nds(enm, elem_types, len(nodes))\n ndm = calculate_ndm(nds)\n ntdm = calculate_ntdm(nds, enm, ndm)\n\n # Create a file for every time-step\n for t_id, t in enumerate(output.t):\n file_name = name + \"-%s\" % t\n file_path = path.join(folder_path, file_name)\n\n # Create file, add it to the group\n f = VtkFile(file_path, VtkUnstructuredGrid)\n grp.addFile(file_path+\".vtu\", t)\n\n num_nodes = len(nodes)\n num_elements = len(elements)\n\n f.openElement(\"UnstructuredGrid\")\n f.openPiece(npoints=num_nodes, ncells=num_elements)\n\n node_data = __vtk_add_nodes_to_file(f, nodes)\n\n f.openElement(\"PointData\")\n u_data = __vtk_add_vector_data_to_file(f, ntdm, output.u[t_id,:], \"displacement\")\n f.closeElement(\"PointData\")\n\n # Add elements\n conn_data, offset_data, type_data = __vtk_add_elements_to_file(f, enm, elements)\n\n f.closePiece()\n f.closeElement(\"UnstructuredGrid\")\n\n f.appendData(node_data)\n f.appendData(u_data)\n f.appendData(conn_data)\n f.appendData(offset_data)\n f.appendData(type_data)\n\n f.save()\n\n grp.save()", "title": "" }, { "docid": "4149cf4945ee4c4550290a26c49037d7", "score": "0.5830135", "text": "def test_openclose(self):\n output = OutputSolnSubset()\n output.inventory.label = \"2\"\n output.inventory.writer.inventory.filename = \"test.vtk\"\n output.inventory.writer._configure()\n output._configure()\n\n output.preinitialize()\n output.initialize(self.mesh, self.normalizer)\n\n from pythia.pyre.units.time import s\n output.open(totalTime=5.0*s, numTimeSteps=2)\n output.close()\n return", "title": "" }, { "docid": "744d8db293993424664486d6d15dfbc0", "score": "0.5821272", "text": "def save_to_vtk(data, filepath):\n x = np.arange(data.shape[0]+1)\n y = np.arange(data.shape[1]+1)\n z = np.arange(data.shape[2]+1)\n gridToVTK(filepath, x, y, z, cellData={'data':data.copy()})", "title": "" }, { "docid": "b432e674fe628bb7d650933b499353c4", "score": "0.58139646", "text": "def draw_trj(obj, varlist, filelist, idlist, cfile, rfiles, pfiles, \n savename = False,pollon = None, pollat = None, xlim = None, \n ylim = None, onlybool = False, startarray = None, \n stoparray = None, trjstart = None, idtext = '', linewidth = 0.7,\n carray = 'P', centdiff = False, sigma = None):\n \n # Get trj_time after model start \n tmpt = filelist[0].split('/')[-1]\n tmpt = int(tmpt.split('_')[1].lstrip('t'))\n \n # Plot contours\n draw_contour(obj, varlist, tmpt, idtext = idtext)\n \n # Plot trajectories\n cnt = 0 # continuous counter for startarray and stoparray\n for i in range(len(filelist)):\n print 'Plotting file', i+1, 'of', len(filelist)\n rootgrp = nc.Dataset(filelist[i], 'r')\n lonmat = rootgrp.variables['longitude'][:, :]\n latmat = rootgrp.variables['latitude'][:, :]\n pmat = rootgrp.variables[carray][:, :]\n \n lonmat[:, :] += (180 - pollon) # Convert to real coordinates\n latmat[:, :] += (90 - pollat)\n \n for j in idlist[i]:\n # Filter out zero values!\n if onlybool:\n parray = pmat[startarray[cnt]:stoparray[cnt], j]\n lonarray = lonmat[startarray[cnt]:stoparray[cnt], j]\n latarray = latmat[startarray[cnt]:stoparray[cnt], j]\n cnt += 1\n else:\n parray = pmat[:, j][pmat[:, j] != 0]\n lonarray = lonmat[:, j][pmat[:, j] != 0]\n latarray = latmat[:, 
j][pmat[:, j] != 0]\n \n if centdiff:\n if sigma != None:\n parray = ndi.filters.gaussian_filter(parray, sigma)\n parray = np.gradient(parray)\n \n single_trj(lonarray, latarray, parray, linewidth = linewidth, \n carray = carray)\n\n cb = plt.colorbar(lc, shrink = 0.7) \n cb.set_label(carray)\n if carray == 'P':\n cb.ax.invert_yaxis()\n plt.tight_layout()\n \n \n # Save Plot\n if savename != None:\n print \"Saving figure as\", savename\n plt.savefig(savename, dpi = 400, bbox_inches = 'tight')\n plt.close('all')\n plt.clf()", "title": "" }, { "docid": "ea49d7a488bbdd12540a4372ad270ad2", "score": "0.58078784", "text": "def write_results(self, save_dir, simulation_ID):\n outfile_name=save_dir+'/'+simulation_ID+'_'+self.simtype+'.csv'\n outfile=open(outfile_name, 'w+')\n line1=\"mu= \"+str(self.mu)+\"\\tk= \"+str(self.k)+\"\\n\"\n outfile.write(line1)\n line2= \"Time\\tx_1\\tx_2\\ty_1\\ty_2\\n\"\n outfile.write(line2)\n for i in range(len(self.x1)):\n line=str(self.dt*i)+';'+str(self.x1[i])+';'+str(self.x2[i])+';'+str(self.y1[i])+';'+str(self.y2[i])+'\\n'\n outfile.write(line)\n outfile.close()", "title": "" }, { "docid": "fe8ec610adfb77daaf936af2fdea819b", "score": "0.5804336", "text": "def outputs_and_paths(par):\n mkDirectory(par[\"path\"])\n mkDirectory(par[\"path\"]+'MeasureField/')\n mkDirectory(par[\"path\"]+'vel_field_plot_type_1/')\n mkDirectory(par[\"path\"]+'vel_field_plot_type_2/')\n mkDirectory(par[\"path\"]+'vel_field_plot_type_3/')\n mkDirectory(par[\"path\"]+'Adjoint_Field/')\n mkDirectory(par[\"path\"]+'shape_gradient_norm/')\n with open(par[\"path\"]+\"wave_out.txt\", \"w\") as text_file:\n text_file.write(\n 'x-axis length {} m, from start position {} m to end position {} m. \\n' .format(par[\"xMax\"] - par[\"xMin\"], par[\"xMin\"], par[\"xMax\"]))\n text_file.write(\n '{} km for z-axis depth from the {} ground . \\n' .format(par[\"zMax\"], par[\"zMin\"]))\n text_file.write('-*-Grid dimensions-*-\\n')\n text_file.write(\n ' Grid Size : {:d} x {:d} and {:d} time steps\\n' .format(par[\"nz\"], par[\"nx\"], par[\"nt\"]))\n text_file.write(\n 'This numerical simulation only records {} seconds of wave propagation. \\n' .format(par[\"tMax\"]))\n text_file.write('The damping term only works outside of square [{}, {}] x [{}, {}] x [{}, {}] x [{}, {}]. \\n'\n .format(par[\"dmp_xMin\"], par[\"zMin\"], par[\"dmp_xMin\"], par[\"dmp_zMax\"], par[\"dmp_xMax\"], par[\"dmp_zMax\"], par[\"dmp_xMax\"], par[\"zMin\"]))\n text_file.write(\n 'Damping layer width {} \\n' .format(par[\"dmp_layer\"]))\n text_file.write(\n 'Discretizations steps hx = {}, hz = {} and ht = {}. 
\\n' .format(par[\"hx\"], par[\"hz\"], par[\"ht\"]))\n text_file.write('Parameters set:\\n init_gu = {}\\n shots amount = {}\\n receivers amount = {}\\n' .format(\n par[\"i_guess\"], par[\"n_shots\"], par[\"n_receivers\"]))\n text_file.write('gamma = {}\\n gamma2 = {}\\n ls_max = {}\\n' .format(\n par[\"gamma\"], par[\"gamma2\"], par[\"ls_max\"]))\n text_file.write('stop_coeff = {}\\n add_noise = {}\\n add_weight = {}\\n '.format(\n par[\"stop_coeff\"], par[\"add_noise\"], par[\"add_weight\"]))\n text_file.write('beta0_init = {}\\n stop_decision_limit = {}\\n alpha1 = {}\\n alpha2 = {}\\n peak frequencies of Ricker wavelet = {}\\n'.format(\n par[\"beta0_init\"], par[\"stop_decision_limit\"], par[\"alpha1\"], par[\"alpha2\"], par[\"source_peak_frequency\"]))\n text_file.write(\n 'Courant number for state/adjoint: {}\\n' .format(par[\"CFL\"]))\n\n with open(par[\"path\"]+\"receivers_location.txt\", \"a\") as text_file:\n text_file.write(\n 'Grid receivers indexes for the state/adjoint:\\n {} ' .format(par[\"rec_index\"]))\n text_file.write('\\n'+'\\n')\n text_file.write(\n 'Grid receivers locations the state/adjoint:\\n rec = {}' .format(par[\"rec\"]))", "title": "" }, { "docid": "3ab083c4a80f229d183bb0a4559141f1", "score": "0.5783867", "text": "def exportAsVtk(self, output_name: str, var_name=None, missing_value=None,\n fmt=\"%.10g\", data_type='float', version=3.4):\n\n if var_name is None and self.nvariables > 1:\n raise UndefVarExc(\"exportAsVtk\")\n key = var_name if var_name is not None else self.get_variables()[0]\n data = self._data[key]\n\n nx, ny, nz = self.shape\n ox, oy, oz = self.orig\n sx, sy, sz = self.spacing\n\n # Set header (10 first lines)\n shead = (\n \"# vtk DataFile Version {0}\\n\"\n \"{1}\\n\"\n \"ASCII\\n\"\n \"DATASET STRUCTURED_POINTS\\n\"\n \"DIMENSIONS {2} {3} {4}\\n\"\n \"ORIGIN {5} {6} {7}\\n\"\n \"SPACING {8} {9} {10}\\n\"\n \"POINT_DATA {11}\\n\"\n \"SCALARS {12} {13} {14}\\n\"\n \"LOOKUP_TABLE default\\n\"\n ).format(version,\n key,\n nx, ny, nz,\n ox, oy, oz,\n sx, sy, sz,\n self.nxyz(),\n '/'.join(self.get_variables()),\n data_type, self.nvariables)\n\n # Replace np.nan by missing_value\n if missing_value is not None:\n np.putmask(data, np.isnan(data), missing_value)\n\n # Open the file in write binary mode\n with open(output_name, 'wb') as ff:\n ff.write(shead.encode())\n # Write variable values\n np.savetxt(ff, data.reshape(1, -1).T, delimiter=' ', fmt=fmt)\n\n # Replace missing_value by np.nan (restore)\n if missing_value is not None:\n np.putmask(data, data == missing_value, np.nan)", "title": "" }, { "docid": "76a2db1ee9955b3094a8b14b2d30eb58", "score": "0.57751274", "text": "def main():\n\n arguments = sys.argv\n\n if len(arguments) < 3:\n sys.exit('ERROR - provide at least 2 file names: <modeldata file> <responses file>')\n\n try:\n WSMTmodel = os.path.abspath(os.path.realpath(arguments[1]))\n WSMTresp = os.path.abspath(os.path.realpath(arguments[2]))\n\n try:\n VTKresist = os.path.abspath(os.path.realpath(arguments[3]))\n except:\n VTKresist = os.path.abspath(os.path.realpath('VTKResistivityGrid'))\n\n try:\n VTKstations = os.path.abspath(os.path.realpath(arguments[4]))\n except:\n VTKstations = os.path.abspath(os.path.realpath('VTKStationGrid'))\n\n\n except:\n sys.exit('ERROR - could not find file(s)')\n\n\n f = open(WSMTmodel, 'r')\n\n # skip first line in file\n f.readline()\n\n # read N,E,D mesh dimensions\n dims = []\n modeldata_firstline = f.readline().split()\n for n in range(3):\n dims.append(int(modeldata_firstline[n]))\n size = 
dims[0]*dims[1]*dims[2]\n print('Mesh ', dims)\n print('Data ', size)\n\n # read N,E,D spacing\n # (depends on line break only after final value)\n spacing = []\n for n in range(3):\n i=0\n while i < dims[n]:\n modeldata_nextlines = f.readline().split()\n for j in modeldata_nextlines:\n spacing.append(float(j)/1000.0)\n i += 1\n\n # read mt data\n # (depends on line break only after final value)\n mt = np.zeros(size)\n i=0\n while i < size:\n modeldata_morelines = f.readline().split()\n for j in modeldata_morelines:\n mt[i] = float(j)\n i += 1\n\n # calc North coordinates of vtk mesh\n Ndist = 0 # calculate total North distance\n for i in range(dims[0]):\n Ndist += spacing[i]\n N = np.zeros(dims[0]+1)\n N[0] = -0.5 * Ndist # zero center of model\n for i in range(dims[0]):\n N[i+1] = N[i] + spacing[i]\n\n # calc East coordinates of vtk mesh\n Edist = 0 # calculate total y distance\n for i in range(dims[1]):\n Edist += spacing[dims[0] + i]\n E = np.zeros(dims[1]+1)\n E[0] = -0.5 * Edist # zero center of model\n for i in range(dims[1]):\n E[i+1] = E[i] + spacing[dims[0] + i]\n\n # calc Down coordinates of vtk mesh\n D = np.zeros(dims[2]+1)\n D[0] = 0.0\n for i in range(dims[2]):\n D[i+1] = D[i] + spacing[dims[0] + dims[1] + i]\n\n # output to vtk format\n # first components read in reverse order!!\n mtNS = np.zeros((dims[0],dims[1],dims[2])) # North-to-South conversion\n n=0\n for idx_D in range(dims[2]):\n for idx_E in range(dims[1]):\n for idx_S in range(dims[0]):\n mtNS[(dims[0]-1)-idx_S,idx_E,idx_D] = mt[n]\n n += 1\n gridToVTK(VTKresist, N, E, D, cellData = {'resistivity' : mtNS})\n\n f.close()\n\n f = open(WSMTresp, 'r')\n\n # get station count\n respdata_firstline = f.readline().split()\n nstations = int(respdata_firstline[0])\n print('Stations ', nstations)\n\n # read North locations\n f.readline() #skip line\n N = np.zeros(nstations)\n i=0\n while i < nstations:\n respdata_nextlines = f.readline().split()\n for j in respdata_nextlines:\n N[i] = float(j)/1000.0\n i += 1\n\n # read East locations\n f.readline() #skip line\n E = np.zeros(nstations)\n i=0\n while i < nstations:\n respdata_morelines = f.readline().split()\n for j in respdata_morelines:\n E[i] = float(j)/1000.0\n i += 1\n f.close()\n\n\n # set Depths -- all stations at the surface!!\n D = np.zeros(nstations)\n\n # output to vtk format - dummy value for scalar field needed\n dummy = np.ones(nstations)\n #for j in range(nstations):\n # dummy[j] = 1.0\n\n print(np.shape(dummy),np.shape(N),np.shape(E),np.shape(D))\n\n pointsToVTK(VTKstations, N, E, D, data = {\"dummyvalue\" : dummy})\n\n\n\n print('Created Resistivity File: {0}.vtr '.format(VTKresist))\n print('Created Station File: {0}.vtu '.format(VTKstations))", "title": "" }, { "docid": "97c4b6f03b0151abd77143b85fdebf1d", "score": "0.574832", "text": "def save():\r\n with open(f'{self_folder}/{fileName}.txt', 'w') as new_data:\r\n for i in allLines.lines:\r\n new_data.write(f'{i} ')\r\n new_data.write('\\n')\r\n for i in controlPoints:\r\n new_data.write(f'{i} ')\r\n new_data.write('\\n')\r\n for i in polygonPoints:\r\n new_data.write(f'{i} ')\r\n print(f'файл \"{fileName}\" сохранён')", "title": "" }, { "docid": "d02f9e8089eb14e8d616df25ee9982b5", "score": "0.5737065", "text": "def write_outfile(self,outfile):\n \n file=open(outfile,'w')\n file.write(self.output_sol())\n file.close()", "title": "" }, { "docid": "a5b8a014c5212e7bdbc7e9e8106d512a", "score": "0.57293576", "text": "def save_data(self):\n # Unlogged initial positions (Step not updated by Webots)\n 
self.links[0, :, :] = self.links[1, :, :]\n self.joints[0, :, :] = self.joints[1, :, :]\n # Diff position to extract velocity\n # Save\n #os.makedirs(os.path.dirname(self.filename), exist_ok=True)\n np.savez(\n self.filename,\n links=self.links,\n joints=self.joints,\n network=self.network,\n **self.parameters\n )", "title": "" }, { "docid": "fee38e6bcbf2160257b20e3e05d41b63", "score": "0.5716604", "text": "def saveDataToFile(self, filename):\n fp = open(filename, 'w')\n for i in range(len(self.inputs)):\n try:\n vec = self.replacePatterns(self.inputs[i])\n for item in vec:\n fp.write(\"%f \" % item)\n except:\n pass\n try:\n vec = self.replacePatterns(self.targets[i])\n for item in vec:\n fp.write(\"%f \" % item)\n except:\n pass\n fp.write(\"\\n\")", "title": "" }, { "docid": "145daae86bab1fcf0fe6591bce1d0003", "score": "0.57068104", "text": "def writeTracers(self, file):\n file.write('1 ! itr_met 1:upwind 2:TVD\\n')\n file.write('3 ! itrtype(k), k=1, nope_global - 2: timeseries 3: initial value\\n')\n for x in xrange(self.grid.obNodes.nB):\n file.write('3\\n')\n file.write('0 ! inu_tr ! not valid?')", "title": "" }, { "docid": "c0d89997aa78f4161ad1e04f886feb62", "score": "0.5698708", "text": "def paraview(displacement,t):\n\n L = 20.0\n points = vtk.vtkPoints()\n myline = vtk.vtkLine()\n num_elems = len(displacement)\n num_nodes = num_elems+1\n displ_zero = [[0.0]]\n displ_list = np.vstack([displ_zero,displacement])\n mesh = vtk.vtkCellArray()\n\n\n for i in range(num_nodes):\n points.InsertNextPoint((i*L/(num_nodes-1)),0,0)\n for i in range(num_elems):\n myline.GetPointIds().SetId(0,i)\n myline.GetPointIds().SetId(1,i+1)\n mesh.InsertNextCell(myline)\n\n\n array = vtk.vtkDoubleArray()\n array.SetName(\"Displacement\")\n array.SetNumberOfComponents(1)\n for i in range(num_nodes):\n array.InsertNextTuple1(displ_list[i])\n grid = vtk.vtkUnstructuredGrid()\n grid.SetPoints(points)\n grid.SetCells(vtk.VTK_LINE, mesh)\n grid.GetPointData().SetScalars(array)\n\n\n writer = vtk.vtkXMLUnstructuredGridWriter()\n writer.SetFileName(\"Group_Elements\"+`t`+\".vtu\")\n writer.SetInputData(grid)\n writer.SetTimeStep(t*7200)\n writer.Write()\n\n return displ_list", "title": "" }, { "docid": "f3254206a357df085e8ad55298dfeffb", "score": "0.567553", "text": "def export_to_file():\n global points\n dirname = \"exported_files/\"\n filename = str(time.time()) + \".csv\"\n\n try:\n f = open(dirname + filename, \"w\")\n except Exception: #if dir isn't found, fallback\n f = open(filename, \"w\")\n\n for point in points:\n f.write(\"{}\\t{}\\n\".format(point.t, point.y))\n f.close()\n points = []\n messagebox.showinfo(\"\",\"{} written successfully!\".format(filename))", "title": "" }, { "docid": "9396762205d69cc8a2b813f6b9d43254", "score": "0.56688106", "text": "def save_integrals(self,folder=None):\n if folder is None:\n sys.exit('Please provide a folder to save the integrals.')\n else:\n if not self.is_built:\n self.build()\n os.makedirs(folder,exist_ok=True) # careful! will overwrite. 
\n\n np.savetxt(folder + '/enuc.dat',np.asarray(self.nuc_energy).reshape(1,))\n np.savetxt(folder + '/nbf.dat',np.asarray(self.nbasis,dtype=int).reshape(1,),fmt='%d')\n np.savetxt(folder + '/nelec.dat',np.asarray(self.nelec,dtype=int).reshape(1,),fmt='%d')\n np.savetxt(folder + '/s.dat',self.S)\n np.savetxt(folder + '/t.dat',self.T)\n np.savetxt(folder + '/v.dat',self.V)\n with open(folder + '/eri.dat','w') as f:\n for i,j,k,l in itertools.product(range(self.nbasis),range(self.nbasis),range(self.nbasis),range(self.nbasis)):\n print(i+1,j+1,k+1,l+1,self.TwoE[i,j,k,l],file=f)\n\n with open(folder + '/geometry.txt','w') as f:\n print(self.geometry_input,file=f)", "title": "" }, { "docid": "788da8ad766bf4811d6b09836e082e7f", "score": "0.56596243", "text": "def write_to_output_file(self):\n \n # Loop over data, skip invalid NO2 from each data: \n # (the if else statement here may be improved, can you skip the second condition if the first one is not met? If so, test land classification first, it will be quicker?)\n for i in range(len(self.NO2)):\n for j in range(self.NO2[i].shape[0]): \n if (math.isnan(self.NO2[i][j]) == False):\n tstr=\"{:8}\".format(self.line_count)+(\"{:15.6f}\"*4).format(self.lat[i][j],self.lon[i][j],self.sza[i][j],self.cld[i][j])+(\"{:15.6E}\"*3).format(self.NO2[i][j],self.pre[i][j],self.land[i][j])\n self.fId.write(tstr) # write tstr to the file \n self.fId.write(\"\\n\") # progresses to the next line\n self.line_count += 1 # increment the line number\n self.fId.close # close the txt file at end of loop", "title": "" }, { "docid": "3228086a452eff1abe306de95105c7ee", "score": "0.5658676", "text": "def nmtomeV():\n \n a = [13,12,11,10,8.5,7]\n b= array(pHdepfluorescenceTrial2())\n c = array([0,0,0,0,0,0,0,0,0])#pHdepfluorescence())\n d = array(MPAdots_pHdep())\n \n average_fluorshift = (1240000/b[0]+ 1240000/c[0]-1240000/b[0][0]-1240000/c[0][0])/2\n average_absshift = (1240000/b[1]+ 1240000/c[1]-1240000/b[1][0]-1240000/c[1][0])/2\n\n \n b[0] = 1240000/b[0]-1240000/422\n b[1] = 1240000/b[1]-1240000/422\n c[0] = 1240000/c[0]-1240000/422\n c[1] = 1240000/c[1]-1240000/422\n d[1] = 1240000/d[1]-1240000/422\n \n figure()\n print c[1]\n plot(a,c[0],'s-')\n plot(a,c[1],'s-')\n plot(a,b[0],'s-')\n plot(a,b[1],'s-')\n plot([13,12,11,10,8.5,7,5,4,3],d[1],'s-')\n \n savetxt('/home/chris/Desktop/nmtoeV.txt', transpose([a,average_absshift,average_fluorshift ]),header = 'pH absorbance_peak_(nm) fluorescence_peak_(nm)') \n \n legend(['fluorPPA1','absPPA1','fluorPPA2','absPPA2','absMPA'])\n xlabel('pH')\n ylabel('shift in band gap (meV)')\n return 0", "title": "" }, { "docid": "0713f5826775b569ad061907bf6e5b75", "score": "0.56557184", "text": "def write_output(self,\n filename: str) -> None:\n\n if self.radius is not None:\n fits.writeto(f'{filename}_radius.fits', self.radius, overwrite=True)\n\n if self.scatter is not None:\n fits.writeto(f'{filename}_scat_angle.fits', np.degrees(self.scatter), overwrite=True)\n\n if self.im_deproj is not None:\n fits.writeto(f'{filename}_deprojected.fits', self.im_deproj, overwrite=True)\n\n if self.im_scaled is not None:\n fits.writeto(f'{filename}_r2_scaled.fits', self.im_scaled, overwrite=True)\n\n if self.stokes_i is not None:\n fits.writeto(f'{filename}_total_intensity.fits', self.stokes_i, overwrite=True)\n\n if self.phase is not None:\n if self.image_type == 'polarized':\n header = 'Scattering angle (deg) - Normalized polarized flux - Error ' \\\n '- Normalized total flux - Error'\n\n else:\n header = 'Scattering angle (deg) - Normalized total flux 
- Error'\n\n np.savetxt(f'{filename}_phase_function.dat', self.phase, header=header)", "title": "" }, { "docid": "8ba8edc273ce94e286f4774072e5ec8a", "score": "0.5650444", "text": "def saveToFile(inval, outval):\n path = os.getcwd()+\"/Werkzeuge/params.dat\"\n myfile = open(path, 'w+')\n \n # write the file\n myfile.write(\"***** Crlcalc Value File *****\\n\")\n myfile.write(\"\\n> Input Values\\n\")\n for value in inval.params:\n myfile.write(value + \" \" + str(inval.getValueForUI(value)) + \"\\n\")\n myfile.write(\"\\n> Output Values\\n\")\n for value in outval.params:\n myfile.write(value + \" \" + str(outval.getValueForUI(value)) + \"\\n\")\n \n myfile.close()", "title": "" }, { "docid": "3f98d142e18c8ddb145c56bf5667c0d5", "score": "0.5637754", "text": "def main():\n\n #DEFINE FALCON 9:\n \n F9S1 = Stage(\"Stage 1\", 7426e3, 0.3, 10.75, 282, 27.2e3, 411e3, 116e3)\n F9S2 = Stage(\"Stage 2\", 934e3, 0.3, 10.75, 348, 4.5e3, 111.5e3, 0)\n \n \n #DEFINE INITIAL CONDITIONS:\n # UNITS ARE M AND M/S\n \n x0 = 0\n y0 = 6378e3\n vx0 = 0\n vy0 = 0\n \n STATE_0 = np.array([x0, y0, vx0, vy0])\n \n #Integration time:\n \n Tf = F9S1.tb\n \n T_array = np.arange(0,Tf,1)\n \n Stages = [F9S1, F9S2]\n \n sol = odeint(EqOfM, STATE_0, T_array, args = (Stages,))\n \n print(\"End\")\n \n rx = sol[:,0]\n ry = sol[:,1]\n vx = sol[:,2]\n vy = sol[:,3]\n \n h = (rx**2 + ry**2)**0.5 - 6378e3\n \n plt.figure()\n plt.plot(T_array, vy)\n plt.figure()\n plt.plot(T_array, h)\n plt.title(\"Altitude\")", "title": "" }, { "docid": "ccf7229ac868ff8c9b45dda12e4b005f", "score": "0.56278384", "text": "def batchVis(c1File,particleFile,step,saveAs):\n import paraview.simple as pv\n # visualize a vtk file \n c1 = pv.LegacyVTKReader(FileNames=c1File)\n p = pv.LegacyVTKReader(FileNames=particleFile)\n renderView1 = pv.GetActiveViewOrCreate('RenderView')\n renderView1.ViewSize = [1298, 860]\n renderView1.Background = [1.0, 1.0, 1.0]\n renderView1.InteractionMode = '2D'\n pDisplay = pv.Show(p, renderView1)\n c1Display = pv.Show(c1, renderView1)\n\n # create particle glyphs\n glyph = pv.Glyph(Input=p,GlyphType=\"Sphere\")\n glyph.ScaleFactor = 1.0\n glyph.GlyphMode = 'All Points'\n glyph.GlyphType.Radius = 1.0\n glyph.GlyphType.ThetaResolution = 20\n glyph.GlyphType.PhiResolution = 20\n glyph.Scalars = ['POINTS','radius']\n glyph.Vectors = ['POINTS','None']\n glyph.ScaleMode = 'scalar'\n\n # show data in view\n glyphDisplay = pv.Show(glyph, renderView1)\n pv.ColorBy(glyphDisplay, None)\n pv.SetActiveSource(c1)\n pv.ColorBy(c1Display, ('POINTS', 'c1'))\n c1Display.RescaleTransferFunctionToDataRange(True)\n c1Display.SetRepresentationType('Volume')\n\n # make box outline\n # box = pv.Box()\n # box.XLength = 128.0\n # box.YLength = 128.0\n # box.ZLength = 64.0\n # box.Center = [64.0, 64.0, 32.0]\n # boxDisplay = pv.Show(box, renderView1)\n # boxDisplay.SetRepresentationType('Outline')\n # boxDisplay.AmbientColor = [0.0, 0.0, 0.0]\n\n # set coloring of c1\n c1LUT = pv.GetColorTransferFunction('c1')\n c1LUT.RGBPoints = [0.006000000052154064, 0.231373, 0.298039, 0.752941, 0.5120000033639371, 0.865003, 0.865003, 0.865003, 1.0180000066757202, 0.705882, 0.0156863, 0.14902]\n c1LUT.ColorSpace = 'Diverging'\n c1LUT.BelowRangeColor = [0.0, 0.0, 0.0]\n c1LUT.AboveRangeColor = [1.0, 1.0, 1.0]\n c1LUT.NanColor = [1.0, 1.0, 0.0]\n c1LUT.Discretize = 1\n c1LUT.NumberOfTableValues = 256\n c1LUT.ScalarRangeInitialized = 1.0\n c1LUT.AllowDuplicateScalars = 1\n\n c1PWF = pv.GetOpacityTransferFunction('c1')\n c1PWF.Points = [0.0, 0.05, 0.5, 0.0, 0.3, 
0.05, 0.5, 0.0, 0.4, 0.5, 0.5, 0.0, 0.6, 0.5, 0.5, 0.0, 0.7, 0.05, 0.5, 0.0, 1., 0.05, 0.5, 0.0]\n\n # annotate time step in rendering\n # text = pv.Text\n # text.Text = 'Step '+str(step)\n # textDisplay = pv.Show(text,renderView1)\n # textDisplay.Color = [0.0, 0.0, 0.0]\n # textDisplay.WindowLocation = 'UpperCenter'\n\n # reset view to fit data\n renderView1.ResetCamera()\n # pv.Render()\n\n # save screen shot\n viewLayout1 = pv.GetLayout()\n print(saveAs)\n pv.SaveScreenshot(saveAs, layout=viewLayout1, magnification=1, quality=100)\n\n # clean up\n # pv.Delete(box)\n pv.Delete(glyph)\n pv.Delete(p)\n pv.Delete(c1)\n del c1\n del p\n del glyph\n # del box", "title": "" }, { "docid": "8c2d1fc602f85f2a55a586c117add7cd", "score": "0.5622732", "text": "def saveVectorsVTKFile(spts, vectors, filename):\n npts = spts.shape[0]\n\n vpointData = vtk.vtkDoubleArray()\n vpointData.SetNumberOfComponents(3)\n vpointData.SetNumberOfTuples(npts)\n vpointData.SetVoidArray(spts, npts*3, 1)\n\n vpts = vtk.vtkPoints()\n vpts.SetData(vpointData)\n\n sgrid = vtk.vtkUnstructuredGrid()\n sgrid.SetPoints(vpts)\n\n # cloud of points\n sgrid.Allocate()\n ptIds = vtk.vtkIdList()\n ptIds.SetNumberOfIds(1)\n for i in range(npts):\n ptIds.SetId(0, i)\n sgrid.InsertNextCell(vtk.VTK_VERTEX, ptIds)\n\n vdata = vtk.vtkDoubleArray()\n vdata.SetNumberOfComponents(3)\n vdata.SetNumberOfTuples(npts)\n vdata.SetVoidArray(vectors, npts*3, 1)\n vdata.SetName('basis_function_vectors')\n sgrid.GetPointData().SetScalars(vdata)\n\n writer = vtk.vtkUnstructuredGridWriter()\n writer.SetFileName(filename)\n writer.SetInputData(sgrid)\n writer.Update()", "title": "" }, { "docid": "392ce2c7d68555d07d8a9e40a3bc7b62", "score": "0.56215435", "text": "def write_zeta_vtk(zeta, zeta_ref, filename_root):\n\n for i_surf in range(len(zeta)):\n\n filename = filename_root + \"_%02u.vtu\" % (i_surf,)\n _, M, N = zeta[i_surf].shape\n\n M -= 1\n N -= 1\n point_data_dim = (M + 1) * (N + 1)\n panel_data_dim = M * N\n\n coords = np.zeros((point_data_dim, 3))\n conn = []\n panel_id = np.zeros((panel_data_dim,), dtype=int)\n panel_surf_id = np.zeros((panel_data_dim,), dtype=int)\n point_struct_id = np.zeros((point_data_dim,), dtype=int)\n point_struct_mag = np.zeros((point_data_dim,), dtype=float)\n\n counter = -1\n # coordinates of corners\n for i_n in range(N + 1):\n for i_m in range(M + 1):\n counter += 1\n coords[counter, :] = zeta[i_surf][:, i_m, i_n]\n\n counter = -1\n node_counter = -1\n for i_n in range(N + 1):\n # global_counter = aero.aero2struct_mapping[i_surf][i_n]\n for i_m in range(M + 1):\n node_counter += 1\n # point data\n # point_struct_id[node_counter]=global_counter\n point_struct_mag[node_counter] = \\\n np.linalg.norm(zeta[i_surf][:, i_m, i_n] \\\n - zeta_ref[i_surf][:, i_m, i_n])\n\n if i_n < N and i_m < M:\n counter += 1\n else:\n continue\n\n conn.append([node_counter + 0,\n node_counter + 1,\n node_counter + M + 2,\n node_counter + M + 1])\n # cell data\n panel_id[counter] = counter\n panel_surf_id[counter] = i_surf\n\n ug = tvtk.UnstructuredGrid(points=coords)\n ug.set_cells(tvtk.Quad().cell_type, conn)\n ug.cell_data.scalars = panel_id\n ug.cell_data.scalars.name = 'panel_n_id'\n ug.cell_data.add_array(panel_surf_id)\n ug.cell_data.get_array(1).name = 'panel_surface_id'\n\n ug.point_data.scalars = np.arange(0, coords.shape[0])\n ug.point_data.scalars.name = 'n_id'\n # ug.point_data.add_array(point_struct_id)\n # ug.point_data.get_array(1).name = 'point_struct_id'\n ug.point_data.add_array(point_struct_mag)\n 
ug.point_data.get_array(1).name = 'point_displacement_magnitude'\n\n write_data(ug, filename)", "title": "" }, { "docid": "8ef8acb6491fac7cdb7652e643ee95b8", "score": "0.5618947", "text": "def vegajson():\n return write_to_file(rawinput=json,filetype='json',output_path=args.o,engine_name=args.engine,algorithm_name=args.algorithm,suffix=plot_obj.file_suffix,verbose=args.v)", "title": "" }, { "docid": "b772c21cef17be4fa1f1d13ca9ea8caf", "score": "0.56046146", "text": "def save_VTKs(self, coordinates_function=None):\n # Create the directory\n directory = \"vtks/{}_{:05d}\".format(self.name, self.iterations)\n self.VTK.directory = directory\n\n self.band.shape = (self.n_images, -1)\n\n # We use Ms from the simulation assuming that all the images are the\n # same\n for i in range(self.n_images):\n self.VTK.reset_data()\n # We will try to save for the micromagnetic simulation (Ms) or an\n # atomistic simulation (mu_s) TODO: maybe this can be done with an:\n # isinstance\n if self.sim._micromagnetic:\n self.VTK.save_scalar(self.sim.Ms, name='M_s')\n else:\n self.VTK.save_scalar(self.sim.mu_s, name='mu_s')\n\n if coordinates_function:\n self.VTK.save_vector(\n coordinates_function(self.band[i]).reshape(-1, 3),\n name='spins'\n )\n else:\n self.VTK.save_vector(\n self.band[i].reshape(-1, 3),\n name='spins'\n )\n\n self.VTK.write_file(step=i)\n\n self.band.shape = (-1, )", "title": "" }, { "docid": "d11f2d983163e0dcc3c53fa19998d5f0", "score": "0.56009704", "text": "def write(self, index=0):\n self.write_poscar(index=index)\n self.write_incar(index=index)\n self.write_kpoints(index=index)", "title": "" }, { "docid": "9ebe982051b98b54b45f44aebec5124d", "score": "0.55926865", "text": "def write_file_and_plot(f, par, Ez_values, energies, parameters, parameters_values):\n\t# If asymmetric device :\n\tif par.asymmetric == True:\n\t\t# Physical parameters in title : \n\t\tplt.title(\"$N_x$:%g,$N_x^A$:%g,$N_y$:%g,$\\mu$:%g,$\\Gamma$:%g,$\\phi_L$:%g,$\\phi_R$:%g,$a_x$:%.0f,$a_y$:%.0f\" % (\n\t\t\tpar.Nx, par.Nxasymmetric, par.Ny, par.mu, par.Gamma, par.phiL, par.phiR, par.ax, par.ay), fontsize=16)\n\t\tplt.xlabel(\"$E_z\\, [meV]$\", fontsize=16)\n\t\tplt.ylabel(\"$E_n\\, [meV]$\", fontsize=16)\n\t\t# Saving figure : \n\t\tf.savefig(\"EvsEz_Asym_%s_%g_%g_%g_%g_%g_%g_%g_%g_%g_'alphabar'_%g_%g_%g.pdf\" % (str(datetime.date.today(\n\t\t)), par.Nx, par.Ny, par.LM, par.LL, par.LR, par.LY, par.ax, par.ay, par.mu, par.Gamma, par.phiL, par.phiR))\n\t\toutfile = open(str(\"%s_datalog_Asym_EvsEz_%g_%g_%g_%g_%g_%g_%g_'alphabar'_%g_%g_%g.txt\" % (str(\n\t\t\tdatetime.date.today()), par.Nx, par.Ny, par.LM, par.LL, par.LR, par.LY, par.mu, par.Gamma, par.phiL, par.phiR)), \"w\")\n\t# If symmetric device :\n\telse:\n\t\t# Physical parameters in title : \n\t\tplt.title(\"$N_x$:%g,$N_y$:%g,$\\mu$:%g,$\\Gamma$:%g,$\\phi_L$:%g,$\\phi_R$:%g,$a_x$:%.0f,$a_y$:%.0f\" % (\n\t\t\tpar.Nx, par.Ny, par.mu, par.Gamma, par.phiL, par.phiR, par.ax, par.ay), fontsize=16)\n\t\tplt.xlabel(\"$E_z\\, [meV]$\", fontsize=16)\n\t\tplt.ylabel(\"$E_n\\, [meV]$\", fontsize=16)\n\t\t# Saving figure :\n\t\tf.savefig(\"EvsEz_Sym_%s_%g_%g_%g_%g_%g_%g_%g_%g_%g_'alphabar'_%g_%g_%g.pdf\" % (str(datetime.date.today(\n\t\t)), par.Nx, par.Ny, par.LM, par.LL, par.LR, par.LY, par.ax, par.ay, par.mu, par.Gamma, par.phiL, par.phiR))\n\t\toutfile = open(str(\"%s_datalog_Sym_EvsEz_%g_%g_%g_%g_%g_%g_%g_'alphabar'_%g_%g_%g.txt\" % (str(\n\t\t\tdatetime.date.today()), par.Nx, par.Ny, par.LM, par.LL, par.LR, par.LY, par.mu, par.Gamma, par.phiL, par.phiR)), 
\"w\")\n\n\tEz_values_str = \"np.linspace(%g,%g,%g)\" % (\n\t\tEz_values[0], Ez_values[-1], len(Ez_values))\n\t# Writing parameters, Zeeman field values and eigenenergies to file :\n\toutfile.write(\"Parameters=%s;\\nEz_values=...;\\nenergies=...: \\n%s\\n%s\\n%s.\\n\\n\" % (\n\t\tparameters, str(parameters_values), str(Ez_values_str), str(energies)))\n\toutfile.close()", "title": "" }, { "docid": "afa3d9b0b84fddf1ccc2a6f566db272f", "score": "0.5592244", "text": "def _write(self):\n # Reshape self.v2\n out = np.array(self.v2).reshape((self.nfreq, -1), order='F')\n\n # Write data to script directory\n shape = self.v_dset.shape\n self.v_dset.resize((self.nfreq, shape[1] + len(out[0])))\n self.t_dset.resize((1, shape[1] + len(out[0])))\n self.v_dset[:, shape[1]:] = out[:,:]\n self.t_dset[0, shape[1]:] = self.t[:]\n print 'Variance Estimator: writing variance data to', self.var_filename\n\n # Clear self.v2 and self.t\n self.v2 = []\n self.t = []", "title": "" }, { "docid": "de8ef3eed20ef0029be88902a8648a10", "score": "0.5587917", "text": "def write_parameters(self, output_file):\n sys.stdout.write(\"Writing t(f|e) values to output file...\\n\")\n\n file = codecs.open(output_file+'.tfe', encoding='utf-8', mode='w')\n for e in self.t:\n for f in self.t[e]:\n file.write(\"%s %s %E\\n\" % (e, f, self.t[e][f]))\n file.close()\n\n if self.model == 1: return\n\n sys.stdout.write(\"Writing q(j|i,l,m) values to output file...\\n\")\n\n file = codecs.open(output_file+'.qji', encoding='utf-8', mode='w')\n for (i, l, m) in self.q:\n for j in self.q[(i, l, m)]:\n file.write(\"%d %d %d %d %E\\n\" % (i, l, m, j, self.q[(i, l, m)][j]))\n file.close()", "title": "" }, { "docid": "127bf5570349baad72ca0658723c3e21", "score": "0.5581412", "text": "def _write_csv(self):\r\n\r\n file_name = self._model_name + '.csv'\r\n\r\n ## Dynamic data csv\r\n if self.options.IMODE > 3:\r\n #Start with time\r\n length = np.size(self.time)\r\n csv_data = np.hstack(('time',np.array(self.time).flatten().astype(object)))\r\n first_array = True\r\n ## SS data\r\n else:\r\n first_array = False\r\n if self.time is not None:\r\n print(\"Warning: model time only used for dynamic modes (IMODE>3)\")\r\n\r\n #check all parameters and arrays\r\n for vp in self._variables+self._parameters:\r\n #Only save csv data if the user changed the value (changes registered in vp.value.change)\r\n if vp.value.change is False:\r\n continue\r\n else:\r\n if first_array == False:\r\n length = np.size(np.array(vp.value).flatten())\r\n if self.options.IMODE in (1,3) and length > 1:\r\n raise Exception('This steady-state IMODE only allows scalar values.')\r\n elif self.options.IMODE == 2 and length == 1:\r\n #in MPU, the first vp checked could be a scalar value (FV, param, var initial guess)\r\n #but the CV is likely longer so skip scalar values (they will be set in the APM file)\r\n continue\r\n\r\n\r\n if vp.value.change is True: #Save the entire array of values\r\n #skip variable if its value is a string (ie symbolic initialization)\r\n if isinstance(vp.VALUE.value,GK_Operators):\r\n #reset change indicator\r\n vp.value.change = False\r\n #do nothing else, go to next variable\r\n continue\r\n\r\n #discretize all values to arrays\r\n if not isinstance(vp.VALUE.value, (list,np.ndarray)):\r\n vp.VALUE = np.ones(length)*vp.VALUE\r\n elif len(vp.VALUE) == 1:\r\n vp.VALUE = np.ones(length)*vp.VALUE[0]\r\n #confirm that previously discretized values are the right length\r\n elif np.size(vp.VALUE.value) != length:\r\n raise Exception('Data arrays must have the 
same length, and match time discretization in dynamic problems')\r\n #group data with column header\r\n t = np.hstack((vp.name,np.array(vp.VALUE.value).flatten().astype(object)))\r\n\r\n elif isinstance(vp.value.change,list): #only certain elements should be saved\r\n if not isinstance(vp.VALUE.value, (list,np.ndarray)):\r\n vp.VALUE.value = np.ones(length)*vp.VALUE.value\r\n elif len(vp.VALUE) == 1:\r\n vp.VALUE = np.ones(length)*vp.VALUE[0]\r\n t = np.array(vp.VALUE).astype(object)\r\n t[:] = ' '\r\n t[vp.value.change] = np.array(vp.value)[vp.value.change]\r\n t = np.hstack((str(vp),t.flatten().astype(object)))\r\n\r\n else: #somebody broke value.change\r\n raise Exception('Variable value modification monitor malfunction.')\r\n\r\n #reset change indicator\r\n vp.value.change = False\r\n\r\n #if a measurement exists, save a nonnumeric in\r\n #value array to allow measurement to be read in\r\n if hasattr(vp,'MEAS'):\r\n if vp.MEAS != None:\r\n #vp.VALUE = np.array(vp.VALUE).astype(object)\r\n if self.options.IMODE in [5,8] and vp.type=='CV':\r\n #measurements in estimation go at the end of the horizon\r\n #FDELAY shifts the location of the measurement\r\n t[-1-vp.FDELAY] = 'measurement'\r\n else:\r\n t[1] = \"measurement\"\r\n\r\n #reset MEAS so it doesn't get repeated on next solve\r\n vp.MEAS = None\r\n\r\n #If a value was specified through a connection, ensure consistency in the\r\n #csv file, otherwise the requested specified value will be overridden by\r\n #whatever initialization value is in the csv\r\n if hasattr(vp,'_override_csv'):\r\n for i in vp._override_csv: #for each tuple of (position,value)\r\n #set value in t array\r\n t[i[0]+1] = i[1] #index is +1 because of prepended header\r\n\r\n if first_array == False:\r\n csv_data = t\r\n first_array = True\r\n else:\r\n try:\r\n csv_data = np.vstack((csv_data,t))\r\n except ValueError:\r\n raise Exception('All variable value arrays must be the same length (and match the length of model time in dynamic problems).')\r\n\r\n #save array to csv\r\n if first_array == False: #no data\r\n self.csv_status = 'none'\r\n else:\r\n # create header separately for potential long variable names >=25 in length\r\n if csv_data.ndim==1:\r\n # with only one variable\r\n hdr = csv_data[0]\r\n np.savetxt(os.path.join(self._path,file_name), csv_data[1:],\\\r\n delimiter=\",\",comments='',header=hdr,fmt='%1.25s')\r\n else:\r\n # with multiple variables\r\n hdr = csv_data[0,0] + ',' + ','.join([csv_data[i,0] for i in range(1,np.size(csv_data,0))])\r\n np.savetxt(os.path.join(self._path,file_name), csv_data[:,1:].T,\\\r\n delimiter=\",\",comments='',header=hdr,fmt='%1.25s')\r\n self.csv_status = 'generated'", "title": "" }, { "docid": "264c54572f1600c42ad8194b09d9bdcb", "score": "0.55731857", "text": "def export(self, output_directory='.', tag='far_field'):\n if not os.path.exists(output_directory):\n os.makedirs(output_directory)\n np.savetxt(output_directory + '/' + tag + '_TE.dat', self.signal[0, :, :],\n header='Each line corresponds to a polar angle, each column corresponds to an azimuthal angle.')\n np.savetxt(output_directory + '/' + tag + '_TM.dat', self.signal[1, :, :],\n header='Each line corresponds to a polar angle, each column corresponds to an azimuthal angle.')\n np.savetxt(output_directory + '/' + tag + '_polar_TE.dat', self.azimuthal_integral()[0, :],\n header='Each line corresponds to a polar angle, each column corresponds to an azimuthal angle.')\n np.savetxt(output_directory + '/' + tag + '_polar_TM.dat', self.azimuthal_integral()[1, 
:],\n header='Each line corresponds to a polar angle, each column corresponds to an azimuthal angle.')\n np.savetxt(output_directory + '/polar_angles.dat', self.polar_angles,\n header='Polar angles of the far field in radians.')\n np.savetxt(output_directory + '/azimuthal_angles.dat', self.azimuthal_angles,\n header='Azimuthal angles of the far field in radians.')", "title": "" }, { "docid": "daee3eb4573fdca21cea85f1b2c8b1ae", "score": "0.5542305", "text": "def writeTextFiles(self, save_path=None, ptol=0.10):\n \n if save_path == None:\n try:\n svpath = os.path.dirname(self.mt_list[0].fn)\n except TypeError:\n raise IOError('Need to input save_path, could not find a path')\n else:\n svpath = save_path\n \n if self.resxy.mean() == 0 :\n self.get_rp_arrays()\n \n header_list = ['{0:^10}'.format('period(s)')]+\\\n ['{0:^8}'.format(ss) for ss in self.station_list]+['\\n']\n \n fn_dict = {'resxx':self.resxx, \n 'resxy':self.resxy, \n 'resyx':self.resyx,\n 'resyy':self.resyy,\n 'phasexx':self.phasexx,\n 'phasexy':self.phasexy,\n 'phaseyx':self.phaseyx,\n 'phaseyy':self.phaseyy}\n \n #write the arrays into lines properly formatted\n t1_kwargs = {'spacing':'{0:^10} ', 'value_format':'{0:.2e}', \n 'append':False, 'add':False}\n \n tr_kwargs = {'spacing':'{0:^8}', 'value_format':'{0: .2f}', \n 'append':False, 'add':False}\n \n tp_kwargs = {'spacing':'{0:^8}', 'value_format':'{0: .2f}', \n 'append':False, 'add':False}\n \n \n for key in list(fn_dict.keys()):\n fid = file(os.path.join(svpath, 'PseudoSection.'+key), 'w')\n fid.write(''.join(header_list))\n for ii, per in enumerate(self.plot_period):\n if key[0] == 'r':\n line = [mtpl._make_value_str(per, **t1_kwargs)]+\\\n [mtpl._make_value_str(rr, **tr_kwargs) \n for rr in fn_dict[key][ii]]+['\\n']\n elif key[0] == 'p':\n line = [mtpl._make_value_str(per, **t1_kwargs)]+\\\n [mtpl._make_value_str(rr, **tp_kwargs) \n for rr in fn_dict[key][ii]]+['\\n']\n fid.write(''.join(line))\n fid.close()\n \n print('Wrote files to: '+\\\n os.path.join(svpath, 'PseudoSection.component'))", "title": "" }, { "docid": "ad9931173db63c5a019a2d32ab86945f", "score": "0.5532155", "text": "def save_to_file(sigma,n_sigma,n_spectra,xsel,ysel,dx,dy,components,mix):\n \n n_comp = int(n_components.get())\n file_path = filename.replace('.txt','_save_{}_composantes.txt'.format(n_comp))\n f = open(file_path,'wb')\n np.savetxt(f,[[n_comp,n_sigma,n_spectra,dx,dy]],\n header='n_components n_sigma n_spectra dx dy')\n A = np.transpose(np.vstack((sigma,components)))\n np.savetxt(f,A,header='\\t sigma \\t\\t\\t components')\n B = np.transpose(np.vstack((xsel,ysel,mix.transpose())))\n np.savetxt(f,B,header='\\t x \\t\\t\\t y \\t\\t\\t mix')\n f.close()\n return", "title": "" }, { "docid": "9659e264db611b6bb37521a7688ae4b4", "score": "0.55301106", "text": "def mesh_to_legacy_vtk(odb, part_name, output_file = '', step='', frame=-1):\n try:\n # from odbAccess import openOdb\n from abaqusConstants import CENTROID, NODAL\n part = odb.rootAssembly.instances[part_name.upper()]\n except ImportError as ex:\n print(\"Error importing from Abaqus library: \" + str(ex))\n except KeyError as ex:\n instances = \"\"\n for key, _ in odb.rootAssembly.instances.items():\n instances += \" \\'\"+ key + \"\\',\"\n raise IOError(\"Could not find instance in odb-file: \" + str(ex) +\". 
Instance(s) found: \" + instances)\n\n num_nodes = len(part.nodes)\n num_elements = len(part.elements)\n\n if output_file == '':\n vtk_path = part_name + '.vtk'\n else:\n vtk_path = output_file\n\n with open(vtk_path, 'w') as vtk_file:\n\n if step == '':\n step = odb.steps.items()[0][0]\n\n # For debugging\n print('Generating VTK file '+ vtk_path)\n\n # Write VTK header data\n vtk_file.write('# vtk DataFile Version 4.0')\n vtk_file.write('\\nAbaqus part ' + part_name)\n vtk_file.write('\\nASCII' )\n vtk_file.write('\\nDATASET UNSTRUCTURED_GRID')\n\n # Append node coordinates to file\n print('Appending '+ str(num_nodes)+ ' node points to VTK file')\n vtk_file.write( '\\n\\nPOINTS ' + str(num_nodes) + ' double' )\n for i in range(num_nodes):\n node_position = part.nodes[i].coordinates\n vtk_file.write('\\n')\n for j in range(3):\n vtk_file.write(str(node_position[j]) + ' ')\n\n # Append cell connectivity to file\n cell_types = list()\n\n # Dictionary to translate Abaqus element types to corresponding VTK Legacy\n cell_type_dict = {'C3D8R': '12', 'C3D8': '12', 'C3D4': '10', 'C3D10': '24', 'CPS3': '5', 'CPE3': '5', 'CPS4R': '5', 'CPE4R': '5', 'CPE8R': '23', 'CPS8R': '23'}\n\n # Total number of data points in list of element connectivity\n num_cell_data_points = sum(len(element.connectivity) for element in part.elements)\n\n # Append elements to vtk file\n print('Appending {} elements points to VTK file'.format(str(num_elements)))\n vtk_file.write('\\n\\nCELLS ' + str(num_elements) + ' ' + str(num_cell_data_points+num_elements))\n for i in range(num_elements):\n cell_types.append(cell_type_dict[part.elements[i].type])\n connectivity = list(part.elements[i].connectivity)\n numNodesInElement = len(connectivity)\n vtk_file.write('\\n')\n vtk_file.write(str(numNodesInElement)+' ')\n for pos in connectivity:\n vtk_file.write(str(pos-1) + ' ')\n\n # Append Cell types to file.\n vtk_file.write('\\n\\nCELL_TYPES '+ str(num_elements))\n for cellType in cell_types:\n vtk_file.write('\\n'+cellType)\n\n findley_csv_file = 'C:/temp/conf/QuadQuad/max_findley.csv'\n frame = odb.steps[step].frames[frame]\n field = frame.fieldOutputs['S'].getSubset(region=part, position=CENTROID)\n vtk_file.write('\\n\\nCELL_DATA '+ str(len(field.values)))\n csv_to_legacy_vtk(vtk_file, findley_csv_file)\n cellDataToLegacyVTK(vtk_file, field, frame)\n cellDataToLegacyVTK(vtk_file, field, frame, invariant=\"MISES\")\n \n field = frame.fieldOutputs['LE'].getSubset(region=part, position=CENTROID)\n cellDataToLegacyVTK(vtk_file, field, frame)\n\n # Write nodal stress tensors\n # Abaqus does not record nodal stresses by default, either activate it,\n # or interpolate average the stresses from integration points?\n vtk_file.write('\\n\\nPOINT_DATA '+ str(num_nodes))\n #vtk_file.write('\\nTENSORS NodalCauchyStress double')\n #vtk_file.write('\\nLOOKUP_TABLE default')\n field_values = frame.fieldOutputs['S'].getSubset(region=part,\n position=NODAL).values\n\n # Append global node stress tensors. IF they exsists. 
Abaqus\n # does not record global node stresses by default, but can be\n # activated using These are averaged from the\n # surrounding elements.\n if len(field_values) == num_nodes:\n for tensor in field_values:\n vtk_file.write('\\n' + str(tensor.data[0]) + ' ') # Sxx\n vtk_file.write(str(tensor.data[3]) + ' ') # Sxy\n vtk_file.write(str(tensor.data[4])) # Sxz\n vtk_file.write('\\n' + str(tensor.data[3]) + ' ') # Syx\n vtk_file.write(str(tensor.data[1]) + ' ') # Syy\n vtk_file.write(str(tensor.data[5])) # Syz\n vtk_file.write('\\n' + str(tensor.data[4]) + ' ') # Szx\n vtk_file.write(str(tensor.data[5]) + ' ') # Szy\n vtk_file.write(str(tensor.data[2]) + '\\n') # Szz\n\n # Append displacement vectors at nodes.\n vtk_file.write('\\nVectors NodalDisplacements float')\n displacement_field = frame.fieldOutputs['U'].getSubset(region=part,\n position=NODAL).values\n for vector in displacement_field:\n if len(vector.data) == 3:\n vtk_file.write('\\n'+ str(vector.data[0]) + ' ' + str(vector.data[1]) + ' ' + str(vector.data[2]))\n elif len(vector.data) == 2:\n vtk_file.write('\\n'+ str(vector.data[0]) + ' ' + str(vector.data[1]) + ' 0.0')\n\n return", "title": "" }, { "docid": "6104f3ac3412147dd4e14b5c5083b881", "score": "0.5522887", "text": "def write_new_odv(self, output_dir='D:/temp', file_name=False, df=False, metadata=False, **kwargs):\n\n if df:\n self.df = df\n\n if metadata:\n self.metadata = metadata\n\n if not all([len(self.df), self.metadata]):\n print('No data loaded in file object for file {}'.format(self.file_path))\n return\n\n if not file_name:\n file_name = self.file_path.split('\\\\')[-1]\n\n with codecs.open(os.path.join(output_dir, file_name), 'w',\n encoding=kwargs.get('encoding_out', kwargs.get('encoding', 'cp1252'))) as fid_out:\n\n for line in self.metadata:\n line = line.strip()\n if line:\n fid_out.write(line + '\\n')\n\n data_dict = self.df.to_dict('split')\n\n fid_out.write('\\t'.join(list(self.df))+'\\n')\n\n # if file_name == 'Perca_fluviatilis_6719_20090817.txt':\n # print('=' * 50)\n # print('=' * 50)\n # print('=' * 50)\n # print('=' * 50)\n # print('=' * 50)\n # print('=' * 50)\n # print('\\n'.join(self.metadata))\n # print(self.metadata)\n for item in data_dict['data']:\n try:\n fid_out.write('\\t'.join(item)+'\\n')\n except:\n print('='*50)\n print(file_name)\n print(item)\n raise Exception", "title": "" }, { "docid": "83b7932d0158d37e4a677a84b09cad5b", "score": "0.55180585", "text": "def WriteData(self):\n \n Header = ',%s %d %% %.2f nm %.3f Ratio\\n%d pair DBR\\n%.2f nm porous layer\\n%.2f nm non-porous layer\\n%d %% Porosity\\n\\n'%(self.label,self.Phi*100, self.Period, self.T_Rat, self.NLayers, self.T_Por, self.T_GaN, self.Phi*100)\n FilePath = '%sTMM_%s_%dPr_%dnm_%d-%d_%dPc.csv'%(self.path,self.label,self.NLayers, self.Period, self.T_Por, self.T_GaN, self.Phi*100)\n Output=np.column_stack((self.Wav, self.Rnorm))\n np.savetxt(FilePath,Output, header= 'TMM Simulation result\\n'+Header+'Wavelength,Reflectance\\nnm,', comments='',delimiter = ',')", "title": "" }, { "docid": "2c3be3b595fe1949c1df991968413e4c", "score": "0.5513407", "text": "def trace_obj(self) -> None:\n n_file = self.get_executable()\n # matplotlib\n plt.figure(figsize=(4, 3))\n plt.rcParams[\"font.size\"] = 15\n plt.rcParams[\"axes.linewidth\"] = 1.5\n plt.rcParams[\"xtick.major.width\"] = 1.5\n plt.rcParams[\"ytick.major.width\"] = 1.5\n plt.rcParams[\"lines.linewidth\"] = 1.2\n plt.gca().spines[\"right\"].set_visible(False)\n plt.gca().spines[\"top\"].set_visible(False)\n # ---\n for paramset 
in n_file:\n with open(\n os.path.join(\n self.model.path,\n \"out\",\n f\"{paramset:d}\",\n \"optimization.log\",\n ),\n mode=\"r\",\n ) as f:\n traces = f.readlines()\n iters = []\n obj_val = []\n for line in traces:\n if line.startswith(\"Generation\"):\n iters.append(line.lstrip(\"Generation\").split(\":\")[0])\n obj_val.append(line.split(\"=\")[-1].strip())\n plt.plot([int(num) - 1 for num in iters], [float(val) for val in obj_val])\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Objective function value\")\n plt.savefig(\n os.path.join(\n self.model.path,\n \"optimization_results\",\n \"obj_func_traces.pdf\",\n ),\n bbox_inches=\"tight\",\n )\n plt.close()", "title": "" }, { "docid": "5c4b5dd16fefdd7cff7aee4566199e3b", "score": "0.55051064", "text": "def save_trajectory(self, button):\n\n # Find the trajectory in our saved list.\n for index,trajectory in enumerate(self.trajectories):\n if button in trajectory['buttons'].children[0].children:\n break\n else: # if the trajectory was not found, we have a problem!\n raise LookupError('could not find trajectory!')\n\n ## Prompt the user for a filepath to save the simulation, using Tkinter\n\n # We do not want to create a new window, so keep the Tkinter root window from appearing\n Tk().withdraw()\n # Prompt user for save path.\n filename = filedialog.asksaveasfilename(filetypes = ((\"comma separated values files\", \"*.csv\"),\n (\"all files\", \"*.*\")))\n\n ## Save the trajectory.\n\n if filename:\n droplet = trajectory['droplet']\n history = trajectory['trajectory']\n settings = trajectory['settings']\n settings = '\\n'.join(['# %s: %s' % (key, value) for key,value in settings.items()])\n data = droplet.complete_trajectory(history)\n with open(filename, 'w') as f:\n f.write(settings)\n f.write('\\n')\n data.to_csv(f, index=False)", "title": "" }, { "docid": "d8a2d5b87120ae91dede688c6c551416", "score": "0.5501056", "text": "def _write(self, *args, **kwargs):\n try:\n units = kwargs['units']\n except:\n units = ['adimensional','adimensional']\n \n x_labels = [args[i] for i in range(len(args)-1)]\n self.y = args[-1]\n fname = self.id+'_'+args[0]+args[1]+'.dat'\n self._backup_file(fname)\n\n self.info = '' + self.fname + ' ' + ' matteo.vallar@igi.cnr.it ' + \\\n time.strftime(\"%d/%m/%Y\")\n self.info2 = 'For each dimension: name units number of bins min max'\n self.header = '' \n for i in range(len(args)-1):\n self.header += args[i]+' '\n self.header += units[i]+' ' \n self.header += ' '+str(len(self.dict_dim[x_labels[i]]))\n self.header += ' '+str(min(self.dict_dim[x_labels[i]]))\n self.header += ' '+str(max(self.dict_dim[x_labels[i]]))\n self.header += ' '\n \n self.header += '\\n'\n self.header += \"# Normalisation : {0:.5e}\".format(round(self.norm, 2))\n \n with open(fname,'w') as f_handle:\n f_handle.write('# '+self.info+'\\n')\n f_handle.write('# '+self.info2+'\\n') \n f_handle.write('# '+self.header+'\\n')\n #for lab in x_labels:\n # np.savetxt(f_handle, self.dict_dim[lab])\n np.savetxt(f_handle, self.y, fmt='%.5e')", "title": "" }, { "docid": "d45c4999a8522f83e86638614129f66c", "score": "0.5488677", "text": "def main(p_vs, token_vs, t_in, t_out, file_name):\r\n\r\n f = open(file_name,\"r\")\r\n archivo = f.readlines()\r\n archivo_sin_espacio=[]\r\n\r\n x=np.random.randint(-300, 300)\r\n y=np.random.randint(-300, 300)\r\n\r\n flag_place=1\r\n flag_arco=1\r\n for i in range(len(archivo)):\r\n archivo_sin_espacio.append(archivo[i].strip()) #Elimina los espacios contenidos en el archivo y los guarda en 'archivo_sin_espacio'\r\n 
if(archivo_sin_espacio[len(archivo_sin_espacio)-1]==\"</place>\" and flag_place==1): #Busca el lugar del archivo donde se debe agregar la plaza con el marcado\r\n archivo_sin_espacio.append('<place>\\n<id>'+ p_vs + '</id>\\n<x>' + str(x) + '</x>\\n<y>'+ str(y) +'</y>\\n<label>'+ p_vs + '</label>\\n<tokens>'+ token_vs + '</tokens>\\n<isStatic>false</isStatic>\\n<type>1</type>\\n</place>')#+'<type>controlPlace</type>'\r\n flag_place=0\r\n\r\n if(archivo_sin_espacio[len(archivo_sin_espacio)-1]==\"</arc>\" and flag_arco==1): #Busca el lugar del archivo donde se debe agregar los arcos \r\n for i in range(len(t_in)): #Arco desde una transicion a la plaza supervisor (arcos de entrada a Vs)\r\n archivo_sin_espacio.append('<arc>\\n<type>regular</type>\\n<sourceId>'+ t_in[i] + '</sourceId>\\n<destinationId>'+ p_vs + '</destinationId>\\n<multiplicity>1</multiplicity>\\n</arc>')\r\n \r\n for i in range(len(t_out)): #Arco desde la plaza supervisor a una transicion (arcos de salida de Vs)\r\n archivo_sin_espacio.append('<arc>\\n<type>regular</type>\\n<sourceId>'+ p_vs + '</sourceId>\\n<destinationId>'+ t_out[i] +'</destinationId>\\n<multiplicity>1</multiplicity>\\n</arc>')\r\n \r\n flag_arco=0\r\n f.close()\r\n\r\n with open(file_name, 'w') as f: #Se reescribe el archivo '.pflow'\r\n for item in archivo_sin_espacio:\r\n f.write(\"%s\\n\" % item)", "title": "" }, { "docid": "5468f36944078ce47cd29eb6e443d6d0", "score": "0.5488542", "text": "def exportAsTxt(self, output_name: str, verbose=False):\n if \".txt\" in output_name:\n output_name = output_name.split(\".txt\")[0]\n\n if self.is3D:\n raise Exception(\"ERROR : Export as a txt file requires the data \\\n to be 2 dimensionnal.\")\n for key in self._data:\n output = self._data[key].reshape(self.shape[:2])\n final_output_name = output_name+\"_\"+key + \\\n \".txt\" if key != \"V0\" else output_name+\".txt\"\n np.savetxt(final_output_name, output)\n if verbose:\n print(\"Generated txt file as {}\".format(output_name))", "title": "" }, { "docid": "cc437176dc7497f1a67eaf3157a5258c", "score": "0.54868454", "text": "def saveVoxels(self, outputPath, save_binvox=False):\n self._getVoxels()\n startPoint = 0\n if self._fileName.rfind(\"/\") != -1:\n startPoint = self._fileName.rfind(\"/\") + 1\n \n fileName = self._fileName[startPoint:self._fileName.rfind('.')] # cut the format end\n # save npy\n np.save(os.path.join(outputPath, fileName) + \".npy\", self._voxels)\n \n if save_binvox:\n # save binvox\n bool_voxel = self._voxels.astype(np.bool)\n binvox = binvox_rw.Voxels(\n data = bool_voxel,\n dims = list(self._voxels.shape),\n translate = [0.0, 0.0, 0.0],\n scale = 1.0,\n axis_order = 'xzy')\n fp = open(os.path.join(outputPath, fileName) + \".binvox\", 'wb+')\n fp.truncate()\n binvox.write(fp)\n fp.close()", "title": "" }, { "docid": "d5d0be4e1992b0bf3b26f3a6897314b0", "score": "0.54805773", "text": "def _save_vectors(filename, vectors, region, mesh, data_name):\n nv = nm.zeros_like(mesh.coors)\n nmax = region.vertices.shape[0]\n nv[region.vertices] = vectors[:nmax]\n\n out = {data_name : Struct(name='output_data', mode='vertex', data=nv)}\n mesh.write(filename, out=out, io='auto')", "title": "" }, { "docid": "2ab10db013068ccf3b436f799fdd101b", "score": "0.54783136", "text": "def main():\n\n # Importing the solutions from the file\n file = open(\"altitudes.txt\", \"r\")\n\n # Reading the first line which contain the size of the matrix\n xSize, ySize = [int(i) for i in file.readline().split()]\n\n # Defining constants relating to the map\n radius = 6371009 # 
Earth radius\n lat_min = 45\n lat_max = 47.5\n long_min = 5\n long_max = 7.5\n\n # The latitudes and longitudes of each point of the matrix can be easily compute\n # using differences of latitudes/longitudes between each point\n delta_lat = (lat_max - lat_min)/ySize\n delta_long = (long_max - long_min)/xSize\n\n # Used to find water. At any time, we remember the altitudes from the 2 above latitudes, and the current latitude.\n # Doing so, we can find 3x3 squares which have the same altitudes. It forms a plain plane, which can be some water.\n # Using a 3x3 square denoises this detection.\n # To have a low memory allocation, we only track for 3 latitudes.\n altitudes_y3 = np.zeros([3, xSize], dtype=np.uint16) \n\n # Defining geometry\n points = vtk.vtkPoints()\n # Defining altitudes of each point\n altitudeValues = vtk.vtkIntArray()\n altitudeValues.SetNumberOfValues(xSize * ySize)\n\n \n def compute_point(x, y, alt):\n \"\"\"\n Method used to convert the x-y-z(altitude without earth radius) values from the matrix to x-y-z world coordinates\n \"\"\"\n t = vtk.vtkTransform()\n t.PostMultiply()\n\n # Rotating the point from latitude and longitude\n t.RotateY(-lat_min + delta_lat * y)\n t.RotateZ(long_min + delta_long * x)\n\n # Describing the point and setting it on the x axe, at the right altitude.\n p_in = [radius + alt, 0, 0]\n p_out = [0, 0, 0]\n t.TransformPoint(p_in, p_out)\n\n ''' \n # It can be done with a sphericalTransform too\n t = vtk.vtkSphericalTransform()\n\n # Apply transformation\n p_in = [radius + z, math.radians(lat_min + delta_lat * y), math.radians(long_min + delta_long * x)]\n p_out = [0, 0, 0]\n t.TransformPoint(p_in, p_out)\n '''\n\n return p_out\n\n # Minimum and maximum altitude found in the map\n min_alt = None\n max_alt = None\n\n for y in range(ySize): # Exploring each altitude from the file\n # array of altitudes of the current latitude\n altitudes = file.readline().split() \n\n if y % 100 == 0:\n print('Computing y = ' + str(y))\n\n for x in range(xSize): # For each x value \n alt = int(altitudes[x]) # Getting the associated altitude\n\n # finding min and max altitudes \n if min_alt == None or alt < min_alt:\n min_alt = alt\n if max_alt == None or alt > max_alt:\n max_alt = alt\n\n # Computing world coordinates from x-y-altitude\n point = compute_point(x, y, alt)\n \n # Inserting the point to the listOfPoints\n points.InsertNextPoint(point[0], point[1], point[2])\n\n # Computing the index of the point from the x and y value (flattening a 2 dimension matrix to a 1 dimension one)\n index = y * xSize + x\n \n # Setting the altitude of the current point as a point scalar\n altitudeValues.SetValue(index, alt)\n \n # checking for a square 3x3 (denoising)\n altitudes_y3[2, x] = alt # updating box\n if x >= 2 and y >= 2:\n # extracting the 3x3 box\n box = altitudes_y3[...,x - 2 : x + 1]\n\n # Exploring the box and checking if all of the altitudes are the same \n val = box[0, 0]\n ok = True\n for i in range(3):\n for j in range(3):\n if val != box[i, j]: \n ok = False\n break\n if not ok:\n break\n\n if ok: # if all of the altitudes are the same, it is probably some water\n for i in range(3):\n for j in range(3):\n # Updating the scalar associated with the point.\n # We are using the '0' value, which correspond to the sea level\n altitudeValues.SetValue((y - j) * ySize + (x - i), 0)\n\n # If the current altitude is below the sea-level, we can set the associated scalar as a water value\n if alt < sea_level:\n altitudeValues.SetValue(index, 0)\n\n # updating the 3 
latitudes\n altitudes_y3[0] = altitudes_y3[1]\n altitudes_y3[1] = altitudes_y3[2]\n altitudes_y3[2] = np.zeros(xSize)\n\n file.close()\n\n # creating a dataset\n # vtkStructuredGrid has an implicit topology and take less memory than a polydata\n grid = vtk.vtkStructuredGrid()\n grid.SetDimensions(xSize, ySize, 1)\n grid.SetPoints(points)\n grid.GetPointData().SetScalars(altitudeValues)\n\n # Init our mapping to find the colors\n # We use a colorTranferFunction with point for different altitudes \n colorTable = vtk.vtkColorTransferFunction()\n colorTable.AdjustRange([min_alt, max_alt]) # Adjusting range from the minimum and maximum altitudes computed before\n colorTable.AddRGBPoint(min_alt, 0.185, 0.67, 0.29) # green land\n colorTable.AddRGBPoint(1000, 0.839, 0.812, 0.624) # brown land\n colorTable.AddRGBPoint(2000, 0.84, 0.84, 0.84) # grey rock\n colorTable.AddRGBPoint(max_alt, 1, 1, 1) # white snow\n # Water is set to 0m (sea-level) as an altitude\n colorTable.SetBelowRangeColor(0.513, 0.49, 1) # blue water\n colorTable.UseBelowRangeColorOn()\n colorTable.Build()\n\n # Create a mapper and actor\n mapper = vtk.vtkDataSetMapper()\n mapper.SetInputData(grid)\n mapper.UseLookupTableScalarRangeOn()\n mapper.SetLookupTable(colorTable)\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n\n # Create the renderer\n ren = vtk.vtkRenderer()\n ren.AddActor(actor)\n ren.SetBackground(1, 1, 1)\n\n # Moving the camera\n camera = vtk.vtkCamera()\n distance = 600000 # the camera will be at ~600000m above the surface\n center_high = compute_point(xSize/2, ySize/2, distance)[0:3]\n camera.SetPosition(center_high)\n center = compute_point(xSize/2, ySize/2, 0)[0:3]\n camera.SetFocalPoint(center) # looking at the center of the map\n camera.SetRoll(266)\n camera.SetClippingRange(1, 100000000)\n ren.SetActiveCamera(camera)\n\n # Creating a window to display the render\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(ren)\n renWin.SetSize(900, 900)\n renWin.Render()\n\n # Exporting a map if we want to\n if export_map:\n export_png(renWin, str(sea_level) + \"sea.png\")\n\n # start the interaction window and add TrackBall Style\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())\n iren.SetRenderWindow(renWin)\n iren.Start()", "title": "" }, { "docid": "c6d0314b978928931d29821841f28701", "score": "0.5461955", "text": "def ExportTs2Csv(ts, csvFilePath, stationId, stationName, timeOfSimulationRun):\n \n # Create new file and overwrite if it already exists.\n strBuilder = System.Text.StringBuilder(); \n \n currentDateTimeString = timeOfSimulationRun.ToString('yyyyMMddHHmm');\n ExportDateTimeString = System.DateTime.Now. 
ToString('yyyyMMddHHmm');\n \n # Write header\n h1 = 'R�sultats MIKE ALLIER;;;';\n h3 = System.String.Format('export g�n�r� le ;;{0};', ExportDateTimeString);\n h4 = ';;;'\n\n h2 = 'D�bit;;;';\n h5 = 'TYP;CODE;DATE(TU);DEBIT(m3/s)';\n fileName = 'PREQ_' + stationId + '_' + stationName + '_' + currentDateTimeString + '_' + ExportDateTimeString + '.txt';\n \n if (ts.YAxisVariable == 'Water Level'):\n h2 = 'Hauteur;;;';\n h5 = 'TYP;CODE;DATE(TU);HAUTEUR (m)';\n fileName = 'PREH_' + stationId + '_' + stationName + '_' + currentDateTimeString + '_' + ExportDateTimeString +'.txt';\n \n strBuilder.AppendLine(h1);\n strBuilder.AppendLine(h2);\n strBuilder.AppendLine(h3);\n strBuilder.AppendLine(h4);\n strBuilder.AppendLine(h5);\n \n #Write each time step.\n timeSteps = ts.GetAll();\n \n for timeStep in timeSteps:\n valueString = None;\n if (timeStep.YValue != None):\n valueString = timeStep.YValue.ToString(System.Globalization.CultureInfo.InvariantCulture) \n timeStepString = System.String.Format('{0};{1};{2};{3}', 'PRV', stationId, timeStep.XValue.ToString('yyyyMMddHHmm'), valueString);\n strBuilder.AppendLine(timeStepString);\n \n csvFileName = System.IO.Path.Combine(csvFilePath, fileName);\n System.IO.File.WriteAllText(csvFileName, strBuilder.ToString());\n return;", "title": "" }, { "docid": "a6f00082509424139933f97e0215f635", "score": "0.54584813", "text": "def OrbitCOM(galaxy, start, end, n):\n \n #composing filename for output\n fileout = \"Orbit_{}.txt\".format(galaxy)\n \n #setting tolerance and VolDec\n if galaxy == 'M33':\n VolDec = 4.0\n delta = 0.1\n else:\n VolDec = 5.0\n delta = 0.1\n #print(galaxy,VolDec,delta)\n #generating the snapshot id sequence \n snap_ids = np.arange(start,end,n)\n #initializing the array for orbital info: t, x, y, z, vx, vy, vz of COM\n orbit = np.zeros([len(snap_ids),7])\n \n for i, snap_id in enumerate(snap_ids):# loop over files\n #composing the data filename \n ilbl = '000' + str(snap_ids[i])\n ilbl = ilbl[-3:]\n filename = \"%s_\"%(galaxy)+\"VLowRes/\"+\"%s_\"%(galaxy) + ilbl + '.txt' #I kept my VLowRes files in a separate folder, \n #to keep my Homework6 directory from being polluted with .txt files\n\n #Initializing CenterOfMass class\n COM = CenterOfMass(filename,2)# Uses disk particles\n #Storing the COM pos and vel\n POS = COM.COM_P(delta,VolDec)\n VEL = COM.COM_V(POS[0],POS[1],POS[2])\n \n #storting t, x, y, z, vx, vy, vz in obrit array\n orbit[i]= COM.time.value/1000, *tuple(POS.value), *tuple(VEL.value)\n \n #print(snap_id)\n \n #Writing out data\n np.savetxt(fileout, orbit, fmt = \"%11.3f\"*7, comments='#',\n header=\"{:>10s}{:>11s}{:>11s}{:>11s}{:>11s}{:>11s}{:>11s}\"\\\n .format('t', 'x', 'y', 'z', 'vx', 'vy', 'vz'))", "title": "" }, { "docid": "0c2b4fa3902abc1bbf611cab68188a16", "score": "0.54559034", "text": "def write(self,out_file='surge.data',data_source=\"setrun.py\"):\n\n # print \"Creating data file %s\" % out_file\n self.open_data_file(out_file,data_source)\n\n self.data_write('wind_forcing', description='(Wind source term used)')\n self.data_write('drag_law', description='(Type of drag law to use)')\n self.data_write('pressure_forcing',\n description=\"(Pressure source term used)\")\n self.data_write()\n\n self.data_write(\"wind_index\", value=self.wind_index + 1,\n description=\"(Index into aux array - fortran indexing)\")\n self.data_write(\"pressure_index\", value=self.pressure_index + 1,\n description=\"(Index into aux array - fortran indexing)\")\n self.data_write(\"display_landfall_time\",\n description='(Display time relative 
to landfall)')\n self.data_write()\n\n if isinstance(self.wind_refine, bool):\n if not self.wind_refine:\n self.data_write('wind_refine', value=False,\n description='(Refinement ratios)')\n elif isinstance(self.wind_refine, type(None)):\n self.data_write('wind_refine', value=False,\n description='(Refinement ratios)')\n else:\n self.data_write('wind_refine',description='(Refinement ratios)')\n if isinstance(self.R_refine, bool):\n if not self.R_refine:\n self.data_write('R_refine', value=False,\n description='(Refinement ratios)')\n elif isinstance(self.R_refine, type(None)):\n self.data_write('R_refine', value=False,\n description='(Refinement ratios)')\n else:\n self.data_write('R_refine', description='(Refinement ratios)')\n self.data_write()\n\n # Storm specification\n if self.storm_type is not None:\n self.storm_specification_type = self.storm_type\n if type(self.storm_specification_type) is not int:\n if self.storm_specification_type in \\\n self.storm_spec_dict_mapping.keys():\n if self.storm_specification_type in \\\n self.storm_spec_not_implemented:\n raise NotImplementedError(\"%s has not been implemented.\"\n %self.storm_specification_type)\n\n else:\n self.data_write(\"storm_specification_type\",\n self.storm_spec_dict_mapping[\n self.storm_specification_type],\n description=\"(Storm specification)\")\n else:\n raise ValueError(\"Unknown storm specification type %s\"\n % self.storm_specification_type)\n else:\n self.data_write(\"storm_specification_type\",\n description=\"(Storm specification)\")\n self.data_write(\"storm_file\", description='(Path to storm data)')\n\n self.close_data_file()", "title": "" }, { "docid": "24285ce7b9209d03dbc9e45cddff340d", "score": "0.54508686", "text": "def test_parallel_vtk_file(actx_factory, dim):\n logging.basicConfig(level=logging.INFO)\n\n actx = actx_factory()\n\n nelements = 64\n target_order = 4\n\n if dim == 1:\n mesh = mgen.make_curve_mesh(\n mgen.NArmedStarfish(5, 0.25),\n np.linspace(0.0, 1.0, nelements + 1),\n target_order)\n elif dim == 2:\n mesh = mgen.generate_torus(5.0, 1.0, order=target_order)\n elif dim == 3:\n mesh = mgen.generate_warped_rect_mesh(dim, target_order, nelements_side=4)\n else:\n raise ValueError(\"unknown dimensionality\")\n\n from meshmode.discretization import Discretization\n discr = Discretization(actx, mesh,\n InterpolatoryQuadratureSimplexGroupFactory(target_order))\n\n from meshmode.discretization.visualization import make_visualizer\n vis = make_visualizer(actx, discr, target_order)\n\n class FakeComm:\n def Get_rank(self): # noqa: N802\n return 0\n\n def Get_size(self): # noqa: N802\n return 2\n\n file_name_pattern = f\"visualizer_vtk_linear_{dim}_{{rank}}.vtu\"\n pvtu_filename = file_name_pattern.format(rank=0).replace(\"vtu\", \"pvtu\")\n\n vis.write_parallel_vtk_file(\n FakeComm(),\n file_name_pattern,\n [\n (\"scalar\", discr.zeros(actx)),\n (\"vector\", make_obj_array([discr.zeros(actx) for i in range(dim)]))\n ],\n overwrite=True)\n\n import os\n assert os.path.exists(pvtu_filename)\n\n import filecmp\n assert filecmp.cmp(str(thisdir / f\"ref-{pvtu_filename}\"), pvtu_filename)", "title": "" }, { "docid": "daa7d35cee4a27328bf7dc87d0b4c5de", "score": "0.54483813", "text": "def save_tsv(filename,data):\n\n #print('Saving %s' %(filename))\n np.savetxt(filename,data,fmt='%.8f',delimiter='\\t',newline='\\n')", "title": "" }, { "docid": "5d5d16aefa72f6231fcaca5f69aeee3a", "score": "0.5445659", "text": "def leaveone(self):\n \n \n self.completeName2 = os.path.join(self.outpath, \"LeaveOneOut.txt\") \n f2 
= open(self.completeName2, 'w') # creates an output file\n # Prepares all the data for input into solving script\n self.n = int(self.data[0])\n self.volume = self.data[1]\n self.eqenergy = self.data[2]\n self.deltas = self.data[3:self.n+3]\n self.energy = self.data[self.n+3:]\n self.values = np.array([0.0]*(int(self.n-1)*12))\n self.delt = np.array([0.0]*(int(self.n-1))) \n \n # THe main \"i\" loop is responsible for omitting one set of data each time.\n #This is done through integers being compared to the intiger on the values being sorted \n # It also stores the values each time they are calculated.\n #The \"k\" loop is for sorting value of strains \n #The \"j\" loop is for sorting the energy values\n for i in range(0, int(self.n)):\n j=0\n help2=0\n help=0\n # These help intigers are used to get rid of a line full of zeros.\n #E.g. after omitting one set of data the next is safed into the place of the previous one, instead of leaving a blank space\n \n for k in range(0, int(self.n)):\n if k!= i:\n self.delt[k-help] = self.deltas[k]\n else:\n help=1 \n for j in range (0, int(self.n*12)):\n if int(j/12)!= i:\n self.values[j-help2] = self.energy[j]\n else:\n help2=12\n \n a = Constants((self.n-1), self.volume, self.eqenergy,self.delt, self.values )\n self.results, residual = a.solve()\n \n f2.write(\"when the set number %d was omitted the following data was obtained\\n\" %i)\n constants = [\" #C11 in GPa\",\" #C12 in GPa\", \" #C44 in GPa\", \" #C111 in GPa\", \" #C112 in GPa\", \" #C123 in GPa\", \" #C144 in GPa\",\" #C166 in GPa\", \" #C456 in GPa\"]\n for i in range (0,9):\n f2.write(str(self.results[i]))\n f2.write(str(constants[i]))\n f2.write(\"\\n\")\n f2.write(str(residual))\n f2.write(\" # residuals\\n\")\n f2.write(\"\\n\")", "title": "" }, { "docid": "607e9f36cbf2aa4ec3a21c0c3f89fd73", "score": "0.5444777", "text": "def write(self):\n with open(Case.path+'/mesh.dat', 'wb') as outfile:\n print(len(Mesh.x), len(Mesh.y), len(Mesh.dx), len(Mesh.dy))\n np.savetxt(outfile, np.c_[Mesh.x, Mesh.y, Mesh.dx, Mesh.dy], \n fmt='%.6f', delimiter='\\t', \n header='Mesh (%d by %d): x, y, dx, dy' \n % (Mesh.Nx, Mesh.Ny))", "title": "" }, { "docid": "201fef74f56f7b4ed860c3a13d78b922", "score": "0.54331607", "text": "def save(self, out_path, out_name):\n def write_array(path, X):\n np.savetxt(path, X, fmt='%s')\n\n if not os.path.isdir(out_path):\n os.makedirs(out_path)\n\n write_array(\n os.path.join(out_path, out_name + '.data'),\n self.get_data('X'))\n write_array(\n os.path.join(out_path, out_name + '_feat.name'), \n self.feat_name)\n\n if 'y' in self.data:\n write_array(\n os.path.join(out_path, out_name + '.solution'),\n self.get_data('y'))\n\n if 'X_train' and 'X_test' in self.data:\n write_array(\n os.path.join(out_path, out_name + '_train.data'),\n self.get_data('X_train'))\n write_array(\n os.path.join(out_path, out_name + '_test.data'),\n self.get_data('X_test'))\n if 'y_train' and 'y_test' in self.data:\n write_array(\n os.path.join(out_path, out_name + '_test.solution'),\n self.get_data('y_train'))\n write_array(\n os.path.join(out_path, out_name + '_test.solution'),\n self.get_data('y_test'))\n write_array(\n os.path.join(out_path, out_name + '_label.name'),\n self.label_name)\n\n with open(os.path.join(out_path, out_name + '_public.info'), 'w') as f:\n for key, item in self.info.items():\n f.write(str(key))\n f.write(' = ')\n f.write(str(item))\n f.write('\\n')", "title": "" }, { "docid": "1f798fa59e7bfad6ed6826748962f104", "score": "0.5430745", "text": "def to_file(self, 
filename):\n with open(filename, 'w') as f:\n for i in range(0, self.ndim):\n f.write(\"\\n\".join((str(self.origins[i]),\n str(self.steps[i]),\n str(self.ends[i]), \"\"))\n )", "title": "" }, { "docid": "e7fa72f39822bd10e2bc5921530f6265", "score": "0.54213166", "text": "def _write(self, session, openFile, replaceParamFile):\n # Retrieve all time series\n timeSeries = self.timeSeries\n\n # Num TimeSeries\n numTS = len(timeSeries)\n\n # Transform into list of dictionaries for pivot tool\n valList = []\n\n for tsNum, ts in enumerate(timeSeries):\n values = ts.values\n for value in values:\n valDict = {'time': value.simTime,\n 'tsNum': tsNum,\n 'value': value.value}\n valList.append(valDict)\n\n # Use pivot function (from lib) to pivot the values into\n # a format that is easy to write.\n result = pivot(valList, ('time',), ('tsNum',), 'value')\n\n # Write lines\n for line in result:\n\n valString = ''\n # Compile value string\n for n in range(0, numTS):\n val = '%.6f' % line[(n,)]\n valString = '%s%s%s' % (\n valString,\n ' ' * (13 - len(str(val))), # Fancy spacing trick\n val)\n\n openFile.write(' %.8f%s\\n' % (line['time'], valString))", "title": "" }, { "docid": "a4b56d85b7e6c0f792b9e62f3a9ebbbd", "score": "0.54209137", "text": "def write(self, out_file='claw.data', data_source='setrun.py'):\n self.open_data_file(out_file,data_source)\n\n self.data_write('num_dim')\n self.data_write('lower')\n self.data_write('upper')\n self.data_write('num_cells')\n self.data_write() # writes blank line\n self.data_write('num_eqn')\n self.data_write('num_waves')\n self.data_write('num_aux')\n self.data_write() # writes blank line\n\n self.data_write('t0')\n self.data_write()\n self.data_write('output_style')\n\n if self.output_style == 1:\n self.data_write('num_output_times')\n self.data_write('tfinal')\n self.data_write('output_t0')\n elif self.output_style == 2:\n if len(self.output_times) == 0:\n raise AttributeError(\"*** output_style==2 requires nonempty list\" \\\n + \" of output times\")\n self.num_output_times = len(self.output_times)\n self.data_write('num_output_times')\n self.data_write('output_times')\n elif self.output_style==3:\n self.data_write('output_step_interval')\n self.data_write('total_steps')\n self.data_write('output_t0')\n else:\n raise AttributeError(\"*** Unrecognized output_style: %s\"\\\n % self.output_style)\n\n self.data_write()\n if self.output_format in [1,'ascii']:\n self.output_format = 1\n elif self.output_format in [2,'binary32']:\n self.output_format = 2\n elif self.output_format in [3,'binary64','binary']:\n self.output_format = 3\n else:\n errmsg = \"*** Error in data parameter: \" + \\\n \"output_format unrecognized: %s \" % self.output_format + \\\n \"\\n *** Expecting ascii, binary32, or binary64\"\n raise ValueError(errmsg)\n \n self.data_write('output_format')\n\n if self.output_q_components == 'all':\n iout_q = self.num_eqn * [1]\n elif self.output_q_components == 'none':\n iout_q = self.num_eqn * [0]\n else:\n #iout_q = np.where(self.output_q_components, 1, 0)\n print(\"*** WARNING: Selective output_q_components not implemented\")\n print(\"*** Will output all components of q\")\n iout_q = self.num_eqn * [1]\n \n\n # Write out local value of iout_q rather than a data member\n self.data_write('', value=iout_q, alt_name='iout_q')\n\n if self.num_aux > 0:\n if isinstance(self.output_aux_components,six.string_types):\n if self.output_aux_components.lower() == 'all':\n iout_aux = self.num_aux * [1]\n elif self.output_aux_components.lower() == 'none':\n iout_aux = 
self.num_aux * [0]\n else:\n raise ValueError(\"Invalid aux array component option.\")\n else:\n iout_aux = np.where(self.output_aux_components, 1, 0)\n print(\"*** WARNING: Selective output_aux_components not implemented\")\n print(\"*** Will output all components of aux\")\n iout_aux = self.num_aux * [1]\n self.data_write(name='', value=iout_aux, alt_name='iout_aux')\n self.data_write('output_aux_onlyonce')\n\n self.data_write()\n self.data_write('dt_initial')\n self.data_write('dt_max')\n self.data_write('cfl_max')\n self.data_write('cfl_desired')\n self.data_write('steps_max')\n self.data_write()\n self.dt_variable = bool(self.dt_variable) # in case 0 or 1\n self.data_write('dt_variable')\n self.data_write('order')\n\n if self.num_dim == 1:\n pass\n else:\n # Transverse options different in 2D and 3D\n if self.num_dim == 2:\n if self.transverse_waves in [0,'none']: \n self.transverse_waves = 0\n elif self.transverse_waves in [1,'increment']: \n self.transverse_waves = 1\n elif self.transverse_waves in [2,'all']: \n self.transverse_waves = 2\n else:\n raise AttributeError(\"Unrecognized transverse_waves: %s\" \\\n % self.transverse_waves)\n else: # 3D\n if self.transverse_waves in [0,'none']: \n self.transverse_waves = 0\n elif self.transverse_waves in [1,'increment']: \n self.transverse_waves = 11\n elif self.transverse_waves in [2,'all']: \n self.transverse_waves = 22\n if not (self.transverse_waves in [0, 10, 11, 20, 21, 22]):\n raise AttributeError(\"Unrecognized transverse_waves: %s\" \\\n % self.transverse_waves)\n self.data_write(None, self.transverse_waves, 'transverse_waves')\n\n if self.dimensional_split in [0,'unsplit']: \n self.dimensional_split = 0\n elif self.dimensional_split in [1,'godunov']: \n self.dimensional_split = 1\n elif self.dimensional_split in [2,'strang']: \n if self.num_dim == 3:\n raise AttributeError(\"Strang dimensional splitting not supported in 3D\")\n else:\n self.dimensional_split = 2\n else:\n raise AttributeError(\"Unrecognized dimensional_split: %s\" \\\n % self.dimensional_split)\n self.data_write('dimensional_split')\n \n self.data_write('verbosity')\n\n if self.source_split in [0,'none']: \n self.source_split = 0\n elif self.source_split in [1,'godunov']: \n self.source_split = 1\n elif self.source_split in [2,'strang']: \n self.source_split = 2\n else:\n raise AttributeError(\"Unrecognized source_split: %s\" \\\n % self.source_split)\n self.data_write('source_split')\n\n self.data_write('capa_index')\n self.data_write('use_fwaves')\n self.data_write()\n\n for i in range(len(self.limiter)):\n if self.limiter[i] in [0,'none']: self.limiter[i] = 0\n elif self.limiter[i] in [1,'minmod']: self.limiter[i] = 1\n elif self.limiter[i] in [2,'superbee']: self.limiter[i] = 2\n elif self.limiter[i] in [3,'vanleer']: self.limiter[i] = 3\n elif self.limiter[i] in [4,'mc']: self.limiter[i] = 4\n else:\n raise AttributeError(\"Unrecognized limiter: %s\" \\\n % self.limiter[i])\n self.data_write('limiter')\n\n self.data_write()\n\n self.data_write('num_ghost')\n if not isinstance(self.bc_lower, list):\n self.bc_lower = [self.bc_lower] # Allow bare number in 1D\n if len(self.bc_lower) != self.num_dim:\n raise AttributeError(\"Incorrect number of lower BC codes (expected %d, got %d)\" \\\n %(self.num_dim, len(self.bc_lower)))\n for i in range(self.num_dim):\n if self.bc_lower[i] in [0,'user']: self.bc_lower[i] = 0\n elif self.bc_lower[i] in [1,'extrap']: self.bc_lower[i] = 1\n elif self.bc_lower[i] in [2,'periodic']: self.bc_lower[i] = 2\n elif self.bc_lower[i] 
in [3,'wall']: self.bc_lower[i] = 3\n else:\n raise AttributeError(\"Unrecognized bc_lower: %s\" \\\n % self.bc_lower[i])\n self.data_write('bc_lower')\n\n if not isinstance(self.bc_upper, list):\n self.bc_upper = [self.bc_upper] # Allow bare number in 1D\n if len(self.bc_upper) != self.num_dim:\n raise AttributeError(\"Incorrect number of upper BC codes (expected %d, got %d)\" \\\n %(self.num_dim, len(self.bc_upper)))\n for i in range(self.num_dim):\n if self.bc_upper[i] in [0,'user']: self.bc_upper[i] = 0\n elif self.bc_upper[i] in [1,'extrap']: self.bc_upper[i] = 1\n elif self.bc_upper[i] in [2,'periodic']: self.bc_upper[i] = 2\n elif self.bc_upper[i] in [3,'wall']: self.bc_upper[i] = 3\n else:\n raise AttributeError(\"Unrecognized bc_upper: %s\" \\\n % self.bc_upper[i])\n self.data_write('bc_upper')\n\n self.data_write()\n self.data_write('restart')\n self.data_write('restart_file')\n self.data_write('checkpt_style')\n\n if self.checkpt_style in [-2,2]:\n num_checkpt_times = len(self.checkpt_times)\n self.data_write(name='', value=num_checkpt_times, alt_name='num_checkpt_times')\n self.data_write('checkpt_times')\n elif self.checkpt_style in [-3,3]:\n self.data_write('checkpt_interval')\n elif self.checkpt_style not in [0,1,-1]:\n raise AttributeError(\"*** Unrecognized checkpt_style: %s\"\\\n % self.checkpt_style)\n\n self.data_write()\n self.close_data_file()", "title": "" }, { "docid": "d7d9123d1e8d8ff507fc0b06264ebd57", "score": "0.5420677", "text": "def boxplot(deltaposB, deltaposE, deltaposD, extras, out_name, vector, name):\n filenamed = out_name+\"-delta-duration-\"+name+\".csv\"\n filenames = out_name+\"-delta-position-start-\"+name+\".csv\"\n filenamee = out_name+\"-delta-position-end-\"+name+\".csv\"\n\n fpb = codecs.open(filenames, \"w\", 'utf8')\n fpe = codecs.open(filenamee, \"w\", 'utf8')\n fpd = codecs.open(filenamed, \"w\", 'utf8')\n fpb.write(\"PhoneS,DeltaS\\n\")\n fpe.write(\"PhoneE,DeltaE\\n\")\n fpd.write(\"PhoneD,DeltaD\\n\")\n for i, extra in enumerate(extras):\n etiquette = extra[0]\n tag = extra[2]\n if etiquette in vector:\n if tag != 0:\n fpb.write(\"%s,%f\\n\" % (etiquette, deltaposB[i]))\n if tag != -1:\n fpe.write(\"%s,%f\\n\" % (etiquette, deltaposE[i]))\n fpd.write(\"%s,%f\\n\" % (etiquette, delta_durationur[i]))\n fpb.close()\n fpe.close()\n fpd.close()\n\n message = exec_Rscript(filenamed, filenames, filenamee, \n out_name+\".R\", out_name+\"-delta-\"+name+\".pdf\")\n\n os.remove(filenamed)\n os.remove(filenames)\n os.remove(filenamee)\n return message", "title": "" }, { "docid": "9fdb0395fc7c75e554ba038725744c68", "score": "0.5416098", "text": "def write_to_file(systems_linear_equations: List[List[float]], output_file: TextIO, step_size: int) -> None:\n\tcount = 0\n\tfor value in systems_linear_equations:\n\t\tx_value = \"{:.4f}\".format(round(value[2], 4))\n\t\tc_value = \"{:.4f}\".format(round(value[1], 4))\n\t\toutput_file.write(f'{value[0]} <= x < {value[0] + step_size}; \\t y_{count} = {c_value} + {x_value}x\\t\\tinterpolation \\n')\n\t\tcount += 1", "title": "" }, { "docid": "72d4102da7adc0033addaa3ada5c2396", "score": "0.5415663", "text": "def output_solutions_to_file(self, solutions, filename):\n if not os.path.exists(os.path.abspath(self.results_folder)):\n os.mkdir(os.path.abspath(self.results_folder))\n filename = os.path.join(os.path.abspath(self.results_folder),filename)\n with open(filename,'w') as f:\n f.write(\"Gen\\tFit\\tWins\\tLosses\\tTime\\n\")\n for s in solutions:\n f.write(\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\n 
{5}\\n\".format(s.generation, round(s.fitness), s.wins, s.losses, \\\n round(s.average_time,2), s.get_config_file()))", "title": "" }, { "docid": "a0f9aa39ce24b79f84eb03608acae685", "score": "0.5414569", "text": "def export_wd_results(self):\n if self.ui.wd_comboSelect.currentIndex() == 5: #DEMAND CALIBRATION\n f = open(self.project_path+\"/Output-DemandCalibration.csv\", 'w')\n f.write(\"Base Model, UrbanBEATS Modelled \\n\")\n plotdata = self.current_active_plotdata[\"inp vs. mod.\"]\n print plotdata\n for i in range(len(plotdata)):\n f.write(str(plotdata[i][0])+\",\"+str(plotdata[i][1])+\"\\n\")\n\n f.close()\n QtGui.QMessageBox.warning(self, \"Export Complete\", \"Results for current graph successfully \\n exported to project path!\", QtGui.QMessageBox.Ok)\n return True\n\n\n graphnames = [\"none\", \"AvgEnduses\", \"24hUse\", \"EPUse\", \"Recycled\"] #Based on plotting categories\n curgraph = graphnames[self.ui.wd_comboSelect.currentIndex()]\n\n if self.ui.wd_unitslps.isChecked():\n units = \"LPS\"\n cf = 1.0\n else:\n units = \"kL\"\n cf = 1.0/1000.0 #cf = conversion factor\n\n if self.current_active_plotdata == None:\n QtGui.QMessageBox.warning(self, \"No Export\", \"Nothing to export! Plot some results.\", QtGui.QMessageBox.Ok)\n return True\n else:\n curscope = self.ui.wd_listwidget.currentItem().text()\n #ParseFilename and create .csv file\n f = open(self.project_path+\"/Output-\"+curgraph+\"-[\"+units+\"]-\"+curscope+\".csv\", 'w')\n\n if curgraph == \"AvgEnduses\":\n f.write(\"Coming Soon!\")\n\n elif curgraph == \"24hUse\":\n f.write(\"Time, 0:00, 1:00, 2:00, 3:00, 4:00, 5:00, 6:00, 7:00, 8:00, 9:00, 10:00, 11:00, 12:00,\"\n \"13:00, 14:00, 15:00, 16:00, 17:00, 18:00, 19:00, 20:00, 21:00, 22:00, 23:00\\n\")\n for enduse in self.current_active_plotdata.keys():\n datastring = enduse+\",\"\n for v in range(len(self.current_active_plotdata[enduse])):\n datastring += str(float(self.current_active_plotdata[enduse][v])*cf)+\",\"\n datastring.rstrip(',')\n datastring += \"\\n\"\n f.write(datastring)\n\n elif curgraph == \"EPUse\":\n f.write(\"Coming Soon!\")\n\n elif curgraph == \"Recycled\":\n f.write(\"Coming Soon!\")\n\n f.close()\n QtGui.QMessageBox.warning(self, \"Export Complete\", \"Results for current graph successfully \\n exported to project path!\", QtGui.QMessageBox.Ok)\n return True", "title": "" }, { "docid": "ecbd831587de2c0e50782625549caae0", "score": "0.5400938", "text": "def write_text(self, path=None):\n ext = '.tsv'\n add_name = '_' + self.univariate.region.name\n condition_path = self._get_path(user_root=path, write=True)\n ffmt = '%.8e' # floating point numbers\n ifmt = '%d' # integers\n bname = 'onepoint' + add_name + ext\n fname = os.path.join(condition_path, bname)\n names = self.onepoint.dtype.names\n header = '\\t'.join(names)\n fmt = [ifmt if 'count' in n_ else ffmt for n_ in names]\n np.savetxt(fname, self.onepoint, fmt=fmt,\n delimiter='\\t', comments='', header=header)\n\n for key in ['count_two', 'autocorr']:\n array = self[key]\n bname = key + add_name + ext\n fname = os.path.join(condition_path, bname)\n if 'count' in key:\n fmt = ifmt\n else:\n fmt = ffmt\n np.savetxt(fname, array, fmt=fmt, delimiter='\\t')\n return", "title": "" }, { "docid": "5a86fc011355701468bbfbaaeb64d55f", "score": "0.53988516", "text": "def save_multiple_results(resultfile, obj_func_file, results):\n #Calculate the forward model using the inversion result\n \n for i in range(len(results)):\n #Save the best result\n# np.savetxt(resultfile, results[i].x, 'ab')\n# 
np.savetxt(obj_func_file, results[i].fun, 'ab')\n resultfile= open('result.csv', 'a')\n np.savetxt(resultfile, np.array([results[i].x]))\n obj_func_file= open('objfunc.csv', 'a')\n np.savetxt(obj_func_file, np.array([results[i].fun]))\n resultfile.close()\n obj_func_file.close()", "title": "" }, { "docid": "32fc08d1fc6bd338895fb9492c2f48ba", "score": "0.5393783", "text": "def _write4d(self, *args, **kwargs):\n try:\n units = kwargs['units']\n except:\n units = ['adimensional','adimensional',\n 'adimensional','adimensional']\n \n x_labels = [args[i] for i in range(len(args)-1)]\n self.y = args[-1]\n fname = self.id+'_'+args[0]+args[1]+'.dat'\n self._backup_file(fname)\n\n self.info = '' + self.fname + ' ' + ' matteo.vallar@igi.cnr.it ' + \\\n time.strftime(\"%d/%m/%Y\")\n self.info2 = 'For each dimension: name units number of bins min max'\n self.header = '' \n for i in range(len(args)-1):\n self.header += args[i]+' '\n self.header += units[i]+' ' \n self.header += ' '+str(len(self.dict_dim[x_labels[i]]))\n self.header += ' '+str(min(self.dict_dim[x_labels[i]]))\n self.header += ' '+str(max(self.dict_dim[x_labels[i]]))\n self.header += ' '\n \n self.header += '\\n'\n self.header += \"# Normalisation : {0:.5e}\".format(round(self.norm, 2))\n \n f_handle = open(fname,'w')\n f_handle.write('# '+self.info+'\\n')\n f_handle.write('# '+self.info2+'\\n') \n f_handle.write('# '+self.header+'\\n')\n #for lab in x_labels:\n # np.savetxt(f_handle, self.dict_dim[lab])\n for sl in self.y[0,:,:,:,:]:\n for ssll in sl:\n np.savetxt(f_handle, ssll, fmt='%.5e')\n f_handle.write('\\n')\n f_handle.write('\\n')", "title": "" }, { "docid": "3b9c9ddd8fd0f734a9224764ce135955", "score": "0.53901887", "text": "def solidsPy_DYN(write_VTK = True , folder = None):\n if folder is None:\n folder, name = pre.initial_params()\n start_time = datetime.now()\n\n # Pre-processing\n inipar , nodes, mats, elements, loads = pre.readin(folder=folder)\n ninc , T , Tc , fc , dt , ac , theta = pre.intparams(inipar)\n DME , IBC , neq = ass.DME(nodes, elements)\n print(\"Number of nodes: {}\".format(nodes.shape[0]))\n print(\"Number of elements: {}\".format(elements.shape[0]))\n print(\"Number of equations: {}\".format(neq))\n\n # System assembly\n KG , MG , CG = ass.assembler(elements, mats, nodes, neq, DME)\n RHSG = ass.loadasem(loads , IBC , neq , ninc , T , Tc , fc)\n KE = ass.effective(KG , MG , CG , ac )\n #%% INITIAL CONDITIONS \n U , V , A = sol.initial_conds(ninc , neq, RHSG , MG , KG , CG )\n print(\"Finished initial conditions....: {}\".format(0))\n del KG\n start_time = datetime.now()\n\n # System solution\n U = sol.time_implicit(neq , ninc , dt , theta , ac , U , V , A , RHSG , MG , CG , KE)\n end_time = datetime.now()\n print('Duration for system solution: {}'.format(end_time - start_time))\n\n # Post-processing\n start_time = datetime.now()\n if write_VTK:\n for i in range(0, ninc, 2):\n UC = pos.complete_disp(IBC, nodes, U[:, i])\n u_vec = np.zeros((nodes.shape[0], 3))\n u_vec[:, 0:2] = UC\n field = {\"displacements\": u_vec}\n pos.vtk_maker_chimba3(nodes, elements ,\n \"scatteter_{}\".format(i),\n field=field)\n \n end_time = datetime.now()\n print('Duration for post processing: {}'.format(end_time - start_time))\n print('Analysis terminated successfully!')\n return (U , folder , IBC , ninc , T)", "title": "" }, { "docid": "7eaff01e211687a4ac86c391ed0c4c2e", "score": "0.5382932", "text": "def save_function(self,button) :\n\n file_chooser = gtk.FileChooserDialog(\"Open...\", self, gtk.FILE_CHOOSER_ACTION_SAVE, 
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))\n response = file_chooser.run()\n path=''\n if response == gtk.RESPONSE_OK :\n path = file_chooser.get_filename()\n self.log('Curve saved in file: ' + path)\n self.log(\"---------------------------------------------------------------------------\")\n if \".csv\" not in path:\n path = path + '.csv'\n file_chooser.destroy()\n\n ofile = open(path,\"wb\")\n writer = csv.writer(ofile, delimiter=',')\n writer.writerow(self.function.params)\n writer.writerow(self.function.std_err)\n writer.writerow(self.function.p_value)\n writer.writerow(self.function.cov_mtx[0])\n writer.writerow(self.function.cov_mtx[1])\n writer.writerow(self.function.cov_mtx[2])\n writer.writerow((self.function.rss, self.function.rmse, 0.0))\n ofile.close()\n else :\n file_chooser.destroy()", "title": "" }, { "docid": "5da081b5e34e5d636f7fbf230fbba907", "score": "0.5380933", "text": "def write(self, filename, contents=\"xyzrg\", output_mesh=True):\n\n if contents == \"xyzrg\":\n np.savetxt(filename, self.xyzrg, delimiter=' ')\n logger.debug(\"{0} written to xyzrg file: {1}\".format(self.name, filename))\n elif contents == \"xyzr\":\n np.savetxt(filename, self.xyzr, delimiter=' ')\n logger.debug(\"{0} written to xyzr file: {1}\".format(self.name, filename))\n elif contents == \"xyz\":\n np.savetxt(filename, self.xyz, delimiter=' ')\n logger.debug(\"{0} written to xyz file: {1}\".format(self.name, filename))\n\n if output_mesh:\n if self.mesh is None:\n logger.error(\"Cannot write out an uninitialized mesh\")\n raise ValueError(\"Mesh can not be written to file corresponding to {0}\".format(filename))\n else:\n output_mesh = \"{0}.obj\".format(os.path.splitext(filename)[0])\n self.mesh.export(file_obj = output_mesh)\n logger.debug(\"{0} written to obj file: {1}.obj\".format(self.name, os.path.splitext(filename)[0]))", "title": "" }, { "docid": "2a8ec90f35ed73f4e8fde148c1b5418c", "score": "0.5370901", "text": "def _write_file(self, result: str, ofile: t_filename) -> None:\n if not self._simulate:\n if self._verbose:\n logger.info(f'output: {ofile}')\n result.oscal_write(pathlib.Path(ofile))", "title": "" }, { "docid": "8e784030e2ff09e39db34a7f055a4732", "score": "0.53648376", "text": "def write_regression_data():\n def bound_to_float(s):\n value = float(s)\n if value < 0.0:\n value = 0.0\n elif value >= 240.0:\n value = 240.0\n else:\n pass\n return value\n\n train_dir = sorted(glob.glob('/home/wuyang/workspace/python/poke/train_cube/run*'))\n train_dir_total = []\n\n min_yaw = 0\n max_yaw = 0\n\n for i, i_dir in enumerate(train_dir):\n rgb_dir = sorted(glob.glob(i_dir+'/*.jpg'))\n rgb_num = [int(item[-8:-4]) for item in rgb_dir]\n\n train_dir_aug = []\n\n with open(i_dir+'/actions.dat') as f:\n actions = [line.strip().split() for line in f.readlines()]\n\n rgb_0 = rgb_dir[0]\n ind = rgb_0.index('ke')+3\n for j, item in enumerate(rgb_num):\n action = actions[item]\n dx = bound_to_float(action[2])/240.0\n dy = bound_to_float(action[3])/240.0\n\n qx = float(action[8])\n qy = float(action[9])\n qz = float(action[10])\n qw = float(action[11])\n\n euler = tff.transformations.euler_from_quaternion((qx, qy, qz, qw))\n\n roll = euler[0]\n pitch = euler[1]\n yaw = euler[2]\n\n if yaw < 0:\n yaw = yaw+np.pi\n if yaw > np.pi/2:\n yaw = yaw - np.pi/2\n yaw_normalized = yaw/(np.pi/2)\n #yaw_normalized = (yaw+np.pi)/(2*np.pi)\n # yaw range [-3.141590, 3.141587]\n #if yaw < min_yaw:\n # min_yaw = yaw\n #if yaw > max_yaw:\n # max_yaw= yaw\n\n #'%.4e'%roll, '%.4e'%pitch, 
\n train_dir_aug.append(\n ' '.join([rgb_dir[j][ind:], '%.4e'%dx, '%.4e'%dy, '%.4e'%yaw_normalized]))\n\n print('image until %s included'%rgb_dir[j][ind:])\n train_dir_total.extend(train_dir_aug)\n\n with open('/home/wuyang/workspace/python/poke/compare.txt', 'wb') as f:\n for item in train_dir_total:\n f.write('%s\\n'%item)", "title": "" }, { "docid": "867bb54a99ef08eb5495dc25882a2368", "score": "0.5361263", "text": "def save(self):\n if self.output_folder_path:\n\n # Creating directories if they don't exist\n makedirs(dirname(join(self.output_folder_path, \"file\")), exist_ok=True)\n\n # ### Steps data ###\n csv_array = []\n for k, v in self.step_traces[\"scores\"].items():\n csv_array.append([k] + v)\n for k, v in self.step_traces[\"additional_values\"].items():\n csv_array.append([k] + v)\n csv_array.append([\"n_replaced\"] + self.step_traces[\"n_replaced\"])\n csv_array.append([\"timestamps\"] + self.step_traces[\"timestamps\"])\n\n with open(join(self.output_folder_path, 'steps.csv'), \"w\", newline='') as f:\n writer = csv.writer(f)\n for row in np.array(csv_array).T:\n writer.writerow(row)\n\n # ### All inserted individuals data ###\n if self.record_all_generated_individuals:\n csv_array = [[\"step\"] + self.all_generated_individuals_step,\n [\"SMILES\"] + self.all_generated_individuals_smiles,\n [\"obj_calls\"] + self.all_generated_individuals_n_obj_calls,\n [\"obj_value\"] + self.all_generated_individuals_obj_value,\n [\"improver\"] + self.all_generated_individuals_improver]\n\n with open(join(self.output_folder_path, \"all_generated.csv\"), \"w\") as f:\n writer = csv.writer(f)\n for row in np.array(csv_array).T:\n writer.writerow(row)\n\n # ### Last step population data ###\n csv_array = []\n\n # Mutation success history\n n_success_mut_str = []\n n_fail_mut_str = []\n for i, ind in enumerate(self.pop):\n n_success_mut_str.append(str(self.n_success_mut[i]))\n n_fail_mut_str.append(str(self.n_fail_mut[i]))\n\n csv_array.append([\"smiles\"] + self.pop_tabu_list)\n\n # Mutation success and failures\n csv_array.append([\"n_success_mut\"] + n_success_mut_str)\n csv_array.append([\"n_failures_mut\"] + n_fail_mut_str)\n\n # Scores data\n self.curr_total_scores, self.curr_scores = self.evaluation_strategy.get_population_scores()\n step_scores_dict = scores_to_scores_dict(self.curr_total_scores,\n self.curr_scores,\n self.evaluation_strategy.keys())\n\n for k, scores_list in step_scores_dict.items():\n scores_list_np = np.full((self.pop_max_size,), None)\n scores_list_np[:len(scores_list)] = scores_list\n csv_array.append([k] + list(scores_list_np))\n\n # Action history data\n csv_array.append([\"history_data\"] + self.actions_history)\n\n with open(join(self.output_folder_path, 'pop.csv'), \"w\", newline='') as f:\n writer = csv.writer(f)\n for row in np.array(csv_array).T:\n writer.writerow(row)\n\n # ### Removed individuals actions recording ###\n if self.record_history:\n with open(join(self.output_folder_path, 'removed_ind_act_history.csv'), \"w\", newline='') as f:\n\n writer = csv.writer(f)\n writer.writerow([\"history_data\", \"total\"] + self.evaluation_strategy.keys() + [\"smiles\"])\n\n for removed_act_history in self.removed_actions_score_smi_tuple.keys():\n if removed_act_history != \"\":\n total_score = self.removed_actions_score_smi_tuple[removed_act_history][0]\n scores = self.removed_actions_score_smi_tuple[removed_act_history][1]\n smi = self.removed_actions_score_smi_tuple[removed_act_history][2]\n\n writer.writerow([removed_act_history, total_score] + list(scores) + 
[smi])\n\n # ### Errors data ###\n with open(join(self.output_folder_path, 'errors.csv'), \"w\", newline='') as f:\n writer = csv.writer(f)\n writer.writerow([\"step\", \"error\"])\n for error in self.errors:\n writer.writerow(error)", "title": "" }, { "docid": "541f031ad18e63cf0ab4cb495c876d83", "score": "0.53601897", "text": "def write_exyz(self):\n label = self.label\n\n idx = int( re.findall('(\\d+)', label)[0] )\n atoms = self.atoms\n ps = atoms.positions\n\n fo = label + '_U.XYZ'\n with open(fo,'w') as foh:\n foh.write('%d\\n'%na)\n # set all properties except H to 0.0\n foh.write('gdb %d 0. 0. 0. 0. 0. %.6f %.6f %.6f 0. 0. 0. 0. %.6f 0. 0.\\n'%(idx, homo, lumo, gap, E))\n for k in range(na):\n px, py, pz = ps[k]\n if k < na - 1:\n foh.write('%s %s %s %s 0.0\\n'%(symbs[k], px, py, pz))\n else:\n foh.write('%s %s %s %s 0.0'%(symbs[k], px, py, pz))", "title": "" }, { "docid": "1abccc6cf2075def67f1be343339ed5f", "score": "0.5354155", "text": "def writeNC(self, outfile, tt, x, y, Uwind, Vwind, Tair, Cloud, RH, Pair, Rain): \n Nstation = x.shape[0] \n Nt = len(tt)\n \n nc = Dataset(outfile, 'w', format='NETCDF4_CLASSIC')\n nc.Description = 'SUNTANS History file'\n nc.Author = ''\n nc.Created = datetime.now().isoformat()\n ####Create dimensions####\n nc.createDimension('NVwind', Nstation)\n nc.createDimension('NTair', Nstation)\n nc.createDimension('Nrain', Nstation)\n nc.createDimension('NUwind', Nstation)\n nc.createDimension('NPair', Nstation)\n nc.createDimension('NRH', Nstation)\n nc.createDimension('Ncloud', Nstation)\n nc.createDimension('nt', Nt)\n nc.close()\n \n def create_nc_var(outfile, name, dimensions, attdict, dtype='f8',zlib=False,complevel=0,fill_value=None):\n \n nc = Dataset(outfile, 'a')\n tmp=nc.createVariable(name, dtype, dimensions,zlib=zlib,complevel=complevel,fill_value=fill_value)\n for aa in attdict.keys():\n tmp.setncattr(aa,attdict[aa])\n #nc.variables[name][:] = var\t\n nc.close()\n \n ####adding variables####\n create_nc_var(outfile,'x_Vwind',('NVwind'),{'long_name':'Longitude at Vwind','units':'degrees_north'})\n create_nc_var(outfile,'y_Vwind',('NVwind'),{'long_name':'Latitude at Vwind','units':'degrees_east'})\n create_nc_var(outfile,'z_Vwind',('NVwind'),{'long_name':'Elevation at Vwind','units':'m'})\n \n create_nc_var(outfile,'x_Tair',('NTair'),{'long_name':'Longitude at Tair','units':'degrees_north'})\n create_nc_var(outfile,'y_Tair',('NTair'),{'long_name':'Latitude at Tair','units':'degrees_east'})\n create_nc_var(outfile,'z_Tair',('NTair'),{'long_name':'Elevation at Tair','units':'m'})\n \n create_nc_var(outfile,'x_rain',('Nrain'),{'long_name':'Longitude at rain','units':'degrees_north'})\n create_nc_var(outfile,'y_rain',('Nrain'),{'long_name':'Latitude at rain','units':'degrees_east'})\n create_nc_var(outfile,'z_rain',('Nrain'),{'long_name':'Elevation at rain','units':'m'})\n \n create_nc_var(outfile,'x_Uwind',('NUwind'),{'long_name':'Longitude at Uwind','units':'degrees_north'})\n create_nc_var(outfile,'y_Uwind',('NUwind'),{'long_name':'Latitude at Uwind','units':'degrees_east'})\n create_nc_var(outfile,'z_Uwind',('NUwind'),{'long_name':'Elevation at Uwind','units':'m'})\n \n create_nc_var(outfile,'x_Pair',('NPair'),{'long_name':'Longitude at Pair','units':'degrees_north'})\n create_nc_var(outfile,'y_Pair',('NPair'),{'long_name':'Latitude at Pair','units':'degrees_east'})\n create_nc_var(outfile,'z_Pair',('NPair'),{'long_name':'Elevation at Pair','units':'m'})\n \n create_nc_var(outfile,'x_RH',('NRH'),{'long_name':'Longitude at 
RH','units':'degrees_north'})\n create_nc_var(outfile,'y_RH',('NRH'),{'long_name':'Latitude at RH','units':'degrees_east'})\n create_nc_var(outfile,'z_RH',('NRH'),{'long_name':'Elevation at RH','units':'m'})\n \n create_nc_var(outfile,'x_cloud',('Ncloud'),{'long_name':'Longitude at cloud','units':'degrees_north'})\n create_nc_var(outfile,'y_cloud',('Ncloud'),{'long_name':'Latitude at cloud','units':'degrees_east'})\n create_nc_var(outfile,'z_cloud',('Ncloud'),{'long_name':'Elevation at cloud','units':'m'})\n \n create_nc_var(outfile,'Time',('nt'),{'units':'seconds since 1990-01-01 00:00:00','long_name':'time'})\n create_nc_var(outfile,'Vwind',('nt','NVwind'),{'units':'m s-1','long_name':'Northward wind velocity component','coordinates':'x_Vwind,y_Vwind'})\n create_nc_var(outfile,'Tair',('nt','NTair'),{'units':'Celsius','long_name':'Air Temperature','coordinates':'x_Tair,y_Tair'})\n create_nc_var(outfile,'rain',('nt','Nrain'),{'units':'kg m2 s-1','long_name':'rain fall rate','coordinates':'x_rain,y_rain'})\n create_nc_var(outfile,'Uwind',('nt','NUwind'),{'long_name':'Eastward wind velocity component','coordinates':'x_Uwind,y_Uwind','units':'m s-1'})\n create_nc_var(outfile,'Pair',('nt','NPair'),{'units':'hPa','long_name':'Air Pressure','coordinates':'x_Pair,y_Pair'})\n create_nc_var(outfile,'RH',('nt','NRH'),{'units':'percent','long_name':'Relative Humidity','coordinates':'x_RH,y_RH'})\n create_nc_var(outfile,'cloud',('nt','Ncloud'),{'units':'dimensionless','long_name':'Cloud cover fraction','coordinates':'x_cloud,y_cloud'})\n \n \n z = np.ones([Nstation])*2\n ## change time units\n time_new = SecondsSince(tt)\n# ##Tair, rain, Pair, RH, cloud are set to be constant due to a lack of information\n# Tair = np.ones([Nt, Nstation])*30.0\n# rain = np.ones([Nt, Nstation])*0.0\n# Pair = np.ones([Nt, Nstation])*1010.0\n# RH = np.ones([Nt, Nstation])*50.0\n# cloud = np.ones([Nt, Nstation])*0.0\n ######Now writting the variables######\n nc = Dataset(outfile,'a')\n nc.variables['x_Vwind'][:] = x\n nc.variables['y_Vwind'][:] = y\n nc.variables['z_Vwind'][:] = z\n \t\n nc.variables['x_Tair'][:] = x\n nc.variables['y_Tair'][:] = y\n nc.variables['z_Tair'][:] = z\n \n nc.variables['x_rain'][:] = x\n nc.variables['y_rain'][:] = y\n nc.variables['z_rain'][:] = z\t\n \t\n nc.variables['x_Uwind'][:] = x\n nc.variables['y_Uwind'][:] = y\n nc.variables['z_Uwind'][:] = z\n \n nc.variables['x_Pair'][:] = x\n nc.variables['y_Pair'][:] = y\n nc.variables['z_Pair'][:] = z\n \n nc.variables['x_RH'][:] = x\n nc.variables['y_RH'][:] = y\n nc.variables['z_RH'][:] = z\n \n nc.variables['x_cloud'][:] = x\n nc.variables['y_cloud'][:] = y\n nc.variables['z_cloud'][:] = z\n \n nc.variables['Time'][:] = time_new\n nc.variables['Vwind'][:] = Vwind\n nc.variables['Tair'][:] = Tair\n nc.variables['rain'][:] = Rain\n nc.variables['Uwind'][:] = Uwind\n nc.variables['Pair'][:] = Pair\n nc.variables['RH'][:] = RH\n nc.variables['cloud'][:] = Cloud\n \n print \"Ending writing variables into netcdf file !!!\"\n nc.close()", "title": "" }, { "docid": "94c2cdbe00a1586229821797e6d6b831", "score": "0.53538364", "text": "def save_thdata(th_results, ispin, ik, param):\n q_out = param.get('output quantities')\n fmt_out = param.get('output formats')\n path = param.get('output path')\n\n spink = \".spin-{:1d}.k-{:02d}\".format(ispin,ik)\n basename = path + \"/{}\" + spink\n\n # write numpy npy files\n if fmt_out.get('NPY'):\n for dname in ['rho_0_fd', 'drho_P', 'drho_I', 'rho_0_orig']:\n filename = basename.format(dname) + \".npy\"\n 
np.save(filename, th_results.get(dname))\n \n\n\n # write XCrysden xsf files\n if fmt_out.get('XSF'):\n pass\n\n # write NetCDF files\n if fmt_out.get('NetCDF'):\n pass", "title": "" }, { "docid": "5c62ccc4b75d9629007aa0a2afedb9e7", "score": "0.535006", "text": "def save_vtk(self, filename: str) -> \"Dispersion\":\n self._surface.save(filename)\n\n return self", "title": "" }, { "docid": "0c43681f32ea515e22903c7282f4bf6c", "score": "0.53477633", "text": "def write_0d_solver_file(mesh, params, model):\n # print(\"Write solver file.\")\n mesh.logger.info(\"Write 0d solver file.\")\n output_dir = params.output_directory\n file_name = path.join(output_dir, params.solver_output_file)\n model_name = params.model_name\n sp = mesh.space\n # print(\"Solver file %s\" % file_name)\n\n # Open file\n ofile = mesh.Open(file_name, \"w\")\n\n # write header\n header = Headers().model(model_name)\n write_solver_section_header(mesh, ofile, header)\n\n # Write node section.\n write_solver_nodes(mesh, ofile, params)\n\n # Write joint section.\n write_solver_joints(mesh, ofile, params)\n\n # Write junctions section.\n write_solver_junctions(mesh, ofile)\n\n # Write element section.\n write_solver_elements(mesh, model, ofile, params)\n\n # Write inlet boundary condition section.\n write_inlet_bc(mesh, ofile, params)\n\n # Write solver options section.\n write_solver_options(mesh, ofile, params)\n\n ofile.close()", "title": "" }, { "docid": "dd0f9ca69ed290f0a504aca121d5ad34", "score": "0.53451073", "text": "def output_file(filepath,datafile):\n filename = \"%s\"%(filepath.replace(\"\\n\", \"\")) + \"/\" + datafile\n try:\n f = h5py.File(filename, 'r')\n para_a,para_b,dset = f.keys()\n d_set = f[dset]\n data = np.array(d_set[:,:])\n f.close()\n except:\n print(\"Open file\", datafile, \"failed!\")\n\n if ifTxt == \"on\":\n outname = \"%s\"%(filename.replace(\"/FLD\",\"/txtdata/\"))\n outname = outname[:outname.index(\"//\")] + \"/\" + datafile\n outname = \"%s\"%(outname.replace(\".h5\",\".txt\"))\n try:\n np.savetxt(outname, data)\n except:\n print(\"Write file\", outname, \"failed!\")\n\n if ifImage == \"on\":\n try:\n outname = \"%s\"%(filename.replace(\"/FLD\",\"/imgdata/\"))\n outname = outname[:outname.index(\"//\")] + \"/\" + datafile\n outname = \"%s\"%(outname.replace(\".h5\",\".png\"))\n fig = plt.figure()\n ax = fig.gca()\n im = ax.imshow(data,cmap=\"bwr\",vmax=10,vmin=-10)\n plt.title(datafile[:datafile.index(\".\")])\n cb = plt.colorbar(im,orientation='horizontal')\n tick_locator = ticker.MaxNLocator(nbins=9)\n cb.locator = tick_locator\n cb.set_ticks([-10,-7.5,-5,-2.5,0,2.5,5,7.5,10])\n cb.update_ticks()\n plt.savefig(outname)\n plt.close()\n except:\n print(\"Write file\", outname, \"failed!\")", "title": "" }, { "docid": "f5d0e53d4269e488199c3811db0617e5", "score": "0.53444934", "text": "def save_beta_ode_result(self, fit_file, params_file):\n assert self.ode_model is not None, 'Must fit_beta_ode first.'\n # save ode fit\n self.get_beta_ode_fit().to_csv(fit_file, index=False)\n # save other parameters\n self.get_beta_ode_params().to_csv(params_file, index=False)", "title": "" }, { "docid": "ceeafb8ffeedf8e9ceb28cd57a131f4f", "score": "0.5343615", "text": "def write_processed(filename, date, eto):\r\n f1 = open(filename, \"w\")\r\n f1.write(\"Year,Month,Day,Hour,ETo (in)\\n\")\r\n for d in range(len(date)):\r\n f1.write(\"%d,%d,%d,%d,%f\\n\" % (date[d].year, date[d].month, date[d].day, date[d].hour, eto[d]))\r\n f1.close()", "title": "" }, { "docid": "109b0af2f2710ffde662f8598b422dda", "score": 
"0.53415", "text": "def write_mesh_to_vtk(self, filename):\n pts, conn = self.vert, self.elm[:]\n Points, Cells = vtk.vtkPoints(), vtk.vtkCellArray()\n\n # add node / connectivity information to VTK object\n for pt in pts:\n Points.InsertNextPoint(pt)\n\n grid = vtk.vtkUnstructuredGrid()\n grid.SetPoints(Points)\n\n for idx, cn in enumerate(conn):\n if cn[-1] == -1:\n cell = vtk.vtkWedge()\n cnRef = cn[0:6]\n for idx,pt in enumerate(cnRef):\n cell.GetPointIds().SetId(idx,pt)\n\n else:\n cell = vtk.vtkHexahedron()\n for idx, pt in enumerate(cn):\n cell.GetPointIds().SetId(idx, pt)\n grid.InsertNextCell(cell.GetCellType(), cell.GetPointIds())\n\n\n writer = vtk.vtkXMLUnstructuredGridWriter();\n writer.SetInputData(grid)\n writer.SetFileName(filename);\n writer.Write()", "title": "" }, { "docid": "38662b9bd7ffe9d9eea4c0ed93f0a57a", "score": "0.534044", "text": "def write_out(self):\n now = datetime.now()\n out_file = '/grp/hst/wfc3t/sasp/code/bey_viewer_{}_{}_{:02d}:{:02d}.log'. \\\n format(now.month, now.day, now.hour, now.minute)\n\n with open(out_file, 'w') as f:\n for coord in self.coords_list:\n f.write(coord)", "title": "" }, { "docid": "eb7029623b567cb8e46d55b3c4a30e98", "score": "0.5339711", "text": "def cellDataToLegacyVTK(output_handle, field_handle, frame_handle, invariant=''):\n field_name = field_handle.name.replace(\" \", \"_\")\n field_type = str(field_handle.type)[0:6] # SCALAR | VECTOR | TENSOR\n\n if field_type == \"TENSOR\" and invariant == \"\":\n output_handle.write(\"\\nTENSORS {} double\".format(field_name))\n print(\"Appending cell tensor field {}:\".format(str(len(field_handle.values))))\n if len(field_handle.componentLabels) == 6: # 3D tensor\n for tensor in field_handle.values: \n output_handle.write('\\n' + str(tensor.data[0]) + ' ') # Sxx\n output_handle.write(str(tensor.data[3]) + ' ') # Sxy\n output_handle.write(str(tensor.data[4])) # Sxz\n output_handle.write('\\n' + str(tensor.data[3]) + ' ') # Syx\n output_handle.write(str(tensor.data[1]) + ' ') # Syy\n output_handle.write(str(tensor.data[5])) # Syz\n output_handle.write('\\n' + str(tensor.data[4]) + ' ') # Szx\n output_handle.write(str(tensor.data[5]) + ' ') # Szy\n output_handle.write(str(tensor.data[2]) + '\\n') # Szz\n elif len(field_handle.componentLabels) == 4: # 2D tensor\n for tensor in field_handle.values:\n output_handle.write('\\n' + str(tensor.data[0]) + ' ') # Sxx\n output_handle.write(str(tensor.data[3]) + ' ') # Sxy\n output_handle.write('0.0') # Sxz\n output_handle.write('\\n' + str(tensor.data[3]) + ' ') # Syx\n output_handle.write(str(tensor.data[1]) + ' ') # Syy\n output_handle.write('0.0') # Syz\n output_handle.write('\\n' + '0.0 ') # Szx\n output_handle.write('0.0 ') # Szy\n output_handle.write(str(tensor.data[2]) + '\\n') # Szz\n elif field_type == \"TENSOR\" and invariant == \"MISES\":\n output_handle.write(\"\\nSCALARS MISES double\")\n output_handle.write('\\nLOOKUP_TABLE default\\n')\n print('Appending Von Mises field values:')\n for value in field_handle.values: \n output_handle.write(str(value.mises) + '\\n')\n elif field_type == \"VECTOR\": # Are there really any interesting cell vectors?\n output_handle.write(\"\\nVectors {} float\".format(field_name))\n for vector in field_handle.values:\n if len(vector.data) == 3:\n output_handle.write('\\n'+ str(vector.data[0]) + ' ' + str(vector.data[1]) + ' ' + str(vector.data[2]))\n elif len(vector.data) == 2:\n output_handle.write('\\n'+ str(vector.data[0]) + ' ' + str(vector.data[1]) + ' 0.0')\n elif field_type == \"SCALAR\":\n 
output_handle.write(\"\\nSCALARS {} double\".format(field_name))\n output_handle.write('\\nLOOKUP_TABLE default\\n')\n\n print(\"Appending cell scalar field {}:\".format(field_name))\n for value in field_handle.values: \n output_handle.write(str(value.mises) + '\\n')\n\n return", "title": "" }, { "docid": "270a3958df2704a7b8a94364b2158b4f", "score": "0.5336978", "text": "def writeGNOME(self, outfile):\n \n ## Step One: intepolate wind to a regular grid\n Num = 20\n lon = np.linspace(self.bbox[0], self.bbox[1], Num)\n lat = np.linspace(self.bbox[2], self.bbox[3], Num) \n \n lon_new, lat_new = np.meshgrid(lon, lat)\n \n x = np.zeros_like(lat_new)\n y = np.zeros_like(lon_new)\n for i in range(Num):\n for j in range(Num):\n (x[i,j], y[i,j]) = utm.from_latlon(lat_new[i,j], lon_new[i,j])[0:2]\n \n xncep = np.zeros_like(self.lat)\n yncep = np.zeros_like(self.lon)\n for i in range(len(self.lat)):\n (xncep[i], yncep[i]) = utm.from_latlon(self.lat[i], self.lon[i])[0:2]\n \n xy = np.vstack((x.ravel(),y.ravel())).T\n xy_ncep = np.vstack((xncep.ravel(),yncep.ravel())).T\n \n F = interpXYZ(xy_ncep, xy, method='idw')\n \n Nt = len(self.timei)\n air_u_new = np.zeros([Nt, Num, Num])\n air_v_new = np.zeros([Nt, Num, Num])\n \n for tstep in range(Nt):\n utem = F(self.air_u[tstep,:].ravel())\n vtem = F(self.air_v[tstep,:].ravel())\n air_u_new[tstep,:,:]=utem.reshape(Num, Num)\n air_v_new[tstep,:,:]=vtem.reshape(Num, Num)\n \n ## Step Two: write the data to GNOME file\n GNOME_wind(outfile, self.timei, lat_new, lon_new, air_u_new, air_v_new)", "title": "" }, { "docid": "a35c05b24c10a0153f6ddb82abb669bd", "score": "0.53366095", "text": "def writeFile(self,output,results):\n gen = results['gen']\n if self.ped: ped = self.ped\n else: ped = results['ped']\n pedlist = ped['pedlist']\n if self.mark: mark = self.mark\n else: mark = results['mark']\n marklist = mark['marklist']\n with open(output,'w') as fout:\n fout.write('I\\tid\\t%s\\n' % '\\t'.join([p+\"\\t\"+p for p in pedlist]))\n for i,m in enumerate(marklist):\n fout.write('M\\t%s' % m)\n try:\n rm = results['mark'][m]['rank']\n except KeyError:\n sys.stderr.write('ERROR: Missing marker \"%s\" in genotype file\\n' % m)\n rm = None\n for animal in pedlist:\n try:\n ra = results['ped'][animal]['rank']\n except KeyError:\n sys.stderr.write('ERROR: Missing sample \"%s\" in genotype file\\n' % animal)\n ra = None\n if ra and rm:\n a = gen[ra,rm]\n else:\n a = 0\n if a == 0:\n a = '00'\n else:\n a = '%.0f' % a\n fout.write('\\t%s\\t%s' % (self.trans.get(a[0],'0'),self.trans.get(a[1],'0')))\n fout.write('\\n')\n fout.close()", "title": "" }, { "docid": "746fddf66ef0d0fc19914d1a0d655a20", "score": "0.53356105", "text": "def output_file_dpckg(network_test_output, midi_note_testing_vector, testing_register_type, current_directory=os.getcwd()):\n network_test_output[:, 0] = [int(x) for x in midi_note_testing_vector]\n OUT_FOLDER_PATH = \"%s%s%s\" % (current_directory,'/Networks/',testing_register_type)\n if not os.path.exists(OUT_FOLDER_PATH):\n os.makedirs(OUT_FOLDER_PATH)\n PATH = \"%s%s%s%s%s\" % (current_directory,'/Networks/',testing_register_type,'/', testing_register_type)\n for rows in xrange(len(network_test_output)):\n\n if rows <10:\n tname = \"%s%s%d%s\" % (PATH,'_0',rows,'.txt')\n else:\n tname = \"%s%s%d%s\" % (PATH,'_', rows,'.txt')\n\n np.savetxt(tname,network_test_output[rows,:].reshape(1,len(network_test_output[rows,:])),newline=' ',fmt=\"%s%s\" %('%3d',' %1.9e'*59))", "title": "" }, { "docid": "ea0f99539cdc89777c6c6818f9e96b9b", "score": "0.5335056", 
"text": "def simulation_save_temp(self):\n with open(os.path.join('tmp','simulations',(str(self.id_simulation)+'full')), 'w') as stream:\n writer = csv.writer(stream)\n for row in range(self.gui.table_preview.rowCount()):\n rowdata = []\n for column in range(self.gui.table_preview.columnCount()):\n item = self.gui.table_preview.item(row, column)\n if item is not None:\n rowdata.append(\n unicode(item.text()).encode('utf8'))\n else:\n rowdata.append('')\n writer.writerow(rowdata)\n with open(os.path.join('tmp','simulations',(str(self.id_simulation)+'filter')), 'w') as stream:\n writer = csv.writer(stream)\n for row in range(self.gui.table_filtered.rowCount()):\n rowdata = []\n for column in range(self.gui.table_filtered.columnCount()):\n item = self.gui.table_filtered.item(row, column)\n if item is not None:\n rowdata.append(\n unicode(item.text()).encode('utf8'))\n else:\n rowdata.append('')\n writer.writerow(rowdata)\n with open(os.path.join('tmp','simulations',(str(self.id_simulation)+'stats')), 'w') as file_save:\n item = self.gui.tree_hits.topLevelItem(self.id_simulation)\n simulation = item.text(1) + ',' + item.text(2) + ',' +\\\n item.text(3) + ',' +item.text(4) + ',' +\\\n item.text(5) + ',' +item.text(6) + ',' +item.text(7) + ',' +\\\n item.text(8) + ',' +item.text(9)\n line = simulation+self.nl\n file_save.write(line)\n child_num = item.childCount()\n for i in range(0,child_num):\n name = item.child(i)\n name = name.text(0)\n line = name+self.nl\n file_save.write('.........'+line)", "title": "" }, { "docid": "f24dc108b7cc19595b2b13d634a74d01", "score": "0.5327364", "text": "def simulation(self, model_parameters, solver_parameters):\r\n\r\n self.set_model_parameters(parameters=model_parameters)\r\n self.generate_mesh()\r\n self.set_spaces()\r\n\r\n if self.save_pvd:\r\n self.pvd = File('paraview/result{}.pvd'.format(model_parameters['formulation']))\r\n for i in range(len(self.load_steps)):\r\n print('load step : {} kPa'.format(self.load_steps[i]))\r\n self.w_split()\r\n\r\n # the border conditions are set\r\n self.set_border_conditions(_pressure=self.load_steps[i])\r\n if model_parameters['formulation'] == 'primal':\r\n # the problem is solved\r\n self.solve(solver_parameters=solver_parameters)\r\n elif model_parameters['formulation'] == 'etvf':\r\n # the problem is solved\r\n self.solve(solver_parameters=solver_parameters)\r\n\r\n # save vtk\r\n if self.save_pvd:\r\n self.u = self.w.split()[0]\r\n self.u.rename('displacement', 'u')\r\n self.pvd << (self.u, float(i))\r\n\r\n print('Simulation finish')\r\n print('total iterations : {}'.format(self.total_iterations))\r\n print('simulation time : {} '.format(self.total_time))", "title": "" } ]
84e78c229edff92a2dfe9c829995bd95
Toma una dataframe de estados financieros y devuelve un vector de evolucion de una variable financiera
[ { "docid": "2576b42db78018823eb2d2d39eb98b62", "score": "0.6282502", "text": "def crea_df_de_puntos(df, concepto):\r\n #\r\n # Crea una lista vacia de fechas\r\n lista_de_fechas = []\r\n # Crea una lista vacia de valores\r\n lista_de_valores = []\r\n #\r\n # Donde 0 y 1 son las posiciones del index en las que estan las fechas y los valores respectivamente, hace un for loop para armar listas con las fechas y los valores.\r\n for i in list(df.columns):\r\n # Agrega a la lista vacia las fechas que tiene el dataframe en la fila llamada \"Close_date\"\r\n lista_de_fechas.append(df[i].loc['Close_date'])\r\n # Agrega a la lista vacia los importes que tiene el dataframe en la fila llamada \"LTM_NI\"\r\n lista_de_valores.append(df[i].loc[concepto])\r\n #\r\n # Imprime en pantalla la lista de fechas\r\n #print (lista_de_fechas)\r\n # Imprime en pantalla la lista de valores\r\n #print (lista_de_valores)\r\n #\r\n # Crea un dataframe llamado evolucion\r\n evolucion = pd.DataFrame() #Crea una matriz de pandas\r\n # Agrega la columna Fecha al dataframe con valor por default igual a 0 (cero)\r\n #evolucion[\"Fecha\"] = 0\r\n # Agrega la columna Fecha al dataframe con valor por default igual a \"\"\r\n evolucion[\"Fecha\"] = \"\"\r\n # Agrega la columna concepto al dataframe con valor por default igual a \"\"\r\n evolucion[concepto] = \"\"\r\n #\r\n # \r\n # Chequea que el largo de lista de fechas sea el mismo que lista de valores\r\n len(lista_de_fechas) == len(lista_de_valores)\r\n # Agrega al dataframe evolucion, una por una la fecha y un valor\r\n for i in range(0, (len(lista_de_fechas))):\r\n evolucion = evolucion.append({'Fecha': lista_de_fechas[i], concepto: lista_de_valores[i] }, ignore_index=True) \r\n #\r\n # Establece la fecha como el indice de dataframe\r\n evolucion = evolucion.set_index('Fecha', inplace=False) # Sete el indice como la columna Fecha\r\n # Establece el formato del indice en formato datetime\r\n evolucion.index = pd.to_datetime(evolucion.index, format=\"%Y-%m-%d\") # Convertimos el indice en date time con el formato ANO-MES-DIA\r\n # Convierte la columna \"concepto\" en punto flotante\r\n evolucion[concepto] = evolucion[concepto].astype(float) # Convierte la columna de concepto a punto flotante \r\n return evolucion", "title": "" } ]
[ { "docid": "6df014f7b61a113cc7a2f6ea2df380d6", "score": "0.624267", "text": "def get_dataframe_implantaciones(self):\r\n \r\n return self.v.df_implantaciones.copy()", "title": "" }, { "docid": "dd867f3ac997ed01c0a1b77a6882a3aa", "score": "0.6163258", "text": "def var_calc(i_input):\n # for i in range(0, len(unique_matches)):\n # - i = 10000\n \n player_sel = unique_matches.iloc[i_input, 1]\n date_sel = unique_matches.iloc[i_input, 0]\n \n condition1 = modelling_data.Player == player_sel\n condition2 = modelling_data.Date < date_sel\n \n modelling_temp = modelling_data[condition1 & condition2]\n \n # - calculate feature vector\n features_data = features_past(data_input = modelling_temp, \n last_n = (10, 15, 20, 25, 30, 40, 45, 50), \n date_input = date_sel, \n player_name = player_sel)\n return pd.DataFrame(features_data)", "title": "" }, { "docid": "d53ae8907000521fff625816b6c4a9fa", "score": "0.594219", "text": "def df_to_vector_1s(df):\n df_champs = pd.read_json(\"../data/champion/champion.json\")\n keys = sorted(list(df_champs[\"data\"].apply(lambda champ: int(champ['key']))))\n df_aux = df[[\"blueChamps\", \"redChamps\"]].copy()\n # Vector de 1 y -1 para los campeones\n df_aux[keys] = pd.DataFrame([[0] * len(keys)], index=df_aux.index)\n df_aux = df_aux.apply(convert_champs, axis=1)\n df_aux.drop(columns=['blueChamps', 'redChamps'], inplace=True)\n if 'blueWin' in df:\n df_aux[\"blueWin\"] = df['blueWin']\n return df_aux", "title": "" }, { "docid": "62c13f77d2c59027d07678bf9d65066d", "score": "0.5928287", "text": "def get_dataframe_ruedas(self):\r\n \r\n return self.v.df_ruedas.copy()", "title": "" }, { "docid": "f076ba7530874176e3dbbf97f14744ce", "score": "0.59032047", "text": "def get_dataframe_relaciones(self):\r\n \r\n return self.v.df_relaciones.copy()", "title": "" }, { "docid": "3564ee3cdf6b015988ce82714e3e1135", "score": "0.5798997", "text": "def data_frame_datos(self):\n \n df = pd.DataFrame()\n df['z_(m)'] = self.v.z\n df['Zona'] = self.v.zona\n df['v.b_(m/s)'] = self.v.vb\n df['q.b_(kN/m²)'] = self.v.qb\n df['pr_(años)'] = self.v.pr\n df['cc'] = self.v.cc\n df['Grado'] = self.v.grado\n df['k'] = self.v.k\n df['L'] = self.v.L\n df['Z'] = self.v.Z\n df['c.e'] = self.v.ce\n df['c.p'] = self.v.cp\n df['q.e (kN/m²)'] = self.v.qe\n df['Ratios'] = self.v.ratio\n \n return df", "title": "" }, { "docid": "15e8c2bbfa24d345547edf6b92d363cd", "score": "0.5793047", "text": "def from_vector() -> pandas.Series:\n return pandas.Series(data).infer_objects()", "title": "" }, { "docid": "ca7de5f02b8e58c1f450aa2fba3d1614", "score": "0.57774633", "text": "def unaEstacionDia(self, datam):\n firstR = datam.iloc[1,:]\n codigo=firstR.iloc[0]\n ai=int(firstR.iloc[1])\n mesi=1\n lastR = datam.iloc[-1, :]\n af = int(firstR.iloc[1])\n mesf=12\n # crea la serie con las fechas y valor vacio\n serie=pd.date_range((str(ai)+\"/01/01\"),(str(af)+\"/12/31\"),name=\"fecha\").to_frame(index=False)\n serie[\"val\"]=np.nan\n #se recorre cada fila del dataframe de datos y se calcula la posicion basado en el dia que ocupe\n posi=0\n posf=0\n cuentadias=0\n while ai <= af:\n while mesi <= mesf:\n dmes=self.getDiasmes(ai,mesi)\n posf+=dmes\n #filtrar los datos por fechas en las que se va recorriendo\n filtrado=datam[(datam['anio']==ai) & (datam['mes']==mesi)]\n if filtrado.empty == False:\n # print(\"fecha inicial \", ai, \"-\", mesi, \" fecha final \", af, \"-\", mesf, \"dias a contar \", dmes,\n # \"conteo \", str(cuentadias))\n serie.iloc[posi:posf,1] = filtrado.iloc[0,3:(dmes+3)].values\n mesi+=1\n posi=posf\n 
mesi=1\n ai+=1\n return serie", "title": "" }, { "docid": "0d11713e4c4be385eb50bc0f423d9647", "score": "0.57479966", "text": "def get_column_data(model):\n agent_state = model.datacollector.get_agent_vars_dataframe()\n model_state = model.datacollector.get_model_vars_dataframe()\n\n X = pd.pivot_table(agent_state.reset_index(),index='Step',columns='State',aggfunc=np.size,fill_value=0) \n X['r0'] = model_state['r0']\n X['viral_load_tree'] = model_state['viral_load_tree']\n\n labels = ['Susceptible','Infected','Removed','R0','Viral Load Tree']\n X.columns = labels[:len(X.columns)]\n X['Incidence'] = X['Susceptible'].diff() * -1\n X['Recovery'] = X['Removed'].diff()\n\n\n for j in range(X.shape[0],steps):\n X.loc[j] = 0\n X['Viral Load Tree'].loc[j] = []\n X['Days'] = X.index\n X['Days'] = X['Days'].div(steps_per_day)\n\n\n X['Incidence Sum'] = X['Incidence']\n X['Incidence Days'] = 0\n\n for i in range(0,days):\n X['Incidence Sum'].loc[i] = X['Incidence'][i*steps_per_day:(i+1)*steps_per_day].sum()\n X['Incidence Days'].loc[i] = i\n return X", "title": "" }, { "docid": "ca9e5c03588fb6435a2470591283d664", "score": "0.56887966", "text": "def dataframe(self):\n if self.sovt.clean:\n # Regardless create new dataframe\n self._make_df()\n elif not Path(self.sovt.path_out_root / \"data/segs.pkl\").is_file():\n self._make_df()\n else:\n self.sovt.segs = pd.read_pickle(self.sovt.path_out_root / \"data/segs.pkl\")", "title": "" }, { "docid": "657701161b1f5ecffd98f9e17187e9b8", "score": "0.56837314", "text": "def initialize_df():\n\n df = pd.DataFrame(columns=[\"Name\"], data=var[\"Name\"])\n df[\"type\"] = var[\"type\"]\n df[\"initial value\"] = var[\"init\"]\n df[\"min\"] = var[\"min\"]\n df[\"max\"] = var[\"max\"]\n df[\"getcmd\"] = var[\"xpath\"]\n\n return df", "title": "" }, { "docid": "37835f9bd2c9e805d0337da188f2d1c5", "score": "0.5625924", "text": "def get_features(self, date_month, qty_months_horizon=12):\r\n df_months_combined = pd.DataFrame() # The data frame which will contain all independent variables\r\n month_files = [] # List of month files in scope\r\n\r\n # Get all months\r\n date_start = self.month_delta(date_month, -qty_months_horizon)\r\n df_date_months = pd.DataFrame(pd.date_range(date_start, periods=qty_months_horizon, freq=\"M\").tolist(),\r\n columns=['date_month'])\r\n df_date_months['date_month'] = df_date_months['date_month'].values.astype('datetime64[M]') # First day of month\r\n\r\n month_files = self.get_month_filenames(df_date_months) # Get the file names of all required month files\r\n\r\n # Cleaning, transforming and combining month files\r\n for month_file in month_files:\r\n with self.gc_fs.open('graydon-data/' + month_file) as f:\r\n df_month = pd.read_csv(f, sep=';', usecols= self.columns_features, index_col=False, nrows = 5000)\r\n print('Read', month_file, \"with\", df_month.shape[0], \"rows and\", df_month.shape[1], \"columns\")\r\n df_month = df_month[(df_month['is_sole_proprietor'] == 0)] \r\n print('After removing sole proprietors there are', df_month.shape[0], \"rows are left\")\r\n df_month.columns = (df_month.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', ''))\r\n df_month = self.aggregate_board_members(df_month)\r\n df_month = self.clean_data(df_month)\r\n df_months_combined = df_months_combined.append(df_month)\r\n print('The number of rows so far by adding', month_file, \":\", df_months_combined.shape[0])\r\n\r\n df_months_combined = self.add_previous_relocation_dates(df_months_combined)\r\n 
df_months_combined['date_dataset'] = date_month # Add the identifier for a data-set\r\n\r\n return(df_months_combined)", "title": "" }, { "docid": "7a55dde341d9b5dd0cff2f83ecc0090b", "score": "0.55888426", "text": "def vector_vector(df):\n d = list(list(df.loc[df[c]].index.values) for c in df.columns)\n return(d)", "title": "" }, { "docid": "0e2afb45360bd133ccb5a96ae9295e7b", "score": "0.5567335", "text": "def func_df_iv(df, feature, target, pr=0):\n lst = []\n\n for i in range(df[feature].nunique()):\n val = list(df[feature].unique())[i]\n lst.append([feature, val, df[df[feature] == val].count()[feature], df[(df[feature] == val) & (df[target] == 1)].count()[feature]])\n\n data = pd.DataFrame(lst, columns=['Variable', 'Value', 'All', 'Bad'])\n data = data[data['Bad'] > 0]\n\n data['Share'] = data['All'] / data['All'].sum()\n data['Bad Rate'] = data['Bad'] / data['All']\n data['Distribution Good'] = (data['All'] - data['Bad']) / (data['All'].sum() - data['Bad'].sum())\n data['Distribution Bad'] = data['Bad'] / data['Bad'].sum()\n data['WoE'] = np.log(data['Distribution Good'] / data['Distribution Bad'])\n data['IV'] = (data['WoE'] * (data['Distribution Good'] - data['Distribution Bad'])).sum()\n\n data = data.sort_values(by=['Variable', 'Value'], ascending=True)\n\n\n if pr == 1:\n print(data)\n\n return data['IV'].values[0]", "title": "" }, { "docid": "69cbbf4884a9f76c96e964eedc952dd5", "score": "0.55408144", "text": "def calculate_iv(data, independent_var, dependent_var, is_continuous=None, category_count=10):\n try:\n values = calculate_woe(data, independent_var, dependent_var, is_continuous, category_count)\n df = pd.DataFrame(values)\n return df['iv'].sum()\n except Exception:\n traceback.print_exc()", "title": "" }, { "docid": "98ebf6694d6ef0d737dbf8c670572227", "score": "0.548133", "text": "def _load_vic_displacement_field_df(self):\r\n\r\n mat = sio.loadmat(self.data_path)\r\n for k in mat.keys():\r\n mat[k] = mat[k].reshape(-1, 1).squeeze()\r\n # print(k, len(mat[k]))\r\n\r\n df = pd.DataFrame(mat)\r\n # Mat files exported by vic3d includes supplementary data (unsuccessful corelations? 
)\r\n df = df.loc[((df.X != 0) & (df.Y != 0)) & (df.sigma >= 0)]\r\n df.reset_index(drop=True, inplace=True)\r\n\r\n # Remove useless columns\r\n df = df[[\"X\", \"Y\", \"Z\", \"U\", \"V\", \"W\", \"x\", \"y\", \"sigma\"]]\r\n\r\n # Reference basis was rotated when extracting the data\r\n df.rename(columns={\"x\": \"y_ref\", \"y\": \"x_ref\", \"sigma\": \"error\"}, inplace=True)\r\n\r\n df = df.astype({k: \"float\" for k in [\"X\", \"Y\", \"Z\", \"U\", \"V\", \"W\"]})\r\n df = df.astype({k: \"int\" for k in [\"x_ref\", \"y_ref\"]})\r\n\r\n # Inverse V?\r\n # df[\"V\"] = -df[\"V\"]\r\n\r\n # remove NaN\r\n df.dropna(axis=\"index\", inplace=True)\r\n\r\n flip_x = False\r\n if flip_x:\r\n # flib x axis\r\n df.X = abs(df.X - df.X.max())\r\n\r\n self.X_offset = df.X.min()\r\n df.X -= self.X_offset\r\n self.metadata[\"optical_crack_tip_e1\"] -= self.X_offset\r\n\r\n self.displacement_field_df = df\r\n self._compute_grid_params(step_agg=\"mean\")\r\n return df", "title": "" }, { "docid": "605b589c0afffdf4dbf4e561db1d817d", "score": "0.54812634", "text": "def restore_df(self, min_infected=1):\n df = self.df.copy()\n df[\"Time\"] = self.start_time + \\\n df[\"t\"].apply(lambda x: timedelta(minutes=x * self.tau))\n df = df.drop(\"t\", axis=1).set_index(\"Time\") * self.total_population\n df = df.astype(np.int64)\n upper_cols = [n.upper() for n in df.columns]\n df.columns = upper_cols\n df = self.reverse_f(df, self.total_population).drop(upper_cols, axis=1)\n df = df.loc[df[\"Infected\"] >= min_infected, :]\n return df", "title": "" }, { "docid": "79d8ecd8f9017b7dd6c6deee31dafb78", "score": "0.5478548", "text": "def evaluate(self):\n if not self.is_value:\n if len(self.data) == 0:\n # We're an empty DataFrame\n self.pandas_df = pd.DataFrame()\n return\n # Collect each vector into a struct rather than evaluating each Series individually:\n # this is more efficient so computations shared among vectors can be optimized.\n result = self.weld_value.evaluate()\n columns = result[0]\n new_data = []\n length = None\n for column in columns:\n data = column.copy2numpy()\n column_length = len(data)\n if length is not None:\n assert column_length == length, \"invalid DataFrame produced after evaluation\"\n else:\n length = column_length\n series = GrizzlySeries(data)\n new_data.append(series)\n\n # Columns is unchanged\n self.pandas_df = None\n self.data = new_data\n self.length = length\n # Reset the weld representation.\n delattr(self, \"weld_value_\")\n assert self.is_value\n return self", "title": "" }, { "docid": "2412523b08b04b90a87bcaa22fe7501e", "score": "0.54784423", "text": "def extracting_data():\n df_entity_raw = extract.extract_entity_contracts('INSTITUTO NACIONAL DE VÍAS (INVIAS)')\n df_names_raw = extract.extract_mun_names()\n\n return df_entity_raw, df_names_raw", "title": "" }, { "docid": "1ea1a76f42c1081ef9a13be503626afa", "score": "0.54461944", "text": "def calcul_eff_vit_moy(df, latitude_min, longitude_min, ecart_x, ecart_y, n_interval=10):\r\n \r\n effectif_cases = np.zeros((n_interval,n_interval))\r\n vitesse_cases = np.zeros((n_interval,n_interval))\r\n vitesse_var = np.zeros((n_interval,n_interval))\r\n for i in range(n_interval):\r\n for j in range(n_interval):\r\n case_df = trouve_data_case(df, (i, j), latitude_min, longitude_min, ecart_x, ecart_y)\r\n if case_df.shape[0] > 0 :\r\n effectif_cases[i,j] = case_df.shape[0]\r\n vitesse_cases[i,j] = case_df[\"GpsSpeed\"].mean()\r\n vitesse_var[i,j] = case_df[\"GpsSpeed\"].var()\r\n \r\n #Création d'une nouvelles colonnes stockant les données 
sur les portions de route \r\n sx,sy = affectation_2(df, latitude_min, longitude_min, ecart_x, ecart_y)\r\n\r\n sx.replace(n_interval, n_interval-1, inplace=True)\r\n sy.replace(n_interval, n_interval-1, inplace=True)\r\n \r\n e = [] #liste effectif moyen pour chaque ligne\r\n v = [] #liste vitesse moyenne pour chaque ligne\r\n v2 = [] #liste varaince vitesse pour chaque ligne\r\n \r\n for i in range(sx.shape[0]) :\r\n e.append(effectif_cases[sx.iloc[i],sy.iloc[i]])\r\n v.append(vitesse_cases[sx.iloc[i],sy.iloc[i]])\r\n v2.append(vitesse_var[sx.iloc[i],sy.iloc[i]])\r\n \r\n return e, v, v2", "title": "" }, { "docid": "556e3ad319b03e30745567e26e7fd42e", "score": "0.5435874", "text": "def get_dataframe_signos(self):\r\n \r\n return self.v.df_signos.copy()", "title": "" }, { "docid": "67e06a49c6702b3a785c57b28ea8e48d", "score": "0.5435622", "text": "def gen_data(self, normalize=True):\r\n\r\n if normalize:\r\n self.year_from_inv = (self.year_from_inv - np.min(self.year_from_inv)) / \\\r\n (np.max(self.year_from_inv) - np.min(self.year_from_inv)) - 0.5\r\n self.invested_amount = (self.invested_amount - np.min(self.invested_amount)) / \\\r\n (np.max(self.invested_amount) - np.min(self.invested_amount)) - 0.5\r\n self.regi_money = (self.regi_money - np.min(self.regi_money)) / \\\r\n (np.max(self.regi_money) - np.min(self.regi_money)) - 0.5\r\n self.year_from_establish = (self.year_from_establish - np.min(self.year_from_establish)) / \\\r\n (np.max(self.year_from_establish) - np.min(self.year_from_establish)) - 0.5\r\n self.radar_deltaday = (self.radar_deltaday - np.min(self.radar_deltaday)) / \\\r\n (np.max(self.radar_deltaday) - np.min(self.radar_deltaday)) - 0.5\r\n self.valuation = (self.valuation - np.min(self.valuation)) / \\\r\n (np.max(self.valuation) - np.min(self.valuation)) - 0.5\r\n\r\n self.variables_itjuzi = pd.concat(\r\n [self.dummy_round, self.dummy_class_first, self.dummy_class_second, self.dummy_tag, self.dummy_numemp,\r\n self.dummy_invested, self.year_from_inv, self.invested_amount, self.dummy_round, self.regi_money,\r\n self.dummy_company_type, self.year_from_establish],\r\n axis=1) # 19836 rows x 1382 columns\r\n self.variables_radar = pd.concat([self.radar_deltaday, self.valuation], axis=1) # 54975 rows x 2 columns\r\n self.data = pd.concat([self.variables_itjuzi, self.variables_radar], axis=1,\r\n join_axes=[self.variables_itjuzi.index]) # 19836 rows x 1384 columns\r", "title": "" }, { "docid": "7152a9282e511d329db9693cd0495ef3", "score": "0.5424346", "text": "def _data_frame(self):\r\n # data variable with three lists as values and three keys\r\n data = {\r\n 'Temperature_Degree': self.actual_temp, \r\n 'Efficiency_Rate': self.actual_eff, \r\n 'Quality': self.quality\r\n }\r\n \r\n # Create data frame for data variable and store it in dataset variabel\r\n dataset = pd.DataFrame(data, index= np.arange(len(self.actual_temp)))\r\n self.dataset = dataset\r\n \r\n # return dataset variable\r\n return self.dataset", "title": "" }, { "docid": "178428d367e46a33d6ccd2d7291d2a69", "score": "0.54228306", "text": "def get_data() -> pd.DataFrame:\n sql_engine = get_engine()\n data_df = assemble_df(sql_engine)\n # Clean data\n data_df.replace([None], False, inplace=True)\n\n # Spatial Engineered Features:\n data_df['distance_to_goal_mid'] = calc_distance_to_mid(data_df['x1'], data_df['y1'])\n data_df['distance_to_goal_nearest'] = calc_distance_to_nearest(data_df['x1'], data_df['y1'])\n data_df['angular_size_rad_goal'] = calc_angular_size_radians(data_df)\n 
data_df['projected_size_yds_goal'] = calc_projected_size_yds(data_df['angular_size_rad_goal'],\n data_df[\n 'distance_to_goal_nearest'],\n data_df['y1'])\n data_df['kicked'] = get_kicked(data_df)\n data_df['side_of_field_matching_foot'] = compare_foot_to_side_of_field(data_df)\n\n # Temporal Engineered Features:\n data_df['send_off_diff'] = get_send_off_diff(data_df, sql_engine)\n data_df['free_kick_30s_ago'] = get_free_kick_data(data_df, sql_engine)\n\n # Cross Referenced Data\n data_df['dominant_foot'] = get_dominant_foot(data_df, sql_engine)\n\n return data_df", "title": "" }, { "docid": "6ba1ba090fe48be1513cd2effbd34ddd", "score": "0.5417813", "text": "def get(self) -> DataFrame:\n ...", "title": "" }, { "docid": "fd6b5bb120f611ea049b187f2a6f91d8", "score": "0.5409369", "text": "def minhas_atividades_df(self):\n minhas_atividades_df = pd.DataFrame.from_dict(self.minhas_atividades, orient='index')\n minhas_atividades_df = minhas_atividades_df.reset_index()\n minhas_atividades_df.columns = ['Atividades', 'Meta_por_dia', 'data_cadastro', 'arquivamento']\n\n return minhas_atividades_df", "title": "" }, { "docid": "c47b81015adb1ee1d6a70065f9201038", "score": "0.5402831", "text": "def volume_position(df: pd.DataFrame, horizons: list): \n vol_serie = df.loc[:, (slice(None), \"Volume\")]\n \n vol_position = pd.DataFrame()\n for h in horizons: \n highest_high = vol_serie.rolling(window=h).max()#.to_numpy()\n lowest_low = vol_serie.rolling(window=h).min()#.to_numpy()\n \n # Relative position of the price (firm-specific)\n \n # preventivni reseni deleni nulou\n position_h_denom = np.subtract(highest_high, lowest_low).replace(to_replace=0, value=np.nan) # vyhodit nuly\n position_h_specific = np.subtract(vol_serie, lowest_low) / position_h_denom \n position_h_specific.columns = pd.MultiIndex.from_product(iterables=[df.columns.levels[0], [\"Volume_Position_{}\".format(h)]])\n \n vol_position = pd.concat(objs=[vol_position, position_h_specific], axis=\"columns\").sort_index(axis=\"columns\")\n \n return vol_position", "title": "" }, { "docid": "6d542dc81072823413bd173873733966", "score": "0.5384909", "text": "def get_dataframe_apoyos(self):\r\n \r\n return self.v.df_apoyos.copy()", "title": "" }, { "docid": "17f1f43ed279a9bc581b9e9fa0d85015", "score": "0.5373081", "text": "def get_data(self, obj):\n index=self.obj_votacao(only_keys=False)[obj]\n cham_votes=pd.DataFrame(self.raw.votos[index]['Deputado'])\n cham_votes.columns=['nome','id','partido','uf','voto']\n for col in cham_votes.columns:\n try:\n cham_votes[col]=cham_votes[col].str.strip()\n except:\n pass\n \n for data in [cham_votes,orientacao]:\n data['partido']=data['partido'].apply(lambda x: x.strip())\n\n cham_votes['orientacao_bancada']=cham_votes['partido'].map(self.orientacao(obj))\n\n for col in ['@Resumo','@Data','@Hora','@ObjVotacao','@codSessao']:\n cham_votes[col.lower().replace('@','')]=self.raw.loc[index,col]\n\n cham_votes=cham_votes.reindex(['id','nome', 'partido', 'uf', 'voto', 'orientacao_bancada', 'resumo',\n 'data', 'hora', 'objvotacao', 'codsessao'], axis=1)\n return cham_votes", "title": "" }, { "docid": "900bb4f0db5f0da192d55a301362585e", "score": "0.53708166", "text": "def VIF(self):\n X = self.data[self.cont_cols + self.ohe_cols].drop(columns=self.target).copy()\n X = sm.add_constant(X)\n\n self.vif =pd.DataFrame()\n self.vif[\"variables\"] = X.columns\n\n self.vif[\"VIF\"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]\n print(self.vif.sort_values(by=['VIF'], ascending=False).head(10))", "title": 
"" }, { "docid": "a1889f22962b8af4222c33a4d0415bd2", "score": "0.53542435", "text": "def _get_values(self) -> (coo_matrix, Dict[str, int], pd.Series):\n self.df = build_df('articles', self.start_date, self.end_date)\n self.ground_truth = build_df('ground_truth')\n self.df = clean_data(self.df)\n self.ground_truth = clean_data(self.ground_truth)\n res = transform_data(self.df, self.ground_truth,\n tfidf=self.tfidf, author=self.author,\n tags=self.tags, title=self.title,\n ngram=self.ngram,\n domain_endings=self.domain_endings,\n word_count=self.word_count,\n misspellings=self.misspellings,\n grammar_mistakes=self.grammar_mistakes,\n lshash=self.lshash,\n source_count=self.source_count,\n sentiment=self.sentiment,\n stop_words=self.stop_words)\n (self._X, self.ground_truth_X, self.feature_names,\n self._y, self.ground_truth_y) = res\n return res", "title": "" }, { "docid": "55fbd0f118ce77e0efcbc95062f9cf7c", "score": "0.53494", "text": "def df(self):\n return self._df", "title": "" }, { "docid": "6e93491aff2634a5edaea9b20d527394", "score": "0.53326315", "text": "def dataframe(self):\n return self.__call__()", "title": "" }, { "docid": "f194407227ea5363a2a8ddffa1b74746", "score": "0.53301805", "text": "def makefeatureVectors(dfConsumption,dfTemperature,dfSolar,beginDateTraining,EndDateTraining,beginDateTest,EndDateTest,sampleRate,selectedHouseHold,dataFrameTrainingName,dataFrameTestName):\n\n #MAKE FRESH featureVector for trainingset and testset\n #BETER PREDICTION WITHOUT THE YEAR INDEX INCLUDED\n augmentedFeaturevectorTraining = fm.makeFeatureVectorAllSelectedHousehold(dfConsumption,dfTemperature,dfSolar,beginDateTraining, EndDateTraining,sampleRate,selectedHouseHold,dataFrameTrainingName)\n augmentedFeaturevectorTest = fm.makeFeatureVectorAllSelectedHousehold(dfConsumption,dfTemperature,dfSolar,beginDateTest, EndDateTest,sampleRate,selectedHouseHold,dataFrameTestName)\n #if there's a error when changing the samplerate, check the number of selected days\n return augmentedFeaturevectorTraining, augmentedFeaturevectorTest", "title": "" }, { "docid": "2111a4c13741b337bb038d64acd5eb07", "score": "0.53171104", "text": "def currency_volume(df: pd.DataFrame, horizons: list, horizon_pairs: list):\n # get unique horizons from all horizons\n horizons_unq = horizons.copy()\n horizons_unq.extend(np.unique(horizon_pairs)) \n horizons_unq = np.unique(horizons_unq)\n \n # Calculate Firm-Specific Dollar Volume\n df_close = df.loc[:, (slice(None), \"Close\")]\n df_volume = df.loc[:, (slice(None), \"Volume\")]\n\n df_currvol_tmp = pd.DataFrame(df_volume.to_numpy()*df_close.to_numpy(), index=df.index) # HM_USDVol - taky vystup\n df_currvol_tmp.columns = pd.MultiIndex.from_product(iterables=[df.columns.levels[0], [\"CurrencyVolume_1\"] ])\n # zachovat index - kvuli diferencovani, kde by se to zmizenim 1. 
polozky posunulo\n #df_dcurrvol_tmp = df_currvol_tmp.diff(periods=1, axis=\"index\") # HM_DUSDVol - Ficura dal nepouziva\n \n features = df_currvol_tmp.copy() #pd.DataFrame() # jen pozadovane features \n features_tmp = pd.DataFrame() \n \n # firm-specific currency volume\n for h in horizons_unq:\n df_currvol = df_currvol_tmp.rolling(window=h, axis=\"index\").sum()\n df_currvol.columns = pd.MultiIndex.from_product(iterables=[df.columns.levels[0], [\"CurrencyVolume_{}\".format(h)] ])\n #scaled_currvol = standardize(df_currvol) # scaling data/data normalization\n # vsechny features potrebne pro dalsi vypocet\n features_tmp = pd.concat(objs=[features_tmp, df_currvol], axis=\"columns\")\n \n # jen features pozadovane na vystupu\n if h in horizons:\n features = pd.concat(objs=[features, df_currvol], axis=\"columns\")\n #features = pd.concat(objs=[features, scaled_currvol], axis=\"columns\")\n\n # firm-specific currency volume absolute and relative change \n for h_short, h_long in horizon_pairs:\n df_avgshort = features_tmp.loc[:, (slice(None), \"CurrencyVolume_{}\".format(h_short))]/h_short\n df_avglong = features_tmp.loc[:, (slice(None), \"CurrencyVolume_{}\".format(h_long))]/h_long\n df_chng_abs = df_avgshort.to_numpy() - df_avglong.to_numpy()\n captions = pd.MultiIndex.from_product(iterables=[df.columns.levels[0], [\"CurrencyVolumeAbsChange_{}vs{}\".format(h_short, h_long)] ])\n df_chng_abs = pd.DataFrame(data=df_chng_abs, index=df.index, columns=captions)\n #scaled_chng_abs = standardize(df_chng_abs) \n \n df_chng_rel = (df_avgshort.to_numpy()/df_avglong.to_numpy())-1\n captions = pd.MultiIndex.from_product(iterables=[df.columns.levels[0], [\"CurrencyVolumeRelChange_{}vs{}\".format(h_short, h_long)] ])\n df_chng_rel = pd.DataFrame(data=df_chng_rel, index=df.index, columns=captions)\n #scaled_chng_rel = standardize(df_chng_rel)\n \n features = pd.concat(objs=[features, df_chng_abs, df_chng_rel], axis=\"columns\")\n #features = pd.concat(objs=[features, scaled_chng_abs, scaled_chng_rel], axis=\"columns\")\n\n \n return features", "title": "" }, { "docid": "28b88eff92c9885fb0177ff0a358091f", "score": "0.5304872", "text": "def variable_categorique(x):\n # traitement des variables categorique\n x_object = x.iloc[:, :].values\n\n # I_Salarie_CR 3\n dummies = pd.get_dummies(pd.Series(x_object[:, 2]))\n # suppresion d'une variable dans notre cas Emp\n dummies = dummies.drop([\"Emp\"], axis=1)\n\n # Nouveau_Client 6\n labelEncoder_2 = LabelEncoder()\n dummies_2 = labelEncoder_2.fit_transform(pd.Series(x_object[:, 5]))\n dummies_2 = pd.DataFrame(dummies_2, columns=[\"Nouveau_Client\"])\n\n # Metier_portef 13\n dummies_3 = pd.get_dummies(pd.Series(x_object[:, 12]))\n # suppresion d'une variable, dans notre cas 0000\n\n dummies_3 = dummies_3.drop([\"0000\"], axis=1)\n # concatener les differents dataframe\n\n # regrouper les variable categorique dans une seul dataframe\n x_var_cat = pd.concat([dummies, dummies_2, dummies_3], axis=1)\n\n # suppression des colonnes de type String dans la dataframe\n x = x.drop([\"Nouveau_Client\", \"Metier_portef\", \"I_Salarie_CR\"], axis=1)\n\n # concat variables categorique codifier\n x = pd.concat([x, x_var_cat], axis=1)\n\n return x", "title": "" }, { "docid": "c08af7dfd6f94d1783f1f5d17c6dc6e8", "score": "0.52921754", "text": "def df_extract_data(df):\n game_id = df['gameId']\n platform_id = df['platformId']\n # La victoria o derrota del equipo Azul: 1 victoria, 0 derrota\n blue_win = pd.get_dummies(df.iloc[:, 10].apply(lambda teams: teams[0][\"win\"])).iloc[:, 
1]\n\n # Campeone seleccionados\n blue_champs = df.iloc[:, 11].apply(lambda champs: champs[0:5]).apply(get_champ_ids)\n red_champs = df.iloc[:, 11].apply(lambda champs: champs[5:10]).apply(get_champ_ids)\n # Vetos\n blue_bans = df['teams'].apply(lambda team: team[0]['bans']).apply(lambda bans: [ban['championId'] for ban in bans])\n red_bans = df['teams'].apply(lambda team: team[1]['bans']).apply(lambda bans: [ban['championId'] for ban in bans])\n # Roles\n blue_roles = df['participants'].apply(lambda participants: [get_role(p['timeline']) for p in participants[0:5]])\n red_roles = df['participants'].apply(lambda participants: [get_role(p['timeline']) for p in participants[5:10]])\n\n df = pd.DataFrame()\n df['gameId'] = game_id\n df['platformId'] = platform_id\n df[\"blueChamps\"] = blue_champs\n df[\"redChamps\"] = red_champs\n df[\"blueBans\"] = blue_bans\n df[\"redBands\"] = red_bans\n df[\"blueRoles\"] = blue_roles\n df[\"redRoles\"] = red_roles\n df[\"blueWin\"] = blue_win\n return df", "title": "" }, { "docid": "8fdede1865b1f18eeec859324745e839", "score": "0.52892554", "text": "def df(self, sql: str) -> Any:\n return Spark.sqlContext.createDataFrame(px.data.iris())", "title": "" }, { "docid": "fbcb8af91a49cb6edd9583f6fdba7e8f", "score": "0.52865446", "text": "def create_features(df):\n last_day = df.date.max() \n feature_dfs=[]\n\n feature_dfs.append(get_fixed_fields(df)) \n\n for i in [1,3,4,6]:\n start = last_day - timedelta(days=i*28)\n end = last_day\n feature_dfs.append(get_cummulate_numeric_fields(df, start, end, suffix=\"_last_%s_months\" %(i)))\n\n for i in [2,3,4]:\n start = last_day - timedelta(days=i*28)\n end = start + timedelta(days=28)\n feature_dfs.append(get_cummulate_numeric_fields(df, start, end, suffix=\"_%s_months_ago\" %(i)))\n \n return pd.concat(feature_dfs, axis=1)", "title": "" }, { "docid": "4ccdf336196cedae604e2c38d26c2e52", "score": "0.5285993", "text": "def var(self, dof: int = 1) -> pd.DataFrame:\n return self.count_mean_var(dof).var", "title": "" }, { "docid": "354495af960fb15e31edb52099511d21", "score": "0.5284642", "text": "def store_results(df):\n # TODO", "title": "" }, { "docid": "5dfeb3be77d9e20fcabf20dcd4ab4cbc", "score": "0.5282955", "text": "def seir_solution(self, params: list, dates: pd.Series) -> pd.DataFrame:\n # Pop first parameter for E_0\n E_0 = params[0]\n params = params[1:]\n\n # Run initial value solver\n size = len(dates)\n solution = solve_ivp(\n self.seir_odes,\n y0=[self.S_0, E_0, self.I_0, self.R_0],\n t_span=[0, size],\n t_eval=np.arange(0, size),\n args=[params],\n )\n\n # Create results dataframe\n y = solution.y\n results = pd.DataFrame(\n {\n \"date\": dates,\n \"susceptible\": y[0],\n \"exposed\": y[1],\n \"infected\": y[2],\n \"removed\": y[3],\n }\n )\n\n return results", "title": "" }, { "docid": "482716185466e7078ade6eac53fe803b", "score": "0.5275952", "text": "def get_data():\n df = pd.read_csv('../data/divorce_set.csv', sep=',')\n return df", "title": "" }, { "docid": "2d693a3d4e0032667468499e623abf99", "score": "0.52713007", "text": "def variance_inflation_factors(exog: DataFrame):\n exog = add_constant(exog)\n vifs = Series(\n [1.1 / (1.0 - OLS(exog[col].values,\n exog.loc[:, exog.columns != col].values).fit().rsquared)\n for col in exog],\n index=exog.columns,\n name='VIF'\n )\n return vifs", "title": "" }, { "docid": "a4f968ccc598d17b7f98f9dcbe7f5602", "score": "0.52635604", "text": "def data_loader_simple() -> pd.DataFrame:\n return pd.DataFrame([2, 2, 3])", "title": "" }, { "docid": 
"c883a5f6bc42aeabe0616108ce8213f0", "score": "0.5251231", "text": "def __init__(self, dataframe):\n self.params = []\n\n for _, col_series in dataframe.iteritems():\n # Consider using Series.to_numpy() or Series.array instead of values. Introduced in Pandas 0.24\n col_min_max = self.x_min_max(col_series.values)\n self.params.append(col_min_max)", "title": "" }, { "docid": "d8fc6b8fcc1f30956b050a1b00c07ea4", "score": "0.524156", "text": "def data_frame_proyecto(self):\n \n proyecto = self.v.dlg_proyecto\n dcc = proyecto.proyecto()\n ser = pd.Series(dcc)\n df = ser.to_frame()\n df = df.T\n \n return df", "title": "" }, { "docid": "f4b2cde1c583317c469de251995cc21c", "score": "0.523321", "text": "def question03(df):\n\n return ...", "title": "" }, { "docid": "0d2f7e893b0025abc0a6d8f960139400", "score": "0.52301824", "text": "def calc_iv(self):\n result_ivs = []\n for date, data in self.df_stock.iterrows():\n close = data['close']\n date = pd.to_datetime(date)\n df_date = self.df_all.query('date == %r' % date)\n df_date = self.format_data(df_date)\n \"\"\":type: pd.DataFrame\"\"\"\n print output % ('INIT', 'date: %s, close: %.2f, df_date: %d' % (\n date.strftime('%Y-%m-%d'), close, len(df_date)\n ))\n print '-' * 70\n\n if len(df_date) == 0:\n print output % ('ERROR', 'no enough data...')\n print '=' * 70\n continue\n\n # check strikes\n strikes = np.sort(df_date['strike'].unique())\n s0, s1 = self.static.two_nearby(strikes, close)\n\n if close in strikes: # if close is exists in strikes\n i0, i1 = self.static.list_index(strikes, s0, s1, 1)\n strikes = strikes[i0:i1]\n elif strikes[s0] <= close <= strikes[s1]: # if not in range\n i0, i1 = self.static.list_index(strikes, s0, s1, 1)\n strikes = strikes[i0:i1]\n else: # if in range\n i0, i1 = self.static.list_index(strikes, s0, s1, 0)\n strikes = strikes[i0:i1]\n\n if close not in list(strikes):\n strikes = np.append(strikes, close) # need to be last\n\n dtes = np.sort(df_date['dte'].unique())\n print output % ('DATA', 'exists dtes: %s' % dtes)\n print output % ('DATA', 'require strike: %s' % strikes)\n\n dte_filled = []\n for dte in dtes:\n df_dte = df_date[df_date['dte'] == dte]\n dte_strikes = np.sort(df_dte['strike'])\n\n print output % ('LOOP', 'dte: %d, df_dte: %d' % (dte, len(df_dte)))\n for strike in strikes:\n if strike not in dte_strikes:\n calc = StrikeNotInDtes2dCalc(strike, dte, df_dte)\n dte_iv = calc.approx()\n\n if dte_iv > 0:\n dte_filled.append({\n 'date': date,\n 'dte': dte,\n 'strike': strike,\n 'impl_vol': dte_iv\n })\n else:\n continue\n\n print '-' * 70\n\n df_fill0 = pd.DataFrame(dte_filled)\n df_fill0 = pd.concat([df_date, df_fill0])\n \"\"\":type: pd.DataFrame\"\"\"\n df_fill0 = df_fill0[df_fill0['strike'].isin(strikes)].sort_values(['dte', 'strike'])\n\n print '=' * 70\n\n # check dte\n dtes = list(dtes)\n days = [d for d in calc_days if d not in dtes]\n strikes = [s for s in strikes if s != close]\n\n day_filled = []\n for day in days:\n print output % ('LOOP', 'day: %d' % day)\n\n d0, d1 = self.static.two_nearby(dtes, day)\n if dtes[d0] <= close <= dtes[d1]: # if not in range\n i0, i1 = self.static.list_index(dtes, d0, d1, 1)\n nearby_dtes = dtes[i0:i1]\n else: # if in range\n i0, i1 = self.static.list_index(dtes, d0, d1, 0)\n nearby_dtes = dtes[i0:i1]\n\n print output % ('NEAR', 'dte0: %d(%d) dte1: %d(%d)' % (\n d0, dtes[d0], d1, dtes[d1]\n ))\n print output % ('DATA', 'dtes: %s' % list(nearby_dtes))\n\n for strike in strikes:\n df_strike = df_fill0[df_fill0['strike'] == strike].sort_values('dte')\n calc = 
DayNotInStrikes2dCalc(day, strike, df_strike)\n strike_iv = calc.approx()\n\n day_filled.append({\n 'date': date,\n 'dte': day,\n 'strike': strike,\n 'dte_iv': strike_iv,\n 'strike_iv': strike_iv,\n 'impl_vol': strike_iv\n })\n\n # calc both strike/dte iv\n df_fill1 = pd.DataFrame(day_filled)\n df_fill = pd.concat([df_fill0, df_fill1])\n \"\"\":type: pd.DataFrame\"\"\"\n\n days_iv = []\n for day in calc_days:\n df_dte = df_fill[df_fill['dte'] == day]\n # noinspection PyTypeChecker\n if not np.any(df_dte['strike'] == close):\n # 3d calc iv only require when no dte strike exists\n calc = StrikeNotInDtes2dCalc(close, day, df_dte)\n dte_iv = calc.approx()\n\n print '-' * 70\n\n df_strike = df_fill[df_fill['strike'] == close]\n calc = DayNotInStrikes2dCalc(day, close, df_strike)\n strike_iv = calc.approx()\n impl_vol = np.mean([iv for iv in (dte_iv, strike_iv) if iv > 0])\n\n days_iv.append({\n 'date': date,\n 'dte': day,\n 'strike': close,\n 'dte_iv': dte_iv,\n 'strike_iv': strike_iv,\n 'impl_vol': impl_vol\n })\n\n # result iv\n df_iv0 = df_fill0[(df_fill0['dte'].isin(calc_days)) & (df_fill0['strike'] == close)]\n df_iv1 = pd.DataFrame(\n days_iv, columns=['date', 'dte', 'strike', 'dte_iv', 'strike_iv', 'impl_vol']\n )\n\n if len(df_iv0):\n df_days = pd.concat([df_iv0, df_iv1])\n \"\"\":type: pd.DataFrame\"\"\"\n else:\n df_days = df_iv1\n\n assert len(df_days) == len(calc_days)\n\n result_ivs.append(df_days)\n\n print output % ('LAST', 'final merge df_iv: %d' % len(result_ivs))\n df_iv = pd.concat(result_ivs)\n \"\"\":type: pd.DataFrame\"\"\"\n\n # format dataframe\n df_iv['symbol'] = str(self.symbol.upper())\n df_iv = df_iv[[\n 'date', 'dte', 'dte_iv', 'strike_iv', 'impl_vol'\n ]]\n df_iv['dte'] = df_iv['dte'].astype('int')\n df_iv = df_iv.round({'dte_iv': 2, 'strike_iv': 2, 'impl_vol': 2})\n df_iv = df_iv.reset_index(drop=True)\n\n return df_iv", "title": "" }, { "docid": "22757745017add26832efbfef7f24c01", "score": "0.5228386", "text": "def x(self) -> pd.DataFrame:\n return self.__x", "title": "" }, { "docid": "c8509c895cb354b8dcfdd1533c9a61c1", "score": "0.52157354", "text": "def dataframe(self):\n # type: () -> pd.DataFrame\n return pd.DataFrame(self.series)", "title": "" }, { "docid": "1ea2e5bbcf07348d3acc07794187f49b", "score": "0.52094525", "text": "def data(start, end, coordinator_id, f=None):\n info = hierarchy.employee_info(coordinator_id)\n if info is None:\n return DataFrame()\n\n agent_list = info['agent_id']\n if agent_list==[]:\n return DataFrame()\n\n g = [] if f is None else deepcopy(f)\n\n g += [Q('terms', agent_id = agent_list)]\n\n df_assigned = assigned(start=start, end=end, f=g)\n df_additional = additional_tasks__agents.df(start=start, end=end, f=g)\n df_additional_vp = additional_tasks__agents.df_vp(start=start, end=end, f=g)\n\n df_task_vp = df_assigned[['asignadas', 'efectivas']].merge(\n DataFrame(df_additional_vp['instalación adicional']), on='agent_id',\n how='outer')\n df_task_vp = df_task_vp.fillna(0).astype('int64')\n df_task_vp['asignadas'] = df_task_vp['asignadas'] + df_task_vp['instalación adicional']\n df_task_vp['efectivas'] = df_task_vp['efectivas'] + df_task_vp['instalación adicional']\n df_task_vp = df_task_vp.drop('instalación adicional', axis=1)\n df_task_vp['% efectivas'] = df_task_vp['efectivas']/df_task_vp['asignadas']\n df_task_vp = df_task_vp.fillna(0).replace((np.inf, -np.inf), (0,0))\n df_task_vp['pago'] = df_task_vp['% efectivas'].apply(task_vp_structure)\n\n return {\n 'assigned': df_assigned,\n 'additional': df_additional,\n 
'additional_vp': df_additional_vp,\n 'task_vp': df_task_vp\n }", "title": "" }, { "docid": "0945e5a03d4dee374008fe5aa93c3cdc", "score": "0.5198869", "text": "def dw_fretes():\r\n\r\n # ESALQ-Log data\r\n url_cepea = \"https://sifreca.esalq.usp.br/mercado/fertilizantes\"\r\n df_fretes = pd.read_html(url_cepea, decimal=',', thousands='.')[1]\r\n\r\n # join columns\r\n df_fretes['Origem'] = df_fretes['Origem']+'/'+df_fretes['UF']\r\n df_fretes['Destino'] = df_fretes['Destino']+'/'+df_fretes['UF.1']\r\n\r\n df_fretes.drop(['UF', 'UF.1'], axis=1, inplace=True)\r\n\r\n # timestamp\r\n df_fretes['updated'] = pd.to_datetime('now')\r\n\r\n df_fretes.columns = fixnames(df_fretes.columns)\r\n\r\n return df_fretes", "title": "" }, { "docid": "d3ac778066886ca27953e7545211fa71", "score": "0.519615", "text": "def extract_complete(file_path:str, normalize=False):\n with open (file_path, \"r\") as handle:\n lines_list = handle.readlines()\n\n #Pulizia dati, da decidere in base a che formato sono i csv:\n\n if \";\" in lines_list[1]:\n for i in range(len(lines_list)):\n lines_list[i] = lines_list[i].replace(\",\", \".\").replace(\";\", \",\").replace(\"\\\"\", \"\").rstrip().split(\",\")\n else:\n for i in range(len(lines_list)):\n lines_list[i] = lines_list[i].replace(\";\", \",\").replace(\"\\\"\", \"\").rstrip().split(\",\")\n\n #Estrazione header per dataframe\n header_list = lines_list[0]\n\n #Divisione delle varie prove in dataframes\n\n extracted_df_list = list()\n\n data_to_insert = list()\n current_step_number = 0\n current_test = str()\n\n for line in lines_list[1:]:\n\n #Controllo se lo step number cambia\n\n step_num = line[0].split(\"_\")[0]\n\n #Se cambia inserisco i dati presenti in data_to_insert in un dataframe e prima di ricominciare svuoto la lista\n if step_num != current_step_number:\n\n current_step_number = step_num\n complete_df = pd.DataFrame(data_to_insert, columns=header_list)\n\n #Se chiesta la normalizzazione cambio la corrente\n if normalize == True:\n if \"Current (mA)\" in complete_df.columns.tolist():\n complete_df[\"Current (mA)\"] = complete_df[\"Current (mA)\"].div(1000)\n complete_df.rename(columns={\"Current (mA)\": \"Current (A)\"}, inplace=True)\n\n test = current_test\n name = file_path\n points = len(complete_df)\n\n new_element = {\n \"name\": name,\n \"test\": test,\n \"data\": complete_df,\n \"points\": points\n }\n\n extracted_df_list.append(new_element)\n\n data_to_insert = []\n current_test = line[1]\n\n #Converto tutti in float e inserisco\n for i in range(len(line)):\n if i < 2:\n continue\n else:\n line[i] = float(line[i])\n\n data_to_insert.append(line)\n\n #Alla fine del loop devo comunque aggiungere ciò che rimane in un ultimo\n #dataframe e cancellare il primo elemento della lista che è vuoto\n\n complete_df = pd.DataFrame(data_to_insert, columns=header_list)\n\n #Se chiesta la normalizzazione cambio la corrente\n if normalize == True:\n complete_df[\"Current (mA)\"] = complete_df[\"Current (mA)\"].div(1000)\n complete_df.rename(columns={\"Current (mA)\": \"Current (A)\"}, inplace=True)\n\n test = current_test\n name = file_path\n points = len(complete_df)\n\n new_element = {\n \"name\": name,\n \"test\": test,\n \"data\": complete_df,\n \"points\": points\n }\n\n extracted_df_list.append(new_element)\n\n del extracted_df_list[0]\n\n return extracted_df_list", "title": "" }, { "docid": "a12ee2ce2927e79a586141016cf1eadc", "score": "0.51946354", "text": "def frame_to_features(df):\n features = pd.DataFrame(index = df.index)\n \n features['sunday'] = 
(df['day_of_week'] == 'Sunday').astype(np.float64)\n features['monday'] = (df['day_of_week'] == 'Monday').astype(np.float64)\n features['tuesday'] = (df['day_of_week'] == 'Tuesday').astype(np.float64)\n features['wednesday'] = (df['day_of_week'] == 'Wednesday').astype(np.float64)\n features['thursday'] = (df['day_of_week'] == 'Thursday').astype(np.float64)\n features['friday'] = (df['day_of_week'] == 'Friday').astype(np.float64)\n features['saturday'] = (df['day_of_week'] == 'Saturday').astype(np.float64)\n features['female'] = (df['gender'] == 'F').astype(np.float64)\n features['marital_status'] = (df['marital_status'] == 'M').astype(np.float64)\n features['age'] = (df['age'] - 42.62266) / 13.340682\n features['income'] = (df['income'] - 85068.38652) / 37527.481327\n \n return features", "title": "" }, { "docid": "8b86acadeb95ddd9e63ee0f713a94445", "score": "0.51939464", "text": "def df(self, t, x, (p, stim)):\n\n\t\t# Unpack states and parameters as shown below.\n\t\tx1 = x[...,0]\n\t\tx2 = x[...,1]\n\t\tx3 = x[...,2]\n\t\t\n\t\tp1, p2, p3 = p\n\t\t\n\t\tdf_vec = sp.empty_like(x)\t\n\n\t\tdf_vec[..., 0] = -x1**2.0 + 3*(p1 + p2)\n\t\tdf_vec[..., 1] = -x1 + x3*p3**2.0\n\t\tdf_vec[..., 2] = -x3/p2\n\n\t\treturn df_vec", "title": "" }, { "docid": "1f4d63c7bd797874187e0df098c1c010", "score": "0.51852995", "text": "def explore_data(df):\n\n print(df.head())\n print(\"Shape of Data: \", df.shape)\n print(df.info())", "title": "" }, { "docid": "f40182bc9e94542bf4d05ce400339270", "score": "0.51821727", "text": "def train_val_df(features, y_train, models):\n master_eval = pd.DataFrame()\n\n for name, data in features.items():\n\n X_train_val, X_val , y_train_val, y_val = train_test_split(data, y_train, test_size = .25, random_state = 713)\n\n for model_name, model in models.items():\n #Training and scoring model\n instance = model\n instance.fit(X_train_val, y_train_val)\n score = f1_score(y_val, instance.predict(X_val), average = 'macro')\n\n #Updating dataframe\n row = master_eval.shape[0] + 1\n master_eval.loc[row, 'Data'] = name\n master_eval.loc[row, 'Model'] = model_name\n master_eval.loc[row, 'F1'] = np.round(score, 3)\n print('{:.2f}% complete'.format((master_eval.shape[0]/(len(models)*len(features))*100)))\n\n return master_eval", "title": "" }, { "docid": "e3592e41249058e1a97c385510430054", "score": "0.51755095", "text": "def get_run_vector(preferences):\n runlevel = preferences['runner_type']\n typerace = preferences['race_distance']\n run_vector = pd.read_csv('main/plan_vectors/' +\n str(runlevel) + '_' + str(typerace) + '.csv')\n return run_vector", "title": "" }, { "docid": "fa0383e7ee126da98b406929dc52c83a", "score": "0.51743156", "text": "def _collect_data(self, evaluator):\n features = []\n for feature in self.get_features():\n if evaluator(feature):\n features.append(feature)\n return PandasDataFrame.create_new(self, self.df[features])", "title": "" }, { "docid": "0558e75055214e85c628656f2f7b686b", "score": "0.51652944", "text": "def inversoAditivo_Vector(a):\n rtaVector = [[None for fila in range(1)] for column in range(len(a))]\n\n fila = len(a)\n \n for i in range(fila):\n rtaVector[i][0] = lab1.producto(a[i][0],[-1,0])\n\n return rtaVector", "title": "" }, { "docid": "20ef773e51250b470235418cfe544c49", "score": "0.51639956", "text": "def dataframe_periodo_retorno(self):\n \n dcc = Diccionarios.dcc_periodo_retorno \n self.v.df_periodo_retorno = pd.DataFrame.from_dict(dcc)\n \n return self.v.df_periodo_retorno", "title": "" }, { "docid": "b3612456e3c6a1e00036a2dbbfa6c800", 
"score": "0.5163094", "text": "def data(self):\n return self.__df", "title": "" }, { "docid": "b3612456e3c6a1e00036a2dbbfa6c800", "score": "0.5163094", "text": "def data(self):\n return self.__df", "title": "" }, { "docid": "628be0f8464e06b94ab2c5ab2937d841", "score": "0.5160248", "text": "def load_data():\n\n ###############################################\n # TODO: Complete this function. \n ###############################################\n\n data_set = pd.DataFrame(columns = [\"Number packets\", \"Total time\", \"Bytes Sent by Server\", \"Average Time Between Packets\", \"Std Time Between Packets\", \"Cell\"])\n\n i = 0\n for cell in range(1,101):\n for capture in range(1,21):\n data_set.loc[i] = list(excract_features(cell,capture))\n i += 1\n\n\n return data_set", "title": "" }, { "docid": "7deb49a360c1c008bd21af7a627f5d0f", "score": "0.5152849", "text": "def blank_vectors():\n return pd.DataFrame({\n \"WBT\": pd.Series([], dtype=\"object\"),\n \"NNS\": pd.Series([], dtype=\"object\"),\n \"NNS_MD\": pd.Series([], dtype=\"float64\"),\n \"Distance\": pd.Series([], dtype=\"float64\"),\n \"WBT_X\": pd.Series([], dtype=\"float64\"),\n \"WBT_Y\": pd.Series([], dtype=\"float64\"),\n \"WBT_Z\": pd.Series([], dtype=\"float64\"),\n \"NNS_X\": pd.Series([], dtype=\"float64\"),\n \"NNS_Y\": pd.Series([], dtype=\"float64\"),\n \"NNS_Z\": pd.Series([], dtype=\"float64\")\n })", "title": "" }, { "docid": "97df3e52d79d6b4b5e8bff794281edc4", "score": "0.5137799", "text": "def solid2(_, _input_dataframe):", "title": "" }, { "docid": "8d030946a3cc576f0f949e0e2bad8ee4", "score": "0.5132745", "text": "def ar1_factors(df: pd.DataFrame, horizons: list):\n\n logclose = np.log(df.loc[:, (slice(None), \"Close\")].droplevel(level=1, axis=\"columns\"))#[400:600]\n logclose_raw = pd.DataFrame(data=logclose.to_numpy(), columns=logclose.columns) # bez indexu\n # s indexem je pak v ARIMA warning: A date index has been provided... 
\n \n ar_factors = pd.DataFrame()\n for h in horizons: \n x_ar1 = logclose_raw.rolling(window=h, axis=\"index\").aggregate(ar1_coef) # rolling AR coefs for all tickers\n x_ar1.index = df.index # vraceni puvodniho indexu\n x_ar1.columns = pd.MultiIndex.from_product(iterables=[logclose.columns, [\"AR1_LogPrice_{}\".format(h)]])\n \n ar_factors = pd.concat(objs=[ar_factors, x_ar1], axis=\"columns\").sort_index(axis=\"columns\")\n print(\"Horizon \", h, \" completed\")\n \n return ar_factors", "title": "" }, { "docid": "855b356b108de3306f95fa63cc012bb9", "score": "0.51287913", "text": "def obtainIndividualFeatureVector(self, stock, fromWhen):\n\t\tfeatureVector = []\n\n\t\ttoday = datetime.datetime.today()\n\t\ttoday = datetime.datetime(2018, today.month, today.day, today.hour, today.minute, 0, 0)\n\t\ttoday = fromWhen\n\t\t# print(today)\n\t\tstart = today = datetime.datetime(2018, 1, today.day, today.hour, today.minute, 0, 0)\n\n\t\tfiveMinValue = stock.getAllSampleValues(0, today, 5, 0, 0, 1)\n\t\tfiveMinVolume = stock.getAllSampleValues(1, today, 5, 0, 0, 1)\n\t\t# featureVector += fiveMinValue #[] \n\t\t# featureVector += fiveMinVolume\n\n\t\ttenMinValue = stock.getAllSampleValues(0, today, 10, 0, 0, 1)\n\t\ttenMinVolume = stock.getAllSampleValues(1, today, 10, 0, 0, 1)\n\t\t# featureVector += tenMinValue\n\t\t# featureVector += tenMinVolume\n\n\t\tftnMinValue = stock.getAllSampleValues(0, today, 15, 0, 0, 1)\n\t\tftnMinVolume = stock.getAllSampleValues(1, today, 15, 0, 0, 1)\n\t\t# featureVector += ftnMinValue\n\t\t# featureVector += ftnMinVolume\n\n\t\ttwtyMinValue = stock.getAllSampleValues(0, today, 20, 0, 0, 1)\n\t\ttwtyMinVolume = stock.getAllSampleValues(1, today, 20, 0, 0, 1)\n\t\t# featureVector += twtyMinValue\n\t\t# featureVector += twtyMinVolume\n\n\t\tthrtyMinValue = stock.getAllSampleValues(0, today, 30, 0, 0, 1)\n\t\tthrtyMinVolume = stock.getAllSampleValues(1, today, 30, 0, 0, 1)\n\t\t# featureVector += thrtyMinValue\n\t\t# featureVector += thrtyMinVolume\n\n\n\t\tfourtyfiveMinValue = stock.getAllSampleValues(0, today, 45, 0, 0, 1)\n\t\tfourtyfiveMinVolume = stock.getAllSampleValues(1, today, 45, 0, 0, 1)\n\t\t# featureVector += fourtyfiveMinValue\n\t\t# featureVector += fourtyfiveMinVolume\n\n\n\t\thrValue = stock.getAllSampleValues(0, today, 0, 1, 0, 1)\n\t\thrVolume = stock.getAllSampleValues(1, today, 0, 1, 0, 1)\n\t\t# featureVector += hrValue\n\t\t# featureVector += hrVolume\n\n\t\ttwoHrValue = stock.getAllSampleValues(0, today, 0, 2, 0, 1)\n\t\ttwoHrVolume = stock.getAllSampleValues(1, today, 0, 2, 0, 1)\n\t\t# featureVector += twoHrValue\n\t\t# featureVector += twoHrVolume\n\n\t\t# return featureVector\n\n\t\tsvnHrValue = stock.getAllSampleValues(0, today, 0, 7, 0, 1)\n\t\tsvnHrVolume = stock.getAllSampleValues(1, today, 0, 7, 0, 1)\n\t\t# featureVector += svnHrValue\n\t\t# featurelVector += svnHrVolume\n\n\n\t\ttenMinValue = stock.getAllSampleValues(0, today, 10, 0, 0, 1)\n\t\ttenMinVolume = stock.getAllSampleValues(1, today, 10, 0, 0, 1)\n\t\t# featureVector += tenMinValue\n\t\t# featureVector += tenMinVolume\n\n\t\tftnMinValue = stock.getAllSampleValues(0, today, 15, 0, 0, 1)\n\t\tftnMinVolume = stock.getAllSampleValues(1, today, 15, 0, 0, 1)\n\t\t# featureVector += ftnMinValue\n\t\t# featureVector += ftnMinVolume\n\n\t\ttwtyMinValue = stock.getAllSampleValues(0, today, 20, 0, 0, 1)\n\t\ttwtyMinVolume = stock.getAllSampleValues(1, today, 20, 0, 0, 1)\n\t\t# featureVector += twtyMinValue\n\t\t# featureVector += twtyMinVolume\n\n\t\tthrtyMinValue = 
stock.getAllSampleValues(0, today, 30, 0, 0, 1)\n\t\tthrtyMinVolume = stock.getAllSampleValues(1, today, 30, 0, 0, 1)\n\t\t# featureVector += thrtyMinValue\n\t\t# featureVector += thrtyMinVolume\n\n\n\t\tfourtyfiveMinValue = stock.getAllSampleValues(0, today, 45, 0, 0, 1)\n\t\tfourtyfiveMinVolume = stock.getAllSampleValues(1, today, 45, 0, 0, 1)\n\t\t# featureVector += fourtyfiveMinValue\n\t\t# featureVector += fourtyfiveMinVolume\n\n\n\t\thrValue = stock.getAllSampleValues(0, today, 0, 1, 0, 1)\n\t\thrVolume = stock.getAllSampleValues(1, today, 0, 1, 0, 1)\n\t\t# featureVector += hrValue\n\t\t# featureVector += hrVolume\n\n\t\ttwoHrValue = stock.getAllSampleValues(0, today, 0, 2, 0, 1)\n\t\ttwoHrVolume = stock.getAllSampleValues(1, today, 0, 2, 0, 1)\n\t\t# featureVector += twoHrValue\n\t\t# featureVector += twoHrVolume\n\n\t\t# return featureVector\n\n\t\tsvnHrValue = stock.getAllSampleValues(0, today, 0, 7, 0, 1)\n\t\tsvnHrVolume = stock.getAllSampleValues(1, today, 0, 7, 0, 1)\n\t\t# featureVector += svnHrValue\n\t\t# featureVector += svnHrVolume\n\n\t\t# return featureVector\n\n\t\t#put from currentTime back to 12am, all of previous day here\n\n\t\tpreviousDayValue = stock.getAllSampleValues(0, today, 0, 0, 1, 1)\n\t\tpreviousDayVolume = stock.getAllSampleValues(1, today, 0, 0, 1, 1)\n\t\t# featureVector += previousDayValue\n\t\t# featureVector += previousDayVolume\n\n\t\t# return featureVector\n\n\t\tweekValue = stock.getAllSampleValues(0, today, 0, 0, 7, 1)\n\t\tweekVolume = stock.getAllSampleValues(1, today, 0, 0, 7, 1)\n\t\t# featureVector += weekValue\n\t\t# featureVector += weekVolume\n\n\t\t# return featureVector\n\n\t\ttwoWeekValue = stock.getAllSampleValues(0, today, 0, 0, 14, 1)\n\t\ttwoWeekVolume = stock.getAllSampleValues(1, today, 0, 0, 14, 1)\n\t\t# featureVector += twoWeekValue\n\t\t# featureVector += twoWeekVolume\n\n\t\t# return featureVector\n\n\t\t#put from currentTime back to 12am, all of previous day here\n\n\t\t# previousDayValue = stock.getAllSampleValues(0, today, 0, 0, 1, 1)\n\t\t# previousDayVolume = stock.getAllSampleValues(1, today, 0, 0, 1, 1)\n\t\t# featureVector += previousDayValue\n\t\t# featureVector += previousDayVolume\n\n\t\t# return featureVector\n\n\t\t# weekValue = stock.getAllSampleValues(0, today, 0, 0, 7, 1)\n\t\t# weekVolume = stock.getAllSampleValues(1, today, 0, 0, 7, 1)\n\t\t# featureVector += weekValue\n\t\t# featureVector += weekVolume\n\n\t\t# return featureVector\n\n\t\t# twoWeekValue = stock.getAllSampleValues(0, today, 0, 0, 14, 1)\n\t\t# twoWeekVolume = stock.getAllSampleValues(1, today, 0, 0, 14, 1)\n\t\t# featureVector += twoWeekValue\n\t\t# featureVector += twoWeekVolume\n\n\t\t# return featureVector\n\n\t\t# print(\"Slow boys\")\n\t\t#populations\n\t\t# stock.updatePopMean(0)\n\t\t# stock.updatePopMean(1)\n\t\t# stock.updatePopStdDev(0)\n\t\t# stock.updatePopStdDev(1)\n\t\t# xVal = stock.obtainXForZScore(0, start, today, 1)\n\t\t# xVol = stock.obtainXForZScore(1, start, today, 1)\n\t\t# stock.updatePopZScore(0, xVal)\n\t\t# stock.updatePopZScore(1, xVol)\n\t\t# stock.updatePopSkewness(0)\n\t\t# stock.updatePopSkewness(1)\n\t\t# stock.updatePopKurtosis(0)\n\t\t# stock.updatePopKurtosis(1)\n\n\t\t# featureVector.append(stock.popMeans[0])\n\t\t# featureVector.append(stock.popMeans[1])\n\t\t# featureVector.append(stock.popStdDevs[0])\n\t\t# featureVector.append(stock.popStdDevs[1])\n\t\t# featureVector.append(stock.popZScores[0])\n\t\t# featureVector.append(stock.popZScores[1])\n\t\t# 
featureVector.append(stock.popSkewness[0])\n\t\t# featureVector.append(stock.popSkewness[1])\n\t\t# featureVector.append(stock.popKurtosis[0])\n\t\t# featureVector.append(stock.popKurtosis[1])\n\n\t\t# [mean, stdDev, zScore, skewness, kurtosis]\n\n\t\t# fiveMin features\n\t\tfeatureVector.append(1 if fiveMinValue[0] > hrValue[0] else -1)\n\t\tfeatureVector.append(1 if fiveMinValue[1] > hrValue[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[2] > hrValue[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[3] > hrValue[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[4] > hrValue[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinVolume[0] > hrVolume[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[1] > hrVolume[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[2] > hrVolume[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[3] > hrVolume[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[4] > hrVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinValue[0] > thrtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[1] > thrtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[2] > thrtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[3] > thrtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[4] > thrtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinVolume[0] > thrtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[1] > thrtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[2] > thrtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[3] > thrtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[4] > thrtyMinVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinValue[0] > twtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[1] > twtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[2] > twtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[3] > twtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[4] > twtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinVolume[0] > twtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[1] > twtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[2] > twtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[3] > twtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[4] > twtyMinVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinValue[0] > ftnMinValue[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[1] > ftnMinValue[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[2] > ftnMinValue[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[3] > ftnMinValue[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[4] > ftnMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinVolume[0] > ftnMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[1] > ftnMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[2] > ftnMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[3] > ftnMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[4] > ftnMinVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if fiveMinValue[0] > tenMinValue[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[1] > tenMinValue[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[2] > tenMinValue[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[3] > tenMinValue[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinValue[4] > tenMinValue[4] else 
0)\n\n\t\tfeatureVector.append(1 if fiveMinVolume[0] > tenMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[1] > tenMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[2] > tenMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[3] > tenMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if fiveMinVolume[4] > tenMinVolume[4] else 0)\n\n\t\t# tenMin features\n\t\tfeatureVector.append(1 if tenMinValue[0] > hrValue[0] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[1] > hrValue[1] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[2] > hrValue[2] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[3] > hrValue[3] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[4] > hrValue[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinVolume[0] > hrVolume[0] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[1] > hrVolume[1] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[2] > hrVolume[2] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[3] > hrVolume[3] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[4] > hrVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinValue[0] > thrtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[1] > thrtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[2] > thrtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[3] > thrtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[4] > thrtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinVolume[0] > thrtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[1] > thrtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[2] > thrtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[3] > thrtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[4] > thrtyMinVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinValue[0] > twtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[1] > twtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[2] > twtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[3] > twtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[4] > twtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinVolume[0] > twtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[1] > twtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[2] > twtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[3] > twtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[4] > twtyMinVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinValue[0] > ftnMinValue[0] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[1] > ftnMinValue[1] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[2] > ftnMinValue[2] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[3] > ftnMinValue[3] else 0)\n\t\tfeatureVector.append(1 if tenMinValue[4] > ftnMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if tenMinVolume[0] > ftnMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[1] > ftnMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[2] > ftnMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[3] > ftnMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if tenMinVolume[4] > ftnMinVolume[4] else 0)\n\n\t\t# ftnMin (15) features\n\t\tfeatureVector.append(1 if ftnMinValue[0] > hrValue[0] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[1] > hrValue[1] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[2] > hrValue[2] else 0)\n\t\tfeatureVector.append(1 if 
ftnMinValue[3] > hrValue[3] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[4] > hrValue[4] else 0)\n\n\t\tfeatureVector.append(1 if ftnMinVolume[0] > hrVolume[0] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[1] > hrVolume[1] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[2] > hrVolume[2] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[3] > hrVolume[3] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[4] > hrVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if ftnMinValue[0] > thrtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[1] > thrtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[2] > thrtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[3] > thrtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[4] > thrtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if ftnMinVolume[0] > thrtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[1] > thrtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[2] > thrtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[3] > thrtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[4] > thrtyMinVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if ftnMinValue[0] > twtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[1] > twtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[2] > twtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[3] > twtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if ftnMinValue[4] > twtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if ftnMinVolume[0] > twtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[1] > twtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[2] > twtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[3] > twtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if ftnMinVolume[4] > twtyMinVolume[4] else 0)\n\n\t\t# twtyMin (20) features\n\t\tfeatureVector.append(1 if twtyMinValue[0] > hrValue[0] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[1] > hrValue[1] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[2] > hrValue[2] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[3] > hrValue[3] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[4] > hrValue[4] else 0)\n\n\t\tfeatureVector.append(1 if twtyMinVolume[0] > hrVolume[0] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[1] > hrVolume[1] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[2] > hrVolume[2] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[3] > hrVolume[3] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[4] > hrVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if twtyMinValue[0] > thrtyMinValue[0] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[1] > thrtyMinValue[1] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[2] > thrtyMinValue[2] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[3] > thrtyMinValue[3] else 0)\n\t\tfeatureVector.append(1 if twtyMinValue[4] > thrtyMinValue[4] else 0)\n\n\t\tfeatureVector.append(1 if twtyMinVolume[0] > thrtyMinVolume[0] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[1] > thrtyMinVolume[1] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[2] > thrtyMinVolume[2] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[3] > thrtyMinVolume[3] else 0)\n\t\tfeatureVector.append(1 if twtyMinVolume[4] > thrtyMinVolume[4] else 0)\n\n\t\t# thrty (30) features\n\t\tfeatureVector.append(1 if thrtyMinValue[0] > hrValue[0] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[1] > 
hrValue[1] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[2] > hrValue[2] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[3] > hrValue[3] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[4] > hrValue[4] else 0)\n\n\t\tfeatureVector.append(1 if thrtyMinVolume[0] > hrVolume[0] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[1] > hrVolume[1] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[2] > hrVolume[2] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[3] > hrVolume[3] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[4] > hrVolume[4] else 0)\n\n\t\tfeatureVector.append(1 if thrtyMinValue[0] > twoHrValue[0] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[1] > twoHrValue[1] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[2] > twoHrValue[2] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[3] > twoHrValue[3] else 0)\n\t\tfeatureVector.append(1 if thrtyMinValue[4] > twoHrValue[4] else 0)\n\n\t\tfeatureVector.append(1 if thrtyMinVolume[0] > twoHrVolume[0] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[1] > twoHrVolume[1] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[2] > twoHrVolume[2] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[3] > twoHrVolume[3] else 0)\n\t\tfeatureVector.append(1 if thrtyMinVolume[4] > twoHrVolume[4] else 0)\n\n\t\t# print (\"features:\\n\", featureVector)\n\n\t\treturn featureVector", "title": "" }, { "docid": "f126835a02fa5aebb8f8f5e599ca2ff1", "score": "0.5113317", "text": "def get_dataframe(self, *features):\n if features:\n return self.df.loc[:, features]\n return self.df[:]", "title": "" }, { "docid": "30f69c552906e144be53212f6b06ab0a", "score": "0.5110518", "text": "def get_port_val(self, df, allocs, start_value):\n df /= df.iloc[0]\n df *= allocs\n df *= start_value\n df = df.sum(axis = 1)\n return df", "title": "" }, { "docid": "18602d885c16f097eb0c5ec502ea4979", "score": "0.5107745", "text": "def retrieve_dataframe():\n\n raw_df = import_df('all_hospitals')\n raw_df = clean_question_marks(raw_df)\n zero_to_NaN(raw_df, 'chol_mg_dl')\n zero_to_NaN(raw_df, 'rest_bp')\n change_types(raw_df)\n df = NaN_to_modes(raw_df)\n df = reduce_diagnosis(df)\n men = df.loc[df['sex'] == 1.0, ]\n men['hospital'] = men['hospital'].apply(hospital_to_number)\n\n return men", "title": "" }, { "docid": "e1d4551b03a093fb77ef39eefb15ef82", "score": "0.5107024", "text": "def add_current_col(self):\n\n # create a list of current infections, with index correlated to that of the dataframe\n\n rows_num = len(self.get_covid_start().index)\n i = 0\n current_infected = []\n\n while (i < rows_num):\n current_infected.append(self.get_covid_start()['Confirmed'].iloc[i] - \\\n self.get_covid_start()['Recovered'].iloc[i] - \\\n self.get_covid_start()['Deaths'].iloc[i])\n i += 1\n\n # add this list as a column to the dataframe, for a new dataframe as output\n\n df_current = self.get_covid_start().assign(Current = current_infected)\n\n return df_current", "title": "" }, { "docid": "dd17e4681b3f91e0fbc5a32336178074", "score": "0.51019067", "text": "def vaccine(date):\r\n\r\n try:\r\n prefix_url = 'https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/' \\\r\n 'Informe_Comunicacion_'\r\n suffix_url = '.ods'\r\n nfile = f'{prefix_url}{date}{suffix_url}'\r\n file_vaccine = pd.read_excel(nfile, engine='odf')\r\n file_vaccine.set_index('Unnamed: 0', inplace=True)\r\n vcant = file_vaccine.loc['Cantabria']\r\n vcant = pd.DataFrame(vcant).T\r\n vcant.index = [datetime.datetime.strptime(date, 
\"%Y%m%d\").strftime(\"%Y/%m/%d\")]\r\n\r\n return vcant\r\n\r\n except Exception as e:\r\n date = datetime.datetime.strptime(date, \"%Y%m%d\").strftime(\"%Y/%m/%d\")\r\n print(f\"Error downloading vaccination data for {date}\")\r\n # print(e)\r", "title": "" }, { "docid": "88335b23d90b3fc674643d37b0fa1b5f", "score": "0.51010096", "text": "def compute(self):\n df = self.data.copy()\n result = self.new()\n result.data = df\n print_log(df, \"compute\")\n return result", "title": "" }, { "docid": "4c5f32bb169bf60cd60774d9e184b4be", "score": "0.51006967", "text": "def basic_dataframe():\n return pd.DataFrame({\"lat\": [50, 51], \"lng\": [14, 15]})", "title": "" }, { "docid": "76a8a228311bacf765456c4d5b2776c7", "score": "0.5096315", "text": "def get_frovedis_series(self):\n if self.df is None:\n raise ValueError(\"column->series: column is not associated with a DataFrame!\")\n ret = self.df[[self.name]]\n ret.is_series = True\n return ret", "title": "" }, { "docid": "b98a08882b0abd78173b33eb639ee0c2", "score": "0.5093206", "text": "def X(self) -> pd.DataFrame:\n return self.get(column=\"X\", split=None)", "title": "" }, { "docid": "bc6b7f8133c5f08299d6029d8e1eaefe", "score": "0.50927603", "text": "def get_sofa_bilirubin(con) -> pd.DataFrame:\n # SOFA: Liver - Bilirubin (µmol/l)\n ##################################\n # get bilirubin\n print('Querying SOFA Liver...')\n filename = './sql/common/bilirubin.sql'\n sql_filename = os.path.join(dirname, filename)\n with open(sql_filename, 'r') as sql_file:\n sql_sofa_bilirubin = sql_file.read()\n sofa_bilirubin = read_sql(sql_sofa_bilirubin, con)\n\n # calculate SOFA liver score:\n print('Processing SOFA Liver...')\n sofa_bilirubin.loc[:, 'sofa_liver_score'] = 0\n sofa_bilirubin.loc[(sofa_bilirubin['value'] >= 20) & (sofa_bilirubin['value'] < 33), 'sofa_liver_score'] = 1\n sofa_bilirubin.loc[(sofa_bilirubin['value'] >= 33) & (sofa_bilirubin['value'] < 102), 'sofa_liver_score'] = 2\n sofa_bilirubin.loc[(sofa_bilirubin['value'] >= 102) & (sofa_bilirubin['value'] < 204), 'sofa_liver_score'] = 3\n sofa_bilirubin.loc[(sofa_bilirubin['value'] >= 204), 'sofa_liver_score'] = 4\n\n return sofa_bilirubin.sort_values(by=['admissionid', 'itemid', 'time']).reset_index(drop=True)", "title": "" }, { "docid": "dead8f557193cf2a2df7ca469b349855", "score": "0.5088119", "text": "def feature_extraction(df:pd.DataFrame, verbose=cfg.verbose):\r\n if verbose:\r\n print(\"Creating features from existing featurese\")\r\n # feature extraction: features related to room\r\n df['TotalBathAbvGrd'] = df['FullBath'] + 0.5 * df['HalfBath']\r\n df['TotalBathBsmt'] = df['BsmtFullBath'] + 0.5 * df['BsmtHalfBath']\r\n df['TotalRmsAbvGrdIncBath'] = df['TotRmsAbvGrd'] + df['TotalBathAbvGrd']\r\n df = df.drop(['FullBath', 'HalfBath', 'BsmtFullBath', 'BsmtHalfBath', 'TotalBathAbvGrd', 'TotRmsAbvGrd' ], axis='columns')\r\n # df['TotalRms'] = df['TotalRmsAbvGrdIncBath'] + df['TotalBathBsmt'] # low LASSO coeff\r\n\r\n\r\n # feature extraction: features related to area\r\n # df['TotalSF'] = df['TotalBsmtSF'] + df['GrLivArea'] # low LASSO coeff\r\n df['TotalPorch'] = df['OpenPorchSF'] + df['EnclosedPorch'] + df['ScreenPorch'] + df['3SsnPorch']\r\n df = df.drop(['OpenPorchSF', 'EnclosedPorch', 'ScreenPorch', '3SsnPorch'], axis='columns')\r\n # feature extraction: assigning number to ordinal features\r\n ordinal_features = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'HeatingQC',\r\n 'KitchenQual', 'FireplaceQu', 'GarageQual', 'GarageCond', 'PoolQC']\r\n df[ordinal_features] = 
df[ordinal_features].replace('Ex', 5).\\\r\n replace('Gd', 4).replace('TA', 3).replace('Fa', 2).replace('Po', 1).replace('NA', 0)\r\n # creating Quality * Cond features\r\n # Example: if the condition and the quality of the bsmt is high the weight of the BsmtQualCond is 25!\r\n df['ExterQualCond'] = df['ExterQual'] * df['ExterCond']\r\n df['BsmtQualCond'] = df['BsmtQual'] * df['BsmtCond']\r\n df['GarageQualCond'] = df['GarageQual'] * df['GarageCond']\r\n df['OverallQualCond'] = df['OverallQual'] * df['OverallCond']\r\n # df = df.drop(['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond', 'OverallQual', 'OverallCond'], axis='columns')\r\n # feature extraction: assigning number to ordinal features\r\n ordinal_features = ['BsmtExposure']\r\n df[ordinal_features] = df[ordinal_features].replace('Gd', 4).replace('Av', 3).replace('Mn', 2).replace('No', 1)\\\r\n .replace('NA', 0)\r\n # feature extraction: assigning number to ordinal features\r\n ordinal_features = ['BsmtFinType1', 'BsmtFinType1']\r\n df[ordinal_features] = df[ordinal_features].replace('GLQ', 6).replace('ALQ', 5).replace('BLQ', 4).replace('Rec', 3)\\\r\n .replace('LwQ', 2).replace('Unf', 1).replace('NA', 0)\r\n # feature extraction: assigning number to ordinal features\r\n ordinal_features = ['GarageFinish']\r\n df[ordinal_features] = df[ordinal_features].replace('Fin', 3).replace('RFn', 2).replace('Unf', 1).replace('NA', 0)\r\n # feature extraction: assigning number to ordinal features\r\n ordinal_features = ['Fence']\r\n df[ordinal_features] = df[ordinal_features].replace('GdPrv', 1).replace('MnPrv', 0).replace('GdWo', 0)\\\r\n .replace('MnWw', 0).replace('NA', 0) # according to bar plot the main diff for sale price is between good privacy and the rest\r\n # creating time features\r\n df['YearBuiltRemod'] = df['YearRemodAdd'] - df['YearBuilt']\r\n df['YearBuiltSold'] = df['YrSold'] - df['YearBuilt']\r\n df['YearRemodSold'] = df['YrSold'] - df['YearRemodAdd']\r\n df['YearGarageSold'] = df['YrSold'] - df['GarageYrBlt']\r\n df['HasPool'] = df['PoolArea'] > 0\r\n df = df.drop(['PoolQC', 'PoolArea'], axis='columns')\r\n # df['PoolQcArea'] = df['PoolQC'] * df['PoolArea']\r\n # df = create_quality_area(quality='MasVnrType', area='MasVnrArea', df=df)\r\n # df = create_quality_area(quality='BsmtFinType1', area='BsmtFinSF1', df=df)\r\n # df = create_quality_area(quality='GarageQual', area='GarageArea', df=df)\r\n # df = create_quality_area(quality='PoolQC', area='PoolArea', df=df)\r\n # df.drop(['BsmtFinType2', 'BsmtFinSF2', 'ExterCond', '3SsnPorch',\r\n # 'ScreenPorch', 'YrSold', 'MoSold', 'OverallCond', 'PoolQC', 'PoolArea'], axis='columns', inplace=True)\r\n return df", "title": "" }, { "docid": "b3705d6dfc776a9798e3f624af4ef1f4", "score": "0.5073999", "text": "def get_data_operator(bucket, fecha_actual, dias, dataframe):\n\n # Data final\n data_final = pd.DataFrame()\n\n # Lista de fechas en el rango\n rutas = []\n\n # Calculamos los dias anteriores\n for i in range(dias):\n # print(i)\n d = (fecha_actual-timedelta(i)).day\n m = (fecha_actual-timedelta(i)).month\n y = (fecha_actual-timedelta(i)).year\n ruta = \"indicadores/\" + str(y) + \"/\" + str(m) + \"/\" + dataframe + f\"_{d}-{m}-{y}.pkl\"\n # print(prefijo)\n rutas.append(ruta)\n\n # Para cada dia\n for ruta in tqdm(rutas):\n\n try:\n s3 = boto3.client(\"s3\")\n obj = s3.get_object(Bucket=bucket,\n Key=ruta)\n body = obj[\"Body\"].read()\n data = pickle.loads(body)\n\n data_final = pd.concat([data_final, data], axis=0)\n \n except:\n \n pass\n\n return 
data_final", "title": "" }, { "docid": "f9b8270a86cb88552f70157221ec51f7", "score": "0.5071449", "text": "def build_dataframe(self):\n import pandas as pd\n headers = self.get_headers()\n assert 0 not in self.element\n if self.nonlinear_factor not in (None, np.nan):\n #Time 0.0 10.0\n #ElementID Item\n #10 fapplied 0.0 0.000000\n # free_conv 0.0 499.376068\n # force_conv 0.0 0.000000\n # frad 0.0 0.000000\n # ftotal 0.0 499.376068\n #20 fapplied 0.0 0.000000\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n # >=25.0\n #Static fapplied free_conv force_conv frad ftotal\n #ElementID\n #1 0.166667 -0.166667 0.0 0.0 0.0\n #2 0.166667 -0.166667 0.0 0.0 0.0\n #3 0.166667 -0.166667 0.0 0.0 0.0\n #4 0.166667 -0.166667 0.0 0.0 0.0\n #5 0.166667 -0.166667 0.0 0.0 0.0\n #6 0.166667 -0.166667 0.0 0.0 0.0\n #\n # <=24.2\n #Static 0\n #ElementID Item\n #1 fapplied 0.166667\n # free_conv -0.166667\n # force_conv 0.000000\n # frad 0.000000\n # ftotal 0.000000\n data_frame = pd.DataFrame(self.data[0], columns=headers, index=self.element)\n data_frame.index.name = 'ElementID'\n data_frame.columns.names = ['Static']\n #data_frame = pd.Panel(self.data, major_axis=self.element, minor_axis=headers).to_frame()\n #data_frame.columns.names = ['Static']\n #data_frame.index.names = ['ElementID', 'Item']\n self.data_frame = data_frame", "title": "" }, { "docid": "bb65022dff9b719cd0a7de3191de43af", "score": "0.5068317", "text": "def getdfmap(self,var,**kwargs):\n\n dataframe= self.dataframe\n if not isinstance(var, list):\n var = [var]\n\n\n data_file = pd.DataFrame()\n for index,row in dataframe.iterrows(): # COULD USE AN APPLY FUNCTION\n ARPS=None\n BASE=None\n \n ARPS = arps()\n\n vars_file = {}\n for v in var:\n try:\n BASE = BaseVars(row['InPath'], row['model'])\n ARPS.load(BASE)\n SPEV = SpecVar()\n ARPS.load(BASE)\n ARPS.load(SPEV)\n\n d = ARPS.get(v, **kwargs)\n d = d[:] # important\n\n d = d.flatten()\n vars_file[v] = d\n except IOError:\n vars_file[v] = np.array([np.nan])\n\n temp = pd.DataFrame(vars_file).T # MODIFICATION TO TAKE MULTIPLE POINT BUT JUST ONE VARIABLE\n data_file = pd.concat([data_file, temp],axis=0)\n\n data_file.index = dataframe.index\n return data_file", "title": "" }, { "docid": "1ab2980e9a5d2f55176917ce1f111a6e", "score": "0.50661486", "text": "def to_frame(self) -> DataFrame:\n if self.is_split:\n data = DataFrame(self).T\n data.index.name = 'date_time'\n data.columns = MultiIndex.from_product([[self._name], data.columns])\n return data.replace(nan, 0)\n else:\n return self.to_series().to_frame().replace(nan, 0)", "title": "" }, { "docid": "c8bcee81e66dd9d52f0723144309c24d", "score": "0.5063662", "text": "def get_results(self) -> pd.DataFrame:", "title": "" }, { "docid": "1379bdc1feede65ea58eb248a6474517", "score": "0.5061183", "text": "def pred_from_suprise_to_df(model_pred):\n\n \n list=[]\n for pred in model_pred:\n list.append(pred.est)\n return pd.DataFrame(list)", "title": "" }, { "docid": "ff7e8f6f4a44e80751ef0744949f045e", "score": "0.50576466", "text": "def build_dataframe(self):\n #Mode 1 2\n #EigenvalueReal -0.0 -0.0\n #EigenvalueImag -0.0 -0.0\n #Damping 0.0 0.0\n #ElementID Item\n #22 max_shear 5.855954e-09+0.000000e+00j 0.000000+0.000000j\n # avg_shear 5.855954e-09+0.000000e+00j 0.000000+0.000000j\n #import pandas as pd\n column_names, column_values = self._build_dataframe_transient_header()\n self.data_frame = 
self._build_pandas_transient_elements(\n column_values, column_names,\n self.headers, self.element, self.data)", "title": "" }, { "docid": "aff785c9b2a858eb3788514010ffe1e8", "score": "0.50528127", "text": "def getAllResults(mode):\n \n df = getMirex(mode)\n \n # put Iowa values into data frames in two new columns\n new_values = {'low':'KM1', 'high':'KM2'}\n \n for dmp, col in new_values.items():\n \n # put empty column at end of dataframe\n df.insert(len(df.columns), col, 0.0)\n \n # get Iowa results for current damping\n results = getIowaResults('f', dmp)\n \n # enter Iowa results into dataframe \n for k,v in results.items():\n df.at[k, col] = v\n \n return df", "title": "" }, { "docid": "13c2210ad81ac4421937e32d229513d9", "score": "0.50497824", "text": "def get_ts(self, lev):\n\n df = pd.DataFrame({'t': [], self.target_var: []})\n\n # To print an empty line before masked value error messages\n mask_i = 0\n\n for file in os.listdir(self.path):\n\n data = Dataset(os.path.join(self.path, file), 'r')\n i, j = self.get_ij(data)\n\n s = file.split('_')[2]+'_'+file.split('_')[3].split('.')[0]+':'\\\n + file.split('_')[4]+':'+file.split('_')[5].split('.')[0]\n t = datetime.strptime(s, '%Y-%m-%d_%H:%M:%S')\n\n height_ind = np.where(data['level'][:].data == lev)[0][0]\n\n u = data.variables[self.var[0]][height_ind][i][j]\n v = data.variables[self.var[1]][height_ind][i][j]\n ws = np.sqrt(u**2 + v**2)\n\n ws, mask_i = check_input_data.convert_mask_to_nan(ws, t, mask_i)\n ws = check_input_data.convert_flag_to_nan(ws, self.flag, t)\n\n data.close()\n\n df = df.append([{'t': t, self.target_var: ws}])\n\n df = df.set_index('t').sort_index()\n\n # Same process as in the crosscheck_ts class\n time_diff = df.index.to_series().diff()\n\n if len(time_diff[1:].unique()) == 1:\n\n if self.freq > time_diff[1].components.minutes:\n\n df = df.resample(\n str(self.freq)+'T', label='right',\n closed='right')\n\n if self.select_method == 'average':\n df = df.mean()\n if self.select_method == 'instance':\n df = df.asfreq()\n\n df = check_input_data.verify_data_file_count(df, self.target_var,\n self.path, self.freq\n )\n\n return df", "title": "" }, { "docid": "9fd5a54d7da68b8652a8e3f715c1aa47", "score": "0.5046183", "text": "def get_df(self):\r\n \r\n return self.df.copy()", "title": "" }, { "docid": "ebd29aa327dc18e627d3e802a138ceba", "score": "0.5044848", "text": "def _make_df(self):\n tasks = self.load()\n cols = \"Subject Task Trial State Start Stop Velum Pharynx UES Eso\".split()\n idxs = \"Subject State Task Trial\".split()\n df = pd.DataFrame(columns=cols)\n\n df = df.append(tasks, ignore_index=True)\n df = df.set_index(idxs).sort_index()\n\n # Create the folder that will contain the data\n save_path = self.sovt.path_out_root / \"data\"\n save_path.mkdir(parents=True, exist_ok=True)\n df.to_pickle(save_path / \"segs.pkl\")\n self.sovt.segs = df", "title": "" }, { "docid": "6d2db5fb927cf39c5ab7af30a3a3fb20", "score": "0.5041477", "text": "def process(cls, df):\n return df", "title": "" }, { "docid": "f0fca03cc610e5f9874ebdde7ec69e83", "score": "0.5037744", "text": "def get_data_to_parameter(data: pd.DataFrame, key: str, exact: bool = False):\n dic = {}\n for index, bacteria in data.iterrows():\n df_bac = pd.DataFrame(bacteria).loc[key, :]\n for vectors in df_bac:\n if (key == 'velocity' or key == 'position' or key == 'acceleration') & (exact is False):\n # calculate norm for velocity and position\n vectors = get_euclid_norm(vectors)\n dic.update({str(index) + '_' + key: vectors})\n df = pd.DataFrame(dict([(k, 
pd.Series(v)) for k, v in dic.items()]))\n\n def isnan(vector):\n if np.isnan(np.min(vector)) and exact:\n return False\n elif exact:\n return True\n elif not exact:\n return pd.isna(vector)\n\n df = df.transform(lambda x: sorted(x, key=isnan, reverse=True))\n return df", "title": "" }, { "docid": "2d656fd93724738b7aa089a380a7b5cf", "score": "0.5037513", "text": "def build_dataframe(self):\n import pandas as pd\n headers = self.get_headers()\n\n #nelements = self.element.shape[0]# // 2\n if self.nonlinear_factor not in (None, np.nan):\n #Time 0.0 10.0\n #ElementID Item\n #1 grad1 0.0 -1.734723e-18\n # grad2 0.0 -1.301043e-18\n # grad3 0.0 1.951564e-18\n # flux1 -0.0 3.538836e-16\n # flux2 -0.0 2.654127e-16\n # flux3 -0.0 -3.981190e-16\n column_names, column_values = self._build_dataframe_transient_header()\n data_frame = self._build_pandas_transient_elements(\n column_values, column_names,\n headers, self.element, self.data)\n else:\n df1 = pd.DataFrame(self.element)\n df1.columns = ['ElementID']\n df2 = pd.DataFrame(self.data[0])\n df2.columns = headers\n data_frame = df1.join(df2)\n #print(self.data_frame)\n self.data_frame = data_frame", "title": "" }, { "docid": "5f632211c62e442019e1cf40100d661b", "score": "0.5037273", "text": "def df(self, t, x, (p, stim)):\n\n\t\tMm = x[...,0]\n\t\tFR = x[...,1]\n\t\tK_off_a, K_on_a, Nn, \\\n\t\t\talpha_m, m_0, \\\n\t\t\ta_0, tau_m, \\\n\t\t\tk_FR, tau_FR = p\n\n\t\tdf_vec = sp.empty_like(x)\t\n\n\t\tf_c = sp.log((1. + stim/K_off_a)/(1. + stim/K_on_a))\n\t\tf_m = alpha_m*(m_0 - Mm)\n\t\tEe = Nn*(f_m + f_c)\n\t\tAa = (1. + sp.exp(Ee))**-1.0\n\n\t\tdf_vec[..., 0] = (a_0 - Aa)/tau_m\n\t\tdf_vec[..., 1] = (k_FR*Aa - FR)/tau_FR\n\n\t\treturn df_vec", "title": "" }, { "docid": "41a4406024bd13f8624bda8b0b29061a", "score": "0.5037232", "text": "def get_training_load_data(self, whole: str = None) -> Tuple[PandasDataFrame, PandasSeries]:\n query = f\"\"\"SELECT total_load, holiday,date,\n CASE EXTRACT(MONTH FROM date)\n WHEN '1' THEN 'january' WHEN 2 THEN 'february' WHEN '3' THEN 'march' WHEN '4' THEN 'april'\n WHEN '5' THEN 'may' WHEN '6' THEN 'june' WHEN '7' THEN 'july' WHEN '8' THEN 'august'\n WHEN '9' THEN 'september' WHEN '10' THEN 'october' WHEN '11' THEN 'november' WHEN '12' THEN 'december'\n END as str_month,\n CASE EXTRACT(HOUR FROM date)\n WHEN '1' THEN '1'\tWHEN '2' THEN '2'\tWHEN '3' THEN '3'\tWHEN '4' THEN '4'\n WHEN '5' THEN '5'\tWHEN '6' THEN '6'\tWHEN '7' THEN '7'\tWHEN '8' THEN '8'\n WHEN '9' THEN '9'\tWHEN '10' THEN '10'\tWHEN '11' THEN '11'\tWHEN '12' THEN '12'\n WHEN '13' THEN '13'\tWHEN '14' THEN '14'\tWHEN '15' THEN '15'\tWHEN '16' THEN '16'\n WHEN '17' THEN '17'\tWHEN '18' THEN '18'\tWHEN '19' THEN '19'\tWHEN '20' THEN '20'\n WHEN '21' THEN '21'\tWHEN '22' THEN '22'\tWHEN '23' THEN '23'\tWHEN '0' THEN '0'\n END as str_hour\n from energy.energy_load\"\"\"\n df = self.query_from_sql_to_pandas(query)\n if whole: return df\n else:\n predictors = df[[\"holiday\", \"str_hour\", \"str_month\"]]\n target = df.loc[:, ['total_load']]\n return predictors, target", "title": "" }, { "docid": "f6f8c159eac7caee714ab05dccee9fa3", "score": "0.5033387", "text": "def get_df_serie(df):\n df['Serie'] = df.apply(lambda x: get_serie_name(x['bookSeries'])[0],axis = 1 )\n df['Number'] = df.apply(lambda x: get_serie_name(x['bookSeries'])[1],axis = 1 )\n \n #removing all the wrong values of numbers and the book that doesn't have a serie\n df_serie = 
df.loc[df['Number'].str.contains('^((?![-,.]).)*$')][['index','bookTitle','Serie','Number','number_of_pages','first_publication_date']]\n \n #the first 10 series in order of appearance\n first_10= list(df_serie['Serie'])[:11]\n \n #filtering the df\n df_serie = df_serie.loc[df['Serie'].isin(first_10)]\n \n df_serie['Year'] = df.apply(lambda x : get_year(x['first_publication_date']),axis = 1)\n\n ord_df_series = pd.DataFrame()\n for serie_name, serie in df_serie.groupby('Serie'):\n serie = serie.sort_values('Year')\n starting_year = min(serie['Year'])\n serie['Year'] = serie.apply(lambda x: start_from(x['Year'],starting_year),axis = 1)\n serie['Sum of Pages']= serie['number_of_pages'].cumsum()\n ord_df_series = pd.concat([ord_df_series,serie])\n return ord_df_series", "title": "" }, { "docid": "299f84d54529b2aea6335f2d75bf399b", "score": "0.50292397", "text": "def provide_dataframe(self):\n query = self.wz.session.query(self.wz.be_sampling_day)\n df = pd.read_sql(query.statement, query.session.bind)\n return df", "title": "" }, { "docid": "5fdbaf3c7dd20b9fa8faa5c582c9ea0b", "score": "0.502253", "text": "def df(self):\n return self._f", "title": "" }, { "docid": "6ca6ebc767facdb300c65b87cd290790", "score": "0.50208277", "text": "def __init__(self,df):\n \n self.df = df", "title": "" } ]
7504c3669d05550d91d0e2c455e1eeb6
Helper function to get the value of the move with the minimum weight
[ { "docid": "5624456f1a3c79d0a5535eae1b9ccde5", "score": "0.6600952", "text": "def get_min_value(board, alpha, beta):\n min_value = math.inf\n min_move = None\n\n action_weights = []\n if terminal(board):\n return utility(board)\n\n for action in actions(board):\n min_value = min(min_value, get_max_value(result(board, action), alpha, beta))\n beta = min_value\n\n if beta <= alpha:\n break\n\n return min_value", "title": "" } ]
[ { "docid": "0145eba868631cb2c3642a449d530696", "score": "0.6817948", "text": "def min_value(game_state):\n v = sys.maxsize\n for move in game_state.possible_moves():\n v1 = value(game_state.successor(move, 'user'), 'AI') # little gud\n tup = [v, v1]\n # print(\"TUP\", tup)\n v = min(tup)\n # print('MIN: ', v)\n return v", "title": "" }, { "docid": "2fbb5895895552b48e916e8f2ce89b50", "score": "0.6773064", "text": "def min_value(board):\n if terminal(board):\n u = utility(board)\n return u\n \n v = float('inf')\n for action in actions(board):\n v = min(v, max_value(result(board, action)))\n \n return v", "title": "" }, { "docid": "56a63815604e210e6a7f46fb79efe1b9", "score": "0.6629827", "text": "def min_value(board):\n if terminal(board):\n return utility(board)\n v = math.inf\n for action in actions(board):\n v = min(v, max_value(result(board, action)))\n return v", "title": "" }, { "docid": "9fd30e7563ecaa60af21064988f762a9", "score": "0.6624486", "text": "def min_value(board):\n\n if terminal(board):\n return utility(board)\n\n v = math.inf\n for action in actions(board):\n v = min(v, max_value(result(board, action)))\n return v", "title": "" }, { "docid": "56fa2e301ce2ddfbeff22aef0d51b155", "score": "0.66005737", "text": "def min(self):", "title": "" }, { "docid": "d5d2f15bb750de3a49c365eb37336b5c", "score": "0.6587178", "text": "def min_value(game, depth):\n if terminal_test(game, depth):\n #print(\"min depth {0}\".format(depth))\n return self.score(game, self)\n v = float(\"inf\")\n for move in game.get_legal_moves():\n v = min(v, max_value(game.forecast_move(move), depth-1))\n return v", "title": "" }, { "docid": "7e37d9b0f897c8d17a6303dc0b5f8b63", "score": "0.6580979", "text": "def min_value(gameState, depth):\r\n # TODO: finish this function!\r\n #pass\r\n# if terminalTest(state) then return utility(state)\r\n# v = inf\r\n# for each a in actions(state) do:\r\n# v = min(v,max_value(results(state,a)))\r\n# return v\r\n if gameState.terminal_test():\r\n return gameState.utility(0)\r\n \r\n if depth <= 0: #Check if the depth limit is reached\r\n return my_moves(gameState)\r\n \r\n v = float(\"inf\")\r\n for a in gameState.actions():\r\n v = min(v,max_value(gameState.result(a), depth - 1))\r\n return v", "title": "" }, { "docid": "9bbb8916f7f28b32043357b6e7d342c0", "score": "0.6577414", "text": "def weighted_min(a_star, node):\n available_agents = a_star.agents - node.attributed_agents\n if not available_agents:\n return 0\n node_job = node.job\n val_min = float('inf')\n for job in range(node_job+1, a_star.cost_matrix.shape[0]):\n for agent in available_agents:\n val_min = min(val_min, a_star.cost_matrix[job][agent])\n return val_min*len(available_agents)", "title": "" }, { "docid": "9b163213a8f0218392803fd1f614fd4c", "score": "0.654991", "text": "def min_value(gameState):\n if terminal_test(gameState):\n return 1\n\n min_val = float(\"inf\")\n for m in gameState.get_legal_moves():\n g = gameState.forecast_move(m)\n min_val = min(min_val, max_value(g))\n return min_val", "title": "" }, { "docid": "7e65d9a5cfb27e294a2c4bfa474064ed", "score": "0.65191174", "text": "def min(self) -> float:\n return self.__min", "title": "" }, { "docid": "e7f511bfcd9cf2643ba75fac3ef2e13d", "score": "0.6511556", "text": "def min(self) -> float:\n return self._min", "title": "" }, { "docid": "48961d0f85ab798f382d80fa258fa12d", "score": "0.6507911", "text": "def min_value(self, state, l_action):\r\n ended, winner = self.terminal_test(state)\r\n\r\n if ended:\r\n # Calculate the utility value\r\n return 
self.utility(state), l_action\r\n\r\n # Variables to check against\r\n v = 999\r\n g_action = (5, 5)\r\n\r\n for action in self.actions(state):\r\n # resultant state after taking action on the state\r\n rd = self.result(state, action)\r\n vv = self.max_value(rd, action)\r\n\r\n # Minimising the value\r\n if v > vv[0]:\r\n v = vv[0]\r\n g_action = action\r\n\r\n # Updating the record of visited state and the corresponding minimax value\r\n self.visited_node.append((copy.deepcopy(rd), vv[0]))\r\n\r\n # (best action value, best action)\r\n return v, g_action", "title": "" }, { "docid": "a52eea50da1bd79e33d169d50a49056b", "score": "0.65022075", "text": "def get_computed_weight(self, name=None):\n weight = sum(\n map(\n lambda move: move.get_weight(self.weight_uom, silent=True),\n self.moves\n )\n )\n return weight", "title": "" }, { "docid": "437e361e0ceec82f2eb5422c83a0643e", "score": "0.6472138", "text": "def minimax(board):\n best_move = None\n\n current_turn = player(board)\n alpha = -math.inf\n beta = math.inf\n\n if terminal(board):\n return None\n\n # Maximizing Player\n if current_turn == X:\n max_weight = -math.inf\n\n for action in actions(board):\n weight = get_min_value((result(board, action)), alpha, beta)\n\n if weight > max_weight:\n max_weight = weight\n best_move = action\n\n\n # Minimizing Player\n elif current_turn == O:\n min_weight = math.inf\n for action in actions(board):\n weight = get_max_value((result(board, action)), alpha, beta)\n\n if weight < min_weight:\n min_weight = weight\n best_move = action\n\n else:\n return # Not a possible condition\n\n return best_move", "title": "" }, { "docid": "742ed85fd3a3573ae0c6631b3bedfda6", "score": "0.6458531", "text": "def get_minimum_fast(self):\n pass", "title": "" }, { "docid": "570f24c10e3bff004f237dd11e095847", "score": "0.64521736", "text": "def MIN(board):\n v = 69\n move = None\n for action in actions(board):\n if v > max_value(result(board, action)):\n move = action\n v = max_value(result(board, action))\n return move", "title": "" }, { "docid": "79032d3bd25eca48c1cff62386aeafeb", "score": "0.6440965", "text": "def knapsack_helper(value, weight, m, i, w):\n if m[i][w] >= 0:\n return m[i][w]\n \n if i == 0:\n q = 0\n elif weight[i] <= w:\n q = max(knapsack_helper(value, weight,\n m, i - 1 , w - weight[i])\n + value[i],\n knapsack_helper(value, weight,\n m, i - 1 , w))\n else:\n q = knapsack_helper(value, weight,\n m, i - 1 , w)\n m[i][w] = q\n return q", "title": "" }, { "docid": "d994862e10d7724d950e89fe2c961011", "score": "0.64180166", "text": "def min_value(self, gameState, index, depth):\n\n value = 99999\n bestAction = \"\"\n for action in gameState.getLegalActions(index):\n newValue = self.value(gameState.generateSuccessor(index,action), (index+1)%gameState.getNumAgents(), depth)\n if newValue < value:\n value = newValue\n bestAction = action\n # print \"index =\", index, \"depth :\", depth, \"value :\", value\n #input(\"okay ?\")\n if bestAction ==\"\":\n value = self.evaluationFunction(gameState)\n return value, bestAction", "title": "" }, { "docid": "779c113ab4cbb826e83872c8ec1a7fdb", "score": "0.641117", "text": "def min(self):\r\n return self.minimum.data", "title": "" }, { "docid": "24cf227e5350e4816f4d8012a0012560", "score": "0.6387037", "text": "def get_weight(self, name=None):\n return sum(\n map(\n lambda move: move.get_weight(self.weight_uom, silent=True),\n self.outgoing_moves\n )\n )", "title": "" }, { "docid": "72b6248f0b4d99fd224080b0849ed940", "score": "0.63774586", "text": "def minimax(board):\n 
if terminal(board):\n return None\n\n if player(board) == X:\n return max_value(board, -1, 1)[1]\n return min_value(board, -1, 1)[1]", "title": "" }, { "docid": "23c06d893982f10bacd78209f9cbb79e", "score": "0.637096", "text": "def my_min(self, s,depth, orig, alpha, beta):\n if self.cutoff_test(s, depth):\n return self.utility(s)\n v = sys.maxint - 1\n self.depthcount = self.depthcount + 1\n for move in s._legal_moves():\n self.statecounter = self.statecounter+1\n s.do_move(move) #do the move\n v = min(v, self.my_max(s,depth, orig, alpha, beta)) #recurse\n s.undo_move(move) #undo the move when we return from the above\n if v <= alpha:# \n self.depthcount = self.depthcount - 1\n return v\n else:\n beta = min(beta, v)\n self.depthcount = self.depthcount - 1\n return v", "title": "" }, { "docid": "e143bc06ec82e24b96a0a99f9709057e", "score": "0.6367579", "text": "def weight(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "5a720f55add34526dd4256aff4d89688", "score": "0.6358483", "text": "def minimax(board):\n \n if player(board) == X:\n return Max_value(board, float('-inf'), float('inf'))[1]\n else:\n return Min_value(board, float('-inf'), float('inf'))[1]", "title": "" }, { "docid": "414af0401c991719ec2efe3b99f852b8", "score": "0.6358289", "text": "def get_min(self):\n \n return self.minimum", "title": "" }, { "docid": "3dc750864ca02d950a16257d689791a8", "score": "0.6327754", "text": "def minimum(self):\n return self._minimum", "title": "" }, { "docid": "51d669c7983a2ea7fec337bbafd80f3a", "score": "0.6317152", "text": "def min_value(self, game_object, board):\n v = math.inf\n if self.terminal(game_object):\n return self.utility(game_object)\n for action in game_object.get_possible_move():\n game_copy = self.recopy_game(game_object)\n game_copy.make_move(game_copy.get_current_player(), action)\n board = game_copy.get_board()\n v = min(v, self.max_value(game_copy, board))\n return v", "title": "" }, { "docid": "514e48d6b76689ef999c036e393f2fb4", "score": "0.6314907", "text": "def min_value(board):\n if terminal(board):\n return utility(board)\n v = 69\n for action in actions(board):\n if v > max_value(result(board, action)):\n v = max_value(result(board, action))\n return v", "title": "" }, { "docid": "abf11734798b008cf342f5ff089ee6ca", "score": "0.62978196", "text": "def get_min(self):\n return self.min", "title": "" }, { "docid": "81fedf35048766c541f58c184ab01fbe", "score": "0.62868005", "text": "def min_temp(self) -> float:\n return None", "title": "" }, { "docid": "dc2c2cdd2fc9dc97fac5576e5ffef1b1", "score": "0.62808496", "text": "def abdlmin_value( game_state, alpha, beta, depth):\n b = -beta\n v = sys.maxsize\n for move in game_state.possible_moves():\n v = min([v, abdl_value(game_state.successor(move, 'user'), 'AI', alpha, b, depth -1 )])\n if v <= alpha:\n return v\n b = min([b, v])\n return v", "title": "" }, { "docid": "cac463f3cbb9362330f9754e522f6e49", "score": "0.62660927", "text": "def get_min(self):\n return self.serie.min()", "title": "" }, { "docid": "2bf188c0b338a360e991a6382726a067", "score": "0.6259423", "text": "def get_min(self,*args):\n return BSTree.get_min(self,*args)", "title": "" }, { "docid": "d4b9c822418d8118af83b0d215fb3e6b", "score": "0.62538326", "text": "def weight(self) -> Optional[float]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "bca84d6dcdb7be52150bab9bcf5c5c3b", "score": "0.62499183", "text": "def min_value(self, game, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n 
raise SearchTimeout()\n\n if self.is_game_over(game):\n return float(\"inf\")\n\n lowest_score = float(\"inf\")\n for move in game.get_legal_moves():\n if depth <= 1:\n lowest_score = min(lowest_score, self.score(game.forecast_move(move), self))\n else:\n lowest_score = min(\n lowest_score,\n self.max_value(game.forecast_move(move), depth - 1))\n\n return lowest_score", "title": "" }, { "docid": "83eb10f0f2548854639255de7bb40931", "score": "0.6245546", "text": "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "83eb10f0f2548854639255de7bb40931", "score": "0.6245546", "text": "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "83eb10f0f2548854639255de7bb40931", "score": "0.6245546", "text": "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "83eb10f0f2548854639255de7bb40931", "score": "0.6245546", "text": "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "83eb10f0f2548854639255de7bb40931", "score": "0.6245546", "text": "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "97a5b9af954c373e1376774307762fa8", "score": "0.6230148", "text": "def min_value(self):\n return self._first_value()", "title": "" }, { "docid": "d2131f90686ec18cba167b96e69dac76", "score": "0.6224616", "text": "def weight(self):\n\t\treturn self.x_coords[1] - self.x_coords[0]", "title": "" }, { "docid": "27c7bd89b58b5788c4c58e539f71f525", "score": "0.62235236", "text": "def get_minimum(self):\n self.atoms.set_positions(self.rmin)\n print 'get_minimum',self.Emin\n return self.Emin", "title": "" }, { "docid": "eeb9c18d6e09dfd2d3a414da50fd04ea", "score": "0.62224096", "text": "def min_value(self, node, depth):\n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n\n # check to see if we are done\n end_val = node.utility(self)\n if end_val != 0:\n return end_val\n if depth <= 0:\n return self.score(node, self)\n # get all possible next moves\n next_moves = node.get_legal_moves()\n min_val = float(\"inf\")\n for next_move in next_moves:\n next_node = node.forecast_move(next_move)\n val = self.max_value(next_node, depth - 1)\n if val < min_val:\n min_val = val\n return min_val", "title": "" }, { "docid": "05b024bde4cca6d47a37e9c3d3a9d580", "score": "0.62105626", "text": "def get_required_water_intake(weight: int) -> float:\n ounces = float(fractions.Fraction(numerator=2, denominator=3) * weight)\n return round(number=ounces, ndigits=2)", "title": "" }, { "docid": "30a08c0c1d78881bbd6cca28f3c6b8de", "score": "0.6200468", "text": "def extract_min(self):\n pass", "title": "" }, { "docid": "e2941298f7092de7b13f6f978fe267d4", "score": "0.6183652", "text": "def get_min_value(self, state_vector, legal_moves):\n if not legal_moves: # terminal state case\n return 0\n else:\n worst_action_score = float('inf')\n for action in legal_moves:\n cur_q_val = self.get_q_value(state_vector, action)\n if cur_q_val < worst_action_score:\n worst_action_score = cur_q_val\n return worst_action_score", "title": "" }, { "docid": "57639d66507500bba0a891deea904c07", "score": "0.6182366", "text": "def min_value(board, curr_max, curr_min):\n if terminal(board):\n return utility(board), None\n\n value, action = 1, None\n\n for a in actions(board):\n v = max_value(result(board, a), curr_max, curr_min)[0]\n curr_min = min(curr_min, v)\n\n if v <= curr_max:\n return v, a\n\n 
if v < value:\n value, action = v, a\n\n return value, action", "title": "" }, { "docid": "d60baff847f4e9e73c32333769b03747", "score": "0.61790985", "text": "def minimum(self) -> Optional[float]:\n\n return self._minimum", "title": "" }, { "docid": "f8e0c3d28f81ba75eb8bc246e9911f8d", "score": "0.61745226", "text": "def get_minimum(self):\n raise NotImplementedError", "title": "" }, { "docid": "b4f59a52ae20f279ad2efb55209b846d", "score": "0.61685604", "text": "def min_value(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"min_value\")", "title": "" }, { "docid": "b4f59a52ae20f279ad2efb55209b846d", "score": "0.61685604", "text": "def min_value(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"min_value\")", "title": "" }, { "docid": "b4f59a52ae20f279ad2efb55209b846d", "score": "0.61685604", "text": "def min_value(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"min_value\")", "title": "" }, { "docid": "a05497c000a57e5dbdae023485744b58", "score": "0.6168294", "text": "def min_value(self, game, depth): \n if self.time_left() < self.TIMER_THRESHOLD:\n raise SearchTimeout()\n \n player = game.inactive_player\n if depth == 0 or game.is_winner(player):\n return self.score(game, player), (-1, -1)\n \n moves = game.get_legal_moves()\n best_score = float(\"inf\")\n best_move = (-1, -1)\n for move in moves:\n after_move = game.forecast_move(move)\n score, max_move = self.max_value(after_move, depth - 1)\n if score < best_score:\n best_score = score\n best_move = move\n \n return best_score, best_move", "title": "" }, { "docid": "29c05d9781b10421b294860d2f89e533", "score": "0.61642313", "text": "def minimumDistance(self):\n return self.__minimumDistance", "title": "" }, { "docid": "6e2dbdc3e0895aa05c77792768220e51", "score": "0.6145074", "text": "def minimax(self, state, depth):\n def min_value(state, depth):\n if state.terminal_test(): return state.utility(self.player_id)\n if depth <= 0: return self.score(state)\n value = float(\"inf\")\n for action in state.actions():\n value = min(value, max_value(state.result(action), depth - 1))\n return value\n\n def max_value(state, depth):\n if state.terminal_test(): return state.utility(self.player_id)\n if depth <= 0: return self.score(state)\n value = float(\"-inf\")\n for action in state.actions():\n value = max(value, min_value(state.result(action), depth - 1))\n return value\n\n return max(state.actions(), key=lambda x: min_value(state.result(x), depth - 1))", "title": "" }, { "docid": "2d8b0cd5fbb653661e8df24a23b1c65b", "score": "0.6138757", "text": "def weight(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "2d8b0cd5fbb653661e8df24a23b1c65b", "score": "0.6138757", "text": "def weight(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "2d8b0cd5fbb653661e8df24a23b1c65b", "score": "0.6138757", "text": "def weight(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "2d8b0cd5fbb653661e8df24a23b1c65b", "score": "0.6138757", "text": "def weight(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "b103aa1483893b952e4f3950a576a182", "score": "0.6137626", "text": "def min_value(self, grid, depth=None, previous=None):\n pass", "title": "" }, { "docid": "15b26b793412016d45f96cfc2ef62084", "score": "0.61347795", "text": "def get_optimal_value(capacity, weights, values):\t\n\tvalue = 0.\n 
\n\tweight_value_pairs = sorted(\n [Item(v, w) for v, w in zip(values, weights)],\n key=lambda i: i.value / i.weight,\n reverse=True\n )\n\tprint (weight_value_pairs)\n\n\tfor item in weight_value_pairs:\n\t\tif capacity == 0:\n\t\t\treturn value\n\t\tif item.weight > capacity:\n\t\t\ta = capacity\n\t\telse:\n\t\t\ta = item.weight\n\t\tvalue = value + a * item.value/item.weight\n\t\tcapacity = capacity - int(a)\n\n\treturn value", "title": "" }, { "docid": "b5726851b4b487b6dfd83a9f6797e459", "score": "0.6129601", "text": "def min(self):\n return self._minval", "title": "" }, { "docid": "9708c4fddf8b2e0d063f4da65be43f76", "score": "0.6122123", "text": "def get_weight(self):\n # Find the current and total weight\n screenshot = utils.take_screenshot(False)\n weight = self.get_ui_element('weight', screenshot)\n weight = screenshot[weight[1]:(weight[1] + 12), (weight[0] + 40):(weight[0] + 84)]\n\n # Resize and parse the image to a string\n weight = cv2.resize(weight, (0, 0), fx=3, fy=3)\n weight = cv2.cvtColor(weight, cv2.COLOR_BGR2GRAY)\n weight = cv2.bitwise_not(weight)\n weight = cv2.fastNlMeansDenoising(weight, None, 9, 13)\n _, weight = cv2.threshold(weight, 180, 255, cv2.THRESH_BINARY)\n weight = cv2.blur(weight, (4, 2))\n\n weight_text = ''\n try:\n weight_text = pytesseract.image_to_string(weight, config=utils.TESSERACT_CONF)\n except UnicodeDecodeError:\n utils.log(\"SEVERE\", \"Tesseract failed to parse player weight from screenshot\")\n utils.quit_game()\n\n # Split the equation and calculate the difference\n current_weight, max_weight = weight_text.split(\"/\")[0::1]\n return int(current_weight), int(max_weight)", "title": "" }, { "docid": "9f720562883e475cf3d5ac29871ed0a3", "score": "0.6120169", "text": "def find_min(self) -> int:\n pass", "title": "" }, { "docid": "70edcb78324baa9ee1942a823b788aa1", "score": "0.60983163", "text": "def GetMin(self):\r\n return self.minValue", "title": "" }, { "docid": "2857eb390d757eba5b5c3c9adfaca882", "score": "0.60924", "text": "def MINV(board):\n # If the game is over, the only possible value will be\n # the value at that state\n if terminal(board):\n return utility(board)\n\n # Initializing v as the maximum possible value.\n v = math.inf\n\n # Iterates through all the actions in board and\n # checks for the minimum value recursively.\n for action in actions(board):\n v = min(v, MAXV(result(board, action)))\n\n # In this game, the minimum value is 1.\n if v == -1:\n return v\n\n return v", "title": "" }, { "docid": "6ac88108d8c59e7dd70a8e7aa2efada1", "score": "0.6084652", "text": "def ab_min_value(state, alpha, beta, depth):\n global AB_Baseline_NodeCount\n AB_Baseline_NodeCount += 1\n printDebugMsg(\"ab_min_value with depth = {}\".format(depth))\n\n if state.terminal_test():\n return state.utility(self.player_id)\n if (depth <= 0):\n return self.score(state)\n\n v = float(\"inf\")\n for a in state.actions():\n v = min(v, ab_max_value(state.result(a), alpha, beta, depth-1))\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v", "title": "" }, { "docid": "78e999ae9c8d5d7d97144d1897090455", "score": "0.6081575", "text": "def mini(moves):\n if len(moves) == 0:\n return None\n minScore = 1\n minmove = None\n for move in moves:\n if move.score <= minScore:\n minScore = move.score\n minmove = move\n return (minmove, minScore)", "title": "" }, { "docid": "5c8cb3ca940023096180bcf97d2e8e1e", "score": "0.60581094", "text": "def min_value(self, game, depth):\n if self.terminal_test(game):\n return game.utility(self)\n\n if depth == 0:\n return 
self.score(game, self)\n\n best_score = float(\"inf\")\n legal_moves = game.get_legal_moves()\n\n for m in legal_moves:\n self.check_time()\n new_game = game.forecast_move(m)\n current_score = self.max_value(new_game, depth-1)\n if current_score < best_score:\n best_score = current_score\n\n return best_score", "title": "" }, { "docid": "06be2faeef525a013af9c166bb4dc021", "score": "0.6025592", "text": "def minimax(board):\n # next move must be AI\n AI = player(board)\n human = X if AI == O else O\n # we want to max AI gain\n _, next_move = recur_min(board,AI,human)\n return next_move", "title": "" }, { "docid": "1e1981588471bb0d70d62b9a4d421824", "score": "0.6024754", "text": "def min_value(gameState, alpha, beta, depth):\n if gameState.terminal_test(): \n return gameState.utility(self.player_id)\n if depth <= 0: \n return self.score(gameState)\n\n v = float(\"inf\")\n for a in gameState.actions():\n v = min(v, max_value(gameState.result(a), alpha, beta, depth - 1))\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v", "title": "" }, { "docid": "aab73457833b8b72e9753b0211e4a5fd", "score": "0.60216635", "text": "def minvalue(state, maxdepth, alpha = None, beta = None):\n board = state[0]\n turn = state[1]\n if is_terminal(state, maxdepth):\n return utility(state)\n else:\n v = float('inf')\n (moves, captures) = get_hints(board, turn)\n if captures:\n for a in captures:\n v = min(v, maxvalue(transition(state, a, \"jump\"), \\\n maxdepth, alpha, beta))\n if alpha is not None and beta is not None:\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v\n elif moves:\n for a in moves:\n v = min(v, maxvalue(transition(state, a, \"move\"), \\\n maxdepth, alpha, beta))\n if alpha is not None and beta is not None:\n if v <= alpha:\n return v\n beta = min(beta, v)\n return v", "title": "" }, { "docid": "24992a2109ab9b81b4908a78dbf7e078", "score": "0.6021119", "text": "def min_temp(self) -> float:\n return TEMP_MIN", "title": "" }, { "docid": "3d77b988c14b3c5eaeccef3aed3fb9d4", "score": "0.6013284", "text": "def minimax(board):\n p = player(board)\n move = bestmove(board)\n ans = (move[0], move[1])\n return ans", "title": "" }, { "docid": "4800d2733a6f2a003a5c91d6c5a669e4", "score": "0.6010686", "text": "def min_value(game, depth, alpha, beta):\n v = float(\"inf\")\n if terminal_test(game, depth):\n return self.score(game, self)\n for move in game.get_legal_moves():\n v = min(v, max_value(game.forecast_move(move), depth-1, alpha, beta))\n if v<= alpha:\n return v\n beta = min(beta, v)\n return v", "title": "" }, { "docid": "658c480b172cb86ad8393e18b3e481c3", "score": "0.60079336", "text": "def minVal( self ):\n return self.cells.min()", "title": "" }, { "docid": "f5b0a083f3789df7bde3f9bec2008979", "score": "0.59983855", "text": "def target_rel(self):\n value = self.target\n if value == 0:\n return 0\n if value > 0:\n return (self.pos_min/value)\n return -(abs(self.pos_min)/abs(value))", "title": "" }, { "docid": "21de6d1564e52a6c466a643d4c0b27ce", "score": "0.5997981", "text": "def min(self):\n return self.__min", "title": "" }, { "docid": "d13c6bd68186a45ca2d02b1c1836e97c", "score": "0.5983951", "text": "def min(self):\n return self.waveform.min()", "title": "" }, { "docid": "982219dd23fba672a72ef03411654335", "score": "0.5973835", "text": "def min_value(dictionary):\n return min_item(dictionary)[1]", "title": "" }, { "docid": "94c3fb46a7069bd21c01dd3da9a07724", "score": "0.597125", "text": "def best_potential_step(self, game_map, cur_unit):\n next_pos = 
game_map.valid_next_pos(cur_unit)\n potential_values = game_map.next_pos_potential(cur_unit, next_pos)\n best_state = min(potential_values)\n return best_state[1]", "title": "" }, { "docid": "625d65a7c29e334719531a00d92d59f3", "score": "0.5969887", "text": "def weight(self) -> Optional[int]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "9c2d54e37ed5005bd59a721b1295935d", "score": "0.5966738", "text": "def minimum (\n\n self,\n items = None,\n default = None\n ) :\n\n if self.isEmpty( items ) : return default\n\n mValue = None\n\n for item in items :\n\n item = self.float( item )\n\n if item is None : None\n\n elif mValue is None : mValue = item\n\n elif item < mValue : mValue = item\n\n if mValue is None : mValue = default\n\n return mValue", "title": "" }, { "docid": "7a4794545ef246f772aba597b641408e", "score": "0.59628856", "text": "def minimum(self):\n if self.children:\n return self.children[0].minimum()\n else:\n return self.keys[0]", "title": "" }, { "docid": "5e6f31693dacd06e9d9cbe72f8866f7b", "score": "0.5957557", "text": "def min_temp(self):\n return TEMP_MINIMUM", "title": "" }, { "docid": "5e6f31693dacd06e9d9cbe72f8866f7b", "score": "0.5957557", "text": "def min_temp(self):\n return TEMP_MINIMUM", "title": "" }, { "docid": "53b5aff60cff5ecda551f5ddfce263cc", "score": "0.59541196", "text": "def argmin(self, ns):\n\n if (ns != None):\n\n try:\n\n length = len(ns[0][0][1])\n if length > 0:\n\n return ns[0][1]\n\n except: # if out of range\n\n print(\"\")\n\n bestMinMove = ((0,0),0)\n\n for i in range(0, len(ns)):\n\n moveList = ns[i][1]\n piece = ns[i][0]\n\n for j in range(0, len(moveList)):\n\n try:\n\n if (bestMinMove[1] > moveList[j][1]):\n # found move with lower utility than previous best\n bestMinMove = moveList[j]\n\n except TypeError:\n\n break\n # sample output: ((0,2),[(0,3),10]) \n returnVal = (piece, [(bestMinMove[0], bestMinMove[1])])\n \n return returnVal\n\n else:\n\n return None", "title": "" }, { "docid": "f89a8276a1eff875b3fa7f69a7077674", "score": "0.5953295", "text": "def minimax(board):\n if terminal(board):\n return None\n else:\n move = optimalMove(board, -2, 2)\n return move[1]", "title": "" }, { "docid": "d293d152f589190302fead7440c7c082", "score": "0.5952776", "text": "def pick_from_weight(weight, pows=1.0):\n weight = weight**pows\n t = np.cumsum(weight)\n s = np.sum(weight)\n return int(np.searchsorted(t, 0.5 * s))", "title": "" }, { "docid": "e428fd8725459acc4c022d1ea0cc0772", "score": "0.59527266", "text": "def minimax(self, state, depth):\n\n def min_value(state, depth, alpha, beta):\n if state.terminal_test(): return state.utility(self.player_id)\n if depth <= 0: return self.score(state)\n value = float(\"inf\")\n for action in state.actions():\n value = min(value, max_value(state.result(action), depth - 1, alpha, beta))\n if value <= alpha: return value\n beta = min(beta, value)\n return value\n\n def max_value(state, depth, alpha, beta):\n if state.terminal_test(): return state.utility(self.player_id)\n if depth <= 0: return self.score(state)\n value = float(\"-inf\")\n for action in state.actions():\n value = max(value, min_value(state.result(action), depth - 1, alpha, beta))\n if value >= beta: return value\n alpha = max(alpha, value)\n return value\n \n alpha = float(\"-inf\") \n beta = float(\"inf\")\n best_score = float(\"-inf\")\n best_move = None\n for a in state.actions():\n value = min_value(state.result(a), depth-1, alpha, beta)\n alpha = max(alpha, value)\n if value > best_score:\n best_score = value\n best_move = 
a\n return best_move", "title": "" }, { "docid": "aa85efc311a3a678bf194d47cab626f2", "score": "0.5950757", "text": "def find_min(self):\r\n return bstree_rb.find_min(self.tree)", "title": "" }, { "docid": "6ba886258cee47581bb7a1f52c9854e4", "score": "0.5947797", "text": "def move(self, state):\n\n # *You do not need to modify this method.*\n best_value = -1.0\n\n actions = state.actions()\n if not actions:\n actions = [None]\n\n best_move = actions[0]\n for action in actions:\n result_state = state.result(action)\n value = self.evaluate(result_state, state.player_row)\n if value > best_value:\n best_value = value\n best_move = action\n\n # Return the move with the highest evaluation value\n return best_move", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.5939049", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "11cf9d778eabd1cecb8bf06329b9135b", "score": "0.5939049", "text": "def weight(self):\n return self._weight", "title": "" }, { "docid": "01af26aaaa33a5309103f2879e646e85", "score": "0.59380734", "text": "def min(self):\n return self._min", "title": "" }, { "docid": "01af26aaaa33a5309103f2879e646e85", "score": "0.59380734", "text": "def min(self):\n return self._min", "title": "" }, { "docid": "7bb0004a6bdd0d918c79ddd90b458e81", "score": "0.59349364", "text": "def weight(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"weight\")", "title": "" }, { "docid": "269adf1a8e8ab74bc2f87b92e899175a", "score": "0.59324694", "text": "def minimax(board):\n a = None\n if terminal(board):\n return a\n\n maximize = player(board) == X\n sign = -1 if maximize else 1\n prev_value = 2*sign\n\n for action in actions(board):\n # The maximizing player picks action a in Actions(s) that\n # produces the highest value of Min-Value(Result(s, a)).\n # The minimizing player picks action a in Actions(s) that\n # produces the lowest value of Max-Value(Result(s, a)).\n newValue = minmaxvalue(result(board, action), maximize, prev_value)\n if prev_value*sign >= newValue*sign:\n a = action\n prev_value = newValue\n return a", "title": "" }, { "docid": "f2cc3ca4e6271d17345be1d6f9a2f9a9", "score": "0.5931628", "text": "def weight(self) -> int:\n return pulumi.get(self, \"weight\")", "title": "" } ]
e23cb37d97ac77657f5703518a9642e9
Define a nonequality test
[ { "docid": "95ec201ca7e997f00a3ab1a8028e341a", "score": "0.0", "text": "def __ne__(self, other):\n return not self.__eq__(other)", "title": "" } ]
[ { "docid": "24c5c676df052221a2504412bb2096be", "score": "0.71697515", "text": "def test_not_there_ok(self): # pragma: no branch", "title": "" }, { "docid": "f4314b49871a8596590a688621a3adec", "score": "0.6869145", "text": "def test_quality_is_never_negative_but_stays_at_0():\n assert_sell_in_quality(\"foo\", sell_in=5, quality=1, days=1 + random.randint(1, 100), expected_quality=0)", "title": "" }, { "docid": "b6a011041ed2ab8015e9ea0028eb7809", "score": "0.6806825", "text": "def test_faim(self):\n self.assertFalse(True)", "title": "" }, { "docid": "25955da7de56463fa65b5f4a566eaaae", "score": "0.6795595", "text": "def test_nothing_passes():\n # nothing to test\n pass", "title": "" }, { "docid": "d7083aeea3a1ff4947639f3f95426f80", "score": "0.67563796", "text": "def test_incomplete_parameters():\n with pytest.raises(AssertionError):\n processor = SpectrumProcessor(None)\n processor.add_filter(\"require_correct_ionmode\")", "title": "" }, { "docid": "9bc782e54bc5ecb88741115128e79bae", "score": "0.67538685", "text": "def testSampleWithCondition(self):\n self.assertTrue(False)", "title": "" }, { "docid": "10093b4a673c29f879f7b5ce6b62e8ca", "score": "0.66474074", "text": "def not_test(func):\n func.__test__ = False\n return func", "title": "" }, { "docid": "f816b2b118fa1a4735d50115f7a68b72", "score": "0.6632549", "text": "def testBetterThanFalse(self):\n self.assertFalse(HSP(5).betterThan(7))", "title": "" }, { "docid": "0c20493daa5091af64c8f529214b530f", "score": "0.6607773", "text": "def testable_nothing(self=None):", "title": "" }, { "docid": "94c72d311a4c232d76519f65f8e1e35c", "score": "0.6593491", "text": "def nottest(f):\n f.__test__ = False\n return f", "title": "" }, { "docid": "ac06761f4c7113cd5df9b83b10b708c5", "score": "0.6527657", "text": "def test_none(self):\n return None", "title": "" }, { "docid": "ab2cb617ee6f35e2df2cd86395385598", "score": "0.6519866", "text": "def test_function(self): # pragma: no branch", "title": "" }, { "docid": "67756800e4a67f3d67e0e2514718ce8a", "score": "0.65062", "text": "def test_skipOnCondition_call(self):", "title": "" }, { "docid": "59a4c73e70f7d83c526a46b12057ca7b", "score": "0.6485825", "text": "def test_suppress_alg(self):\r\n self.simulator.suppress_alg = True\r\n assert self.simulator.suppress_alg == True\r\n self.simulator.suppress_alg = False\r\n assert self.simulator.suppress_alg == False", "title": "" }, { "docid": "5afaef7f4bae489cd60ffb1c17415aed", "score": "0.64454216", "text": "def test_nothing():\n assert True", "title": "" }, { "docid": "c8cb6a7fa0faf3267b3d1c6a1fcfd080", "score": "0.6441097", "text": "def test_threshold(self):\n pass", "title": "" }, { "docid": "433e1b4055ea094fd45970738601bffe", "score": "0.6433017", "text": "def test_missing_fidelity(algorithm: dict):\n task = CustomRosenbrock(max_trials=30, with_fidelity=False)\n with pytest.raises(RuntimeError) as exc:\n workon(task, task.get_search_space(), algorithms=algorithm, max_trials=100)\n\n assert \"https://orion.readthedocs.io/en/develop/user/algorithms.html\" in str(\n exc.value\n )", "title": "" }, { "docid": "db0e4f291142ab0ae5335545d3d67d2c", "score": "0.6429954", "text": "def test_basic(self):\n\t\tself.basic_test(False)\n\t\tself.basic_test(True)", "title": "" }, { "docid": "4f46827f5cb0302fc2d24cbe9201ac3c", "score": "0.6422923", "text": "def test_none():\n _test_application(None)", "title": "" }, { "docid": "9e847e43a451994ef151b8a0a98c8f7d", "score": "0.6404696", "text": "def test_qualean_inequality():\n with pytest.raises(NotImplementedError) as info:\n 
session4.Qualean(1) <= 'hi'\n with pytest.raises(NotImplementedError) as info:\n session4.Qualean(1) >= 'hi'\n with pytest.raises(NotImplementedError) as info:\n session4.Qualean(1) < 'hi'\n with pytest.raises(NotImplementedError) as info:\n session4.Qualean(1) > 'hi'", "title": "" }, { "docid": "fb8533f7a1dbe3e6fc087730537d76cd", "score": "0.63868856", "text": "def test_neg(self):\n\t\tpass", "title": "" }, { "docid": "fb8533f7a1dbe3e6fc087730537d76cd", "score": "0.63868856", "text": "def test_neg(self):\n\t\tpass", "title": "" }, { "docid": "fb8533f7a1dbe3e6fc087730537d76cd", "score": "0.63868856", "text": "def test_neg(self):\n\t\tpass", "title": "" }, { "docid": "f20a1d9907111966b57387c54496ed7c", "score": "0.6367467", "text": "def test_sanity(self):\n\t\tself.assertTrue(False)", "title": "" }, { "docid": "544561f70919c17077dd324c22b8aa4c", "score": "0.6353885", "text": "def test_1(self): # the \"\"\" descripe what the function do in the unit test shell besides it works as comment\n self.assertFalse(is_prime(1)) # make sure that the output is False", "title": "" }, { "docid": "6fc2e057668d818300a9cf768c842341", "score": "0.6351684", "text": "def test_method_does_not_introduce_bias():", "title": "" }, { "docid": "a554c0fcd7276a1598f3d92774c9cd9a", "score": "0.6323042", "text": "def testBadEscalationRate(self):\n self.assertEqual(0,1)", "title": "" }, { "docid": "39ab1742a9093ff6220a10ca2c79da28", "score": "0.6321494", "text": "def test(fits):\n\n\tv = sxpar.sxpar(fits, \"quality\")\n\tif len(v) == 0:\n\t\treturn False\n\treturn v[0].lower() == \"test\"", "title": "" }, { "docid": "3f07ad0de977f278436c20250dd1092e", "score": "0.6320185", "text": "def test_no_parameters():\n assert mysum() == None", "title": "" }, { "docid": "b31baca698e508c896ff4ea49a9d2032", "score": "0.63164407", "text": "def test_check_precondition(self):\n pass", "title": "" }, { "docid": "44184e0401d2980d990268e7197d8bcc", "score": "0.6311199", "text": "def test_nothing_fails():\n assert False, \"nothing to test\"", "title": "" }, { "docid": "7860a3483ef544be45647f1c0c1ca3a5", "score": "0.6305523", "text": "def all_is_good():\n pass", "title": "" }, { "docid": "a4d7f899170b37510ac5c7e00e3f3b53", "score": "0.62799174", "text": "def test_noOps(self):\n # There are no assertions here because there's no reasonable way this\n # test will fail rather than error; however, coverage --branch picks up\n # on methods which haven't been executed and the fact that these\n # methods exist (i.e. for super() to invoke them) is an important\n # property to verify. -glyph\n\n # TODO: maybe make a policy of this or explain it somewhere other than\n # a comment. 
Institutional learning ftw.\n\n tube = NullTube()\n tube.started()\n tube.received(None)\n tube.stopped(None)", "title": "" }, { "docid": "96f796b7cda7fe237bc3627f8ec2b62f", "score": "0.6279231", "text": "def testBetterThanFalse(self):\n self.assertFalse(LSP(7).betterThan(5))", "title": "" }, { "docid": "bf7614929672ed3f95f621f35d89c89e", "score": "0.6259026", "text": "def test_skip_false(self):\n self.assertIsNone(self.DummyTestClass().test_should_run())", "title": "" }, { "docid": "8e20c3903af4c904b3a857750685044d", "score": "0.62465864", "text": "def test_falsiness(name):\n assert not name", "title": "" }, { "docid": "a0865efc61aa9dc09afa8a3fe84cb65c", "score": "0.62214506", "text": "def test_2_check_if_fever():\n assert check_if_fever(36.0) == False", "title": "" }, { "docid": "9d6f405d5038c48aecbe63d8a0218985", "score": "0.6210948", "text": "def test_fail(self):\n self.assertFalse(False)", "title": "" }, { "docid": "06f08364e9ba0acab6545b1e93f4c1ed", "score": "0.61902905", "text": "def test_1_check_if_fever():\n # this ended up becoming a very simple check because I don't have a fully developed statistical model to judge this on yet\n assert check_if_fever(38) == True", "title": "" }, { "docid": "96112e0123c2a64321d77fe5470f6bba", "score": "0.6173183", "text": "def test_check_no_env(self):\n grader = Notebook(tests_dir=TEST_FILES_PATH + \"tests\")\n\n for q_path in glob(TEST_FILES_PATH + \"tests/*.py\"):\n q = os.path.split(q_path)[1][:-3]\n result = grader.check(q) # global_env=global_env)\n if q != \"q2\":\n self.assertEqual(result.grade, 1, f\"Test {q} expected to pass but failed:\\n{result}\")\n else:\n self.assertEqual(result.grade, 0, f\"Test {q} expected to fail but passed:\\n{result}\")", "title": "" }, { "docid": "01192eeff855c16d80857805f7915a38", "score": "0.6164246", "text": "def safetyTest(self, **kwargs):\n return self._safetyTest(**kwargs) <= 0.0", "title": "" }, { "docid": "b2a5a219f4f848e1c254f367a89dca2e", "score": "0.616377", "text": "def not_a_test(obj):\n obj.__test__ = False\n return obj", "title": "" }, { "docid": "4c7e5822c8d33ac4862cf535e97e7b8e", "score": "0.61510235", "text": "def is_untested(self):\n return not self.is_governed()", "title": "" }, { "docid": "ab7edf77583054aa467888e8db9b1926", "score": "0.6137711", "text": "def test_normal_execution_summary_none(self):\n self.kwds[\"summary\"] = \"none\"\n self.test_normal_execution()", "title": "" }, { "docid": "9f40d9fd4d06f2e343fad732a6722adf", "score": "0.6120384", "text": "def optional_tests():\n return [\n \"operator_matmul_with_same_type\",\n \"operator_solve_with_same_type\",\n ]", "title": "" }, { "docid": "9f40d9fd4d06f2e343fad732a6722adf", "score": "0.6120384", "text": "def optional_tests():\n return [\n \"operator_matmul_with_same_type\",\n \"operator_solve_with_same_type\",\n ]", "title": "" }, { "docid": "9f40d9fd4d06f2e343fad732a6722adf", "score": "0.6120384", "text": "def optional_tests():\n return [\n \"operator_matmul_with_same_type\",\n \"operator_solve_with_same_type\",\n ]", "title": "" }, { "docid": "9f40d9fd4d06f2e343fad732a6722adf", "score": "0.6120384", "text": "def optional_tests():\n return [\n \"operator_matmul_with_same_type\",\n \"operator_solve_with_same_type\",\n ]", "title": "" }, { "docid": "2e3915c3978d04cff887cdf7e2d7ce6f", "score": "0.6104771", "text": "def test_noSupport(self):\n releaseAllClones(g_clonenames)\n #f_filename = downloadGooglePicture(creepyCharger)\n f_filename = singleClone(creepyCharger.fullname)\n for mtype in g_metadataTypes:\n for oper in 
g_supportedFunctions[mtype]:\n #check if this function has more than 1 argument\n if mtype in g_Func:\n if oper in g_sampleValues[mtype]:#does this function have extra values?\n if isinstance(g_sampleValues[mtype][oper], tuple):#are the extra values in a tuple?\n if len(g_sampleValues[mtype][oper])==2:#check if this function has 2 extra arguments\n # runs test with 2 extra provided arguments\n self.assertRaises(MetadataManager.UnsupportedFiletypeError,\n g_Func[mtype][oper], f_filename,\n g_sampleValues[mtype][oper][0], g_sampleValues[mtype][oper][1])\n else:\n print(\"this shouldn't happen.\")\n else: #there's only 1 extra argument\n # runs test with a provided sample value\n self.assertRaises(MetadataManager.UnsupportedFiletypeError, g_Func[mtype][oper],\n f_filename, g_sampleValues[mtype][oper])\n else: #function has no extra values\n #run functions with only the filename\n self.assertRaises(MetadataManager.UnsupportedFiletypeError, g_Func[mtype][oper],\n f_filename)\n #print(\"ErrorCheck_DelicateTests() removing\", f_filename)\n os.remove(f_filename)", "title": "" }, { "docid": "1e6101e60af95cb65c758dfd65a6409c", "score": "0.61023057", "text": "def check_testfunction(self, function):\n raise NotImplementedError", "title": "" }, { "docid": "e1f7f1c3efcd1bc2114c5094ff900cfb", "score": "0.6090982", "text": "def test_report_abuse(self):\n pass", "title": "" }, { "docid": "6f0489bd853252d3d026fce362ee9847", "score": "0.6074776", "text": "def test_inequality(self):\n self.assertNotEquals(self.n1, self.n3)\n self.assertNotEquals(self.n2, self.n3)", "title": "" }, { "docid": "c9cd485d2efa8813145708c26bc524da", "score": "0.6074556", "text": "def test_1(self):\n self.assertFalse(is_prime(7))", "title": "" }, { "docid": "08ba26883f3bec3f350bedbdec7d6292", "score": "0.6060818", "text": "def test_with_input_none():\n spectrum_in = None\n spectrum = require_minimum_of_high_peaks(spectrum_in)\n assert spectrum is None", "title": "" }, { "docid": "c547aba1b985cb097b3ad43f396ad5d8", "score": "0.6060612", "text": "def true_filter(test): # pylint: disable=unused-argument\n return True", "title": "" }, { "docid": "bd837bbe3741687abfe91b0de150199b", "score": "0.6058868", "text": "def test_quality_degrades_twice_as_fast_after_sell_in():\n assert_sell_in_quality(\"foo\", sell_in=1, quality=5, days=2, expected_quality=2)", "title": "" }, { "docid": "312c6a11be03d92f953483c6f9b31fcd", "score": "0.6054342", "text": "def self_test():\n # TODO implement\n pass", "title": "" }, { "docid": "bcca207fa64193a7f16d5a9663dda9a2", "score": "0.60483134", "text": "def test_hidden():", "title": "" }, { "docid": "b9bbcdf3f7c0fec0b8313c97ea94ea15", "score": "0.60429645", "text": "def test_1(self):\n self.assertFalse(is_prime(9))", "title": "" }, { "docid": "8b7344116fdba20fb6f74ea50e96c715", "score": "0.6033281", "text": "def test_EvaluatorLJFS_force_device(make_state):\n\n #assert(False)\n pass", "title": "" }, { "docid": "480118e991cd7c497e924805caf52ecc", "score": "0.60078555", "text": "def test_task_nostate_6a_exception():\n nn = fun_addvar_none(name=\"NA\", a=2)\n assert nn.inputs.b is attr.NOTHING\n with pytest.raises(TypeError) as excinfo:\n nn()\n assert \"unsupported\" in str(excinfo.value)", "title": "" }, { "docid": "68ea186542e8123fe5ed7c49e86d8aca", "score": "0.6002613", "text": "def test_func_noattr(x, eps=1e-8):\n return x", "title": "" }, { "docid": "3b7a50b6a612834aab92ee862d72c013", "score": "0.59965086", "text": "def test_fail(self):\n self.assertIsNone(volume_cube(\"75\"))", "title": "" }, { "docid": 
"4e60cc3ead3cbfbb9132073b67efa40a", "score": "0.59622234", "text": "def test_skipping(self):\n self.assertTrue(\"the world is flat\")", "title": "" }, { "docid": "c32cab3185eea178d8c574a322995ba4", "score": "0.5961024", "text": "def test_losing():", "title": "" }, { "docid": "73f8b5b1f9da0db07143956efad3fae6", "score": "0.59545463", "text": "def test_suppress_sens(self):\r\n assert self.sim.suppress_sens == False\r\n self.sim.suppress_sens = False\r\n assert self.sim.suppress_sens == False\r\n self.sim.suppress_sens = 0\r\n assert self.sim.suppress_sens == False\r\n self.sim.suppress_sens = 1\r\n assert self.sim.suppress_sens == True", "title": "" }, { "docid": "aa5bc94ac11fdd3aa9224772b007e765", "score": "0.5952403", "text": "def test():\n\traise NotImplementedError", "title": "" }, { "docid": "3809aec099da89a70a51a76ecf49ae2f", "score": "0.5949652", "text": "def test_no_photometry(self):\n s = copy.copy(self.sed)\n s.age = 455*q.Myr, 13*q.Myr\n s.radius = 2.362*q.Rsun, 0.02*q.Rjup\n s.parallax = 130.23*q.mas, 0.36*q.mas\n s.spectral_type = 'A0V'\n s.add_spectrum(self.spec1)\n\n s.results\n\n self.assertIsNotNone(s.Teff)", "title": "" }, { "docid": "bb466c0189f0e01a3e3f130c2c6664a9", "score": "0.5942945", "text": "def test_eat_unhealthy(self):\n self.assertEqual(\n eat(\"pizza\", is_healthy=False),\n \"I am eating pizza because YOLO!\"\n )", "title": "" }, { "docid": "31dd516263b38d1c8d57388794cfc883", "score": "0.5942316", "text": "def test_func7(self):\n pass", "title": "" }, { "docid": "6d5e3e14400c297e6ece1615fe80f3f3", "score": "0.59296465", "text": "def _regr_test():", "title": "" }, { "docid": "6e10ba0b43fa246dec2bd05d4acbf4be", "score": "0.5922349", "text": "def test_pretype(self):\r\n print self.simulator.options\r\n print self.simulator.precond\r\n assert self.simulator.precond == 'PREC_NONE'\r\n self.simulator.precond = 'prec_none'\r\n print self.simulator.precond\r\n assert self.simulator.precond == 'PREC_NONE'\r\n \r\n nose.tools.assert_raises(Exception, self.simulator._set_pre_cond, -1.0)\r\n nose.tools.assert_raises(Exception, self.simulator._set_pre_cond, 'PREC_BOTH1')", "title": "" }, { "docid": "807fa0f474ff2e4e7b03ac683eb8116d", "score": "0.59057415", "text": "def test_inequality(self):\n mask = np.array([0, 1, 0, 0, 1])\n value = np.array([0, 0, 0, 0, 1])\n pre = Condition(mask, value)\n action = PrePostConditionAction(pre, self.post)\n self.assertNotEqual(self.action, action)\n self.assertNotEqual(hash(self.action), hash(action))\n action = PrePostConditionAction(self.pre, self.pre)\n self.assertNotEqual(hash(self.action), hash(action))", "title": "" }, { "docid": "1fcb2cb06553f03879a4cbf51615d2b0", "score": "0.5903343", "text": "def test__ne__(parameter):\n par, _ = parameter\n assert par != 9\n assert not par != 10", "title": "" }, { "docid": "c825af99a6b243890e1896076c32cb77", "score": "0.59028053", "text": "def test_company_earnings_quality_score(self):\n pass", "title": "" }, { "docid": "a5f32d471a30d8f7478633767adc3595", "score": "0.58963484", "text": "def test_method(self): # pragma: no branch", "title": "" }, { "docid": "f98a30bfd0ad63d4b8445ef9f742bd34", "score": "0.5895703", "text": "def TestNoseFailed():\n a = 1\n b = 2\n assert a != b", "title": "" }, { "docid": "e6fd4273d007373726cfbad799fab1cd", "score": "0.5888675", "text": "def test_is_funny_tim(self):\n self.assertFalse(is_funny(\"tim\"), \"tim should not be funny\")", "title": "" }, { "docid": "a62eb1703d04d8019c899245798708a9", "score": "0.58878446", "text": "def test_call_missing(self, testobj, 
data):\n filter, blacklist = testobj\n data = data.copy() # record is NOT test-independent\n del data[\"value\"]\n expect = data if blacklist else None\n assert expect == filter(data)\n return", "title": "" }, { "docid": "f5dd075dff0ef7f04a81d810c863ddaa", "score": "0.58874136", "text": "def test_full_convertion_lower_safe(self):\n self.full_convertion_run_safe(False)", "title": "" }, { "docid": "bdad0fa79da779b01d3f43c09a3cf7b2", "score": "0.5884499", "text": "def test_eat_cake():\n raise NotImplementedError(\"No code written for problem 6 unit test!!\")", "title": "" }, { "docid": "3e676a3bf12357c7a3295b26627d67c6", "score": "0.5882567", "text": "def test_no_negative_energy_costs():\n env = TropicalPrecooling()\n done = False\n obs = env.reset()\n while not done:\n obs, reward, done, info = env.step(np.ones(156)*10**9)\n assert np.min(obs[4]) >= 0", "title": "" }, { "docid": "b77be7453ede51f65f90f9db80d707b8", "score": "0.5876684", "text": "def test_technical_indicator(self):\n pass", "title": "" }, { "docid": "648d8c798f2bf475bdfb0473f9af5a94", "score": "0.58642834", "text": "def example(param):\n\tassert param >0\n\t#if __debug__:\n\t#\tif not param >0 \n\t#\t\traise AssertionError \n\t# do stuf here...", "title": "" }, { "docid": "931ca8f5c5d09f2d8dd39122ca3654b1", "score": "0.58526653", "text": "def test_short_09_python_multiple_junk(self):\n config.OPTS.use_numpy = False\n self.multiple_junk()", "title": "" }, { "docid": "327c8ee0b2e7659406bc9babd7968ca7", "score": "0.5852598", "text": "def test_if_prime_f(self):\n self.assertFalse(self.prime(9))", "title": "" }, { "docid": "3141475ccc7d617cf8f4b1f2d2287c4a", "score": "0.58515006", "text": "def has_evaluate_noisy(self):\n return (self.obj_funct_noisy is not None)", "title": "" }, { "docid": "3aff45f8fc9f46e198065ad3c59ab75d", "score": "0.5844335", "text": "def test_negative_number1(self):\n self.assertFalse(is_prime(-1))\n self.assertFalse(is_prime(-2))\n self.assertFalse(is_prime(-3))\n self.assertFalse(is_prime(-4))\n self.assertFalse(is_prime(-5))\n self.assertFalse(is_prime(-6))\n self.assertFalse(is_prime(-7))\n self.assertFalse(is_prime(-8))\n self.assertFalse(is_prime(-9))", "title": "" }, { "docid": "1b33975fb5aed2b6d2c1f40765f8c2a1", "score": "0.5837958", "text": "def test_failing():\n assert False", "title": "" }, { "docid": "3ebc3aec4fd344191f3cb58ae5429e63", "score": "0.58366966", "text": "def __internal_tests():", "title": "" }, { "docid": "955569c3fe2239d24a8017d7d09dcfc0", "score": "0.58357584", "text": "def test_function_that_doesnt_return_and_doesnt_raise(self):\n @raise_spotipy_error_as_souffle_error\n def func():\n pass\n\n assert func() == None", "title": "" }, { "docid": "2dbc8ceee272b69fdac770faeb7d0e25", "score": "0.5832393", "text": "def test_getRatio_num_deno_zero(self):\n test_price_a = 0\n test_price_b = 0\n self.assertIsNone(getRatio(test_price_a, test_price_b))", "title": "" }, { "docid": "c283ad78b5005e68fcb0d54884591b2d", "score": "0.5832103", "text": "def prove_NO() -> Proof:\n # Optional Task 6.9c", "title": "" }, { "docid": "2ea63cc9265c754c268ba1b0808f75c9", "score": "0.5823222", "text": "def test_rightHandArgumentImplementsUnequality(self):\n self.assertFalse(Record(1, 2) != EqualToEverything())\n self.assertTrue(Record(1, 2) != EqualToNothing())", "title": "" }, { "docid": "823d176e70bb891eeed3eb2fc3d2c10e", "score": "0.582226", "text": "def __test__(verbose=False):\n#-------------------------------------------------------------------------------\n #import pylib.tester as tester\n return 0", 
"title": "" }, { "docid": "42ac5de3c373901157218f24d5a2322e", "score": "0.5821342", "text": "def testNotEqual(self):\n par2 = TBRMMDesignParameters(n_test=28, iroas=1.0)\n self.assertNotEqual(self.par, par2)", "title": "" }, { "docid": "0e3a3a32338c492aad657f176d03b0c5", "score": "0.58114934", "text": "def test_test():\n\n assert True == Test(test_string=\"divisible by 19\", operand=19).perform_test()\n assert False == Test(test_string=\"divisible by 19\", operand=20).perform_test()", "title": "" }, { "docid": "7c88cef6a767eb7fa4933f85ed0baa8e", "score": "0.5807991", "text": "def test_silly_setup():", "title": "" }, { "docid": "99b2b9fd61a56185752ee6062ba77430", "score": "0.5801254", "text": "def test_c_haines():", "title": "" }, { "docid": "34c48045d747c73de32ec0459c8007c5", "score": "0.5797307", "text": "def test_bypass(self):\n assert test_results.get('bypass') is None, test_results['bypass'].help", "title": "" }, { "docid": "2fb0319d02997bf1a064332496cdbd5d", "score": "0.57972944", "text": "def test_negatives(self):\n self.assertEqual(cube_volume.cube_volume(10,10,-10),-1000)\n self.assertLess(cube_volume.cube_volume(100,100,-1),0)", "title": "" } ]
5c4955589b646fadb7b033a550a8b280
Estimates the parameter X > Y.
[ { "docid": "55126a0452c8cc0dbdf99fd12f175219", "score": "0.0", "text": "def fit(self, X, Y, data, ivs=None, civs=None):\n if (ivs is None) and (civs is None):\n ivs = self.model.get_ivs(X, Y)\n civs = self.model.get_conditional_ivs(X, Y)\n\n civs = [civ for civ in civs if civ[0] not in ivs]\n\n reg_covars = []\n for var in self.model.graph.predecessors(X):\n if var in self.model.observed:\n reg_covars.append(var)\n\n # Get CIV conditionals\n civ_conditionals = []\n for civ in civs:\n civ_conditionals.extend(civ[1])\n\n # First stage regression.\n params = (\n sm.OLS(data.loc[:, X], data.loc[:, reg_covars + civ_conditionals])\n .fit()\n .params\n )\n\n data[\"X_pred\"] = np.zeros(data.shape[0])\n for var in reg_covars:\n data.X_pred += params[var] * data.loc[:, var]\n\n summary = sm.OLS(\n data.loc[:, Y], data.loc[:, [\"X_pred\"] + civ_conditionals]\n ).fit()\n return summary.params[\"X_pred\"], summary", "title": "" } ]
[ { "docid": "e71147f5b83e3d6679dbfe1da7b8c4eb", "score": "0.6575952", "text": "def __gt__(self, y):\n return self._binary_operation(y, \"__gt__\")", "title": "" }, { "docid": "a1eb6a04cb06cec52254f9109f3b7f23", "score": "0.6464306", "text": "def conditional_est(X,Y):\n return nparam.KernelReg(endog=Y,\n exog=X, reg_type='lc',\n var_type='c', bw='cv_ls',\n defaults=nparam.EstimatorSettings(efficient=True))", "title": "" }, { "docid": "b18be1983faed081ec3f3defbff11008", "score": "0.63819647", "text": "def _greater_tensor(x, y):\n return F.tensor_gt(x, y)", "title": "" }, { "docid": "20cd940cd4f008d6c18ceec44fd0b98a", "score": "0.63095915", "text": "def evaluate(self, X, Y):\n A = self.forward_prop(X)\n cost = self.cost(Y, A)\n prediction = np.where(A >= 0.5, 1, 0)\n return (prediction, cost)", "title": "" }, { "docid": "2a27ef2eaa688742c762e556644fb426", "score": "0.61787975", "text": "def _greater_scalar(x, y):\n return F.scalar_gt(x, y)", "title": "" }, { "docid": "498e00344c7c834996fc39f514c46884", "score": "0.61581206", "text": "def decision_function(self,X):\n pass", "title": "" }, { "docid": "72870726c096697c207846253bc70e4c", "score": "0.60797054", "text": "def decision_function(self, X):\n ...", "title": "" }, { "docid": "04af1ad45bc838f731247ca738610510", "score": "0.60674906", "text": "def test_greater():\n input1 = np.random.rand(1, 3, 4, 5).astype(\"float32\")\n input2 = np.random.rand(1, 5).astype(\"float32\")\n inputs = [helper.make_tensor_value_info(\"input1\", TensorProto.FLOAT, shape=(1, 3, 4, 5)),\n helper.make_tensor_value_info(\"input2\", TensorProto.FLOAT, shape=(1, 5))]\n\n outputs = [helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, shape=(1, 3, 4, 5))]\n\n nodes = [helper.make_node(\"Greater\", [\"input1\", \"input2\"], [\"output\"])]\n\n graph = helper.make_graph(nodes,\n \"greater_test\",\n inputs,\n outputs)\n\n greater_model = helper.make_model(graph)\n \n bkd_rep = mxnet_backend.prepare(greater_model)\n numpy_op = np.greater(input1, input2).astype(np.float32)\n output = bkd_rep.run([input1, input2])\n npt.assert_almost_equal(output[0], numpy_op)", "title": "" }, { "docid": "5b3477cb23670d05aaf4041c38070343", "score": "0.60252666", "text": "def validate(self, X, y):\r\n # Your code here\r\n self.rss = ((self.weight.T).dot(X.T).dot(X).dot(\r\n self.weight))-2*(((X.T).dot(y)).T).dot(self.weight)\r\n return self.rss", "title": "" }, { "docid": "f6b8d55c59acc7fdb5945d5789a41c10", "score": "0.5973176", "text": "def predict(self,X):\t\n return ((self.model.predict(X)>= self.threshold)*1).reshape(-1)", "title": "" }, { "docid": "9a08d87ef34a4cd532f31e7f2c3927eb", "score": "0.5886364", "text": "def __call__(self, x, y):\n return torch.maximum(1. 
- x, y)", "title": "" }, { "docid": "22b5caeec1d2efc4aefff9daf968dcfc", "score": "0.5882507", "text": "def Supp(theta):\n rt = ((theta[0] > 0) and (theta[0] < 1))\n return rt", "title": "" }, { "docid": "f097d462d8d0dd5782da443b72e337b9", "score": "0.5877672", "text": "def __gt__(self, other):\n \n if self.x == other.x:\n if self.y > other.y:\n return True\n elif self.x > other.x:\n return True\n return False", "title": "" }, { "docid": "ea4fa2507494eaa3728c5e5431dde2ed", "score": "0.587569", "text": "def __ge__(self, y):\n return self._binary_operation(y, \"__ge__\")", "title": "" }, { "docid": "66fd899298f2e6b58303a88c8d9a639c", "score": "0.5746448", "text": "def predict(self, X):\n\n\t### YOUR CODE HERE\n\t\treturn np.where(np.dot(X, self.W) >= 0.0, 1, -1)", "title": "" }, { "docid": "d41bdf834c20bcab6f33b683bf005428", "score": "0.57254773", "text": "def test(self, X, Y):\n total_error = 0\n for i in range(len(X)):\n x = self.forward_propagate(X[i])\n t = Y[i]\n vector_diff = 0\n for j in range(len(x)):\n vector_diff += (x[j]-t[j])**2\n total_error += (vector_diff/len(x))\n return total_error/len(Y)", "title": "" }, { "docid": "5cc7b4462a26175a3eddd0c99c3223e1", "score": "0.5706853", "text": "def predict(self, X):\n\n # print(\"invariants_dict: \", self.invariants_dict)\n # np.set_printoptions(threshold=np.inf)\n y_sum = np.zeros(X.shape[0])\n for cols, theta in self.invariants_dict.items():\n y_sum += np.fabs(np.dot(X[:, cols], np.array(theta)))\n # print(\"y_sum\", y_sum)\n y_pred = (y_sum > 1e-6).astype(int)\n for idx, value in enumerate(y_pred):\n if value > 0:\n print(idx)\n return y_pred", "title": "" }, { "docid": "7057af7fa4deb683f3548d73a1f03714", "score": "0.5704478", "text": "def below(self,object): \n if( isinstance(object,Feature) ): \n return( self.minY() > object.maxY() )\n elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):\n return( self.minY() > object[1] )\n elif( isinstance(object,float) or isinstance(object,int) ):\n return( self.minY() > object )\n else:\n warnings.warn(\"SimpleCV did not recognize the input type to feature.below(). 
This method only takes another feature, an (x,y) tuple, or a ndarray type.\")\n return None", "title": "" }, { "docid": "f158a8da7d9cf006def9474f322f3153", "score": "0.5695858", "text": "def predictValue(X, theta):\n return h(theta,X) >= 0.5", "title": "" }, { "docid": "4e1f5ba6690d9aaa22a739108f2e02cb", "score": "0.56595296", "text": "def __gt__(self, other):\n if self.n * other.d > other.n * self.d:\n return True\n else:\n return False", "title": "" }, { "docid": "1220b16156e24378531d6d1a40801bc2", "score": "0.5639477", "text": "def decision_function(self, X):\n X_ = self._check_array(X)\n return dot(X_, self._coef)", "title": "" }, { "docid": "c309dbeb3344f7a3cdc2f006fdb3b227", "score": "0.5626461", "text": "def select_threshold(X, Xval, yval):\r\n # create multivariate model using training data\r\n mu = X.mean(axis=0)\r\n cov = np.cov(X.T)\r\n multi_normal = stats.multivariate_normal(mu, cov)\r\n\r\n # this is key, use CV data for fine tuning hyper parameters\r\n pval = multi_normal.pdf(Xval)\r\n\r\n # set up epsilon candidates\r\n epsilon = np.linspace(np.min(pval), np.max(pval), num=10000)\r\n\r\n # calculate f-score\r\n fs = []\r\n for e in epsilon:\r\n y_pred = (pval <= e).astype('int')\r\n fs.append(f1_score(yval, y_pred))\r\n\r\n # find the best f-score\r\n argmax_fs = np.argmax(fs)\r\n\r\n return epsilon[argmax_fs], fs[argmax_fs]", "title": "" }, { "docid": "5e6daa3a3979f90b92f8b09ff6e8c107", "score": "0.56120473", "text": "def greater(x1, x2, dtype=None):\n return _apply_tensor_op(F.tensor_gt, x1, x2, dtype=dtype)", "title": "" }, { "docid": "118341f4e0a6aa725ee308f7b70bb3fc", "score": "0.5575775", "text": "def __gt__(self, value):\r\n return self.operation(gt=value)", "title": "" }, { "docid": "4cfa8084218c3d9ad0deab40782a4d62", "score": "0.5573631", "text": "def compare(x, y):\n return np.nansum((x - y) * (x - y))", "title": "" }, { "docid": "4e50a8e98adc3366aa080292c17f4b01", "score": "0.5565428", "text": "def prob_x_greater_than(self, x, y, theta):\n model_params, sigma, model_weights = self._split_theta(theta)\n\n y_predicted = self._predict_given_params(x, model_params, model_weights)\n\n cdf = norm.cdf(y, loc=y_predicted, scale=sigma)\n\n return 1.0 - cdf", "title": "" }, { "docid": "77fd0549e7b5e1d6bc6806905d0cfc30", "score": "0.55640256", "text": "def close_to2(self,x,y,theta):\n print theta, self.theta\n print abs(x-self.x)<POS_THRESHOLD and abs(y-self.y)<POS_THRESHOLD and abs(theta-self.theta)<THETA_THRESHOLD\n print self.x, x, abs(x-self.x)\n return (abs(x-self.x)<POS_THRESHOLD and abs(y-self.y)<POS_THRESHOLD and abs(theta-self.theta)<THETA_THRESHOLD)", "title": "" }, { "docid": "b7efdfb3d37746c668e7883d8fa1ce9b", "score": "0.55539286", "text": "def greater_equal(x1, x2, dtype=None):\n return _apply_tensor_op(F.tensor_ge, x1, x2, dtype=dtype)", "title": "" }, { "docid": "695d053224392d4be24c8f4a1d7d5a7f", "score": "0.554011", "text": "def predict(self,X):\n net_input=np.dot(X,self.w_[1:]) + self.w_[0]\n return np.where(net_input>=0.0,1,-1)", "title": "" }, { "docid": "829fb3b0702ff6bec420eedc0d9c8db4", "score": "0.5536942", "text": "def compute(y, p):\n\n best_F1 = 0\n best_threshold = 0\n\n # calculate step size for loop\n step_size = (mat.max(p) - mat.min(p)) / 1000\n\n threshold_array = mat.arange(start=mat.min(p), stop=mat.max(p), step=step_size)\n\n for threshold in threshold_array:\n # since in this model we are detecting Anomaly so positive output is 1 (Anomaly) if the probability of that\n # data point is less then the threshold\n predicted_values = 
mat.where(p < threshold, 1, 0)\n\n true_positive = len(mat.where((predicted_values == 1) & (y == 1))[0])\n false_positive = len(mat.where((predicted_values == 1) & (y == 0))[0])\n false_negative = len(mat.where((predicted_values == 0) & (y == 1))[0])\n\n # calculate precision\n if true_positive == 0:\n F1 = 0\n else:\n precision = true_positive / (true_positive + false_positive)\n recall = true_positive / (true_positive + false_negative)\n F1 = (2 * precision * recall) / (precision + recall)\n\n if F1 > best_F1:\n best_F1 = F1\n best_threshold = threshold\n\n return best_threshold, best_F1", "title": "" }, { "docid": "9e4bd8de2c8762c1a5f42a6a0275959b", "score": "0.55368865", "text": "def gradient(self, y_true, y_pred):\n y_true = y_true or -1\n if y_pred > y_true + self.eps:\n return 1\n elif y_pred + self.eps < y_true:\n return -1\n return 0", "title": "" }, { "docid": "234befef36363d4b66e92eaf767ea059", "score": "0.55292654", "text": "def both_positive(x, y):\n \"*** YOUR CODE HERE ***\"\n return x > 0 and y > 0", "title": "" }, { "docid": "21e5da7a1338e368dda26e5b8a170fb5", "score": "0.55256844", "text": "def test(self, x):\n y = np.ones(x.shape[0])\n y[x[:, self.i] < self.theta] *= -1\n return y", "title": "" }, { "docid": "b49efac5a5efee4d5c45f571e9019ebe", "score": "0.55225766", "text": "def ols_estimator(X, Y):\n\n params = np.linalg.inv(X.T @ X) @ X.T @ Y\n errors = Y - X @ params\n\n return params, errors", "title": "" }, { "docid": "11aa5d286d6b045dc83b4622c98e365a", "score": "0.5506641", "text": "def above(self,object):\n if( isinstance(object,Feature) ): \n return( self.maxY() < object.minY() )\n elif( isinstance(object,tuple) or isinstance(object,np.ndarray) ):\n return( self.maxY() < object[1] )\n elif( isinstance(object,float) or isinstance(object,int) ):\n return( self.maxY() < object )\n else:\n warnings.warn(\"SimpleCV did not recognize the input type to feature.above(). 
This method only takes another feature, an (x,y) tuple, or a ndarray type.\")\n return None", "title": "" }, { "docid": "e154948be36414355e6d553fe390e153", "score": "0.5500019", "text": "def __gt__(self, other):\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return None\n else:\n return self._value > other.value", "title": "" }, { "docid": "861ef441fbd94dbc7bb773f070cfc350", "score": "0.54879916", "text": "def g_true_l2(self, x):\n a = self.s2params_a\n b = self.s2params_b\n c = self.s2params_c\n return (a ** (x-b)) + c", "title": "" }, { "docid": "83ee5186c5f2b2ad9f8fcdceb738bb61", "score": "0.5483033", "text": "def __gt__(self, rhs: Any) -> ndarray:\n from ._ufunc import greater\n\n return greater(self, rhs)", "title": "" }, { "docid": "9f7430eb7f0badaca7ba712ac63b300f", "score": "0.5481486", "text": "def feq(x,y):\n return abs(x-y) <= .0000001", "title": "" }, { "docid": "475acfbb73772c907b606d7248ebf358", "score": "0.54718673", "text": "def predict(self, X, epsilon=None) :\r\n\r\n #Checking if a value has been provided for epsilon, else using the value set during training\r\n if(epsilon == None) :\r\n epsilon = self.epsilon\r\n\r\n probabilities = self.pdf(X) #Getting the probabilities for the inputs\r\n\r\n return probabilities < epsilon", "title": "" }, { "docid": "9bccd3f2909e5cd9eeccb6ae7f9e99fe", "score": "0.5462964", "text": "def call(self, x, is_prediction=True):\n ######### Your code starts here #########\n y_est = tf.matmul(x, self.W) - self.b\n if is_prediction:\n return tf.math.sign(y_est)\n return y_est\n ######### Your code ends here #########", "title": "" }, { "docid": "c0211c0bf65f69bd7b2e9abdf94052f0", "score": "0.5461286", "text": "def evaluate(self, x_test, y_test):\n pass", "title": "" }, { "docid": "36e1247c2d9e2f29ce22319e9ea35aa9", "score": "0.54571104", "text": "def is_greater_y(pos_1, pos_2):\n #print(\"comparing \", pos_1, pos_2)\n return pos_1[1] > pos_2[1]", "title": "" }, { "docid": "8ae992cbe86b776c05f33a4ff34df97e", "score": "0.54522485", "text": "def evaluate(self, X, y, metrics=None):", "title": "" }, { "docid": "913c961ad8f60aaf149ec760f8abe876", "score": "0.5451032", "text": "def __gt__(self, p):\n return self.p > p.p", "title": "" }, { "docid": "b1fa63d487b15d6c1da2f15ff57ad569", "score": "0.5445138", "text": "def predict(self, X):\n y_pred =[]\n pred = self.predict_proba(X)\n \n for i in range(pred.shape[1]):\n proba =pred[0,i] \n if proba >self.threshold:\n y_pred.append(1)\n else:\n y_pred.append(-1)\n # y_pred = [proba > self.threshold for proba in self.predict_proba(X)]\n return y_pred", "title": "" }, { "docid": "29cb4d2ad306e74964b64671b96e0716", "score": "0.54384303", "text": "def __gt__(self, other):\n return self._radius > other.radius", "title": "" }, { "docid": "85d5ffe6c7934055d6b12e55d33c8045", "score": "0.5435771", "text": "def posterior_prob_x_greater_than(self, x, y, thin=1):\n assert isinstance(x, float) or isinstance(x, int)\n assert isinstance(y, float) or isinstance(y, int)\n probs = []\n samples = self.get_burned_in_samples()\n for theta in samples[::thin]:\n probs.append(self.prob_x_greater_than(x, y, theta))\n\n return np.ma.masked_invalid(probs).mean()", "title": "" }, { "docid": "eccb1f21ebbf9ec59a4d12f60fe585b1", "score": "0.54331", "text": "def constraint(self, x, y):\n warnings.warn(\"constraint function not implemented.\")\n return 0.0", "title": "" }, { "docid": "eccb1f21ebbf9ec59a4d12f60fe585b1", "score": "0.54331", "text": "def constraint(self, x, y):\n warnings.warn(\"constraint 
function not implemented.\")\n return 0.0", "title": "" }, { "docid": "74709224f21f4e6936f27f8fffade6fe", "score": "0.54325503", "text": "def classify(self, X):\n ###############################################################\n # Fill your code in this function\n ###############################################################\n crit_val = 0.5\n f = self.forward(X)\n p = []\n for val in f:\n if (val<crit_val):\n p.append(-1)\n else:\n p.append(1)\n p = np.array(p)\n return p", "title": "" }, { "docid": "d9b5006eb0b9a13c9bc8d6b720a6f211", "score": "0.5431769", "text": "def objective(self, x, y):\n warnings.warn(\"objective function not implemented.\")\n return 0.0", "title": "" }, { "docid": "3e000dd8d6683da0239f7d1cf94f1dd1", "score": "0.5427026", "text": "def evaluate(self, X, y):\n return self._score(y, self.model.predict_proba(X)[:, 1])", "title": "" }, { "docid": "7ad66101cedc19becb58a08ea6098654", "score": "0.54177547", "text": "def __call__(self, x, y):\n return torch.where(torch.le(x, y), torch.ones_like(x), y)", "title": "" }, { "docid": "1be87eaba3d289b8124c58e4f14b327f", "score": "0.540684", "text": "def __gt__(self, rhs: Union[Simpy, float]) -> list[bool]:\n output: list[bool] = []\n bool_value: bool = False\n if isinstance(rhs, Simpy):\n assert len(rhs.values) == len(self.values)\n index: int = 0\n while index < len(rhs.values):\n bool_value = False\n if self.values[index] > rhs.values[index]:\n bool_value = True\n output.append(bool_value)\n index += 1\n elif isinstance(rhs, float):\n for each in self.values:\n bool_value = False\n if each > rhs:\n bool_value = True\n output.append(bool_value)\n return output", "title": "" }, { "docid": "eba3aee8f12a45e0964801e29f54e0bc", "score": "0.54060316", "text": "def evaluate(self, theta):\n\n return self.norm * ((theta >= self.low) and (theta <= self.high))", "title": "" }, { "docid": "f66a4190df3a78712bba688c6ef660ca", "score": "0.5404549", "text": "def decision_function(self, X):\n return self.best_estimator_.decision_function(X)", "title": "" }, { "docid": "23c6edf5df5f7f71e43bf6f636a99f1a", "score": "0.53986627", "text": "def fun(y):\n diff = x - y\n return 0.5 * jnp.sum(diff ** 2) + 0.5 * alpha * jnp.sum(y ** 2)", "title": "" }, { "docid": "23c6edf5df5f7f71e43bf6f636a99f1a", "score": "0.53986627", "text": "def fun(y):\n diff = x - y\n return 0.5 * jnp.sum(diff ** 2) + 0.5 * alpha * jnp.sum(y ** 2)", "title": "" }, { "docid": "97dda7ea530ca8c1aebee13aef3df9a3", "score": "0.53942144", "text": "def __gt__(self, other):\n return self.attack + 0.5 > other.defence", "title": "" }, { "docid": "2c4a261c00ed2624c5a2cf7808ae03bb", "score": "0.53812456", "text": "def __gt__(self, other):\n return self.data > other.data", "title": "" }, { "docid": "99234bc27c86612a0c70c2b5b9163e35", "score": "0.53752714", "text": "def gt(a, b):\n return a > b", "title": "" }, { "docid": "b81e8cc702463e8728cb9e8386299349", "score": "0.5370524", "text": "def test_far_away(self):\n xm, _ = gradient_descent(self.loss, 200, n_steps=40)\n\n with self.subTest(\"Point\"):\n self.assertLess(abs(xm), 0.05)", "title": "" }, { "docid": "be6ddfb5712814aecfdf37cf0b3f4172", "score": "0.5357488", "text": "def G(self, t, y_, y, xs):\n return math.exp( Counters.dot( self.parameters, self.featureFunction(t, y_, y, xs) ) )", "title": "" }, { "docid": "52a0b028d87b78b38824fdd18576cebb", "score": "0.535633", "text": "def fit(self, X: np.ndarray, y: np.ndarray) -> np.ndarray:\n coeffs = self.estimator.fit(X, y).coef_\n coeffs[np.abs(coeffs) < self.threshold] = 0.0\n\n return 
coeffs", "title": "" }, { "docid": "f96b5492b6cd705a2f935f6cb603b999", "score": "0.5354228", "text": "def evaluate(self, params):\n if self.value == params:\n return -0.0\n return np.inf", "title": "" }, { "docid": "738cfdc33e4810927baf91d199e13cde", "score": "0.5347481", "text": "def _statistic(self, x, y):", "title": "" }, { "docid": "7b612434c783faeb2de6cae5bc41847a", "score": "0.5347322", "text": "def validate(self, X, y=None):", "title": "" }, { "docid": "bbbdf34d5f13e005edfc24dda4378751", "score": "0.5341473", "text": "def decision_function(self, X: np.array) -> np.array:\n return (\n np.sum(self._kernel(X, self.X_) * self.y_ * self._lambdas, axis=1)\n + self._b\n )", "title": "" }, { "docid": "a2a59a1a0aad27a846d3083f3bed54c2", "score": "0.53372496", "text": "def score(self, X, Y):\n anomaly_scores = self.svm.decision_function(X).reshape(-1,1)\n return self.stump.score(anomaly_scores, Y)", "title": "" }, { "docid": "667e7cf7359a5943b0ed2d7f208344d3", "score": "0.53360146", "text": "def compare(x, y):\n return np.linalg.norm(x - y)", "title": "" }, { "docid": "667e7cf7359a5943b0ed2d7f208344d3", "score": "0.53360146", "text": "def compare(x, y):\n return np.linalg.norm(x - y)", "title": "" }, { "docid": "667e7cf7359a5943b0ed2d7f208344d3", "score": "0.53360146", "text": "def compare(x, y):\n return np.linalg.norm(x - y)", "title": "" }, { "docid": "43b548845a235dc0d73e45e39d31613e", "score": "0.53340834", "text": "def __gt__(self, other: AnyVec) -> bool:\n if isinstance(other, VecBase):\n return (\n (self._x - other.x) > 1e-6 and\n (self._y - other.y) > 1e-6 and\n (self._z - other.z) > 1e-6\n )\n elif _check_tuple3(other):\n return (\n (self._x - other[0]) > 1e-6 and\n (self._y - other[1]) > 1e-6 and\n (self._z - other[2]) > 1e-6\n )\n else:\n return NotImplemented", "title": "" }, { "docid": "460466860174d3870121836840464a16", "score": "0.53131807", "text": "def _check_optimality_cond(self, x, y, ctx=None):\n return (abs(self.fY(x, y)) <= self.eps).all()", "title": "" }, { "docid": "f63ffc73b68c22f3ce8352c204431ece", "score": "0.53104746", "text": "def select_threshold(X, Xval, yval):\r\n # create multivariate model using training data\r\n mu = X.mean(axis=0)\r\n cov = np.cov(X.T)\r\n multi_normal = stats.multivariate_normal(mu, cov)\r\n\r\n # this is key, use CV data for fine tuning hyper parameters\r\n # 这个pval的值是Xval对应的多元高斯值?\r\n pval = multi_normal.pdf(Xval)\r\n\r\n # set up epsilon candidates\r\n # linspace(start,stop,num) 把开始-终止之间的值切成num个\r\n epsilon = np.linspace(np.min(pval), np.max(pval), num=10000)\r\n\r\n # calculate f-score\r\n #小于的e会被标记为1,即为异常。\r\n fs = []\r\n for e in epsilon:\r\n y_pred = (pval <= e).astype('int')\r\n fs.append(f1_score(yval, y_pred))\r\n\r\n # find the best f-score\r\n argmax_fs = np.argmax(fs)\r\n # 返回最大的f1值对应的pval值,及fs值\r\n return epsilon[argmax_fs], fs[argmax_fs]", "title": "" }, { "docid": "71ed5fef57ef4344c2798d29d3ff53b7", "score": "0.5308917", "text": "def __ge__(self, other):\n other = as_dimension(other)\n if self._value is None or other.value is None:\n return None\n else:\n return self._value >= other.value", "title": "" }, { "docid": "c015ff620645359550058f34196e745b", "score": "0.53028375", "text": "def __lt__(self, other):\n area = self.x * other.y - other.x * self.y\n return area > 0", "title": "" }, { "docid": "f4451f5c02b455cc4a0e02b9d4dbafb9", "score": "0.5301502", "text": "def predict(self, X, beta=None, threshold=0.5):\n if beta is None:\n beta = self.coef\n y_pred = np.array(self.probability(X, beta) >= threshold).astype(np.int)\n 
return y_pred", "title": "" }, { "docid": "9eb0b60c074e5abdc2de23d59c5ae3dd", "score": "0.5301082", "text": "def both_positive(x, y):\n return x > 0 and y > 0", "title": "" }, { "docid": "5e82f4d78386e5677386349778a5a717", "score": "0.5300111", "text": "def _cond_apply(x, y, func, prob):\n return tf.cond((tf.random.uniform([], 0, 1) >= (1.0 - prob)), lambda: func(x,y), lambda: (x,y))", "title": "" }, { "docid": "05d5fa6d6eddf071530b0ff6e9a0d615", "score": "0.5298182", "text": "def gte(self, other):\n\n return self._post(\"gte\", Tensor, r=other)", "title": "" }, { "docid": "406da7d6199896f15744adaf9c31c075", "score": "0.5288483", "text": "def decision_function(self, X):\n dec_func = self._decision_function(X)\n if len(self.classes_) == 2:\n return dec_func[:, 1] - dec_func[:, 0]\n return dec_func", "title": "" }, { "docid": "50823ee50d0a10f9c517a17809826cdc", "score": "0.528195", "text": "def error(self, X, y):\n return np.sum(self.predict(X) != y) / X.shape[0]", "title": "" }, { "docid": "54516c5ed06f40a5b4cd1d2fe84fb66f", "score": "0.5280582", "text": "def _less_equal_tensor(x, y):\n return F.tensor_le(x, y)", "title": "" }, { "docid": "4bac28a4352789d0f0b32a32cbcc9d2c", "score": "0.5279085", "text": "def staged_decision_function(self, X):\n ...", "title": "" }, { "docid": "882464f05d9a97a9a60a2946b0140cc6", "score": "0.5276351", "text": "def error(self, X, y):\n y_hat = self.predict(X)\n y = np.array(y)\n return sum(y[i] != y_hat[i] for i in range(len(y))) / len(y)", "title": "" }, { "docid": "a2c3661442a1839dd1e90a37ce6d4a53", "score": "0.5276245", "text": "def test_not_greater(self):\n self.assertFalse(greater_than(self.value_a, self.value_b))", "title": "" }, { "docid": "f22fc73eecff102bc39b16824b065d56", "score": "0.52706385", "text": "def greater_than_test(self, x, y, bitlength=None):\n return self.protocol.gt(x, y, self.num_bits)", "title": "" }, { "docid": "5aae50f73e66f85d4798bb61a8f61d21", "score": "0.5268766", "text": "def predict(self, x):\n Xtest = (x-self.xbar)/self.xstd\n y, C = self.model.predict(Xtest)\n y_pred = y*self.ystd + self.ybar\n inds = [i for i in range(x.shape[0]) \\\n if np.any(np.all(np.abs(x[i] - self.X) <= 1e-9, axis=1))]\n C[inds] = 0.0\n count = 0\n if np.any(C<0):\n print('Negative variance.')\n print(C)\n print('Model status:')\n print([opt_run.status for opt_run in\\\n self.model.optimization_runs]) \n print('Model parameters:')\n print(self.model.param_array)\n print('Parameters from optimization runs:')\n print([opt_run.x_opt for opt_run in\\\n self.model.optimization_runs]) \n C = np.ones(y.shape)*1e-4\n std_dev = np.sqrt(C)*self.ystd\n return y_pred, std_dev", "title": "" }, { "docid": "4849c9c462faf7589d4136ca85c50e0c", "score": "0.52667785", "text": "def fun(y):\n diff = x - y\n return 0.5 * jnp.sum(diff ** 2) + alpha * jnp.sum(y)", "title": "" }, { "docid": "24adf74b39250d039301ced9e71d3a60", "score": "0.5254337", "text": "def both_positive(x, y):\n return (x > 0) and (y > 0) # You can replace this line!", "title": "" }, { "docid": "8148fd86e0886bf9af8b465a600d9556", "score": "0.52489376", "text": "def predict(self, X):\r\n X = np.insert(X, 0, 1, axis = 0)\r\n net = np.dot(self.weights,X)\r\n y_pred = np.where(net>=0.0,1.0,0.0)\r\n return y_pred", "title": "" }, { "docid": "e10975afef26f08663ad01200c579955", "score": "0.5248622", "text": "def metric_gt(metric1, metric2):\n\n return metric1 > metric2", "title": "" }, { "docid": "1c87304db1620a9b9cead2f297508d6f", "score": "0.524489", "text": "def debug_compare_gp_and_shivv_models(self, X, y, dy, 
shivv_model, debug_x=[], debug_y=[], pos_delta_mag=None, neg_delta_mag=None):\n #####\n import numpy as np\n from sklearn.gaussian_process import GaussianProcess\n from matplotlib import pyplot as pl\n\n t_min = numpy.min(X)\n t_max = numpy.max(X)\n x = np.atleast_2d(np.linspace(t_min, t_max, 10000)).T\n\n X = np.atleast_2d(X).T\n\n ### Instanciate a Gaussian Process model\n #gp = GaussianProcess(corr='squared_exponential', theta0=1e-3,\n # thetaL=1e-3, thetaU=1,\n # nugget=(dy / y) ** 2,\n # random_start=20)\n gp = GaussianProcess(corr='absolute_exponential', theta0=1e-3,\n nugget=(dy / y) ** 2,\n random_start=10)\n # Fit to data using Maximum Likelihood Estimation of the parameters\n gp.fit(X, y)\n\n # Make the prediction on the meshed x-axis (ask for MSE as well)\n y_pred, MSE = gp.predict(x, eval_MSE=True)\n sigma = np.sqrt(MSE)\n # Plot the function, the prediction and the 95% confidence interval based on\n # the MSE\n from matplotlib import rcParams\n rcParams.update({'legend.fontsize':8})\n ms = 4\n fig = pl.figure()\n pl.plot(x, [numpy.median(y)]*len(x), 'c', label='median')\n #pl.plot(x, [numpy.mean(y)]*len(x), 'm', label=u'mean')\n #pl.plot(X, y, 'r:', label=u'orig $m(t)$')\n pl.errorbar(X.ravel(), y, dy, fmt='ro', ms=5, label='Observations')\n pl.plot(X, shivv_model, 'g', ms=ms, label='Shivvers model')\n pl.plot(X, shivv_model, 'go', ms=ms)\n pl.plot(x, y_pred, 'b-', ms=ms, label='exp corr prediction')\n pl.fill(np.concatenate([x, x[::-1]]), \\\n np.concatenate([y_pred - 1.9600 * sigma,\n (y_pred + 1.9600 * sigma)[::-1]]), \\\n alpha=.5, fc='b', ec='None', label='95% confidence interval')\n if len(debug_x) > 0:\n pl.plot(debug_x, debug_y, 'y*', ms=10, label='cross threshhold')\n if pos_delta_mag != None:\n pl.plot(x, [pos_delta_mag]*len(x), 'y', label='median delta thresh')\n pl.plot(x, [neg_delta_mag]*len(x), 'y', label='median delta thresh')\n \n\n pl.xlabel('$t$')\n pl.ylabel('$m(t)$')\n pl.legend(loc='upper left')\n srcid = 244888\n img_fpath = '/home/dstarr/scratch/lcmodel_feature_asas_examples/%d.png' % (srcid)\n pl.title(\"Source ID=%d\" % (srcid))\n pl.savefig(img_fpath)\n #import os\n #os.system(\"eog %s\" % (img_fpath))\n #pl.show() \n #import pdb; pdb.set_trace()\n #print", "title": "" }, { "docid": "3dc5ce3d5ba52f7686683f3dc00f6a71", "score": "0.5241258", "text": "def gt():\n \n name, varsFrame, arg2Type, arg2Value, arg3Type, arg3Value = relation_operations_setup()\n result = None\n\n if (arg2Type == \"int\" and arg3Type == \"int\"):\n result = arg2Value > arg3Value\n elif (arg2Type == \"string\" and arg3Type == \"string\"):\n arg2Value = convert_unicode_values_in_string(arg2Value)\n arg3Value = convert_unicode_values_in_string(arg3Value)\n result = arg2Value > arg3Value\n elif (arg2Type == \"bool\" and arg3Type == \"bool\"):\n if arg2Value == \"true\" and arg3Value == \"false\":\n result = True\n else:\n result = False\n else:\n err_msg(\"Operand types are not supported in GT instruction\", WrongOperandTypeErr)\n\n result = make_correct_form_of_result(result)\n argStruct = {'type': 'bool', 'value': result}\n varsFrame[name] = argStruct", "title": "" }, { "docid": "4f37250f998b0984ea3200d99a7bef1b", "score": "0.5235338", "text": "def is_greater_x(pos_1, pos_2):\n #print(\"comparing \", pos_1, pos_2)\n return pos_1[0] > pos_2[0]", "title": "" }, { "docid": "dc25a729fbb853069b4f9e66b1a9a937", "score": "0.52347046", "text": "def decision(X, W, dev=False):\n if dev:\n return X\n else:\n X = add_bias(X)\n return numpy.dot(X, W)", "title": "" }, { "docid": 
"bc80facae540a1b51d0d579d16e1d357", "score": "0.5231903", "text": "def goalnotreached(var_x, var_y):\n return MAZE[var_x][var_y] != 'g'", "title": "" }, { "docid": "8d0033673eaecfabdc5e1e9255e7373c", "score": "0.5230975", "text": "def insupport(self, x):\n return a <= x < b", "title": "" }, { "docid": "7d8a3f20c7992442fd97e08a1a426930", "score": "0.5230755", "text": "def __call__(self, x):\n def f(a):\n return np.mean(self.observations <= a)\n vf = np.frompyfunc(f, 1, 1)\n return vf(x).astype(float)", "title": "" } ]
8a4bd4e73f5f820f2c92790c16a8da09
Test that manager handles dependencies correctly.
[ { "docid": "e274cd933131c2f7b7ea941410f67ca6", "score": "0.7210767", "text": "def test_dependencies(self):\n process_parent = Process.objects.filter(slug=\"test-dependency-parent\").latest()\n process_child = Process.objects.filter(slug=\"test-dependency-child\").latest()\n data_parent = Data.objects.create(\n name=\"Test parent\", contributor=self.contributor, process=process_parent\n )\n data_child1 = Data.objects.create(\n name=\"Test child\",\n contributor=self.contributor,\n process=process_child,\n input={},\n )\n data_child2 = Data.objects.create(\n name=\"Test child\",\n contributor=self.contributor,\n process=process_child,\n input={\"parent\": data_parent.pk},\n )\n data_child3 = Data.objects.create(\n name=\"Test child\",\n contributor=self.contributor,\n process=process_child,\n input={\"parent\": None},\n )\n\n data_parent.refresh_from_db()\n data_child1.refresh_from_db()\n data_child2.refresh_from_db()\n data_child3.refresh_from_db()\n\n # Check locks are created in manager.\n self.assertFalse(data_parent.access_logs.exists())\n self.assertFalse(data_child1.access_logs.exists())\n self.assertTrue(data_child2.access_logs.exists())\n self.assertFalse(data_child3.access_logs.exists())\n\n # Check that the data_parent location was locked.\n access_log = data_child2.access_logs.get()\n self.assertEqual(\n access_log.storage_location.file_storage.data.get().id, data_parent.id\n )\n # Check that the log is released.\n self.assertIsNotNone(access_log.started)\n self.assertIsNotNone(access_log.finished)\n\n # Check status.\n self.assertEqual(data_parent.status, Data.STATUS_DONE)\n self.assertEqual(data_child1.status, Data.STATUS_DONE)\n self.assertEqual(data_child2.status, Data.STATUS_DONE)\n self.assertEqual(data_child3.status, Data.STATUS_DONE)", "title": "" } ]
[ { "docid": "26700313d381242bcc845d3b15ccdddc", "score": "0.67114073", "text": "def test_manager(self):\n manager = ISubscriptionManager(self.root.document, None)\n self.assertNotEqual(manager, None)\n self.assertTrue(verifyObject(ISubscriptionManager, manager),)\n\n manager = ISubscriptionManager(self.root, None)\n self.assertNotEqual(manager, None)\n self.assertTrue(verifyObject(ISubscriptionManager, manager),)\n\n # They are not available on asset\n manager = ISubscriptionManager(self.root.file, None)\n self.assertEqual(manager, None)", "title": "" }, { "docid": "c274678ac78a9cd9c3c2dce901db52f1", "score": "0.6550478", "text": "def test_get_redis_manager_unpatched(self):\n persistence_helper = PersistenceHelper()\n err = self.assertRaises(\n PersistenceHelperError, persistence_helper.get_redis_manager)\n self.assertTrue('setup() must be called' in str(err))", "title": "" }, { "docid": "0b101397bbc338595713885fade81449", "score": "0.650485", "text": "def test_get_redis_manager_async(self):\n persistence_helper = self.add_helper(PersistenceHelper())\n manager_d = persistence_helper.get_redis_manager()\n self.assertIsInstance(manager_d, Deferred)\n manager = yield manager_d\n self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._TxRedisManager)\n self.assertEqual(persistence_helper._redis_managers, [manager])", "title": "" }, { "docid": "00676c4c7c1736c675efa9062142d81b", "score": "0.6488792", "text": "def manager():\n pass", "title": "" }, { "docid": "15e794551cda9cc6b2f80f4dcc3a6270", "score": "0.64762217", "text": "def test_get_riak_manager_unpatched(self):\n persistence_helper = PersistenceHelper()\n err = self.assertRaises(Exception, persistence_helper.get_riak_manager)\n self.assertTrue('setup() must be called' in str(err))", "title": "" }, { "docid": "717d4d9c3feb554f0a6f2d6d7dc4443d", "score": "0.6382587", "text": "def test_get_redis_manager_sync(self):\n persistence_helper = self.add_helper(PersistenceHelper(is_sync=True))\n manager = persistence_helper.get_redis_manager()\n self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._RedisManager)\n self.assertEqual(persistence_helper._redis_managers, [manager])", "title": "" }, { "docid": "8aaafcb3709054d8f9195ee46b1a0035", "score": "0.6376858", "text": "def setUp(self) -> None:\n self.manager = Manager()", "title": "" }, { "docid": "65a3108a970f522f5f0e8bf16602b71c", "score": "0.62933004", "text": "def test__get_redis_managers_for_cleanup(self):\n persistence_helper = PersistenceHelper()\n managers = [\n FakeRedisManagerForCleanup('prefix1'),\n FakeRedisManagerForCleanup('prefix2'),\n FakeRedisManagerForCleanup('prefix2'),\n FakeRedisManagerForCleanup('prefix1'),\n FakeRedisManagerForCleanup('prefix3'),\n ]\n persistence_helper._redis_managers.extend(managers)\n self.assertEqual(\n list(persistence_helper._get_redis_managers_for_cleanup()),\n list(reversed(zip([True, True, False, False, True], managers))))", "title": "" }, { "docid": "7dec4ec189d1e700f046e44c1abf2f2f", "score": "0.62411", "text": "def test_customer_manager(self):\n \n class BookManager(RestManager):\n def filter_on_author(self, author_resource):\n return self.params([('author', author_resource),])\n \n class Book(RestObject):\n objects = BookManager()\n class Meta:\n list = (r'^book/$', 'book_set')\n item = r'^book/(?P<id>\\d)$'\n \n class Author(RestObject):\n class Meta:\n item = r'^book/(?P<id>\\d)$'\n\n\n self.assertTrue(isinstance(Book.objects, BookManager))\n self.assertTrue(hasattr(Book.objects, 
'filter_on_author'))\n self.assertTrue(Book.objects.object_class, Book)\n\n self.assertTrue(isinstance(Author.objects, RestManager))\n self.assertTrue(Author.objects.object_class, Author)\n\n self.assertNotEqual(Book.objects, Author.objects)\n\n book = Book()\n # Cannot test AttributeError with self.assertRaises\n try:\n book.objects.all()\n except AttributeError, e:\n self.assertEqual('%s' % e, 'Manager is not accessible via Book instances')", "title": "" }, { "docid": "d9fbe9ea7db8bb39367bf7e75c8f41f8", "score": "0.6233791", "text": "def testCheckDependencies(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency_helper.CheckDependencies(verbose_output=False)", "title": "" }, { "docid": "6e03aebd260661ea136f6650d0abf124", "score": "0.6221906", "text": "def test_default_manager(self):\n\n class Book(RestObject):\n pass\n\n class Author(RestObject):\n pass\n \n self.assertTrue(isinstance(Book.objects, RestManager))\n self.assertTrue(Book.objects.object_class, Book)\n\n self.assertTrue(isinstance(Author.objects, RestManager))\n self.assertTrue(Author.objects.object_class, Author)\n\n self.assertNotEqual(Book.objects, Author.objects)\n \n book = Book()\n # Cannot test AttributeError with self.assertRaises\n try:\n book.objects.all()\n except AttributeError, e:\n self.assertEqual('%s' % e, 'Manager is not accessible via Book instances')", "title": "" }, { "docid": "46afdd1bc687618b7d77b2006247044d", "score": "0.6188809", "text": "def test_register_dynamic_plugin_manager1(self):\n pass", "title": "" }, { "docid": "1b55e9f311687dc1e08797ffed54856f", "score": "0.61576146", "text": "def test_add_team_manager_to_team(self):\n pass", "title": "" }, { "docid": "235f660679cfb9067f09faba3b805a5c", "score": "0.61123097", "text": "def test_setup_applies_patches(self):\n manager_inits = self.get_manager_inits()\n persistence_helper = PersistenceHelper()\n self.assertEqual(persistence_helper._patches_applied, False)\n self.assertEqual(manager_inits, self.get_manager_inits())\n\n self.assertEqual(persistence_helper.setup(), None)\n self.assertEqual(persistence_helper._patches_applied, True)\n self.assertNotEqual(manager_inits, self.get_manager_inits())\n\n # Clean up after ourselves.\n persistence_helper._unpatch()\n self.assertEqual(persistence_helper._patches_applied, False)\n self.assertEqual(manager_inits, self.get_manager_inits())", "title": "" }, { "docid": "ddcde94e79e263f5bdc443caa08c31ae", "score": "0.61094993", "text": "def test__get_riak_managers_for_cleanup(self):\n persistence_helper = PersistenceHelper()\n managers = [\n FakeRiakManagerForCleanup('bucket1'),\n FakeRiakManagerForCleanup('bucket2'),\n FakeRiakManagerForCleanup('bucket2'),\n FakeRiakManagerForCleanup('bucket1'),\n FakeRiakManagerForCleanup('bucket3'),\n ]\n persistence_helper._riak_managers.extend(managers)\n self.assertEqual(\n list(persistence_helper._get_riak_managers_for_cleanup()),\n list(reversed(zip([True, True, False, False, True], managers))))", "title": "" }, { "docid": "8ca53aa0b103fce9c7d6d67efdf073f2", "score": "0.60978365", "text": "def test_twice_dependent_object_import(self):\n pass", "title": "" }, { "docid": "3a81b99cc50a64905037b06fd15f9940", "score": "0.6092088", "text": "def test_get_riak_manager_async(self):\n persistence_helper = self.add_helper(PersistenceHelper(use_riak=True))\n manager = persistence_helper.get_riak_manager()\n 
self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._TxRiakManager)\n self.assertEqual(persistence_helper._riak_managers, [manager])", "title": "" }, { "docid": "7a6d1ad4bd1b92e1fec460ed37a66e6d", "score": "0.6081467", "text": "def test_register_dynamic_plugin_manager(self):\n pass", "title": "" }, { "docid": "3e4836cdb0510450ed04f30256763e3a", "score": "0.60474753", "text": "def testCheckTestDependencies(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency_helper.CheckTestDependencies(verbose_output=False)", "title": "" }, { "docid": "a84e61cf11feb03e2084d974947b6243", "score": "0.60340434", "text": "def test_get_riak_manager_sync(self):\n persistence_helper = self.add_helper(\n PersistenceHelper(use_riak=True, is_sync=True))\n manager = persistence_helper.get_riak_manager()\n self.add_cleanup(manager.close_manager)\n self.assertIsInstance(manager, self._RiakManager)\n self.assertEqual(persistence_helper._riak_managers, [manager])", "title": "" }, { "docid": "1824aae821b7033e671de2e5af27c2b2", "score": "0.600903", "text": "def test_list_dependent_assets2(self):\n pass", "title": "" }, { "docid": "f7856fb02345fc44a1fc9996cefbede5", "score": "0.60036385", "text": "def setUp(self):\n super(ExtensionTestsMixin, self).setUp()\n\n self.manager = None", "title": "" }, { "docid": "e9fae8bce1dd0e5fc2c630e360d526f0", "score": "0.5996312", "text": "def test_list_dependent_assets1(self):\n pass", "title": "" }, { "docid": "664faa6ff7e9af1d2933909b522fbb33", "score": "0.59953845", "text": "def test_dependency_interface():\n c = TestClient()\n conanfile = textwrap.dedent(\"\"\"\n from conans import ConanFile\n class Pkg(ConanFile):\n name = \"dep\"\n version = \"1.0\"\n homepage = \"myhome\"\n url = \"myurl\"\n license = \"MIT\"\n \"\"\")\n user = textwrap.dedent(\"\"\"\n from conan import ConanFile\n class User(ConanFile):\n requires = \"dep/1.0\"\n def generate(self):\n self.output.info(\"HOME: {}\".format(self.dependencies[\"dep\"].homepage))\n self.output.info(\"URL: {}\".format(self.dependencies[\"dep\"].url))\n self.output.info(\"LICENSE: {}\".format(self.dependencies[\"dep\"].license))\n self.output.info(\"RECIPE: {}\".format(self.dependencies[\"dep\"].recipe_folder))\n self.output.info(\"CONANDATA: {}\".format(self.dependencies[\"dep\"].conan_data))\n\n \"\"\")\n c.save({\"dep/conanfile.py\": conanfile,\n \"dep/conandata.yml\": \"\",\n \"user/conanfile.py\": user})\n c.run(\"create dep\")\n c.run(\"install user\")\n assert \"conanfile.py: HOME: myhome\" in c.out\n assert \"conanfile.py: URL: myurl\" in c.out\n assert \"conanfile.py: LICENSE: MIT\" in c.out\n assert \"conanfile.py: RECIPE:\" in c.out\n assert \"conanfile.py: CONANDATA: {}\" in c.out", "title": "" }, { "docid": "63b31864b8b47d1ed72467af2ccb0f70", "score": "0.5989321", "text": "def setup(cls):\n super(TestRemoveAndDependencies, cls).setup()\n cls.DEPENDENCY_PACKAGE_ID = PackageId(\n cls.DEPENDENCY_TYPE, cls.DEPENDENCY_PUBLIC_ID\n )\n result = cls.runner.invoke(\n cli,\n [\"-v\", \"DEBUG\", \"add\", \"--local\", cls.ITEM_TYPE, str(cls.ITEM_PUBLIC_ID)],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "title": "" }, { "docid": "7ed62c124961daf4f45fe74b34ce3f06", "score": "0.5983793", "text": "def test_list_dependent_assets(self):\n pass", "title": "" }, { "docid": "f1a2264ab9d811fc3f943d92fe60d235", "score": "0.59833866", 
"text": "def test_check_dependencies_with_found(self):\n self.spy_on(check_install, op=kgb.SpyOpMatchAny([\n {\n 'args': (['cm', 'version'],),\n 'op': kgb.SpyOpReturn(True),\n },\n ]))\n\n client = self.build_client(setup=False)\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])", "title": "" }, { "docid": "be112e4cc747532a3d9a07b8d4f7f641", "score": "0.5916842", "text": "def test_depends_on_1(depends_on_1):\n assert depends_on_1.foo.value == 2", "title": "" }, { "docid": "b43541209a12440d69cc0ca040b9407e", "score": "0.5902296", "text": "def test_subsystems(self):\n pass", "title": "" }, { "docid": "68777ca6ff5a49022bde2ba54482ec3f", "score": "0.58901614", "text": "def test_init(self):\n manager = MovieManager()\n self.assertEqual([], manager.movies)", "title": "" }, { "docid": "a14c418519a4b23cf15b3a639d581c3b", "score": "0.58843696", "text": "def testInitialize(self):\n dependency_definition = dependencies.DependencyDefinition('test')\n self.assertIsNotNone(dependency_definition)", "title": "" }, { "docid": "90ffc253dc5c57241f1b4dad548561ee", "score": "0.5860845", "text": "def test_check_dependencies_with_missing(self):\n self.spy_on(check_install, op=kgb.SpyOpReturn(False))\n\n client = self.build_client(setup=False)\n\n message = \"Command line tools ('cm') are missing.\"\n\n with self.assertRaisesMessage(SCMClientDependencyError, message):\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])", "title": "" }, { "docid": "db98593b0295d4e7aab709f43e33b238", "score": "0.5858061", "text": "def test_depends_on_2(depends_on_2):\n assert depends_on_2.foo.value == 1", "title": "" }, { "docid": "3b4debacfa72fbdb647ca8ee95a5af48", "score": "0.5845471", "text": "def test_ParameterManagerGenerator_concurrent():\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n data_manager = mock_data_manager(components_1)\n\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" in apm.components_list\n\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n components_2 = {\"1\": mock_component(), \"2\": mock_component()}\n data_manager_1 = mock_data_manager(components_1)\n data_manager_2 = mock_data_manager(components_2)\n\n pmg = ParameterManagerGenerator(\n [data_manager_1, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n multi_apms = pmg.parameter_managers()\n assert len(multi_apms) == 1\n multi_apm = multi_apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n for apm in multi_apm.apm_list:\n assert isinstance(apm, active_parameter_manager)\n assert \"scale\" in multi_apm.apm_list[0].components_list\n assert \"decay\" in multi_apm.apm_list[0].components_list\n assert \"absorption\" in multi_apm.apm_list[0].components_list\n assert \"1\" in multi_apm.apm_list[1].components_list\n assert \"2\" in multi_apm.apm_list[1].components_list\n\n # now try fixing a component\n data_manager.fixed_components = 
[\"absorption\"]\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"concurrent\",\n )\n apms = pmg.parameter_managers()\n assert len(apms) == 1\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" not in apm.components_list", "title": "" }, { "docid": "b968dd89e057c6e652a0baab457fe523", "score": "0.581443", "text": "def test_require():", "title": "" }, { "docid": "9276a557eba741fef8c7cc01c27a07c6", "score": "0.5788804", "text": "def test_class_dependencies(self):\n dependencies = get_class_dependencies(ParentBlock)\n self.assertIsNotNone(dependencies)\n self.assertIn(\"nio\", dependencies)\n self.assertEqual(dependencies[\"nio\"][1], \"2.0.1\")", "title": "" }, { "docid": "d82b8545c846a85b2b3ab2fe09b20028", "score": "0.5764533", "text": "def test_access_sales_manager(self):\n SaleOrder = self.env['sale.order'].with_context(tracking_disable=True)\n # Manager can see the SO which is assigned to another salesperson\n self.order.read()\n # Manager can change a salesperson of the SO\n self.order.write({'user_id': self.company_data['default_user_salesman'].id})\n # Manager can create the SO for other salesperson\n sale_order = SaleOrder.create({\n 'partner_id': self.partner_a.id,\n 'user_id': self.company_data['default_user_salesman'].id\n })\n self.assertIn(sale_order.id, SaleOrder.search([]).ids, 'Sales manager should be able to create the SO of other salesperson')\n # Manager can confirm the SO\n sale_order.action_confirm()\n # Manager can not delete confirmed SO\n with self.assertRaises(UserError):\n sale_order.unlink()\n # Manager can delete the SO of other salesperson if SO is in 'draft' or 'cancel' state\n self.order.unlink()\n self.assertNotIn(self.order.id, SaleOrder.search([]).ids, 'Sales manager should be able to delete the SO')\n\n # Manager can create a Sales Team\n india_channel = self.env['crm.team'].with_context(tracking_disable=True).create({\n 'name': 'India',\n })\n self.assertIn(india_channel.id, self.env['crm.team'].search([]).ids, 'Sales manager should be able to create a Sales Team')\n # Manager can edit a Sales Team\n india_channel.write({'name': 'new_india'})\n self.assertEqual(india_channel.name, 'new_india', 'Sales manager should be able to edit a Sales Team')\n # Manager can delete a Sales Team\n india_channel.unlink()\n self.assertNotIn(india_channel.id, self.env['crm.team'].search([]).ids, 'Sales manager should be able to delete a Sales Team')", "title": "" }, { "docid": "4535c62a2c36f66c6e5c2bb17848a6b2", "score": "0.5746144", "text": "def test_service_support(self):\n self.assertFalse(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "title": "" }, { "docid": "cc757542fce8bea577ecba6b5af6e409", "score": "0.57425004", "text": "def test_rasterized_manager(self):\n rasterized_registry = self.manager._rasterized_registry()\n self.assertEqual(set(self.manager.registry), set(rasterized_registry))\n rast_manager = Manager(rasterized_registry)\n self.assertEqual(set(self.manager.registry), set(rast_manager.registry))\n self.assertEqual(self.manager.synonyms, rast_manager.synonyms)\n for prefix in self.manager.registry:\n with self.subTest(prefix=prefix):\n self.assertEqual(\n self.manager.is_deprecated(prefix),\n rast_manager.is_deprecated(prefix),\n )\n self.assertEqual(\n self.manager.get_example(prefix),\n 
rast_manager.get_example(prefix),\n )\n self.assertEqual(\n self.manager.get_uri_format(prefix),\n rast_manager.get_uri_format(prefix),\n )\n self.assertEqual(\n self.manager.get_name(prefix),\n rast_manager.get_name(prefix),\n )\n self.assertEqual(\n self.manager.get_pattern(prefix),\n rast_manager.get_pattern(prefix),\n )\n self.assertEqual(\n self.manager.get_preferred_prefix(prefix) or prefix,\n rast_manager.get_preferred_prefix(prefix),\n )\n self.assertEqual(\n self.manager.get_synonyms(prefix),\n rast_manager.get_synonyms(prefix),\n )\n self.assertEqual(\n self.manager.get_depends_on(prefix),\n rast_manager.get_depends_on(prefix),\n )\n self.assertEqual(\n self.manager.get_appears_in(prefix),\n rast_manager.get_appears_in(prefix),\n )\n self.assertEqual(\n self.manager.get_provides_for(prefix),\n rast_manager.get_provides_for(prefix),\n )\n self.assertEqual(\n self.manager.get_provided_by(prefix),\n rast_manager.get_provided_by(prefix),\n )\n self.assertEqual(\n self.manager.get_has_canonical(prefix),\n rast_manager.get_has_canonical(prefix),\n )\n self.assertEqual(\n self.manager.get_canonical_for(prefix),\n rast_manager.get_canonical_for(prefix),\n )\n self.assertEqual(\n self.manager.get_part_of(prefix),\n rast_manager.get_part_of(prefix),\n )\n self.assertEqual(\n self.manager.get_has_parts(prefix),\n rast_manager.get_has_parts(prefix),\n )", "title": "" }, { "docid": "8cd020800075d5ee50130932c5b72c22", "score": "0.57351416", "text": "def test_cleanup_asserts_riak_managers_closed(self):\n persistence_helper = PersistenceHelper(assert_closed=True)\n manager = FakeRiakManagerForCleanup('bucket1', [], client_closed=False)\n persistence_helper._riak_managers.append(manager)\n return self.assertFailure(\n persistence_helper.cleanup(), PersistenceHelperError)", "title": "" }, { "docid": "0f942303e8286de40a787cccb56575e3", "score": "0.57312536", "text": "def test_list_dependent_assets3(self):\n pass", "title": "" }, { "docid": "1ae097d3434f5a0516fceca3b2d68ef6", "score": "0.5729894", "text": "def test_find_dependencies_using_pack_metadata(mocker):\n mock_pack_meta_file = {\n \"dependencies\": {\n \"dependency_pack1\": {\n \"mandatory\": False,\n \"display_name\": \"dependency pack 1\",\n },\n \"dependency_pack2\": {\n \"mandatory\": False,\n \"display_name\": \"dependency pack 2\",\n },\n \"dependency_pack3\": {\n \"mandatory\": False,\n \"display_name\": \"dependency pack 3\",\n },\n }\n }\n\n dependencies_from_id_set = {\n \"dependency_pack1\": {\"mandatory\": False, \"display_name\": \"dependency pack 1\"},\n \"dependency_pack2\": {\"mandatory\": True, \"display_name\": \"dependency pack 2\"},\n \"dependency_pack3\": {\"mandatory\": True, \"display_name\": \"dependency pack 3\"},\n \"dependency_pack4\": {\"mandatory\": True, \"display_name\": \"dependency pack 4\"},\n }\n\n mocker.patch(\n \"demisto_sdk.commands.find_dependencies.find_dependencies.PackDependencies.get_metadata_from_pack\",\n return_value=mock_pack_meta_file,\n )\n\n first_level_dependencies = PackDependencies.update_dependencies_from_pack_metadata(\n \"\", dependencies_from_id_set\n )\n\n assert not first_level_dependencies.get(\"dependency_pack2\", {}).get(\"mandatory\")\n assert not first_level_dependencies.get(\"dependency_pack3\", {}).get(\"mandatory\")\n assert first_level_dependencies.get(\"dependency_pack4\", {}).get(\"mandatory\")", "title": "" }, { "docid": "d2518d9fdbc99754f20b88de26b48206", "score": "0.5728671", "text": "def test_installService(self):\n IService(self.store).startService()\n 
installOn(self.manager, self.store)\n self.assertTrue(self.manager.running)\n self.manager.deleteFromStore()\n self.assertFalse(self.manager.running)", "title": "" }, { "docid": "c607093a386ef1752dfa54b8111b2b65", "score": "0.57268596", "text": "def test_public_manager_call(self):\n\n # GIVEN public access method to model's manager\n\n # WHEN fetching data listing using unsupported call method\n response = self.api.GET(self.app_label, self.model_name2, params={'call': 'call2'})\n\n # THEN it should fail\n self.assertTrue(response.error)\n\n # -----\n\n # WHEN fetching data listing using supported call method\n response = self.api.GET(self.app_label, self.model_name2, params={'call': 'call'})\n\n # THEN it should succeed\n self.assertTrue(response.success)", "title": "" }, { "docid": "5792dd285b0f090ac4bbc65d372d1511", "score": "0.5719521", "text": "def test_service_support(self):\n self.assertTrue(self.service_class.supports_bug_trackers)\n self.assertTrue(self.service_class.supports_repositories)", "title": "" }, { "docid": "9ca96f041545f47c8a17fc0e4b6dc4e3", "score": "0.5719347", "text": "def testDependencies(self, mock_get_dependencies, mock_stacktrace):\n uma_data = self._GetDummyUMAData()\n dependencies = {'src': Dependency('src', 'https://repo', 'rev')}\n mock_get_dependencies.return_value = dependencies\n stack = stacktrace.CallStack(0, frame_list=[\n stacktrace.StackFrame(0, 'src', 'func', 'a.cc', 'src/a.cc', [5])])\n stacktrace_field = stacktrace.Stacktrace([stack], stack)\n mock_stacktrace.return_value = stacktrace_field\n\n self.assertEqual(uma_data.dependencies, dependencies)\n mock_get_dependencies.assert_called_with(uma_data.stacktrace.stacks)", "title": "" }, { "docid": "9a537d7b4b8ae0993cf3788dc6bc773d", "score": "0.5719072", "text": "def test_upgrade_and_dependency_not_removed_caused_required_by_another_item(self):\n assert self.DEPENDENCY_PUBLIC_ID in self.load_config().protocols\n # do not add dependencies for the package\n\n with self.with_oef_installed(), self.with_config_update(), patch(\n \"aea.cli.add._add_item_deps\"\n ):\n result = self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n catch_exceptions=True,\n )\n assert result.exit_code == 0\n assert self.DEPENDENCY_PUBLIC_ID in self.load_config().protocols", "title": "" }, { "docid": "61656c2ad0f50625cddb62d03d307994", "score": "0.57187223", "text": "def get_manager():\n return __manager__", "title": "" }, { "docid": "801e4fe6cb21b34c22f504997e44da33", "score": "0.5715073", "text": "def test_build_all_dependencies_graph(self, mocker):\n\n def mock_find_pack_dependencies(pack_id, *_, **__):\n dependencies = {\n \"pack1\": [(\"pack2\", True), (\"pack3\", True)],\n \"pack2\": [(\"pack3\", True), (\"pack2\", True)],\n \"pack3\": [],\n \"pack4\": [(\"pack6\", False)],\n }\n\n dependencies_items = {\n \"pack1\": {\n (\"type_item_a\", \"item_a\"): {\n \"pack2\": [(\"type_item_2\", \"item2\")],\n \"pack3\": [(\"type_item_3\", \"item3\")],\n }\n },\n \"pack2\": {\n (\"type_item_b\", \"item_b\"): {\n \"pack3\": [(\"type_item_3\", \"item3\")],\n \"pack2\": [(\"type_item_2\", \"item2\")],\n }\n },\n \"pack3\": {},\n \"pack4\": {\n (\"type_item_c\", \"item_c\"): {\"pack4\": [(\"type_item_4\", \"item4\")]}\n },\n }\n\n return dependencies[pack_id], dependencies_items[pack_id]\n\n {\n \"Expanse Behavior Severity Update\": {\n \"Expanse\": \"ExpanseParseRawIncident\",\n \"CommonScripts\": 
\"IsGreaterThan\",\n },\n \"ExpanseParseRawIncident\": {\"Expanse\": \"ExpanseParseRawIncident\"},\n \"Expanse Appearance\": {\"Expanse\": \"incident_expanseseverity\"},\n \"Expanse Behavior\": {\"Expanse\": \"Expanse Behavior Severity Update\"},\n }\n mocker.patch(\n \"demisto_sdk.commands.find_dependencies.find_dependencies.PackDependencies._find_pack_dependencies\",\n side_effect=mock_find_pack_dependencies,\n )\n pack_ids = [\"pack1\", \"pack2\", \"pack3\", \"pack4\"]\n dependency_graph = PackDependencies.build_all_dependencies_graph(\n pack_ids, {}, False\n )\n\n # Asserting Dependencies (mandatory and non-mandatory)\n assert [n for n in dependency_graph.neighbors(\"pack1\")] == [\"pack2\", \"pack3\"]\n assert [n for n in dependency_graph.neighbors(\"pack2\")] == [\"pack3\"]\n assert [n for n in dependency_graph.neighbors(\"pack3\")] == []\n assert [n for n in dependency_graph.neighbors(\"pack4\")] == [\"pack6\"]\n\n # Asserting mandatory dependencies\n nodes = dependency_graph.nodes(data=True)\n\n assert nodes[\"pack1\"][\"mandatory_for_packs\"] == []\n assert nodes[\"pack1\"][\"depending_on_items_mandatorily\"] == {\n (\"type_item_a\", \"item_a\"): {\n \"pack2\": [(\"type_item_2\", \"item2\")],\n \"pack3\": [(\"type_item_3\", \"item3\")],\n }\n }\n assert nodes[\"pack1\"][\"depending_on_packs\"] == [\n (\"pack2\", True),\n (\"pack3\", True),\n ]\n assert nodes[\"pack1\"][\"mandatory_for_items\"] == {}\n\n assert nodes[\"pack2\"][\"mandatory_for_packs\"] == [\"pack1\"]\n assert nodes[\"pack2\"][\"depending_on_items_mandatorily\"] == {\n (\"type_item_b\", \"item_b\"): {\n \"pack3\": [(\"type_item_3\", \"item3\")],\n \"pack2\": [(\"type_item_2\", \"item2\")],\n }\n }\n assert nodes[\"pack2\"][\"depending_on_packs\"] == [\n (\"pack3\", True),\n (\"pack2\", True),\n ]\n assert nodes[\"pack2\"][\"mandatory_for_items\"] == {\n (\"type_item_2\", \"item2\"): {\"pack1\": [(\"type_item_a\", \"item_a\")]}\n }\n\n assert nodes[\"pack3\"][\"mandatory_for_packs\"] == [\"pack1\", \"pack2\"]\n assert nodes[\"pack3\"][\"depending_on_items_mandatorily\"] == {}\n assert nodes[\"pack3\"][\"depending_on_packs\"] == []\n assert nodes[\"pack3\"][\"mandatory_for_items\"] == {\n (\"type_item_3\", \"item3\"): {\n \"pack1\": [(\"type_item_a\", \"item_a\")],\n \"pack2\": [(\"type_item_b\", \"item_b\")],\n }\n }\n\n assert nodes[\"pack4\"][\"mandatory_for_packs\"] == []\n assert nodes[\"pack4\"][\"depending_on_items_mandatorily\"] == {\n (\"type_item_c\", \"item_c\"): {\"pack4\": [(\"type_item_4\", \"item4\")]}\n }\n assert nodes[\"pack4\"][\"depending_on_packs\"] == [(\"pack6\", False)]\n assert nodes[\"pack4\"][\"mandatory_for_items\"] == {}\n\n assert nodes[\"pack6\"][\"mandatory_for_packs\"] == []\n assert nodes[\"pack6\"][\"depending_on_items_mandatorily\"] == {}\n assert nodes[\"pack6\"][\"depending_on_packs\"] == []\n assert nodes[\"pack6\"][\"mandatory_for_items\"] == {}", "title": "" }, { "docid": "c9119c4129ffbeb636d2a66d3b4fb14a", "score": "0.57110345", "text": "def test_upgrade_and_dependency_removed(self):\n assert self.DEPENDENCY_PUBLIC_ID in self.load_config().protocols\n\n # add empty component config to aea-config.py\n agent_config = self.load_config()\n component_id = ComponentId(self.DEPENDENCY_TYPE, self.DEPENDENCY_PUBLIC_ID)\n agent_config.component_configurations[component_id] = {} # just empty\n agent_config.component_configurations[\n ComponentId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n ] = {} # just empty\n self.dump_config(agent_config)\n\n agent_config = self.load_config()\n assert 
component_id in agent_config.component_configurations\n\n with patch(\n \"aea.cli.upgrade.ItemUpgrader.check_upgrade_is_required\",\n return_value=self.ITEM_PUBLIC_ID.version,\n ), patch(\"aea.cli.add._add_item_deps\"):\n result = self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n catch_exceptions=True,\n )\n try:\n assert result.exit_code == 0\n\n assert self.DEPENDENCY_PUBLIC_ID not in self.load_config().protocols\n agent_config = self.load_config()\n\n # check configuration was removed too\n assert component_id not in agent_config.component_configurations\n assert (\n ComponentId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n in agent_config.component_configurations\n )\n finally:\n # restore component removed\n result = self.runner.invoke(\n cli,\n [\n \"-v\",\n \"DEBUG\",\n \"add\",\n *self.LOCAL,\n self.DEPENDENCY_TYPE,\n f\"{self.DEPENDENCY_PUBLIC_ID.author}/{self.DEPENDENCY_PUBLIC_ID.name}:latest\",\n ],\n catch_exceptions=True,\n )\n assert self.DEPENDENCY_PUBLIC_ID in self.load_config().protocols", "title": "" }, { "docid": "f64b9fa9d41511977c835cf6b33aca32", "score": "0.57029384", "text": "def test_package(self):\n pass", "title": "" }, { "docid": "40659378b4c2d5ce104abe0ff4cbdc68", "score": "0.56993955", "text": "def __init__(self):\n self.dependsOn = [] # Expected to contain Dependency objects.", "title": "" }, { "docid": "61eb5bb4643be1aaca21c978942d7579", "score": "0.56953895", "text": "def test_default_manager(self):\n self.assertIsInstance(FlatPage._default_manager, UrlNodeManager)\n self.assertIsInstance(FlatPage.objects.all(), UrlNodeQuerySet)", "title": "" }, { "docid": "ccebfd7c654eb076162a69da941b0e79", "score": "0.5679901", "text": "def test_install(self):\n pass", "title": "" }, { "docid": "2c4e358f0d66c177bec8fa5baebff91a", "score": "0.56755084", "text": "def test_module(self):\n pass", "title": "" }, { "docid": "4fb7a122e437e2874f32f706a9090523", "score": "0.5670345", "text": "def _setManager(self, mgr: \"StrategyManager\") -> None:", "title": "" }, { "docid": "89929534a4962c10a690a537d8396063", "score": "0.5669346", "text": "def unitary_test():", "title": "" }, { "docid": "78e4e8c2bdb52a69c36901d4e1f6133b", "score": "0.5668781", "text": "def test_classical_as_conductor_various(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n group='Group 1', conductor='Conductor 1', composer='Composer 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 2', title='Title 2',\n group='Group 2', conductor='Conductor 2', composer='Composer 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 9)\n artist = Artist.objects.get(name='Conductor 2')\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n song_1 = Song.objects.get(title='Title 1')\n song_2 = Song.objects.get(title='Title 2')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_2)])\n self.assertContains(response, '1 song')\n self.assertContains(response, str(album))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n \n for artist in 
Artist.objects.exclude(name='Artist 1'):\n self.assertContains(response, str(artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n for artist in Artist.objects.filter(name='Artist 1'):\n self.assertNotContains(response, str(artist))\n self.assertNotContains(response, reverse('exordium:artist', args=(artist.normname,)))\n\n self.assertNotContains(response, str(song_1))\n self.assertNotContains(response, song_1.get_download_url_html5())\n self.assertNotContains(response, song_1.get_download_url_m3u())\n self.assertContains(response, str(song_2))\n self.assertContains(response, song_2.get_download_url_html5())\n self.assertContains(response, song_2.get_download_url_m3u())", "title": "" }, { "docid": "b3eba875f69a52a71e2dfe9d43989f63", "score": "0.5668527", "text": "def create_prerequisites(cls, dbmanager):\n # nothing to do in base class\n pass", "title": "" }, { "docid": "0db690e25b15756a2cde6ffb10dcc6c0", "score": "0.56669307", "text": "def test_dependency_glue(self):\n actor1 = self.create(ActorItem, UML.Actor)\n actor2 = self.create(ActorItem, UML.Actor)\n dep = self.create(DependencyItem)\n\n glued = self.allow(dep, dep.head, actor1)\n assert glued\n\n self.connect(dep, dep.head, actor1)\n\n glued = self.allow(dep, dep.tail, actor2)\n assert glued", "title": "" }, { "docid": "1fc3373bc66122775a1191e949fb5838", "score": "0.56659716", "text": "def test_dependent_object_import(self):\n # Initialize Importers\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,name in enumerate(self.usernames):\n user_manager.update_kvs(field_name='username', value=name, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row in range(self.n_objs):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n #: Test corresponding UserProfile has been returned\n for row in range(self.n_objs):\n objs = up_manager.get_objs_and_meta(row) #: Returns a list of objects only if manytomany, o/w just 1\n\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, UserProfile)\n self.assertIsNotNone(objs[0].query)\n\n self.assertEqual(objs[0].object.user.username, self.usernames[row])", "title": "" }, { "docid": "abd110ed41c653a92bbd99e5578d8edf", "score": "0.565967", "text": "def test_update_system(self):\n pass", "title": "" }, { "docid": "5ec3d5d6dcf6536786590e6a65dce3fc", "score": "0.56583744", "text": "def test_lockmgr(self):\n with LockMgr('test_lockmgr') as lck:\n lck.lock('test_lockmgr2')\n # Attempt to lock test_lockmgr again, which should cause a Locked exception\n with self.assertRaises(Locked, msg=\"getting lock test_lockmgr should raise Locked\"):\n lck.lock('test_lockmgr')\n self.assertTrue(is_locked('test_lockmgr2'), msg=\"is_locked('test_lockmgr2') == True\")\n # Confirm that both test_lockmgr + test_lockmgr2 are unlocked after the with statement.\n 
self.assertFalse(is_locked('test_lockmgr'), msg=\"is_locked('test_lockmgr') == False\")\n self.assertFalse(is_locked('test_lockmgr2'), msg=\"is_locked('test_lockmgr2') == False\")", "title": "" }, { "docid": "f691daf9c55733c89b86cd102b90ca3f", "score": "0.56545943", "text": "def test_check_soft_dependencies_raises_error():\n with pytest.raises(ModuleNotFoundError, match=r\".* soft dependency .*\"):\n _check_soft_dependencies(\"unavailable_module\")\n\n with pytest.raises(ModuleNotFoundError, match=r\".* soft dependency .*\"):\n _check_soft_dependencies(\"unavailable_module_1\", \"unavailable_module_2\")", "title": "" }, { "docid": "b0a95e8deb2f35f80b8b64f11681365e", "score": "0.564334", "text": "def setUp(self):\n self.test_graph = class_dependency.JavaClassDependencyGraph()", "title": "" }, { "docid": "45555125987b2e86c75a425e9de3818a", "score": "0.56360906", "text": "def test_action_mocked(self):\n with self.mock_global_connection:\n self.assertEqual(0, Action.count())\n\n manager = Manager(self.connection)\n self.assertFalse(manager.is_populated())\n manager.populate()\n self.assertTrue(manager.is_populated())\n\n self.assertEqual(1, Action.count())\n actions = Action.ls()\n action = actions[0]\n self.assertEqual(manager.module_name, action.resource)\n self.assertEqual('populate', action.action)", "title": "" }, { "docid": "26f5d53e0c1790df5adbaadb2957e6b0", "score": "0.5634474", "text": "async def test_dependencies(self):\n response = await self.collect(get_request_text=self.xml)\n expected_entities = [\n dict(\n key=\"12345\",\n url=\"https://owasp_dependency_check#l1_12345\",\n file_name=self.file_name,\n file_path=self.file_path,\n )\n ]\n self.assert_measurement(response, value=\"1\", entities=expected_entities)", "title": "" }, { "docid": "6d5996462f3368541b1e1de520d3c3be", "score": "0.5634342", "text": "def test_check_job_dependencies_has_unfinished_dependencies(database):\n sess = database.session\n sub = SubmissionFactory(submission_id=1)\n job = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['finished'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'],\n number_of_errors=0)\n job_2 = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['waiting'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'])\n job_3 = JobFactory(submission_id=sub.submission_id, job_status_id=JOB_STATUS_DICT['waiting'],\n job_type_id=JOB_TYPE_DICT['csv_record_validation'], file_type_id=FILE_TYPE_DICT['award'],\n number_of_errors=0)\n sess.add_all([sub, job, job_2, job_3])\n sess.commit()\n\n # Job 1 finished, it is a prerequisite for job 2 (waiting)\n job_dep = JobDependency(job_id=job_2.job_id, prerequisite_id=job.job_id)\n # Job 3 is also a prerequisite of job 2, it's not done, job 2 should stay in \"waiting\"\n job_dep_2 = JobDependency(job_id=job_2.job_id, prerequisite_id=job_3.job_id)\n sess.add_all([job_dep, job_dep_2])\n sess.commit()\n\n check_job_dependencies(job.job_id)\n\n assert job_2.job_status_id == JOB_STATUS_DICT['waiting']", "title": "" }, { "docid": "05f654343e11a1762a41ba605470160d", "score": "0.56253386", "text": "def test_resource_manager_on_driver():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"resource-manager\": {\n \"server\": \"driver\",\n \"port\": 4000,\n \"config\": {\n \"read_reqs\": 123,\n \"read_data\": 456,\n \"write_reqs\": 789,\n \"write_data\": 321\n }\n }\n }\n \n template_dir = 
tempfile.mkdtemp(suffix=\"test-resource-manager-on-driver-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n client = ResourceManagerClient('127.0.0.1', 4000)\n mgr_config = client.read_config()\n assert mgr_config == config[\"resource-manager\"][\"config\"], \\\n \"Resource manager config does not match the one in the workflow config\"\n \n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n assert execute.didrun\n \n # FIXME: For mysterious reasons, the check below does not work on Travis-CI.\n # Somehow, read_config() succeeds despite the fact that\n # the resource manager server was already terminated??\n if os.environ.get('TRAVIS', '') == 'true':\n pytest.skip(\"Skipping resource manager shutdown check on Travis-CI\")\n\n # Server should not be running any more after workflow exits.\n with pytest.raises(TimeoutError):\n client2 = ResourceManagerClient('127.0.0.1', 4000)\n client2.read_config()", "title": "" }, { "docid": "4139dc9a3d495ee705c4fbd6a2e12687", "score": "0.561596", "text": "def test_index_dependencies(self):\n bar = create_library('bar')\n barf = create_library('barf')\n addon = create_addon('foo')\n addon.latest.dependency_add(bar.latest)\n addon.latest.dependency_add(barf.latest)\n es = self.es\n es.refresh()\n\n for lib in (bar, barf):\n r = es.search(query=FieldQuery(FieldParameter('dependencies',\n lib.id)))\n eq_(r['hits']['total'], 1)\n eq_(r['hits']['hits'][0]['_source']['name'], addon.name)\n return (addon, bar, barf)", "title": "" }, { "docid": "e587ddf45817daa1a41ab9d1cf9a464b", "score": "0.56081504", "text": "def test_classical_as_conductor(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n group='Group 1', conductor='Conductor 1', composer='Composer 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 2',\n group='Group 2', conductor='Conductor 2', composer='Composer 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 8)\n artist = Artist.objects.get(name='Conductor 2')\n\n self.assertEqual(Album.objects.count(), 1)\n album = Album.objects.get()\n\n self.assertEqual(Song.objects.count(), 2)\n song_1 = Song.objects.get(title='Title 1')\n song_2 = Song.objects.get(title='Title 2')\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_2)])\n self.assertContains(response, '1 song')\n self.assertContains(response, str(album))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n \n for artist in Artist.objects.exclude(name='Various'):\n self.assertContains(response, str(artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n\n self.assertNotContains(response, str(song_1))\n self.assertNotContains(response, song_1.get_download_url_html5())\n self.assertNotContains(response, song_1.get_download_url_m3u())\n self.assertContains(response, str(song_2))\n self.assertContains(response, song_2.get_download_url_html5())\n self.assertContains(response, song_2.get_download_url_m3u())", "title": "" }, { "docid": "20554ce57d8244d368311e6eeedf9a0f", "score": "0.56077945", "text": "def 
test_style_guide_manager():\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n options = create_options()\n guide = style_guide.StyleGuideManager(options, formatter=formatter)\n assert guide.default_style_guide.options is options\n assert len(guide.style_guides) == 1", "title": "" }, { "docid": "3923e31dfe03ff7ccef13076b5da493f", "score": "0.56033945", "text": "def test_multi_dependency(self):\n actoritem1 = self.create(ActorItem, UML.Actor)\n actoritem2 = self.create(ActorItem, UML.Actor)\n actor1 = actoritem1.subject\n actor2 = actoritem2.subject\n dep = self.create(DependencyItem)\n\n self.connect(dep, dep.head, actoritem1)\n self.connect(dep, dep.tail, actoritem2)\n\n assert dep.subject\n assert 1 == len(actor1.supplierDependency)\n assert actor1.supplierDependency[0] is dep.subject\n assert 1 == len(actor2.clientDependency)\n assert actor2.clientDependency[0] is dep.subject\n\n # Do the same thing, but now on a new diagram:\n\n diagram2 = self.element_factory.create(UML.Diagram)\n actoritem3 = diagram2.create(ActorItem, subject=actor1)\n actoritem4 = diagram2.create(ActorItem, subject=actor2)\n dep2 = diagram2.create(DependencyItem)\n\n self.connect(dep2, dep2.head, actoritem3)\n cinfo = diagram2.canvas.get_connection(dep2.head)\n assert cinfo is not None\n assert cinfo.connected is actoritem3\n self.connect(dep2, dep2.tail, actoritem4)\n assert dep2.subject is not None\n assert 1 == len(actor1.supplierDependency)\n assert actor1.supplierDependency[0] is dep.subject\n assert 1 == len(actor2.clientDependency)\n assert actor2.clientDependency[0] is dep.subject\n\n assert dep.subject is dep2.subject", "title": "" }, { "docid": "dc34a23a5ca8fba8ef4a1f064a0b3734", "score": "0.55992055", "text": "def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))", "title": "" }, { "docid": "1ef730cb28004b49ceeee7b20492985c", "score": "0.5587341", "text": "def test_0005_ensure_repositories_and_categories_exist(self):\n category = self.create_category(\n name=\"Test 0020 Basic Repository Dependencies\", description=\"Test 0020 Basic Repository Dependencies\"\n )\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n column_maker_repository = self.get_or_create_repository(\n name=column_maker_repository_name,\n description=column_maker_repository_description,\n long_description=column_maker_repository_long_description,\n owner=common.test_user_1_name,\n category=category,\n strings_displayed=[],\n )\n if self.repository_is_new(column_maker_repository):\n self.upload_file(\n column_maker_repository,\n filename=\"column_maker/column_maker.tar\",\n filepath=None,\n valid_tools_only=True,\n uncompress_file=True,\n remove_repo_files_not_in_tar=False,\n commit_message=\"Uploaded column_maker tarball.\",\n strings_displayed=[],\n strings_not_displayed=[],\n )\n emboss_repository = self.get_or_create_repository(\n name=emboss_repository_name,\n 
description=emboss_repository_description,\n long_description=emboss_repository_long_description,\n owner=common.test_user_1_name,\n category=category,\n strings_displayed=[],\n )\n self.upload_file(\n emboss_repository,\n filename=\"emboss/emboss.tar\",\n filepath=None,\n valid_tools_only=True,\n uncompress_file=True,\n remove_repo_files_not_in_tar=False,\n commit_message=\"Uploaded emboss.tar\",\n strings_displayed=[],\n strings_not_displayed=[],\n )\n repository_dependencies_path = self.generate_temp_path(\"test_1020\", additional_paths=[\"emboss\", \"5\"])\n repository_tuple = (\n self.url,\n column_maker_repository.name,\n column_maker_repository.owner,\n self.get_repository_tip(column_maker_repository),\n )\n self.create_repository_dependency(\n repository=emboss_repository,\n repository_tuples=[repository_tuple],\n filepath=repository_dependencies_path,\n )", "title": "" }, { "docid": "de610033b5340c7d06aa247371f57e65", "score": "0.5584518", "text": "def test_cleanup_closes_riak_managers(self):\n persistence_helper = PersistenceHelper(use_riak=True)\n persistence_helper.setup()\n # Manually override assert_closed because we want to check whether the\n # cleanup function closes, no matter what the env var is set to.\n persistence_helper._assert_closed = False\n\n manager = persistence_helper.get_riak_manager()\n self.assertEqual(manager.client._closed, False)\n\n yield persistence_helper.cleanup()\n self.assertEqual(manager.client._closed, True)", "title": "" }, { "docid": "167917de2fe813d0493a449d7ef435eb", "score": "0.5582824", "text": "def test_01(self):\n # User 1\n e1 = Emulator()\n e1.init()\n\n # User 2\n e2 = Emulator()\n e2.init()\n\n Emulator.run_transfer_prepare()\n Emulator.run_transfer_donkies_prepare()\n\n assert TransferDonkies.objects.count() == 2", "title": "" }, { "docid": "3ab630b6fce846b1cc73264c4bdb6ddb", "score": "0.5578736", "text": "def test_get_dependency_list(client, dependency):\n headers = {\"Accept\": \"application/json\"}\n response = client.open(\"/dependency\", method=\"GET\", headers=headers)\n assert response.status_code == 200\n assert len(response.json[\"dependencies\"]) == 1\n assert (\n response.json[\"dependencies\"][0][\"component_version_id\"] == dependency.component_version_id\n )\n assert (\n response.json[\"dependencies\"][0][\"dependency_version_id\"]\n == dependency.dependency_version_id\n )", "title": "" }, { "docid": "d767efad96ca00a670cd555bdde5932f", "score": "0.55770373", "text": "def test_installments_get(self):\n pass", "title": "" }, { "docid": "0cd1e6a00091e7bd94acdd9ff89e39f1", "score": "0.55703855", "text": "def ensure(ctx, connection):\n from pybel.manager.cache_manager import Manager\n ctx.obj = Manager(connection=connection)", "title": "" }, { "docid": "ab97a1e1a9a9d519030e6a522490b810", "score": "0.55699164", "text": "def test_model_manager_does_not_allow_duplicate_qualified_names(self):\n # arrange\n model_manager = ModelManager()\n\n # act\n # loading the first instance of the model object\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n exception_raised = False\n exception_message = \"\"\n try:\n # loading it again\n model_manager.load_model(\"tests.mocks.MLModelMock\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"A model with the same qualified name is already in the ModelManager singleton.\")", "title": "" }, { "docid": "3ed3fc14781e6bf1761a682bda995cca", "score": "0.5567481", "text": 
"def test_manager_instances_success(self):\n spider_name = 'valid_metadata'\n path = 'tests/sample_spiders/' + spider_name\n os.environ['SPIDER_PATH'] = path\n\n m = SpiderManager()\n inst = m.instance(spider_name)\n self.assertEqual(inst.__name__, ValidMetadataSpider.__name__)", "title": "" }, { "docid": "5366c0ca956cfec2b163f3e413332325", "score": "0.55595416", "text": "def test_ParameterManagerGenerator_consecutive():\n components_1 = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n data_manager = mock_data_manager(components_1)\n data_manager.consecutive_refinement_order = [[\"scale\", \"decay\"], [\"absorption\"]]\n\n # Test single dataset case.\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" in apm.components_list\n assert \"absorption\" not in apm.components_list\n apm = apms[1]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" not in apm.components_list\n assert \"decay\" not in apm.components_list\n assert \"absorption\" in apm.components_list\n\n # Test multi dataset case.\n components_2 = {\"1\": mock_component(), \"2\": mock_component()}\n data_manager_2 = mock_data_manager(components_2)\n data_manager_2.consecutive_refinement_order = [[\"1\"], [\"2\"]]\n\n pmg = ParameterManagerGenerator(\n [data_manager, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n multi_apm = apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n apm_1 = multi_apm.apm_list[0]\n assert \"scale\" in apm_1.components_list\n assert \"decay\" in apm_1.components_list\n assert \"absorption\" not in apm_1.components_list\n assert multi_apm.apm_list[1].components_list == [\"1\"]\n multi_apm = apms[1]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n assert multi_apm.apm_list[0].components_list == [\"absorption\"]\n assert multi_apm.apm_list[1].components_list == [\"2\"]\n\n # Test multi dataset case with different number of cycles for each data_manager.\n components_2 = {\"1\": mock_component()}\n data_manager_2 = mock_data_manager(components_2)\n data_manager_2.consecutive_refinement_order = [[\"1\"], [\"2\"]]\n pmg = ParameterManagerGenerator(\n [data_manager, data_manager_2],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n assert pmg.param_lists[0] == [[\"scale\", \"decay\"], [\"absorption\"]]\n assert pmg.param_lists[1] == [[\"1\"]]\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n multi_apm = apms[0]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n apm_1 = multi_apm.apm_list[0]\n assert \"scale\" in apm_1.components_list\n assert \"decay\" in apm_1.components_list\n assert \"absorption\" not in apm_1.components_list\n assert multi_apm.apm_list[1].components_list == [\"1\"]\n multi_apm = apms[1]\n assert isinstance(multi_apm, multi_active_parameter_manager)\n assert multi_apm.apm_list[0].components_list == [\"absorption\"]\n # Only change relative to previous test case.\n assert multi_apm.apm_list[1].components_list == []\n\n # Test fixing the decay parameter.\n data_manager.fixed_components = 
[\"decay\"]\n pmg = ParameterManagerGenerator(\n [data_manager],\n apm_type=active_parameter_manager,\n target=ScalingTarget(),\n mode=\"consecutive\",\n )\n apms = list(pmg.parameter_managers())\n assert len(apms) == 2\n apm = apms[0]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" in apm.components_list\n assert \"decay\" not in apm.components_list\n assert \"absorption\" not in apm.components_list\n apm = apms[1]\n assert isinstance(apm, multi_active_parameter_manager)\n assert \"scale\" not in apm.components_list\n assert \"decay\" not in apm.components_list\n assert \"absorption\" in apm.components_list", "title": "" }, { "docid": "a8e793108638e4c8ba6711ee2a25e0df", "score": "0.5558323", "text": "def test_get_riak_manager_no_riak(self):\n persistence_helper = self.add_helper(PersistenceHelper())\n err = self.assertRaises(\n RuntimeError, persistence_helper.get_riak_manager)\n self.assertTrue(\n 'Use of Riak has been disabled for this test.' in str(err))", "title": "" }, { "docid": "b5f08d309ffc542826874bb09c00384c", "score": "0.55552006", "text": "def mock_data_manager(components):\n dm = Mock()\n dm.components = components\n dm.fixed_components = []\n return dm", "title": "" }, { "docid": "6cc03eadf1217941ab143222319f58f9", "score": "0.55529916", "text": "def testGetInstallRequires(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n install_requires = dependency_helper.GetInstallRequires()\n self.assertEqual(len(install_requires), 1)", "title": "" }, { "docid": "305708935a1424ecc0e1d0cbb58a7f87", "score": "0.55520064", "text": "def test_deps( deps ):\n\n success = []\n error = []\n for dep in deps:\n mod_name = dep[0]\n\n try:\n mod = __import__( mod_name )\n except ImportError:\n print \"FAILURE: Could not import\", mod_name\n error.append( mod_name )\n continue\n\n try:\n mod_version = mod.__version__.split('.')\n requested_mod_version = dep[1].split('.')\n for i in range( len( requested_mod_version ) ):\n if int( mod_version[i] ) < int( requested_mod_version[i] ):\n raise ImportError\n except ImportError:\n print \"FAILURE: Module\", mod_name, \"needs version\", requested_mod_version, \"but version\", mod_version, \"found\"\n error.append( mod_name )\n continue\n except AttributeError:\n# no .__version__\n pass\n\n print \"Success: \", mod_name\n success.append( mod_name )\n\n return ( success, error )", "title": "" }, { "docid": "004dae67b65a63ca6a42c06c574c0d36", "score": "0.55513793", "text": "def setUp(self):\n self.store = yield buildStore(self, None)\n\n def doit(txn):\n return txn.execSQL(schemaText)\n\n yield inTransaction(\n lambda: self.store.newTransaction(\"bonus schema\"), doit\n )\n\n def indirectedTransactionFactory(*a):\n \"\"\"\n Allow tests to replace \"self.store.newTransaction\" to provide\n fixtures with extra methods on a test-by-test basis.\n \"\"\"\n return self.store.newTransaction(*a)\n\n def deschema():\n @inlineCallbacks\n def deletestuff(txn):\n for stmt in dropSQL:\n yield txn.execSQL(stmt)\n return inTransaction(\n lambda *a: self.store.newTransaction(*a), deletestuff\n )\n self.addCleanup(deschema)\n\n from twisted.internet import reactor\n self.node1 = PeerConnectionPool(\n reactor, indirectedTransactionFactory, 0, schema)\n self.node2 = PeerConnectionPool(\n reactor, indirectedTransactionFactory, 0, schema)\n\n class FireMeService(Service, object):\n def 
__init__(self, d):\n super(FireMeService, self).__init__()\n self.d = d\n\n def startService(self):\n self.d.callback(None)\n\n d1 = Deferred()\n d2 = Deferred()\n FireMeService(d1).setServiceParent(self.node1)\n FireMeService(d2).setServiceParent(self.node2)\n ms = MultiService()\n self.node1.setServiceParent(ms)\n self.node2.setServiceParent(ms)\n ms.startService()\n self.addCleanup(ms.stopService)\n yield gatherResults([d1, d2])\n self.store.queuer = self.node1", "title": "" }, { "docid": "d6c47da9f6606d8f0d03d15aaa845f7c", "score": "0.55366486", "text": "def test_only_ml_model_instances_allowed_to_be_stored(self):\n # arrange\n model_manager = ModelManager()\n\n # act\n exception_raised = False\n exception_message = \"\"\n try:\n model_manager.load_model(\"tests.mocks.SomeClass\")\n except Exception as e:\n exception_raised = True\n exception_message = str(e)\n\n # assert\n self.assertTrue(exception_raised)\n self.assertTrue(exception_message == \"ModelManager instance can only hold references to objects of type MLModel.\")", "title": "" }, { "docid": "bff35e43414a17116a61bee720bd8762", "score": "0.5522113", "text": "def testSingleton(self):\r\n self.assertEqual(id(self.res_mgr), id(ReservationManager()))", "title": "" }, { "docid": "9f4d3d61d4af172b8aa4370d322389a5", "score": "0.55155396", "text": "def setUp(self):\n self.store = yield buildStore(self, None)\n\n @inlineCallbacks\n def doit(txn):\n for statement in splitSQLString(schemaText):\n yield txn.execSQL(statement)\n\n yield inTransaction(\n self.store.newTransaction,\n doit,\n label=\"bonus schema\"\n )\n\n def indirectedTransactionFactory(*a, **b):\n \"\"\"\n Allow tests to replace \"self.store.newTransaction\" to provide\n fixtures with extra methods on a test-by-test basis.\n \"\"\"\n return self.store.newTransaction(*a, **b)\n\n def deschema():\n @inlineCallbacks\n def deletestuff(txn):\n for stmt in dropSQL:\n yield txn.execSQL(stmt)\n return inTransaction(\n lambda *a, **b: self.store.newTransaction(*a, **b), deletestuff\n )\n self.addCleanup(deschema)\n\n self.node1 = ControllerQueue(\n reactor, indirectedTransactionFactory, useWorkerPool=False)\n self.node2 = ControllerQueue(\n reactor, indirectedTransactionFactory, useWorkerPool=False)\n\n class FireMeService(Service, object):\n def __init__(self, d):\n super(FireMeService, self).__init__()\n self.d = d\n\n def startService(self):\n self.d.callback(None)\n\n d1 = Deferred()\n d2 = Deferred()\n FireMeService(d1).setServiceParent(self.node1)\n FireMeService(d2).setServiceParent(self.node2)\n ms = MultiService()\n self.node1.setServiceParent(ms)\n self.node2.setServiceParent(ms)\n ms.startService()\n @inlineCallbacks\n def _clean():\n yield ms.stopService()\n self.flushLoggedErrors(CancelledError)\n\n self.addCleanup(_clean)\n yield gatherResults([d1, d2])\n self.store.queuer = self.node1\n\n DummyWorkItem.results = {}", "title": "" }, { "docid": "ad6f307000848cabda1acf09d9a8b0f5", "score": "0.55140245", "text": "def setUp(self):\n self.dbus_mock = MagicMock()\n self.mainloop_mock = MagicMock()\n self.gobject_mock = MagicMock()\n\n modules = {\n 'dbus': self.dbus_mock,\n 'dbus.mainloop.glib': self.mainloop_mock,\n 'gi.repository': self.gobject_mock,\n }\n self.dbus_mock.Interface.return_value.GetManagedObjects.return_value = tests.obj_data.full_ubits\n self.module_patcher = patch.dict('sys.modules', modules)\n self.module_patcher.start()\n from bluezero import tools\n self.module_under_test = tools", "title": "" }, { "docid": "a07fa314c35a66f1c31151ca816fe888", "score": 
"0.55138797", "text": "def test_1_make(self):\n #We can compile all these modules together into a single shared library.\n writer = self.writers.values()[0]\n self.code = writer.make(remake=True, dependencies=self.dependencies)\n self.assertEqual(self.code, 0)", "title": "" }, { "docid": "d9206016f31bd0cb66ecaba5044cee6b", "score": "0.55037886", "text": "async def test_sync_ctx_manager():\n with pytest.raises(RuntimeError):\n with Scope():\n pass", "title": "" }, { "docid": "1ffd29a1773175d03324edff299d67b3", "score": "0.5501115", "text": "def test_multiple_factories(self, mocker):\n sdk_ready_flag = threading.Event()\n\n def _init(self, ready_flag, some, auth_api, streaming_enabled, telemetry_runtime_producer, telemetry_init_consumer, sse_url=None):\n self._ready_flag = ready_flag\n self._synchronizer = mocker.Mock(spec=Synchronizer)\n self._streaming_enabled = False\n self._telemetry_runtime_producer = telemetry_runtime_producer\n self._telemetry_init_consumer = telemetry_init_consumer\n mocker.patch('splitio.sync.manager.Manager.__init__', new=_init)\n\n def _start(self, *args, **kwargs):\n sdk_ready_flag.set()\n mocker.patch('splitio.sync.manager.Manager.start', new=_start)\n\n def _stop(self, *args, **kwargs):\n pass\n mocker.patch('splitio.sync.manager.Manager.stop', new=_stop)\n\n mockManager = Manager(sdk_ready_flag, mocker.Mock(), mocker.Mock(), False, mocker.Mock(), mocker.Mock())\n\n def _make_factory_with_apikey(apikey, *_, **__):\n return SplitFactory(apikey, {}, True, mocker.Mock(spec=ImpressionsManager), mockManager, mocker.Mock(), mocker.Mock(), mocker.Mock())\n\n factory_module_logger = mocker.Mock()\n build_in_memory = mocker.Mock()\n build_in_memory.side_effect = _make_factory_with_apikey\n build_redis = mocker.Mock()\n build_redis.side_effect = _make_factory_with_apikey\n build_localhost = mocker.Mock()\n build_localhost.side_effect = _make_factory_with_apikey\n mocker.patch('splitio.client.factory._LOGGER', new=factory_module_logger)\n mocker.patch('splitio.client.factory._build_in_memory_factory', new=build_in_memory)\n mocker.patch('splitio.client.factory._build_redis_factory', new=build_redis)\n mocker.patch('splitio.client.factory._build_localhost_factory', new=build_localhost)\n\n _INSTANTIATED_FACTORIES.clear() # Clear all factory counters for testing purposes\n\n factory1 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == []\n\n factory2 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 1,\n 'factory'\n )]\n\n factory_module_logger.reset_mock()\n factory3 = get_factory('some_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have %d %s with this SDK Key. 
\"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\",\n 2,\n 'factories'\n )]\n\n factory_module_logger.reset_mock()\n factory4 = get_factory('some_other_api_key')\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 3\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert factory_module_logger.warning.mock_calls == [mocker.call(\n \"factory instantiation: You already have an instance of the Split factory. \"\n \"Make sure you definitely want this additional instance. \"\n \"We recommend keeping only one instance of the factory at all times \"\n \"(Singleton pattern) and reusing it throughout your application.\"\n )]\n\n event = threading.Event()\n factory1.destroy(event)\n event.wait()\n assert _INSTANTIATED_FACTORIES['some_other_api_key'] == 1\n assert _INSTANTIATED_FACTORIES['some_api_key'] == 2\n factory2.destroy()\n factory3.destroy()\n factory4.destroy()", "title": "" }, { "docid": "1fe2b2c99a3b571ecb351a6f409592f3", "score": "0.5499262", "text": "def test_register_component_with_invalid_type_only_manager():\n\n with pytest.raises(InvalidComponentTypeError):\n component = OnlyManagerMock()\n application_services.register_component(component)", "title": "" }, { "docid": "72ddf278889c50ecaec09a950492c8d7", "score": "0.54960907", "text": "def testGetRPMRequires(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n rpm_requires = dependency_helper.GetRPMRequires()\n self.assertEqual(len(rpm_requires), 1)", "title": "" }, { "docid": "2658f6fd0f22c7c3586459b2fb9cbdfe", "score": "0.5486701", "text": "def check_existing_healthy_manager(environment, tester_conf):\n use_pre_existing_manager(environment, tester_conf)\n check_manager_is_healthy(environment)", "title": "" }, { "docid": "75003d05a97380aa26af55661833b2f6", "score": "0.54827976", "text": "def test_workflow(self):\n workflow = Process.objects.filter(slug=\"test-workflow-1\").latest()\n data1 = Data.objects.create(\n name=\"Test data 1\",\n contributor=self.contributor,\n process=workflow,\n input={\"param1\": \"world\"},\n )\n data2 = Data.objects.create(\n name=\"Test data 2\",\n contributor=self.contributor,\n process=workflow,\n input={\"param1\": \"foobar\"},\n )\n\n # Created and spawned objects should be done.\n self.assertEqual(Data.objects.filter(status=Data.STATUS_DONE).count(), 6)\n\n # Check correct dependency type is created.\n self.assertEqual(\n {d.kind for d in data1.children_dependency.all()},\n {DataDependency.KIND_SUBPROCESS},\n )\n self.assertEqual(\n {d.kind for d in data2.children_dependency.all()},\n {DataDependency.KIND_SUBPROCESS},\n )", "title": "" }, { "docid": "e6830ca1b1b3e0833db3af1a3a0e1568", "score": "0.54815394", "text": "def test__purge_riak(self):\n persistence_helper = PersistenceHelper()\n manager = FakeRiakManagerForCleanup('prefix1')\n self.assertEqual(manager.purged, False)\n self.assertEqual(\n persistence_helper._purge_riak(manager), 'maybe async')\n self.assertEqual(manager.purged, True)", "title": "" } ]
9df2c53b6f9220f5f435cda250380baf
Transforms data of arbitrary shape linearly into a given range.
[ { "docid": "50387e328cc12ad552376c0c0b43bcd0", "score": "0.7565772", "text": "def normalize_linear(data, range=[-1, 1], data_range=None):\n if data_range is not None:\n xmin, xmax = data_range[0], data_range[1]\n else:\n xmin, xmax = np.min(data), np.max(data)\n\n data = (range[1] - range[0]) * (data - xmin) / (xmax - xmin) + range[0]\n return data", "title": "" } ]
[ { "docid": "21feeb116766e8566d03092050f7e572", "score": "0.6647987", "text": "def linearscale(input, boundfrom, boundto):\n\n\t### check args\n\tif len(input) < 1:\n\t\treturn input\n\tif len(boundfrom) != 2:\n\t\traise ValueError, 'boundfrom must be length 2'\n\tif len(boundto) != 2:\n\t\traise ValueError, 'boundto must be length 2'\n\n\tminfrom,maxfrom = boundfrom\n\tminto,maxto = boundto\n\n\t## prepare for fast math\n\trangefrom = float(maxfrom - minfrom)\n\trangeto = numpy.array((maxto - minto)).astype('f')\n\tminfrom = numpy.array(minfrom).astype('f')\n\n\t# this is a hack to prevent zero division\n\t# is there a better way to do this with some sort of \n\t# float limits module rather than hard coding 1e-99?\n\tif not rangefrom:\n\t\trangefrom = 1e-99\n\n\tscale = rangeto / rangefrom\n\toffset = minfrom * scale\n\toutput = input * scale - offset\n\n\treturn output", "title": "" }, { "docid": "585de78b593d4f362c20040a1c213e2e", "score": "0.66031027", "text": "def rescale_linear(array, new_min, new_max):\n minimum, maximum = np.min(array), np.max(array)\n m = (new_max - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n res = m * array + b\n return res", "title": "" }, { "docid": "9c2cca2e5dacda1773b07b7f7b2117ce", "score": "0.66011137", "text": "def rescale_linear(array, new_min, new_max):\n minimum, maximum = np.min(array), np.max(array)\n m = (new_max - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n return m * array + b", "title": "" }, { "docid": "85d98eb8a5d6b1a38184a619c8ae47c0", "score": "0.6583547", "text": "def normalization_range(self,data_np):\n max = data_np.max()\n min = data_np.min()\n return ((data_np - min)/(max-min)),max,min", "title": "" }, { "docid": "3584afbdd7d0460addc2b419773e97cf", "score": "0.6573722", "text": "def ScaleData(data, old_min, old_max, new_min, new_max):\n def ScalePoint(x):\n if x is None:\n return None\n return scale * x + translate\n\n if old_min == old_max:\n scale = 1\n else:\n scale = (new_max - new_min) / float(old_max - old_min)\n translate = new_min - scale * old_min\n return map(ScalePoint, data)", "title": "" }, { "docid": "686cee05314fa7bcfa3ab6c97f6829e2", "score": "0.6564579", "text": "def lin_map(value, left_min, left_max, right_min, right_max):\n # Figure out how 'wide' each range is\n left_span = left_max - left_min\n right_span = right_max - right_min\n\n # Convert the left range into a 0-1 range (float)\n value_scaled = float(value - left_min) / float(left_span)\n\n # Convert the 0-1 range into a value in the right range.\n return right_min + (value_scaled * right_span)", "title": "" }, { "docid": "8d00f6c373f130c4f3551cf3b58cffa2", "score": "0.633012", "text": "def get_linear(from_interval, to_interval):\n # compute coeffs of the mapping\n llim, ulim = from_interval\n new_llim, new_ulim = to_interval\n slope = (new_ulim - new_llim) / (ulim - llim)\n intercept = new_llim - slope * llim\n\n # define the map\n def linear(x):\n \"\"\" Transformation\n \"\"\"\n return slope * x + intercept\n\n return linear", "title": "" }, { "docid": "c50ebaa78e6345a2c58e341f064e7fa1", "score": "0.62838095", "text": "def map_linear(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / float(in_max - in_min) + out_min;", "title": "" }, { "docid": "83c1aa30d6f2a4b92eec0bbe72798fb2", "score": "0.6246692", "text": "def rescale_linear_max_min_with_known_max_min(array, new_min, new_max, input_min, input_max):\n m = (new_max - new_min) / (input_max - input_min)\n b = new_min - m * input_min\n output = 
m * array + b\n\n return output", "title": "" }, { "docid": "2cc095b55907afbad5d3e1d1b8f2aeef", "score": "0.62157357", "text": "def rescale_linear_max_min(array, new_min, new_max):\n minimum, maximum = np.min(array), np.max(array)\n m = (new_max - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n output = m * array + b\n\n return output", "title": "" }, { "docid": "4b6ab377617d5d6886e7035a170aa5fa", "score": "0.621552", "text": "def linear_rescale(min, max, value):\n x = (value - min)/(max - min)\n return x", "title": "" }, { "docid": "3e6b64b93c5490e6c9be1d03f67ac607", "score": "0.62028193", "text": "def mapLinear(x_in, x_min, x_max, y_min, y_max):\n m = ((y_max - y_min) / (x_max - x_min))\n y_out = m * (x_in - x_min) + y_min\n return y_out", "title": "" }, { "docid": "c6b16c9cd67d4781a0a5e64d1df6d1ed", "score": "0.6177316", "text": "def map_to_range(x, target_range, source_range=(-1, 1)):\n src_min, src_max = source_range\n tgt_min, tgt_max = target_range\n p = (x - src_min) / (src_max - src_min)\n return tgt_min + (tgt_max - tgt_min) * p", "title": "" }, { "docid": "e293c0975c8ebadeb7b740f89e6a0324", "score": "0.6174703", "text": "def project_series_to_range(series, low, high):\n norm_series = series / series.max()\n return norm_series.apply(lambda x: low + (high - low) * x)", "title": "" }, { "docid": "49a1e174d5bb2b9fbe18c4f19288fc0e", "score": "0.61399615", "text": "def _rescale(data, low=0, high=1, axis=0):\n\n data = np.asanyarray(data)\n\n dmin = data.min(axis=axis, keepdims=True)\n dmax = data.max(axis=axis, keepdims=True)\n rescaled = ((data - dmin) / (dmax - dmin)) * (high - low) + low\n\n return rescaled", "title": "" }, { "docid": "0e8e0bc2a09cf61cb58f9ca146b501f3", "score": "0.61378473", "text": "def linear(inputArray, scale_min=None, scale_max=None):\t\t\n\t#print \"img_scale : linear\"\n\timageData=np.array(inputArray, copy=True)\n\t\n\tif scale_min == None:\n\t\tscale_min = imageData.min()\n\tif scale_max == None:\n\t\tscale_max = imageData.max()\n\n\timageData = imageData.clip(min=scale_min, max=scale_max)\n\tfactor=(scale_max - scale_min)\n \tif factor == 0:\n\t\tfactor=1\n \n\timageData = (imageData -scale_min) / factor\n\tindices = np.where(imageData < 0)\n\timageData[indices] = 0.0\n\tindices = np.where(imageData > 1)\n\timageData[indices] = 1.0\n\t\n\treturn imageData", "title": "" }, { "docid": "3a4c48aa18fb085d6aa18a4695fb81e9", "score": "0.6128207", "text": "def _get_score_from_pvalue_linear(pvalue, range_min=1, range_max=1e-10, out_range_min=0., out_range_max=1.):\n def get_log(n):\n try:\n return math.log10(n)\n except ValueError:\n return math.log10(range_max)\n\n min_score = get_log(range_min)\n max_score = get_log(range_max)\n score = get_log(pvalue)\n return DataNormaliser.renormalize(score, [min_score, max_score], [out_range_min, out_range_max])", "title": "" }, { "docid": "10e7586bd1ecd9d9400bc3f855e11948", "score": "0.61145204", "text": "def normalize(img, from_range, to_range):\n return rescale(img, from_range, to_range)", "title": "" }, { "docid": "cf9c7a9cfc934d34bc03ffd9ea4e6022", "score": "0.6069645", "text": "def normalize(data_range, multiplier=1.0):\n base_value = data_range[0].value\n for i in range(len(data_range)):\n data_range[i].raw_value = data_range[i].value\n data_range[i].value = (float(data_range[i].value - base_value)) * \\\n multiplier", "title": "" }, { "docid": "5080035f13e896967a64a45449a0ed34", "score": "0.5938613", "text": "def denormalize(img, from_range, to_range):\n return rescale(img, from_range, to_range)", 
"title": "" }, { "docid": "40f5a07e7c76eaf5994a3b39c20e8c66", "score": "0.5929812", "text": "def map_range (self, x, in_min, in_max, out_min, out_max):\n\n x = float (x)\n in_min = float (in_min)\n in_max = float (in_max)\n out_min = float (out_min)\n out_max = float (out_max)\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min", "title": "" }, { "docid": "27cdb0279666a05f445e819c8f673702", "score": "0.5906684", "text": "def unnormalize_data(normalized_data, max_val, min_val):\n #normalized_data = normalized_data.astype(np.float32)\n #unnormalized_data = normalized_data*(max_val-min_val)+min_val\n return (normalized_data*(max_val-min_val)+min_val).astype(np.float32)", "title": "" }, { "docid": "1739a7896327d72a80dcf08edd8265ad", "score": "0.5900776", "text": "def scale(array, target_range = (-1,1)):\n if not is_included(array, target_range):\n mini = array.min()\n maxi = array.max()\n l = target_range[0]\n u = target_range[1]\n array = (array - mini) / (maxi - mini)\n array = array * (u - l) + l\n return array", "title": "" }, { "docid": "1739a7896327d72a80dcf08edd8265ad", "score": "0.5900776", "text": "def scale(array, target_range = (-1,1)):\n if not is_included(array, target_range):\n mini = array.min()\n maxi = array.max()\n l = target_range[0]\n u = target_range[1]\n array = (array - mini) / (maxi - mini)\n array = array * (u - l) + l\n return array", "title": "" }, { "docid": "f4639255d33060ebebf7aebaa71a6a48", "score": "0.58987296", "text": "def normalize_data(self, feature_range=(0,1)):\n print(\"Data normalization (scale): {}\".format(feature_range))\n\n min_max_scaler = preprocessing.MinMaxScaler(feature_range)\n self.data = min_max_scaler.fit_transform(self.data)\n self.data = [[round(v, 3) for v in row] for row in self.data]", "title": "" }, { "docid": "d874c790805db16a125ba39c70f34205", "score": "0.58729804", "text": "def mappingValues(value, leftMin, leftMax, rightMin, rightMax):\n leftSpan = leftMax - leftMin # Figure out how 'wide' each range is\n rightSpan = rightMax - rightMin\n valueScaled = float(value - leftMin) / float(leftSpan) # Convert the left range into a 0-1 range (float)\n return rightMin + (valueScaled * rightSpan) # Convert the 0-1 range into a value in the right range.", "title": "" }, { "docid": "5d4856580f139d3818fe59a9ce0203c4", "score": "0.5869244", "text": "def data_range(X):\n Y = np.empty((X.shape[1],2))\n Y[:,0] = X.min(axis=0)\n Y[:,1] = X.max(axis=0)\n\n return Y", "title": "" }, { "docid": "a4d7631f93d7f5b5fe9c932327e3d00b", "score": "0.5854502", "text": "def scale_range(x, input_range, target_range):\n\n range = [np.amin(x), np.amax(x)]\n x_std = (x - input_range[0]) / (1.0 * (input_range[1] - input_range[0]))\n x_scaled = x_std * (1.0 * (target_range[1] - target_range[0])) + target_range[0]\n return x_scaled, range", "title": "" }, { "docid": "df8e19c6cec51841ef8894e942013f05", "score": "0.58300006", "text": "def normalize(data, **kw):\n upper_bound = 1\n lower_bound = 0\n dtype = np.float64\n if 'upper_bound' in kw:\n upper_bound = kw['upper_bound']\n if 'lower_bound' in kw:\n lower_bound = kw['lower_bound']\n if 'dtype' in kw:\n dtype = kw['dtype']\n check_ndarray(data)\n newdata = data - data.min()\n newdata = newdata / newdata.max()\n newdata = newdata * (upper_bound - lower_bound)\n newdata += lower_bound\n return newdata.astype(dtype)", "title": "" }, { "docid": "f66fad0af75a934e07b4b2b8d2e16daa", "score": "0.5823373", "text": "def range_op(start, limit, delta, dtype):\n output_tensor = Tensor(list(range(start, limit, 
delta)), dtype)\n return output_tensor", "title": "" }, { "docid": "7339029809aa8b0fdc996414aca4086c", "score": "0.580773", "text": "def project_to_range(value, low, high):\n return low + (high - low) * value", "title": "" }, { "docid": "8b30f8e167066bd7c7568b38b92ce4b9", "score": "0.5800499", "text": "def normalize(self, data, _min, _max):\n return (data-_min)/(_max-_min)", "title": "" }, { "docid": "1dc59b6199222ab42542cf367fe528e7", "score": "0.57861644", "text": "def trange(start: int, end: int, step: int=1, dtype: T.Dtype = None) -> T.Tensor:\n return numpy.arange(start, end, step, dtype=dtype)", "title": "" }, { "docid": "d0f56551217ad45100d6df5d2a27585c", "score": "0.57512665", "text": "def normalize(data, low: float = 0, high: float = 1):\n if not isinstance(data, (pd.DataFrame, pd.Series)):\n arr = np.array(data)\n else:\n arr = data\n\n m = arr.min()\n norm = (arr - m) / (arr.max() - m)\n rescaled = norm * (high - low) + low\n return rescaled", "title": "" }, { "docid": "43f53307ab19b7d27c9e662e70a9f58d", "score": "0.5745861", "text": "def scale_data(data, high=1.0, low=-1.0, maxs=None, mins=None):\n if mins is None:\n mins = np.min(data, axis=0)\n if maxs is None:\n maxs = np.max(data, axis=0)\n rng = maxs - mins\n return high - (((high - low) * (maxs - data)) / rng)", "title": "" }, { "docid": "2da2f0e57bd8002c695ddea9a84be4e4", "score": "0.57300824", "text": "def _map_range(\n self,\n x,\n in_min,\n in_max,\n out_min,\n out_max,\n ):\n\n if x is 0:\n return 0\n out = (x - in_min) * (out_max - out_min) / (in_max - in_min) \\\n + out_min\n if out > out_max:\n return out_max\n elif out < out_min:\n return out_min\n return out", "title": "" }, { "docid": "eec5d649b5e340cbc497013a8a3c2f26", "score": "0.5722401", "text": "def transform_data(x, scale):\n return x / scale", "title": "" }, { "docid": "0217950bb9de0646675ed448f8455222", "score": "0.5718889", "text": "def rescale(arr, vmin, vmax):\n arrout = (vmax - vmin)*arr + vmin\n return arrout", "title": "" }, { "docid": "81b58baf440359df7da10977287169fa", "score": "0.57161486", "text": "def rescale(img, from_range, to_range):\n from_diff = from_range[1] - from_range[0]\n to_diff = to_range[1] - to_range[0]\n normalized_img = (img - from_range[0])/from_diff\n scaled_img = normalized_img*to_diff + to_range[0]\n return scaled_img", "title": "" }, { "docid": "9b775c5c496675edf70f05db4740f1ee", "score": "0.5697326", "text": "def remap(v, in_range, out_range):\n in_v_norm = (in_range[1]-v)/(in_range[1]-in_range[0])\n clamped_norm = min(1, max(0, in_v_norm))\n return out_range[0] + clamped_norm*(out_range[1] - out_range[0])", "title": "" }, { "docid": "38bfc9a51fd05a259af32a284693fb91", "score": "0.56967324", "text": "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "title": "" }, { "docid": "38bfc9a51fd05a259af32a284693fb91", "score": "0.56967324", "text": "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n 
else:\n # return array if already in range\n scaled = arr\n\n return scaled", "title": "" }, { "docid": "8ee727cc0f5557a1252ba114c4d3f10a", "score": "0.5674637", "text": "def linspace(start, end, steps):\n if not isinstance(start, Tensor):\n start = Tensor(start, mstype.float32)\n if not isinstance(end, Tensor):\n end = Tensor(end, mstype.float32)\n return linspace_(start, end, steps)", "title": "" }, { "docid": "1d6003d01b5d459983044647badd461f", "score": "0.56561726", "text": "def transform_values(A, lb, ub, K, baseline):\n A = ensure_2d_array(A)\n lb, ub = ensure_bounds(lb, ub, A.shape[-1])\n baseline = ensure_value(baseline, 0, A.shape[0])\n A, baseline = apply_linear_transform(A, K, baseline)\n return A, lb, ub, baseline", "title": "" }, { "docid": "6fa81ef9c31f6e9874243046b637e85e", "score": "0.56450915", "text": "def linear_interpolation(iterable: Sequence, new_min: float, new_max: float, old_min=None, old_max=None):\n if old_min is None and old_max is None:\n old_min = min(iterable)\n old_max = max(iterable)\n return [((x - old_min) / (old_max - old_min)) * (new_max - new_min) + new_min for x in iterable]", "title": "" }, { "docid": "61c9b88efecec77845d749cca5c0803f", "score": "0.56413686", "text": "def make_linear_var(\n step,\n start, end,\n start_value, end_value,\n clip_min = 0.0, clip_max = 1.0):\n linear = (\n (end_value - start_value) /\n (end - start) *\n (tf.cast(step, tf.float32) - start) + start_value)\n return tf.clip_by_value(linear, clip_min, clip_max)", "title": "" }, { "docid": "da194f3a3ecfd7b8e481ae9763a4c2b6", "score": "0.5640579", "text": "def adjustRange(a, vmin=0, vmax=255):\n new_a = (\n (\n # Represent array as floats ranging between 0 and 1.\n a.astype(dtype=float) / np.nanmax(a)\n\n # Fill given range.\n * (vmax - vmin) + vmin\n )\n # Convert back to regular array.\n .astype(dtype=np.uint8)\n )\n\n return new_a", "title": "" }, { "docid": "3c73ac7ab8e30bdb492e16fe2f62c915", "score": "0.5636263", "text": "def scale(value, from_min, from_max, to_min=0, to_max=8):\n from_range = from_max - from_min\n to_range = to_max - to_min\n return (((value - from_min) / from_range) * to_range) + to_min", "title": "" }, { "docid": "1efed386d879f68a3a81f8a1104dbbe7", "score": "0.56322855", "text": "def scale_range(data, scale_factor=500, delta=0, discard_zeros=True):\n if discard_zeros:\n msk = ~np.isclose(data, 0)\n else:\n msk = np.ones(data.shape, dtype=bool)\n scale_factor = scale_factor - delta\n data[msk] = data[msk] - np.nanmin(data[msk])\n data[msk] = scale_factor / np.nanmax(data[msk]) * data[msk]\n if discard_zeros:\n data[~msk] = 0 # put back masked out voxels\n return data", "title": "" }, { "docid": "e87684cc38388925030f7205eba17695", "score": "0.5629108", "text": "def scale_to(x, from_max, to_min, to_max):\n return x / from_max * (to_max - to_min) + to_min", "title": "" }, { "docid": "705bbe9e546f7377efa1fc0ccf043d5f", "score": "0.562593", "text": "def _lagrange(x_new: np.ndarray) -> np.ndarray:\n if bounds_error and x_new.min() < x.min():\n raise ValueError(f\"Value {x_new.min()} in x_new is below the interpolation range {x.min()}.\")\n if bounds_error and x_new.max() > x.max():\n raise ValueError(f\"Value {x_new.max()} in x_new is above the interpolation range {x.max()}.\")\n\n y_new = np.zeros(x_new.shape[:1] + y.shape[1:])\n x_new_scaled = (x_new - _xm) / _xs\n\n # Figure out which points to use for the interpolation\n start_idxs = np.abs(x[:, None] - x_new[None, :]).argmin(axis=0) - window // 2\n start_idxs[start_idxs < 0] = 0\n start_idxs[start_idxs > 
len(x) - window] = len(x) - window\n\n # Interpolate for each unique set of interpolation points\n for idx in np.unique(start_idxs):\n y_idx = start_idxs == idx\n x_wd, y_wd = x_scaled[idx : idx + window], y[idx : idx + window]\n diff_x = np.subtract(*np.meshgrid(x_wd, x_wd)) + np.eye(window)\n\n r = np.array(\n [\n np.prod((x_new_scaled[y_idx, None] - x_wd[idxs]) / diff_x[idxs, i], axis=1)\n for i, idxs in enumerate(indices)\n ]\n )\n y_new[y_idx] = r.T @ y_wd\n return y_new", "title": "" }, { "docid": "7d461247967ff8cea9a9db40a1e2808b", "score": "0.5612509", "text": "def range_of(data):\n return {\"min\" : np.array([np.min(data, axis=0)]), \"max\" : np.array([np.max(data, axis=0)])}", "title": "" }, { "docid": "8c36a79a87efdc2e0527539cef1ca2af", "score": "0.560919", "text": "def make_linear_var(\r\n step,\r\n start, end,\r\n start_value, end_value,\r\n clip_min = 0.0, clip_max = 1.0):\r\n linear = (\r\n (end_value - start_value) /\r\n (end - start) *\r\n (tf.cast(step, tf.float32) - start) + start_value)\r\n return tf.clip_by_value(linear, clip_min, clip_max)", "title": "" }, { "docid": "897bba1e03e1e104404ed05252e72760", "score": "0.5604558", "text": "def unitscale_xv(xv: ArrayLike1d, xi_range: ArrayLike1d) -> ArrayLike1d: \n xunit = copy.deepcopy(xv)\n lb = xi_range[0] #the left bound\n rb = xi_range[1] #the right bound\n xunit = (xv - lb)/(rb - lb)\n \n return xunit", "title": "" }, { "docid": "16f76d57317023f3549f9e4c59912ce3", "score": "0.55883193", "text": "def _srs(data, low=0, high=1, axis=0):\n\n srs = _rs(np.asanyarray(data), axis=axis) # robust sigmoid transform\n normed = _rescale(srs, low=low, high=high, axis=axis) # scales data\n\n return normed", "title": "" }, { "docid": "8ab452ddada95e5d396c2d51882ca763", "score": "0.5571661", "text": "def normalize_input_data(data: [{}], bounds: {}) -> [{}]:\n result = []\n for row in data:\n row_result = {}\n for key, value in row.items():\n if key == \"value\":\n row_result[key] = value\n continue\n\n if isinstance(value, str):\n row_result[key] = value\n continue\n\n target_bound_min = bounds[key][\"min\"]\n target_bound_max = bounds[key][\"max\"]\n inverse = False\n\n if target_bound_max < target_bound_min:\n target_bound_max, target_bound_min = target_bound_min, target_bound_max\n inverse = True\n\n # reduce value to bounds\n if value > target_bound_max:\n temp_value = target_bound_max\n elif value < target_bound_min:\n temp_value = target_bound_min\n else:\n temp_value = value\n\n normalize_value = ((temp_value - target_bound_min) / (target_bound_max - target_bound_min))\n if inverse:\n normalize_value = 1 - normalize_value\n row_result[key] = normalize_value\n result.append(row_result)\n\n return result", "title": "" }, { "docid": "17a43ae7c24ae4221eae2e025cbd524f", "score": "0.5568992", "text": "def adapt_linear(self, start, end, delta, hmin):\n\n fill_value = self.fn(torch.tensor([float(start), float(end)]))\n fill_value = (fill_value[0],fill_value[1])\n\n # Forwards\n x = []\n y = []\n self.__recurse_adapt(start, end, delta, hmin, x, y, \n self.__eval_forward)\n x.append(end) # Stitch the end value on.\n y.append(self.__eval_forward(end)) # Stitch the end value on.\n self.xin = torch.tensor(x, dtype=torch.float32)\n self.yin = torch.tensor(y, dtype=torch.float32)\n self.forward_fn_interpolate = interpolate.interp1d(self.xin, \n self.yin, \n kind=self.kind,\n bounds_error=False,\n fill_value=fill_value)\n\n # Backwards\n x_grad = []\n y_grad = []\n self.__recurse_adapt(start, end, delta, hmin, x_grad, y_grad, \n 
self.__eval_backward)\n x_grad.append(end) # Stitch the end value on.\n y_grad.append(self.__eval_backward(end)) # Stitch the end value on.\n self.xin_grad = torch.tensor(x_grad, dtype=torch.float32)\n self.yin_grad = torch.tensor(y_grad, dtype=torch.float32)\n self.backward_fn_interpolate = interpolate.interp1d(self.xin_grad, \n self.yin_grad, \n kind=self.kind,\n bounds_error=False,\n fill_value=fill_value)", "title": "" }, { "docid": "2663a95d637ba153203111aed74555c1", "score": "0.5565536", "text": "def linear_ranges(values):\n ranges = []\n def close(x,y): return abs(y-x) < 1e-6\n for i in range(0,len(values)):\n is_linear_before = i >= 2 and \\\n close(values[i]-values[i-1],values[i-1]-values[i-2])\n is_linear_after = 1 <= i <= len(values)-2 and \\\n close(values[i]-values[i-1],values[i+1]-values[i])\n if is_linear_before or \\\n (len(ranges) > 0 and len(ranges[-1]) == 1 and is_linear_after):\n ranges[-1] += [values[i]]\n else: ranges += [[values[i]]]\n return ranges", "title": "" }, { "docid": "5990f24292f2b244b10af06857d25e51", "score": "0.5549769", "text": "def normalize_data(input_data):\n min_val = np.min(input_data, axis=0)\n max_val = np.max(input_data, axis=0)\n input_data = (input_data - min_val) / (max_val - min_val)\n return input_data", "title": "" }, { "docid": "48bbbc2e41bdb61ebe3a5e50eca797df", "score": "0.55477524", "text": "def scale_to_01_range(x) -> np.ndarray:\n value_range = (np.max(x) - np.min(x))\n starts_from_zero = x - np.min(x)\n return starts_from_zero / value_range", "title": "" }, { "docid": "2ec8f2d92c872cc1a4d24e29a9323c7c", "score": "0.5542122", "text": "def linearData():\n\tlength = 50\n\tx = np.linspace(1, 10, num=length) + np.random.rand(length)\n\ty = x + np.random.rand(length)\n\treturn np.array([x, y])", "title": "" }, { "docid": "60a23ca55520bcad731b8c78119dbede", "score": "0.55404645", "text": "def linspace(start, stop, num, dtype='int64', name=None, axis=0):\n return array_ops.linspace(\n start, stop, num, dtype=dtype, name=name, axis=axis)", "title": "" }, { "docid": "d85cd237793138dc2c2ef05e59b1d337", "score": "0.55379885", "text": "def lrange(start, stop, rate=1, endpoint=True, dtype=None):\r\n if rate == 1:\r\n return arange(start, stop+1*endpoint, dtype=int)\r\n else:\r\n return linspace(start, stop, round((stop-start)*rate)+1*endpoint,\r\n endpoint=endpoint, dtype=dtype)", "title": "" }, { "docid": "576b16c3b49316ab7550975811705a79", "score": "0.55222535", "text": "def rescale(data):\n maxd = max(data)\n mind = min(data)\n g=maxd - mind\n s=GRID_HEIGHT/g\n f = lambda x: GRID_HEIGHT*(x- mind)/(maxd-mind)\n scale_data = [f(i) for i in data]\n return scale_data\n pass", "title": "" }, { "docid": "055bdcfd63b70dd6edee3520828877d6", "score": "0.55202633", "text": "def map_range(val, in_a, in_b, out_a=0, out_b=1):\n return MathStat.lerp(out_a, out_b, MathStat.getpercent(val, in_a, in_b))", "title": "" }, { "docid": "0376ba69c72b129979b7c7afe5367ec5", "score": "0.5517023", "text": "def scale_samples(range_in,samples):\n range_samples = np.ptp(sample_bounds)\n range_target = np.ptp(range_in,1)\n mean = np.mean(range_in,1)\n\n coordinates = samples*range_target/range_samples+mean # full coordinates\n\n return coordinates", "title": "" }, { "docid": "ba538b34d503ec27ac20bcada5ea43e0", "score": "0.55145395", "text": "def maprangex(val):\r\n\ttomax = 1\r\n\ttomin = -1\r\n\tvalnorm = (val + width) / (width + width)\r\n\treturn (tomin + valnorm * (tomax - tomin) + zoomx) / zoompercent", "title": "" }, { "docid": "d395d0879c5d00db893c44ce4f96a9b0", 
"score": "0.55057067", "text": "def rescale_to_data(self, vmin=None, vmax=None, mask_info=None):\n return", "title": "" }, { "docid": "7830064477efcbfd2aba27dce05c3a41", "score": "0.550421", "text": "def normalizeFeatures(data, ranges_dict):\n \n for _, values in ranges_dict.items():\n if values['normalize']:\n columns = values['cols']\n max_ = values['max']\n min_ = values['min']\n data[columns] = (data[columns] - min_) / (max_ - min_)\n\n return data", "title": "" }, { "docid": "e366a83a60ef6e02755e0f711e905941", "score": "0.54942954", "text": "def rldecode(starts, lengths, values, minlength=None):\n starts, lengths, values = map(np.asarray, (starts, lengths, values))\n # TODO: check validity of rle\n ends = starts + lengths\n n = ends[-1]\n if minlength is not None:\n n = max(minlength, n)\n x = np.full(n, np.nan)\n for lo, hi, val in zip(starts, ends, values):\n x[lo:hi] = val\n return x", "title": "" }, { "docid": "e366a83a60ef6e02755e0f711e905941", "score": "0.54942954", "text": "def rldecode(starts, lengths, values, minlength=None):\n starts, lengths, values = map(np.asarray, (starts, lengths, values))\n # TODO: check validity of rle\n ends = starts + lengths\n n = ends[-1]\n if minlength is not None:\n n = max(minlength, n)\n x = np.full(n, np.nan)\n for lo, hi, val in zip(starts, ends, values):\n x[lo:hi] = val\n return x", "title": "" }, { "docid": "2d041b5273d3e85f3d4a262f5190300a", "score": "0.5491163", "text": "def normalize(data):\n no_offset = data - data.min()\n normalized = no_offset / no_offset.max()\n return normalized", "title": "" }, { "docid": "e168e8e1b8bc73fba1b062b33eaa42c0", "score": "0.54777265", "text": "def early(data, range=(0, 8)):\n return {**data, 'x_range': range}", "title": "" }, { "docid": "1d348377cb76c4932dd1e8d164ee9eab", "score": "0.5477652", "text": "def auto_scale(data, signal_min=0, signal_max=2*SIGNAL_SCALE):\n\n _, _, _, lf, uf = signal_stats(data)\n\n # Initialize the output (copy)\n output = data.astype('float32')\n\n # Clip\n output[output < lf] = lf\n output[output > uf] = uf\n\n # shift and scale\n output = (signal_max - signal_min)/(uf-lf) * (output - lf) + signal_min\n\n return output", "title": "" }, { "docid": "2f932ce736040d64be6f8e1b706e3211", "score": "0.5474259", "text": "def normalise_modular_range(value, min, max):\n return numpy.mod(value-min, max-min)+min", "title": "" }, { "docid": "3c5857a50c8bfd4bb9c6ca3d257dc68a", "score": "0.54703826", "text": "def rescale_data(values):\r\n if not isinstance(values, ndarray):\r\n values = ndarray(values)\r\n if len(values.shape) > 1:\r\n raise TypeError('Expected values with shape (N,)')\r\n\r\n min_value = min(values)\r\n max_value = max(values)\r\n values -= min_value\r\n values /= (max_value - min_value)\r\n\r\n return values", "title": "" }, { "docid": "b8db1f5f68e8d2d30f46915b36ba4415", "score": "0.5470194", "text": "def scale_range(x, new_min=0.0, new_max=1.0, old_min=None, old_max=None, squash_outside_range=True, squash_inf=False, ):\n if squash_inf and not squash_outside_range:\n warn(ValueError('Makes no sense to squash infinity but not other numbers outside the source range. 
\\\n Will squash all outside range.'))\n squash_outside_range = True\n\n if isinstance(x, list):\n x = np.array(x)\n\n if old_min is None:\n old_min = np.min(x[~np.isinf(x)])\n if old_max is None:\n old_max = np.max(x[~np.isinf(x)])\n old_range = old_max - old_min\n new_max = float(new_max)\n new_min = float(new_min)\n new_range = new_max - new_min\n\n retval = (new_range * (x - old_min) / old_range) + new_min\n if squash_inf:\n retval[np.isinf(x) & (x > 0)] = new_max\n retval[np.isinf(x) & (x < 0)] = new_min\n\n if squash_outside_range:\n retval[~np.isinf(x) & (x > old_max)] = new_max\n retval[~np.isinf(x) & (x < old_min)] = new_min\n\n return retval", "title": "" }, { "docid": "b7a39441074001dbd0ff07d401a73168", "score": "0.54590863", "text": "def mapping(x, i_min, i_max, o_min, o_max) -> float:\n\n return (x - i_min) * (o_max - o_min) / (i_max - i_min) + o_min", "title": "" }, { "docid": "06166500c98ae2362c8422ab07039b82", "score": "0.5435322", "text": "def normalize_data(input_data):\n max_val = np.max(input_data)\n min_val = np.min(input_data)\n normalized_data = (input_data - min_val)/(max_val-min_val)\n normalized_data = normalized_data.astype(np.float32)\n return normalized_data, max_val, min_val", "title": "" }, { "docid": "f58b71883e42c7a022c48c6a1e1538b2", "score": "0.5432219", "text": "def arange(start=0, end=None, step=1, dtype=None, name=None):\n if dtype is None:\n dtype = 'int64'\n if end is None:\n end = start\n start = 0\n\n out_shape = None\n if not isinstance(start, Variable) and not isinstance(\n end, Variable) and not isinstance(step, Variable):\n out_shape = [int(math.ceil((end - start) / step))]\n\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n if not isinstance(start, Variable):\n with device_guard(\"cpu\"):\n start = fill_constant([1], dtype, start, force_cpu=True)\n elif start.dtype != dtype:\n start = paddle.cast(start, dtype)\n\n if not isinstance(end, Variable):\n with device_guard(\"cpu\"):\n end = fill_constant([1], dtype, end, force_cpu=True)\n elif end.dtype != dtype:\n end = paddle.cast(end, dtype)\n\n if not isinstance(step, Variable):\n with device_guard(\"cpu\"):\n step = fill_constant([1], dtype, step, force_cpu=True)\n elif step.dtype != dtype:\n step = paddle.cast(step, dtype)\n\n if in_dygraph_mode():\n return _C_ops.arange(start, end, step, dtype, _current_expected_place())\n\n if _in_legacy_dygraph():\n out = _legacy_C_ops.range(start, end, step)\n out.stop_gradient = True\n return out\n\n check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],\n 'range/arange')\n helper = LayerHelper('range', **locals())\n out = helper.create_variable_for_type_inference(dtype, shape=out_shape)\n helper.append_op(type='range',\n inputs={\n 'Start': start,\n 'End': end,\n 'Step': step\n },\n outputs={'Out': out})\n out.stop_gradient = True\n if out_shape is not None:\n out.desc.set_shape(out_shape)\n return out", "title": "" }, { "docid": "807769717a40c58b27e01c1d5e9137a8", "score": "0.54277056", "text": "def flat_data(data,dynlow, dynhigh, log):\n\tif log:\n\t\tmi = 10**dynlow\n\t\tma = 10**dynhigh\n\t\tdata=N.minimum(N.maximum(data,mi),ma)\n\t\tdata=N.log10(data)\n\telse:\n\t\tmi = dynlow\n\t\tma = dynhigh\n\t\tdata=N.minimum(N.maximum(data,mi),ma)\n\treturn data", "title": "" }, { "docid": "99df0b0fda6b84f156023bccdaca6992", "score": "0.54245263", "text": "def _input_to_data(data, normalize):\n # May need to allow other formats as well.\n if not isinstance(data, np.ndarray):\n data = 
np.asarray(data)\n if normalize in (None, True):\n M = data.max()\n m = data.min()\n if normalize is None: \n normalize = m < 0. or M > 1.\n if normalize:\n d = M - m\n if d == 0:\n d = 1.\n data = (data - m) / d\n shape = data.shape\n data = data.reshape(-1)\n return data, shape", "title": "" }, { "docid": "107d6c1e662e4af47f5bc130951a2193", "score": "0.542146", "text": "def scale(a, tmin=0.0, tmax=1.0):\n return (np.interp(a, (a.min(), a.max()), (tmin, tmax)))", "title": "" }, { "docid": "4fbef98f9aa04cbcdc1c5872b73a28c0", "score": "0.5420449", "text": "def linear_ramp(birth, pers, low=0.0, high=1.0, start=0.0, end=1.0):\n try:\n n = len(birth)\n except:\n n = 1\n birth = [birth]\n pers = [pers]\n\n w = np.zeros((n,))\n for i in range(n):\n if pers[i] < start:\n w[i] = low\n elif pers[i] > end:\n w[i] = high\n else:\n w[i] = (pers[i] - start) * (high - low) / (end - start) + low\n\n return w", "title": "" }, { "docid": "c905ddbc352488b0f546547c1a481c5b", "score": "0.54040045", "text": "def float_range():", "title": "" }, { "docid": "4de77eb68d61761e1f78ec3a432a2707", "score": "0.5403395", "text": "def rescale(value, in_min, in_max, out_min, out_max):\n\n in_range = in_max - in_min\n out_range = out_max - out_min\n\n return (value - in_min) * (out_range / in_range) + out_min", "title": "" }, { "docid": "808f987647ddea8e2e2999e483672bd1", "score": "0.5398063", "text": "def map_value_ranges(value, old_range, new_range):\n if len(old_range) == len(new_range) == 2:\n return mapValue(value, old_range[0], old_range[1], new_range[0], new_range[1])\n else:\n return value", "title": "" }, { "docid": "a1c6efd8456320bd62f4bed302efec05", "score": "0.53957766", "text": "def translate(value, current_min, current_max, new_min, new_max):\n\n # Swap them if the max and min are reversed\n if current_min > current_max:\n tmp = current_max\n current_max = current_min\n current_min = tmp\n\n tmp = new_max\n new_max = new_min\n new_min = tmp\n\n # Return the new min or max if the value falls beyond the current range\n # specified\n if value < current_min:\n return new_min\n if value > current_max:\n return new_max\n\n # Figure out how 'wide' each range is\n left_span = current_max - current_min\n right_span = new_max - new_min\n\n # Convert the left range into a 0-1 range (float)\n value_scaled = (value - current_min) / left_span\n\n # Convert the 0-1 range into a value in the right range.\n return new_min + (value_scaled * right_span)", "title": "" }, { "docid": "25d4421f40a969565889fca70a1d27f7", "score": "0.53947484", "text": "def range_by_lex(self, low, high, start=None, num=None, reverse=False):\n if reverse:\n fn = self.database.zrevrangebylex\n low, high = high, low\n else:\n fn = self.database.zrangebylex\n return fn(self.key, low, high, start, num)", "title": "" }, { "docid": "a67dfa8bf651ac4aa02f79c3903e1723", "score": "0.5392643", "text": "def transform(self, X):\n out = []\n for x in X:\n tmp = [np.min(x), np.max(x)]\n out.append(tmp)\n return np.array(out)", "title": "" }, { "docid": "276082b7112ba89ad69eb43294f50915", "score": "0.5387938", "text": "def scale_data(x):\n m = np.min(x)\n x0 = x - m\n M = np.max(x0)\n x1 = x0 / M\n return (x1, m, M)", "title": "" }, { "docid": "2a2ef3518bad14e71ad24c0aa1f7bc5d", "score": "0.5382268", "text": "def clamp_values(values, low_high):\n mw = max(values)\n return [ project_to_range(w / mw, low_high[0], low_high[1]) for w in values ]", "title": "" }, { "docid": "5e26a6b533365df60052eb8ceb389c43", "score": "0.5381187", "text": "def map_range_clamped(val, in_a, 
in_b, out_a=0, out_b=1):\n return MathStat.lerp(out_a, out_b, MathStat.getpercent(val, in_a, in_b))", "title": "" }, { "docid": "c74676bd7eb34b607dbd3ce4898a27c4", "score": "0.537763", "text": "def _feature_scaling(self, data):\n\n feature_range = (0, 1)\n feat_min, feat_max = feature_range\n data_normalized = (data - data.min()) / (data.max() - data.min())\n data_scaled = data_normalized * (feat_max - feat_min) + feat_min\n\n return data_scaled", "title": "" }, { "docid": "d0b9d6580523938b1ea20f844acfb32c", "score": "0.5374934", "text": "def map_range_clamped(cls, value, range_a, range_b, out_range_a, out_range_b):\n return 0.000000", "title": "" }, { "docid": "50aa9310a118a8740f1ad4dad2cf77b1", "score": "0.53699434", "text": "def remap_interval(val, input_interval_start, input_interval_end, output_interval_start, output_interval_end):\n slope = (float(output_interval_end) - float(output_interval_start))/(float(input_interval_end) - float(input_interval_start))\n return slope * (val - float(input_interval_start)) + float(output_interval_start)", "title": "" }, { "docid": "cd8799801f54554aa6158ef27ff1d7b2", "score": "0.5362932", "text": "def f_range(x):\n\n return LA.norm(x)", "title": "" }, { "docid": "5ad658fc48c0f17a6ed9122941098685", "score": "0.5358536", "text": "def rescale(inlist, newrange=(0, 1)):\n OldMax = max(inlist)\n OldMin = min(inlist)\n\n if OldMin == OldMax:\n raise RuntimeError('list contains of only one unique value')\n\n OldRange = OldMax - OldMin\n NewRange = newrange[1] - newrange[0]\n result = [(((float(x) - OldMin) * NewRange) / OldRange) + newrange[0] for x in inlist]\n return result", "title": "" }, { "docid": "4c0e9649fadfc64be3c31b59fa13515b", "score": "0.5357811", "text": "def rescale_linear_sum_min(array, new_min, new_sum):\n minimum, maximum = np.min(array), np.nansum(array)\n m = (new_sum - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n output = m * array + b\n\n return output", "title": "" }, { "docid": "910c7592950d33290f415a4d5ce88d20", "score": "0.5355885", "text": "def mapToRange(val, srcMax, dst):\n return (float(val) / float(srcMax)) * (float(dst[1]) - float(dst[0])) + float(dst[0])", "title": "" } ]
14a430696c4604428dfccbd582a58d4d
Add jump moves to the list of available moves.
[ { "docid": "dfb8975d7a80bd241b82a2a107fd3ad7", "score": "0.7265457", "text": "def add_jump_moves(self, loc):\n # go through adjacent field\n for i in range(len(Conf.NORMAL_DIRECTIONS)):\n jump_over_loc = (loc[0] + Conf.NORMAL_DIRECTIONS[i][0], loc[1] + Conf.NORMAL_DIRECTIONS[i][1])\n # if field is in bounds of data structure\n if 0 <= jump_over_loc[0] < self.board.SIZE_BOARD_X_GRID and \\\n 0<= jump_over_loc[1] < self.board.SIZE_BOARD_Y_GRID:\n # if field is a piece\n if not self.board.get_field(jump_over_loc).value == Conf.EMPTY and \\\n not self.board.get_field(jump_over_loc).value == Conf.NON_PLAYABLE:\n # get jump to location\n new_loc = (loc[0] + Conf.JUMP_DIRECTIONS[i][0], loc[1] + Conf.JUMP_DIRECTIONS[i][1])\n # if jump to location is in bounds\n if 0 <= new_loc[0] < self.board.SIZE_BOARD_X_GRID and \\\n 0<= new_loc[1] < self.board.SIZE_BOARD_Y_GRID:\n # if jump to location is free\n if self.board.get_field(new_loc).value == Conf.EMPTY:\n # if this location doesnt already exist and not the starting location\n if not new_loc in self.available_locations and \\\n not new_loc == self.piece_selected_loc:\n self.available_locations.append(new_loc)\n self.add_jump_moves(new_loc)", "title": "" } ]
[ { "docid": "c4be96ae3dfd0e9491f987b4b956f0a8", "score": "0.65495294", "text": "def list_jumps(self):\n\n logger.debug(u'list_jumps(): position={}'.format(self.position))\n\n # Each jump chain begins with checker's starting position\n self._jump_chain = [self.position]\n logger.debug(u'list_jumps(): _jump_chain={}'.format(self._jump_chain))\n\n self._list_of_jump_chains = []\n\n for neighbors in self.get_move_squares():\n if self.valid_jump(neighbors):\n # Keep track of jumped checkers\n self._jumped_checkers = [self.get_checker(neighbors[0])]\n logger.debug(u'list_jumps(): _jumped_checkers={}'.format(self._jumped_checkers))\n #self._jumped_checkers.append(self.get_checker(neighbors[0]))\n\n # Add target squares to the jump chain\n self._add_jump_square(neighbors[1])\n\n logger.debug(u'list_jumps(): Jumped square={}'.format(neighbors[0]))\n logger.debug(u'list_jumps(): Jump to square={}'.format(neighbors[1]))\n\n logger.debug(u'list_jumps(): _list_of_jump_chains={}'.format(self._list_of_jump_chains))\n return self._list_of_jump_chains", "title": "" }, { "docid": "6aa1f4d1f4764a1dca77e3935334498a", "score": "0.6093265", "text": "def _get_all_moves_jumpers(self, pos, vectors):\n moves = []\n\n for x, y in vectors:\n move = (pos[0] + x, pos[1] + y)\n if 0 <= move[0] <= 7 and 0 <= move[1] <= 7:\n moves.append([move])\n\n return moves", "title": "" }, { "docid": "0d80d026ac8d1c26273c2fdde6b27777", "score": "0.60365224", "text": "def calculate_possible_moves(self):\n self.possible_moves.clear()\n if self.colour == \"White\":\n self.possible_moves.append([self.position[0]-1, self.position[1]])\n else:\n self.possible_moves.append([self.position[0]+1, self.position[1]])", "title": "" }, { "docid": "3e5cd7b40790b162a8d4756c1af58c51", "score": "0.5993683", "text": "def calculate_possible_moves(self):\n self.possible_moves = []\n for i in range(8):\n if self.position[1] != i:\n self.possible_moves.append([self.position[0], i])\n if self.position[0] != i:\n self.possible_moves.append([i, self.position[1]])", "title": "" }, { "docid": "e95edec27fcd6bb31ae7953510ab72a5", "score": "0.59000725", "text": "def _add_jump_square(self, square):\n\n logger.debug(u'_add_jump_square({})'.format(square))\n\n self._jump_chain.append(square)\n logger.debug(u'_add_jump_square(): self._jump_chain={}'.format(self._jump_chain))\n\n end_of_jump_chain = True\n\n # Check for more jumps from new position\n for neighbors in self.get_move_squares(square):\n if self.valid_jump(neighbors):\n end_of_jump_chain = False\n\n # Keep track of jumped checkers\n self._jumped_checkers.append(self.get_checker(neighbors[0]))\n\n # Add target squares to the jump chain\n self._add_jump_square(neighbors[1])\n\n logger.debug(u'_add_jump_square(): Jump checker={}'.format(neighbors[0]))\n logger.debug(u'_add_jump_square(): Jump to square={}'.format(neighbors[1]))\n\n # If end of a jump chain\n if end_of_jump_chain:\n logger.debug(u'_add_jump_square(): end of jump chain')\n\n self._list_of_jump_chains.append(deepcopy(self._jump_chain))\n logger.debug(u'_add_jump_square(): self._list_of_jump_chains={}'.format(self._list_of_jump_chains))\n\n self._jump_chain.pop()\n logger.debug(u'_add_jump_square(): self._jump_chain={}'.format(self._jump_chain))\n\n self._jumped_checkers.pop()\n logger.debug(u'_add_jump_square(): self._jumped_checkers={}'.format(self._jumped_checkers))\n\n return", "title": "" }, { "docid": "71e1d03eb5df38a61dded7181c0e43e3", "score": "0.5870563", "text": "def move(self):\n #The moves data structure store moves for all nodes\n 
moves = dict()\n #Iterate over all munchers\n for muncher in self.m:\n #Record the move of this muncher\n move = muncher.attempt(self.g)\n if move != None:\n i = self.g[muncher.i][move].i\n if moves.get(i) == None:\n moves[i] = list()\n moves[i].append(muncher)\n #Resolve conflicts\n self.m = list()\n for i in moves:\n #Random select\n j = random.randint(0,len(moves[i]) - 1)\n #Make the move\n muncher = moves[i][j]\n muncher.move(self.g)\n #Record as a live muncher\n self.m.append(muncher)", "title": "" }, { "docid": "3c63692116f6f33abcd284e4af096867", "score": "0.58683795", "text": "def check_jump_move(self):\n\n\t\ttry:\n\t\t\tself.jumped_piece = self.canvas.find_overlapping(((self.square_cords[0] + 10) + (self.pc_cords[0] + 10))/2,\\\n\t((self.square_cords[1] + 10) + (self.pc_cords[1] + 10))/2,\\\n\t((self.square_cords[2] - 10) + (self.pc_cords[2] - 10))/2,\\\n\t((self.square_cords[3] - 10) + (self.pc_cords[3] - 10))/2)[1]\n\t\texcept IndexError:\n\t\t\tself.square = None\n\t\t\tself.jumped_piece = None\n\t\t\tself.canvas.itemconfig('SQUARE', outline = 'white', width = 1)\n\n\t\tvector = [i - j for i,j in zip(self.square_cords, self.pc_cords)]\n\n\n\t\tif self.canvas.itemcget(self.jumped_piece, 'fill') != self.state:\n\n\t\t\tif self.canvas.itemcget(self.piece, 'outline') != 'blue':\n\t\t\t\tif self.state == 'maroon':\n\t\t\t\t\tif all(abs(x) == 2 * self.size for x in vector) and all(x > 0 for x in vector[1::2]):\n\t\t\t\t\t\treturn 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.text.set('You must jump!')\n\t\t\t\t\t\treturn 1\n\t\t\t\telse:\n\t\t\t\t\tif all(abs(x) == 2 * self.size for x in vector) and all(x < 0 for x in vector[1::2]):\n\t\t\t\t\t\treturn 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.text.set('You must jump!')\n\t\t\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\t\tif all(abs(x) == 2 * self.size for x in vector):\n\t\t\t\t\t\treturn 0\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.text.set('You must jump!')\n\t\t\t\t\t\treturn 1", "title": "" }, { "docid": "0afbed7b0fa037793591ce1573ac1d00", "score": "0.58523726", "text": "def calculate_possible_moves(self):\n raise NotImplementedError", "title": "" }, { "docid": "ed56ceb57d9ee516c46a3687893019ea", "score": "0.5833667", "text": "def steps(step_list, jump_steps):\n if jump_steps is None:\n return step_list\n new_step_list = list()\n for step in step_list:\n new_step = step\n for jump in jump_steps:\n if step >= jump[0]:\n new_step += jump[1]\n new_step_list.append(new_step)\n return new_step_list", "title": "" }, { "docid": "68ef11cceb71c37f807b65d2792c37b6", "score": "0.5831753", "text": "def do_move(self):\n\t\t\n\t\tfor pc in self.jumpable:\n\t\t\tself.canvas.tag_unbind(pc, '<1>')\n\n\t\t#this part actually does the move\n\t\tself.piece_offset = int(self.size * .20)\n\t\tif self.square != None:\n\t\t\tself.new_x1 = self.canvas.coords(self.square)[0] + self.piece_offset\n\t\t\tself.new_y1 = self.canvas.coords(self.square)[1] + self.piece_offset\n\t\t\tself.new_x2 = self.canvas.coords(self.square)[2] - self.piece_offset\n\t\t\tself.new_y2 = self.canvas.coords(self.square)[3] - self.piece_offset\n\t\t\tself.canvas.coords(self.piece, self.new_x1, self.new_y1, self.new_x2, self.new_y2)\n\n\t\t\t#calls the remove piece function. 
If there is no piece to be removed, pass.\n\t\t\ttry:\n\t\t\t\tself.remove_jumped_piece()\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\n\t\t\t#assigns the variables for some functions called below\n\n\t\t\tself.canvas.itemconfig(self.piece_squares, outline = 'white', width = 1)\n\n\t\t\t#stat update\n\t\t\tself.text.set(self.state[0].upper() + self.state[1:] + \"'s\" + ' turn.')\n\t\t\tself.historybox.insert(self.history_len, 'Move' + ' ' + str(self.history_len + 1) + ':' + ' ' + 'Sq ' + str(self.piece_squares) + ' ' + 'to' + ' ' + 'Sq.' + ' ' + str(self.square))\n\t\t\tself.history_len += 1\n\n\t\t\t#multi-jump checker\n\t\t\t\n\t\t\tself.jumpable = []\n\t\t\tif self.check_for_jumps(self.piece) and self.jumped:\n\n\t\t\t\tself.jumped = 0\n\t\t\t\tself.canvas.tag_unbind(self.state, '<1>')\n\t\t\t\tself.canvas.tag_bind(self.piece, '<1>', self.piece_clicked)\n\t\t\t\tself.square = None\n\t\t\t\treturn\n\n\t\t\tself.jump_tag.pack_forget()\n\n\t\t\t#game over check\n\t\t\tself.gameover()\n\t\n\t\t\t#turn switch\n\t\t\tself.canvas.tag_unbind(self.state, '<1>')\n\t\t\tself.turn_switcher()\n\t\t\t\n\t\t\tself.setup_newturn()", "title": "" }, { "docid": "18f5990c9fb7ae46127ac37cd44bb081", "score": "0.58083373", "text": "def calculate_possible_moves(self):\n self.possible_moves = []\n potential_moves = [[self.position[0] + 2, self.position[1] + 1],\n [self.position[0] + 2, self.position[1] - 1],\n [self.position[0] - 2, self.position[1] + 1],\n [self.position[0] - 2, self.position[1] - 1],\n [self.position[0] + 1, self.position[1] + 2],\n [self.position[0] + 1, self.position[1] - 2],\n [self.position[0] - 1, self.position[1] + 2],\n [self.position[0] - 1, self.position[1] - 2]]\n for move in potential_moves:\n if 7 >= move[0] >= 0 and 7 >= move[1] >= 0:\n self.possible_moves.append(move)", "title": "" }, { "docid": "614313014e71483bd5775b9e83f670d1", "score": "0.580192", "text": "def make_command_list(self, move:PieceMove) -> [Action]:\n\t\tself.loop_count += 1\n\t\tinstruction_list = []\n\t\tif self.spaces[move.start.as_tuple()] not in self.occupied_spaces:\n\t\t\tprint(str(move.start) + \" is not an occupied space\")\n\t\t\treturn []\n\t\tif self.loop_count < 5:\n\t\t\tself.occupied_spaces -= {move.start}\n\t\t\tpath = self.find_path(move.start, move.end)\n\t\t\tlast = move.start\n\t\t\tinstruction_list.append(Action().PenDown())\n\t\t\tinstruction_list.append(Action().GotoCoord(move.start))\n\t\t\tinstruction_list.append(Action().PenUp())\n\n\t\t\tfor node in path[1:]:\n\t\t\t\tif node in self.occupied_spaces:\n\t\t\t\t\tinstruction_list = instruction_list + self.make_way(last, node, path)\n\t\t\t\tinstruction_list.append(Action().GotoCoord(node))\n\t\t\t\tlast = node\n\n\t\t\tinstruction_list.append(Action().PenDown())\n\t\t\tself.occupied_spaces.add(move.end)\n\t\t\tif self.made_way_flag:\n\t\t\t\tprint(\"returning piece...\")\n\t\t\t\tprint(\"Coords:\", self.made_way_coord, \"\\nSpaces:\", self.contested_space)\n\t\t\t\tinstruction_list = instruction_list + self.return_moved()\n\t\telse:\n\t\t\traise Exception(\"Recursion limit reached; no path available\")\n\t\t# self.print_board()\n\t\treturn instruction_list", "title": "" }, { "docid": "b881f469d8fb7b7f44ecb9f0e27a2edf", "score": "0.57995707", "text": "def generate_possible_moves(self):\n ...", "title": "" }, { "docid": "fcc4770a68ef73b238adcf6f67e2f993", "score": "0.5764538", "text": "def adjacent_moves(self):\n moves = []\n if world.tile_exists(self.x + 1, self.y):\n moves.append(actions.MoveEast())\n if world.tile_exists(self.x - 1, 
self.y):\n moves.append(actions.MoveWest())\n if world.tile_exists(self.x, self.y - 1):\n moves.append(actions.MoveNorth())\n if world.tile_exists(self.x, self.y + 1):\n moves.append(actions.MoveSouth())\n return moves", "title": "" }, { "docid": "fcc4770a68ef73b238adcf6f67e2f993", "score": "0.5764538", "text": "def adjacent_moves(self):\n moves = []\n if world.tile_exists(self.x + 1, self.y):\n moves.append(actions.MoveEast())\n if world.tile_exists(self.x - 1, self.y):\n moves.append(actions.MoveWest())\n if world.tile_exists(self.x, self.y - 1):\n moves.append(actions.MoveNorth())\n if world.tile_exists(self.x, self.y + 1):\n moves.append(actions.MoveSouth())\n return moves", "title": "" }, { "docid": "c0df36580a50b9a04b9b2e47bbf0f528", "score": "0.5747414", "text": "def nextMoves(self):\n \n movable = []\n # get row and column of the empty piece\n r, c = self.findval(self.mat8,0)\n \n # find which pieces can move there\n if c > 0:\n movable.append((r, c - 1))\n if r > 0:\n movable.append((r - 1, c))\n if c < 2:\n movable.append((r, c + 1))\n if r < 2:\n movable.append((r + 1, c))\n\n return movable", "title": "" }, { "docid": "8b7c6a310bf7091dfae12d0f753aad9f", "score": "0.5746363", "text": "def __legal_moves(self, smoves, kloc):\n moves = []\n nloc = (kloc[0] + 2, kloc[1] - 1)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] + 2, kloc[1] + 1)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] + 1, kloc[1] + 2)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] - 1, kloc[1] + 2)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] - 2, kloc[1] + 1)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] - 2, kloc[1] - 1)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] - 1, kloc[1] - 2)\n if nloc in smoves: moves.append(nloc)\n nloc = (kloc[0] + 1, kloc[1] - 2)\n if nloc in smoves: moves.append(nloc)\n return moves", "title": "" }, { "docid": "0911261dd3e9ee58397303cb06ee109c", "score": "0.5745605", "text": "def get_possible_moves(self) -> list:\n raise NotImplementedError", "title": "" }, { "docid": "c3377ae04da788e8cf75c92b400625b6", "score": "0.57395506", "text": "def computer_moves(self):\n y = self.__computer.get_next_move()\n self.__table.add_move(y)", "title": "" }, { "docid": "47f4aa1323e87bb7a7081bca3c316f04", "score": "0.57309806", "text": "def addMove(self, move):\n mx = move[0]\n my = move[1]\n\n def getRange(x):\n if x >= 0: \n return range(0,self.boardSize-x) \n return range(abs(x), self.boardSize)\n\n for x in getRange(mx):\n for y in getRange(my):\n self.link((x,y), (x+mx, y+my))", "title": "" }, { "docid": "fca584595f7feff261b42fdc9d1bb6bc", "score": "0.5707442", "text": "def calculate_possible_moves(self):\n self.possible_moves.clear()\n potential_moves = []\n potential_moves.append([self.position[0]+1, self.position[1]+1])\n potential_moves.append([self.position[0]+1, self.position[1]-1])\n potential_moves.append([self.position[0]+1, self.position[1]])\n potential_moves.append([self.position[0]-1, self.position[1]+1])\n potential_moves.append([self.position[0]-1, self.position[1]-1])\n potential_moves.append([self.position[0]-1, self.position[1]])\n potential_moves.append([self.position[0], self.position[1]+1])\n potential_moves.append([self.position[0], self.position[1]-1])\n for move in potential_moves:\n if move[0] <= 7 and move [0] >= 0 and move[1] <= 7 and move[1] >= 0:\n self.possible_moves.append(move)", "title": "" }, { "docid": "3f8530bb67c2fbba4ef486143cd12c53", "score": "0.56823015", "text": "def list_moves(self):\n\n 
moves = []\n\n # Neighboring squares without a checker are possible moves\n #for move_square, jump_square in self.get_move_squares():\n for move_squares in self.get_move_squares():\n if move_squares:\n move_square, jump_square = move_squares\n if move_square and self.get_checker(move_square) == None:\n moves.append((self.position, move_square))\n\n logger.debug(u'list_moves(): moves[]={}'.format(moves))\n return moves", "title": "" }, { "docid": "4ea07f2ecbd43b1edfb7d1f8bdabc3fc", "score": "0.5658222", "text": "def turn_manager(self):\n\n\t\tif self.jumpable:\n\t\t\tif self.check_jump_move() == 1:\n\t\t\t\tself.canvas.itemconfig(self.piece_squares, outline = 'white', width = 1)\n\n\t\t#emptyness check if jump check is good\n\t\t\telif not self.check_jump_move():\n\t\t\t\tif self.square_empty():\n\t\t\t\t\tself.jumped = 1\n\t\t\t\t\tself.do_move() \n\n\t\t\t\telse:\n\t\t\t\t\tself.canvas.itemconfig(self.piece_squares, outline = 'white', width = 1)\n\t\telse:\n\t\t#no jump\n\t\t\tself.checks_move()", "title": "" }, { "docid": "12dec89cd3ae85ed796a98ddbad6ea2d", "score": "0.56568414", "text": "def move(self):\n self.get_moves()\n direction = self.get_best_direction()\n self.trail.append(direction)\n prev_state = self.state\n self.state = tuple(map(operator.add,self.state,self.directions[direction]))\n ev = {direction: self.get_expected_value(self.space.grid[self.state])}\n self.move_options[prev_state].update(ev)", "title": "" }, { "docid": "db8bf7ffa512fbc01cc53d7b739d0425", "score": "0.5645012", "text": "def get_all_moves(self, pos):\n vectors = [(1, 2), (-1, 2), (1, -2), (-1, -2),\n (2, 1), (-2, 1), (2, -1), (-2, -1)]\n return self._get_all_moves_jumpers(pos, vectors)", "title": "" }, { "docid": "bafc3914ee094fd0af05b7c68e46c851", "score": "0.56442994", "text": "def get_all_moves(self, pos):\n vectors = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (-1, 1), (1, -1), (-1, -1)]\n return self._get_all_moves_jumpers(pos, vectors)", "title": "" }, { "docid": "43ba589ac49ca810f52a77fa0819c1c1", "score": "0.56230974", "text": "def make_move(self, curr_state, chosen_move, reward):\n self.game_records.append([curr_state, chosen_move, reward])", "title": "" }, { "docid": "ed057c8dbf49a696df6cfe1a207040dc", "score": "0.56036836", "text": "def get_moves(self):\n moves = []\n # get the position of the empty tile\n empty_r, empty_c = self.get_pos('0')\n\n # calculate states for available adjacent positions\n\n # Make sure that the current row is not the first row\n if empty_r > 0:\n # Copy the original state to create a new state\n new_state = copy.deepcopy(self.state)\n # Replace the position of the empty tile with the one below it\n new_state[empty_r][empty_c], new_state[empty_r - 1][empty_c] = new_state[empty_r - 1][empty_c], '0'\n # Create a new Board with the new state\n board_move_down = Board(new_state, self.goal_state)\n # Append the object with the action and state to the list\n moves.append({\"action\": \"U\", \"board\": board_move_down})\n\n # Make sure the current column is not the first column\n if empty_c > 0:\n # Copy the original state to create a new state\n new_state = copy.deepcopy(self.state)\n # Replace the position of the empty tile with the one to the left\n new_state[empty_r][empty_c], new_state[empty_r][empty_c - 1] = new_state[empty_r][empty_c - 1], '0'\n # Create a new Board with the new state\n board_move_right = Board(new_state, self.goal_state)\n # APpend the object with the action and state to the list\n moves.append({\"action\": \"L\", \"board\": board_move_right})\n\n # 
Make cure the current column is not the last column\n if empty_c < len(self.state[0]) - 1:\n # Copy the original state to create a new state\n new_state = copy.deepcopy(self.state)\n # Replace the position of the empty tile with the one to the right\n new_state[empty_r][empty_c], new_state[empty_r][empty_c + 1] = new_state[empty_r][empty_c + 1 ], '0'\n # Create a new Board with the new state\n board_move_left = Board(new_state, self.goal_state)\n # Append the object with the action and state to the list\n moves.append({\"action\": \"R\", \"board\": board_move_left})\n\n # Make sure the current row is now the last row\n if empty_r < len(self.state) - 1:\n # Copy the original state to create a new state\n new_state = copy.deepcopy(self.state)\n # Replace the position of the empty tile with the one above\n new_state[empty_r][empty_c], new_state[empty_r + 1][empty_c] = new_state[empty_r + 1][empty_c], '0'\n # Create a new board with the new state\n board_move_up = Board(new_state, self.goal_state)\n # Append the object with the action and state to the list\n moves.append({\"action\": \"D\", \"board\": board_move_up})\n\n return moves", "title": "" }, { "docid": "c2a3a36070801d7ba11a6c201bde044d", "score": "0.5582965", "text": "def move(self, new_positions):\n pass", "title": "" }, { "docid": "523f1ff0591433849e33f357cb7840a0", "score": "0.5582192", "text": "def possible_moves(self):\n possible_targets = []\n ## The bishop like movements.\n b1 = self.y - self.x\n b2 = self.y + self.x\n x_y_possible_values=[0,1,2,3,4,5,6,7]\n # for each x compute the correspondant y value that are in the lines of the bishop's movement.\n # check they are integers among 0 and 7.\n for x_value in x_y_possible_values:\n y1 = x_value + b1\n y2 = -x_value + b2\n if y1 in x_y_possible_values:\n possible_targets.append((x_value, 7 - y1))\n if y2 in x_y_possible_values:\n possible_targets.append((x_value, 7 - y2))\n ## add the rock like movement.\n for value in range(8):\n # vertical movement.\n if value != self.y:\n possible_targets.append((self.x, value))\n # horizontal movement.\n if value != self.x:\n possible_targets.append((value, self.y))\n # The bishop line have a cell in common. 
so let's delete the duplicates in the output.\n return list(dict.fromkeys(possible_targets))", "title": "" }, { "docid": "8ab884768cdb7bc10a7b4d5e9f35d850", "score": "0.5520593", "text": "def jumped(self, jumps=1): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "15b8bba32924ebcff2d248a30ce3a5cc", "score": "0.5499119", "text": "def move(self, cell):\n cell.new_population[1].append(self)", "title": "" }, { "docid": "0b9de455375f44969a80c10c9b315e62", "score": "0.5490929", "text": "def request_allowed_moves(self):\n self.emit('allowed_moves')", "title": "" }, { "docid": "7cbd96a6edba1b329fa2aa6e051c181a", "score": "0.5434331", "text": "def move(self, cell):\n cell.new_population[0].append(self)", "title": "" }, { "docid": "21f3f904ad1855c6ce7ebb201f2aa4c8", "score": "0.5431746", "text": "def _generate_moves(self, moves):\n changed_positions = moves_to_positions(moves)\n\n if self._num_moves + len(moves) < MAX_CARDS:\n return self._generate_add_moves(changed_positions)\n return self._generate_recycle_moves(moves, changed_positions)", "title": "" }, { "docid": "96ed2d312acf63bc9b30aea5b027d6d5", "score": "0.54261875", "text": "def remember_actions(self, board_state, move):\n \n self.states.append(self.get_hashable_state((board_state)))\n self.moves.append(move)", "title": "" }, { "docid": "5f8648291789a1d074977278ea475d92", "score": "0.54240716", "text": "def availableMoves(self):\n moves = []\n for i in range(0, len(self.board)):\n if self.board[i] == \" \":\n moves.append(i)\n return moves", "title": "" }, { "docid": "5f615200682dc63f5b9a2e4ea0191419", "score": "0.5415462", "text": "def move(self, check, delta_x, delta_y, arg):\n # Check for moving multiple times.\n if arg.isdigit():\n times = int(arg)\n else:\n times = 1\n for movement in range(times):\n # Check for a valid move\n if check not in self.current:\n self.ow()\n break\n # Update the player's postion.\n self.x += delta_x\n self.y += delta_y\n self.current = self.map[self.y][self.x]\n print('moving...')\n # Check for solving the maze.\n if (self.x, self.y) == self.end:\n print('You made it out of the maze!')\n return True\n # Show the next location if not solved.\n self.show_directions()", "title": "" }, { "docid": "13a5a1d9bed22664ce56c8686c1b5ad7", "score": "0.5396579", "text": "def player_moves(self, y):\n self.__table.add_move(y)", "title": "" }, { "docid": "1a8f1c52337fed39a07b75f3a68aa63b", "score": "0.5382325", "text": "def get_all_possible_moves(self):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. 
For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append([m[0], m[1]]) # Add the move and the state to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append('p')\n return move_list", "title": "" }, { "docid": "c2f04158c14764c51b7dcf8151ddd41e", "score": "0.538123", "text": "def legal_moves(self, state):\n pass", "title": "" }, { "docid": "f0d1a7aa0ce23e5aa98aa3375cc6a2b6", "score": "0.5377578", "text": "def getkingjumpmoves(board, tile, rc=3):\n iboard = board[:]\n rlist = []\n if board[tile] == REDKING:\n filtertype = (BLACK, BLACKKING)\n elif board[tile] == BLACKKING:\n filtertype = (RED, REDKING)\n else:\n # tile is not a king\n return []\n\n if rc == 0: # limit recursion\n return []\n\n rlist += getdownjumpmoves(iboard, tile, filtertype, rc=1)\n rlist += getupjumpmoves(iboard, tile, filtertype, rc=1)\n\n for i, ritem in enumerate(rlist):\n glist = getkingjumpmoves(makemove(iboard, ritem), ritem[1], rc - 1)\n for gst, get, gjt in glist:\n rlist[i] = (ritem[0], get, ritem[2] + gjt)\n\n return rlist", "title": "" }, { "docid": "3b60696bf2e535fcb0cf41abd647ad67", "score": "0.53616613", "text": "def getAllCurrentAvailableMoves(self, verbose=False):\n pawn = self.player[0]\n moves = []\n if not self.isDone():\n\n\n for row in self.board:\n for col in self.board[row]:\n if self.board[row][col][1] == pawn:\n if verbose: print(f'Pawn on {(row, col)} can move to:')\n dests = self.getAvailableMoves((row, col))\n if verbose:print(dests)\n moves += [((row, col), dest) for dest in dests]\n return moves", "title": "" }, { "docid": "4d51f90720df422944723c54c32d7e87", "score": "0.53596205", "text": "def set_valid_moves(self):\n pass", "title": "" }, { "docid": "34f67dbdac90208931c7df403ce4f34e", "score": "0.5356568", "text": "def can_move(self, coord):\n [x, y] = coord\n move_list = [None] * 6\n # check if a move or jump can be made to the east\n if self.is_empty_space((x + 1, y)):\n move_list[DIRECTION_E] = (x + 1, y)\n elif self.is_empty_space((x + 2, y)):\n move_list[DIRECTION_E] = (x + 2, y)\n # check if a move or jump can be made to the southeast\n if self.is_empty_space((x + 1, y - 1)):\n move_list[DIRECTION_NE] = (x + 1, y - 1)\n elif self.is_empty_space((x + 2, y - 2)):\n move_list[DIRECTION_NE] = (x + 2, y - 2)\n # check if a move or jump can be made to the southwest\n if self.is_empty_space((x, y + 1)):\n move_list[DIRECTION_SE] = (x, y + 1)\n elif self.is_empty_space((x, y + 2)):\n move_list[DIRECTION_SE] = (x, y + 2)\n # check if a move or jump can be made to the west\n if self.is_empty_space((x - 1, y)):\n move_list[DIRECTION_W] = (x - 1, y)\n elif self.is_empty_space((x - 2, y)):\n move_list[DIRECTION_W] = (x - 2, y)\n # check if a move or jump can be made to the northwest\n if self.is_empty_space((x - 1, y + 1)):\n move_list[DIRECTION_SW] = (x - 1, y + 1)\n elif self.is_empty_space((x - 2, y + 2)):\n move_list[DIRECTION_SW] = (x - 2, y + 2)\n # check if a move or jump can be made to the northeast\n if self.is_empty_space((x, y - 1)):\n move_list[DIRECTION_NW] = (x, y - 1)\n elif self.is_empty_space((x, y - 2)):\n move_list[DIRECTION_NW] = (x, y - 2)\n return move_list", "title": "" }, { "docid": "0e5c16bb4891e6c79a628bfc139c81fd", "score": "0.5344572", "text": "def possible_moves(self):\n possible_targets = []\n for value in range(8):\n # vertical movement.\n if value != self.y:\n possible_targets.append((self.x, value))\n # horizontal movement.\n if value != self.x:\n possible_targets.append((value, 
self.y))\n print(\" possible_targets : \",possible_targets)\n return list(dict.fromkeys(possible_targets))", "title": "" }, { "docid": "166d24786b418bf863b9d28fed043843", "score": "0.53428304", "text": "def getpossiblemoves(board, tile):\n\n # return list\n rlist = []\n\n # get jump moves\n # red\n if board[tile] == RED:\n jlist = getupjumpmoves(board, tile)\n if jlist:\n return jlist # tile has to jump\n # black\n if board[tile] == BLACK:\n jlist = getdownjumpmoves(board, tile)\n if jlist:\n return jlist # tile has to jump\n # red king\n if board[tile] == REDKING:\n jlist = getkingjumpmoves(board, tile)\n if jlist:\n return jlist # tile has to jump\n # black king\n if board[tile] == BLACKKING:\n jlist = getkingjumpmoves(board, tile)\n if jlist:\n return jlist # tile has to jump\n\n # get non-jump moves\n # red is going up\n if board[tile] == RED or board[tile] == BLACKKING or board[tile] == REDKING:\n rlist += getupmoves(board, tile)\n if board[tile] == BLACK or board[tile] == REDKING or board[tile] == BLACKKING:\n rlist += getdownmoves(board, tile)\n\n return rlist", "title": "" }, { "docid": "72049bd20586f9fc345c1292fddc9d51", "score": "0.53391165", "text": "def execute_move(self, move):\n b = move.beginning\n e = move.end\n self.board[e[0]][e[1]] = self.board[b[0]][b[1]] # new territory, conquered!!\n self.board[b[0]][b[1]] = 1 # deserted land left in the wake of battle\n self.board[e[0]][e[1]].location = e # update the piece instance's location attribute\n\n if isinstance(move, moves.Jump):\n for piece in move.eliminated: # removed jumped pieces\n row, col = piece.location\n self.board[row][col] = 1\n self.draw_counter = 0\n else:\n self.draw_counter += 1\n\n self._check_king(self.board[e[0]][e[1]])", "title": "" }, { "docid": "125a28385e05c2599bbad5494a721ff6", "score": "0.53381747", "text": "def jumpUp():\n global OpenStates, newStates\n position = 0\n \n while position < 7:\n value = ActualState[position]\n # if is down arrow and there is a space below and the space is empty so can move\n if (value > 0 and position - 2 >= 0 and ActualState[position-2] == 0 and ActualState[position-1] == -1):\n #Calc new state\n temporalState = ActualState[:]\n temporalState[position] = 0\n temporalState[position-2] = 1\n temporalState[7] = step\n temporalState[8] = JUMP_UP\n temporalState[9] = position\n\n #Check if new state is not in closed states\n exists = False\n for x in ClosedStates:\n if x == temporalState:\n exists = True\n #If state was not in closed states add it to open states\n if exists == False:\n OpenStates.append(temporalState)\n newStates = newStates + 1\n position = position + 1 \n return newStates", "title": "" }, { "docid": "6704f468b0a02ea17e127a67e890bd5a", "score": "0.5335666", "text": "def enumerate_moves(self):\n raise NotImplementedError()", "title": "" }, { "docid": "720cc57342763eff437ae2703eba68c9", "score": "0.5335276", "text": "def _add_dummy_start_end_to_moves(\n self, move_list: List[Move[AxisKey]]\n ) -> List[Move[AxisKey]]:\n start_move = Move.build_dummy(move_list[0].unit_vector.keys())\n end_move = Move.build_dummy(move_list[0].unit_vector.keys())\n return [start_move] + move_list + [end_move]", "title": "" }, { "docid": "63495fce12107f5d78a865adb07e7162", "score": "0.532062", "text": "def possible_moves(self):\n possible_targets = []\n # bihsop's movement are according two lines. y = ax + b. 
with a = 1 for the first one and a = -1 for the second.\n # the value of b depends on the bishop initial position.\n # compute b1 and b2\n b1 = self.y - self.x\n b2 = self.y + self.x\n x_y_possible_values=[0, 1, 2, 3, 4, 5, 6, 7]\n # for each x compute the correspondant y value that are in the lines of the bishop's movement.\n # check they are integers among 0 and 7.\n for x_value in x_y_possible_values:\n y1 = x_value + b1\n y2 = -x_value + b2\n if y1 in x_y_possible_values:\n possible_targets.append((x_value, 7-y1))\n if y2 in x_y_possible_values:\n possible_targets.append((x_value, 7-y2))\n return list(dict.fromkeys(possible_targets))", "title": "" }, { "docid": "0f8b64850c2a50e9001c20a1124c09a1", "score": "0.5311795", "text": "def possible_moves(self):\n possible_targets = []\n x_y_possible_values=[0, 1, 2, 3, 4, 5, 6, 7]\n if self.x + 1 in x_y_possible_values:\n possible_targets.append((self.x + 1, self.y))\n if self.y+1 in x_y_possible_values:\n possible_targets.append((self.x+1, self.y+1))\n if self.y - 1 in x_y_possible_values:\n possible_targets.append((self.x + 1, self.y - 1))\n\n if self.x - 1 in x_y_possible_values:\n possible_targets.append((self.x - 1, self.y))\n if self.y+1 in x_y_possible_values:\n possible_targets.append((self.x - 1, self.y+1))\n if self.y - 1 in x_y_possible_values:\n possible_targets.append((self.x - 1, self.y - 1))\n\n if self.y + 1 in x_y_possible_values:\n possible_targets.append((self.x, self.y + 1))\n if self.y - 1 in x_y_possible_values:\n possible_targets.append((self.x, self.y - 1))\n\n return list(dict.fromkeys(possible_targets))", "title": "" }, { "docid": "f0e4ddf775f1003e046208e1dcccbaa8", "score": "0.53079265", "text": "def calculate_moves(self, position, already_coords=False):\n row, col = self._convert_checker_coord(position) if already_coords == False else position\n piece = self.board[row][col]\n moves = piece.calculate_jump_moves(self.board) # If there are jump moves, we must do them!\n if not moves:\n return piece.calculate_simple_moves(self.board) # if no jump moves, then return simple moves\n return moves", "title": "" }, { "docid": "1703d0495a40e793ca9981d2544abdcd", "score": "0.53055096", "text": "def select_Moves(self, _state, logger=None):\n # INIT \n #mCntr = Counter(f=0, k=0, s=0)\n stop = Counter(i=1)\n _top = _state.seeHeads()\n if logger:\n logger.debug('Beg:{0}:{1}'.format( self.tag, _top ))\n \n _has_mov = True # for sure one pass\n # MAIN\n while _has_mov:\n stop['i'] += 1 #TESTING Cntr\n while _state.fndMoves(_state.partial_tbl_HeadsL): #do a whole seq if possible.\n self.mCntr['f'] += 1\n movsL = _state.movesD['fnd']\n if logger:\n logger.debug(\"--fndMove.{} now sees {} fndMoves:{}...\".format(self.tag, len(movsL), movsL[:2]))\n _state.move(movsL[0], logger) # arbitary use [0]\n continue\n \n if _state.sibMoves(_state.partial_tbl_HeadsL): # do 1, then look other movs\n self.mCntr['s'] += 1\n movsL = _state.movesD['sib']\n if logger:\n logger.debug(\"--sibMove.{} now sees {} sibMoves:{}...\".format(self.tag, len(movsL), movsL[:1]))\n _state.move(movsL[0], logger)\n #continue #bypasses kngMove\n \n if _state.kngMoves(_state.partial_tbl_HeadsL): ###one for sure; maybe branch and play all Hands; \n self.mCntr['k'] += 1 \n movsL = _state.movesD['kng']\n if logger:\n msg = \"==== kngMove.{0} now sees {1} kngMoves:\".format( self.tag, len(movsL))\n for m in movsL:\n msg += \"\\n.................................{}\".format(m)\n logger.info(msg)\n \n # TESTING _state.move(movsL[0], logger)\n self.branch_kngMove(_state, movsL, 
logger)\n break \n #end while _has_mov: loop\n \n # EXIT \n if _state.isWin or _state.isStymied:\n if logger:\n _top = _state.seeHeads() \n logger.debug('End:{0}:{1}'.format( self.tag, _top)) \n break # the while _has_mov: loop.\n #TESTING EXIT\n stopMax = 40\n if stop['i'] >= stopMax: # TESTING RESTRAINT ONLY\n if logger:\n logger.error('\\nEXCEEDED STOP COUNT OF {0}\\n'.format(stopMax))\n break\n # FOR DEBUG\n pass \n _has_mov = _state.find_Moves()", "title": "" }, { "docid": "2c2fbff2451ef5013a8a308e0e7150ec", "score": "0.5304031", "text": "def _add_move(self,indices,player):\n if len(indices)==2:\n if indices[0]==indices[1]:\n indices = [indices[0]]\n num=len(indices)\n caught_clashes = False #turns true if all moves are safe clashes\n for existing_move in self.moves:\n for index in indices:\n if index in existing_move.indices:\n if len(existing_move.indices)==1:\n return 'overfull'\n #This move will ALWAYS be there, if it can.\n #hence, overfull.\n else:\n #captures any clash\n caught_clashes = True\n if caught_clashes:\n return self._add_clashing_move(indices,player)\n else:\n #Reach this section if there are no clashes at all\n if num==1:\n self.moves.append(Move(indices,player)) #No control needed\n return 'ok'\n else:\n self.q.size+=2 #indicator qubit, and move qubit\n q1 = self.q[self.q.size-2] #To make this readable...\n q2 = self.q[self.q.size-1]\n self.qc.h(q1) #the last qubit in register.\n self.qc.x(q2)\n self.qc.cx(q1,q2)\n self.moves.append(Move(indices,player,q1,q2))\n return 'ok'", "title": "" }, { "docid": "578d0ef00985267318a7b55d01ac31c1", "score": "0.52939963", "text": "def available_actions(self):\n moves = self.adjacent_moves()\n moves.append(actions.ViewInventory())\n \n return moves", "title": "" }, { "docid": "f2b68b4331bbbd70c612ce636ae41f62", "score": "0.52890354", "text": "def possible_moves(self):\n # From the provided example car_config.json file, the return value could be\n # [('O','d',\"some description\"),('R','r',\"some description\"),('O','u',\"some description\")]\n\n possible_moves_lst = []\n VERTICAL = 0\n HORIZONTAL = 1\n for car in self.car_lst:\n # print(car.get_name())\n car_first_coo = car.car_coordinates()[0]\n car_second_coo = car.car_coordinates()[1]\n car_name = car.get_name()\n if car_first_coo[0] == car_second_coo[0]:\n car_orientation = HORIZONTAL\n else:\n car_orientation = VERTICAL\n req_move_left = car.movement_requirements('l')[0]\n req_move_right = car.movement_requirements('r')[0]\n req_move_up = car.movement_requirements('u')[0]\n req_move_down = car.movement_requirements('d')[0]\n if car_orientation == HORIZONTAL:\n if req_move_right[1] < len(self.matrix[req_move_right[0]]):\n if self.cell_content(req_move_right) is None:\n possible_moves_lst.append((car_name, 'r', \"one step right\"))\n if req_move_left[1] >= 0:\n if self.cell_content(req_move_left) is None:\n possible_moves_lst.append((car_name, 'l', \"one step left\"))\n else:\n if req_move_down[0] < len(self.matrix):\n if self.cell_content(req_move_down) is None:\n possible_moves_lst.append((car_name, 'd', \"one step down\"))\n if req_move_up[0] >= 0:\n if self.cell_content(req_move_up) is None:\n possible_moves_lst.append((car_name, 'u', \"one step up\"))\n return possible_moves_lst", "title": "" }, { "docid": "872f758e93c87efde4194cbb90143ebb", "score": "0.5289001", "text": "def generate_possible_moves(self):\n directions = [Tile.Move.North,\n Tile.Move.East,\n Tile.Move.South,\n Tile.Move.West]\n actions = []\n for name in self._tiles:\n tile = self._tiles[name][0]\n for d 
in directions:\n if self.movable(name, d):\n actions.append((name, d))\n return actions", "title": "" }, { "docid": "acf4cf3893b9cb5636477c13fa8c5123", "score": "0.5273795", "text": "def get_possible_moves(self):\n return self.available", "title": "" }, { "docid": "1755dae0eae0dbc6b3133c1cc599b27e", "score": "0.52603155", "text": "def _generate_add_moves(self, changed_positions):\n for x in range(MAX_X):\n y = self._max_height_for_x(x, changed_positions)\n\n if y < MAX_Y - 1:\n yield from (Move(0, placement, x, y) for placement in [2, 4, 6, 8])\n\n if x < MAX_X - 1 and MAX_Y > y == self._max_height_for_x(x + 1, changed_positions):\n yield from (Move(0, placement, x, y) for placement in [1, 3, 5, 7])", "title": "" }, { "docid": "b9ac14e3c9045188dad747240adea4b7", "score": "0.5252167", "text": "def jump(self):\n if self.is_jump:\n return\n self.action = 'jump'\n self.is_jump = True\n self.dy = 8", "title": "" }, { "docid": "9e9fb8c863d6dcb79e36334591d8678f", "score": "0.52402943", "text": "def king(self):\n self.__is_king = True\n\n # expand the possible moves\n z = [1, -1]\n self.__possible_moves = [(x, y) for x in z for y in z]", "title": "" }, { "docid": "41417d6f278ac02691e1fce58d4b27f5", "score": "0.5236288", "text": "def init_moves(self, no_discs, no_rods):\n k = 0\n for i in xrange(no_discs):\n for j in xrange(no_rods):\n self.moves[k] = (i, j)\n k = k + 1", "title": "" }, { "docid": "a052b1ed9eb97c2d0166389703adb63f", "score": "0.52261287", "text": "def create_available_locs(self, loc):\n self.add_normal_moves(loc)\n self.add_jump_moves(loc)\n self.post(CreateAvailableLocs(self.available_locations), Conf.VIEW)", "title": "" }, { "docid": "a052b1ed9eb97c2d0166389703adb63f", "score": "0.52261287", "text": "def create_available_locs(self, loc):\n self.add_normal_moves(loc)\n self.add_jump_moves(loc)\n self.post(CreateAvailableLocs(self.available_locations), Conf.VIEW)", "title": "" }, { "docid": "51dfca08547e60711b44751576069548", "score": "0.5221337", "text": "def board_with_move(player_num, move, board):\n board[move].append(PLAYERS[player_num])\n return board", "title": "" }, { "docid": "60dcadf4c79058f0ab865955044018ed", "score": "0.5218414", "text": "def possible_moves(self):\n final_lst = []\n for car in self.car_list:\n if car.orientation == 0:\n if 0 <= car.movement_requirements('u')[0][0] <= 6 and \\\n 0 <= car.movement_requirements('u')[0][1] <= 6 \\\n and self.cell_content(car.movement_requirements('u')[0])==None:\n final_lst.append((car.get_name(), 'u', 'can go up'))\n if 0 <= car.movement_requirements('d')[0][0] <= 6 and \\\n 0 <= car.movement_requirements('d')[0][1] <= 6\\\n and self.cell_content(car.movement_requirements('d')[0])==None:\n final_lst.append((car.get_name(), 'd', 'can go down'))\n if car.orientation == 1:\n if 0 <= car.movement_requirements('r')[0][0] <= 6 and \\\n 0 <= car.movement_requirements('r')[0][1] <= 7 \\\n and self.cell_content(car.movement_requirements('r')[0])==None:\n final_lst.append((car.get_name(), 'r', 'can go right'))\n if 0 <= car.movement_requirements('l')[0][0] <= 6 and \\\n 0 <= car.movement_requirements('l')[0][1] <= 6\\\n and self.cell_content(car.movement_requirements('l')[0])==None:\n final_lst.append((car.get_name(), 'l', 'can go left'))\n return final_lst\n # From the provided example car_config.json file, the return value could be\n # [('O','d',\"some description\"),('R','r',\"some description\"),('O','u',\"some description\")]\n pass", "title": "" }, { "docid": "21cbb93084267576e6d791c1408287d0", "score": "0.5215948", "text": 
"def move(self, step):\n self.grid.clear_beam_area()\n self.update_front_of_players()\n self.respawn_apples(step)\n self.respawn_player(step)\n for player in self.player_list:\n player.reward = 0\n if not player.is_tagged:\n self.check_next_position(player)\n self.update_grid(player)\n self.collect_apple(player, step)\n self.check_if_using_beam(step)\n self.get_observation()", "title": "" }, { "docid": "5e35c330ffd9ab5ddc26bcdb00e07c60", "score": "0.51917976", "text": "def get_moves(self):\n if self.manual:\n return self.get_manual_moves()\n elif self.random:\n return self.strategy.get_random_move(self.map)\n elif self.bourrin:\n return self.strategy.get_bourrin_move(self.map)\n\n return self.strategy.get_next_move(self.map)", "title": "" }, { "docid": "d0c2e8c96af44424bc144083e2cf16c5", "score": "0.51899594", "text": "def jump(self):\r\n\r\n # move down a bit and see if there is a platform below us.\r\n # Move down 2 pixels because it doesn't work well if we only move down 1\r\n # when working with a platform moving down.\r\n self.rect.y += 2\r\n platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\r\n self.rect.y -= 2\r\n\r\n # If it is ok to jump, set our speed upwards\r\n if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:\r\n self.change_y = -9\r\n self.onAir = True", "title": "" }, { "docid": "6be9fd31c991ee454003c6decb2ea84c", "score": "0.5180287", "text": "def possible_moves(self):\n possible_targets = []\n if self.black_or_white == \"white\":\n if self.is_first_move:\n possible_targets.append((self.x, self.y + 1))\n possible_targets.append((self.x, self.y + 2))\n # Capture to left or right\n # if not the pawn in the far lest of Grid, the pawn can capture.\n if self.x != 0:\n possible_targets.append((self.x - 1, self.y + 1))\n # if not the pawn in the far right of Grid, the pawn can capture.\n if self.x != 7:\n possible_targets.append((self.x + 1, self.y + 1))\n\n # Not the first move.\n else:\n possible_targets.append((self.x, self.y + 1))\n # Capture to left or right\n # if not the pawn in the far lest of Grid, the pawn can capture.\n if self.x != 0:\n possible_targets.append((self.x - 1, self.y + 1))\n # if not the pawn in the far right of Grid, the pawn can capture.\n if self.x != 7:\n possible_targets.append((self.x + 1, self.y + 1))\n\n\n # when it's the black pawn.\n else:\n if self.is_first_move:\n possible_targets.append((self.x, self.y - 1))\n possible_targets.append((self.x, self.y - 2))\n # Capture to left or right\n # if not the pawn in the far lest of Grid, the pawn can capture.\n if self.x != 0:\n possible_targets.append((self.x - 1, self.y - 1))\n # if not the pawn in the far right of Grid, the pawn can capture.\n if self.x != 7:\n possible_targets.append((self.x + 1, self.y - 1))\n # Not the first move\n else:\n possible_targets.append((self.x, self.y - 1))\n # Capture to left or right\n # if not the pawn in the far lest of Grid, the pawn can capture.\n if self.x != 0:\n possible_targets.append((self.x - 1, self.y - 1))\n # if not the pawn in the far right of Grid, the pawn can capture.\n if self.x != 7:\n possible_targets.append((self.x + 1, self.y - 1))\n\n return possible_targets", "title": "" }, { "docid": "cd549b720b12a0649e07facb06b4f2aa", "score": "0.51787937", "text": "def GetMoves(self):\n if self.game.game_ended or self.game.players[0].hero.health <= 0 or self.game.players[1].hero.health <= 0:\n return []\n valid_moves = [] # Move format is [string, attacker/card, target, attacker/card index, target index, 
summoning index]\n\n if not self.game.pre_game_run and len(self.game.current_player.deck.cards) == 30 and len(self.game.other_player.deck.cards) == 30:\n valid_moves.append([MOVE.PRE_GAME])\n elif self.game.pre_game_run and len(self.game.current_player.deck.cards) == 30 and len(self.game.other_player.deck.cards) == 30 and self.game.current_player.max_mana == 0 and self.game._turns_passed == 0:\n valid_moves.append([MOVE.START_TURN])\n elif self.game.current_player.hero.character_class == hearthbreaker.constants.CHARACTER_CLASS.ALL:\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.DRUID])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.HUNTER])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.MAGE])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.PALADIN])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.PRIEST])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.ROGUE])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.SHAMAN])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.WARLOCK])\n valid_moves.append([MOVE.PICK_CLASS, hearthbreaker.constants.CHARACTER_CLASS.WARRIOR])\n elif len(self.game.current_player.deck.cards) < 30:\n if self.game.current_player.name == \"one\":\n owned_cards = []\n ### BASIC ###\n # Druid\n owned_cards.extend([Innervate(), Moonfire(), Claw(), MarkOfTheWild(), WildGrowth(), HealingTouch(),\n SavageRoar(), Swipe(), Starfire(), IronbarkProtector()])\n # Hunter\n owned_cards.extend([HuntersMark(), ArcaneShot(), Tracking(), TimberWolf(), AnimalCompanion(), KillCommand(),\n MultiShot(), Houndmaster(), StarvingBuzzard(), TundraRhino()])\n # Mage\n owned_cards.extend([ArcaneMissiles(), MirrorImage(), ArcaneExplosion(), Frostbolt(), ArcaneIntellect(),\n FrostNova(), Fireball(), Polymorph(), WaterElemental(), Flamestrike()])\n # Paladin\n owned_cards.extend([LightsJustice(), BlessingOfMight(), HandOfProtection(), Humility(), HolyLight(),\n TruesilverChampion(), BlessingOfKings(), Consecration(), HammerOfWrath(),\n GuardianOfKings()])\n # Priest\n owned_cards.extend([HolySmite(), MindVision(), PowerWordShield(), NorthshireCleric(), DivineSpirit(),\n MindBlast(), ShadowWordPain(), ShadowWordDeath(), HolyNova(), MindControl()])\n # Rogue\n owned_cards.extend([Backstab(), DeadlyPoison(), SinisterStrike(), Sap(), Shiv(), FanOfKnives(),\n AssassinsBlade(), Assassinate(), Vanish(), Sprint()])\n # Shaman\n owned_cards.extend([AncestralHealing(), TotemicMight(), FrostShock(), RockbiterWeapon(),\n hearthbreaker.cards.spells.shaman.Windfury(), FlametongueTotem(), Hex(), Windspeaker(),\n Bloodlust(), FireElemental()])\n # Warlock\n owned_cards.extend([SacrificialPact(), Corruption(), MortalCoil(), Soulfire(), Voidwalker(), Succubus(),\n DrainLife(), ShadowBolt(), Hellfire(), DreadInfernal()])\n # Warrior\n owned_cards.extend([Execute(), Whirlwind(), FieryWarAxe(), Cleave(), HeroicStrike(),\n hearthbreaker.cards.spells.warrior.Charge(), ShieldBlock(), WarsongCommander(),\n KorkronElite(), ArcaniteReaper()])\n # Neutral\n owned_cards.extend([ElvenArcher(), GoldshireFootman(), GrimscaleOracle(), MurlocRaider(), StonetuskBoar(),\n VoodooDoctor(), AcidicSwampOoze(), BloodfenRaptor(), BluegillWarrior(),\n FrostwolfGrunt(), KoboldGeomancer(), MurlocTidehunter(), NoviceEngineer(),\n RiverCrocolisk(), DalaranMage(), IronforgeRifleman(), IronfurGrizzly(), 
MagmaRager(),\n RaidLeader(), RazorfenHunter(), ShatteredSunCleric(), SilverbackPatriarch(),\n Wolfrider(), ChillwindYeti(), DragonlingMechanic(), GnomishInventor(), OasisSnapjaw(),\n OgreMagi(), SenjinShieldmasta(), StormwindKnight(), BootyBayBodyguard(),\n DarkscaleHealer(), FrostwolfWarlord(), GurubashiBerserker(), Nightblade(),\n StormpikeCommando(), Archmage(), BoulderfistOgre(), LordOfTheArena(),\n RecklessRocketeer(), CoreHound(), StormwindChampion(), WarGolem()])\n \n ### CLASSIC ###\n # Druid\n owned_cards.extend([PowerOfTheWild(), Wrath(), MarkOfNature(), KeeperOfTheGrove(), Starfall(),\n DruidOfTheClaw()])\n # Hunter\n owned_cards.extend([Snipe(), ScavengingHyena(), DeadlyShot(), UnleashTheHounds()])\n # Mage\n owned_cards.extend([IceLance(), ManaWyrm(), SorcerersApprentice(), IceBarrier(), EtherealArcanist(),\n Blizzard(), ArchmageAntonidas()])\n # Paladin\n owned_cards.extend([EyeForAnEye(), NobleSacrifice(), Repentance(), ArgentProtector()])\n # Priest\n owned_cards.extend([CircleOfHealing(), Silence(), InnerFire(), Thoughtsteal(), MassDispel(),\n HolyFire()])\n # Rogue\n owned_cards.extend([ColdBlood(), Conceal(), Betrayal(), Eviscerate()])\n # Shaman\n owned_cards.extend([ForkedLightning(), AncestralSpirit(), FarSight(), FeralSpirit()])\n # Warlock\n owned_cards.extend([PowerOverwhelming(), FlameImp(), Demonfire(), SenseDemons(), SummoningPortal(),\n Doomguard()])\n # Warrior\n owned_cards.extend([InnerRage(), Rampage(), Slam(), Armorsmith(), CruelTaskmaster(), MortalStrike(),\n Brawl()])\n # Neutral\n owned_cards.extend([Wisp(), ArgentSquire(), SouthseaDeckhand(), WorgenInfiltrator(), YoungPriestess(),\n AmaniBerserker(), BloodsailRaider(), DireWolfAlpha(), FaerieDragon(), IronbeakOwl(),\n KnifeJuggler(), LootHoarder(), LorewalkerCho(), MadBomber(), MasterSwordsmith(),\n Demolisher(), HarvestGolem(), ImpMaster(), JunglePanther(), QuestingAdventurer(),\n ScarletCrusader(), TaurenWarrior(), TinkmasterOverspark(), AncientBrewmaster(),\n CultMaster(), DefenderOfArgus(), DreadCorsair(), MogushanWarden(),\n SilvermoonGuardian(), VioletTeacher(), AzureDrake(), FenCreeper(),\n SilverHandKnight(), SpitefulSmith(), StranglethornTiger(), FrostElemental(),\n PriestessOfElune()])\n \n ### PROMO ###\n owned_cards.extend([GelbinMekkatorque()])\n \n ### NAXXRAMAS ###\n # Druid\n owned_cards.extend([PoisonSeeds()])\n # Hunter\n owned_cards.extend([Webspinner()])\n # Mage\n owned_cards.extend([Duplicate()])\n # Paladin\n owned_cards.extend([Avenge()])\n # Priest\n owned_cards.extend([DarkCultist()])\n # Rogue\n owned_cards.extend([AnubarAmbusher()])\n # Shaman\n owned_cards.extend([Reincarnate()])\n # Warlock\n owned_cards.extend([Voidcaller()])\n # Warrior\n owned_cards.extend([DeathsBite()])\n # Neutral\n owned_cards.extend([Undertaker(), ZombieChow(), EchoingOoze(), HauntedCreeper(), MadScientist(),\n NerubarWeblord(), NerubianEgg(), UnstableGhoul(), DancingSwords(), Deathlord(),\n ShadeOfNaxxramas(), StoneskinGargoyle(), BaronRivendare(), WailingSoul(), Feugen(),\n Loatheb(), SludgeBelcher(), SpectralKnight(), Stalagg(), Maexxna(), KelThuzad()])\n \n ### GOBLINS VS GNOMES ##\n # Druid\n owned_cards.extend([AnodizedRoboCub(), DruidOfTheFang()])\n # Hunter\n owned_cards.extend([Glaivezooka(), CobraShot(), KingOfBeasts()])\n # Mage\n owned_cards.extend([Flamecannon(), UnstablePortal(), Snowchugger()])\n # Paladin\n owned_cards.extend([SealOfLight(), ScarletPurifier(), CobaltGuardian()])\n # Priest\n owned_cards.extend([Shrinkmeister(), VelensChosen(), UpgradedRepairBot()])\n # Rogue\n 
owned_cards.extend([GoblinAutoBarber(), OneeyedCheat(), IronSensei(), TinkersSharpswordOil(),\n OgreNinja()])\n # Shaman\n owned_cards.extend([Crackle(), VitalityTotem(), WhirlingZapomatic(), SiltfinSpiritwalker()])\n # Warlock\n owned_cards.extend([Darkbomb(), MistressOfPain(), Implosion(), FloatingWatcher()])\n # Warrior\n owned_cards.extend([Warbot(), OgreWarmaul(), Shieldmaiden()])\n # Neutral\n owned_cards.extend([ClockworkGnome(), Cogmaster(), AnnoyoTron(), ExplosiveSheep(), GilblinStalker(),\n Mechwarper(), MicroMachine(), Puddlestomper(), ShipsCannon(), StonesplinterTrogg(),\n FlyingMachine(), GnomereganInfantry(), OgreBrute(), SpiderTank(),\n TinkertownTechnician(), ArcaneNullifierX21(), LostTallstrider(), MechanicalYeti(),\n PilotedShredder(), AntiqueHealbot(), MogorTheOgre(), Blingtron3000(),\n ForceTankMAX()])\n\n ### BLACKROCK MOUNTAIN ###\n # Druid\n owned_cards.extend([DruidOfTheFlame(), VolcanicLumberer()])\n # Hunter\n owned_cards.extend([QuickShot(), CoreRager()])\n # Mage\n owned_cards.extend([Flamewaker(), DragonsBreath()])\n # Paladin\n owned_cards.extend([SolemnVigil(), DragonConsort()])\n # Priest\n owned_cards.extend([TwilightWhelp(), Resurrect()])\n # Rogue\n owned_cards.extend([GangUp(), DarkIronSkulker()])\n # Shaman\n owned_cards.extend([LavaShock(), FireguardDestroyer()])\n # Warlock\n owned_cards.extend([Demonwrath(), ImpGangBoss()])\n # Warrior\n owned_cards.extend([Revenge(), AxeFlinger()])\n # Neutral\n owned_cards.extend([DragonEgg(), BlackwingTechnician(), DragonkinSorcerer(), HungryDragon(),\n BlackwingCorruptor(), GrimPatron(), DrakonidCrusher(), EmperorThaurissan(),\n VolcanicDrake(), RendBlackhand(), Chromaggus(), MajordomoExecutus(), Nefarian()])\n\n card_list = filter(lambda c: c.character_class == hearthbreaker.constants.CHARACTER_CLASS.ALL or c.character_class == self.game.current_player.hero.character_class,\n owned_cards)\n else:\n card_list = filter(lambda c: c.character_class == hearthbreaker.constants.CHARACTER_CLASS.ALL or c.character_class == self.game.current_player.hero.character_class,\n get_cards())\n for card in card_list:\n counter = [x for x in self.game.current_player.deck.cards if x.name == card.name]\n \n if len(counter) < 1 or (len(counter) < 2 and card.rarity != hearthbreaker.constants.CARD_RARITY.LEGENDARY):\n valid_moves.append([MOVE.PICK_CARD, card])\n else:\n try:\n for card in self.game.current_player.hand:\n dupe = False\n for i in range(len(valid_moves)):\n if valid_moves[i][1].name == card.name:\n dupe = True\n break\n if not dupe:\n if card.can_use(self.game.current_player, self.game) and isinstance(card, MinionCard):\n # Minion placement is only important if there are cards available that matters on it\n if any(any(card.name == name for name in self.adjacent_cards) for card in self.game.players[0].deck.cards) or any(any(card.name == name for name in self.adjacent_cards) for card in self.game.players[1].deck.cards):\n # Found adjacent card, so try every possible placement\n for i in range(len(self.game.current_player.minions) + 1):\n if card.targetable and card.targets is not None:\n for j in range(len(card.targets)):\n valid_moves.append([MOVE.SUMMON_MINION, card, None, self.game.current_player.hand.index(card), j, i])\n else:\n valid_moves.append([MOVE.SUMMON_MINION, card, None, self.game.current_player.hand.index(card), 0, i])\n else:\n # It doesn't matter where the minion is placed\n if card.targetable and card.targets is not None:\n for i in range(len(card.targets)):\n valid_moves.append([MOVE.SUMMON_MINION, card, 
None, self.game.current_player.hand.index(card), i, 0])\n else:\n valid_moves.append([MOVE.SUMMON_MINION, card, None, self.game.current_player.hand.index(card), 0, 0])\n elif card.can_use(self.game.current_player, self.game) and isinstance(card, WeaponCard):\n if card.targetable and card.targets is not None:\n for i in range(len(card.targets)):\n valid_moves.append([MOVE.EQUIP_WEAPON, card, None, self.game.current_player.hand.index(card), i])\n else:\n valid_moves.append([MOVE.EQUIP_WEAPON, card, None, self.game.current_player.hand.index(card), 0]) \n elif card.can_use(self.game.current_player, self.game) and isinstance(card, SecretCard):\n valid_moves.append([MOVE.UNTARGETED_SPELL, card, None, self.game.current_player.hand.index(card), 0])\n elif card.can_use(self.game.current_player, self.game) and not card.targetable:\n valid_moves.append([MOVE.UNTARGETED_SPELL, card, None, self.game.current_player.hand.index(card), 0])\n elif card.can_use(self.game.current_player, self.game) and card.targetable:\n for i in range(len(card.targets)):\n valid_moves.append([MOVE.TARGETED_SPELL, card, card.targets[i],\n self.game.current_player.hand.index(card), i])\n \n found_taunt = False\n targets = []\n for enemy in self.game.other_player.minions:\n if enemy.taunt and enemy.can_be_attacked():\n found_taunt = True\n if enemy.can_be_attacked():\n targets.append(enemy)\n \n if found_taunt:\n targets = [target for target in targets if target.taunt]\n else:\n targets.append(self.game.other_player.hero)\n \n for minion in self.game.current_player.minions:\n if minion.can_attack():\n for i in range(len(targets)):\n valid_moves.append([MOVE.MINION_ATTACK, minion, targets[i],\n self.game.current_player.minions.index(minion), i])\n \n if self.game.current_player.hero.can_attack():\n for i in range(len(targets)):\n valid_moves.append([MOVE.HERO_ATTACK, self.game.current_player.hero, targets[i], None, i])\n \n if (self.game.current_player.hero.power_targets_minions or \\\n isinstance(self.game.current_player.hero.power, MagePower) or \\\n isinstance(self.game.current_player.hero.power, MindSpike) or \\\n isinstance(self.game.current_player.hero.power, MindShatter) or \\\n isinstance(self.game.current_player.hero.power, PriestPower)) and \\\n self.game.current_player.hero.power.can_use():\n for target in hearthbreaker.targeting.find_spell_target(self.game, lambda t: t.spell_targetable()):\n valid_moves.append([MOVE.HERO_POWER, self.game.current_player.hero, target, 0, \\\n hearthbreaker.targeting.find_spell_target(self.game, lambda t: \\\n t.spell_targetable()).index(target)])\n elif self.game.current_player.hero.power.can_use():\n valid_moves.append([MOVE.HERO_POWER, self.game.current_player.hero, None, None, None])\n\n valid_moves.append([MOVE.END_TURN, None, None])\n except:\n print(self.game.players[0].deck.__str__())\n print(self.game.players[1].deck.__str__())\n self.replay.write_json(\"ragowit_ai.hsreplay\")\n traceback.print_exc()\n sys.exit()\n\n return valid_moves", "title": "" }, { "docid": "837b3cebd005272b30d137cfb9f53539", "score": "0.51727337", "text": "def expand(self):\n moves = self.board.get_moves()\n for move in moves:\n self.children.append(Node(move[\"board\"], self, move[\"action\"], self.weight))", "title": "" }, { "docid": "cadd1926c9287185b83a2783dc57c78a", "score": "0.51619744", "text": "def available_actions(self):\n moves = self.adjacent_moves()\n moves.append(actions.ViewInventory())\n moves.append(actions.Equip())\n moves.append(actions.Status())\n return moves", "title": "" }, { 
"docid": "5e6d4ef5b0fdb32d888d8876ee2856b3", "score": "0.5158461", "text": "def find_moves(checkers, dice1, dice2):\n setCheckers(checkers) #Bos olan ücgenleri tespit etmek icin yazılan fonksiyon\n tempCheckers1 = checkers.copy()\n list = []\n for i in range(1, 25):\n if tempCheckers1[i] > 0:\n tempCheckers1[i] -= 1\n if (i + dice1) < 25:\n tempCheckers1[i + dice1] += 1\n tempCheckers2 = tempCheckers1.copy()\n for j in range(1, 25):\n if tempCheckers2[j] > 0:\n tempCheckers2[j] -= 1\n if (j + dice2) < 25:\n tempCheckers2[j + dice2] += 1\n point = compareDicts(tempCheckers2, checkers) #ilk durumdaki pulların poziyonları ile son durumu kıyaslar\n if point > 0:\n new_move = ((i, i+dice1), (j, j+dice2), point) #puan 0'dan büyükse pozisyonları ve puanı list'e ekler\n list.append(new_move)\n tempCheckers2 = tempCheckers1.copy()\n tempCheckers1 = checkers.copy()\n print(list)", "title": "" }, { "docid": "a35e297c1343f909cbf94f52f1923572", "score": "0.5157463", "text": "def queue_moves(self, set_covered):\n for cell in set_covered:\n if cell not in self.flagged_cells:\n self.possible_moves.add(cell)", "title": "" }, { "docid": "d6895aedf3a98fdf61ec8a45602ce562", "score": "0.5151673", "text": "def availableMoves(self):\n moves = set()\n tempRow = self.row - 1\n tempCol = self.possibleCol[self.col]\n direction = [(1, 0), (-1, 0), (0, -1), (0, 1),\n (1, 1), (1, -1), (-1, 1), (-1, -1)]\n for (i, k) in direction:\n if 0 <= tempRow + i <= 7 and 0 <= tempCol + \\\n k <= 7 and (self.board[tempRow + i][tempCol + k] is None or self.validMove(tempRow + i, tempCol + k)):\n moves.add((tempRow + i, tempCol + k))\n return moves", "title": "" }, { "docid": "6f118f462f9d143a5f02e13310a7ca2c", "score": "0.5146977", "text": "def move_car(self, name, movekey):\n # implement your code and erase the \"pass\" [('o',u'....)('j','n'....)]\n for opt in self.possible_moves():\n if name == opt[0] and movekey == opt[1]:\n for car in self.car_list:\n if car.get_name() == name:\n if self.cell_content(\n car.movement_requirements(movekey)[\n 0]) is None:\n self.remove_car(car)\n car.move(movekey)\n self.add_car(car)\n return True\n return False", "title": "" }, { "docid": "7dcd8d491dc2e2d6435cab303538b032", "score": "0.51421106", "text": "def cmd_jump(self, player, nick, args):\n # Not implemented.\n pass", "title": "" }, { "docid": "d1572edcd11cf6b7f5c106fe4ecabfbc", "score": "0.5141088", "text": "def computer_move(self, symbol):\r\n\t\tif self.board_full():\r\n\t\t\traise Exception('There are no remaining moves.')\r\n\t\telse:\r\n\t\t\tused_moves = [move for moves in self.player_moves.values() for move in moves]\r\n\t\t\tavailable_moves = [move for move in range(1, 10) if move not in used_moves]\r\n\t\t\tmove = choice(available_moves)\r\n\t\t\tself.add_token(move, symbol)", "title": "" }, { "docid": "62e75f6e5e064edcbd4844f60e69047e", "score": "0.51365596", "text": "def get_possible_moves(self, row, col):\n pass", "title": "" }, { "docid": "01a05fa076d714f020752349f742c885", "score": "0.5131021", "text": "def push(self, move: int):\n if not self.is_legal(move):\n raise InvalidMoveError(\"Trying to push an illegal move.\")\n\n self.stack_moves.append(move)\n self.board_state[move] = self.turn\n self.turn = not self.turn", "title": "" }, { "docid": "f22f0e2ef9b268af19063e6db438db3e", "score": "0.5130621", "text": "def check_for_jump(self):\n\n logger.debug(u'check_for_jump()')\n\n for move_square, jump_square in self.get_move_squares():\n if jump_square:\n move_square_checker = self.get_checker(move_square)\n jump_square_checker 
= self.get_checker(jump_square)\n\n if (isinstance(move_square_checker, Checker) and\n move_square_checker.color != self.color and\n jump_square_checker == None):\n\n return True\n\n return False", "title": "" }, { "docid": "556028eff1390e517b75581a41232ee7", "score": "0.51257414", "text": "def moves(self):\r\n #List to hold all the possible moves\r\n possible_moves = []\r\n \r\n #Determine the indices of all empty squares\r\n empty_squares = np.argwhere(self.state == 2)\r\n\r\n #If there's only one move left, set the board to finished.\r\n #This makes sure we don't call this function again when we shouldn't.\r\n if len(empty_squares) == 1:\r\n self.full = True\r\n\r\n #Whose turn is it next?\r\n next_turn = None\r\n\r\n #Generate new board states given the current board state and who's move it is\r\n for row,column in empty_squares:\r\n #Deep copy of the current state\r\n move = np.copy(self.state)\r\n\r\n #If it's player's 1 move, then mark with a X and mark turn as player 2's\r\n if self.turn == 1:\r\n move[row][column] = 1\r\n next_turn = 0\r\n else:\r\n move[row][column] = 0 #Otherwise mark with a O and mark turn as player 1's\r\n next_turn = 1\r\n\r\n possible_moves.append(Board(size=self.size, new_state=move, new_turn=next_turn))\r\n return possible_moves", "title": "" }, { "docid": "f8848392f32950e6d937e5a534cef873", "score": "0.51218206", "text": "def legal_moves(self) -> List[chess.Move]:\n assert self._ready, \"Cannot compute legal moves before calling reset()\"\n\n return list(self._board.legal_moves)", "title": "" }, { "docid": "49635835c655140f371d2e2ba2e4a4d3", "score": "0.5119066", "text": "def possible_moves(self):\n return self.__possible_moves", "title": "" }, { "docid": "22fb3f9850a69f6691de55f3d006c8db", "score": "0.5111841", "text": "def jumpDown():\n global OpenStates, newStates\n position = 0\n \n while position < 7:\n value = ActualState[position]\n # if is down arrow and there is two spaces below and the next space is up arrow and the next space is empty so can move\n if (value < 0 and position + 2 < 7 and ActualState[position+2] == 0 and ActualState[position+1] == 1):\n #Calc new state\n temporalState = ActualState[:]\n temporalState[position] = 0\n temporalState[position+2] = -1\n temporalState[7] = step\n temporalState[8] = JUMP_DOWN\n temporalState[9] = position\n\n #Check if new state is not in closed states\n exists = False\n for x in ClosedStates:\n if x == temporalState:\n exists = True\n #If state was not in closed states add it to open states\n if exists == False:\n OpenStates.append(temporalState)\n newStates = newStates + 1\n position = position + 1 \n return newStates", "title": "" }, { "docid": "9c34fc48ec752393c9cdf9c21a2e922f", "score": "0.5108643", "text": "def set_wait_times(self):\n for i in range(1, len(self.commanders)):\n difference = 2 * (len(self.commanders[i]) - len(self.jump))\n if difference < 1:\n self.extra_time.append(difference + 1)\n else:\n self.extra_time.append(0)\n\n for j in range(1, len(self.commanders)):\n self.wait -= 2 * len(self.commanders[j])\n self.wait -= self.extra_time[j - 1]\n self.wait_for_jumps.append(self.wait)\n for i in range(1, len(self.commanders)):\n wait = 2 * len(self.commanders[i])\n wait += (self.commanders_time[i - 1][0] - self.commanders_time[i][0])\n wait -= 2 * len(self.jump)\n self.wait_for_jumps.append(wait)\n \"\"\"I dont have to wait the full time until the last one reaches home. All I need to do is make sure\n the last one will be home when it is it's turn to attack. 
If I make sure that this is the case, and\n the commanders are added in the order of their speed, then I can be sure that any commander will be home\n when his turn will come.\n \"\"\"\n for c in range(len(self.commanders)):\n self.final_wait -= len(self.commanders[c]) * 2\n \"\"\"Safety measure\"\"\"\n self.final_wait += 1", "title": "" }, { "docid": "3eb38194cc0d6d69c0683f6c3dabc514", "score": "0.5105932", "text": "def jump(self, time):", "title": "" }, { "docid": "7ac07972c95d283af56161eb3c12f46a", "score": "0.5098529", "text": "def get_legal_moves(self, board):\n legal_moves = []\n \n for row in range(board.size):\n for col in range(board.size):\n neighbors = []\n # list of 3-tuples: (from_coords, dir, neigh_coords)\n if board.matrix[row][col] == self.piece_type:\n if col - 1 >= 0 and \\\n board.get_space((row, col-1)) == self.opponent_type:\n neighbors += [((row, col), \"W\", (row, col-1))]\n if col + 1 < board.size and \\\n board.matrix[row][col+1] == self.opponent_type:\n neighbors += [((row, col), \"E\", (row, col+1))]\n if row - 1 >= 0 and \\\n board.matrix[row-1][col] == self.opponent_type:\n neighbors += [((row, col), \"N\", (row-1, col))]\n if row + 1 < board.size and \\\n board.matrix[row+1][col] == self.opponent_type:\n neighbors += [((row, col), \"S\", (row+1, col))]\n # neighbors is a list of neighboring coordinates\n # that have enemy pieces\n\n # check if we can jump over neighbors\n i = 0\n while i < len(neighbors):\n n = neighbors[i]\n from_coords = n[0]\n direc = n[1]\n opp_coords = n[2]\n new_coords = (opp_coords[0] + MOVE_DICT[direc][0], opp_coords[1] + MOVE_DICT[direc][1])\n if board.on_board(new_coords) and board.get_space(new_coords) == BLANK_SPACE:\n nmove = Move(from_coords, new_coords, direc)\n legal_moves.append(nmove)\n # see if we need to consider hopping\n next_neigh = (new_coords[0] + MOVE_DICT[direc][0], new_coords[1] + MOVE_DICT[direc][1])\n if board.on_board(next_neigh) and board.get_space(next_neigh) == self.opponent_type:\n neighbors.append((from_coords, direc, next_neigh))\n i += 1\n return legal_moves", "title": "" }, { "docid": "ca8d45cd826612bc77d28cc15e2cebcc", "score": "0.5098066", "text": "def knight_moves(positions):\n elements_not_in_range = []\n knight_movement = []\n chars_for_movement = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n\n movement_prediction = {}\n for i in positions:\n l = list(i)\n\n # get all possible directions\n movement_prediction['d_up'] = str(int(l[1]) + 2)\n movement_prediction['d_down'] = str(int(l[1]) - 2)\n movement_prediction['c_up'] = chr(ord(l[0]) + 2)\n movement_prediction['c_down'] = chr(ord(l[0]) - 2)\n\n # get all possible movements codes\n if movement_prediction['c_up'] in chars_for_movement:\n knight_movement.append(movement_prediction['c_up'] + str(int(l[1])+1))\n knight_movement.append(movement_prediction['c_up'] + str(int(l[1])-1))\n if movement_prediction['c_down'] in chars_for_movement:\n knight_movement.append(movement_prediction['c_down'] + str(int(l[1])+1))\n knight_movement.append(movement_prediction['c_down'] + str(int(l[1])-1))\n if 1 <= int(movement_prediction['d_up']) <= 8:\n knight_movement.append(chr(ord(l[0]) - 1) + movement_prediction['d_up'])\n knight_movement.append(chr(ord(l[0]) + 1) + movement_prediction['d_up'])\n if 1 <= int(movement_prediction['d_down']) <= 8:\n knight_movement.append(chr(ord(l[0]) - 1) + movement_prediction['d_down'])\n knight_movement.append(chr(ord(l[0]) + 1) + movement_prediction['d_down'])\n\n # filter movements vs valid ranges\n for i in knight_movement:\n if i[0] 
not in chars_for_movement:\n elements_not_in_range.append(i)\n if not 1 <= int(i[1]) <= 8:\n elements_not_in_range.append(i)\n\n s_move =set(knight_movement)\n s_del = set(elements_not_in_range)\n\n print \" \".join(str(x) for x in sorted(s_move.difference(s_del)))\n\n elements_not_in_range = []\n knight_movement = []", "title": "" }, { "docid": "6743950b061862c0de557610bcae4e1d", "score": "0.50864756", "text": "def possible_moves( board, player, die ):\n ## temporary:\n return []", "title": "" }, { "docid": "ed0dc9676911cbcae5c8250422654c9c", "score": "0.5075042", "text": "def possible_moves(self):\n possible_targets = []\n x_y_possible_values=[0,1,2,3,4,5,6,7]\n if self.x + 2 in x_y_possible_values:\n if self.y+1 in x_y_possible_values:\n possible_targets.append((self.x+2, self.y+1))\n if self.y - 1 in x_y_possible_values:\n possible_targets.append((self.x + 2, self.y - 1))\n\n if self.x - 2 in x_y_possible_values:\n if self.y+1 in x_y_possible_values:\n possible_targets.append((self.x-2, self.y + 1))\n if self.y - 1 in x_y_possible_values:\n possible_targets.append((self.x-2, self.y - 1))\n\n if self.y + 2 in x_y_possible_values:\n if self.x + 1 in x_y_possible_values:\n possible_targets.append((self.x + 1, self.y + 2))\n if self.x - 1 in x_y_possible_values:\n possible_targets.append((self.x - 1, self.y + 2))\n\n if self.y - 2 in x_y_possible_values:\n if self.x + 1 in x_y_possible_values:\n possible_targets.append((self.x + 1, self.y - 2))\n if self.x - 1 in x_y_possible_values:\n possible_targets.append((self.x - 1, self.y - 2))\n\n return list(dict.fromkeys(possible_targets))", "title": "" }, { "docid": "70a95cb651c5f3655b476b410f0b81f3", "score": "0.50688356", "text": "def get_next_moves(self):\n if self.is_initialisation_phase():\n return self._get_initialisation_moves()\n\n if self.current_dice_number != 7:\n empty_move = CatanMove(self.board.get_robber_land())\n moves = [empty_move]\n else:\n moves = [CatanMove(land) for land in self.board.get_lands_to_place_robber_on()]\n moves = self._get_all_possible_development_cards_exposure_moves(moves)\n # _get_all_possible_trade_moves is assuming it's after dev_cards moves and nothing else\n moves = self._get_all_possible_trade_moves(moves)\n moves = self._get_all_possible_paths_moves(moves)\n moves = self._get_all_possible_settlements_moves(moves)\n moves = self._get_all_possible_cities_moves(moves)\n moves = self._get_all_possible_development_cards_purchase_count_moves(moves)\n return moves", "title": "" }, { "docid": "539f92c6edc9a3baff7046828f92fdda", "score": "0.5068089", "text": "def add_token(self, move, symbol):\r\n\t\tif not self.validate_move(move):\r\n\t\t\traise Exception('Invalid move provided.')\r\n\t\tif self.board_full():\r\n\t\t\traise Exception('There are no remaining moves.')\r\n\t\telif self.board[self.move_lookup[move][0]][self.move_lookup[move][1]] != '-':\r\n\t\t\traise Exception('That move is already taken.')\r\n\t\telse:\r\n\t\t\tself.use_move(move, symbol)\r\n\t\t\tself.board[self.move_lookup[move][0]][self.move_lookup[move][1]] = symbol", "title": "" }, { "docid": "6e325271eef0c79fa9a20746d29a3274", "score": "0.50677", "text": "def next_move(self):\n\n # if no moves available, go through all numbered cells found, and see if there any moves.\n # this could create possible moves as the solver only goes through discovered cells\n # on each turn and not the full set of numbered cells found\n if len(self.possible_moves) == 0:\n self.process_move(self.numbered_cells)\n\n if len(self.possible_moves) != 0:\n cell = 
self.possible_moves.pop()\n else:\n cell = self.guess()\n return cell", "title": "" }, { "docid": "852d7c07acbc13e518b3f75f5cec345d", "score": "0.5055114", "text": "def jump(self): \n #self.getModel().getSystem().deletePlayer(\"pepa\")\n #self.getModel().getSystem().deleteGift(\"recopetin\")\n if not self.findIVItem(self.__player).hasAnimation():\n self.__player.jump()", "title": "" }, { "docid": "26a8a500bdc927915af3feb685f78d2f", "score": "0.50508046", "text": "def request_move(self, board):\n pass", "title": "" } ]
c4e8c93c6258fee1b91052671f2f1037
Returns a fit model (and background if desired) for evaluation on data
[ { "docid": "b04841cbda08153c582a8082541e81c9", "score": "0.0", "text": "def getmodel(self, i, modelnumber, bckgsign=0):\n def individualmodel(x):\n bckg = self.bckg(x, self.getparams()[i][self.modelparamcount:])\n paramstart = sum(self.modelparams[:modelnumber])\n paramend = sum(self.modelparams[:modelnumber + 1])\n # print(paramstart, paramend)\n model = self.model[modelnumber](x, self.getparams()[i][paramstart:paramend])\n return model + (bckg * bckgsign if bckgsign else 0)\n\n return individualmodel", "title": "" } ]
[ { "docid": "1c9cafc520b5fb8eb1b1ab5879bf3780", "score": "0.6879479", "text": "def build_and_evaluate_model():\n y, X_without_constant, X = create_linreg_model_inputs()\n fit_and_evaluate_model_from_inputs(y, X_without_constant, X)", "title": "" }, { "docid": "ff6ab8ec878cafbd43cf5aaec00569d9", "score": "0.66058874", "text": "def evaluate_model(model, X_train, y_train, X_test, y_test):\n \n # Refit the model on the whole train set\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # disable the warning on default optimizer\n model.fit(X_train, y_train)\n \n # Evaluate on test set\n y_predict = model.predict(X_test)\n scores = None\n if y_test is not None:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # disable the warning on f1-score with not all labels\n scores = get_prediction_score(y_test, y_predict)\n\n return model, y_predict, scores", "title": "" }, { "docid": "b52280c9377315ae00af646abc9e00a4", "score": "0.6558215", "text": "def bfe_best_model_from_scratch(\n self,\n x = None,\n y = None,\n data = None,\n test_data=None,\n metric_name: str = None,\n model_name: str = None,\n fit_on_all_train_data: bool = True,\n verbosity:int = 1\n )->Model:\n\n train_data, test_data = verify_data(x, y, data, test_data, \"test\")\n\n metric_name = metric_name or self.eval_metric\n\n if model_name:\n met_val, pipeline = self.get_best_pipeline_by_model(\n model_name,\n metric_name)\n else:\n met_val = self.get_best_metric(metric_name)\n pipeline = self.get_best_pipeline_by_metric(metric_name=metric_name)\n\n met_val = round(met_val, 3)\n\n model_name = model_name or ''\n suffix = f\"{SEP}{metric_name}_{met_val}_{model_name}\"\n prefix = f\"{self.path}{SEP}results_from_scratch{suffix}\"\n\n model_config = pipeline['model']\n\n if self.category == \"DL\":\n model_name = list(model_config.keys())[0]\n kwargs = list(model_config.values())[0]\n\n model_config = DL_MODELS[model_name](mode=self.mode,\n output_features=self.num_outputs,\n **kwargs)\n\n model = self._build_and_eval_from_scratch(\n model=model_config,\n train_data=train_data,\n x_transformation=pipeline['x_transformation'],\n y_transformation=pipeline['y_transformation'],\n prefix=prefix,\n fit_on_all_train_data=fit_on_all_train_data,\n verbosity=verbosity,\n seed=self.parent_seeds_[int(pipeline['iter_num'])-1],\n test_data=test_data\n )\n\n return model", "title": "" }, { "docid": "61a20e51c05152ec3d2443212e8c3ca3", "score": "0.65497416", "text": "def fit(self, *args, **kwargs):\n return self.model.fit(*args, **kwargs)", "title": "" }, { "docid": "19eebff678b53cd09ad66408db4d2902", "score": "0.6536695", "text": "def _fit_job(self, model, **kwargs):\n model.fit(self.observations, self.predictors, **kwargs)\n return model", "title": "" }, { "docid": "ed0510dacce26064422843bfb54d243f", "score": "0.6487775", "text": "def get_fit():\n\n info_path = InfoPath(\n path='temp_data',\n dir_name=\"a01_eight_schools\",\n sub_dir_name=InfoPath.DO_NOT_CREATE\n )\n\n return run(info_path=info_path, func=run_model, data=get_data())", "title": "" }, { "docid": "43f2f27d8e6956879f99c1941d046f2b", "score": "0.6461709", "text": "def fit_model(self):\n \n model = VAR(endog = self.train)\n self.model_fit = model.fit()", "title": "" }, { "docid": "6ec5ee1c18b70b1278f1b7482acde027", "score": "0.6329225", "text": "def model_fit_and_predict(model,X_train,y_train,X_test):\n model.fit(X_train,y_train)\n return model.predict(X_test)", "title": "" }, { "docid": "2554dd0e4e86d0eb0a0f1a3f45037010", "score": "0.632649", "text": "def 
evaluate(experiment_dir = './experiments/base_model'):\n seed_everything(SEED=212)\n\n params_json = os.path.join(experiment_dir, 'params.json')\n assert os.path.isfile(params_json), f'No json configuration file found at {params_json}'\n hyperparameters = utils.load_hyperparams(params_json)\n\n train_ds, val_ds, test_ds = load_dataset()\n loss_fn = nn.CrossEntropyLoss\n optim_fn = torch.optim.SGD\n model = Net(hyperparameters)\n\n utils.set_logger(os.path.join(experiment_dir, 'train.log'))\n\n net = get_trainer(experiment_dir, model, loss_fn, optim_fn, hyperparameters)\n net.initialize()\n checkpoint = Checkpoint(fn_prefix='best_', monitor='valid_acc_best',\n dirname=experiment_dir)\n net.load_params(checkpoint=checkpoint)\n y_pred = net.predict(val_ds)\n metrics = {\n 'accuracy': accuracy_score(y_pred, val_ds.targets()),\n 'precision': precision_score(y_pred, val_ds.targets(), average='macro'),\n 'recall': recall_score(y_pred, val_ds.targets(), average='macro'),\n 'f1': f1_score(y_pred, val_ds.targets(), average='macro')\n }\n\n metrics_file_path = os.path.join(experiment_dir, 'metrics_val_best_weight.json')\n logging.info(f'Save Validation metrics to {metrics_file_path}')\n utils.save_dict_to_json(metrics, metrics_file_path)\n\n y_pred = net.predict(test_ds)\n metrics = {\n 'accuracy': accuracy_score(y_pred, test_ds.targets()),\n 'precision': precision_score(y_pred, test_ds.targets(), average='macro'),\n 'recall': recall_score(y_pred, test_ds.targets(), average='macro'),\n 'f1': f1_score(y_pred, test_ds.targets(), average='macro')\n }\n metrics_file_path = os.path.join(experiment_dir, 'metrics_test_best_weight.json')\n logging.info(f'Save Test metrics to {metrics_file_path}')\n utils.save_dict_to_json(metrics, metrics_file_path)", "title": "" }, { "docid": "d1adecba6ca9579d112a99a7b0a83fe6", "score": "0.6322142", "text": "def fit(model, data_to_fit, dict_tokens_info, config, do_plot = True):\n population = [model]\n ConstructScipyOptimizeAttributes.construct_info_population(population,dict_tokens_info)\n population = Parametrizer.parametrize_population(population)\n DefConstructor.add_def_statements_attributes(population)\n population = Evaluator.evaluator(population, data_to_fit, config)\n population = QualityEstimator.quality_estimator(population, data_to_fit, config)\n\n fitted = CalculatorModelValues.calculate_model_values(population[0], data_to_fit[:,1:].T)\n\n if do_plot:\n try:\n ObserverTheBestFunction.observer_the_best_function(population,data_to_fit)\n\n return fitted\n\n except:\n\n plt.plot(data_to_fit[:,1], data_to_fit[:,0], 'b', data_to_fit[:,1], zeros(data_to_fit[:,0].shape), 'g')\n plt.show()\n\n return zeros(data_to_fit[:,0].shape)\n\n return fitted", "title": "" }, { "docid": "d05ea8ebafac036f6dde36b463999607", "score": "0.63025403", "text": "def get_model(X_train,\n y_train,\n classifier='MultinomialNB',\n dataset='dataset-{}-{}-.json'.format(CHANNELS, HISTO_BINS),\n use_best_params=True,\n partial=False,\n debug=False):\n clf = classifier_dict[classifier]()\n if dataset in best_params:\n if use_best_params and classifier in best_params[dataset]:\n clf.set_params(**best_params[dataset][classifier])\n logger.info(\"Loading tuned parameters for {}\".format(classifier))\n elif classifier in default_params:\n logger.info(\"Loading default parameters for {}\".format(classifier))\n clf.set_params(**default_params[classifier])\n\n X_train = standardize_data(X_train, classifier)\n if not partial:\n clf.fit(X_train, y_train)\n else:\n clf.partial_fit(X_train, y_train, 
classes=CLASSES)\n if debug:\n logger.debug(clf.get_params())\n return clf", "title": "" }, { "docid": "669321871b1c252d034671899e4d7f26", "score": "0.6291504", "text": "def fit(self, X, y, **kwargs):\n model = self.Model(**kwargs)\n model.fit(X, y)\n return model", "title": "" }, { "docid": "fdedad8b281db61b2088b6ad93544192", "score": "0.6278794", "text": "def fit(self, X, y, **kwargs):\n return self.model.fit(X, y, **kwargs)", "title": "" }, { "docid": "2c8caf875870b6c26d5a62d13bef6563", "score": "0.62770766", "text": "def evaluate(model, option, **data):\n try:\n if option == 'train':\n # evaluate the model and print the training stats\n evaluation = model.evaluate(data['x_train'], data['y_train'])\n else:\n # evaluate the model and print the training stats\n evaluation = model.evaluate(data['x_test'], data['y_test'])\n\n # return the model\n return evaluation\n except:\n return np.zeros(3)", "title": "" }, { "docid": "4d912c0ab23eb73242b45228373f6144", "score": "0.6265324", "text": "def fit_and_eval_models(train_features,train_target,\r\n validation_features,validation_target,\r\n save_path,validation_low,validation_high):\r\n\r\n # Config: Dictionaries of models and hyperparameters\r\n MODELS = {\r\n 'LinearRegression': LinearRegression(), \r\n 'Lasso': Lasso(),\r\n 'RandomForestRegressor': RandomForestRegressor(),\r\n 'AdaBoostRegressor':AdaBoostRegressor(),\r\n }\r\n GRID = {\r\n 'LinearRegression': [{}],\r\n 'Lasso': [{'alpha':x, 'random_state':0, 'max_iter':10000} for x in [0.01,0.05,0.1,0.5,1,5,10,50,100,500,1000]],\r\n 'RandomForestRegressor': [{'n_estimators':x, 'max_features':'log2',\r\n 'n_jobs':-1} \\\r\n for x in [100,500,1000]],\r\n 'AdaBoostRegressor': [{'n_estimators':y} for y in [50,75,100,125,150,175,200]],\r\n }\r\n\r\n # Fit and get results\r\n model_results = pipeline.build_regressors(MODELS, GRID,\r\n train_features, train_target,\r\n validation_features, validation_target,\r\n save_path, validation_low, validation_high)\r\n return model_results", "title": "" }, { "docid": "79f8e10dcc4c953e3a7d13d26abb4776", "score": "0.6263377", "text": "def model_eval(self, params, dataset):", "title": "" }, { "docid": "bbfac11b6fc5619f48fd9452129b0ffc", "score": "0.62233716", "text": "def run_fit(self):\n\n # Check if data is set\n if self.time is []:\n self.result = []\n return\n\n try:\n self.fit_model()\n except ValueError as e:\n print(e)\n except AttributeError as e:\n print(e)", "title": "" }, { "docid": "e5c272bc8ebbc88e76d37da0ffcfeb94", "score": "0.6196191", "text": "def fit_eval(self, x, y, validation_data=None, mc=False, verbose=0, **config):\n config.update({\"feature_num\": x.shape[2]})\n # if model is not initialized, __build the model\n if self.model is None:\n self._build(mc=mc, **config)\n\n hist = self.model.fit(x, y,\n validation_data=validation_data,\n batch_size=self.batch_size,\n epochs=config.get(\"epochs\", 10),\n verbose=verbose\n )\n # print(hist.history)\n\n if validation_data is None:\n # get train metrics\n # results = self.model.evaluate(x, y)\n result = hist.history.get(self.metric)[0]\n else:\n result = hist.history.get('val_' + str(self.metric))[0]\n return result", "title": "" }, { "docid": "72964f7e6e46ddc945aaee7b37140f16", "score": "0.61858934", "text": "def determine_best_fit_model_Fitter(x_data, y_data, **kwargs):\n f = Fitter(x_data, y_data, **kwargs)\n f.fit()\n best = f.get_best()\n f.summary()\n return best", "title": "" }, { "docid": "f778ae3d169bf07b34adfced44a50e33", "score": "0.6162002", "text": "def get_trained_model(self):\n 
self.regressor_model = LinearRegression()\n self.regressor_model.fit(self.x_train, self.y_train) # training the algorithm\n return self.regressor_model", "title": "" }, { "docid": "329e8bbe4811afa7364c090ea78a2905", "score": "0.61571366", "text": "def _build_and_fit_model(self, trial, fit_args, fit_kwargs):\n model = self.hypermodel.build(trial.hyperparameters)\n mlflow.tensorflow.autolog()\n with mlflow.start_run(run_name=self.project_name):\n return model.fit(*fit_args, **fit_kwargs)", "title": "" }, { "docid": "03875138b6b0f4762f9b2309529cf692", "score": "0.61552936", "text": "def one_model(input_val, y_val, fn, fn_kwargs = {}, k_folds = 1, random_state = 15, verbose = False):\n \n kf_results = []\n models = []\n \n t1 = time.time()\n\n model = fn(**fn_kwargs)\n model.fit(input_val, y_val)\n \n print(\"Model trained:\", time.time()-t1)\n\n probs_holdout = model.predict_proba(input_val)\n error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier = evaluate_rip(probs_holdout, y_val, verbose=False)\n kf_results.append([error, ece, ece2, ece_cw, ece_cw2, ece_full, ece_full2, mce, mce2, loss, brier])\n models.append(model)\n \n return (models, ((np.mean(kf_results, axis = 0)), (np.std(np.array(kf_results)[:, -2:], axis = 0))))", "title": "" }, { "docid": "f17055b763923c4b30bdc8df97f101f8", "score": "0.61344427", "text": "def eval(self):\n self.train(mode=False)", "title": "" }, { "docid": "f17055b763923c4b30bdc8df97f101f8", "score": "0.61344427", "text": "def eval(self):\n self.train(mode=False)", "title": "" }, { "docid": "54849876feb80436418ae8e873d2c068", "score": "0.6109372", "text": "def evaluate(data, model):\n pass", "title": "" }, { "docid": "8353cea07b9ccf6fad02db90285e3d3a", "score": "0.610738", "text": "def _fit_job(self, model, observations, predictors, **kwargs):\n model.fit(observations, predictors, **kwargs)\n return model", "title": "" }, { "docid": "bd0e66d003bac55572107c1aa79578d3", "score": "0.61003", "text": "def partial_fit(self, X, y, classes=None, weight=None):\n r, _ = get_dimensions(X)\n if self.first_run:\n # Train on all instances before predicting\n self.first_run = False\n for z, m in zip(self.meta_models, self.base_models):\n m.partial_fit(X, y)\n predictions = m.predict(X)\n real_errors = np.array([get_predictions_errors(real_y, estimated_y, self.meta_error_metric)\n for real_y, estimated_y in zip(y, predictions)])\n # Incremental learning of meta-models\n z.partial_fit(X, real_errors, weight)\n # Add first predictions to buffer in the first run\n # if self.diversity_evaluator is not None:\n if hasattr(self, 'diversity_evaluator'):\n if self.diversity_evaluator is not None:\n all_predictions = get_all_predictions(self.base_models, X)\n if r == 1:\n all_predictions = all_predictions[0]\n if self.diversity_evaluator.get_type() == 'window_regression':\n if self.diversity_evaluator.get_class_type() =='window_dissimilarity':\n self.diversity_evaluator.update_(all_predictions, y)\n elif self.diversity_evaluator.get_class_type() == 'window_redundancy':\n self.diversity_evaluator.update_(all_predictions, y)\n elif self.diversity_evaluator.get_class_type() == 'window_correlation':\n self.diversity_evaluator.add(all_predictions)\n else:\n raise NotImplementedError\n elif self.diversity_evaluator.get_type() == 'window_classif':\n self.diversity_evaluator.add(all_predictions, list(range(len(self.base_models))), y)\n elif self.diversity_evaluator.get_type() == 'ff_classif':\n self.diversity_evaluator.add(all_predictions[0], 
list(range(len(self.base_models))), y)\n else:\n raise NotImplementedError\n else:\n r, _ = get_dimensions(X)\n # get previous predictions and compare to real value of y and see if we need to adapt threshold\n previous_predictions = self.previous_predictions.dequeue(r)\n # compare and see how to adapt threshold\n real_error = get_predictions_errors(y, previous_predictions, self.meta_error_metric)\n # compare error to initial tolerated error rather than dynamic one ?\n if real_error <= self.initial_competence_threshold:\n # We ara a facing a relatively true prediction\n # increase threshold to look for similarly competent classifiers\n self._update_threshold(method=self.threshold_update_method, change_step=self.threshold_update_step,\n increase=True)\n else:\n # We are facing a relatively false prediction\n # decrease threshold to prone less competent ones that were selected\n self._update_threshold(method=self.threshold_update_method, change_step=self.threshold_update_step,\n increase=False)\n for z, m in zip(self.meta_models, self.base_models):\n # Incremental training of base base models\n # Getting base models predictions\n predictions = m.predict(X)\n # Computing real incurred errors\n real_errors = np.array([get_predictions_errors(real_y, estimated_y, self.meta_error_metric)\n for real_y, estimated_y in zip(y, predictions)])\n\n # Incremental learning of meta-models\n z.partial_fit(X, real_errors, weight)\n m.partial_fit(X, y, weight)\n if hasattr(self, 'diversity_evaluator'):\n if self.diversity_evaluator is not None:\n all_predictions = get_all_predictions(self.base_models, X)\n if r == 1:\n all_predictions = all_predictions[0]\n if self.diversity_evaluator.get_type() == 'window_classif':\n self.diversity_evaluator.add(all_predictions, list(range(len(self.base_models))), y)\n if self.diversity_evaluator.get_type() == 'ff_classif':\n self.diversity_evaluator.add(all_predictions, list(range(len(self.base_models))), y)\n if self.diversity_evaluator.get_type() == 'window_regression':\n if self.diversity_evaluator.get_class_type() == 'window_dissimilarity':\n self.diversity_evaluator.update_(all_predictions, y)\n elif self.diversity_evaluator.get_class_type() == 'window_redundancy':\n self.diversity_evaluator.update_(all_predictions, y)\n elif self.diversity_evaluator.get_class_type() == 'window_correlation':\n self.diversity_evaluator.add(all_predictions)", "title": "" }, { "docid": "faa8963064b0c6838eebdbf1658aeb85", "score": "0.60915303", "text": "def train_model(x, y, model):\n if model=='NB':\n print(\"\\tTraining NB model...\")\n return OneVsRestClassifier(MultinomialNB()).fit(x, y)\n elif model=='SVM':\n print(\"\\tTraining SVM model...\")\n return OneVsRestClassifier(LinearSVC()).fit(x, y)\n elif model=='TREE':\n print(\"\\tTraining decision tree model...\")\n return OneVsRestClassifier(DecisionTreeClassifier()).fit(x, y)\n elif model=='MLP': # Multi-layer perceptron NN\n print(\"\\tTraining neural net model...\")\n return OneVsRestClassifier(MLPClassifier()).fit(x, y)\n elif model=='LOGREG': \n print(\"\\tTraining logistic regression model...\")\n return OneVsRestClassifier(LogisticRegression()).fit(x, y)\n else:\n raise ValueError(\"train_model: invalid model selected\")", "title": "" }, { "docid": "e43c9b4b75ad7b16481d0a5ec106380a", "score": "0.6080572", "text": "def fit(self, **kwargs):", "title": "" }, { "docid": "e43c9b4b75ad7b16481d0a5ec106380a", "score": "0.6080572", "text": "def fit(self, **kwargs):", "title": "" }, { "docid": "1fa2e7af04193331536f048fa4869095", "score": 
"0.6075323", "text": "def train_model(X, y):\n\n ## Use any model that we might find appropriate\n #model = RidgeClassifierCV(alphas=[ 0.1, 1., 10. ])\n\n ##Create the object and set relevant parameters\n #model = LogisticRegression(C=10)\n model = GradientBoostingClassifier(n_estimators=50)\n #Fit the model\n model.fit(X, y)\n\n return model", "title": "" }, { "docid": "76105f742f1f0e4ae17f5d40bd04010e", "score": "0.6075271", "text": "def instantiate_and_fit(self, **fit_kwargs):\n self.build_model()\n self.fit(**fit_kwargs)", "title": "" }, { "docid": "68d54f4ea9a8c4faf436c5604b733b7d", "score": "0.60625094", "text": "def _fit(model, X_train, y_train_binarized):\n model.fit(X_train, y_train_binarized)\n return model", "title": "" }, { "docid": "5b59b3c9a727f02601d4a6f3c6ea4696", "score": "0.6053115", "text": "def train_new_model_best_parameters(self):\n\n # get parameters of the best classifier\n params = self.model.best_params_\n batch_size = params['batch_size']\n epochs = params['epochs']\n if self.exp.consider_class_weight:\n class_weight = params['class_weight']\n else:\n class_weight = None\n\n # train the best model\n new_model = self.create_model()\n y_train = np.asarray(self.dataset.y_train)\n new_model.fit(self.dataset.x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n class_weight=class_weight,\n validation_split=0.1,\n verbose=0)\n return new_model", "title": "" }, { "docid": "4d0a4f5098577031201731806a74395b", "score": "0.6050603", "text": "def fit(self, X, y=None):", "title": "" }, { "docid": "7333c448391858d968f60354dfbe744e", "score": "0.6048499", "text": "def train_ml_model(X, y, algorithm, random_state=0, verbose=True):\r\n \r\n if algorithm == 'tf-regression':\r\n model = build_regression(X.shape[1])\r\n elif algorithm == 'tf-classification-proba':\r\n model = build_classification_proba(X.shape[1], y.shape[1])\r\n \r\n model.fit(X, y, epochs=100, batch_size=32, validation_split=0.2,\r\n verbose=1 if verbose else 0)\r\n \r\n return model", "title": "" }, { "docid": "2509e7ac794484e94688110d7129a9e1", "score": "0.60451937", "text": "def evaluate_trained_model(model, datagen_args, preprocess_args, # settings\n cross_val_num=0, cross_val_parts=8, # cross-validation settings\n name=None, show_data_aug=False, batch_size=256, # other\n data_path='Data/', prep_path='Data/prep/'):\n\n if prep_path is not None:\n # get train and validation generators\n train_generator, val_generator = generators_from_prep(datagen_args=datagen_args, preprocess_args=preprocess_args,\n cross_val_num=cross_val_num, cross_val_parts=cross_val_parts, show_data_aug=show_data_aug,\n batch_size=batch_size, data_path=data_path, prep_path=prep_path)\n\n # obtain metrics for validation set\n val_generator.reset()\n metrics = model.evaluate(generator_wrapper(val_generator), verbose=1,\n steps=val_generator.n // val_generator.batch_size)\n\n # the metrics will contain out_root_acc, etc. 
for the individual accuracies\n metric_labels = model.metrics_names\n\n # calculate global accuracy\n worker = GlobalAccuracyCallback(val_generator)\n global_accuracy = worker.calc_global_acc(metrics, metric_labels)\n\n return global_accuracy, metrics, metric_labels", "title": "" }, { "docid": "2509e7ac794484e94688110d7129a9e1", "score": "0.60451937", "text": "def evaluate_trained_model(model, datagen_args, preprocess_args, # settings\n cross_val_num=0, cross_val_parts=8, # cross-validation settings\n name=None, show_data_aug=False, batch_size=256, # other\n data_path='Data/', prep_path='Data/prep/'):\n\n if prep_path is not None:\n # get train and validation generators\n train_generator, val_generator = generators_from_prep(datagen_args=datagen_args, preprocess_args=preprocess_args,\n cross_val_num=cross_val_num, cross_val_parts=cross_val_parts, show_data_aug=show_data_aug,\n batch_size=batch_size, data_path=data_path, prep_path=prep_path)\n\n # obtain metrics for validation set\n val_generator.reset()\n metrics = model.evaluate(generator_wrapper(val_generator), verbose=1,\n steps=val_generator.n // val_generator.batch_size)\n\n # the metrics will contain out_root_acc, etc. for the individual accuracies\n metric_labels = model.metrics_names\n\n # calculate global accuracy\n worker = GlobalAccuracyCallback(val_generator)\n global_accuracy = worker.calc_global_acc(metrics, metric_labels)\n\n return global_accuracy, metrics, metric_labels", "title": "" }, { "docid": "fcc302d76514442f894ac0e85ae7f75b", "score": "0.60441864", "text": "def single_evaluate(self, train_data, test_data, fidelity):\n xtrain, ytrain, train_info, train_times = train_data\n xtest, ytest, test_info, _ = test_data\n train_size = len(xtrain)\n\n data_reqs = self.predictor.get_data_reqs()\n\n logger.info(\"Fit the predictor\")\n if data_reqs[\"requires_partial_lc\"]:\n \"\"\"\n todo: distinguish between predictors that need LC info\n at training time vs test time\n \"\"\"\n train_info = copy.deepcopy(train_info)\n test_info = copy.deepcopy(test_info)\n for info_dict in train_info:\n lc_related_keys = [key for key in info_dict.keys() if \"lc\" in key]\n for lc_key in lc_related_keys:\n info_dict[lc_key] = info_dict[lc_key][:fidelity]\n\n for info_dict in test_info:\n lc_related_keys = [key for key in info_dict.keys() if \"lc\" in key]\n for lc_key in lc_related_keys:\n info_dict[lc_key] = info_dict[lc_key][:fidelity]\n\n self.predictor.reset_hyperparams()\n fit_time_start = time.time()\n cv_score = 0\n if (\n self.max_hpo_time > 0\n and len(xtrain) >= 10\n and self.predictor.get_hpo_wrapper()\n ):\n\n # run cross-validation (for model-based predictors)\n hyperparams, cv_score = self.run_hpo(\n xtrain,\n ytrain,\n train_info,\n start_time=fit_time_start,\n metric=\"kendalltau\",\n )\n self.predictor.set_hyperparams(hyperparams)\n\n self.predictor.fit(xtrain, ytrain, train_info)\n hyperparams = self.predictor.get_hyperparams()\n\n fit_time_end = time.time()\n test_pred = self.predictor.query(xtest, test_info)\n query_time_end = time.time()\n\n # If the predictor is an ensemble, take the mean\n if len(test_pred.shape) > 1:\n test_pred = np.mean(test_pred, axis=0)\n\n logger.info(\"Compute evaluation metrics\")\n results_dict = self.compare(ytest, test_pred)\n results_dict[\"train_size\"] = train_size\n results_dict[\"fidelity\"] = fidelity\n results_dict[\"train_time\"] = np.sum(train_times)\n results_dict[\"fit_time\"] = fit_time_end - fit_time_start\n results_dict[\"query_time\"] = (query_time_end - fit_time_end) / 
len(xtest)\n if hyperparams:\n for key in hyperparams:\n results_dict[\"hp_\" + key] = hyperparams[key]\n results_dict[\"cv_score\"] = cv_score\n \n # note: specific code for zero-cost experiments:\n method_type = None\n if hasattr(self.predictor, 'method_type'):\n method_type = self.predictor.method_type\n print(\n \"dataset: {}, predictor: {}, spearman {}\".format(\n self.dataset, method_type, np.round(results_dict[\"spearman\"], 4)\n )\n )\n print(\"full ytest\", results_dict[\"full_ytest\"])\n print(\"full testpred\", results_dict[\"full_testpred\"])\n # end specific code for zero-cost experiments.\n \n # print entire results dict:\n print_string = \"\"\n for key in results_dict:\n if type(results_dict[key]) not in [str, set, bool]:\n # todo: serialize other types\n print_string += key + \": {}, \".format(np.round(results_dict[key], 4))\n logger.info(print_string)\n self.results.append(results_dict)\n \"\"\"\n Todo: query_time currently does not include the time taken to train a partial learning curve\n \"\"\"", "title": "" }, { "docid": "61ee8df8fe5dd8fa991880e8862378af", "score": "0.6035679", "text": "def _fit_job(self, model, **kwargs):\n obs_shuffled = self.observations.sample(frac=1, replace=True).copy()\n model.fit(obs_shuffled, self.predictors, **kwargs)\n return model", "title": "" }, { "docid": "7ca4b0f7cbcf687e99c0ef9b9ddde32a", "score": "0.6028579", "text": "def _build_and_eval_from_scratch(\n self,\n model,\n train_data,\n x_transformation,\n y_transformation,\n prefix:str,\n model_name=None,\n verbosity:int = 1,\n fit_on_all_train_data:bool = True,\n seed:int = None,\n test_data=None,\n ) -> \"Model\":\n model = self._build_model(\n model=model,\n x_transformation=x_transformation,\n y_transformation=y_transformation,\n prefix=prefix,\n val_metric=self.eval_metric,\n verbosity=verbosity\n )\n\n if seed:\n model.seed_everything(int(seed))\n\n if 'data' in train_data:\n if fit_on_all_train_data:\n model.fit_on_all_training_data(**train_data)\n model.dh_.to_disk(model.path)\n else:\n model.fit(**train_data)\n # todo, save x,y in disk\n\n self._populate_results(model, train_data, test_data, model_name=model_name)\n\n return model", "title": "" }, { "docid": "0e4a6ad50376121d227657f7cf0152dc", "score": "0.60272926", "text": "def run_predict():\n img = load_and_convert_image('drawing.jpeg')\n\n pre_trained_model = 'training_model'\n if (model_exists(pre_trained_model)):\n trained_model = load_model(pre_trained_model)\n guess = make_prediction(trained_model, img)\n \n else:\n trained_model = train()\n guess = make_prediction(trained_model, img)\n \n return guess", "title": "" }, { "docid": "d179e317cd5a29f674adf13f8950ebb1", "score": "0.6024882", "text": "def return_model(self):\n X_train, X_test, y_train, y_test = Model.split_(self)\n trained_model = self.pipeline_.fit(X_train, y_train)\n return trained_model", "title": "" }, { "docid": "2669df5a605471a530f6b44b250afa25", "score": "0.6008297", "text": "def eval_model2(self, params, hyper_params, model_generator, model_name, train_test, attributes):\r\n\r\n X_train, X_val, X_test, y_train, y_val, y_test = train_test \r\n\r\n out = None\r\n fit_time = None\r\n #Define the function we'd like hyperopt to optimize.\r\n #Its input are the optimization parameters.\r\n def hyperopt_objective(x):\r\n if self.verbose:\r\n print({**params, **x})\r\n __out, my_model, __fit_time = model_generator(X_train, y_train, X_val, y_val, {**params, **x}, verbose = self.verbose)\r\n #Store these variables for later use.\r\n out = __out\r\n fit_time = 
__fit_time\r\n #Generate a score on the validation data.\r\n val_acc = my_model.evaluate(X_val, y_val, verbose = self.verbose)[0]\r\n return {'loss': val_acc, 'status': STATUS_OK, 'fit_time': fit_time, 'model' : my_model, 'out' : out}\r\n\r\n #We will train a model based on the best parameters given by hyperopt\r\n trials = Trials()\r\n best = fmin(hyperopt_objective, hyper_params, \r\n algo=tpe.suggest, max_evals=self.max_evals, trials= trials)\r\n\r\n best_trial = None\r\n #Find the best model.\r\n for trial in trials.trials:\r\n if best_trial == None:\r\n best_trial = trial\r\n elif best_trial['result']['loss'] > trial['result']['loss']:\r\n best_trial = trial\t\t\t\r\n #Otherwise preserve the best_trial.\r\n model = best_trial['result']['model']\r\n out = best_trial['result']['out']\r\n fit_time = best_trial['result']['fit_time']\r\n\r\n #Save the model for later use.\r\n save_dir = os.path.join(self.local_dir, 'trained_models') \r\n if not os.path.isdir(save_dir):\r\n os.makedirs(save_dir)\r\n model.save(os.path.join(save_dir, self.generate_model_name(model_name, params, attributes) + '.h5'))\r\n self.model_results.append([out, best_trial, trials])\r\n \r\n #Evaluate the model.\r\n train_score = model.evaluate(X_train, y_train, verbose = self.verbose)\r\n test_score = model.evaluate(X_test, y_test, verbose = self.verbose)\r\n if self.verbose:\r\n print (\"Loss = \" + str(test_score[0]))\r\n print (\"Test Accuracy = \" + str(test_score[1]))\r\n with open(os.path.join(self.local_dir , self.model_res_file), mode='a') as file:\r\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n #Merge params with tuned hyper_params for recording results.\r\n params = {**params, **best}\r\n #Save attribute values.\r\n my_attributes = attributes.copy()\r\n my_attributes[1] = train_score[1]\r\n my_attributes[2] = fit_time\r\n my_attributes[3] = test_score[1]\r\n #Here we write params/hyper_params specified in attributes to my_attributes before writing\r\n #my_attributes to the model_res_file csv.\r\n if (len(attributes) > 4):\r\n for i in range(4,len(attributes)):\r\n if isinstance(attributes[i], str) and (attributes[i] in params):\r\n if attributes[i] == 'layers':\r\n my_attributes[i] = \"-\".join([str(x) for x in params['layers']])\r\n else:\r\n #We'll search for this attribute in the parameters and record the parameter value.\r\n my_attributes[i] = str(params[attributes[i]])\r\n else:\r\n my_attributes[i] = np.NaN\r\n writer.writerow(my_attributes)", "title": "" }, { "docid": "ca1ca24ac782834256a24514dbcb1c44", "score": "0.60062885", "text": "def make_single_model():\n txt_titles, txt_abstracts, y_engineering, y_bug = consolidate_data()\n\n max_title_words = 500\n make_vectorizers(txt_titles, txt_abstracts, max_title_words, blacklist_bugs=False)\n #\n titles_vectorizer, abstracts_vectorizer = make_vectorizers(txt_titles, txt_abstracts, max_title_words, blacklist_bugs=True)\n\n X_titles = titles_vectorizer.transform(txt_titles)\n X_abstracts = abstracts_vectorizer.transform(txt_abstracts)\n\n X = scipy.sparse.hstack((X_titles, X_abstracts))\n\n y = np.array(y_engineering)\n\n log.info('Fitting model..')\n\n estimator = sklearn.ensemble.RandomForestClassifier(\n n_estimators=500,\n n_jobs=12,\n oob_score=True,\n random_state=13\n )\n\n estimator.fit(X, y)\n\n with open(os.path.join(utils.MODEL_DIR, 'single_rf.pickle'), 'wb') as f:\n pickle.dump(estimator, f)\n\n scaled_importance = estimator.feature_importances_ / np.max(estimator.feature_importances_)\n\n importances 
= {\n 'title': {w: scaled_importance[i] for w, i in titles_vectorizer.vocabulary_.items()},\n 'abstract': {w: scaled_importance[i + max_title_words] for w, i in abstracts_vectorizer.vocabulary_.items()},\n }\n\n with open(os.path.join(utils.MODEL_DIR, 'single_imp.json'), 'w') as f:\n json.dump(importances, f)", "title": "" }, { "docid": "a782b1ac83aaa9478a06af602791a451", "score": "0.6004595", "text": "def trainModel(model, data, labels, aug, num, s, val, batch=32, epochs=50):\r\n print(\"Stage 2: Training Model- Its Gonna Take Some Time\")\r\n if aug:\r\n data, labels = augmentation(data, labels, num, s)\r\n early_stopping = EarlyStopping(patience=4, restore_best_weights=True)\r\n history = model.fit(x=data, y=labels, batch_size=batch, epochs=epochs, validation_split=val, callbacks=[early_stopping])\r\n return model, history", "title": "" }, { "docid": "d0ced5506fdde5a8ecca7cc7d099f521", "score": "0.59990746", "text": "def eval_model(data, model):\n loss = None\n acc = None\n return loss, acc", "title": "" }, { "docid": "ef77829491b214637e571cd32cba2bfb", "score": "0.59980536", "text": "def _get_model(cfg: Dict, is_train: bool) -> LumpedModel:\n n_jobs = cfg[\"num_workers\"] if \"num_workers\" in cfg else 1\n model_args = cfg[\"model_args\"] if \"model_args\" in cfg else {}\n model = None\n # sklearn models\n if cfg[\"model_type\"] == 'linearRegression':\n model = LinearRegression(n_jobs=n_jobs)\n elif cfg[\"model_type\"] == 'lasso':\n model = Lasso()\n elif cfg[\"model_type\"] == 'randomForest':\n model = RandomForestRegressor(n_jobs=n_jobs, **model_args)\n if model is not None:\n model = LumpedSklearnRegression(model, no_static=cfg[\"no_static\"],\n concat_static=cfg[\"concat_static\"],\n run_dir=cfg[\"run_dir\"],\n n_jobs=n_jobs)\n # other models\n elif cfg[\"model_type\"] == 'lstm':\n num_static = len(cfg[\"static_attributes\"]) - 2 # lat/lon is not part of training\n if \"Regulation\" in cfg[\"static_attributes\"]:\n num_static += 2 # Regulation is one-hot-encoded\n model = LumpedLSTM(len(cfg[\"forcing_attributes\"]),\n num_static,\n use_mse=cfg[\"use_mse\"],\n no_static=cfg[\"no_static\"],\n concat_static=cfg[\"concat_static\"],\n run_dir=cfg[\"run_dir\"],\n n_jobs=n_jobs,\n **model_args)\n\n elif cfg[\"model_type\"] == 'xgb':\n # if model_path is provided, we don't need a parameter search.\n if is_train and \"model_path\" not in model_args:\n model_args[\"param_dist\"] = XGB_PARAM_DIST\n model_args[\"reg_search_param_dist\"] = XGB_REG_PARAM_DIST\n if any([c not in model_args for c in [\"n_estimators\", \"learning_rate\",\n \"early_stopping_rounds\", \"n_cv\",\n \"param_search_n_estimators\",\n \"param_search_early_stopping_rounds\",\n \"param_search_n_iter\",\n \"reg_search_n_iter\"]]):\n raise ValueError(\"XGBoost configuration incomplete.\")\n model = LumpedXGBoost(no_static=cfg[\"no_static\"],\n concat_static=cfg[\"concat_static\"],\n use_mse=cfg[\"use_mse\"],\n run_dir=cfg[\"run_dir\"],\n n_jobs=n_jobs,\n seed=cfg[\"seed\"],\n **model_args)\n\n else:\n raise ValueError(f'Unknown model type {cfg[\"model_type\"]}')\n\n return model", "title": "" }, { "docid": "fdf76b1601b4230ce81014de075ca567", "score": "0.5994029", "text": "def fit(self, X, Y=None):\n\n full_df_wfs = list(map(lambda features_df: features_df.drop('Production', axis=1), split_data_wf(X)))\n prod_df_wfs = list(map(lambda features_df: features_df['Production'], split_data_wf(X)))\n\n for i, full_df_wf, prod_df_wf in zip(*(np.arange(1, len(full_df_wfs) + 1), full_df_wfs, prod_df_wfs)):\n 
self.best_scores.append(np.inf)\n self.best_models.append(None)\n\n # ---------- data conditioning ---------- #\n # fill na\n full_df_wf = handle_nan(full_df_wf)\n\n # split_dataset data between train / valid / test\n train_X_df, valid_X_df, test_X_df, train_Y, valid_Y, test_Y = split_dataset(full_df_wf, prod_df_wf,\n self.train_valid_ratio,\n self.valid_test_ration)\n # scale\n scaler = StandardScaler()\n train_X_df[:] = scaler.fit_transform(train_X_df)\n valid_X_df[:], test_X_df[:] = scaler.transform(valid_X_df), scaler.transform(test_X_df)\n self.scalers[i] = scaler\n\n # shift\n train_X = preprocess_lstm(train_X_df, shifts=self.shift)\n valid_X, test_X = preprocess_lstm(valid_X_df, shifts=self.shift), preprocess_lstm(test_X_df,\n shifts=self.shift)\n\n # remove lines containing nan in predictions\n train_X, train_Y = handle_nan_Y(train_X, train_Y, self.verbose)\n valid_X, valid_Y = handle_nan_Y(valid_X, valid_Y, self.verbose)\n test_X, test_Y = handle_nan_Y(test_X, test_Y, self.verbose)\n\n # torchize\n train_data = TensorDataset(torch.from_numpy(train_X.copy()).float(), torch.from_numpy(train_Y.copy()))\n val_data = TensorDataset(torch.from_numpy(valid_X.copy()).float(), torch.from_numpy(valid_Y.copy()))\n test_data = TensorDataset(torch.from_numpy(test_X.copy()).float(), torch.from_numpy(test_Y.copy()))\n\n model_path = os.path.join(self.dir_path, str(i))\n try:\n os.mkdir(model_path)\n except FileExistsError as e:\n logger.info(f\"File {model_path} already exists, replace it with new model\")\n shutil.rmtree(model_path)\n os.mkdir(model_path)\n tmp_model_path = os.path.join(model_path, 'tmp')\n os.mkdir(tmp_model_path) # save temporary trained models here, will be removed\n\n # iterate on model configurations\n keys, values = zip(*self.lstm_configs.items())\n for v in itertools.product(*values):\n experiment_param = dict(zip(keys, v))\n if \"batch_size\" not in experiment_param:\n experiment_param[\"batch_size\"] = DEFAULT_BATCH_SIZE\n if \"hidden_dim\" not in experiment_param:\n experiment_param[\"hidden_dim\"] = DEFAULT_HIDDEN_DIM\n if \"num_layers\" not in experiment_param:\n experiment_param[\"num_layers\"] = DEFAULT_NUM_LAYERS\n if \"dropout\" not in experiment_param:\n experiment_param[\"dropout\"] = DEFAULT_DROPOUT\n batch_size = experiment_param[\"batch_size\"]\n if self.verbose > 0:\n logger.info(f\"{i} - {experiment_param} - current best score: {self.best_scores[-1]:2f}\")\n # loss, lr, num_epochs should not be fed into the model\n loss = DEFAULT_LOSS if 'loss' not in experiment_param else experiment_param.pop('loss')\n lr = DEFAULT_LR if 'lr' not in experiment_param else experiment_param.pop('lr')\n num_epochs = DEFAULT_NUM_EPOCHS if 'num_epochs' not in experiment_param else experiment_param.pop(\n 'num_epochs')\n\n train_loader = DataLoader(train_data, shuffle=True, batch_size=batch_size)\n\n lstm_input_size = train_X.shape[-1]\n model_wf = LSTM(lstm_input_size, output_dim=1, batch_first=True, **experiment_param)\n\n finished = train_model(model_wf, train_loader, val_data, loss, tmp_model_path, num_epochs=num_epochs,\n lr=lr, verbose=self.verbose)\n\n if not finished:\n logger.info('Not finished...')\n continue\n\n # Loading the best model to evaluate on test\n model_wf.load_state_dict(torch.load(os.path.join(tmp_model_path, 'state_dict.pt')))\n model_wf.eval()\n test_score = CAPE_loss(model_wf(test_data.tensors[0]).squeeze(), test_data.tensors[1]).item()\n if self.verbose > 1:\n logger.info(\"Test loss: {:.3f}\".format(test_score))\n\n if test_score < 
self.best_scores[-1]:\n if self.verbose > 1:\n logger.info(f'Save new model for WF {i}...')\n torch.save(model_wf.state_dict(), os.path.join(model_path, 'state_dict.pt'))\n model_config = {'shift': self.shift, 'scaler': self.scalers[i], 'loss': loss, 'lr': lr}\n lstm_config = experiment_param\n utils.save_obj(model_path, model_config, 'model_config')\n utils.save_obj(model_path, lstm_config, 'lstm_config')\n self.best_models[-1] = model_wf\n self.best_scores[-1] = test_score\n # remove tmp file\n shutil.rmtree(tmp_model_path)", "title": "" }, { "docid": "34ab2bee70287096b88981917770e521", "score": "0.5983396", "text": "def retrain_model(self, X, Y):\n\t\treturn self.model.fit(X, Y)", "title": "" }, { "docid": "578db65e5ea6141e10cc88dbdba0144c", "score": "0.5980964", "text": "def partial_regression_model(obj, x, y, k=10):\n\n # fit the model using train/test division\n X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)\n obj.partial_fit(X_train, y_train)\n\n res = {'fitted_model': obj,\n 'cv_score': cross_val_score(obj, x, y, cv=k).mean()}\n\n return res", "title": "" }, { "docid": "99bf635204c65862c773cd25aeb86d8c", "score": "0.5980245", "text": "def single_model_opt(model, x, y, xtest, ytest):\n pipe = Pipeline(steps=[('model', model)])\n fit = pipe.fit(x, y)\n ypred = model.predict(xtest)\n \n print(model,\" \", fit.score(xtest, ytest))\n score_report(ytest, ypred)\n \n plot_confusion_matrix(model, xtest, ytest, values_format='1')\n plt.show()\n pass", "title": "" }, { "docid": "2f64573f477413685eb7232e0a4ebc3a", "score": "0.59761864", "text": "def train_evaluate(hparams):\n dataset_dir = download_data(LOCAL_DATA_DIR)\n train_ds, val_ds, test_ds = load_datasets(dataset_dir, hparams)\n \n train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)\n val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE) \n \n epochs = hparams['epochs']\n steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()\n n_train_steps = steps_per_epoch * epochs\n n_warmup_steps = int(0.1 * n_train_steps) \n \n optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],\n num_train_steps=n_train_steps,\n num_warmup_steps=n_warmup_steps,\n optimizer_type='adamw') \n \n mirrored_strategy = tf.distribute.MirroredStrategy()\n with mirrored_strategy.scope():\n model = build_text_classifier(hparams=hparams, optimizer=optimizer)\n logging.info(model.summary())\n \n history = model.fit(x=train_ds,\n validation_data=val_ds,\n epochs=epochs) \n \n logging.info(\"Test accuracy: %s\", model.evaluate(test_ds))\n\n # Export Keras model in TensorFlow SavedModel format.\n model.save(hparams['model-dir'])\n \n return history", "title": "" }, { "docid": "4027ce5e70ff03f2b97b6f5bca82c1fc", "score": "0.5972034", "text": "def _run_model_fit(self, model):\n\n mb_obs_list=self.sdata['mb_obs_list']\n\n # through parent, this runs the mcmc\n fitter=self._fit_model(mb_obs_list, model)\n\n self._copy_simple_pars(fitter)\n\n self._print_res(fitter)\n\n if self['make_plots']:\n self._do_make_plots(fitter, model)\n\n return fitter", "title": "" }, { "docid": "fce499b8a8b32bf37a38a75db9e43172", "score": "0.5957966", "text": "def run_from_saved_model_with_my_data():\n x, y = load_my_data()\n # preprocess training and test labels \n X_data = preprocess_image_data(x)\n Y_data = preprocess_label_data(y)\n \n # load pre-trained model\n cnn = load_model('cnn_model')\n results = cnn.evaluate(X_data, Y_data, verbose=1)\n predictions = 
cnn.predict(X_data)\n print('results', results)\n # print('predictions', predictions)\n return results", "title": "" }, { "docid": "e3d53ccfdfe55a56ffd34800d3463f6b", "score": "0.59577394", "text": "def best_model(self):", "title": "" }, { "docid": "e3d53ccfdfe55a56ffd34800d3463f6b", "score": "0.59577394", "text": "def best_model(self):", "title": "" }, { "docid": "fcb57657e52c3b838df589a988ebc4db", "score": "0.5955379", "text": "def fit_model(X, y):\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor(random_state=0)\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth' : range(1, 11)}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search object\n grid = GridSearchCV(regressor, params, scoring = scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "title": "" }, { "docid": "210fe15fc4207a376d856867b8496b8d", "score": "0.5950687", "text": "def _fit_and_eval(\n self,\n model,\n cross_validate:bool = False,\n eval_metrics:bool = False,\n ) -> float:\n if cross_validate:\n # val_score will be obtained by performing cross validation\n if self.val_data_: # keyword data\n val_scores = model.cross_val_score(\n validation_data=self.val_data_,\n scoring=[self.eval_metric] + self.monitor,\n refit=False,\n **self.data_)\n else: # keyword x,y\n val_scores = model.cross_val_score(\n scoring=[self.eval_metric] + self.monitor,\n refit=False,\n **self.data_)\n\n val_score = val_scores.pop(0)\n\n for k, pm_val in zip(self.monitor, val_scores):\n\n self.metrics_.at[self.parent_iter_, k] = pm_val\n\n func = compare_func1(METRIC_TYPES[k])\n best_so_far = func(self.metrics_best_.loc[:self.parent_iter_, k])\n\n best_so_far = fill_val(METRIC_TYPES[k], best_so_far)\n\n func = compare_func(METRIC_TYPES[k])\n if func(pm_val, best_so_far):\n\n self.metrics_best_.at[self.parent_iter_, k] = pm_val\n else:\n # train the model and evaluate it to calculate val_score\n model.fit(**self.data_)\n val_score = self._eval_model_manually(\n model,\n #data,\n self.eval_metric,\n eval_metrics=eval_metrics\n )\n\n return val_score", "title": "" }, { "docid": "e6b8da60f55b0395e7456aa8976fde35", "score": "0.5946849", "text": "def fit(self, epochs=None, env=None) -> None:\n assert self.compiled, 'Call model.compile() before model.fit()'\n\n if self.multiple_individual_processes:\n # start all tasks which are stored in the scheduler\n self.scheduler.run()\n else:\n # single model training\n if epochs is None:\n epochs = self.kwargs.pop('epochs')\n else:\n self.kwargs.pop('epochs') # pop to avoid double kwargs\n\n # fit() can also take a custom env, e.g. 
a virtual environment\n env_id = self.env_id if env is None else env\n\n learn_func = utils.get_learn_function(self.alg)\n ac, env = learn_func(\n env_id=env_id,\n logger_kwargs=self.logger_kwargs,\n epochs=epochs,\n **self.kwargs\n )\n self.model = ac\n self.env = env\n self.trained = True", "title": "" }, { "docid": "06f653162d02fb80bf7658347d3fafe0", "score": "0.5928384", "text": "def fit_model(X, y):\n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(X.shape[0], test_size = 0.20, random_state = 0)\n #Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n #Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth':np.arange(1,11)}\n #Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n #Create the grid search object\n grid = GridSearchCV(regressor, param_grid = params, scoring = scoring_fnc, cv = cv_sets)\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "title": "" }, { "docid": "58eaa204e342c1afa72079247202f520", "score": "0.5922493", "text": "def fit(self, data, targets):", "title": "" }, { "docid": "7871fb00b6a2326812a29fc59515de09", "score": "0.5922464", "text": "def bfe_model_from_scratch(\n self,\n iter_num: int,\n x = None,\n y = None,\n data = None,\n test_data=None,\n fit_on_all_train_data: bool = True,\n )->Model:\n\n train_data, test_data = verify_data(x, y, data, test_data, \"test\")\n\n pipeline = self.parent_suggestions_[iter_num]\n prefix = f\"{self.path}{SEP}results_from_scratch{SEP}iteration_{iter_num}\"\n\n model = self._build_and_eval_from_scratch(\n model=pipeline['model'],\n train_data=train_data,\n x_transformation=pipeline['x_transformation'],\n y_transformation=pipeline['y_transformation'],\n prefix=prefix,\n fit_on_all_train_data=fit_on_all_train_data,\n seed=self.parent_seeds_[int(pipeline['iter_num'])-1],\n test_data=test_data,\n )\n return model", "title": "" }, { "docid": "b7c84044e4d5870aecb9245f2e58a270", "score": "0.592189", "text": "def evaluate(model, data, params, last_save_file):\n if last_save_file:\n model.load(last_save_file)\n else:\n if not params.save_file:\n raise ValueError(\n \"Must provide a save file name if not training first.\")\n model.load(params.save_file)\n\n split = None\n if params.evaluate_split == 'dev':\n split = data.dev_data\n elif params.evaluate_split == 'train':\n split = data.train_data\n elif params.evaluate_split == 'test':\n split = data.test_data\n elif params.evaluate_split == 'valid':\n split = data.valid_data\n else:\n raise ValueError(\"Split not recognized: \" + str(params.evaluate_split))\n\n filename = params.evaluate_split\n if params.use_predicted_queries:\n filename += \"predicted\"\n else:\n filename += \"gold\"\n\n full_name = os.path.join(params.logdir, filename) + params.results_note\n\n if params.interaction_level or params.use_predicted_queries:\n examples = data.get_all_interactions(split)\n if params.interaction_level:\n evaluate_interaction_sample(\n examples,\n model,\n name=full_name,\n metrics=FINAL_EVAL_METRICS,\n total_num=atis_data.num_utterances(split),\n database_username=params.database_username,\n database_password=params.database_password,\n database_timeout=params.database_timeout,\n use_predicted_queries=params.use_predicted_queries,\n max_generation_length=params.eval_maximum_sql_length,\n 
write_results=True,\n use_gpu=True)\n else:\n evaluate_using_predicted_queries(\n examples,\n model,\n name=full_name,\n metrics=FINAL_EVAL_METRICS,\n total_num=atis_data.num_utterances(split),\n database_username=params.database_username,\n database_password=params.database_password,\n database_timeout=params.database_timeout)\n else:\n examples = data.get_all_utterances(split)\n evaluate_utterance_sample(\n examples,\n model,\n name=full_name,\n gold_forcing=False,\n metrics=FINAL_EVAL_METRICS,\n total_num=atis_data.num_utterances(split),\n max_generation_length=params.eval_maximum_sql_length,\n database_username=params.database_username,\n database_password=params.database_password,\n database_timeout=params.database_timeout,\n write_results=True)", "title": "" }, { "docid": "f631008266869a37eb46ca848f353134", "score": "0.5920816", "text": "def offline_regression_model(obj, x, y, k=10):\n # fit the model using train/test division\n X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)\n obj.fit(X_train, y_train)\n\n res = {'fitted_model': obj,\n 'cv_score': cross_val_score(obj, x, y, cv=k).mean()}\n return res", "title": "" }, { "docid": "cad2c45324e728e3cb8d142acae07d62", "score": "0.5905783", "text": "def train_model(data, response, model=None):\n if model is None:\n model = AdaBoostClassifier()\n # model = Neural()\n resp = response.values.ravel()\n fitted_model = model.fit(data, resp)\n return fitted_model", "title": "" }, { "docid": "f82065207f06ce5776ea009910dd9d84", "score": "0.5905436", "text": "def fit_model(X, y):\n\n # Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # Set up the parameters we wish to tune\n parameters = {'max_depth':(1,2,3,4,5,6,7,8,9,10)}\n\n # Make an appropriate scoring function\n scoring_function = make_scorer(mean_squared_error)\n\n # Make the GridSearchCV object\n reg = GridSearchCV(regressor, parameters, scoring = scoring_function)\n\n # Fit the learner to the data to obtain the optimal model with tuned parameters\n reg.fit(X, y)\n\n # Return the optimal model\n return reg.best_estimator_", "title": "" }, { "docid": "8da0ba2a7e27fdbe1a831e4d3a86ed50", "score": "0.5896827", "text": "def bfe_all_best_models(\n self,\n x = None,\n y = None,\n data = None,\n test_data=None,\n metric_name: str = None,\n fit_on_all_train_data: bool = True,\n verbosity:int = 0,\n ) -> None:\n\n train_data, test_data = verify_data(x, y, data, test_data, \"test\")\n\n met_name = metric_name or self.eval_metric\n\n for model in self.models:\n\n try:\n metric_val, pipeline = self.get_best_pipeline_by_model(model, met_name)\n except ModelNotUsedError:\n continue\n\n prefix = f\"{self.path}{SEP}results_from_scratch{SEP}{met_name}_{metric_val}_{model}\"\n\n model_config = pipeline['model']\n\n if self.category == \"DL\":\n model_name = list(model_config.keys())[0]\n kwargs = list(model_config.values())[0]\n\n model_config = DL_MODELS[model_name](mode=self.mode,\n output_features=self.num_outputs,\n **kwargs)\n _ = self._build_and_eval_from_scratch(\n model=model_config,\n train_data=train_data,\n x_transformation=pipeline['x_transformation'],\n y_transformation=pipeline['y_transformation'],\n prefix=prefix,\n model_name=model,\n fit_on_all_train_data=fit_on_all_train_data,\n verbosity=verbosity,\n seed=self.parent_seeds_[int(pipeline['iter_num'])-1],\n test_data=test_data,\n )\n\n return", "title": "" }, { "docid": "5a5fe5cbd21663d25b229d721674b6d3", "score": "0.5888107", "text": "def fit_model(self, X, y):\n performance_metric = 
self.performance_metric\n \n # Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n \n # Set up the parameters we wish to tune\n parameters = {'max_depth':(1,2,3,4,5,6,7,8,9,10)}\n \n # Make an appropriate scoring function\n scoring_function = make_scorer(performance_metric, greater_is_better = False)\n \n cv = ShuffleSplit(y.shape[0], n_iter=1000, test_size=0.3, random_state=42)\n \n # Make the GridSearchCV object\n reg = GridSearchCV(regressor, parameters, scoring = scoring_function, cv = cv)\n \n # Fit the learner to the data to obtain the optimal model with tuned parameters\n reg.fit(X, y)\n print \"best score {}\".format(reg.best_score_)\n print \"best parameters {}\".format(reg.best_params_)\n self.dispFeatureImportance(reg.best_estimator_)\n \n # Return the optimal model\n return reg.best_estimator_", "title": "" }, { "docid": "4e182204ea90fb5aac1ac33c98495b92", "score": "0.5882882", "text": "def _run_model_fit(self, model, coadd=False):\n if coadd:\n self.guesser=self._get_guesser(self['coadd_model_guess'])\n mb_obs_list=self.sdata['coadd_mb_obs_list']\n else:\n self.guesser=self._get_guesser(self['me_model_guess'])\n mb_obs_list=self.sdata['mb_obs_list']\n\n fitter=self._fit_model(mb_obs_list, model)\n\n self._copy_simple_pars(fitter, coadd=coadd)\n\n self._print_res(fitter, coadd=coadd)\n\n if self['make_plots']:\n self._do_make_plots(fitter, model, coadd=coadd,\n fitter_type=self['fitter_class'])\n\n if coadd:\n self.coadd_fitter=fitter\n else:\n self.fitter=fitter", "title": "" }, { "docid": "03328ce41b2a779f77d616f3d5e16c6e", "score": "0.5881077", "text": "def fit_eval_model(model_orig, pats=[1,2,3], Name=None, epochs=10, batch_size=10, sampleProp=1, downsample=10, freqs=None, coh=False):\n\n for pat in pats:\n model = keras.models.clone_model(model_orig)\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n X_train, y_train, X_val, y_val, X_test = gen_dataset(pat, sampleProp=sampleProp, downsample=downsample, freqs=freqs, coh=coh)\n\n model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs)\n\n # training\n loss_train, acc_train = model.evaluate(X_train, y_train)\n pred_train = model.predict(X_train)\n auc_train = sklearn.metrics.roc_auc_score(y_train, pred_train)\n\n # cross validation\n loss_val, acc_val = model.evaluate(X_val, y_val)\n pred_val = model.predict(X_val)\n auc_val = sklearn.metrics.roc_auc_score(y_val, pred_val)\n\n model.save('../outputs/' + Name + '_' + str(pat) + '.h5')\n with open('../outputs/' + Name + '.txt', 'w') as f:\n f.write(\"loss_train: %s\\n\" % loss_train)\n f.write(\"acc_train: %s\\n\" % acc_train)\n f.write(\"auc_train: %s\\n\" % auc_train)\n f.write(\"loss_val: %s\\n\" % loss_val)\n f.write(\"acc_val: %s\\n\" % acc_val)\n f.write(\"auc_val: %s\\n\" % auc_val)", "title": "" }, { "docid": "d8277af7bdda83fbed70cc7a1d04f23d", "score": "0.58789766", "text": "def fit(self):\n if self.c:\n self.model.fit(train_images, train_labels, epochs=5, batch_size=64)\n self.t = True\n else:\n print('Compile the model first')", "title": "" }, { "docid": "a69ffd83b90a8949b3b61142ab1bceb0", "score": "0.58686304", "text": "def fit(self, x, y):\n\n # don't fit model and load instead (but only if chkpt exists)\n p = self.cfg['skip_fit_debug']\n\n # set up base bath\n if self.cfg.get('skip_fit_debug_relative', False):\n base = Path('.')\n else:\n base = Path(hydra.utils.get_original_cwd())\n\n # load or fit model\n if p and (loc := (base / p)).exists():\n logging.info(f'Loading model from {p}.')\n 
self.model.load_state_dict(\n torch.load(loc, map_location=self.device))\n else:\n if p and not (loc := (base / p)).exists():\n logging.info(f'Tried to load model, but {p} does not exist.')\n\n train_loader, val_loader = self.val_train_loaders(x, y)\n # from activetesting.datasets import get_CIFAR10\n # train_loader, val_loader = get_CIFAR10()\n self.model = self.train_to_convergence(\n self.model, train_loader, val_loader)\n\n path = Path(self.cfg.get('save_path', 'model.pth'))\n\n if not path.exists():\n logging.info(f'Saving model to {path}.')\n path.parent.mkdir(parents=True, exist_ok=True)\n torch.save(self.model.state_dict(), path)\n\n # enable/disable calibration. if enabled load if available.\n calibrated = self.cfg.get('calibrated', False)\n if not calibrated:\n logging.info('Calibration disabled.')\n else:\n load_calibration = self.cfg.get(\n 'load_calibration', True)\n tpath = base / self.cfg.get(\n 'temp_skip_fit_debug', 'temperature.json')\n if load_calibration and tpath.exists():\n calibration_results = from_json(tpath)\n T = calibration_results['T']\n logging.info(f'Loaded T={T} from {tpath}.')\n logging.info(calibration_results)\n self.T = T\n else:\n logging.info(\n f'Calibrating fresh b/c {tpath} does not exist or '\n f'load_calibration is false (is {load_calibration}).')\n _, val_loader = self.val_train_loaders(x, y)\n failure = True\n it = 0\n lr = 0.01\n while failure:\n calibration_results = calibrate.set_temperature(\n self.model, val_loader, self.device,\n n_samples=self.t_cfg.get('variational_samples', None),\n lr=lr*(0.3)**it)\n failure = calibration_results['failure']\n it += 1\n\n if it > 10:\n raise ValueError('Calibration failed too often.')\n\n self.T = calibration_results['T']\n logging.info(f'Set temperature to {self.T} after calibration.')\n\n temp_save_path = Path(\n self.cfg.get('temp_save_path', 'temperature.json'))\n\n if calibrated and not temp_save_path.exists():\n logging.info(f'Saving calibration Temp. to {temp_save_path}.')\n to_json(calibration_results, temp_save_path)\n\n logging.info('Finished fitting.')", "title": "" }, { "docid": "1082440960e657c3964c23b4b3d7b948", "score": "0.5868001", "text": "def fit_process_data(self):\n self.process_data(with_fit=True)", "title": "" }, { "docid": "e25e5018c3648aef05d44b4524249807", "score": "0.5867887", "text": "def evaluate_model():\n global mlp, X_train, Y_train, X_test, Y_test, mdiv, WINE_DATA\n if WINE_DATA:\n print(\"\\n[Training set] Accuracy: %.2f%%\" % (100 * accuracy(mlp.predict(X_train), Y_train)))\n print(\" [Test set] Accuracy: %.2f%%\" % (100 * accuracy(mlp.predict(X_test), Y_test)))\n else:\n MSE = MeanSquaredError()\n train_mae, train_std = abs_error(mdiv * mlp.predict(X_train), mdiv * Y_train)\n train_mse = MSE(mdiv * mlp.predict(X_train), mdiv * Y_train)\n print(\"\\n[Training set] MAE: {:.2f} | Abs. error std: {:.2f} | MSE: {:.2f}\".format(train_mae, train_std, train_mse))\n\n test_mae, test_std = abs_error(mdiv * mlp.predict(X_test), Y_test)\n test_mse = MSE(mdiv * mlp.predict(X_test), Y_test)\n print(\" [Test set] MAE: {:.2f} | Abs. 
error std: {:.2f} | MSE: {:.2f}\".format(test_mae, test_std, test_mse))", "title": "" }, { "docid": "25a7adac983dbb82279e23d422e81a05", "score": "0.5860001", "text": "def fit_predict(self):\n \n self.fit_model()\n self.forecast()", "title": "" }, { "docid": "de5b13cfc064ae318aa55a212b951072", "score": "0.58598286", "text": "def fit_model(X, y):\n \n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)\n\n # TODO: Create a decision tree regressor object\n regressor =DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth':(1,2,3,4,5,6,7,8,9,10)}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' \n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search object\n grid = GridSearchCV(regressor, params, scoring_fnc,cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "title": "" }, { "docid": "38885140343a334530a6e2fb347a89f0", "score": "0.5856484", "text": "def fit_data(x_data, y_data, models, report=True, **kwargs):\n # First, we need to get the model selected by the user.\n avail_models = PostProcessing.load_lmfit_models()\n models_list = []\n for model in models:\n assert isinstance(model, str), f'Models must be introduced as strings, not as {type(model)}.'\n assert model in avail_models, f'Model {model} is not available. Check available methods using PostProcessing.load_lmfit_models()'\n mod = __import__('lmfit.models', fromlist=[model])\n klass = getattr(mod, model)\n if model == 'PolynomialModel':\n assert kwargs.get('degree') is not None, f'If you use a PolynomialModel, you must introduce a degree.'\n models_list.append(klass(degree=kwargs.get('degree')))\n else:\n prefix = 'model' + str(len(models_list)+1) # To avoid overlapping of model parameters.\n models_list.append(klass(prefix=prefix))\n for i in range(0, len(models_list)):\n if i == 0:\n user_model = models_list[i]\n else:\n user_model += models_list[i]\n params = user_model.make_params()\n result = user_model.fit(y_data, params, x=x_data)\n\n if report:\n print(\"\\n------------------------------------------------------\\n\")\n print(\" Fitting results \")\n print(result.fit_report())\n print(\"\\n------------------------------------------------------\\n\")\n result.plot()\n \"\"\"\n We need to retrieve the function corresponding to the created model,\n independently from the number of input models.\n \"\"\"\n # Redefine the parameters, so we can obtain the ones corresponding to\n # the best fit.\n params = result.params\n\n # Define the function to be returned to the user.\n def fit_fun(x):\n return user_model.eval(params=params, x=x)\n\n return fit_fun", "title": "" }, { "docid": "43a89ccdbc2e3ebddd2b3693484db48c", "score": "0.58523446", "text": "def evaluate_models_on_training(x, y, models):\n \n for model in models:\n \n #plot measured points\n pylab.plot(x, y, 'bo',label=\"Measured Points\")\n\n estYvals = pylab.polyval(model,x)\n r2 = r_squared(y, estYvals)\n \n #plot best fit line\n pylab.plot(x, estYvals,'-r', label = \"R^2=\"+format(r2, '.5f'))\n pylab.legend(loc='best')\n \n pylab.xlabel('Year')\n pylab.ylabel('Temperature')\n \n r2_trunc = truncate(r2,3)\n seos_trunc = truncate(se_over_slope(x,y,estYvals,model),3)\n title = (len(model) - 1, \"Degree 
Model, R = \" , r2_trunc , \"SE/slope =\" , seos_trunc)\n \n pylab.title(title)", "title": "" }, { "docid": "f180a397faa727b113b91f7438441fa3", "score": "0.5846046", "text": "def model_fit(self, generate_data=True, inputs=None, outputs=None,\n csv_name=None, save_model=False, meta_algo_params=None):\n if meta_algo_params is None:\n if self.meta_algo == 'NN':\n meta_algo_params = \\\n {'max_iter': 200, 'hidden_layer_sizes': [100, 100, 100]}\n\n elif self.meta_algo == 'RF':\n meta_algo_params = \\\n {'criterion': 'mse', 'max_depth': 100, 'max_features': 10}\n\n if generate_data:\n inputs, outputs, _ = self._generate_data()\n else:\n if csv_name is not None:\n inputs, outputs = self._transform_from_csv(csv_name=csv_name)\n\n if inputs is None or outputs is None:\n raise NameError('''no inputs / outputs found: please enter a csv name or set generate_data to True''')\n\n X, y, cols, original_cols = self._transform_data(inputs, outputs)\n\n # we decide on a meta-algorithm\n if self.meta_algo not in config('supported_meta_algos'):\n raise KeyError(f'''meta algo {self.meta_algo} currently not supported''')\n\n if self.meta_algo == 'RF':\n meta_algo = RandomForestRegressor(**meta_algo_params)\n if self.meta_algo == 'NN':\n meta_algo = MLPRegressor(**meta_algo_params)\n\n if self.verbose >= 2:\n self.logger.info(f'''Fitting {self.meta_algo} to estimate training durations for model {self.algo}''')\n\n # dividing into train/test\n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=0.20, random_state=42)\n\n if self.meta_algo == 'NN':\n X_train_scaled, X_test_scaled = \\\n self._scale_data(X_train, X_test, save_model)\n\n meta_algo.fit(X_train_scaled, y_train)\n\n else:\n meta_algo.fit(X_train, y_train)\n\n if save_model:\n if self.verbose >= 2:\n self.logger.info(f'''Saving {self.meta_algo} to {self.meta_algo}_{self.algo}_estimator.pkl''')\n\n model_path = f'''{get_path(\"models\")}/{self.meta_algo}_{self.algo}_estimator.pkl'''\n\n joblib.dump(meta_algo, model_path)\n\n json_path = f'''{get_path(\"models\")}/{self.meta_algo}_{self.algo}_estimator.json'''\n\n with open(json_path, 'w') as outfile:\n json.dump({\"dummy\": list(cols),\n \"original\": list(original_cols)}, outfile)\n\n if self.meta_algo == 'NN':\n if self.verbose >= 2:\n self.logger.info(f'''R squared on train set is {r2_score(y_train, meta_algo.predict(X_train_scaled))}''')\n\n # MAPE is the mean absolute percentage error\n test_relu = [max(i, 0) for i in meta_algo.predict(X_test_scaled)]\n train_relu = [max(i, 0) for i in meta_algo.predict(X_train_scaled)]\n y_pred_test = np.array(test_relu)\n y_pred_train = np.array(train_relu)\n\n else:\n if self.verbose >= 2:\n self.logger.info(f'''R squared on train set is {r2_score(y_train, meta_algo.predict(X_train))}''')\n\n y_pred_test = meta_algo.predict(X_test)\n y_pred_train = meta_algo.predict(X_train)\n\n mape_test = np.mean(np.abs((y_test - y_pred_test) / y_test)) * 100\n mape_train = np.mean(np.abs((y_train - y_pred_train) / y_train)) * 100\n\n bins, mape_index_list = self.bins\n mid_bins = [(y_pred_test >= i[0]) & (y_pred_test < i[1]) for i in bins]\n\n bins_values = [y_pred_test < 1] + mid_bins + [y_pred_test >= 10 * 60]\n\n if save_model:\n mse_tests = [mean_squared_error(y_test[bin], y_pred_test[bin])\n for bin in bins_values]\n\n observation_tests = [y_test[bin].shape[0] for bin in bins_values]\n\n mse_test_dic = dict(zip(mape_index_list,\n zip(observation_tests, mse_tests)))\n\n if self.verbose >= 2:\n self.logger.info(f'''Computed mse on test set (with number 
of observations): {mse_test_dic}''')\n\n if self.meta_algo == 'NN':\n if save_model:\n json_conf_path = f'''{get_path(\"models\")}/{self.meta_algo}_{self.algo}_confint.json'''\n\n self.logger.info(f'''Saving confint to {self.meta_algo}_{self.algo}_confint.json''')\n\n with open(json_conf_path, 'w') as outfile:\n json.dump(mse_test_dic, outfile)\n\n if self.verbose >= 2:\n rmse_train = np.sqrt(mean_squared_error(y_train, y_pred_train))\n rmse_test = np.sqrt(mean_squared_error(y_test, y_pred_test))\n\n self.logger.info(f'''\n MAPE on train set is: {mape_train}\n MAPE on test set is: {mape_test}\n RMSE on train set is {rmse_train}\n RMSE on test set is {rmse_test} ''')\n\n return meta_algo", "title": "" }, { "docid": "c4db4bbe28bb5ed6980ef946466e6bc0", "score": "0.58443815", "text": "def fit_batch():\r\n pass", "title": "" }, { "docid": "5fb97406dd5014ea85607bb6ff708269", "score": "0.5842635", "text": "def predict(model):", "title": "" }, { "docid": "29a5821cf28da8ade2daf9b202637e95", "score": "0.5840065", "text": "def train_base(X_train, y_train_binarized):\n pipeline = _get_training_pipeline(X_train, y_train_binarized)\n model = _fit(model=pipeline, X_train=X_train, y_train_binarized=y_train_binarized)\n return model", "title": "" }, { "docid": "4d9fffb1788f1bb54277c0728e9af518", "score": "0.58347833", "text": "def fit_and_evaluate_model_from_inputs(y, X_without_constant, X):\n model = sm.OLS(y, X)\n results = model.fit()\n print(results.summary())\n\n y_hat = results.predict()\n\n print(\"\\nPerforming Linearity Checks\\n\")\n model_evaluation.perform_linearity_checks(X_without_constant, y, y_hat, results)\n\n print(\"\\nPerforming Independence Checks\\n\")\n model_evaluation.perform_independence_checks(X)\n\n print(\"\\nPerforming Homoscedasticity Checks\\n\")\n model_evaluation.perform_homoscedasticity_checks(y, y_hat, X_without_constant)\n\n print(\"\\nPerforming Normality Checks\\n\")\n model_evaluation.perform_normality_checks(results)", "title": "" }, { "docid": "e91a3392bc23215323fc1c0e919cef5b", "score": "0.5827884", "text": "def trn_sklearn_model(model, xtr, ytr, fit_kwargs, eval_set=None):\n # Fit params\n fit_kwargs = fit_kwargs\n # fit_kwargs = self.fit_kwargs.copy()\n\n # Train and save model\n t0 = time()\n model.fit(xtr, ytr, **fit_kwargs)\n runtime = (time() - t0)/60\n # joblib.dump(model, filename = trn_outdir / ('model.'+self.model_name+'.pkl') )\n return model, runtime", "title": "" }, { "docid": "32f8e3158d24c23d6b234dd4d5f6d1d4", "score": "0.58276707", "text": "def train_model(self):\n assert False, \"Implement this method!\"", "title": "" }, { "docid": "ec07600d252a69c9cb3720820c1dab48", "score": "0.5825553", "text": "def partial_classification_model(obj, x, y, k=10):\n # fit the model using train/test division\n X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)\n obj.partial_fit(X_train, y_train, classes=[0, 1])\n\n res = {'fitted_model': obj,\n 'cv_score': cross_val_score(obj, x, y, cv=k).mean()}\n return res", "title": "" }, { "docid": "04624f302ab334a51f50e43fded24e55", "score": "0.581961", "text": "def fit(self, X: pd.DataFrame, y: pd.Series):\n\n # check input dataframe\n X, y = check_X_y(X, y)\n\n # If required exclude variables that are not in the input dataframe\n self._confirm_variables(X)\n\n # find numerical variables or check variables entered by user\n self.variables_ = find_or_check_numerical_variables(X, self.variables_)\n\n # check that there are more than 1 variable to select from\n self._check_variable_number()\n\n # save 
input features\n self._get_feature_names_in(X)\n\n # train model with all features and cross-validation\n model = cross_validate(\n self.estimator,\n X[self.variables_],\n y,\n cv=self.cv,\n scoring=self.scoring,\n return_estimator=True,\n )\n\n # store initial model performance\n self.initial_model_performance_ = model[\"test_score\"].mean()\n\n # Initialize a dataframe that will contain the list of the feature/coeff\n # importance for each cross validation fold\n feature_importances_cv = pd.DataFrame()\n\n # Populate the feature_importances_cv dataframe with columns containing\n # the feature importance values for each model returned by the cross\n # validation.\n # There are as many columns as folds.\n for i in range(len(model[\"estimator\"])):\n m = model[\"estimator\"][i]\n feature_importances_cv[i] = get_feature_importances(m)\n\n # Add the variables as index to feature_importances_cv\n feature_importances_cv.index = self.variables_\n\n # Aggregate the feature importance returned in each fold\n self.feature_importances_ = feature_importances_cv.mean(axis=1)\n\n return X, y", "title": "" }, { "docid": "c211b38e16da023ccd7d20a4143c5e36", "score": "0.58188254", "text": "def train_single_model(model: TransformerModelWrapper, train_data: List[InputExample], config: TrainConfig,\n eval_config: EvalConfig = None, ipet_train_data: List[InputExample] = None,\n unlabeled_data: List[InputExample] = None, return_train_set_results: bool = True):\n\n device = torch.device(config.device if config.device else \"cuda\" if torch.cuda.is_available() else \"cpu\")\n if not ipet_train_data:\n ipet_train_data = []\n\n results_dict = {}\n\n model.model.to(device)\n\n if train_data and return_train_set_results:\n results_dict['train_set_before_training'] = evaluate(model, train_data, eval_config)['scores']['acc']\n\n all_train_data = train_data + ipet_train_data\n\n if not all_train_data and not config.use_logits:\n logger.warning('Training method was called without training examples')\n else:\n global_step, tr_loss = model.train(\n all_train_data, device,\n per_gpu_train_batch_size=config.per_gpu_train_batch_size,\n per_gpu_unlabeled_batch_size=config.per_gpu_unlabeled_batch_size,\n n_gpu=config.n_gpu,\n num_train_epochs=config.num_train_epochs,\n max_steps=config.max_steps,\n gradient_accumulation_steps=config.gradient_accumulation_steps,\n weight_decay=config.weight_decay,\n learning_rate=config.learning_rate,\n adam_epsilon=config.adam_epsilon,\n warmup_steps=config.warmup_steps,\n max_grad_norm=config.max_grad_norm,\n unlabeled_data=unlabeled_data if config.lm_training or config.use_logits else None,\n lm_training=config.lm_training,\n use_logits=config.use_logits,\n alpha=config.alpha,\n temperature=config.temperature\n )\n results_dict['global_step'] = global_step\n results_dict['average_loss'] = tr_loss\n\n if train_data and return_train_set_results:\n results_dict['train_set_after_training'] = evaluate(model, train_data, eval_config)['scores']['acc']\n\n return results_dict", "title": "" }, { "docid": "0a765a48c588a1602184f6e9ad3c5798", "score": "0.5817216", "text": "def train_eval(self):\n\t\tpass", "title": "" }, { "docid": "36abdd7145acaebedfc5afc0915f9864", "score": "0.58149743", "text": "def _fit(self, *args, **kwargs):\n self.fitted = True\n return self.clf.fit(*args, **kwargs)", "title": "" }, { "docid": "d51b0240a83ffbec4dd6655c69d4753f", "score": "0.58147764", "text": "def fit_model(X, y):\n\n # Create cross-validation sets from the training data\n cv_sets = ShuffleSplit(X.shape[0], n_iter 
= 10, test_size = 0.20, random_state = 0)\n\n # Create a decision tree regressor object\n regressor = DecisionTreeRegressor(random_state=0)\n\n # Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = dict(max_depth=range(1, 11))\n\n # Transform 'performance_metric' into a scoring function using 'make_scorer'\n scoring_fnc = make_scorer(performance_metric)\n\n # Create the grid search cv object --> GridSearchCV\n grid = GridSearchCV(regressor, params, cv=cv_sets, scoring=scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n return grid.best_estimator_", "title": "" }, { "docid": "2f83cc79f68b74fef188f14a6ebfdf8d", "score": "0.5814711", "text": "def train_model():\n # Train BG/NBD on Calibration-Holdout data.\n _calibration_model()\n\n # Train BG/NBD data on entire dataset.\n _clv_model()\n return", "title": "" }, { "docid": "0b1e383ed426bfd4d630f20a214e7034", "score": "0.58142984", "text": "def run_training():\n data=pd.read_csv(config.DATASET_DIR/config.DATASET_NAME)\n\n data=data[data['event']<config.CURRENT_GW]\n data=pr.convert_to_categorical(data,config.CATEGORICAL_COLS)\n features=data.drop(config.DROP_COLS,axis=1)\n target=data['total_points']\n \n model=lgb.LGBMRegressor(n_estimators=50)\n model.fit(features,target)\n joblib.dump(model,config.TRAINED_MODEL_DIR/config.TRAINED_MODEL_NAME)", "title": "" }, { "docid": "0351fb0913b2afd766eb825628bab1d2", "score": "0.5808056", "text": "def fit(self, X, y):\n # fit data processor\n X = self.data_processor.fit_transform(X)\n\n # fit model\n self.model.fit(X, y)", "title": "" }, { "docid": "97ae46ea3169d3142311a697ad19ddd7", "score": "0.5804382", "text": "def fit(self, X, y):", "title": "" }, { "docid": "99cf594cebed8f2650f6bb933e3e05d1", "score": "0.5804107", "text": "def find_or_train_model(self):\n self.set_data()\n model = self.find_model()\n if model == None:\n return self.train_model()\n else:\n return model", "title": "" }, { "docid": "0ad83ebcf7592ea578085e00373b3e0e", "score": "0.57999086", "text": "def all_three_models(X, y, header, X_TEST, Y_TARGET):\n seed = 7\n X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.33, random_state=42)\n \n min_diff = 1000\n min_value = 1000\n print 'Linear Regression'\n lm = LinearRegression()\n lm.fit(X_train, y_train)\n # print coeff for each feature\n coefs = pd.DataFrame(zip(header, lm.coef_), columns=['feature','estim coef'])\n print coefs\n target = map(float, y_test)\n pred = lm.predict(X_test)\n for (a,b) in zip(target, pred):\n print a,\" \",b\n print\"RMSE: \", np.sqrt(np.mean((target - pred)**2))\n print \"Predicted: \", lm.predict(X_TEST),\" True: \", Y_TARGET\n if abs(lm.predict(X_TEST) - float(Y_TARGET)) < min_diff:\n min_value, min_diff = lm.predict(X_TEST), abs(lm.predict(X_TEST) - float(Y_TARGET))\n pickle.dump(lm, open(\"Models/linear_3.sav\", 'wb'))\n\n print '*'*80\n\n \"\"\" Ridge \"\"\"\n print \"Ridge\"\n clf = Ridge(alpha=1.0)\n clf.fit(X_train, y_train)\n coefs = pd.DataFrame(zip(header, clf.coef_), columns=['feature','estim coef'])\n print coefs\n target = map(float, y_test)\n pred = clf.predict(X_test)\n for (a,b) in zip(target, pred):\n print a,\" \",b\n print \"RMSE: \", np.sqrt(np.mean((target - pred)**2))\n print \"Predicted: \", clf.predict(X_TEST),\" True: \", Y_TARGET\n if abs(clf.predict(X_TEST) - float(Y_TARGET)) < min_diff:\n min_value, min_diff = clf.predict(X_TEST), abs(clf.predict(X_TEST) - float(Y_TARGET))\n pickle.dump(clf, 
open(\"Models/ridge_3.sav\", 'wb'))\n\n print '*'*80\n\n \"\"\" Lasso \"\"\"\n print 'Lasso'\n clf = Lasso(alpha=1.0)\n clf.fit(X_train, y_train)\n coefs = pd.DataFrame(zip(header, clf.coef_), columns=['feature','estim coef'])\n print coefs\n target = map(float, y_test)\n pred = clf.predict(X_test)\n for (a,b) in zip(target, pred):\n print a,\" \",b\n print \"RMSE: \", np.sqrt(np.mean((target - pred)**2))\n print \"Predicted: \", clf.predict(X_TEST),\" True: \", Y_TARGET\n if abs(clf.predict(X_TEST) - float(Y_TARGET)) < min_diff:\n min_value, min_diff = clf.predict(X_TEST), abs(clf.predict(X_TEST) - float(Y_TARGET))\n \"\"\" save model \"\"\"\n pickle.dump(clf, open(\"Models/lasso_3.sav\", 'wb'))\n\n print \"Predicted: \", clf.predict(X_TEST),\" True: \", Y_TARGET\n print '*'*80\n return min_value", "title": "" }, { "docid": "f735ca4953b9f9e613a2d22ebb13ba23", "score": "0.57921404", "text": "def run_training():", "title": "" }, { "docid": "d60be20e4b746c3f03437bcb58dfb4eb", "score": "0.5788554", "text": "def train_model(self):\n pass", "title": "" }, { "docid": "2fce5b76567e56cb2f1ec583a8dbafd0", "score": "0.5788192", "text": "def Training(ii,\r\n X_train,\r\n X_val,\r\n X_test,\r\n y_train,\r\n y_val,\r\n y_test,\r\n model):\r\n\r\n # Define validation and test batch sizes\r\n val_batch_size = y_val.size()[1]\r\n # test_batch_size = y_test.size()[1]\r\n\r\n train_split = TensorDataset(X_train, y_train[:, :, ii])\r\n validation_split = TensorDataset(X_val, y_val[:, :, ii])\r\n train_loader = DataLoader(train_split, batch_size=batch_size, shuffle=True, num_workers=workers)\r\n validation_loader = DataLoader(validation_split, batch_size=val_batch_size, shuffle=True, num_workers=workers)\r\n\r\n # Seed, optimiser and criterion\r\n set_seed(42)\r\n\r\n # Parameters to optimize\r\n # params = list(model.fc1[ii].parameters()) + list(model.fc2[ii].parameters()) + \\\r\n # list(model.fc3[ii].parameters()) + list(model.fc4[ii].parameters())\r\n params = list(model.fc1[ii].parameters()) + list(model.fc2[ii].parameters()) + list(model.fc3[ii].parameters())\r\n # params = list(model.fc1[ii].parameters()) + list(model.fc3[ii].parameters())\r\n\r\n # Optimizers\r\n if opt_method == 'SGD':\r\n optimizer = optim.SGD(params, lr=lr, momentum=momentum) # , weight_decay=weight_decay)\r\n elif opt_method == 'Rprop':\r\n optimizer = optim.Rprop(params, lr=lr, etas=(0.5, 1.2), step_sizes=(1e-06, 50))\r\n # optimizer = optim.Adam(params, lr=lr, weight_decay=1e-5)\r\n # optimizer = optim.AdamW(params, lr=lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)\r\n # optimizer = optim.RMSprop(params, lr=lr, alpha=0.99, eps=1e-08, weight_decay=0, momentum=0, centered=False)+\r\n # optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum) # , weight_decay=weight_decay)\r\n\r\n # Loss criterions\r\n criterion = nn.MSELoss()\r\n # criterion = nn.L1Loss()\r\n # criterion = nn.AbsCriterion()\r\n criterion = criterion.to(device)\r\n \r\n val_plot = []; train_plot = []\r\n val_loss_plot = []; train_loss_plot = []\r\n\r\n # Model Training\r\n for i_epoch in range(epochs):\r\n\r\n print(\"Epoch:\", i_epoch, '/', epochs)\r\n\r\n val_loss = 0; val_acc = 0; train_loss = 0; train_acc = 0; train_loss2 = 0\r\n val_min = 0\r\n \r\n model.train()\r\n for X, y in train_loader:\r\n X, y = X.to(device), y.to(device)\r\n\r\n yt_pred = model(X, ii)\r\n \r\n train_loss = criterion(yt_pred, y)\r\n\r\n train_loss2 += torch.sum(torch.pow(y - yt_pred, 2)).detach().cpu().numpy()\r\n\r\n train_acc += torch.sum(1 - 
torch.abs(y - yt_pred) / yt_pred).detach().cpu().numpy()\r\n \r\n optimizer.zero_grad()\r\n train_loss.backward()\r\n optimizer.step()\r\n\r\n model.eval()\r\n for X, y in validation_loader:\r\n with torch.no_grad():\r\n X, y = X.to(device), y.to(device)\r\n y_pred = model(X, ii)\r\n\r\n val_loss += torch.sum(torch.pow(y - y_pred, 2)).detach().cpu().numpy()\r\n\r\n val_acc += torch.sum(1 - torch.abs(y - y_pred) / y_pred).detach().cpu().numpy()\r\n\r\n val_min += torch.min(1 - torch.abs(y - y_pred)).detach().cpu().numpy()\r\n\r\n train_loss2 /= train_slice * data_size * out_piece\r\n train_acc /= train_slice * data_size * out_piece\r\n val_loss /= val_slice * data_size * out_piece\r\n val_acc /= val_slice * data_size * out_piece\r\n val_min /= val_slice * data_size * out_piece\r\n\r\n # val_plot.append(val_min)\r\n val_plot.append(val_acc)\r\n train_plot.append(train_acc)\r\n train_loss_plot.append(train_loss2)\r\n val_loss_plot.append(val_loss)\r\n\r\n\r\n print( \"Val Loss: \" + str(round(val_loss, 4)) + ' Val Acc: ' + str(round(val_acc, 4))) # mean sum squared loss\r\n # print( \"Train Loss: \" + str(round(train_loss, 4)) + ' Train Acc: ' + str(round(train_acc, 4))) # mean sum squared loss\r\n\r\n\r\n #------------- Loss and Accuracy Plots -------------#\r\n if plot_curves == 1:\r\n\r\n val_plot = np.array(val_plot)\r\n train_plot = np.array(train_plot)\r\n val_plot[val_plot < 0] = 0\r\n train_plot[train_plot < 0] = 0\r\n val_plot[val_plot > 1] = 1\r\n train_plot[train_plot > 1] = 1\r\n\r\n fig, axs = plt.subplots(1, 2)\r\n del fig\r\n\r\n fontProperties = {'family':'serif',\r\n 'weight' : 'normal', 'size' : 11}\r\n\r\n axs[0].plot(np.arange(epochs), val_loss_plot, color='crimson')\r\n axs[0].plot(np.arange(epochs), train_loss_plot, color='navy', linestyle='--')\r\n axs[1].plot(np.arange(epochs), val_plot, color='crimson')\r\n axs[1].plot(np.arange(epochs), train_plot, color='navy', linestyle='--')\r\n\r\n print('Valiadation loss:', val_loss_plot[-1])\r\n print('Train loss:', train_loss_plot[-1])\r\n print('Validation accuracy:', val_plot[-1])\r\n print('Train accuracy:', train_plot[-1])\r\n\r\n axs[0].tick_params(axis='x', direction='in')\r\n axs[0].tick_params(axis='y', direction='in')\r\n axs[0].set_aspect(aspect=1.0/axs[0].get_data_ratio())\r\n # axs[0].xlim(left = 0)\r\n axs[1].tick_params(axis='x', direction='in')\r\n axs[1].tick_params(axis='y', direction='in')\r\n axs[1].set_aspect(aspect=1.0/axs[1].get_data_ratio())\r\n # axs[1].xlim(left = 0)\r\n axs[0].set_xticklabels(axs[0].get_xticks().astype(int), fontProperties)\r\n axs[0].set_yticklabels(axs[0].get_yticks().astype(int), fontProperties)\r\n axs[1].set_xticklabels(axs[1].get_xticks().astype(int), fontProperties)\r\n axs[1].set_yticklabels(np.round(axs[1].get_yticks(), 2), fontProperties)\r\n\r\n plt.show()\r\n\r\n if parallel == False:\r\n print(round(ii/rows*100, 2), '%')\r\n\r\n return 0", "title": "" } ]
1489657d219db01357102495748f2c1b
Creates a Path object representing the full path of an output feature class in whatever the destination format is.
[ { "docid": "c9fb61ec9f74708b54f3fb10e6135b5c", "score": "0.0", "text": "def _feature_class_default_name(self, desc, output_workspace, **kwargs):\n return", "title": "" } ]
[ { "docid": "90e65dca5b6ac0d9ee582a58a09964c9", "score": "0.6265195", "text": "def dest_path(self) -> Path:\n pass", "title": "" }, { "docid": "90e65dca5b6ac0d9ee582a58a09964c9", "score": "0.6265195", "text": "def dest_path(self) -> Path:\n pass", "title": "" }, { "docid": "04d35407c1b8ae6a656467b4376b5fda", "score": "0.617815", "text": "def dump_to_path(self, context: OutputContext, obj: Any, path: \"UPath\"):", "title": "" }, { "docid": "fe5c70807e5a8a5344789ca642b02d7a", "score": "0.61555076", "text": "def outpath(self, *path):\n return os.path.join(self.__outpath, *path)", "title": "" }, { "docid": "087c13c591abd19f1dd1f0b4a1024b46", "score": "0.6072429", "text": "def output_path(self):\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + '/../../outputs'),\n self.identifier)", "title": "" }, { "docid": "1d3db231ee99ad04401631a4656a9035", "score": "0.6044012", "text": "def _build_output_path(output_arg, topology_name):\n if output_arg is not None:\n return expand_path(output_arg)\n else:\n return expand_path(topology_name + \".jar\")", "title": "" }, { "docid": "fb9a040182834ea65daf3ea7e52f6a50", "score": "0.5957085", "text": "def outFilePath(self):\n\t\tpass", "title": "" }, { "docid": "2269e4d2179cad4e4d0d52f266774629", "score": "0.59169835", "text": "def getOutputPath(self, append=None, delimiter=\"/\"):\n name = self.name\n if append:\n name = name + append\n if self.father:\n name = self.father.getOutputPath(delimiter+name, delimiter)\n return name", "title": "" }, { "docid": "69c1297f9e964681bf7ad0b2f7d8cd4c", "score": "0.59118444", "text": "def create_outputs(output_directory, input_feature, output_name):\n # Create output GDB and feature class\n is_table = arcpy.Describe(input_feature).datasetType.lower() == 'table'\n copier = _get_copier(is_table)\n\n output_gdb = arcpy.CreateFileGDB_management(output_directory, output_name)[0]\n output_fc = copier(input_feature, os.path.join(output_gdb, output_name))[0]\n # Create directory to contain shape file\n shape_directory = os.path.join(output_directory, output_name)\n if not os.path.exists(shape_directory):\n os.makedirs(shape_directory)\n copier(output_fc, os.path.join(shape_directory, output_name))\n\n return (output_gdb, shape_directory)", "title": "" }, { "docid": "744e640dda21d93c28e9bed6ba40c3f0", "score": "0.5868421", "text": "def output_path(self):\n return self._output_path", "title": "" }, { "docid": "581d8b61495c6cc539fff5d3d065a6ed", "score": "0.57681715", "text": "def export_model(self, destination_path):\n all_paths = [v for k, v in self.info.items() if \"path\" in k]\n if \"train_path\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"train_path\"])\n\n if \"validation_path\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"validation_path\"])\n\n if \"original_train_path\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"original_train_path\"])\n all_paths.extend(self.hyperparams[\"additional_data_paths\"])\n\n if \"split_and_train_params\" in self.hyperparams:\n all_paths.append(self.hyperparams[\"split_and_train_params\"][\"df_path\"])\n copy_all(all_paths, destination_path)", "title": "" }, { "docid": "76775854e762e383998b5b45f8aa8136", "score": "0.57680297", "text": "def get_outfile_path(self):\n\t\treturn self.output", "title": "" }, { "docid": "8557c7c47846e77a4011f8ae6d64f58e", "score": "0.5737424", "text": "def get_output_path(self) -> str:\n return self.args[self.get_output_index()]", "title": "" }, { "docid": "cacead194736b7c3f5e7c4dc0dbfc8cb", "score": "0.5736999", 
"text": "def _build_output_path(self, output_dir_path, input_dir_path):\n sep = \"_\"\n ms_to_s = 100\n\n # prepare path in proper format for current OS\n tmp_path = self._rebuild_dir_path(output_dir_path)\n\n output_path = os.path.join(tmp_path, sep.join([\n os.path.basename(input_dir_path),\n str(self.framer.frame_length * ms_to_s),\n str(self.framer.frame_step * ms_to_s),\n sep.join(self.extractor.feature_type.split(\" \")),\n \"deltas\" if self.extractor.deltas else \"\"\n ]).replace(\".\", \"\") + \".\").replace(sep + \".\", \".\")\n\n return output_path", "title": "" }, { "docid": "c3ac7778b9d753486a6c47faf21a87ce", "score": "0.5697163", "text": "def _get_inference_output_path(self, output_path=None):\n if output_path is None:\n path = os.path.join(os.path.dirname(__file__), \"../data/inference.csv\")\n else:\n path = output_path\n return path", "title": "" }, { "docid": "b308016411d412316726895feb0208a5", "score": "0.56894606", "text": "def get_SrcClass(args):\n return Pathcom(args)", "title": "" }, { "docid": "1b683ca1235c6b1c3fa78370345f5df6", "score": "0.56729484", "text": "def getOutputPath(self):\n return self.config['output_path']", "title": "" }, { "docid": "1b683ca1235c6b1c3fa78370345f5df6", "score": "0.56729484", "text": "def getOutputPath(self):\n return self.config['output_path']", "title": "" }, { "docid": "0190d35f6b98d6f79f2a4b98813ac129", "score": "0.5650066", "text": "def _convert_path(self, clazz, path):\n return clazz( self.clientmap\n , self.p4.client\n , self.contentlocalroot[:-1] # -1 to strip trailing /\n , path)", "title": "" }, { "docid": "89e9f308630e4ff0d6c27cdc3b141a42", "score": "0.56334686", "text": "def path(self) -> str:\n if self.__path is not None:\n return self.__path\n\n dirs = [os.path.dirname(x) for x in (self.dir, self.Target)]\n dirname = \"/\".join([x for x in dirs if x])\n filename = os.path.basename(self.Target)\n self.__path = \"/\".join([dirname, filename])\n return self.__path", "title": "" }, { "docid": "ad51edf49c09539fb7893fd54a272974", "score": "0.56168807", "text": "def get_output_path(model_object_filename):\n\n project_path = set_main_path()\n return os.path.join(project_path,\n 'src',\n 'resources',\n 'output',\n model_object_filename)", "title": "" }, { "docid": "bdfacb13556fef00e31226c583c6099f", "score": "0.5578438", "text": "def _get_path(self, context: Union[InputContext, OutputContext]) -> \"UPath\":\n path = self._get_path_without_extension(context)\n return self._with_extension(path)", "title": "" }, { "docid": "1eb5bfeee38e51e98b9f874d3e3104dd", "score": "0.55192566", "text": "def outpaths(self, final):\n return", "title": "" }, { "docid": "2be007116c011f84466ceaff969ee911", "score": "0.5496843", "text": "def create_file_path(self):", "title": "" }, { "docid": "fce446c741c32b6645a8bd7f9da79d31", "score": "0.5469162", "text": "def path_to_step_output(dataset, backbone, method, output_dir=configs.save_dir):\n checkpoint_dir = os.path.join(\n output_dir,\n dataset,\n '_'.join([method, backbone]),\n )\n\n if not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n return checkpoint_dir", "title": "" }, { "docid": "8cf20959cf2eac4157a68afa15d0e0cc", "score": "0.5437101", "text": "def token_path(self):\n return self.output_path", "title": "" }, { "docid": "335f7ef0b6e9701b2974902212ede817", "score": "0.5435925", "text": "def path(self):", "title": "" }, { "docid": "bbf2d170a3affc33ea0f33cf4d3e1369", "score": "0.541393", "text": "def path(self):\n\n raise NotImplementedError('Abstract Scene Wrapper path function not 
implemented!')", "title": "" }, { "docid": "d6b159c57888cc22c5456bada03bf131", "score": "0.5411984", "text": "def dft2Model(self):\n\t\tif self._dft2Path is not None:\n\t\t\treturn os.path.abspath(self._dft2Path)\n\t\telse:\n\t\t\treturn None", "title": "" }, { "docid": "a3db444a33cdd7ab22bacff6c03caf9e", "score": "0.53919005", "text": "def create_path_layer(G, path, output_file, spatial_reference):\n # Create geometry from the edges\n print('Create geometry')\n # set up the shapefile driver\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n # create the data source\n data_source = driver.CreateDataSource(output_file)\n\n # create the layer\n layer = data_source.CreateLayer(\"A Star Shortest Path\", spatial_reference, ogr.wkbLineString)\n\n # Create fields (using default field first, TODO:read from data)\n # fid\n # layer.CreateField(ogr.FieldDefn(\"fid\", ogr.OFTReal))\n # streetID\n layer.CreateField(ogr.FieldDefn(\"streetID\", ogr.OFTInteger64))\n # length\n layer.CreateField(ogr.FieldDefn(\"length\", ogr.OFTReal))\n # u\n layer.CreateField(ogr.FieldDefn(\"u\", ogr.OFTInteger64))\n # v\n layer.CreateField(ogr.FieldDefn(\"v\", ogr.OFTInteger64))\n # length_sc\n layer.CreateField(ogr.FieldDefn(\"length_sc\", ogr.OFTReal))\n \n # order (added field to get the order)\n layer.CreateField(ogr.FieldDefn(\"order\", ogr.OFTInteger))\n\n fields = [\n # 'fid', \n 'streetID', \n 'length',\n 'u',\n 'v', \n 'length_sc'\n ]\n\n # Iterate over the path edges\n for i in range(len(path) - 1):\n node1 = path[i]\n node2 = path[i+1]\n # print(node1)\n # print(node2)\n # print(G.edges[node1, node2])\n edge = G.edges[node1, node2]\n feature = ogr.Feature(layer.GetLayerDefn())\n for field in fields:\n feature.SetField(field, edge[field])\n feature.SetField('order', i)\n # Create geometry from the Wkt\n geom = ogr.CreateGeometryFromWkt(edge['Wkt'])\n # Set the feature geometry using the geom\n feature.SetGeometry(geom)\n \n # Create the feature in the layer (shapefile)\n layer.CreateFeature(feature)\n # Dereference the feature\n feature = None\n\n # Save and close the data source\n data_source = None\n\n return output_file", "title": "" }, { "docid": "fc3748281d5f35616b3c1dc23b1a5d42", "score": "0.53668344", "text": "def get_output_filepath(self):\n if not self.check_format():\n return None\n return os.path.join(self.wiki.public_dir(),\n self.wiki.date, self.FILENAME +\n \".\" + self.fileformat)", "title": "" }, { "docid": "29a100f09755d5750d192a2fe6d256e7", "score": "0.53452337", "text": "def get_path(self):\n return self.path", "title": "" }, { "docid": "1984b926d31d6cc6155837a50be880ee", "score": "0.5345099", "text": "def output(self, path: str) -> str:\n return os.path.join(self._output, path)", "title": "" }, { "docid": "7b8d59e8c2bb3021b4d1d5bc789c296b", "score": "0.53216094", "text": "def make_output_dir(path_output):\n\n if isinstance(path_output, Path) == False:\n path_output = Path(path_output)\n\n path_output.mkdir(parents=True, exist_ok=True)\n\n return path_output", "title": "" }, { "docid": "c6c270eab83ee526e4a773defadba0cb", "score": "0.53191423", "text": "def construct(self, **kwargs) -> PathInst:\n if self.fixed_dir:\n dir_part = cast(PathInst, self.dir_spec)\n else:\n dir_part = self.dir_spec.construct(**kwargs)\n if self.fixed_file:\n file_part = cast(PathInst, self.file_spec)\n else:\n file_part = self.file_spec.construct(**kwargs)\n return dir_part.joinpath(file_part)", "title": "" }, { "docid": "9851b886f7750819349d4c1b1c4b8d1f", "score": "0.53186744", "text": "def 
filepath_export(self):\n\n return self.EXPORT_FOLDER / f\"{self.SQL_TABLE_NAME}.{self.DATA_TYPE}\"", "title": "" }, { "docid": "c50065ec2b1908fb78220ac11f78eaf2", "score": "0.53149086", "text": "def export(self, path = None, output = False):\n\t\tself._create_estimator()\n\t\tdef serving_input_fn():\n\t\t\tlabel_ids = tf.placeholder(tf.int32, [None], name='label_ids')\t\n\t\t\tinput_ids = tf.placeholder(tf.int32, [None, self.max_seq_length], name='input_ids')\n\t\t\tinput_mask = tf.placeholder(tf.int32, [None, self.max_seq_length], name='input_mask')\n\t\t\tsegment_ids = tf.placeholder(tf.int32, [None, self.max_seq_length], name='segment_ids')\n\t\t\tinput_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n\t\t\t\t'label_ids': label_ids,\n\t\t\t\t'input_ids': input_ids,\n\t\t\t\t'input_mask': input_mask,\n\t\t\t\t'segment_ids': segment_ids,\n\t\t\t})()\n\t\t\treturn input_fn\n\t\tself.estimator._export_to_tpu = False\n\t\tmodel_dir = path or self.output_dir \n\t\tself.estimator.export_saved_model(model_dir, serving_input_fn)\n\t\treturn self.find_exported_model()", "title": "" }, { "docid": "73d45937896fd69070c9a29ea593c309", "score": "0.52889776", "text": "def classifier_output_dir(\n cls, classifier_id: int, ensure_exists: bool = True\n ) -> Path:\n dir_ = cls.classifier_dir(classifier_id) / \"trained_model/\"\n if ensure_exists:\n cls._create_dir_if_not_exists(dir_)\n return dir_", "title": "" }, { "docid": "298cf2d926ebfea7506ed4b503ef69a8", "score": "0.52814186", "text": "def path(self):\n pass", "title": "" }, { "docid": "24c65694156733d467394a46e129919b", "score": "0.5279998", "text": "def make_output_path_graph(out_dir, subject, str_prefixes):\n\n if out_dir is not None:\n # get outpath returned from hiwenet, based on dist name and all other params\n # choose out_dir name based on dist name and all other parameters\n out_subject_dir = out_dir.joinpath(subject)\n if not out_subject_dir.exists():\n out_subject_dir.mkdir(exist_ok=True, parents=True)\n\n if isinstance(str_prefixes, str):\n str_prefixes = [str_prefixes, ]\n\n out_file_name = '{}_graynet.graphml'.format('_'.join(str_prefixes))\n out_weights_path = out_subject_dir.joinpath(out_file_name)\n else:\n out_weights_path = None\n\n return out_weights_path", "title": "" }, { "docid": "5d400eb7cd07f35bd971448e5f47d463", "score": "0.5276292", "text": "def obj_export_path():\n filepath = bpy.data.filepath\n directory = os.path.dirname(filepath)\n print (\"OBJ Export directory is:\", directory)\n return directory", "title": "" }, { "docid": "b3bb25215787a6263076ab0c93b205b6", "score": "0.5265822", "text": "def target_subpath(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"target_subpath\")", "title": "" }, { "docid": "b5bc7169cc2f3018fe72d537192a3f5e", "score": "0.5261364", "text": "def save_classifier(path_out, classif, clf_name, params, feature_names=None, label_names=None):\n if not os.path.isdir(path_out):\n raise FileNotFoundError('missing folder: %s' % path_out)\n dict_classif = {\n 'params': params,\n 'name': clf_name,\n 'clf_pipeline': classif,\n 'features': feature_names,\n 'label_names': label_names,\n }\n\n path_clf = os.path.join(path_out, TEMPLATE_NAME_CLF.format(clf_name))\n logging.info('export classif. 
of %s to \"%s\"', dict_classif, path_clf)\n with open(path_clf, 'wb') as f:\n pickle.dump(dict_classif, f)\n logging.debug('export finished')\n return path_clf", "title": "" }, { "docid": "fd45d42c20d84e875d00d0e5a7f3422c", "score": "0.52595294", "text": "def _get_halfhour_output_path(self, output_path=None):\n if output_path is None:\n path = os.path.join(\n os.path.dirname(__file__), \"../data/inference_halfhour.csv\"\n )\n else:\n path = output_path\n return path", "title": "" }, { "docid": "7c5b6f30ad623f3c926e07994c929ae1", "score": "0.5253989", "text": "def get_output_file(fn):\n return join(FIGURES_DIR, fn)", "title": "" }, { "docid": "d021e3bae5a477045b75b93718223cb2", "score": "0.5253199", "text": "def _destination(self, path):\n destination = path\n if self.avoid_collisions:\n while destination.is_file():\n fname = '{0}-{1}{2}'.format(\n path.stem,\n next(self.suffix[path.name]),\n path.suffix\n )\n destination = path.with_name(fname)\n\n return destination", "title": "" }, { "docid": "5fde16581b812f39f2f89b98003513fc", "score": "0.5250178", "text": "def output(self):\n print(\">>>>>\\n\")\n print(self.input()[0].path)\n return GCSTarget(self.input()[0].path + '.db')", "title": "" }, { "docid": "9cfd24a1a3d94baaff7113feb688c00e", "score": "0.5231089", "text": "def seg_classes_path(tiny_seg_data_path) -> str:\n return Path(tiny_seg_data_path) / \"classes.txt\"", "title": "" }, { "docid": "12ae65461d9e436d4a2f73795fbf5c58", "score": "0.52179825", "text": "def make_path_to_out(path_out, path_orig, default):\n if path_out:\n if not path_out.startswith(\"/\"):\n print(\"invalid output path, images will be saved in {}\".format(os.path.join(path_orig, default)))\n path_out = os.path.join(path_orig, default)\n else:\n path_out = os.path.join(path_orig, default)\n return path_out", "title": "" }, { "docid": "49ea40713809b20863e6f1c3640bffef", "score": "0.5214476", "text": "async def path(self) -> pathlib.Path:\n\n return mapping.from_maybe_impl(await self._impl_obj.path())", "title": "" }, { "docid": "0b2d00f19fdd6a7a5887661da6d47d25", "score": "0.52069247", "text": "def generate_path(self,x_final):\n pass", "title": "" }, { "docid": "a9909382999829a4defc34c03542c55c", "score": "0.5199785", "text": "def parse_output_path(location):\n if location[-1] != '/':\n location += '/'\n if not os.path.isdir(location):\n os.makedirs(location)\n return location", "title": "" }, { "docid": "b9bad500795d7c098e9b12952518360d", "score": "0.5194996", "text": "def getPathBuilder(self):\n return datapath.from_catalog(self)", "title": "" }, { "docid": "9726066dd1ebaa1ef659eace44a13b46", "score": "0.51913697", "text": "def return_as_Path(path: str = None) -> Optional[Path]:", "title": "" }, { "docid": "a3c35e40d1394c06af296dd99b176462", "score": "0.51854277", "text": "def _get_capture_output_path(self):\n if self._capture_paths is None: # don't capture\n return None\n \n #output directory\n output_dir = self._capture_paths['output_dir']\n \n #output file name\n if self._capture_paths['output_name'] is None:\n timestamp = datetime.now().strftime('%Y%m%d%H%M%S')\n output_prefix = self._capture_paths['output_prefix']\n total_rewards = '_'.join('r%06d' % x for x in self._total_reward)\n output_name = '%s_%s_t%06d_%s.gif' % (output_prefix, timestamp,\n self.time, total_rewards)\n else:\n output_name = self._capture_paths['output_name']\n \n return os.path.join(output_dir, output_name)", "title": "" }, { "docid": "23538533b0cd70c2c41eaf31a373c280", "score": "0.5172049", "text": "def tiny_seg_data_path(tmp_session, 
seg_classes) -> str:\n path = unzip_url(\n seg_urls.fridge_objects_tiny_path,\n fpath=tmp_session,\n dest=tmp_session,\n exist_ok=True,\n )\n classes_path = Path(path) / \"classes.txt\"\n with open(classes_path, \"w\") as f:\n for c in seg_classes:\n f.write(c + \"\\n\")\n return path", "title": "" }, { "docid": "5c3bd16dd4e5f54fc280e02149ddcbac", "score": "0.5170895", "text": "def get_path(self):\n if self.can_connect_to_goal(0):\n print(\"Can connect to goal\")\n self.connect_to_goal(0)\n return self.reconstruct_path(0, self.x_init, self.x_goal)\n print(\"Could not connect to goal\")\n return None", "title": "" }, { "docid": "10752442e300d438debc83b2a13b96a7", "score": "0.5166252", "text": "def path(self):\n if self.target.redshift_helio > 0:\n # Extragalactic object\n if self.restframe:\n # Get the restframe spectrum.\n key = 'idr.spec_restframe'\n else:\n # Get the original merged spectrum.\n key = 'idr.spec_merged'\n else:\n # Star, get the original spectrum.\n key = 'idr.spec_merged'\n\n path = '%s/%s' % (self.idr_directory, self.meta[key])\n\n return path", "title": "" }, { "docid": "66f75a7da64b1d92e6dbb36ff22dcfdc", "score": "0.5164553", "text": "def generate_output(path=None):\n if path is None:\n return open(OUTPUT_FILE, \"w\").close()\n print(\"Found path:\")\n with open(OUTPUT_FILE, \"w\") as output_file:\n print(*path, sep=\"\\n\")\n output_file.write(\"\\n\".join(path))", "title": "" }, { "docid": "b177117724c787533338c2a9607d0871", "score": "0.5163864", "text": "def getDestinationPath(self):\n return self.getValue(\"destination-path\", self.__session.getCurrentTask())", "title": "" }, { "docid": "21d6379df0a6122b0b15eeb0d6d43afb", "score": "0.51609457", "text": "def to_featureclass(self,\n out_location, out_name,\n overwrite=True, skip_invalid=True):\n from .io import to_featureclass\n return to_featureclass(df=self,\n out_location=out_location,\n out_name=out_name,\n overwrite=overwrite, skip_invalid=skip_invalid)", "title": "" }, { "docid": "50956b55f45daed63cb3f50db7ea1461", "score": "0.51598775", "text": "def features_directory(self) -> str:\n return os.path.join(self.output_directory, \"features\")", "title": "" }, { "docid": "1f48bac02445b4d67578268cae62299e", "score": "0.5158284", "text": "def generate_input_path(self):\n return os.path.join(self.output_path, self.input_name)", "title": "" }, { "docid": "bc46e4819f852e429f20c5fea3864c17", "score": "0.51574665", "text": "def asCategoryPath(self) -> ghidra.program.model.data.CategoryPath:\n ...", "title": "" }, { "docid": "c9b7c4537812d868eff399d7856dc79f", "score": "0.514872", "text": "def getObjectPath(self):", "title": "" }, { "docid": "40ef1f7dc0fbde3fd4ed7e35d313018d", "score": "0.51463217", "text": "def path(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path\")", "title": "" }, { "docid": "979aa15bce58a8f228c1ecdc0f04e312", "score": "0.51330554", "text": "def output(self):\n return GCSTarget(self.gs_path)", "title": "" }, { "docid": "46fa402646286c6a85cfe09124b42e82", "score": "0.5129202", "text": "def _get_path(self):\n if self._path is None:\n self._path = self.newPath()\n return self._path", "title": "" }, { "docid": "964432405deb6f009be153cc3f6fcf36", "score": "0.51144505", "text": "def format_output_filepath(\n features_filepath: Path, \n image_path: Path,\n output_dir: Path,\n orbit_properties_pass: Optional[str],\n band: Optional[str]) -> Path:\n # Filename of the features\n features_filename = os.path.splitext(os.path.basename(features_filepath))[0] \n # Filename of the image -> remove .zip if 
it is a zip file\n image_filename = os.path.basename(image_path)\n image_filename_noext, image_ext = os.path.splitext(image_filename)\n if image_ext.lower() == '.zip':\n image_filename = image_filename_noext\n\n # Interprete the orbit...\n if orbit_properties_pass is not None:\n if orbit_properties_pass == 'ASCENDING':\n orbit = '_ASC'\n elif orbit_properties_pass == 'DESCENDING':\n orbit = '_DESC'\n else:\n message = f\"Unknown orbit_properties_pass: {orbit_properties_pass}\"\n logger.error(message)\n raise Exception(message)\n else:\n orbit = ''\n\n # Prepare basic filepath\n output_filepath_noext = os.path.join(\n output_dir, f\"{features_filename}__{image_filename}{orbit}\")\n\n # If a band was specified\n if band is not None:\n output_filepath_noext = f\"{output_filepath_noext}_{band}\"\n \n # Add extension\n output_filepath = Path(f\"{output_filepath_noext}.sqlite\")\n return output_filepath", "title": "" }, { "docid": "70b025b8d4a7bccb2463a94fc6a24501", "score": "0.51034355", "text": "def get_savepath(dataset: Dataset, conf:Namespace) -> str:\n\n # First set up the folder\n dirname = os.path.join(\n EXPERIMENT_FOLDER,\n \"FTCmodels\",\n )\n\n # Make sure parent folder exists\n os.makedirs(dirname, exist_ok=True)\n\n # Then add file name\n path = os.path.join(\n dirname,\n f\"FTCNN_{dataset.name}_{conf.feature}_fold{dataset.fold}.pt\"\n )\n return path", "title": "" }, { "docid": "9417665c167c068e5f3c7641f6d4c1ac", "score": "0.51016945", "text": "def gen_fpath(self):\n\n fpath = self['gen_fpath']\n\n if fpath == 'PIPELINE':\n target_modules = ['multi-year', 'collect', 'generation']\n for target_module in target_modules:\n try:\n fpath = Pipeline.parse_previous(\n self.dirout, 'rep-profiles', target='fpath',\n target_module=target_module)[0]\n except KeyError:\n pass\n else:\n break\n\n if fpath == 'PIPELINE':\n try:\n fpath = Pipeline.parse_previous(\n self.dirout, 'rep-profiles', target='gen_fpath',\n target_module='supply-curve-aggregation')[0]\n except KeyError:\n pass\n\n if fpath == 'PIPELINE':\n msg = 'Could not parse gen_fpath from previous pipeline jobs.'\n logger.error(msg)\n raise PipelineError(msg)\n else:\n logger.info('Rep profiles using the following '\n 'pipeline input for gen_fpath: {}'.format(fpath))\n\n return fpath", "title": "" }, { "docid": "cd302a7b39be5504bc12fcde7dab6870", "score": "0.5101154", "text": "def make_path(self, dir_path):\n return join(dir_path, self.name)", "title": "" }, { "docid": "b272ca9a5219bb32fa760636d33461c4", "score": "0.5097699", "text": "def get_path(self):\n\n return f'{self.get_parent().get_path()}{self.get_name()}'", "title": "" }, { "docid": "d2da6406a2d4fa4f3c3b789e69ebb9e3", "score": "0.509284", "text": "def path(self):\n\t\tpass", "title": "" }, { "docid": "0ca57c79df28076955809126034a46f3", "score": "0.5090463", "text": "def getPath(self):\n return str()", "title": "" }, { "docid": "cf6e29ab70a779f9d29f72a4bd0a6d90", "score": "0.50816363", "text": "def path(self):\n path = [self.name]\n return get_path(path)", "title": "" }, { "docid": "fb9a8bf0c5889c05c97d8932c5da7713", "score": "0.50808114", "text": "def full_path(self):\n return os.path.join(self._path, self._name)", "title": "" }, { "docid": "ec27a9721a5902193f5f0b6c920d3977", "score": "0.5079392", "text": "def depot_path(self, path):\n return self._convert_path(p4gf_path_convert.DepotPath, path)", "title": "" }, { "docid": "0f67c8e0ad66f58e979469ffe976fe06", "score": "0.50785035", "text": "def getPath(self):\n pass;", "title": "" }, { "docid": 
"27efb0314b01b5584cc98412e78da49b", "score": "0.5070304", "text": "def get_path(cls, f: Text) -> Text:\n if not os.path.isabs(f):\n class_file = inspect.getfile(cls)\n class_dir = os.path.dirname(class_file)\n f = os.path.realpath(os.path.join(class_dir, f))\n return f", "title": "" }, { "docid": "5f4b7833156e2844d2a7eede2fc6314e", "score": "0.5056945", "text": "def _create_output(identifier, _, attrs):\n return Output(\n identifier, None, None, attrs.get(\"uom\"),\n attrs.get(\"crs\"), attrs.get(\"mimeType\"), attrs.get(\"encoding\"),\n attrs.get(\"schema\"), parse_bool(attrs.get(\"asReference\"))\n )", "title": "" }, { "docid": "c9cee5ac14311b8772b0a541fe47eb1b", "score": "0.50388336", "text": "def get_output_dir(self):\n return self.output", "title": "" }, { "docid": "66ec7360a14810e82d6eb6362828af73", "score": "0.5032671", "text": "def prepare_output_folder(datapath, dataset, model, instance_name=None):\n if instance_name is not None:\n output_folder = os.path.join(\n datapath, dataset, \"output\", model, \"checkpoints\", instance_name\n )\n else:\n output_folder = os.path.join(\n datapath, dataset, \"output\", model, \"checkpoints\"\n )\n os.makedirs(output_folder, exist_ok=True)\n return output_folder", "title": "" }, { "docid": "2289f1f228a18bb2e830b8f6822d4bd0", "score": "0.5030208", "text": "def get_output_class(self):\n return self.class_output", "title": "" }, { "docid": "58303f215527f64a8da5b4da09da1498", "score": "0.5028796", "text": "def getpath():\n return path", "title": "" }, { "docid": "c7208fac8d3611664a7eef201574db37", "score": "0.5023778", "text": "def path(model):\n return None", "title": "" }, { "docid": "ae68feabdd452aef6ed62c73504bdaf5", "score": "0.5023425", "text": "def __init__(self,output_path,input_path,transformation_type,transformation_level):\r\n # Ein Ordner wird hergestellt,when der Ordner existiert nicht.\r\n isExists = os.path.exists(output_path)\r\n if not isExists:\r\n os.makedirs(output_path)\r\n self.output_path =output_path\r\n self.transformation_type = transformation_type\r\n self.transformation_level = transformation_level\r\n self.input_path = input_path\r\n self.raw_data=self.load_data()\r\n self.features,self.labels,self.features_and_labels = self.features_extraction()\r\n self.wirte_to_csv()", "title": "" }, { "docid": "1cf8e18802b9eca3f390872bb768f857", "score": "0.50222284", "text": "def set_destination_path(entity):\n if entity.output_parameters[entity.configuration.raw_parameter] is not None:\n repo_name = entity.input_parameters[\"repo_name\"].split(\"/\")\n user = repo_name[0]\n repo = repo_name[1]\n path = entity.input_parameters[\"path\"].replace(\"/\", \" \")\n # add destination path to output\n entity.output_parameters[\"dest_path\"] = os.path.join(user, repo, path)", "title": "" }, { "docid": "7982cb1f75f2dde780b11e19d1ad7e7f", "score": "0.5021329", "text": "def get_outputfilepath(task):\n outputfilepath=None\n if 'outputdir' in task['file_names']:\n outputdir= task['file_names']['outputdir']\n else:\n outputdir='.'\n if 'output_filename' in task['file_names']:\n outputfilepath=os.path.join(outputdir,task['file_names']['output_filename'])\n else:\n output_string = task['taskname'].replace(' ','_')+\"_\"+task['uuid']+'.json'\n outputfilepath = os.path.join(outputdir,output_string )\n return outputfilepath", "title": "" }, { "docid": "7037bea5757917baaa774eec8b007c11", "score": "0.5020741", "text": "def to_feature_file_line(self):\n return \"\\t\".join([\n self.address.get_address_byte_string().decode(),\n 
bytes_to_string(self.feature),\n bytes_to_string(b\"\".join([\n self.left_context,\n self.feature,\n self.right_context\n ]))\n ])", "title": "" }, { "docid": "b1bacfdeeb752a6d408fd69743a65bf5", "score": "0.5016102", "text": "def save_model(self, output_path: str):\n raise NotImplementedError", "title": "" }, { "docid": "64480069d77b1598b62bf2fe432439b7", "score": "0.5012036", "text": "def asDataTypePath(self) -> ghidra.program.model.data.DataTypePath:\n ...", "title": "" }, { "docid": "865ca6765ade325acca6ad5a4ccb4e8a", "score": "0.5011486", "text": "def path(self):\n if self.__path is None:\n opj = os.path.join\n target_path = opj(self.local_session_dir, 'targets', self.target.identifier)\n\n path = {\n 'target': {\n 'copy': opj(target_path, self.target.remote_copy),\n 'exec': opj(target_path, self.target.remote_exec),\n 'explorer': opj(target_path, 'explorer'),\n 'object': opj(target_path, 'object'),\n 'messages': opj(target_path, 'messages'),\n },\n 'local': {\n 'bin': opj(self.local_session_dir, 'bin'),\n 'explorer': opj(self.local_session_dir, 'conf', 'explorer'),\n 'global': target_path,\n 'initial-manifest': opj(self.local_session_dir, 'manifest'),\n 'manifest': opj(self.local_session_dir, 'conf', 'manifest'),\n 'object': opj(target_path, 'object'),\n 'session': self.local_session_dir,\n 'target': target_path,\n 'type': opj(self.local_session_dir, 'conf', 'type'),\n },\n 'remote': {\n 'conf': opj(self.remote_session_dir, 'conf'),\n 'explorer': opj(self.remote_session_dir, 'conf', 'explorer'),\n 'object': opj(self.remote_session_dir, 'object'),\n 'session': self.remote_session_dir,\n 'type': opj(self.remote_session_dir, 'conf', 'type'),\n },\n }\n self.__path = path\n return self.__path", "title": "" }, { "docid": "c5f6ad2bbbfe5685841e2268005f6b8c", "score": "0.50112796", "text": "def get_path(self):\n return self.path", "title": "" }, { "docid": "c5f6ad2bbbfe5685841e2268005f6b8c", "score": "0.50112796", "text": "def get_path(self):\n return self.path", "title": "" }, { "docid": "028efc1a78ca7a66a60ac0f8ae451a3f", "score": "0.5007153", "text": "def make_output_path(filename: str) -> str:\n module_name = os.path.splitext(os.path.basename(__file__))[0]\n name_ext = os.path.splitext(filename)\n return '../output/' + name_ext[0] + '_' + module_name + name_ext[1]", "title": "" }, { "docid": "d2eec5d7f5da3ca2d9930b3131bf47f5", "score": "0.50003135", "text": "def GypPathToUniqueOutput(self, path, qualified=True):\n\n path = self.ExpandSpecial(path)\n assert not path.startswith('$'), path\n\n # Translate the path following this scheme:\n # Input: foo/bar.gyp, target targ, references baz/out.o\n # Output: obj/foo/baz/targ.out.o (if qualified)\n # obj/foo/baz/out.o (otherwise)\n # (and obj.host instead of obj for cross-compiles)\n #\n # Why this scheme and not some other one?\n # 1) for a given input, you can compute all derived outputs by matching\n # its path, even if the input is brought via a gyp file with '..'.\n # 2) simple files like libraries and stamps have a simple filename.\n\n obj = 'obj'\n if self.toolset != 'target':\n obj += '.' + self.toolset\n\n path_dir, path_basename = os.path.split(path)\n assert not os.path.isabs(path_dir), (\n \"'%s' can not be absolute path (see crbug.com/462153).\" % path_dir)\n\n if qualified:\n path_basename = self.name + '.' 
+ path_basename\n return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,\n path_basename))", "title": "" }, { "docid": "ce6c82a07e4225e3e35b0833cdfa1c86", "score": "0.49935687", "text": "def path_fq(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"path_fq\")", "title": "" }, { "docid": "78688fbbd7ae9b4d3096a8ce40a25e28", "score": "0.49909335", "text": "def choose_output(input_file: str) -> str:\n outdir = os.path.abspath(os.path.dirname(input_file))\n prefix = input_file.split('.')[0]\n savefile = os.path.join(outdir, prefix + '.pkl')\n return savefile", "title": "" }, { "docid": "fd937daf1ea7426caf5009f47532a032", "score": "0.49859446", "text": "def getOutputDirectory():\n return \"\"", "title": "" }, { "docid": "f6771bd5df7ed62ab9f2a3776911ff70", "score": "0.49777925", "text": "def _relationship_class_default_name(self, desc, output_workspace, **kwargs):\n return output_workspace.joinpath(desc.name + \".txt\")", "title": "" } ]
933253e9cdd380d613041813dd603e18
Checks the arrival of each user to his desk and switch ON his devices.
[ { "docid": "cac54172c57f94e12612e679ab9fe9c5", "score": "0.61905897", "text": "def check_user_arrival():\n print('[{}] Checking user arrival'.format(datetime.now()))\n\n # Database and Fibaro credentials\n user = 'dadtkzpuzwfows'\n database_password = '1a62e7d11e87864c20e4635015040a6cb0537b1f863abcebe91c50ef78ee4410'\n host = 'ec2-46-137-79-235.eu-west-1.compute.amazonaws.com'\n port = '5432'\n database = 'd53rn0nsdh7eok'\n fibaro_address = '172.19.243.58:80'\n fibaro_username = 'admin'\n fibaro_password = 'admin'\n\n def activate_remote_control(meter_id, command):\n query = requests.post('http://{}/api/devices/{}/action/{}'.format(fibaro_address, meter_id, command),\n auth=HTTPBasicAuth(fibaro_username, fibaro_password)).json()\n return None\n\n # Access last recorded user presence information\n last_recorded_presence = pd.read_csv('tables_csv/user_presence.csv')\n\n try:\n # Initialise connection variable\n connection = None\n\n # Connect to PostgreSQL database\n connection = psycopg2.connect(user=user, password=database_password, host=host,\n port=port, database=database)\n cursor = connection.cursor()\n\n # Obtain the latest presence information of all users from database\n cursor.execute(\"SELECT p.user_id, p.presence, p.unix_time FROM presence p \"\n \"INNER JOIN (SELECT user_id, MAX(unix_time) AS LatestTime \"\n \"FROM presence GROUP BY user_id) pp ON p.user_id = pp.user_id AND p.unix_time = pp.LatestTime \"\n \"ORDER BY user_id\")\n query_result = cursor.fetchall()\n latest_presence = pd.DataFrame(\n query_result, columns=[desc[0] for desc in cursor.description])\n\n # Obtain user id of user who just arrived at his desk\n assert len(last_recorded_presence) == len(latest_presence)\n arrival_ids = [i for i in range(len(last_recorded_presence))\n if last_recorded_presence.loc[i, 'presence'] == 0 and latest_presence.loc[i, 'presence'] == 1]\n\n if len(arrival_ids) != 0:\n # Switch on all devices owned by the arriving user\n for index in arrival_ids:\n cursor.execute(\"SELECT meter_id FROM meters WHERE user_id={}\".format(\n last_recorded_presence.loc[index, 'user_id']))\n meter_ids = cursor.fetchall()\n for meter_id in meter_ids:\n activate_remote_control(meter_id[0], 'turnOn')\n\n # Reset the control activated trackers for different devices\n last_recorded_presence.loc[index,\n 'control_activated_desktop'] = False\n last_recorded_presence.loc[index,\n 'control_activated_laptop'] = False\n last_recorded_presence.loc[index,\n 'control_activated_monitor'] = False\n last_recorded_presence.loc[index,\n 'control_activated_tasklamp'] = False\n last_recorded_presence.loc[index,\n 'control_activated_fan'] = False\n\n last_recorded_presence['presence'] = latest_presence['presence']\n last_recorded_presence.to_csv(\n 'tables_csv/user_presence.csv', index=False)\n\n else:\n pass\n\n # Closing connection\n if (connection):\n cursor.close()\n connection.close()\n\n except(Exception, psycopg2.Error) as error:\n if (connection):\n print('Error: ', error)\n\n return None", "title": "" } ]
[ { "docid": "90c9d8582ab1661b1b528277052582af", "score": "0.6017518", "text": "def check_user_departure():\n print('[{}] Checking user departure'.format(datetime.now()))\n\n # Database and Fibaro credentials\n user = 'dadtkzpuzwfows'\n database_password = '1a62e7d11e87864c20e4635015040a6cb0537b1f863abcebe91c50ef78ee4410'\n host = 'ec2-46-137-79-235.eu-west-1.compute.amazonaws.com'\n port = '5432'\n database = 'd53rn0nsdh7eok'\n fibaro_address = '172.19.243.58:80'\n fibaro_username = 'admin'\n fibaro_password = 'admin'\n\n def activate_remote_control(meter_id, command):\n query = requests.post('http://{}/api/devices/{}/action/{}'.format(fibaro_address, meter_id, command),\n auth=HTTPBasicAuth(fibaro_username, fibaro_password)).json()\n return None\n\n def check_device(index, device_type):\n if device_type == 'tasklamp':\n processed_device_type = 'Task Lamp'\n else:\n processed_device_type = device_type.capitalize()\n\n if last_recorded_presence.loc[index, 'control_activated_{}'.format(device_type)] is np.bool_(False):\n # Query for time interval before device should be remotely switched off\n cursor.execute(\"SELECT presence_setting FROM plug_mate_app_presencedata \"\n \"WHERE user_id={} AND device_type='{}'\".format(last_recorded_presence.loc[index, 'user_id'],\n processed_device_type))\n time_interval = cursor.fetchone()[0]\n\n if time.time() - last_recorded_presence.loc[index, 'last_detected_departure'] > time_interval * 60.0:\n cursor.execute(\n \"SELECT meter_id FROM power_energy_consumption WHERE user_id={} AND device_type='{}' \"\n \"ORDER BY unix_time DESC LIMIT 1\".format(last_recorded_presence.loc[index, 'user_id'], device_type))\n meter_ids = cursor.fetchall()\n for meter_id in meter_ids:\n activate_remote_control(meter_id[0], 'turnOff')\n last_recorded_presence.loc[index, 'control_activated_{}'.format(device_type)] = True\n\n else:\n pass\n else:\n pass\n\n return None\n\n # Access last recorded user presence information\n last_recorded_presence = pd.read_csv('tables_csv/user_presence.csv')\n\n try:\n # Initialise connection variable\n connection = None\n\n # Connect to PostgreSQL database\n connection = psycopg2.connect(user=user, password=database_password, host=host,\n port=port, database=database)\n cursor = connection.cursor()\n\n # Obtain the latest presence information of all users from database\n cursor.execute(\"SELECT p.user_id, p.presence, p.unix_time FROM presence p \"\n \"INNER JOIN (SELECT user_id, MAX(unix_time) AS LatestTime \"\n \"FROM presence GROUP BY user_id) pp ON p.user_id = pp.user_id AND p.unix_time = pp.LatestTime \"\n \"ORDER BY user_id\")\n query_result = cursor.fetchall()\n latest_presence = pd.DataFrame(\n query_result, columns=[desc[0] for desc in cursor.description])\n\n # Obtain user id of user who has just left his desk and update last detected departure\n assert len(last_recorded_presence) == len(latest_presence)\n update = [(last_recorded_presence.loc[i, 'user_id'], latest_presence.loc[i, 'unix_time'])\n for i in range(len(last_recorded_presence))\n if last_recorded_presence.loc[i, 'presence'] == 1 and latest_presence.loc[i, 'presence'] == 0]\n\n if len(update) != 0:\n # Update user_presence of user's departure time\n for user_id, unix_time in update:\n update_index = last_recorded_presence['user_id'].tolist().index(\n user_id)\n last_recorded_presence.loc[update_index, 'presence'] = 0\n last_recorded_presence.loc[update_index,\n 'last_detected_departure'] = unix_time\n\n else:\n pass\n\n # Obtain user id and device type of user who has left 
his desk for a period longer the duration indicated\n # in the presence based control\n users_absent_index = [i for i in range(len(last_recorded_presence))\n if last_recorded_presence.loc[i, 'presence'] == 0 and\n (last_recorded_presence.loc[i, 'control_activated_desktop'] is np.bool_(False) or\n last_recorded_presence.loc[i, 'control_activated_laptop'] is np.bool_(False) or\n last_recorded_presence.loc[i, 'control_activated_monitor'] is np.bool_(False) or\n last_recorded_presence.loc[i, 'control_activated_tasklamp'] is np.bool_(False) or\n last_recorded_presence.loc[i, 'control_activated_fan'] is np.bool_(False))]\n\n for index in users_absent_index:\n check_device(index, 'desktop')\n check_device(index, 'laptop')\n check_device(index, 'monitor')\n check_device(index, 'tasklamp')\n check_device(index, 'fan')\n\n last_recorded_presence.to_csv(\n 'tables_csv/user_presence.csv', index=False)\n\n # Closing connection\n if (connection):\n cursor.close()\n connection.close()\n\n except(Exception, psycopg2.Error) as error:\n if (connection):\n print('Error: ', error)\n\n return None", "title": "" }, { "docid": "4675060e9f8ea98dce8777269a2cd7af", "score": "0.58045906", "text": "def checkUsers(self):\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"start checkUsers\")\n\n # get user list\n wasConnectionLost, userList = self._timekprLoginManager.getUserList()\n # if we had a disaster, remove all users because connection to DBUS was lost\n if wasConnectionLost:\n # logging\n log.log(cons.TK_LOG_LEVEL_INFO, \"IMPORTANT WARNING: due to lost DBUS connection, all users are de-initialized (including from DBUS) and re-initalized from saved state\")\n # remove them from dbus\n for rUser in self._timekprUserList:\n # remove from DBUS\n self._timekprUserList[rUser].deInitUser()\n # delete all users\n self._timekprUserList.clear()\n # delete termination list as well\n self._timekprUserRestrictionList.clear()\n\n # if global switch is enabled, we need to refresh processes at some iterval (method determines that by itself)\n if self._timekprConfig.getTimekprPlayTimeEnabled():\n # refresh PT process list\n self._timekprPlayTimeConfig.processPlayTimeActivities()\n\n # add new users to track\n for rUserName, userDict in userList.items():\n # login manager is system user, we do these checks only for system users\n if not userhelper.verifyNormalUserID(userDict[cons.TK_CTRL_UID]):\n # sys user\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"NOTE: system user \\\"%s\\\" explicitly excluded\" % (rUserName))\n # try to get login manager VT (if not already found)\n self._timekprLoginManager.determineLoginManagerVT(rUserName, userDict[cons.TK_CTRL_UPATH])\n # if username is in exclusion list, additionally verify that username is not a sysuser / login manager (this is somewhat obsolete now)\n elif rUserName in self._timekprConfig.getTimekprUsersExcl() and rUserName not in userhelper.getTimekprLoginManagers():\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"NOTE: user \\\"%s\\\" explicitly excluded\" % (rUserName))\n # if not in, we add it\n elif rUserName not in self._timekprUserList:\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"NOTE: we have a new user \\\"%s\\\"\" % (rUserName))\n # add user\n self._timekprUserList[rUserName] = timekprUser(\n self._timekprBusName,\n userDict[cons.TK_CTRL_UID],\n userDict[cons.TK_CTRL_UNAME],\n userDict[cons.TK_CTRL_UPATH],\n self._timekprConfig,\n self._timekprPlayTimeConfig\n )\n\n # init variables for user\n self._timekprUserList[rUserName].refreshTimekprRuntimeVariables()\n # adjust config\n 
self._timekprUserList[rUserName].adjustLimitsFromConfig()\n # adjust time spent\n self._timekprUserList[rUserName].adjustTimeSpentFromControl()\n\n # session list to remove\n removableUsers = [rUserName for rUserName in self._timekprUserList if rUserName not in userList]\n\n # get rid of users which left\n for rUserName in removableUsers:\n log.log(cons.TK_LOG_LEVEL_INFO, \"NOTE: user \\\"%s\\\" has gone\" % (rUserName))\n # save everything for the user\n self._timekprUserList[rUserName].saveSpent()\n self._timekprUserList[rUserName].deInitUser()\n # delete users that left\n self._timekprUserList.pop(rUserName)\n # remove if exists\n if rUserName in self._timekprUserRestrictionList:\n # delete from killing list as well\n self._timekprUserRestrictionList.pop(rUserName)\n\n # go through all users\n for rUserName in self._timekprUserList:\n # init variables for user\n self._timekprUserList[rUserName].refreshTimekprRuntimeVariables()\n\n # adjust time spent\n userActiveEffective, userActiveActual, userScreenLocked = self._timekprUserList[rUserName].adjustTimeSpentActual(self._timekprConfig)\n # recalculate time left\n self._timekprUserList[rUserName].recalculateTimeLeft()\n # process actual user session variable validation\n self._timekprUserList[rUserName].revalidateUserSessionAttributes()\n\n # get stats for user\n timeLeftArray = self._timekprUserList[rUserName].getTimeLeft()\n timeLeftToday = timeLeftArray[0]\n timeLeftInARow = timeLeftArray[1]\n timeHourUnaccounted = timeLeftArray[6]\n timePTActivityCnt = 0\n\n # PlayTime left validation\n if self._timekprConfig.getTimekprPlayTimeEnabled():\n # get time left for PLayTime\n timeLeftPT, isPTEnabled, isPTAccounted, isPTActive = self._timekprUserList[rUserName].getPlayTimeLeft()\n # enabled and active for user\n if isPTEnabled and isPTActive:\n # if there is no time left (compare to almost ultimate answer)\n # or hour is unaccounted and PT is not allowed in those hours\n if (isPTAccounted and timeLeftPT < 0.0042) or (timeHourUnaccounted and not self._timekprUserList[rUserName].getUserPlayTimeUnaccountedIntervalsEnabled()):\n # killing processes\n self._timekprPlayTimeConfig.killPlayTimeProcesses(self._timekprUserList[rUserName].getUserId())\n else:\n # active count\n timePTActivityCnt = self._timekprPlayTimeConfig.getMatchedUserProcessCnt(self._timekprUserList[rUserName].getUserId())\n # set process count (in case PT was disable in-flight or it has changed)\n self._timekprUserList[rUserName].setPlayTimeActiveActivityCnt(timePTActivityCnt)\n\n # logging\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"user \\\"%s\\\", active: %s/%s/%s (act/eff/lck), huacc: %s, tleft: %i\" % (rUserName, str(userActiveActual), str(userActiveEffective), str(userScreenLocked), str(timeHourUnaccounted), timeLeftInARow))\n\n # process actions if user is in the restrictions list\n if rUserName in self._timekprUserRestrictionList:\n # (internal idle killing switch) + user is not active + there is a time available today (opposing to in a row)\n if ((not userActiveActual and timeLeftToday > self._timekprConfig.getTimekprTerminationTime()) or timeHourUnaccounted) and self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RESTY] in (cons.TK_CTRL_RES_T, cons.TK_CTRL_RES_D):\n log.log(cons.TK_LOG_LEVEL_INFO, \"SAVING user \\\"%s\\\" from ending his sessions / shutdown\" % (rUserName))\n # remove from death list\n self._timekprUserRestrictionList.pop(rUserName)\n # if restricted time has passed for hard restrictions, we need to lift the restriction\n elif (timeLeftInARow > 
self._timekprConfig.getTimekprTerminationTime() or timeHourUnaccounted) and self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RESTY] in (cons.TK_CTRL_RES_T, cons.TK_CTRL_RES_D):\n log.log(cons.TK_LOG_LEVEL_INFO, \"RELEASING terminate / shutdown from user \\\"%s\\\"\" % (rUserName))\n # remove from restriction list\n self._timekprUserRestrictionList.pop(rUserName)\n # if restricted time has passed for soft restrictions, we need to lift the restriction\n elif (timeLeftInARow > self._timekprConfig.getTimekprTerminationTime() or timeHourUnaccounted) and self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RESTY] in (cons.TK_CTRL_RES_L, cons.TK_CTRL_RES_S, cons.TK_CTRL_RES_W):\n log.log(cons.TK_LOG_LEVEL_INFO, \"RELEASING lock / suspend from user \\\"%s\\\"\" % (rUserName))\n # remove from restriction list\n self._timekprUserRestrictionList.pop(rUserName)\n # update restriction stats\n else:\n # update active states for restriction routines\n self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_USACT] = userActiveActual\n self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_USLCK] = userScreenLocked\n self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RTDEA] = max(self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RTDEA] - 1, 0)\n # only if user is active / screen is not locked\n if ((userActiveActual and self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RESTY] in (cons.TK_CTRL_RES_T, cons.TK_CTRL_RES_D))\n or (not userScreenLocked and self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RESTY] in (cons.TK_CTRL_RES_S, cons.TK_CTRL_RES_L, cons.TK_CTRL_RES_W))):\n # update active states for restriction routines\n self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RTDEL] = max(self._timekprUserRestrictionList[rUserName][cons.TK_CTRL_RTDEL] - 1, 0)\n\n # ## FILL IN USER RESTRICTIONS ##\n\n # if user has very few time left, we need to enforce limits: Lock screen / Sleep computer / Shutdown computer / Terminate sessions\n if timeLeftInARow <= self._timekprConfig.getTimekprTerminationTime() and not timeHourUnaccounted and rUserName not in self._timekprUserRestrictionList and userActiveActual:\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"INFO: user \\\"%s\\\" has got restrictions...\" % (rUserName))\n # add user to restrictions list\n self._timekprUserRestrictionList[rUserName] = {\n cons.TK_CTRL_UPATH: self._timekprUserList[rUserName].getUserPathOnBus(), # user path on dbus\n cons.TK_CTRL_FCNTD: max(timeLeftInARow, self._timekprConfig.getTimekprTerminationTime()), # final countdown\n cons.TK_CTRL_RESTY: self._timekprUserList[rUserName].getUserLockoutType(), # restricton type: lock, suspend, suspendwake, terminate, shutdown\n cons.TK_CTRL_RTDEL: 0, # retry delay before next attempt to enforce restrictions\n cons.TK_CTRL_RTDEA: 0, # retry delay (additional delay for lock in case of suspend)\n cons.TK_CTRL_USACT: userActiveActual, # whether user is actually active\n cons.TK_CTRL_USLCK: userScreenLocked, # whether user screen is locked\n cons.TK_CTRL_USWKU: self._timekprUserList[rUserName].findNextAvailableIntervalStart() if self._timekprUserList[rUserName].getUserLockoutType() == cons.TK_CTRL_RES_W and timeLeftToday > timeLeftInARow else None\n }\n # in case this is first restriction we need to initiate restriction process\n if len(self._timekprUserRestrictionList) == 1:\n # process users\n GLib.timeout_add_seconds(1, self._restrictUsers)\n\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"finish checkUsers\")", "title": "" }, { "docid": "edace7792839bedf2617327f11e11be7", 
"score": "0.5626032", "text": "def ping_devices():", "title": "" }, { "docid": "495b24127b7efae7aa5bb0366f6c13d5", "score": "0.546232", "text": "def OnUser(self, e):\n with userDataLock:\n newStatus = not userData.status\n if newStatus == True:\n UserOnlineDefault(self.UserDataUpdated)\n else:\n UserOfflineDefault(self.UserDataUpdated)\n\n self.DisableButtons()", "title": "" }, { "docid": "4cadea518c6332eec4fe3f4504075350", "score": "0.5461531", "text": "def test_dont_send_device_updates_for_remote_users(self) -> None:\n\n # Send the server a device list EDU for the other user, this will cause\n # it to try and resync the device lists.\n self.federation_transport_client.query_user_devices.return_value = {\n \"stream_id\": \"1\",\n \"user_id\": \"@user2:host2\",\n \"devices\": [{\"device_id\": \"D1\"}],\n }\n\n self.get_success(\n self.device_handler.device_list_updater.incoming_device_list_update(\n \"host2\",\n {\n \"user_id\": \"@user2:host2\",\n \"device_id\": \"D1\",\n \"stream_id\": \"1\",\n \"prev_ids\": [],\n },\n )\n )\n\n self.reactor.advance(1)\n\n # We shouldn't see an EDU for that update\n self.assertEqual(self.edus, [])\n\n # Check that we did successfully process the inbound EDU (otherwise this\n # test would pass if we failed to process the EDU)\n devices = self.get_success(\n self.hs.get_datastores().main.get_cached_devices_for_user(\"@user2:host2\")\n )\n self.assertIn(\"D1\", devices)", "title": "" }, { "docid": "0617466e32a836293c3a02b535be1559", "score": "0.53784394", "text": "def active_users_thread():\n while True:\n send_active_users(broadcast=True)\n socketio.sleep(1)", "title": "" }, { "docid": "0e98d6d3b671691cd4c44fb7cf92467f", "score": "0.530803", "text": "def test_is_on(self):\n assert not fan.is_on(self.hass, FAN_ENTITY_ID)\n\n common.turn_on(self.hass, FAN_ENTITY_ID)\n self.hass.block_till_done()\n assert fan.is_on(self.hass, FAN_ENTITY_ID)", "title": "" }, { "docid": "87fcfaa2d165ada87e459bc74b54c72c", "score": "0.52752936", "text": "def check_slew(self):\n while True:\n if self._pointing_state != 1:\n self._admin_mode = 1 # Set adminMode to OFFLINE\n self.set_state(DevState.DISABLE) # Set STATE to DISABLE\n self._dish_mode = 6 # Set dishMode to STOW\n self._health_state = 0 # Set healthState to OK\n self.set_status(CONST.STR_DISH_STOW_SUCCESS)\n self.dev_logging(CONST.STR_DISH_STOW_SUCCESS, int(tango.LogLevel.LOG_INFO))\n break", "title": "" }, { "docid": "ad3e180a7a8b9909b4c4ab0ade37e465", "score": "0.52693784", "text": "def test_softap_5G_two_clients_ping_each_other(self):\n asserts.skip_if(len(self.android_devices) < 3,\n \"No extra android devices. 
Skip test\")\n self.validate_full_tether_startup(WIFI_CONFIG_APBAND_5G, test_clients=True)", "title": "" }, { "docid": "d09db9a4b5f04a43848aaa459d5cda2d", "score": "0.5256719", "text": "def __on_initial_statuses(self, users):\n for account_id in users:\n if account_id in self.__users:\n user = self.__users[account_id]\n user.status = users[account_id]['status']\n user.flags = users[account_id]['flags']", "title": "" }, { "docid": "c9f6fbee74a2ad38a06cfc0ef3030ade", "score": "0.5237259", "text": "def schedule_control():\n print('[{}] Checking schedule controls'.format(datetime.now()))\n\n # Database and Fibaro credentials\n user = 'dadtkzpuzwfows'\n database_password = '1a62e7d11e87864c20e4635015040a6cb0537b1f863abcebe91c50ef78ee4410'\n host = 'ec2-46-137-79-235.eu-west-1.compute.amazonaws.com'\n port = '5432'\n database = 'd53rn0nsdh7eok'\n fibaro_address = '172.19.243.58:80'\n fibaro_username = 'admin'\n fibaro_password = 'admin'\n\n def activate_remote_control(meter_id, command):\n query = requests.post('http://{}/api/devices/{}/action/{}'.format(fibaro_address, meter_id, command),\n auth=HTTPBasicAuth(fibaro_username, fibaro_password)).json()\n return None\n\n def check_user_presence(user_id):\n cursor.execute(\"SELECT presence FROM presence WHERE user_id={} ORDER BY unix_time DESC LIMIT 1\".format(user_id))\n presence = cursor.fetchone()[0]\n if presence == 0:\n return False\n elif presence == 1:\n return True\n else:\n raise ValueError('Presence information returned {} is not supported.')\n\n def check_schedule(schedules, current_time, day_of_week, state):\n if state == 'On':\n event_column = 'event_start'\n else:\n event_column = 'event_end'\n\n # Obtain user id and device type information for devices that needs to be switched ON/OFF\n control_schedule = [(schedules.loc[i, 'user_id'], schedules.loc[i, 'device_type'])\n for i in range(len(schedules))\n if current_time in schedules.loc[i, event_column] and day_of_week in schedules.loc[\n i, 'event_rrule']]\n\n # Remotely switch ON/OFF devices using meter id (obtained using user id and device type)\n if state == 'On':\n command = 'turnOn'\n else:\n command = 'turnOff'\n\n for user_id, device_type in control_schedule:\n if check_user_presence(user_id):\n continue\n else:\n if device_type == 'Task Lamp':\n device_type = 'tasklamp'\n\n cursor.execute(\"SELECT meter_id FROM power_energy_consumption WHERE user_id={} AND device_type='{}' \"\n \"ORDER BY unix_time DESC LIMIT 1\".format(user_id, device_type.lower()))\n meter_ids = cursor.fetchall()\n\n for meter_id in meter_ids:\n activate_remote_control(meter_id[0], command)\n\n # Obtain the schedule for all users and device types\n try:\n # Initialise connection variable\n connection = None\n\n # Connect to database\n connection = psycopg2.connect(user=user, password=database_password, host=host,\n port=port, database=database)\n cursor = connection.cursor()\n\n # Obtain schedules from database\n cursor.execute(\"SELECT * FROM plug_mate_app_scheduledata\")\n query_result = cursor.fetchall()\n schedules = pd.DataFrame(query_result, columns=[\n desc[0] for desc in cursor.description])\n\n # Check if the starting time of any schedule matches with the current time\n current_time = datetime.today().strftime('%H:%M')\n day_of_week = datetime.today().strftime('%A')\n\n # Check schedule to see if any devices needs to be switched ON/OFF\n check_schedule(schedules, current_time, day_of_week, 'On')\n check_schedule(schedules, current_time, day_of_week, 'Off')\n\n # Closing connection\n if 
(connection):\n cursor.close()\n connection.close()\n\n except(Exception, psycopg2.Error) as error:\n if (connection):\n print('Error: ', error)\n\n return None", "title": "" }, { "docid": "18732b1ecd57ca0a929f29720b133c63", "score": "0.52260256", "text": "def set_online(users, user_id):\n users[user_id]['active'] = True\n users[user_id]['first_seen'] = time.time()", "title": "" }, { "docid": "879457e8f229bc846c66d24a1461a6d3", "score": "0.52148634", "text": "def check_journeys(self):\n current_min = self.current_time.minute\n for journey in self.scheduled_trains:\n if journey.departure_minute == current_min:\n journey.start()\n print(f\"Train {journey.train.num} started it's journey from {journey.departure_station} to {journey.arrival_station}!\")\n elif journey.arrival_minute == current_min:\n journey.stop()\n print(f\"Train {journey.train.num} arrived at {journey.arrival_station} (Platform {journey.train.current_platform.platform_num}) from {journey.departure_station}!\")", "title": "" }, { "docid": "bf03b065fa3d399995b2a81dec503980", "score": "0.519261", "text": "def test_wlan_started_client_status(self):\n for fd in self.fuchsia_devices:\n fd.configure_wlan(association_mechanism='policy',\n preserve_saved_networks=True)\n\n status = fd.wlan_lib.wlanStatus()\n self.log.debug(status)\n if status[\"error\"] or not status[\"result\"]:\n raise signals.TestFailure(\n \"DUT's WLAN client status should be populated\")\n\n raise signals.TestPass(\"Success\")", "title": "" }, { "docid": "d43b4c801d5ff320d0db808d01db3d3c", "score": "0.5177363", "text": "def _HandleAutoProv(self):\n # All idle devices -> waiting.\n for target_dev in self.atft_manager.target_devs:\n if (target_dev.serial_number not in self.auto_dev_serials and\n target_dev.provision_status != ProvisionStatus.PROVISION_SUCCESS and\n not ProvisionStatus.isFailed(target_dev.provision_status)\n ):\n self.auto_dev_serials.append(target_dev.serial_number)\n target_dev.provision_status = ProvisionStatus.WAITING\n self._CreateThread(self._HandleStateTransition, target_dev)", "title": "" }, { "docid": "76a5d1bce07ef0a50f52349a3fd8de83", "score": "0.5164091", "text": "def interface_up_handler(self, up_interface_lists):\n try:\n for interface_dict in self.interface_list:\n interface = interface_dict['interface']\n if interface not in up_interface_lists and interface_dict['status'] != self.INTERFACE_DOWN:\n self.logger.debug(\"Interface[%s] changed to Down state\", interface)\n self.notify.error(rpd_event_def.RPD_EVENT_CONNECTIVITY_ETH_DOWN[0], interface, '')\n interface_dict['status'] = self.INTERFACE_DOWN\n # fixme do we need any action?\n # we need to check cores ints\n elif interface in up_interface_lists and interface_dict['status'] == self.INTERFACE_UP:\n up_interface_lists.remove(interface)\n\n for interface in up_interface_lists:\n self.interface_list.append(\n {\n \"interface\": interface,\n \"status\": self.INTERFACE_UP,\n }\n )\n self.notify.info(rpd_event_def.RPD_EVENT_CONNECTIVITY_ETH_UP[0], interface, '')\n if interface not in self.interface_candidate:\n self.interface_candidate.append(interface)\n\n if len(self.interface_candidate) and self.interface_candidate[0] not in self.interface_core_map:\n self.fsm.INTERFACE_SCAN(interface=self.interface_candidate[0])\n\n # Process the timers\n for interface_dict in self.interface_list:\n if interface_dict['status'] == self.INTERFACE_UP:\n # stop the interface timer\n if self.interface_scan_timer:\n self.dispatcher.timer_unregister(\n self.interface_scan_timer)\n 
self.interface_scan_timer = None\n break\n else:\n if self.interface_scan_timer is None:\n self.interface_scan_timer = self.dispatcher.timer_register(self.INTERFACE_SCAN_TIMEOUT,\n self._interface_scan_timeout_callback,\n timer_type=DpTimerManager.TIMER_ONESHOT)\n except FysomError as e:\n self.logger.error(\n \"Got an exception about manager fsm: %s\", str(e))", "title": "" }, { "docid": "e63e4d159c35631c28ff284eb8cd375c", "score": "0.5152081", "text": "def check_device_state(self):\n\n if not (self.switch.cli.conn.check_client() and self.switch.cli.conn.check_shell()):\n try:\n self.switch.ui.connect()\n except (CLISSHException, SSHException, SocketError):\n self.switch.ui.disconnect()\n raise Exception(\"Device is not ready.\")\n\n # Add cli application check", "title": "" }, { "docid": "49958ea91097ecfdc904b69f6f222709", "score": "0.51510864", "text": "def start():\n\tusers_side = choose_users_side()\n\tboard = create_board()\n\tprint_instructions()\n\tprint_board()\n\twhile True:\n\t\tif has_some_legal_move_somewhere(M):\n\t\t\tboard = move_musketeer(users_side)\n\t\t\tprint_board()\n\t\t\tif is_enemy_win():\n\t\t\t\tprint(\"Cardinal Richleau's men win!\")\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"The Musketeers win!\")\n\t\t\tbreak\n\t\tif has_some_legal_move_somewhere(R):\n\t\t\tboard = move_enemy(users_side)\n\t\t\tprint_board()\n\t\telse:\n\t\t\tprint(\"The Musketeers win!\")\n\t\t\tbreak", "title": "" }, { "docid": "d8c76b2d1a0b650162ec4928a09c5957", "score": "0.5149568", "text": "def listen(self):\n user = None\n side = None\n self.log.info(\"Began thinking\")\n\n while True:\n # TODO Aggregate lore from Chromalore and battles and store it\n\n #set antenna to correct side\n user = ('Orangered_HQ' if user == 'Periwinkle_Prime_3'\n else 'Periwinkle_Prime_3')\n side = (0 if side == 1 else 1)\n self.antenna.set_user(user)\n self.log.info(\"Set antenna to {}\".format(user))\n\n self.get_recruits(user, side)\n\n self.get_combatants()\n\n self.update_all_group(side)\n\n # refresh bot's token\n new_access_info = self.antenna.refresh_token_user()\n memory.handle_player_memory(self.db,\n user,\n accessInfo=dumps(new_access_info))\n\n print(\"Done with {}'s cycle\".format(user))\n time.sleep(30)", "title": "" }, { "docid": "de8fd613313cad4790ab1239c7759283", "score": "0.51479673", "text": "async def sender_task(self):\n await self.connect()\n await self.sync_scenes(0)\n await self.get_device_names()\n\n logging.info(\"Waiting until all devices are retreived\")\n await trio.sleep(5)\n if len(self.devices_for_sync) > 0: # sync devices when there are devices known by name\n logging.info(f\"Devices where replied, syncing those devices\")\n await self.sync_device_status(self.devices_for_sync)\n\n # Main loop, keep updating every 30 seconds. 
Keeps 'connection' alive in order\n # to receive alarms/events\n while True:\n await trio.sleep(30) # sleep first to handle the sync scenes and device names\n await self.sync_devices()\n await self.get_device_names()", "title": "" }, { "docid": "f585f43c5ac24c69d106a93970d4f576", "score": "0.51214856", "text": "def enable_multifactor(self, user) -> bool:\n return False", "title": "" }, { "docid": "668fb26df0d2fc69f8736dbe8c8f77a4", "score": "0.5110527", "text": "def update_users(self, exclude=None):\n start_time = time()\n for player in self.players:\n if player != exclude:\n player.card_update()\n for observer in self.observers:\n if observer != exclude:\n observer.card_update()\n print(\"Card update, took: \" + str(time() - start_time) + \"s\")", "title": "" }, { "docid": "4b32c3e98e703e160c67351196b73211", "score": "0.5109302", "text": "def do_test_bootstrap(self, env):\n\n mender_device = env.device\n\n devauth = DeviceAuthV2(env.auth)\n devauth.check_expected_status(\"pending\", 1)\n\n # iterate over devices and accept them\n for d in devauth.get_devices():\n devauth.set_device_auth_set_status(\n d[\"id\"], d[\"auth_sets\"][0][\"id\"], \"accepted\"\n )\n logger.info(\"Accepting DeviceID: %s\" % d[\"id\"])\n\n # make sure all devices are accepted\n devauth.check_expected_status(\"accepted\", 1)\n\n Helpers.check_log_have_authtoken(mender_device)\n\n # print all device ids\n for device in devauth.get_devices_status(\"accepted\"):\n logger.info(\"Accepted DeviceID: %s\" % device[\"id\"])", "title": "" }, { "docid": "bfcfc25f0a2617b6607c1b90849b00af", "score": "0.5108779", "text": "def _connect_disconnect_devices(self):\n device_connected = [ False ] * len(self.tethered_devices)\n for _ in range(50):\n dut_id = random.randint(0, len(self.tethered_devices)-1)\n dut = self.tethered_devices[dut_id]\n # wait for 1 sec between connect & disconnect stress test\n time.sleep(1)\n if device_connected[dut_id]:\n wutils.wifi_forget_network(dut, self.network[\"SSID\"])\n else:\n wutils.wifi_connect(dut, self.network)\n device_connected[dut_id] = not device_connected[dut_id]", "title": "" }, { "docid": "5c39e293c2b5741dcd3f1302d8a2e0e3", "score": "0.50933677", "text": "def turnon(self):\n pass", "title": "" }, { "docid": "19ced06e2b054a6aa4b57b4ab5ee4d4e", "score": "0.50810885", "text": "def handle_system_state_messages_that_were_requested_and_set_online_status(**data):\r\n device_id = data['topic'].split('/')[2]\r\n # check if device is already online, according to the state we keep\r\n if devices[device_id]['device']['online'] == 'online':\r\n log.debug(f\"device {device_id} is already online\")\r\n devices[device_id]['device']['when'] = arrow.now().isoformat()\r\n else:\r\n log.info(f\"device {device_id} is back online ♥\")\r\n # update device status in our state manager (devices)\r\n devices[device_id]['device']['online'] = 'online'\r\n devices[device_id]['device']['when'] = arrow.now().isoformat()\r\n # update online status of the entities (channels)\r\n for channel in devices[device_id]['device']['channels']:\r\n mqtt.publish(\r\n topic=f\"meross/{device_id}/{channel}/available\",\r\n payload=\"online\",\r\n retain=True\r\n )", "title": "" }, { "docid": "36f4cdca4433351fff1210d43827f471", "score": "0.5080534", "text": "def start_taking(self, user):\r\n if not self.user_has_access(user):\r\n raise exceptions.AccessDenialError(\r\n errorquotes.ACCESS_DENIED.format(repr(user)))\r\n self._states[user.id] = State.TAKING", "title": "" }, { "docid": "310500447e3ed20843289ec8f886dd5d", "score": 
"0.50767106", "text": "def checkobservingallowed(self):\n serviceuser = self._lcu_interface.who_servicebroker()\n\n if serviceuser is None or serviceuser == self._lcu_interface.user:\n stationswitchmode = self._lcu_interface.getstationswitchmode()\n if stationswitchmode == 'local':\n if not self._lcu_interface.is_beam_on():\n return True\n return False\n else:\n print(\"Warning: Station is not in stand-alone mode.\")\n return False\n else:\n print(\"\"\"Warning: Someone else ({}) is using LCU\n (You are running as {})\"\"\"\\\n .format(serviceuser, self._lcu_interface.user))\n return False", "title": "" }, { "docid": "4c08ee4f153c9160c70ae6b9e08285d3", "score": "0.5075275", "text": "def devices_for_user(user, confirmed=True, for_verify=False):\n if user.is_anonymous:\n return\n\n for model in device_classes():\n device_set = model.objects.devices_for_user(user, confirmed=confirmed)\n if for_verify:\n device_set = device_set.select_for_update()\n\n yield from device_set", "title": "" }, { "docid": "abbded188f9ad4c8c406694a954e8e17", "score": "0.50678337", "text": "def turn_switch_on(self):\n switch_status = self.get_switch_report()\n switch_state = switch_status[\"relay\"]\n if switch_state == True:\n print(\"Switch is alredy on!\")\n return\n else:\n command = 'curl --location --request GET \"http://' + self.__ip_address + '/relay?state=1\"'\n os.system(command)", "title": "" }, { "docid": "8bc882a27863f071863ace4052acf195", "score": "0.50676113", "text": "async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:\n for member_event in current_state:\n try:\n if member_event.type != EventTypes.Member:\n continue\n\n if not self.hs.is_mine_id(member_event.state_key):\n continue\n\n if member_event.content[\"membership\"] not in {\n Membership.JOIN,\n Membership.INVITE,\n }:\n continue\n\n if (\n \"kind\" not in member_event.content\n or member_event.content[\"kind\"] != \"guest\"\n ):\n continue\n\n # We make the user choose to leave, rather than have the\n # event-sender kick them. 
This is partially because we don't\n # need to worry about power levels, and partially because guest\n # users are a concept which doesn't hugely work over federation,\n # and having homeservers have their own users leave keeps more\n # of that decision-making and control local to the guest-having\n # homeserver.\n target_user = UserID.from_string(member_event.state_key)\n requester = create_requester(\n target_user, is_guest=True, authenticated_entity=self._server_name\n )\n handler = self.hs.get_room_member_handler()\n await handler.update_membership(\n requester,\n target_user,\n member_event.room_id,\n \"leave\",\n ratelimit=False,\n require_consent=False,\n )\n except Exception as e:\n logger.exception(\"Error kicking guest user: %s\" % (e,))", "title": "" }, { "docid": "cb4c2301484b4ec550fa00e55d034b21", "score": "0.504434", "text": "def turn_on(self):\n pass", "title": "" }, { "docid": "90f64f847d9e9196233474e3212055c1", "score": "0.5037454", "text": "def get(self):\n users = User.query(User.email != None)\n date = datetime.datetime.now()\n query = Game.query(Game.last_move <= date - datetime.timedelta(minutes=12))\n user_game = {game.next_turn: game\n for game in query}\n\n for user in users:\n if user.key in user_game.keys():\n send_turn_reminder_email(user, user_game[user.key].key.urlsafe())", "title": "" }, { "docid": "a17dab2f816678661593f338e2485c44", "score": "0.5031898", "text": "def notify_users(request):\n for user in User.objects.filter(groups__name='API_runners'):\n msg = \"User {} was informed about request {}\".format(\n user.username, request)\n print msg\n # giving too much noise but can be used if soemone need it\n # logger.debug(msg)", "title": "" }, { "docid": "3119b3c80863f878c2941c0227410733", "score": "0.5019806", "text": "def _wait_until_clients_turn(self, client_name=None):\n self.token += 1\n token = self.token\n self.ongoing_clients.append(token)\n while self.ongoing_clients[0] != token:\n pass", "title": "" }, { "docid": "337ef7980bca8024b249cf71b9a2e6cb", "score": "0.5012076", "text": "def test_softap_2G_two_clients_ping_each_other(self):\n asserts.skip_if(len(self.android_devices) < 3,\n \"No extra android devices. 
Skip test\")\n self.validate_full_tether_startup(WIFI_CONFIG_APBAND_2G, test_clients=True)", "title": "" }, { "docid": "06f5bf6d507fc80efaf6cd2f744c5bc1", "score": "0.50047845", "text": "def signedOn(self):\n self.msg(\"nickserv\", \"identify %s\" % getconfig('password'))\n for channel in self.factory.channels:\n self.join(channel)\n if not self.alreadyrunning:\n reactor.callLater(10, self.gameloop, firstrun=True)\n reactor.callLater(10, self.rssloop, firstrun=True)\n self.alreadyrunning = True", "title": "" }, { "docid": "83b84f480c7138f7b848ddc6681f533a", "score": "0.49902377", "text": "def tick(self):\n for board_type in BOARDS:\n if hasattr(board_type, 'lookup_keys'):\n nodes = self.context.list_devices(**board_type.lookup_keys)\n initialized_nodes = [n for n in nodes if n.is_initialized]\n self._process_device_list(board_type, initialized_nodes)", "title": "" }, { "docid": "3e656db7eadcd84ddb23d05d3d6577ac", "score": "0.49823016", "text": "def switch_laser(self):\n if self._mw.laser_ON_checkBox.isChecked():\n self.time_start = time.time()\n self._fiber_shooting_logic.set_duty_cycle(self._mw.duty_cycle_doubleSpinBox.value())\n self.power_data = []\n self.time_data = []\n self._fiber_shooting_logic.set_power()\n self.laser_status = True\n else:\n self._fiber_shooting_logic.set_duty_cycle(0)\n self.laser_status = False\n return", "title": "" }, { "docid": "5895254ee64b8e5b6a19d72678e1c0c0", "score": "0.49818733", "text": "def _update_user_requests(self):\n\n if self.daytime % 120 == 0:\n self.user_requests['temp_desired'] = random.randint(15, 20)\n\n if self.day_start < self.daytime < self.day_end:\n self.user_requests['temp_desired'] += 4\n\n self.user_requests['light_desired'] = round(random.random(), 1)", "title": "" }, { "docid": "0c0eaea41fe37351baa1eaf548cd911f", "score": "0.49701744", "text": "def initiate(self):\n\n # Add Choices:\n y = []\n for x in MATCH_SUGGESTION_CHOICES:\n ## Don't offer chat option to iPhone clients\n #if filter(lambda z: len(z.device_id) >= 5, self.users.all()):\n # if x[0] == 'chat':\n # continue\n # Facebook option only if both users are authed on FB: (diabled for now\n if (x[0]=='facebook' and self.users.all()[0].auth_facebook and self.users.all()[1].auth_facebook) or x[0]!='facebook' or 1==1:\n y.append(x[0])\n self.choices = '|'.join(y)\n\n\n #depr: does one of the user already have a running match? if yes, enqueue this one:\n self.status='1'\n #for user in self.users.all():\n # if user.get_match():\n # self.status='0'\n\n\n # Save\n self.save()\n\n if self.status=='1':\n self.initiate_users()", "title": "" }, { "docid": "65ecf5a1782db368944ccecefe0e8492", "score": "0.4969057", "text": "def validate_full_tether_startup(self, band=None, hidden=None,\n test_ping=False, test_clients=None):\n initial_wifi_state = self.dut.droid.wifiCheckState()\n initial_cell_state = tel_utils.is_sim_ready(self.log, self.dut)\n self.dut.log.info(\"current state: %s\", initial_wifi_state)\n self.dut.log.info(\"is sim ready? 
%s\", initial_cell_state)\n if initial_cell_state:\n self.check_cell_data_and_enable()\n config = self.create_softap_config()\n wutils.start_wifi_tethering(self.dut,\n config[wutils.WifiEnums.SSID_KEY],\n config[wutils.WifiEnums.PWD_KEY], band, hidden)\n if hidden:\n # First ensure it's not seen in scan results.\n self.confirm_softap_not_in_scan_results(\n config[wutils.WifiEnums.SSID_KEY])\n # If the network is hidden, it should be saved on the client to be\n # seen in scan results.\n config[wutils.WifiEnums.HIDDEN_KEY] = True\n ret = self.dut_client.droid.wifiAddNetwork(config)\n asserts.assert_true(ret != -1, \"Add network %r failed\" % config)\n self.dut_client.droid.wifiEnableNetwork(ret, 0)\n self.confirm_softap_in_scan_results(config[wutils.WifiEnums.SSID_KEY])\n if test_ping:\n self.validate_ping_between_softap_and_client(config)\n if test_clients:\n if hasattr(self, 'arduino_wifi_dongles'):\n self.validate_traffic_between_softap_clients(config)\n if len(self.android_devices) > 2:\n self.validate_ping_between_two_clients(config)\n wutils.stop_wifi_tethering(self.dut)\n asserts.assert_false(self.dut.droid.wifiIsApEnabled(),\n \"SoftAp is still reported as running\")\n if initial_wifi_state:\n wutils.wait_for_wifi_state(self.dut, True)\n elif self.dut.droid.wifiCheckState():\n asserts.fail(\"Wifi was disabled before softap and now it is enabled\")", "title": "" }, { "docid": "6375e456d14add46e658bef7a54d8c6d", "score": "0.49648637", "text": "def create_users_and_start_service(self):\n mer_request = self.env['merchant.request'].browse(self.env.context.get('active_id'))\n\n # Invoice Validation for duplicacy\n if not mer_request.validate_and_register_invoice(action=self.approval_type, invoice_id=self.invoice_id):\n raise Warning(_('Invoice ID is already registered.'))\n\n # Invoice Validation\n if self.invoice_id.state != 'paid':\n raise Warning(_('Invoice is not paid. 
Please mark invoice as paid and try again.'))\n\n if self.approval_type == 'new':\n merchant_admin = mer_request.register_partner({'login':self.mer_admin_email, 'name':mer_request.shopname+' - Admin (%s)' % (self.mer_admin_username), 'is_admin':True, 'parent_id':mer_request.partner_ids[0].id})\n merchant_user = mer_request.register_partner({'login':self.mer_user_email, 'name':mer_request.shopname+' - User (%s)' % (self.mer_user_username), 'is_admin':False, 'parent_id':mer_request.partner_ids[0].id})\n mer_request.start()\n\n elif self.approval_type == 'renew':\n mer_request.resume()\n \n return", "title": "" }, { "docid": "0b9f754a7d0c72a30a9c1ed8e10b17ff", "score": "0.49609804", "text": "def _setClients(self, clients):\n for client in clients:\n for stepRec in self.steps:\n if client.name == stepRec['clientName']:\n # Initialize client online/offline deferreds\n onlineDeferred = defer.Deferred().addCallback(self.handleClientReconnect)\n offlineDeferred = defer.Deferred().addCallback(self.handleClientDisconnect)\n # Add these deferreds to corresponding client's dictionaries - to be called back when appropriate events occur\n client.addOnlineDeferred(onlineDeferred, reset = True)\n client.addOfflineDeferred(offlineDeferred, reset = True)\n # Set current step record's 'client' data\n stepRec['client'] = client\n break\n \n # All clients have been set: check 'online' status of all clients, and set workflow's status correspondingly\n self._checkOnline()", "title": "" }, { "docid": "dea8683c856bc4a89955af96f3e128cd", "score": "0.4948599", "text": "async def check_reachable(self, new_user: User):\n try:\n # wait for accept or reject as user\n while new_user not in self.user:\n if new_user.closed:\n log.debug(f\"user connection closed on check_reachable {new_user}\")\n return\n await asyncio.sleep(1.0)\n # try to check TCP\n f_tcp = True\n host_port = new_user.get_host_port()\n af = socket.AF_INET if len(host_port) == 2 else socket.AF_INET6\n sock = socket.socket(af, socket.SOCK_STREAM)\n sock.settimeout(3.0)\n try:\n future: asyncio.Future = loop.run_in_executor(\n None, sock.connect_ex, host_port)\n await asyncio.wait_for(future, 10.0)\n result = future.result()\n if result != 0:\n f_tcp = False\n except (OSError, asyncio.TimeoutError):\n f_tcp = False\n loop.run_in_executor(None, sock.close)\n # try to check UDP\n f_udp = await self.ping(user=new_user, f_udp=True)\n f_changed = False\n # reflect user status\n if f_tcp is not new_user.header.p2p_accept:\n # log.debug(f\"{new_user} Update TCP accept status {new_user.header.p2p_accept}=>{f_tcp}\")\n new_user.header.p2p_accept = f_tcp\n f_changed = True\n if f_udp is not new_user.header.p2p_udp_accept:\n # log.debug(f\"{new_user} Update UDP accept status {new_user.header.p2p_udp_accept}=>{f_udp}\")\n new_user.header.p2p_udp_accept = f_udp\n f_changed = True\n if f_changed:\n log.debug(f\"{new_user} Change socket status tcp={f_tcp} udp={f_udp}\")\n except Exception:\n log.error(\"check_reachable exception\", exc_info=True)", "title": "" }, { "docid": "e99cf67b987ce03657592b6af9c475e6", "score": "0.49391797", "text": "def poll_device(self):\n # check if Tasmota Zigbee Bridge needs to be configured\n tasmota_zigbee_bridge_status = self.tasmota_zigbee_bridge.get('status')\n if tasmota_zigbee_bridge_status == 'discovered':\n self.logger.info(f'poll_device: Tasmota Zigbee Bridge discovered; Configuration will be adapted.')\n zigbee_device = self.tasmota_zigbee_bridge.get('device')\n if zigbee_device:\n self._discover_zigbee_bridge(zigbee_device)\n\n 
self.logger.info(\"poll_device: Checking online status of connected devices\")\n for tasmota_topic in self.tasmota_devices:\n if self.tasmota_devices[tasmota_topic].get('online') is True and self.tasmota_devices[tasmota_topic].get(\n 'online_timeout'):\n if self.tasmota_devices[tasmota_topic]['online_timeout'] < datetime.now():\n self.tasmota_devices[tasmota_topic]['online'] = False\n self._set_item_value(tasmota_topic, 'item_online', False, 'poll_device')\n self.logger.info(\n f\"poll_device: {tasmota_topic} is not online any more - online_timeout={self.tasmota_devices[tasmota_topic]['online_timeout']}, now={datetime.now()}\")\n # delete data from WebIF dict\n self.tasmota_devices[tasmota_topic]['lights'] = {}\n self.tasmota_devices[tasmota_topic]['rf'] = {}\n self.tasmota_devices[tasmota_topic]['sensors'] = {}\n self.tasmota_devices[tasmota_topic]['relais'] = {}\n self.tasmota_devices[tasmota_topic]['zigbee'] = {}\n else:\n self.logger.debug(f'poll_device: Checking online status of {tasmota_topic} successfull')\n\n # ask for status info of reconnected tasmota_topic (which was not connected during plugin start)\n if not self.tasmota_devices[tasmota_topic].get('mac'):\n self.logger.debug(f\"poll_device: reconnected device discovered and try to discover it\")\n self._identify_device(tasmota_topic)\n\n # update tasmota_meta auf Basis von tasmota_devices\n self._update_tasmota_meta()", "title": "" }, { "docid": "67d10c69fd3dd343f74e79f6a3e79ac4", "score": "0.4936193", "text": "def discover_finished():\n _LOGGER.debug(\n \"Successfully discovered %s out of %s registered nodes\",\n str(len(api_stick.devices)),\n str(api_stick.joined_nodes),\n )\n for component in PLATFORMS_USB:\n hass.data[DOMAIN][config_entry.entry_id][component] = []\n\n for mac, pw_device in api_stick.devices.items():\n # Skip unsupported devices\n if pw_device is not None:\n if USB_RELAY_ID in pw_device.features:\n hass.data[DOMAIN][config_entry.entry_id][SWITCH_DOMAIN].append(mac)\n if USB_MOTION_ID in pw_device.features:\n hass.data[DOMAIN][config_entry.entry_id][\n BINARY_SENSOR_DOMAIN\n ].append(mac)\n hass.data[DOMAIN][config_entry.entry_id][SENSOR_DOMAIN].append(mac)\n\n for component in PLATFORMS_USB:\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(config_entry, component)\n )\n\n def add_new_node(mac):\n \"\"\"Add Listener when a new Plugwise node joined the network.\"\"\"\n device = device_registry.async_get_device({(DOMAIN, mac)}, set())\n hass.components.persistent_notification.async_create(\n title=\"New Plugwise device\",\n message=(\n \"A new Plugwise device has been joined : \\n\\n\"\n f\" - {api_stick.devices[mac].hardware_model} ({mac[-5:]})\\n\\n\"\n f\"Configure this device at the [device dashboard](/config/devices/device/{device.id})\"\n ),\n )\n\n api_stick.auto_update()\n\n # Home Assistant version 2021.6 changed the stored format of system options\n if AwesomeVersion(current_ha_version) >= AwesomeVersion(\"2021.6.0\"):\n if config_entry.pref_disable_new_entities:\n _LOGGER.debug(\"Configuring stick NOT to accept any new join requests\")\n api_stick.allow_join_requests(True, False)\n else:\n _LOGGER.debug(\n \"Configuring stick to automatically accept new join requests\"\n )\n api_stick.allow_join_requests(True, True)\n api_stick.subscribe_stick_callback(add_new_node, CB_JOIN_REQUEST)\n else:\n if config_entry.system_options.disable_new_entities:\n _LOGGER.debug(\"Configuring stick NOT to accept any new join requests\")\n api_stick.allow_join_requests(True, False)\n else:\n 
_LOGGER.debug(\n \"Configuring stick to automatically accept new join requests\"\n )\n api_stick.allow_join_requests(True, True)\n api_stick.subscribe_stick_callback(add_new_node, CB_JOIN_REQUEST)", "title": "" }, { "docid": "dd0902eca3436193535a97efa7001828", "score": "0.49326062", "text": "def notify(self):\n logger.info('notify')\n\n for coin_name in self.data:\n for address, address_data in self.data[coin_name]['data'].items():\n \n # notify users with INOUT about IN and OUT tranasctions\n if address_data['txs']['out'] or address_data['txs']['in']:\n for user in address_data['users']['inout']: \n self.add(coin_name, user, address, address_data['txs']['out'] | address_data['txs']['in'])\n\n if address_data['txs']['out']:\n for user in address_data['users']['out']: \n self.add(coin_name, user, address, address_data['txs']['out'])\n\n if address_data['txs']['in']:\n for user in address_data['users']['in']: \n self.add(coin_name, user, address, address_data['txs']['in'])\n\n address_data['in'] = set()\n address_data['out'] = set()\n \n #print(self.senders)\n for sender in self.senders:\n sender.send()\n\n self.database.commit()\n self.last_notify = datetime.now()", "title": "" }, { "docid": "5a8a28a1bc8fbf5c81c227d65fdac7b0", "score": "0.49296218", "text": "def check_for_updated_mining_balance(self):\n # snowflake of staking account\n snowflake = str(self.stakeflake)\n # create a new user for staking stake\n # generate snowflake\n # self.check_for_user_and_make(snowflake)\n user = self.get_staking_user(snowflake)\n\n all_users = [x for x in self.get_reg_users_id() if x is not snowflake]\n\n for ids in all_users:\n if str(ids) == snowflake:\n continue\n\n # first you must get the transactions of the account holder\n transaction_list = rpc.listtransactions(str(ids), 100)\n\n for tx in transaction_list:\n # if the generated tag is true this is a stake\n if tx.get('generated') is None:\n # a stake will not affect the individual users balance\n continue\n\n txid = tx[\"txid\"]\n amount = 0.5\n confirmations = int(tx[\"confirmations\"])\n mint_status = tx[\"generated\"]\n status = self.get_transaction_status_by_txid(txid)\n\n # acreddit the staking user throughout the confirmation process\n # the stake will take longer to confirm\n snowflake_cur = snowflake\n if status == \"DOESNT_EXIST\" and mint_status is True:\n self.add_to_balance(snowflake_cur, amount)\n self.add_deposit(snowflake_cur, amount, txid, 'CONFIRMED-STAKE')\n elif status == \"DOESNT_EXIST\" and confirmations < 101:\n self.add_deposit(snowflake_cur, amount,\n txid, 'UNCONFIRMED-STAKE')\n self.add_to_balance_unconfirmed(snowflake_cur, amount)\n elif status == \"UNCONFIRMED-STAKE\" and confirmations >= 101:\n self.add_to_balance(snowflake_cur, amount)\n self.remove_from_balance_unconfirmed(snowflake_cur, amount)\n self.confirm_stake(txid)\n\n # once confirmed split the stake between users balance %", "title": "" }, { "docid": "196eddf8832f92d6972780a0a1535ece", "score": "0.49276504", "text": "def addUsersToMacfilter(self, sync=None):\n\tnew_users = self.evalUsersToAddV2()\n\tlink = self.doConnectionMacfilter()\n\tcursor = link.cursor()\n\n cursor.execute(\"\"\"DELETE FROM device WHERE vlan='1' or vlan='2' or vlan='3' or vlan='4'\"\"\")\n\n\tif sync:\n\t cursor.execute(\"\"\"SELECT id, name, mac, vlan FROM device WHERE vlan != '1' AND vlan != '2' AND vlan != '3' AND vlan != '4'\"\"\")\n\n\t no_auto = cursor.fetchall()\n\n\t print \"Buscando vlans no automáticas y agregandolas a vlan 700\"\n self.__fileLogger(\"Buscando vlans no 
automáticas y agregandolas a vlan 700\")\n\t for na in no_auto:\n\t\tcursor.execute(\"\"\"SELECT e.name FROM vlan v, esquema e WHERE e.id=v.esquema AND v.id='%s'\"\"\" % (int(na[3])))\n print \"\"\"SELECT e.name FROM vlan v, esquema e WHERE e.id=v.esquema AND v.id='%s'\"\"\" % (int(na[3]))\n self.__fileLogger(\"\"\"SELECT e.name FROM vlan v, esquema e WHERE e.id=v.esquema AND v.id='%s'\"\"\" % (int(na[3])))\n\t\tvlan_where = cursor.fetchone()\n\t\tvlan_where = vlan_where[0]\n\n\t\tcursor.execute(\"\"\"SELECT v.id FROM vlan v WHERE v.esquema='%s' AND v.name='700'\"\"\" % self.getEsquemaId(vlan_where))\n print \"\"\"SELECT v.id FROM vlan v WHERE v.esquema='%s' AND v.name='700'\"\"\" % self.getEsquemaId(vlan_where)\n self.__fileLogger(\"\"\"SELECT v.id FROM vlan v WHERE v.esquema='%s' AND v.name='700'\"\"\" % self.getEsquemaId(vlan_where))\n\t\tvlan_where = cursor.fetchone()\n\t\tvlan_where = int(vlan_where[0])\n\n\t \tcursor.execute(\"\"\"INSERT INTO device (name, mac, vlan) VALUES ('%s','%s','%s')\"\"\" % (na[1],na[2],vlan_where))\n\t\tprint \"\"\"INSERT INTO device (name, mac, vlan) VALUES (''%s','%s','%s')\"\"\" % (na[1],na[2],vlan_where)\n self.__fileLogger(\"\"\"INSERT INTO device (name, mac, vlan) VALUES (''%s','%s','%s')\"\"\" % (na[1],na[2],vlan_where))\n self.__logger(\"\"\"INSERT INTO device (name, mac, vlan) VALUES (''%s','%s','%s')\"\"\" % (na[1],na[2],vlan_where))\n\n \tfor n_user in new_users:\n\t idd = self.getEsquemaId(n_user[2])\n\t vlan_700_id = self.getVlanId(\"700\", idd)\n\t vlan_702_id = self.getVlanId(\"702\", idd)\n\n\t if n_user[1] != '':\n\t print \"INSERT INTO device (name, mac, vlan) VALUES ('%s', '%s', '%s')\" % (n_user[0], n_user[1], vlan_700_id)\n\t print \"INSERT INTO device (name, mac, vlan) VALUES ('%s', '%s', '%s')\" % (n_user[0], n_user[1], vlan_702_id)\n\n self.__logger(\"INSERT INTO device (name, mac, vlan) VALUES ('%s', '%s', '%s')\" % (n_user[0], n_user[1], vlan_700_id))\n self.__logger(\"INSERT INTO device (name, mac, vlan) VALUES ('%s', '%s', '%s')\" % (n_user[0], n_user[1], vlan_702_id))\n\n self.__fileLogger(\"INSERT INTO device (name, mac, vlan) VALUES ('%s', '%s', '%s')\" % (n_user[0], n_user[1], vlan_700_id))\n self.__fileLogger(\"INSERT INTO device (name, mac, vlan) VALUES ('%s', '%s', '%s')\" % (n_user[0], n_user[1], vlan_702_id))\n\n\t cursor.execute(\"\"\"INSERT INTO device (name, mac, vlan) VALUES (\"%s\", \"%s\", %s)\"\"\" % (n_user[0], n_user[1], vlan_700_id))\n\t cursor.execute(\"\"\"INSERT INTO device (name, mac, vlan) VALUES (\"%s\", \"%s\", %s)\"\"\" % (n_user[0], n_user[1], vlan_702_id))\n\n\tcursor.close()\n\tlink.commit()\n\tlink.close()", "title": "" }, { "docid": "c3041b5cf9734c2eddca91c8de71ad3f", "score": "0.49265733", "text": "def startCheck():\n \n sapides_ip = '192.168.0.104'\n saprouter_ip = '192.168.0.106'\n esxi_ip = '192.168.0.220'\n\n with open(\"/tmp/pythonSAP/USBConnection.txt\", \"a\") as f:\n # redirect all terminal outputs to the file\n sys.stdout = f\n # connect to the USB device\n n = 4 # try to connect to the usb evice 4 times\n while n >= 0:\n if n == 0:\n sys.exit()\n try:\n usbDevice = serial.Serial(\"/dev/ttyUSB0\", 9600)\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime())\n print(t, \"Connected to the USB\")\n break\n except:\n usbDevice = False\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime())\n message = \"Could not connect to the USB device.\"\n print(t, message)\n # if first connection attempt failed, then try again after 3 sec\n time.sleep(3)\n n-=1\n # read 
serial data ( 1 - has electricity, 0 - power off )\n # check usb signal five times\n checkPower = []\n for i in range(5):\n if \"1\" in str(usbDevice.readline()):\n checkPower.append(1)\n time.sleep(1)\n # script checks 3 previous status of the usb signal,\n # if all of those were 0 then shutdown the server\n # create file for the previous session, if it does not exist\n try:\n prevSignal = pickle.load(open('/tmp/pythonSAP/session.p', 'rb'))\n # store only one week data\n if len(prevSignal) >= 10083: # 60*24*7 + 3\n prevSignal = prevSignal[:3].copy()\n except:\n message = \"session file does not exists. will be created\"\n print(message)\n prevSignal = []\n pickle.dump(prevSignal, open('/tmp/pythonSAP/session.p', 'wb'))\n prevSignal = pickle.load(open('/tmp/pythonSAP/session.p', 'rb'))\n # if result of for loop was positive then add 1 (has an electricity)\n # to the list\n if len(checkPower) > 0:\n prevSignal.append(1)\n else:\n prevSignal.append(0)\n # update the session.p file with current session value\n pickle.dump(prevSignal, open('/tmp/pythonSAP/session.p', 'wb'))\n # check last 3 session result, if length less than 3 then\n # dont do anything\n if len(prevSignal) >= 3:\n results = prevSignal[-1:-4:-1] # get last 3 sessions\n if 1 in results:\n shutdown = False # do not do anything\n else:\n # if the result of last three sessions was 0 then\n # shutdown the system\n shutdown = True # server will be shutted down\n else:\n shutdown = False # do not do anything\n if shutdown:\n # no electricity, shutting down the SAPIdes\n os.system(\"date >> /tmp/pythonSAP/pythonLog.txt\")\n os.system(\"echo shutdown the SAP\")\n hostKey = open(\"/home/ubuntuser/SAPIdes.txt\", \"rb\").read()\n uname, passwd = 'idsadm', os.environ['SIDADM_PASS'] # set this env variable beforehand\n ipaddr, command = sapides_ip, [\n 'stopsap sapides >> /tmp/pythonSAP/pythonLog.txt',\n 'shutdown now'\n ]\n shutdownSAPIDES = shutdown_sapides(\n hostKey, uname, passwd, ipaddr, command\n )\n # write sapides shutdown result to the log\n if shutdownSAPIDES:\n t = time.strftime(\n \"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime()\n )\n print(t, \" sapides shutdown completed\")\n else:\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime()\n )\n print(t, \"Could not shutdown saprouter\") \n \"\"\"\n Use this command if you shut down the sap system through itself\n os.system(\n \"su - idsadm -c 'stopsap sapides >> /tmp/pythonSAP/pythonLog.txt'\"\n )\n \"\"\"\n # shutdown saprouter\n hostKey = open(\"/home/ubuntuser/SAPRouter.txt\", \"rb\").read()\n uname, passwd = 'root', os.environ['SAPROUTER_PASS']\n ipaddr, command = saprouter_ip, 'shutdown now'\n shutdownSaprouter = shutdown_saprouter(\n hostKey, uname, passwd, ipaddr, command\n )\n # write saprouter shutdown result to the log\n if shutdownSaprouter:\n t = time.strftime(\n \"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime()\n )\n print(t, \" saprouter shutdown completed\")\n else:\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime()\n )\n print(t, \"Could not shutdown saprouter\")\n # shutdown esxi\n hostKey = open(\"/home/ubuntuser/ESXi.txt\", \"rb\").read()\n uname, passwd = 'root', os.environ['ESXIROOT_PASS']\n ipaddr, command = esxi_ip, 'halt'\n shutdownESX = shutdown_esx(hostKey, uname, passwd, ipaddr, command)\n # write esx shutdown result to the log\n if shutdownESX:\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.localtime()\n )\n print(t, \" esx shutdown completed\")\n else:\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S 
+0000\",\n time.localtime()\n )\n print(t, \"Could not shutdown esx\")\n else:\n t = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.localtime())\n print(t, checkPower, \"-->current session\")\n os.system(\"date >> /tmp/pythonSAP/pythonLog.txt\")\n os.system(\"echo connection is OK >> /tmp/pythonSAP/pythonLog.txt\")", "title": "" }, { "docid": "bc8b256b2c1446af1888412d0d32d506", "score": "0.4924854", "text": "def update(self):\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n\n if self.active_flag:\n self.sense_and_act()", "title": "" }, { "docid": "d955800d77b8625b26a1731ff47f13f6", "score": "0.4922361", "text": "def signedOn(self):\n for c in self.factory.channel:\n self.join(c)", "title": "" }, { "docid": "d648f9c6833a96926c65a2ccb2856876", "score": "0.49087155", "text": "async def send_clients_event(self):\n\t\tself.check_and_repair_host()\n\n\t\tawait self.broadcast(self.clients_message())", "title": "" }, { "docid": "2cc1e23faf30d8557a2cf278958a21fc", "score": "0.49080792", "text": "def disactivate_devices():\n print 'desactivating devices...'\n airon = '0'\n temperature = '25'\n speed = '0'\n swingon = '0'\n lighton = '0'\n\n conf_data = {\"acuAirOn\" : airon, \"acuTemperature\" : temperature, \"acuSpeed\" : speed, \"acuSwingOn\" : swingon, \"acuLightOn\" : lighton}\n post_data(define_rest_url(), conf_data)", "title": "" }, { "docid": "1e642c96b3c4e0213a07fa492c260595", "score": "0.4905871", "text": "def kickDoge():\n # simply add the message to the queue of each tablet\n BBQcopy = copy.deepcopy(globalBBQ)\n for client in BBQcopy:\n if(client[:10]=='127.0.0.1_'):\n return #no doge-kicking on local host\n # client entry in the BBQ list\n clnInfo = BBQcopy[client]\n # commands sent to this client that were not ACK'ed\n myCMDs = clnInfo[0]\n # kick the watchdog only if the queue is empty\n if(len(myCMDs)<=0):\n addMsg(client,\"doge\")", "title": "" }, { "docid": "d0972d24c4745c7861bc8301045b3feb", "score": "0.4900015", "text": "def run(self):\n while True:\n expired_flowroutes = self.flowroutestore.get_expired_flowroutes()\n\n\n for el in expired_flowroutes:\n self.withdraw_flow_route(el)\n\n time.sleep(1)", "title": "" }, { "docid": "ebc3e611aeca52372d1cff926f9895e2", "score": "0.48886105", "text": "def _other_device(self):\n if self._task.get(\"from_device_uuid\") == None:\n return\n if self._task.get(\"from_type\") != YVOBJECT.DU:\n return\n if self._task.get(\"_user\") == None:\n return\n\n if self._task[\"conversation_type\"] == CONVERSATION_TYPE.P2S:\n if self._task[\"_user\"][\"ppcom_mobile_device_uuid\"] == None or \\\n self._task[\"_user\"][\"ppcom_browser_device_uuid\"] == None:\n return\n\n if self._task[\"conversation_type\"] == CONVERSATION_TYPE.S2S or \\\n self._task[\"conversation_type\"] == CONVERSATION_TYPE.S2P:\n if self._task[\"_user\"][\"mobile_device_uuid\"] == None or \\\n self._task[\"_user\"][\"browser_device_uuid\"] == None:\n return\n\n _device_uuid = None\n if self._task[\"conversation_type\"] == CONVERSATION_TYPE.P2S:\n _device_uuid = self._task[\"_user\"][\"ppcom_mobile_device_uuid\"]\n if self._task[\"from_device_uuid\"] == self._task[\"_user\"][\"ppcom_mobile_device_uuid\"]:\n _device_uuid = self._task[\"_user\"][\"ppcom_browser_device_uuid\"]\n else:\n _device_uuid = self._task[\"_user\"][\"mobile_device_uuid\"]\n if self._task[\"from_device_uuid\"] == self._task[\"_user\"][\"mobile_device_uuid\"]:\n _device_uuid = self._task[\"_user\"][\"browser_device_uuid\"]\n\n if _device_uuid not in 
self._devices_hash:\n _device = redis_hash_to_dict(self._redis, DeviceInfo, _device_uuid)\n if _device == None or _device[\"device_is_online\"] != True:\n return\n self._devices_hash[_device_uuid] = _device\n\n _user_uuid = self._task[\"from_uuid\"]\n if _user_uuid not in self._users_hash:\n self._users_hash[_user_uuid] = self._task[\"_user\"]\n\n _pid = self._push_to_db(_user_uuid, _device_uuid)\n self._devices_hash[_device_uuid][\"push_uuid\"] = _pid\n self._push_to_pc(_user_uuid, _device_uuid)\n return", "title": "" }, { "docid": "78aa7bfca358c749f93a4864df609f66", "score": "0.4881428", "text": "def _update_flight_status(self):\n prev_flying_enabled = self.flying_enabled\n \"\"\"\n if there is a cf instance and a qtm connection instance\n (even if they are not connected)\n then flying is enabled\n \"\"\"\n self.flying_enabled = (self._cf is not None)\n # and \\\n # self._qtm_connection is not None\n\n \"\"\"\n if the flying enabled is not the same as prev_flying enabled\" \n an additional check for security...?\n \"\"\"\n if not prev_flying_enabled and self.flying_enabled:\n self.switch_flight_mode(FlightModeStates.GROUNDED)\n t1 = threading.Thread(target=self.flight_controller)\n # t2 = threading.Thread(target=self.flight_logger)\n\n t1.start()\n # t2.start()\n\n \"\"\"\n if either the CF or QTM/Posenet Drops out. \n flight mode is disconnect\n \"\"\"\n if prev_flying_enabled and not self.flying_enabled:\n self.switch_flight_mode(FlightModeStates.DISCONNECTED)", "title": "" }, { "docid": "f49bb0873a61cdfab427bcdc9ac09650", "score": "0.48810437", "text": "def test_multiple_success_blocking_user(self):\n test_usernames = [\"foo@test.com\", \"bar@test.com\", \"baz@test.com\"]\n\n for username in test_usernames:\n user = UserFactory.create(username=username, is_active=True)\n assert BlockList.objects.all().count() == 0\n\n COMMAND.handle(\"block_users\", users=test_usernames, block_users=True)\n assert BlockList.objects.all().count() == 3", "title": "" }, { "docid": "97598556c1c14e98bc1d32a11b7cc98b", "score": "0.4871865", "text": "def allow_martians():\n command = 'sudo vtysh -c \"debug bgp allow-martians\"'\n run_command(command)", "title": "" }, { "docid": "2fffa75411a0624b0130577db4d0d486", "score": "0.4869415", "text": "def startWaitClientsReady(self):\n self.notify.debug(\"startWaitClientsReady\")\n \n self.readyToonIds.clear() # reset the list of ready toons\n \n if self._shouldBalanceTeams:\n self.balanceTeams()\n \n # put clients into new state\n self.d_broadcastSetState(\"Rules\")", "title": "" }, { "docid": "0444e5ebcd8de8592ef06fa93b08e7e3", "score": "0.48654696", "text": "def test_online_from_ids(self):\n\n self.assertEqual(models.User.online_from_ids([], self.session), [])\n\n user1 = user_factory.UserFactory()\n user2 = user_factory.UserFactory()\n\n self.assertEqual(list(models.User.online_from_ids([user1.id, user2.id], self.session)), [])\n\n self.assertEqual(list(models.User.online_from_ids([\"reg\"], self.session)), [])\n\n user1.online = True\n self.session.commit()\n\n users = models.User.online_from_ids([user1.id, user2.id], self.session)\n self.assertIn(user1, users)\n self.assertNotIn(user2, users)", "title": "" }, { "docid": "37c5d4f5f7cde9c85b7343d837efc0b6", "score": "0.4862965", "text": "def vending_machines(self):", "title": "" }, { "docid": "4dd2644070f9b5e80a70b813ca911b56", "score": "0.48598692", "text": "def ready_player(user):\n try:\n user.ready = True\n user.save()\n except User.DoesNotExist:\n raise ClientError(\"Unknown Error readying player\")\n 
return True", "title": "" }, { "docid": "a523e4c88a9a7a54ee0abd4f451bbab8", "score": "0.48585996", "text": "async def async_step_user(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n if self._async_current_entries(True):\n return self.async_abort(reason=\"single_instance_allowed\")\n\n self.hass.data.setdefault(DOMAIN, {})\n if DATA_DISCOVERY not in self.hass.data[DOMAIN]:\n self.hass.data[DOMAIN][DATA_DISCOVERY] = self.hass.async_create_task(\n async_discover_devices()\n )\n\n return self.async_show_form(step_id=\"confirm\")", "title": "" }, { "docid": "7b8524258c15c12e4a36630c3f13ca45", "score": "0.48568967", "text": "def _compute_is_user_working(self):\n for order in self:\n if order.task_timer:\n order.is_user_working = True\n else:\n order.is_user_working = False", "title": "" }, { "docid": "fa8355024a7bd5583e480d97483298d5", "score": "0.48568633", "text": "def UserDataFirstUpdated(self):\n # appKey can be overwritten by a different thread - we need the original value\n # - that's why it needs to be copied to a local variable\n # connectionURL does not change - so it does not need to be synchronized \n with userDataLock:\n if userData.GetPrinterCount() == 1:\n userData.printers[0].status = userconfig_userStatus\n else:\n for printer in userData.GetPrinters():\n if printer.id in userconfig_printerStatus:\n printer.status = userconfig_printerStatus[printer.id]\n if userconfig_userStatus == 'on':\n UserOnline(self.originalAppKey, config.connectionURL, config.version, self.UserDataUpdated)\n else:\n UserOffline(self.originalAppKey, config.connectionURL, config.version, self.UserDataUpdated)", "title": "" }, { "docid": "b0626375449366f0277f98011bd0c8ce", "score": "0.48559514", "text": "def test_network(self):\n self.net.pingAll()\n self.pingAllV6()\n for link in self.switch_matrix:\n s1, s2 = link[0], link[2]\n output(f\"Setting link between {s1} and {s2} down\\n\")\n self.net.configLinkStatus(s1, s2, \"down\")\n self.net.pingAll()\n self.pingAllV6()\n output(f\"Setting link between {s1} and {s2} up\\n\")\n self.net.configLinkStatus(s1, s2, \"up\")\n self.net.pingAll()\n self.pingAllV6()", "title": "" }, { "docid": "6ddd3497b04899841db5b4be4a9acbec", "score": "0.48557994", "text": "def check(self):\n for watchers in self._addrs.values():\n watchers.check()", "title": "" }, { "docid": "d4a7701f25e3ce721e26d2e27d503ebd", "score": "0.48530692", "text": "def run(self):\n for data in self.__unpack__():\n for _, user in self.users.items():\n thread.start_new_thread(self.send, (user, data))", "title": "" }, { "docid": "60b197594f06f2a0aef7e2b04036ff7c", "score": "0.48493338", "text": "def _CheckUsersAFF4(self, client, expected_users):\n summary = client.GetSummary()\n self.assertCountEqual([x.username for x in summary.users], expected_users)\n\n self.assertCountEqual(client.Get(client.Schema.USERNAMES), expected_users)\n\n # Check kb users\n kbusers = [\n x.username for x in client.Get(client.Schema.KNOWLEDGE_BASE).users\n ]\n self.assertCountEqual(kbusers, expected_users)", "title": "" }, { "docid": "03680a41d2427d962dc7321a10a5d9e7", "score": "0.4848555", "text": "def test_full_tether_startup_2G_multiple_clients(self):\n asserts.skip_if(not hasattr(self, 'arduino_wifi_dongles'),\n \"No wifi dongles connected. 
Skipping test\")\n self.validate_full_tether_startup(WIFI_CONFIG_APBAND_2G,\n test_clients=True)", "title": "" }, { "docid": "ef8c0796e4ffb48df796d4fdd61d12e7", "score": "0.48462287", "text": "def __send_request(self):\n if self.is_start == True:\n for device_i in self.devices:\n device_i.getPosition()\n device_i.getCurrent()\n else:\n print(\"Stop send request\")\n return", "title": "" }, { "docid": "2f1c2d8089c4def95c957da8b1f97c8b", "score": "0.4843274", "text": "def turn_on(self, **kwargs):\n self._relay.turn_on()", "title": "" }, { "docid": "b31b08dba7c4d38eb57faca636868bc3", "score": "0.48431355", "text": "def process(self, user_id: str, all_days: List[str]):\n if self.CC is not None:\n self.CC.logging.log(\"Processing PhoneFeatures\")\n streams = self.CC.get_user_streams(user_id)\n self.process_data(user_id, streams, all_days)", "title": "" }, { "docid": "0bccf26fc6b95b971c970bcce2641a69", "score": "0.4840154", "text": "def check_for_match(self):\n\n #first of all check if we do not have a match running already:\n if self.get_match():\n return False\n\n # Range in Metres:\n range = Decimal(settings.MATCH_RANGE)\n # convert to lat/long degrees (approx):\n range_lat = Decimal(range / 111000)\n # todo: implement calculation by arc projection or other, better approximation\n # (current value is only correct for latitude of about 50)\n range_long = Decimal(range / 71000)\n\n # everybody within a square of range*2 side length:\n lat_min = self.pos_lat - range_lat\n lat_max = self.pos_lat + range_lat\n long_min = self.pos_long - range_long\n long_max = self.pos_long + range_long\n in_range = User.objects.filter(pos_lat__range=(lat_min, lat_max)).filter(pos_long__range=(long_min, long_max))\n #todo ... whose position is not obsolete:\n #in_range = in_range.filter(pos_time__gt=(datetime.now()-timedelta(hours=1)))\n # ... who is not this user:\n in_range = in_range.exclude(device_id=self.device_id)\n EventLog().add_event(\n body='ppl in range: %s' % in_range.all(),\n where='models.User.check_for_match, line=200')\n # ... who shares one of our magnets:\n soulmates = in_range.filter(magnets__in = self.magnets.all()).distinct()\n EventLog().add_event(\n body='soulmates: %s' % soulmates.all(),\n where='models.User.check_for_match, line=205')\n # ... who does not have a running match with this user already: (not necessary as we may only have one match at a time anyway)\n #soulmates_unmatched = soulmates\n #for match in self.matches.exclude(status__gte=90):\n # soulmates_unmatched = soulmates_unmatched.exclude(matches=match)\n #print 'unmatched soulmates: %s' % soulmates_unmatched.all()\n #exclude the matches that timed out less than 60 minutes ago:\n #status:90 last_activity < jetzt-60m\n for match in self.matches.filter(status=90).exclude(last_activity__lt=(datetime.now()-timedelta(seconds=settings.MATCH_QUARANTINE))):\n soulmates = soulmates.exclude(matches=match)\n EventLog().add_event(\n body='unmatched soulmates not in quarantine: %s' % soulmates.all(),\n where='models.User.check_for_match, line=218')\n\n # now trigger a matching Process for those guys:\n # actually only for one of them. 
later we should add some better priorization:\n matched = False\n for mate in soulmates.all():\n #if soulmates_unmatched.all().count()>=1:\n # mate = soulmates_unmatched.all()[0]\n # does the other user already have a match?\n if mate.get_match():\n continue\n EventLog().add_event(\n body='initiate location based match',\n where='models.User.check_for_match, line=231')\n # get the shared magnets:\n\n magnets = Magnet.objects.filter(users=self)\n magnets = magnets.filter(users=mate)\n #soulmates = in_range.filter(magnets__in = self.magnets.all()).distinct()\n #search_users= [self, mate]\n #magnets = Magnet.objects.filter(users=self, users=mate)\n #if magnets.all().count()>=1:\n #cont = False\n for magnet in magnets.all():\n # do we already have a running match for this magnet? (not necessary as we may only have one match at a time anyway)\n #current_matches = self.matches.exclude(status__gte=90)\n #for current_match in current_matches:\n # if current_match.magnet == magnet:\n # cont = True\n #if cont:\n # cont = False\n # continue\n\n match = Match()\n #todo: do they share more than one magnet?\n match.magnet=magnet\n match.save()\n match.users=(self, mate)\n match.initiate()\n matched=True\n break\n if matched:\n break", "title": "" }, { "docid": "f95ef9fa110ea765b37a582c5202537c", "score": "0.48318437", "text": "def update(self):\n self.isOn = pu.io.ping(self.ip)\n self.isCamera = self.isOn #when the device is found, assume glasses are present - need to make a more robust verification (change pxpinit.py on the glasses pi)", "title": "" }, { "docid": "55f7243e52b8307bf6176a15795d3438", "score": "0.48228908", "text": "def assign_task_to_server(self, users) -> None:\n\n for i in range(users):\n for server in self.servers:\n if server.available():\n server.connect(User(self.TTASK))\n break\n else:\n server = self.create_a_server()\n server.connect(User(self.TTASK))\n self.servers.append(server)", "title": "" }, { "docid": "eb254bf30fa7e3eaa8b86194c8da6ece", "score": "0.4821357", "text": "def update_timer(self):\n cooled_down_users = []\n \n for user in self.users:\n if time() - self.users[user] < self.REPEAT_TIME:\n cooled_down_users.append(user)\n for user in cooled_down_users:\n del self.users[user]", "title": "" }, { "docid": "b9e60703faf323b3e3e47443ac106890", "score": "0.48211575", "text": "def run(conn, user_id):\n\n # each user starts with 100 funds\n conn.hset(\"user:%s\" % user_id, \"funds\", 100)\n\n # each user starts with 10 items\n conn.sadd(\"user:%s:inventory\" % user_id, *range(1, 11))\n\n while True:\n\n # trying to list a random item\n item_id = conn.srandmember(\"user:%s:inventory\" % user_id)\n price = random.randint(1, 100)\n ret = marketplace.list_item(conn, item_id, user_id, price)\n if ret:\n print('user:{} listed item:{} with price {}'.format(user_id, item_id, price))\n elif ret is False:\n print('user:{} could NOT list item:{} with price {}'.format(user_id, item_id, price))\n\n time.sleep(random.randint(1, 10) / 10)\n\n # trying to purchase a random item\n item_id = random.randint(1, 10)\n seller_id = random.randint(1, 10)\n price = random.randint(1, 100)\n ret = marketplace.purchase_item(conn, user_id, item_id, seller_id, price)\n if ret:\n print('user:{} purchased item:{} for price {} from user:{}'.format(user_id, item_id, price, seller_id))\n elif ret is False:\n print('user:{} could NOT purchase item:{} for price {} from user:{}'.format(user_id, item_id, price, seller_id))\n\n time.sleep(random.randint(1, 10) / 10)", "title": "" }, { "docid": 
"d8dca3f799488eeeb407924e8cb25397", "score": "0.48209473", "text": "def _publish_states_on(self, client, user_data, groups):\n now = time()\n for label, detections in groups.items():\n reported_state = self.__states[label]\n if not reported_state.state or (now - reported_state.when) >= 10:\n reported_state.state = True\n reported_state.when = now\n client.publish(topic=\"{}/detection/{}/state\".format(user_data.topic, label), payload='ON', qos=1,\n retain=False)", "title": "" }, { "docid": "fb3db2572e7c19f142cf78b4ee8d7951", "score": "0.48183107", "text": "def _trigger_scan_if_needed(self, item):\n while gtk.events_pending():\n gtk.main_iteration()\n if item.state != gtk.STATE_PRELIGHT:\n return True\n wireless.Scan(False)\n return False", "title": "" }, { "docid": "d32d0ab6a824988ec4805a32136acf2e", "score": "0.4813113", "text": "def setup_devices(self, devices):\n\n # first device will make all the required setup\n if self.device_id == 0:\n barrier = ReusableBarrierDevices(len(devices))\n for dev in devices:\n dev.barrier = barrier", "title": "" }, { "docid": "f4c15cec8d9f97326774714433900196", "score": "0.48126456", "text": "def run_simulation(self):\n \n will_proceed = True\n while will_proceed:\n user_option = user_interface.simulation_main_menu()\n if user_option == 0:\n self.soda_machine.begin_transaction(self.customer)\n elif user_option == 1:\n self.customer.check_coins_in_wallet()\n elif user_option == 2:\n self.customer.check_backpack()\n elif user_option==3:\n will_proceed = False\n else:\n self.run_simulation()", "title": "" }, { "docid": "7bea96f8656dcf66102a7f53a7691f73", "score": "0.48035726", "text": "def flySuits(self):\n for suit in self.suitList:\n if suit.pathState == 1:\n suit.flyAwayNow()", "title": "" }, { "docid": "939e63271c73fc1769d313a01b735d2f", "score": "0.48025277", "text": "def setUp(self):\n self.users = list()\n self.user_flight_periods = dict()\n self.base_time = timezone.now().replace(\n hour=0, minute=0, second=0, microsecond=0)\n for (username, logs, periods) in TESTDATA_TAKEOFFORLANDINGEVENT:\n # Create user\n user = User.objects.create_user(\n username, 'testemail@x.com', 'testpass')\n user.save()\n # Create log events\n for (time_offset, uas_in_air) in logs:\n event = TakeoffOrLandingEvent()\n event.user = user\n event.timestamp = self.base_time + datetime.timedelta(\n seconds = time_offset)\n event.uas_in_air = uas_in_air\n event.save()\n # Create expected time periods\n user_periods = self.user_flight_periods.setdefault(user, list())\n for (time_start, time_end) in periods:\n if time_start is not None:\n time_start = self.base_time + datetime.timedelta(\n seconds = time_start)\n if time_end is not None:\n time_end = self.base_time + datetime.timedelta(\n seconds = time_end)\n user_periods.append((time_start, time_end))", "title": "" }, { "docid": "067df71a19b3a1037e997f419be89e26", "score": "0.47837654", "text": "def check_sites(self):\n # get the current time for status updates\n in_stock_flag = False\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n\n self.browser.get(\"https://www.target.com/p/playstation-5-digital-edition-console/-/A-81114596#lnk=sametab\")\n # self.browser.get(\"https://www.target.com/p/dualsense-wireless-controller-for-playstation-5/-/A-81114477\") # controller link\n\n time.sleep(7) # wait 5 seconds OR Check BestBuy\n\n # print(self.check_bestBuy())\n\n stock = self.browser.find_element_by_xpath('//*[@id=\"viewport\"]/div[4]/div/div[2]/div[3]/div[1]/div/div/div')\n in_stock = 
stock.get_attribute('innerHTML')\n keyword = \"Out of stock in stores near you\"\n if keyword in in_stock:\n in_stock_flag = False\n else:\n in_stock_flag = True\n\n\n if not in_stock_flag and self.check_bestBuy() == False:\n print(\"Status: Sold Out \\n\"\n \"Last Checked: \"+ current_time )\n time.sleep(15)\n self.browser.refresh()\n self.check_sites()\n\n # Just target check (uncomment only for target)\n # if not in_stock_flag:\n # print(\"Status: Sold Out \\n\"\n # \"Last Checked: \"+ current_time )\n # time.sleep(15)\n # self.browser.refresh()\n # self.check_sites()\n\n elif self.check_bestBuy():\n link = \"https://www.bestbuy.com/site/sony-playstation-5-digital-edition-console/6430161.p?skuId=6430161\"\n msg = (\"Status: PS5 is IN STOCK @ BestBuy\\n\"\n \"Last Checked: \"+ current_time +\"\\n\" + link)\n self.send_sms(msg)\n self.browser.close()\n\n else:\n msg = (\"Status: PS5 is IN STOCK @ Target\\n\"\n \"Last Checked: \"+ current_time +\"\\n\" + self.browser.current_url)\n self.send_sms(msg)\n self.browser.close()", "title": "" }, { "docid": "768fd87cc1b5ca1b63505acb76fce245", "score": "0.47801176", "text": "def manual_trades(self) -> bool:", "title": "" }, { "docid": "10866ebee3a02a63ad2caa8de092ff40", "score": "0.47779983", "text": "def test_multiple_success(self):\n user_names = [\"foo\", \"bar\", \"baz\"]\n\n for user_name in user_names:\n user = UserFactory.create(username=user_name, is_active=True)\n user.profile.email_optin = True\n user.profile.save()\n UserSocialAuthFactory.create(user=user, provider='not_edx')\n for _ in range(TOTAL_PROGRAMS):\n ProgramEnrollmentFactory.create(user=user)\n\n assert user.is_active is True\n assert user.profile.email_optin is True\n assert UserSocialAuth.objects.filter(user=user).count() == 1\n assert ProgramEnrollment.objects.filter(user=user).count() == TOTAL_PROGRAMS\n\n self.command.handle(\"retire_users\", users=user_names)\n\n for user_name in user_names:\n user = User.objects.get(username=user_name)\n assert user.is_active is False\n assert user.profile.email_optin is False\n assert UserSocialAuth.objects.filter(user=user).count() == 0\n assert ProgramEnrollment.objects.filter(user=user).count() == 0", "title": "" }, { "docid": "7e7b31d90a8c8763cbc5f153fb42a1b9", "score": "0.4776173", "text": "def start_lockdown():\n question(msg[\"public_emergency\"])\n question(msg[\"threatening_life\"])\n question(msg[\"required\"])\n question(msg[\"consistent\"])\n question(msg[\"article\"], 1)", "title": "" }, { "docid": "4bb9c9b92fecb21b2105b9851941490d", "score": "0.47705287", "text": "async def ready(self, ctx):\n if not await self.check_pm(ctx.message):\n return\n if self.game_status == 0:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"There is no active game. Start a game by running `[p]avalon start`.\"))\n return\n if ctx.author.id in self.ready_players:\n await ctx.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You are already ready.\"))\n return\n self.ready_players.append(ctx.author)\n for player in self.players:\n await player.send(\"{0} has readied up! 
({1}/{2})\".format(self.displayname(ctx.author), len(self.ready_players), len(self.players)))\n\n if set(self.ready_players) == set(self.players):\n if len(self.players) < self.min_players:\n for player in self.players:\n await player.send(embed=self.make_embed(\"Avalon\", 0xff4055, \"Error\", \"You must have 5 players to start the game.\"))\n else:\n self.game_status = 2\n await self.assign_roles()\n #self.bot.loop.create_task(self.game_loop())", "title": "" }, { "docid": "c058b5a4209a4da15a629c83e8876a47", "score": "0.4765665", "text": "def fetch_private_phone_number_to_user_mapping(self, users: List[UserInfo]):\n\n # Get all target phone numbers of all users and link them to the users id\n\n # Device\n # - Type1: External phone with a phone number\n # - Type2: Sipgate sim card\n # - Type3: Sipgate VOIP\n\n target_phone_numbers: Dict = {}\n for user in users: # todo get users\n response = self.__request('get', '/' + user.id + '/devices')\n for device_dict in response['items']:\n device_phone_number = device_dict.get('number')\n if not device_phone_number:\n self.__logger.info(f\"device {device_dict['id']} of user {user.lastname} ({user.id}) does not have a linked phone number\")\n continue\n\n if device_phone_number in target_phone_numbers:\n if target_phone_numbers[device_phone_number]['userId'] != user.id:\n self.__logger.warning(f\"Number {device_phone_number} from user {user.id} was already target number of user {target_phone_numbers[device_phone_number]}. Skipping.\")\n else:\n self.__logger.info(f\"Number {device_phone_number} from user {user.id} is a duplicate.\")\n continue\n\n # saving userId and all active phone line ids under the target number. The phone lines are sorted by id (in length and value)\n target_phone_numbers[device_phone_number] = {'userId': user.id, 'activePhonelines': sorted(\n device_dict['activePhonelines'], key=lambda ps: (len(ps['id']), ps['id']))}\n\n self.__logger.debug(f\"Dictionary with target_numbers with their user's ids and active phone lines: {target_phone_numbers}\")\n \n return target_phone_numbers", "title": "" }, { "docid": "a1301d0a17014fbc148701b69dadf486", "score": "0.47579986", "text": "def check_for_activity(self):\n all_txn_data = fetch_bcypher_txn_data_from_address(self.b58_address,\n merchant=self.merchant, forwarding_obj=self)\n\n txn_data = filter_bcypher_txns(\n forwarding_address=self.b58_address,\n destination_address=self.destination_address.b58_address,\n txn_data=all_txn_data)\n\n for address, satoshis, confirmations, txn_hash, confidence, preference in txn_data:\n if address == self.b58_address:\n ForwardingAddress.handle_forwarding_txn(\n input_address=address,\n satoshis=satoshis,\n num_confirmations=confirmations,\n input_txn_hash=txn_hash,\n confidence=confidence,\n preference=preference)\n else:\n ForwardingAddress.handle_destination_txn(\n forwarding_address=self.b58_address,\n destination_address=address,\n satoshis=satoshis,\n num_confirmations=confirmations,\n destination_txn_hash=txn_hash)", "title": "" }, { "docid": "c9211889be5c1df366dba5422b6d3da9", "score": "0.47578338", "text": "def run_one_timestep(self):\r\n\r\n # 1. Update all sensobs\r\n for sensob in self.sensobs:\r\n sensob.update()\r\n\r\n # 2. Update all behaviors\r\n for behavior in self.behaviors:\r\n behavior.update()\r\n\r\n # 3. 
Invoke the arbitrator\r\n motor_recommendation = self.arbitrator.choose_action()\r\n # Remember halt request\r\n\r\n #if motor_recommendation == \"S\":\r\n # self.halt_request = False\r\n #else: self.halt_request = True\r\n\r\n #pdb.set_trace()\r\n self.motobs.update(motor_recommendation)\r\n # 4. Update motobs\r\n # for motob in self.motobs:\r\n # motob.update(motor_recommendation)\r\n\r\n # 5. Wait\r\n start_of_wait = time()\r\n while time() <= start_of_wait + WAITING_TIME:\r\n # Wait, and let the motors do something\r\n pass\r\n\r\n # 6. Reset sensobs\r\n for sensob in self.sensobs:\r\n sensob.reset()", "title": "" }, { "docid": "ff7fc8e104adcf93cf41670698e4ba67", "score": "0.47573698", "text": "def refresh_devices(self, w):\n for item in self.coherence.ssdp_server.known.values():\n if item['MANIFESTATION'] == 'remote':\n self.coherence.ssdp_server.unRegister(item['USN'])\n self.coherence.msearch.double_discover()", "title": "" }, { "docid": "e9c865ad58881ff3307faf713744f1f4", "score": "0.47495377", "text": "def task_connect_wifi(ser):\n\n print_step(\"\\nSetting up WiFi...\\n\")\n\n while True:\n # Look up wlan interface\n result = cmdmule_command(ser, \"ifconfig -a | grep -o \\\"wlan[0-9]\\\"\")\n if result['returncode'] != 0:\n print_warning(\"WiFi dongle not found. Please connect the WiFi dongle and press enter.\")\n print_warning(\"Alternatively, type \\\"skip\\\" to use Ethernet.\")\n if raw_input() == \"skip\":\n break\n continue\n wlan_interface = result['stdout'].strip()\n\n logger.debug(\"[connect_wifi] wlan interface is %s\", wlan_interface)\n\n # Start wpa_supplicant in control interface mode, if it's not already running\n if cmdmule_command(ser, \"sudo wpa_cli status\")['returncode'] != 0:\n result = cmdmule_command(\n ser, \"sudo wpa_supplicant -B -i {0} -D wext -C /run/wpa_supplicant\".format(wlan_interface))\n if result['returncode'] != 0:\n raise Exception(\"Failed to start wpa_supplicant: {}\".format(result['stderr']))\n\n # Disable the active network, so we can scan\n cmdmule_command(ser, \"sudo wpa_cli disable_network 0\")\n time.sleep(1)\n\n # Scan for wireless networks\n print_step(\"Scanning for WiFi networks...\")\n cmdmule_command(ser, \"sudo wpa_cli scan\")\n # Give scan some time to complete\n time.sleep(3)\n\n # Get latest scan results\n results = cmdmule_command(ser, \"sudo wpa_cli scan_results\")\n\n # Split wireless networks results by newline\n wifi_networks = results['stdout'].strip().split(\"\\n\")\n\n # Check we got at least one wireless network\n if len(wifi_networks) < 3:\n raise Exception(\"No wireless networks found.\")\n\n # Strip off heading\n wifi_networks = wifi_networks[2:]\n # Split network info\n wifi_networks = [x.split(\"\\t\") for x in wifi_networks]\n # Format network info into dictionary\n wifi_networks = [\n {\n \"bssid\": n[0],\n \"frequency\": n[1],\n \"strength\": n[2],\n \"flags\": n[3],\n \"ssid\": n[4] if len(n) > 4 else \"\"\n } for n in wifi_networks\n ]\n # Filter out non-empty ssid\n wifi_networks = list(filter(lambda network: network[\"ssid\"], wifi_networks))\n # Filter out subsequent duplicate ssids\n unique_wifi_networks = []\n for network in wifi_networks:\n if network[\"ssid\"] not in [n[\"ssid\"] for n in unique_wifi_networks]:\n unique_wifi_networks.append(network)\n\n # Print list of wireless networks for user to choose\n print(\"\\nWiFi Networks\")\n print(\"\")\n for i in range(len(unique_wifi_networks)):\n print(\" {:>2} - {}\".format(i + 1, unique_wifi_networks[i]['ssid']))\n print(\"\")\n\n # Get wireless 
network information from user\n while True:\n print_step(\"Please choose the same WiFi network as your personal computer.\")\n print_step(\"\")\n wifi_network_index = raw_input(\" WiFi Network Number: \")\n password = getpass.getpass(\" WiFi WPA1/2 Passphrase (if any): \")\n\n # Convert network index to int\n try:\n wifi_network_index = int(wifi_network_index) - 1\n except ValueError:\n print_error(\"\\nInvalid network number!\\n\")\n continue\n\n # Check network index is in range\n if wifi_network_index not in range(len(unique_wifi_networks)):\n print_error(\"\\nNetwork number out of bounds!\\n\")\n continue\n\n break\n\n # Unescape any characters in the SSID (escaped by wpa_cli)\n ssid = re.sub(r\"\\\\(.)\", \"\\g<1>\", unique_wifi_networks[wifi_network_index]['ssid'])\n # Encode SSID into a hex string\n ssid = codecs.encode(ssid.encode('utf-8'), 'hex_codec').decode()\n\n # Prepare wpa_supplicant configuration with or without WPA enabled\n if (len(password) > 0):\n wpa_supplicant_conf = \"network={{\\n\\tssid={}\\n\\tpsk=\\\"{}\\\"\\n}}\\n\".format(ssid, password)\n else:\n wpa_supplicant_conf = \"network={{\\n\\tssid={}\\n\\tkey_mgmt=NONE\\n\\tauth_alg=OPEN\\n}}\\n\".format(ssid)\n # Create /etc/wpa_supplicant/wpa_supplicant.conf\n wpa_supplicant_conf = base64.b64encode(wpa_supplicant_conf.encode()).decode()\n cmdmule_command(\n ser, \"echo {} | base64 -d | sudo tee /etc/wpa_supplicant/wpa_supplicant.conf\".format(wpa_supplicant_conf))\n\n # Terminate wpa_supplicant that we used for scanning\n cmdmule_command(ser, \"sudo wpa_cli terminate\")\n\n # Decrease dhclient.conf timeout from 60 seconds to 15 seconds\n cmdmule_command(ser, \"sudo sed -i 's/#timeout 60;/timeout 15;/' /etc/dhcp/dhclient.conf\")\n\n # Bring up WiFi interface\n print_step(\"\\nConnecting WiFi...\")\n cmdmule_command(ser, \"sudo ifdown {}\".format(wlan_interface))\n cmdmule_command(ser, \"sudo ifup {}\".format(wlan_interface))\n\n # Check carrier status\n print_step(\"\\nChecking WiFi connectivity...\")\n result = cmdmule_command(ser, \"cat /sys/class/net/{}/carrier\".format(wlan_interface))\n if result['returncode'] != 0 or int(result['stdout'].strip()) != 1:\n print_error(\"Error: Failed to associate with WiFi access point.\")\n continue\n logger.debug(\"[connect_wifi] carrier is up\")\n\n # Check IP status\n result = cmdmule_command(ser, \"ip addr show {} | grep \\\"inet \\\"\".format(wlan_interface))\n if result['returncode'] != 0:\n print_error(\"Error: Failed to get an IP address.\")\n continue\n logger.debug(\"[connect_wifi] interface has ip address\")\n\n break", "title": "" }, { "docid": "95c380f042247bdb9715d4feed3450c5", "score": "0.47409734", "text": "async def get_user_ids_changed(\n self, user_id: str, from_token: StreamToken\n ) -> JsonDict:\n\n set_tag(\"user_id\", user_id)\n set_tag(\"from_token\", str(from_token))\n now_room_key = self.store.get_room_max_token()\n\n room_ids = await self.store.get_rooms_for_user(user_id)\n\n changed = await self.get_device_changes_in_shared_rooms(\n user_id, room_ids, from_token\n )\n\n # Then work out if any users have since joined\n rooms_changed = self.store.get_rooms_that_changed(room_ids, from_token.room_key)\n\n member_events = await self.store.get_membership_changes_for_user(\n user_id, from_token.room_key, now_room_key\n )\n rooms_changed.update(event.room_id for event in member_events)\n\n stream_ordering = from_token.room_key.stream\n\n possibly_changed = set(changed)\n possibly_left = set()\n for room_id in rooms_changed:\n # Check if the forward 
extremities have changed. If not then we know\n # the current state won't have changed, and so we can skip this room.\n try:\n if not await self.store.have_room_forward_extremities_changed_since(\n room_id, stream_ordering\n ):\n continue\n except errors.StoreError:\n pass\n\n current_state_ids = await self._state_storage.get_current_state_ids(\n room_id, await_full_state=False\n )\n\n # The user may have left the room\n # TODO: Check if they actually did or if we were just invited.\n if room_id not in room_ids:\n for etype, state_key in current_state_ids.keys():\n if etype != EventTypes.Member:\n continue\n possibly_left.add(state_key)\n continue\n\n # Fetch the current state at the time.\n try:\n event_ids = await self.store.get_forward_extremities_for_room_at_stream_ordering(\n room_id, stream_ordering=stream_ordering\n )\n except errors.StoreError:\n # we have purged the stream_ordering index since the stream\n # ordering: treat it the same as a new room\n event_ids = []\n\n # special-case for an empty prev state: include all members\n # in the changed list\n if not event_ids:\n log_kv(\n {\"event\": \"encountered empty previous state\", \"room_id\": room_id}\n )\n for etype, state_key in current_state_ids.keys():\n if etype != EventTypes.Member:\n continue\n possibly_changed.add(state_key)\n continue\n\n current_member_id = current_state_ids.get((EventTypes.Member, user_id))\n if not current_member_id:\n continue\n\n # mapping from event_id -> state_dict\n prev_state_ids = await self._state_storage.get_state_ids_for_events(\n event_ids,\n await_full_state=False,\n )\n\n # Check if we've joined the room? If so we just blindly add all the users to\n # the \"possibly changed\" users.\n for state_dict in prev_state_ids.values():\n member_event = state_dict.get((EventTypes.Member, user_id), None)\n if not member_event or member_event != current_member_id:\n for etype, state_key in current_state_ids.keys():\n if etype != EventTypes.Member:\n continue\n possibly_changed.add(state_key)\n break\n\n # If there has been any change in membership, include them in the\n # possibly changed list. 
We'll check if they are joined below,\n # and we're not toooo worried about spuriously adding users.\n for key, event_id in current_state_ids.items():\n etype, state_key = key\n if etype != EventTypes.Member:\n continue\n\n # check if this member has changed since any of the extremities\n # at the stream_ordering, and add them to the list if so.\n for state_dict in prev_state_ids.values():\n prev_event_id = state_dict.get(key, None)\n if not prev_event_id or prev_event_id != event_id:\n if state_key != user_id:\n possibly_changed.add(state_key)\n break\n\n if possibly_changed or possibly_left:\n possibly_joined = possibly_changed\n possibly_left = possibly_changed | possibly_left\n\n # Double check if we still share rooms with the given user.\n users_rooms = await self.store.get_rooms_for_users(possibly_left)\n for changed_user_id, entries in users_rooms.items():\n if any(rid in room_ids for rid in entries):\n possibly_left.discard(changed_user_id)\n else:\n possibly_joined.discard(changed_user_id)\n\n else:\n possibly_joined = set()\n possibly_left = set()\n\n result = {\"changed\": list(possibly_joined), \"left\": list(possibly_left)}\n\n log_kv(result)\n\n return result", "title": "" }, { "docid": "e25744b2c82cce92341ddb81eaa6975e", "score": "0.4739705", "text": "def eventcheckin():", "title": "" }, { "docid": "e295a73d10dced2ad5b449f1205123c4", "score": "0.47381276", "text": "def switch_users(self, new_user):\n dict.save()\n self.current_user = dict.login(new_user)", "title": "" } ]
b73b6734c98ffd85b806abcda78459bc
First enforce basic point-to-point deps (in base class), then call ComputeAtStoreAtParser to normalize schedule.
[ { "docid": "b9d6a54d54e6b5e3f255a3b77f7a7db0", "score": "0.586543", "text": "def normalize(self, cfg):\n super(HalideComputeAtScheduleParameter, self).normalize(cfg)\n cfg[self.name] = ComputeAtStoreAtParser(cfg[self.name],\n self.post_dominators).tokens", "title": "" } ]
[ { "docid": "45a281a86547359deb6a5def2862f653", "score": "0.5088013", "text": "def update(self):\n try:\n soup = self._fetch_raw_train_status() # The raw\n self.schedule = self._create_trip_struct(soup) # The struct\n \n if self.metadata:\n self._generate_properties(self.schedule) # Generate has_arrived, has_departed, ...\n self.schedule = self._adjust_day_difference(self.schedule) # Adjust days if necessary\n self._calculate_time_deltas(self.schedule) # Calculate the misc. times (left, since departure, late, early)\n except TripNotFoundError, e:\n raise\n except Exception, e:\n raise", "title": "" }, { "docid": "d9643a71def81e318ae05e27ca3b81d3", "score": "0.50397015", "text": "def __init__(self, points):\r\n\r\n # STUDENT CODE HERE\r\n\r\n # Target acceleration between each waypoint:\r\n self.const_a = 1.5 # m/s^2\r\n\r\n # Fit 3D lines in between each waypoint\r\n\r\n self.waypoints = points\r\n\r\n N = self.waypoints.shape[0]\r\n dist = np.zeros((N-1,))\r\n self.unit_vec = np.zeros((N-1,3))\r\n self.times = np.zeros((N,))\r\n for i in range(N-1):\r\n # Loop thru N waypoints and get distance between each point.\r\n dist[i] = np.linalg.norm(np.array([points[i][0] - points[i+1][0],\r\n points[i][1] - points[i+1][1],\r\n points[i][2] - points[i+1][2]]))\r\n\r\n # Also save the unit vector corresponding to each distance\r\n if dist[i] != 0:\r\n self.unit_vec[i,:] = (points[i+1,:] - points[i,:])/dist[i]\r\n else:\r\n self.unit_vec[i,:] = np.zeros((3,))\r\n\r\n # Compute times between each node given a constant acceleration\r\n self.times[i+1] = self.times[i] + np.sqrt(4*dist[i]/self.const_a)", "title": "" }, { "docid": "65e936aa1ad3de638b6ac5370f9ab323", "score": "0.49987358", "text": "def _make_local_points(self):\n raise NotImplementedError()", "title": "" }, { "docid": "85a35ecc14a1b656d40b8309ccd39297", "score": "0.4908573", "text": "def _gather_points(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "70ad044d0b02148bcc19f3e569a96b59", "score": "0.4901549", "text": "def _build_points(self, xs, ys):\n pass", "title": "" }, { "docid": "70ad044d0b02148bcc19f3e569a96b59", "score": "0.4901549", "text": "def _build_points(self, xs, ys):\n pass", "title": "" }, { "docid": "ce67c42652c4e8794414dd8ecbddab85", "score": "0.48612612", "text": "def __process_dates(self):\n if self._interval is not None:\n # interval has been set, meaning that we are using a different\n # time then the one directly fed as an argument\n self._config.debug(\"Working with age %s\" % (self._interval,))\n self.__targets = list(filter(self.__right_age, self.__targets))\n self._config.debug(\"Parsed ages, servers remaining: \")\n self.__debug_targets()\n self._interval = None\n elif self.__age is not None:\n self._config.debug(\"Parsing dates\")\n self._interval = self.parse_interval(self.__age)\n self._config.debug(\"Working with age %s\" % (self._interval,))\n self.__targets = list(filter(self.__right_age, self.__targets))\n self._config.debug(\"Parsed ages, servers remaining: \")\n self.__debug_targets()\n else:\n self._config.debug(\"No age provided\")", "title": "" }, { "docid": "5d3b018ee3ab1bc2c8e8c6dac70b1171", "score": "0.48449504", "text": "def __init__(self, waypoints, ellps='WGS84'):\n \n self.waypoints = waypoints\n self.ellps = ellps\n self.geod = pyproj.Geod(ellps = ellps)\n \n # create line segments from the waypoints\n self.segments = [_Segment(self.waypoints[i], self.waypoints[i+1], self.geod)\n for i in range(len(self.waypoints) - 1)]\n \n self.length = sum([seg.length for seg in 
self.segments])\n self.first_time = self.waypoints[0]['time']\n self.last_time = self.waypoints[-1]['time']", "title": "" }, { "docid": "fbfeb24bbbc4ac7b3c60be4ad228c5e1", "score": "0.48379755", "text": "def _compute_point(dist):\n pass", "title": "" }, { "docid": "fb0a743c056d957cce1e3ae3a77519b5", "score": "0.48212296", "text": "def _calc_feature_points(self):\n\traise NotImplementedError", "title": "" }, { "docid": "fb0a743c056d957cce1e3ae3a77519b5", "score": "0.48212296", "text": "def _calc_feature_points(self):\n\traise NotImplementedError", "title": "" }, { "docid": "b3d8cc86b318125bf8516cc05142b27a", "score": "0.47953585", "text": "def propose_location(self, pars):\n\n self.N_hps = pars.Xt.shape[1]\n min_val = 1\n min_x = None\n\n self.gpr = pars.gpr\n self.Xt = pars.Xt\n\n self.discrete_values = None\n\n # # Find the best optimum by starting from n_restart different random points.\n # Xs = lhs(self.N_hps, samples=pars.n_restarts, criterion='centermaximin')\n # for i, hp in enumerate(sorted(pars.hps.keys())):\n #\n # Xs[:, i] = Xs[:, i] * (pars.hps[hp].bounds[1] - pars.hps[hp].bounds[0]) + pars.hps[hp].bounds[0]\n #\n # # Convert int values to integers\n # if pars.hps[hp].kind == 'discrete':\n # Xs[:, i] = Xs[:, i].astype(int)\n\n # Find the best optimum by starting from n_restart different random points.\n if self.pars.Ncontinuous_hps > 0:\n\n if pars.sampling_method == 'maximin':\n Xs = lhs(self.pars.Ncontinuous_hps, samples=pars.n_restarts, criterion='centermaximin')\n elif pars.sampling_method == 'random':\n Xs = np.random.uniform(size=(pars.n_restarts, self.pars.Ncontinuous_hps))\n\n conts_counter = 0\n for i, hp in enumerate(sorted(pars.hps.keys())):\n if pars.hps[hp].kind == 'continuous':\n Xs[:, conts_counter] = Xs[:, conts_counter] * (pars.hps[hp].bounds[1] - pars.hps[hp].bounds[0]) + pars.hps[hp].bounds[0]\n conts_counter += 1\n\n # Find the best optimum by starting from n_restart different random points.\n # Xs = lhs(self.pars.Ncontinuous_hps, samples=pars.n_restarts, criterion='centermaximin')\n # for i, hp in enumerate(sorted(self.pars.continuous_hps)):\n # Xs[:, i] = Xs[:, i] * (pars.hps[hp].bounds[1] - pars.hps[hp].bounds[0]) + \\\n # pars.hps[hp].bounds[0]\n\n # # Find the maximum in the acquisition function\n\n # print(f\"optim rout: {pars.optim_rout}\")\n # print(f\"Ncontinuous_hps: {pars.Ncontinuous_hps}\")\n\n if pars.optim_rout == 'minimize':\n for x0 in Xs:\n res = minimize(self.min_obj, x0=x0, bounds=pars.bounds, method=pars.method)\n # Find the best optimum across all initiations\n if res.fun < min_val:\n min_val = res.fun[0]\n min_x = res.x\n\n elif pars.optim_rout == 'random_search':\n Ndiscrete_hps = self.N_hps - self.pars.Ncontinuous_hps\n if self.pars.Ncontinuous_hps > 0:\n for x0 in Xs:\n for random_search_iteration in range(self.pars.number_of_random_searches):\n self.discrete_values = []\n for i, hp in enumerate(sorted(self.pars.hps)):\n if self.pars.hps[hp].kind == 'discrete':\n self.discrete_values.append(np.random.choice(self.pars.hps[hp].vals))\n # print(f\"discrete: {self.discrete_values}\")\n res = minimize(self.min_obj, x0=x0, bounds=pars.conts_bounds, method=pars.method)\n # print(f\"res: {res.x}, {res.fun}\")\n # Find the best optimum across all initiations\n if res.fun < min_val:\n min_val = res.fun[0]\n min_x = self.parse_obj_inputs(res.x)\n else:\n # All discrete\n for random_search_iteration in range(self.pars.number_of_random_searches):\n self.discrete_values = []\n for i, hp in enumerate(sorted(self.pars.hps)):\n 
self.discrete_values.append(np.random.choice(self.pars.hps[hp].vals))\n\n val = self.min_obj()\n if val < min_val:\n min_val = val\n min_x = np.array(self.discrete_values)\n\n elif pars.optim_rout == 'grid_search':\n\n if self.pars.Ncontinuous_hps > 0:\n\n for x0 in Xs:\n for hp_grid_item in self.pars.hp_grid:\n self.discrete_values = np.array(hp_grid_item).reshape(1, -1)[0]\n\n res = minimize(self.min_obj, x0=x0, bounds=pars.conts_bounds, method=pars.method)\n # print(f\"res: {res.x}, {res.fun}\")\n # Find the best optimum across all initiations\n if res.fun < min_val:\n min_val = res.fun[0]\n min_x = self.parse_obj_inputs(res.x)\n\n else:\n\n # All discrete\n for hp_grid_item in self.pars.hp_grid:\n self.discrete_values = np.array(hp_grid_item).reshape(1, -1)[0]\n val = self.min_obj()\n\n if val < min_val:\n min_val = val\n min_x = np.array(self.discrete_values)\n\n return min_x.reshape(-1, 1)", "title": "" }, { "docid": "f9d7e602698152ecb971f3d73f4504a9", "score": "0.47854638", "text": "def parse_settings(self, requested_kwargs):\n kwargs = super().parse_settings(requested_kwargs)\n kwargs['task_list'] = []\n # Build the tasks for the remaining qubits to be measured using the\n # default settings.\n\n freq_centers = {}\n for qb in self.qubits:\n freq_center = self.get_param_value('freq_center', qubit=qb.name)\n if isinstance(freq_center, str):\n freq_center = eval(\n freq_center.format(current=qb.ro_freq(),\n between_dips=self.get_freq_between_dips(\n qb)))\n freq_centers[qb.name] = freq_center\n\n # Unlike the freq_center, the range and pts must be the same for all\n # qubits that share the same feedline.\n freq_pts = self.get_param_value('freq_pts')\n freq_range = self.get_param_value('freq_range')\n if isinstance(freq_range, str):\n freq_range = eval(\n freq_range.format(\n adaptive=self.get_adaptive_freq_range(freq_centers)))\n\n # Array centered around 0\n freqs_base = np.linspace(-freq_range / 2, freq_range / 2, freq_pts)\n\n for qb in self.qubits:\n # Build frequency sweep points\n log.debug(f'{self.step_label}, {qb.name}: '\n f'{freq_centers[qb.name]=}, {freq_range=}, {freq_pts=}')\n\n # Array centered around the desired center\n freqs = freqs_base + freq_centers[qb.name]\n\n # Build voltage sweep points\n current_voltage = self.routine.fluxlines_dict[qb.name]\n volt_range = self.get_param_value('volt_range', qubit=qb.name)\n volt_pts = self.get_param_value('volt_pts', qubit=qb.name)\n volt_center = self.get_param_value('volt_center', qubit=qb.name)\n if isinstance(volt_center, str):\n volt_center = eval(\n volt_center.format(current=current_voltage))\n volts = np.linspace(volt_center - volt_range / 2,\n volt_center + volt_range / 2, volt_pts)\n\n kwargs['task_list'].append({\n 'qb': qb.name,\n 'freqs': freqs,\n 'volts': volts\n })\n kwargs['fluxlines_dict'] = self.routine.fluxlines_dict\n\n # If the parameter expected_dips_width was specified, add it to\n # the analysis_kwargs dictionary\n expected_dips_width = self.get_param_value(\n 'expected_dips_width', default=self.routine.NotFound())\n if type(expected_dips_width) != self.routine.NotFound:\n if 'analysis_kwargs' in kwargs.keys():\n kwargs['analysis_kwargs'][\n 'expected_dips_width'] = expected_dips_width\n else:\n kwargs['analysis_kwargs'] = {\n 'expected_dips_width': expected_dips_width}\n\n return kwargs", "title": "" }, { "docid": "78286f5ffcacea5d89b6f5b202187376", "score": "0.47784626", "text": "def BasePoint():\r\n pass", "title": "" }, { "docid": "20911a58b6085555207b112a731eb6e5", "score": "0.47614387", "text": 
"def main():\n parser = argparse.ArgumentParser(description='run the GPS cost')\n parser.add_argument('-t', '--test_time', metavar='N', type=int,\n help='test trained policy N time')\n parser.add_argument('-p', '--train_time', metavar='N', type=int,\n help='train policy in N positon')\n parser.add_argument('-c', '--center_position', metavar='N', type=float,\n help='center position')\n parser.add_argument('-r', '--radius', metavar='N', type=float,\n help='radius of area')\n parser.add_argument('-m', '--num', metavar='N', type=float,\n help='N num of experiment')\n args = parser.parse_args()\n if args.test_time == None:\n args.test_time = 50\n if args.train_time == None:\n args.train_time = 25\n if args.center_position == None:\n args.center_position = 0.12\n if args.radius == None:\n args.radius = 0.08\n if args.num == None:\n print('Should add the -m num')\n # while True:\n # a = 1\n\n max_error_bound = 0.01\n # train_positions\n #train_positions = generate_position(args.center_position, args.radius, args.train_time, max_error_bound)\n bas_pos = 0.041\n # np.array([0.11, -0.11, 0]),np.array([0.181, -0.181, 0])\n\n\n \"\"\"\n use for linear increment positions\n \"\"\"\n # init_positions = generate_position(0.02, -0.02, 0.02, 4, 0.001)\n # init_positions2 = generate_position(0.10, -0.10, 0.02, 2, 0.01)\n # init_positions3 = generate_position(0.18, -0.18, 0.02, 2, 0.01)\n # train_positions = np.array([init_positions[0],\n # init_positions[1],\n # init_positions[2],\n # init_positions[3],\n # np.array([0.07, -0.07, 0]), np.array([0.10, -0.10, 0]),\n # np.array([0.15, -0.15, 0]), np.array([0.181, -0.181, 0])\n # ])\n\n init_positions = generate_position(0.02, -0.02, 0.02, 4, 0.001)\n init_positions2 = generate_position(0.10, -0.10, 0.02, 2, 0.01)\n init_positions3 = generate_position(0.18, -0.18, 0.02, 2, 0.01)\n # train_positions = np.array([init_positions[0],\n # init_positions[1],\n # init_positions[2],\n # init_positions[3],\n # init_positions2[0],\n # init_positions2[1],\n # init_positions3[0],\n # init_positions3[0]\n # ])\n\n train_positions = np.array([np.array([0.0, 0.0, 0.0]),\n np.array([0.04, 0, 0]),\n np.array([0, -0.04, 0]),\n np.array([0.04, -0.04, 0]),\n np.array([0.07, -0.07, 0]),\n np.array([0.10, -0.10, 0]),\n np.array([0.15, -0.15, 0]),\n np.array([0.18, -0.18, 0])\n ])\n\n # train_positions = np.array([np.array([0, 0, 0]), np.array([bas_pos, -bas_pos, 0]),\n # np.array([0, -bas_pos, 0]),\n # np.array([bas_pos, 0, 0]),\n # np.array([0.07, -0.07, 0]), np.array([0.10, -0.10, 0]),\n # np.array([0.15, -0.15, 0]), np.array([0.181, -0.181, 0])\n # ])\n # # train_positions = np.array([np.array([0.03, -0.03, 0]), np.array([-0.03, 0.03, 0]), np.array([0.181, -0.181, 0]),\n # # np.array([0.101, -0.101, 0]), np.array([0.241, -0.241, 0])])\n # print(train_positions)\n file_pickle('./position/position_train.pkl', train_positions)\n\n # test_positions\n # test_positions = generate_position(args.center_position, -args.center_position, args.radius, args.test_time, max_error_bound)\n\n # plotposition(test_positions)\n # file_pickle('./position/%d/test_position.pkl' % args.num, test_positions)\n\n # \"\"\"\n # generate train and test for four areas\n # \"\"\"\n # director = 8\n # idx_pos = 1\n # # first area [[-0.05, 0.05], [-0.05, 0.05]]\n # train_position = generate_position(0.05, 0.05, 0.05, 3, 0.02)\n # test_position = generate_position(0.05, 0.05, 0.05, 10, 0.02)\n # file_pickle('./position/%d/train_position_%d.pkl' % (director, idx_pos), train_position)\n # 
file_pickle('./position/%d/test_position_%d.pkl' % (director, idx_pos), test_position)\n #\n # # second area [[0.1, 0.2], [-0.05, 0.05]]\n # train_position = generate_position(0.05, 0.2, 0.05, 3, 0.02)\n # test_position = generate_position(0.05, 0.2, 0.05, 10, 0.02)\n # file_pickle('./position/%d/train_position_%d.pkl' % (director, idx_pos+1), train_position)\n # file_pickle('./position/%d/test_position_%d.pkl' % (director, idx_pos+1), test_position)\n #\n # # second area [[0.1, 0.2], [-0.05, 0.05]]\n # train_position = generate_position(0.4, 0.05, 0.05, 3, 0.02)\n # test_position = generate_position(0.4, 0.05, 0.05, 10, 0.02)\n # file_pickle('./position/%d/train_position_%d.pkl' % (director, idx_pos+2), train_position)\n # file_pickle('./position/%d/test_position_%d.pkl' % (director, idx_pos+2), test_position)\n #\n # # second area [[0.1, 0.2], [-0.05, 0.05]]\n # train_position = generate_position(0.2, 0.2, 0.05, 3, 0.02)\n # test_position = generate_position(0.2, 0.2, 0.05, 10, 0.02)\n # file_pickle('./position/%d/train_position_%d.pkl' % (director, idx_pos+3), train_position)\n # file_pickle('./position/%d/test_position_%d.pkl' % (director, idx_pos+3), test_position)\n\n \"\"\"\n test the OLGPS generalization\n \"\"\"\n # director = args.num\n # idx_pos = 1\n # train_position = generate_position(0.2, 0.2, 0.050, 4, 0.01)\n # # train_position = np.array([[ 0.178, -0.222, 0. ], [ 0.237, -0.178, 0. ], [ 0.179, -0.172, 0. ],\n # # [0.2, -0.2, 0]])\n # file_pickle('./position/train_position.pkl', train_position)\n # test_position = generate_position(0.2, 0.2, 0.050, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos), test_position)\n # test_position = generate_position(0.2, 0.2, 0.075, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos+1), test_position)\n # test_position = generate_position(0.2, 0.2, 0.100, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos+2), test_position)\n # test_position = generate_position(0.2, 0.2, 0.125, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos+3), test_position)\n # test_position = generate_position(0.2, 0.2, 0.150, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos+4), test_position)\n # test_position = generate_position(0.2, 0.2, 0.175, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos+5), test_position)\n # test_position = generate_position(0.2, 0.2, 0.200, 100, 0.05)\n # file_pickle('./position/test_position_%d.pkl' % (idx_pos+6), test_position)", "title": "" }, { "docid": "95dd33af2eed1a71a0def817c90bdbd6", "score": "0.47591743", "text": "def _parse_sc_points(cls, sc_table, gen_fpath, max_workers=None,\n points_per_worker=400, offshore=False):\n sc_table = cls._parse_sc_table(sc_table, offshore=offshore)\n if 'sc_gid' in sc_table:\n sc_table = sc_table.set_index('sc_gid')\n\n gen_cf_means = cls._get_gen_cf(gen_fpath)\n sc_points = cls._create_points(\n sc_table, gen_cf_means,\n offshore=offshore,\n max_workers=max_workers,\n points_per_worker=points_per_worker)\n\n n = int(sc_table.index.max() + 1)\n capacity = np.zeros(n)\n capacity[sc_table.index] = sc_table['capacity'].values\n mask = np.ones(n, dtype=bool)\n\n return sc_points, capacity, mask", "title": "" }, { "docid": "902541577208ea5f0f718141d9537f15", "score": "0.47575316", "text": "def at_pt(self, params):\n pass", "title": "" }, { "docid": "986cce34668189345a8d8762b6ac8b2d", "score": "0.47439936", "text": "def __init__(self, point_a, point_b, previous_speed=None) 
-> None:\n self.point_a = point_a\n self.point_b = point_b\n self.__distance()\n self.__time_between_points()\n self.__speed()\n self.__acceleration(previous_speed)\n if (type(self.point_a) is GPXTrackPoint and\n type(self.point_b) is GPXTrackPoint):\n (self.point_a.extensions, self.point_b.extensions) = (None, None)\n (self.point_a.gpx_10_fields, self.point_b.gpx_10_fields) = \\\n (None, None)\n (self.point_a.gpx_11_fields, self.point_b.gpx_11_fields) = \\\n (None, None)", "title": "" }, { "docid": "4157c67c41a39dc6cfed68accdb7288d", "score": "0.4733475", "text": "def process_datapoint(self, raw_datapoint):\n raise NotImplementedError", "title": "" }, { "docid": "fb45061da6028545c743a0a42201d38e", "score": "0.46770433", "text": "def _precompute_force(self):\n raise NotImplementedError", "title": "" }, { "docid": "fb00ffbbf958bb72239033cb818250d8", "score": "0.46734816", "text": "def _resolve_point(\n self, method: str, stations: pd.DataFrame, alt: int, adapt_temp: bool\n ) -> None:\n\n if self._stations.size == 0 or self._data.size == 0:\n return None\n\n if method == \"nearest\":\n\n if adapt_temp:\n\n # Join elevation of involved weather stations\n data = self._data.join(stations[\"elevation\"], on=\"station\")\n\n # Adapt temperature-like data based on altitude\n data = adjust_temp(data, alt)\n\n # Drop elevation & round\n data = data.drop(\"elevation\", axis=1).round(1)\n\n else:\n\n data = self._data\n\n if self.granularity == Granularity.NORMALS:\n self._data = data.groupby(level=[\"start\", \"end\", \"month\"]).agg(\"first\")\n\n else:\n self._data = data.groupby(\n pd.Grouper(level=\"time\", freq=self._freq)\n ).agg(\"first\")\n\n else:\n\n # Join score and elevation of involved weather stations\n data = self._data.join(stations[[\"score\", \"elevation\"]], on=\"station\")\n\n # Adapt temperature-like data based on altitude\n if adapt_temp:\n data = adjust_temp(data, alt)\n\n # Exclude non-mean data & perform aggregation\n if not self.granularity == Granularity.NORMALS:\n excluded = data[\"wdir\"]\n excluded = excluded.groupby(\n pd.Grouper(level=\"time\", freq=self._freq)\n ).agg(\"first\")\n\n # Aggregate mean data\n if self.granularity == Granularity.NORMALS:\n data = data.groupby(level=[\"start\", \"end\", \"month\"]).apply(\n weighted_average\n )\n\n # Remove obsolete index column\n try:\n data = data.reset_index(level=3, drop=True)\n except IndexError:\n pass\n\n else:\n data = data.groupby(pd.Grouper(level=\"time\", freq=self._freq)).apply(\n weighted_average\n )\n\n # Drop RangeIndex\n data.index = data.index.droplevel(1)\n\n # Merge excluded fields\n data[\"wdir\"] = excluded\n\n # Drop score and elevation\n self._data = data.drop([\"score\", \"elevation\"], axis=1).round(1)\n\n # Set placeholder station ID\n self._data[\"station\"] = \"XXXXX\"\n\n # Set index\n if self.granularity == Granularity.NORMALS:\n self._data = self._data.set_index(\"station\", append=True)\n self._data = self._data.reorder_levels([\"station\", \"start\", \"end\", \"month\"])\n else:\n self._data = self._data.set_index(\n [\"station\", self._data.index.get_level_values(\"time\")]\n )\n\n # Set station index\n self._stations = pd.Index([\"XXXXX\"])", "title": "" }, { "docid": "32466d2a9d692c262850d2a903bfd3a6", "score": "0.46593195", "text": "def parse_point_charges(self):\n self.ctx.structure = self.ctx.point_charges_calc['output_structure']", "title": "" }, { "docid": "828bc488986a5f7417f21fcae7d3518a", "score": "0.46501726", "text": "def __init__(self, name, args, functions, post_dominators):\n 
super(HalideComputeAtScheduleParameter, self).__init__(\n name, *self.gen_nodes_deps(args, functions))\n self.post_dominators = post_dominators", "title": "" }, { "docid": "eb4e1b2842fd4449de73ff541eb1e975", "score": "0.46463543", "text": "def bulk_set_measurement_points(self, measurement_points, io_origin):\n for location in measurement_points:\n self.keypoint_ids.append(location)\n node = int(location[0:len(location) - 1]) - io_origin\n\n if 'X' in location:\n self.analysis[location] = node * 3 + 0\n self.keypoints.append(node * 3 + 0)\n\n if 'Y' in location:\n self.analysis[location] = node * 3 + 1\n self.keypoints.append(node * 3 + 1)\n\n if 'Z' in location:\n if self.dof == 3:\n self.analysis[location] = node * 3 + 2\n self.keypoints.append(node * 3 + 2)\n else:\n print(\"Z-direction is not allowed in 2D structures. \"\n \"Please check the 'MEASUREMENTS' section in the input file.\")\n raise Exception\n\n if self.number_of_keypoints == 0:\n print(\"There is no valid measured dof. Please check the \\'MEASUREMENTS\\' section in the input file.\")", "title": "" }, { "docid": "2e6f55d4f031030cc2614684e1346632", "score": "0.46441394", "text": "def pre_run_methods(self):\n self.add_time_variable()\n if len(self.acid_base_components) > 0:\n self.create_acid_base_system()\n self.acid_base_equilibrium_solve(0)\n if self.ode_method is 'scipy':\n self.create_dynamic_functions()\n self.init_rates_arrays()", "title": "" }, { "docid": "27dc03e94d08cdcd7f33f4f981f50666", "score": "0.46342903", "text": "def do_compute_at(self):\n placeholder = self.placeholder_ub\n placeholder_sch = self.schedule[placeholder]\n # Non-last axis broadcast enhancement here, direct dma_copy is sometimes faster\n if self.no_broadcast:\n placeholder_sch.compute_at(self.schedule[self.broadcasts[0]],\n self.ub_outer)\n else:\n broadcast_sch = self.schedule[self.broadcast_ub]\n placeholder_sch.compute_at(self.schedule[self.broadcasts[0]],\n self.ub_outer)\n broadcast_sch.compute_at(self.schedule[self.broadcasts[0]],\n self.ub_outer)", "title": "" }, { "docid": "7f6f9a9d993d04543c96a494c6d716ce", "score": "0.46269438", "text": "def _calc_feature_points(self):\n\tpass", "title": "" }, { "docid": "fbf0727bd8a2a76e9d91c1c3f3386e5a", "score": "0.46250468", "text": "def compute(\n self,\n core_record: CoreRecord\n ) -> Munch:\n raise NotImplementedError", "title": "" }, { "docid": "2e2004ccdb1cc62adadaae58a2e4677b", "score": "0.46223146", "text": "def test_empty_parameters_to_live_point(empty_live_point):\n np.testing.assert_array_equal(\n lp.parameters_to_live_point([], [\"x\", \"y\", \"z\"]),\n empty_live_point,\n )", "title": "" }, { "docid": "3a4d7df487df57bab0216d1b575d434e", "score": "0.46087843", "text": "def pre_load_points(self, eval_pts, rewards):\n self.x_data += eval_pts\n self.y_data += rewards\n self.x_init = eval_pts\n self.y_init = rewards\n self.pre_loaded_fit = True\n self.gp = get_tuned_gp(self.gp_engine, eval_pts, rewards,\n kernel_type=self.gp_options.kernel_type)", "title": "" }, { "docid": "9ef170db479177d1064f2c32a31fcb7b", "score": "0.46065608", "text": "def test_datapoint(self):\n pass", "title": "" }, { "docid": "23f4a2e7fe41d6451a78186652bbd6ca", "score": "0.45816812", "text": "def test_timestamp_assignment(self):\n\n nucleus_points = [26.15, 26.29, 26.46, 26.6201]\n\n phones_df = DataFrame(\n [\n (26.130000, \"h\", 25.8701),\n (26.160000, \"E\", 26.1301),\n (26.270000, \"_6\", 26.1601),\n (26.360000, \"k\", 26.2701),\n (26.420000, \"l\", 26.3601),\n (26.490000, \"E:\", 26.4201),\n (26.540000, \"_6\", 
26.4901),\n (26.620000, \"t\", 26.5401),\n (26.740000, \"@\", 26.6201),\n ],\n columns=[\"end\", \"label\", \"start_est\"],\n )\n\n words_df = DataFrame(\n [\n (25.870000, \"bewirken\", 25.3501),\n (26.130000, \"[h]\", 25.8701),\n (26.740000, \"erklärte\", 26.1301),\n (27.460000, \"Außenminister\", 26.7401),\n ],\n columns=[\"end\", \"label\", \"start_est\"],\n )\n\n tones_df = DataFrame(\n [(25.87, \"<P>\", 22.0), (28.5, \"%\", 25.8701)],\n columns=[\"time\", \"label\", \"start_est\"],\n )\n\n accents_df = DataFrame(\n [\n (26.515000, \"LH*L\"),\n (27.623440, \"L*H\"),\n (27.995732, \"H*L\"),\n (28.682547, \"!H*L\"),\n ],\n columns=[\"time\", \"label\"],\n )\n\n assigned_df = find_syllable_nuclei.assign_points_labels(\n nucleus_points,\n phones=phones_df,\n words=words_df,\n tones=tones_df,\n accents=accents_df,\n )\n\n self.assertTrue(list(assigned_df[\"end\"]) == [26.16, np.nan, 26.49, 26.74])\n self.assertTrue(\n list(assigned_df[\"start_est\"]) == [26.1301, np.nan, 26.4201, 26.6201]\n )", "title": "" }, { "docid": "c1b2acfc61ad9d8e9d44750da6599519", "score": "0.45814323", "text": "def __call__(self, start_time, end_time):\n\t# start by adjusting for timezone (in future versions of astroplan, this may be done for us)\n start_time = self.toUTC(start_time)\n end_time = self.toUTC(end_time)\n\n\t# then do definitions\n final_blocks = []\n unschedulable = []\n remaining_blocks = list(self.oblist)\n current_time = start_time + 5.5*u.hour\n for b in remaining_blocks:\n b._time_scale = u.Quantity([0*u.minute, b.duration/2, b.duration])\n\n\t# let's test something.\n targets = [ob.target for ob in self.oblist]\n times = astroplan.time_grid_from_range(aptime.Time([start_time,end_time]), self.min_delay)\n matrix = np.ones((len(targets), len(times)), dtype=np.bool)\n for constraint in self.constraints:\n t0 = time.time()\n matrix = np.logical_and(matrix, constraint(self.site, targets, times))\n dt = time.time()-t0\n print \"Calling a constraint for {} targets and {} times took {} ms\".format(len(targets),len(times),dt)\n\n\t# now go ahead and start scheduling\n while len(remaining_blocks) > 0 and current_time < end_time:\n # first, check the universal constraints\n observable = [True]\n #times = [current_time, current_time+self.min_delay/2]\n #observable = astroplan.is_always_observable(constraints=self.constraints,\n # observer = self.site,\n # targets=[self.oblist[0]], times=times)\n # if any of the universal constraints come up false, skip this time\n if not observable[0]:\n reason = 'day'\n best_block_idx = None\n \n else:\n\t\t# now score each potential ob based on how well it would fit here\n block_transitions = []\n block_scores = []\n for i, ob in enumerate(remaining_blocks):\n # first calculate transition\n if len(final_blocks) > 0 and type(final_blocks[-1]) == astroplan.ObservingBlock:\n tb = qsim.transition(final_blocks[-1], ob, current_time, self.site)\n transition_time = tb.duration\n else:\n tb = None\n transition_time = 0*u.minute\n block_transitions.append(tb)\n \n\t\t # now verify that it is observable during this time\n times = current_time + transition_time + ob._time_scale\n if not matrix[i, int((current_time-end_time)/(self.min_delay))]:\n observable = [False]\n elif times[-1] > end_time:\n observable = [False]\n else:\n t0 = time.time()\n observable = astroplan.is_always_observable(constraints=ob.constraints,\n observer=self.site,\n targets=[ob.target], times=times)\n dt = time.time()-t0\n print \"is_always_observable for {} constraints and {} times took {} 
ms\".format(len(ob.constraints),len(times),dt)\n if observable[0]:\n block_scores.append(qsim.score(tb, ob))\n else:\n block_scores.append(float('inf'))\n #block_scores.append(qsim.const_score(tb, ob, times, self.site))\n\t\t # if it would run over the end of the schedule, then assume it is unschedulable\n \n\t \t# now that all that's been calculated, pick the best block\n best_block_idx = np.argmin(block_scores)\n \n\t\t# if the best block is unobservable, then it's not really the best, is it?\n if block_scores[best_block_idx] == float('inf'):\n reason = 'nothing_observable'\n best_block_idx = None\n\n\t # if there is no best block, we obviously need a delay\n if best_block_idx == None:\n self.logger.info(\"Nothing observable at %s\" % (self.fromUTC(current_time)))\n final_blocks.append(astroplan.TransitionBlock(components={reason: self.min_delay},\n start_time=current_time))\n final_blocks[-1].components = {reason: self.min_delay}\n current_time += self.min_delay\n\t # otherwise, go ahead and add it to the schedule; don't forget the TransitionBlock!\n else:\n self.logger.info(\"Scheduled OB at %s\" % (self.fromUTC(current_time)))\n tb = block_transitions.pop(best_block_idx)\n ob = remaining_blocks.pop(best_block_idx)\n if tb is not None:\n final_blocks.append(tb)\n current_time += tb.duration\n ob.start_time = current_time\n current_time += ob.duration\n ob.end_time = current_time\n ob.constraints_value = block_scores[best_block_idx]\n final_blocks.append(ob)\n \n # return the scheduled blocks and the unscheduled blocks\n return [final_blocks, remaining_blocks+unschedulable]", "title": "" }, { "docid": "b123b3839092630537b6e4480291e377", "score": "0.45801067", "text": "def __init__(self, raw_datapoint):\n raise NotImplementedError", "title": "" }, { "docid": "208ee5cb7688f58c507739f986010ffe", "score": "0.45760602", "text": "def layout_procedure(self):\n self.get_tech_params()\n self.get_cell_params()\n self.calculate_pins()\n # self.calculate_obs() # Can be enabled later if needed!!!\n self.calculate_boundary()\n self.instantiate_layout()", "title": "" }, { "docid": "c670da3bc9efa60fb658ab26ac604ac4", "score": "0.45616543", "text": "def __init__(self,particle):\r\n\t\tTopologyBase.__init__(self,particle)\r\n\t\t\r\n\t\tself.position_updater.set(updateParticlesPosition)\r\n\t\tself.information_updater.set(updateParticlesInformation)", "title": "" }, { "docid": "d8b0f3fef8f60d089e74c7483997e0d9", "score": "0.45438924", "text": "def __init__(self,gp_params):\n\n scenario_params = gp_params['orbit_prop_params']['scenario_params']\n sat_params = gp_params['orbit_prop_params']['sat_params']\n rs_general_params = gp_params['gp_general_params']['route_selection_general_params']\n rs_params = gp_params['gp_general_params']['route_selection_params_v1']\n gp_general_other_params = gp_params['gp_general_params']['other_params']\n gp_inst_planning_params = gp_params['gp_instance_params']['route_selection_params']\n\n self.num_sats=sat_params['num_sats']\n self.num_paths=rs_params['num_paths']\n self.sel_start_utc_dt = tt.iso_string_to_dt (gp_inst_planning_params['start_utc'])\n self.sel_end_utc_dt = tt.iso_string_to_dt (gp_inst_planning_params['end_utc'])\n\n # note: M values should be as low as possible to prevent numerical issues (see: https://orinanobworld.blogspot.com/2011/07/perils-of-big-m.html)\n self.M_t_s= 86400 # 1 day\n self.M_dv_Mb= 1000000 # 1000 gigabits\n self.min_path_dv =rs_params['min_path_dv_Mb']\n self.solver_max_runtime =rs_params['solver_max_runtime_s']\n self.solver_name 
=rs_params['solver_name']\n self.solver_run_remotely =rs_params['solver_run_remotely']\n self.wind_filter_duration = timedelta (seconds =rs_general_params['wind_filter_duration_s'])\n self.latency_params = gp_general_other_params['latency_calculation']\n\n # quick sanity check on M time value\n total_duration =(self.sel_end_utc_dt- self.sel_start_utc_dt).total_seconds ()\n if total_duration > self.M_t_s:\n raise Exception ('big M value is too small for %f second scheduling window' % ( total_duration))", "title": "" }, { "docid": "0dc3983c6c54b389440a70e262643b12", "score": "0.45431486", "text": "def __init__(self, env, num_reg_staff, num_screening_staff, num_interviewers, num_schedulers,\n mean_interarrival_time, pct_second_round,\n reg_time_mean,document_check_time_mean, document_check_time_sd,\n interview_time_mean, interview_time_sd,\n sched_time_mean, sched_time_sd,\n post_int_time, add_post_int_time_mean, rg\n ):\n\n # Simulation environment and random number generator\n self.env = env\n self.rg = rg\n\n # Create list to hold timestamps dictionaries (one per candidate)\n self.timestamps_list = []\n # Create lists to hold occupancy tuples (time, occ)\n self.postint_occupancy_list = [(0.0, 0.0)]\n self.int_occupancy_list = [(0.0, 0.0)]\n\n # Create SimPy resources\n self.reg_staff = simpy.Resource(env, num_reg_staff)\n self.screening_staff = simpy.Resource(env, num_screening_staff)\n self.interviewer = simpy.Resource(env, num_interviewers)\n self.scheduler = simpy.Resource(env, num_schedulers)\n\n # Initialize the candidate flow related attributes\n self.mean_interarrival_time = mean_interarrival_time\n self.pct_second_round = pct_second_round\n\n self.reg_time_mean = reg_time_mean\n self.document_check_time_mean = document_check_time_mean\n self.document_check_time_sd = document_check_time_sd\n self.interview_time_mean = interview_time_mean\n self.interview_time_sd = interview_time_sd\n self.sched_time_mean = sched_time_mean\n self.sched_time_sd = sched_time_sd\n self.post_int_time = post_int_time\n self.add_post_int_time_mean = add_post_int_time_mean", "title": "" }, { "docid": "d5854f8aa605a6a22141d8b13ecf14b7", "score": "0.45312083", "text": "def _parse_point_cloud(self):\n self.point_cloud = np.vstack(\n [self.distances * np.cos(self.angles), self.distances * np.sin(self.angles)]).T", "title": "" }, { "docid": "f78cb9f9f2d0b4e2e027cf7051f26396", "score": "0.45234817", "text": "def __init__(self):\n super().__init__()\n self.parameters = {} # parameters dictionary (they are basically stored into a dictionary identified by tag \"targets\"\n self.acceptedCalcParam = self.scalarVals + self.vectorVals\n self.what = self.acceptedCalcParam # what needs to be computed... default...all\n self.methodsToRun = [] # if a function is present, its outcome name is here stored... 
if it matches one of the known outcomes, the pp is going to use the function to compute it\n self.printTag = 'PostProcessor BASIC STATISTIC'\n self.biased = False # biased statistics?\n self.pivotParameter = None # time-dependent statistics pivot parameter\n self.pivotValue = None # time-dependent statistics pivot parameter values\n self.dynamic = None # is it time-dependent?\n self.sampleTag = None # Tag used to track samples\n self.pbPresent = False # True if the ProbabilityWeight is available\n self.realizationWeight = None # The joint probabilities\n self.steMetaIndex = 'targets' # when Dataset is requested as output, the default index of ste metadata is ['targets', self.pivotParameter]\n self.multipleFeatures = True # True if multiple features are employed in linear regression as feature inputs\n self.sampleSize = None # number of sample size\n self.calculations = {}\n self.validDataType = ['PointSet', 'HistorySet', 'DataSet'] # The list of accepted types of DataObject", "title": "" }, { "docid": "5bd234c9ae754cf584e6e88406dcdfe4", "score": "0.45139796", "text": "def setup_time(self, phase):\n time_options = phase.time_options\n time_units = time_options['units']\n num_seg = self.grid_data.num_segments\n grid_data = self.grid_data\n output_nodes_per_seg = self.options['output_nodes_per_seg']\n\n super(SolveIVP, self).setup_time(phase)\n\n if output_nodes_per_seg is None:\n # Case 1: Compute times at 'all' node set.\n num_nodes = grid_data.num_nodes\n node_ptau = grid_data.node_ptau\n node_dptau_dstau = grid_data.node_dptau_dstau\n else:\n # Case 2: Compute times at n equally distributed points per segment.\n num_nodes = num_seg * output_nodes_per_seg\n node_stau = np.linspace(-1, 1, output_nodes_per_seg)\n node_ptau = np.empty(0, )\n node_dptau_dstau = np.empty(0, )\n # Append our nodes in phase tau space\n for iseg in range(num_seg):\n v0 = grid_data.segment_ends[iseg]\n v1 = grid_data.segment_ends[iseg + 1]\n node_ptau = np.concatenate((node_ptau, v0 + 0.5 * (node_stau + 1) * (v1 - v0)))\n node_dptau_dstau = np.concatenate((node_dptau_dstau,\n 0.5 * (v1 - v0) * np.ones_like(node_stau)))\n\n time_comp = TimeComp(num_nodes=num_nodes, node_ptau=node_ptau,\n node_dptau_dstau=node_dptau_dstau, units=time_units)\n\n phase.add_subsystem('time', time_comp, promotes=['*'])", "title": "" }, { "docid": "ae05ea6141d528ed51b0b23a2556fe03", "score": "0.4512939", "text": "def test_check_temporal_interp_method_bad(self):\n\n with self.assertRaises(ValueError):\n interp.check_temporal_interp_method(interp.SPLINE_INTERP_METHOD)", "title": "" }, { "docid": "b40abeb1d1e5f9eda1dcca4e411c0027", "score": "0.45072708", "text": "def _schedule_calculation(\n cls, calculation_backend, storage_backend, layer_directory, batch\n ):\n raise NotImplementedError()", "title": "" }, { "docid": "a58bd8f502c4a7868359238946195cc2", "score": "0.45034462", "text": "def _create_points(cls, sc_table, gen_cf_means, offshore=False,\n max_workers=None, points_per_worker=400):\n\n if max_workers is None:\n max_workers = os.cpu_count()\n\n sc_table = cls._parse_sc_table(sc_table, offshore=offshore)\n if 'sc_gid' in sc_table:\n sc_table = sc_table.set_index('sc_gid')\n\n cols = ['capacity', 'res_gids', 'gen_gids', 'gid_counts']\n sc_table = sc_table[cols]\n\n sc_points = {}\n if max_workers > 1:\n logger.info('Creating supply curve points in parallel')\n loggers = [__name__, 'reVX']\n with SpawnProcessPool(max_workers=max_workers,\n loggers=loggers) as exe:\n futures = []\n slices = cls._create_worker_slices(\n sc_table, 
points_per_worker=points_per_worker)\n for sc_slice in slices:\n table_slice = sc_table.iloc[sc_slice].copy()\n gids = np.unique(np.hstack(table_slice['gen_gids'].values))\n gen_slice = gen_cf_means.loc[gids].copy()\n future = exe.submit(cls._create_points,\n table_slice,\n gen_slice,\n max_workers=1)\n futures.append(future)\n\n for i, future in enumerate(as_completed(futures)):\n sc_points.update(future.result())\n logger.info('Completed {} out of {} Points'\n .format((i + 1) * points_per_worker,\n len(sc_table)))\n\n else:\n logger.debug('Creating supply curve points in serial')\n for i, (sc_gid, sc_point) in enumerate(sc_table.iterrows()):\n sc_gid = int(sc_gid)\n sc_points[sc_gid] = Point.create(sc_point, gen_cf_means)\n logger.debug('Created {} out of {} Points'\n .format(i + 1, len(sc_table)))\n\n return sc_points", "title": "" }, { "docid": "8c126ed0fc6205dc5f51af6b749a588c", "score": "0.4496695", "text": "def _preprocess_op_time(self, op_time):\n if op_time == 'schedule':\n self._step_stats = self._origin_step_stats\n return\n self._step_stats = copy.deepcopy(self._origin_step_stats)\n # Separate job task and gpu tracer stream\n stream_all_stats = []\n job_stats = []\n for stats in self._step_stats.dev_stats:\n if '/stream:all' in stats.device:\n stream_all_stats.append(stats)\n elif '/job' in stats.device:\n job_stats.append(stats)\n\n # Record the start time of the first kernel and the end time of\n # the last gpu kernel for all ops.\n op_gpu_start = {}\n op_gpu_end = {}\n for stats in stream_all_stats:\n for kernel in stats.node_stats:\n name, _ = self._parse_kernel_label(kernel.timeline_label,\n kernel.node_name)\n start = kernel.all_start_micros\n end = kernel.all_start_micros + kernel.all_end_rel_micros\n if name in op_gpu_start:\n op_gpu_start[name] = min(op_gpu_start[name], start)\n op_gpu_end[name] = max(op_gpu_end[name], end)\n else:\n op_gpu_start[name] = start\n op_gpu_end[name] = end\n\n # Update the start and end time of each op according to the op_time\n for stats in job_stats:\n for op in stats.node_stats:\n if op.node_name in op_gpu_start:\n end = max(op_gpu_end[op.node_name],\n op.all_start_micros + op.all_end_rel_micros)\n if op_time == 'gpu':\n op.all_start_micros = op_gpu_start[op.node_name]\n op.all_end_rel_micros = end - op.all_start_micros", "title": "" }, { "docid": "2a0fbf7b8bf1f0e12e1f4f58589cc8a7", "score": "0.44934297", "text": "def _take_point(self):\n raise NotImplementedError()", "title": "" }, { "docid": "0c2ae59add14759712136b0cb21fd477", "score": "0.44883102", "text": "def __validate_points(fixed_points: List[Tuple[float, ...]], moving_points: List[Tuple[float, ...]]):\n # Check neither list is empty:\n if len(moving_points) == 0 or len(fixed_points) == 0:\n raise Exception(\"Points cannot be empty.\")\n\n # Check the lists are both the same length\n if len(moving_points) != len(fixed_points):\n raise Exception(\"From and to points are not the same length.\")\n \n # Check if the dimensionality is consistent throughout both lists\n primary_dimensionality = len(moving_points[0])\n for p in moving_points:\n if len(p) != primary_dimensionality:\n raise Exception(\"Inconsistent dimensionality in moving points.\")\n\n secondary_dimensionality = len(fixed_points[0])\n for s in fixed_points:\n if len(s) != secondary_dimensionality:\n raise Exception(\"Inconsistent dimensionality in fixed points.\")", "title": "" }, { "docid": "73c4a9dfb1f55c096c40a5f400ea43e9", "score": "0.44808096", "text": "def __init__(self,\n learning_rate_base,\n total_steps,\n 
global_step_init=0,\n warmup_learning_rate=0.0,\n warmup_steps=0,\n hold_base_rate_steps=0,\n verbose=0):\n\n super(WarmUpCosineDecayScheduler, self).__init__()\n self.learning_rate_base = learning_rate_base\n self.total_steps = total_steps\n self.global_step = global_step_init\n self.warmup_learning_rate = warmup_learning_rate\n self.warmup_steps = warmup_steps\n self.hold_base_rate_steps = hold_base_rate_steps\n self.verbose = verbose\n self.learning_rates = []", "title": "" }, { "docid": "78e8f29720cc1857920894d2582c4620", "score": "0.44794402", "text": "def calculate_internals(self):\n \n if len(self.jets) <= 1:\n print(\"ERROR -- events has less than 2 jets, which should never happen!\")\n return\n \n if len(self.jets) != 2:\n print(\"WARNING -- expected two jets in the event, but there are \", len(self.jets))\n \n dijet_vector = self.jets[0].get_four_vector() + self.jets[1].get_four_vector()\n \n print(\"\\n\\n MET phi: \", self.metPhi)\n \n met_py = self.metPt * np.sin(self.metPhi)\n met_px = self.metPt * np.cos(self.metPhi)\n \n Mjj = dijet_vector.M()\n Mjj2 = Mjj * Mjj\n ptjj = dijet_vector.Pt()\n ptjj2 = ptjj * ptjj\n ptMet = dijet_vector.Px() * met_px + dijet_vector.Py() * met_py\n \n MT = np.sqrt(Mjj2 + 2. * (np.sqrt(Mjj2 + ptjj2) * self.metPt - ptMet))\n \n self.Mjj = Mjj\n self.MT = MT", "title": "" }, { "docid": "16d2f1464d3cdbb662a269a173f812f9", "score": "0.44754303", "text": "def validate_flights(self):\n with multiprocessing.Pool(multiprocessing.cpu_count()) as p:\n steps = 1\n\n # we can't just map(self.task.validate, self.flights) because instance attributes updated in subprocesses are not copied back on join\n for pilot_id, goal_distances, tag_times in tqdm(p.imap_unordered(self.task.validate, self.flights.values()), desc='validating flights', total=self.n_pilots, disable=self._progress != 'gui'):\n\n # update goal distances of flight points\n for timestamp, point in self.flights[pilot_id].points.items():\n point.goal_distance = goal_distances[timestamp]\n\n # compute race time for pilot, read list in reverse because ESS is more likely near the end\n self.flights[pilot_id].race_distance = len(self.task) - min(goal_distances.values())\n self.flights[pilot_id]._last_point['point'].goal_distance = min(goal_distances.values())\n\n # compute race time for pilot, read list in reverse because ESS is more likely near the end\n if len(tag_times) == len(self.task.turnpoints):\n for i, turnpoint in enumerate(self.task.turnpoints[::-1]):\n if turnpoint.role == 'ESS':\n race_time = sub_times(tag_times[-(i + 1)], self.task.start)\n self.flights[pilot_id].race_time = race_time\n logging.debug(f'{pilot_id} SS : {race_time}')\n\n # update tag_times of turnpoints\n self.task.update_tag_times(tag_times)\n\n if self._progress == 'ratio':\n print(f'{steps/self.n_pilots:.0%}', file=sys.stderr, flush=True)\n steps += 1\n\n self.validated = True", "title": "" }, { "docid": "da038c5df35ff6759abcab12cff1926e", "score": "0.44737095", "text": "def __init__( # pylint: disable=too-many-locals, too-many-arguments\r\n self, origins, destinations, output_origins, time_window_start_day, time_window_start_time, time_window_end_day,\r\n time_window_end_time, time_increment, network_data_source, travel_mode, chunk_size, max_processes, time_units,\r\n cutoff, weight_field=None, precalculate_network_locations=True, barriers=None\r\n ):\r\n self.origins = origins\r\n self.destinations = destinations\r\n self.weight_field = weight_field\r\n self.network_data_source = network_data_source\r\n self.travel_mode 
= travel_mode\r\n self.output_origins = output_origins\r\n self.chunk_size = chunk_size\r\n self.max_processes = max_processes\r\n self.time_units = time_units\r\n self.cutoff = cutoff\r\n self.should_precalc_network_locations = precalculate_network_locations\r\n self.barriers = barriers if barriers else []\r\n\r\n # Create a temporary output location for destinations so we can calculate network location fields and not\r\n # overwrite the input\r\n self.temp_destinations = os.path.join(\r\n arcpy.env.scratchGDB, # pylint: disable=no-member\r\n arcpy.CreateUniqueName(\"TempDests\", arcpy.env.scratchGDB) # pylint: disable=no-member\r\n )\r\n\r\n self.time_window_start_day = time_window_start_day\r\n self.time_window_start_time = time_window_start_time\r\n self.time_window_end_day = time_window_end_day\r\n self.time_window_end_time = time_window_end_time\r\n self.time_increment = time_increment\r\n\r\n self.same_origins_destinations = bool(self.origins == self.destinations)\r\n\r\n self.max_origins = self.chunk_size\r\n self.max_destinations = self.chunk_size\r\n\r\n self.is_service = AnalysisHelpers.is_nds_service(self.network_data_source)\r\n self.service_limits = None\r\n self.is_agol = False", "title": "" }, { "docid": "1c95da2bcb5066471994bae71deeb8c6", "score": "0.44559264", "text": "def __init__(__self__, *,\n batch_transform_input: Optional['outputs.MonitoringScheduleBatchTransformInput'] = None,\n endpoint_input: Optional['outputs.MonitoringScheduleEndpointInput'] = None):\n if batch_transform_input is not None:\n pulumi.set(__self__, \"batch_transform_input\", batch_transform_input)\n if endpoint_input is not None:\n pulumi.set(__self__, \"endpoint_input\", endpoint_input)", "title": "" }, { "docid": "a48297a626c60ee6a441b0401755a48b", "score": "0.44495508", "text": "def _create_target_points_for_interp(storm_object_table, lead_times_seconds):\n\n if numpy.any(lead_times_seconds > 0):\n storm_speeds_m_s01, storm_bearings_deg = (\n geodetic_utils.xy_to_scalar_displacements_and_bearings(\n x_displacements_metres=\n storm_object_table[tracking_utils.EAST_VELOCITY_COLUMN].values,\n y_displacements_metres=\n storm_object_table[tracking_utils.NORTH_VELOCITY_COLUMN].values)\n )\n\n num_storm_objects = len(storm_object_table.index)\n num_lead_times = len(lead_times_seconds)\n list_of_target_point_tables = [None] * num_lead_times\n\n for i in range(num_lead_times):\n if lead_times_seconds[i] == 0:\n list_of_target_point_tables[i] = storm_object_table[[\n tracking_utils.FULL_ID_COLUMN, tracking_utils.VALID_TIME_COLUMN,\n tracking_utils.CENTROID_LATITUDE_COLUMN,\n tracking_utils.CENTROID_LONGITUDE_COLUMN,\n tracking_utils.EAST_VELOCITY_COLUMN,\n tracking_utils.NORTH_VELOCITY_COLUMN\n ]]\n\n argument_dict = {\n LEAD_TIME_KEY: numpy.full(num_storm_objects, 0, dtype=int),\n FORECAST_TIME_COLUMN: list_of_target_point_tables[i][\n tracking_utils.VALID_TIME_COLUMN].values\n }\n\n list_of_target_point_tables[i] = (\n list_of_target_point_tables[i].assign(**argument_dict)\n )\n\n if i == 0:\n continue\n\n list_of_target_point_tables[i] = (\n list_of_target_point_tables[i].align(\n list_of_target_point_tables[0], axis=1\n )[0]\n )\n\n continue\n\n these_extrap_latitudes_deg, these_extrap_longitudes_deg = (\n geodetic_utils.start_points_and_displacements_to_endpoints(\n start_latitudes_deg=storm_object_table[\n tracking_utils.CENTROID_LATITUDE_COLUMN].values,\n start_longitudes_deg=storm_object_table[\n tracking_utils.CENTROID_LONGITUDE_COLUMN].values,\n scalar_displacements_metres=\n storm_speeds_m_s01 
* lead_times_seconds[i],\n geodetic_bearings_deg=storm_bearings_deg)\n )\n\n these_times_unix_sec = (\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values +\n lead_times_seconds[i]\n )\n\n this_dict = {\n tracking_utils.FULL_ID_COLUMN:\n storm_object_table[tracking_utils.FULL_ID_COLUMN].values,\n tracking_utils.VALID_TIME_COLUMN:\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values,\n tracking_utils.CENTROID_LATITUDE_COLUMN: these_extrap_latitudes_deg,\n tracking_utils.CENTROID_LONGITUDE_COLUMN:\n these_extrap_longitudes_deg,\n FORECAST_TIME_COLUMN: these_times_unix_sec,\n tracking_utils.EAST_VELOCITY_COLUMN:\n storm_object_table[tracking_utils.EAST_VELOCITY_COLUMN].values,\n tracking_utils.NORTH_VELOCITY_COLUMN:\n storm_object_table[tracking_utils.NORTH_VELOCITY_COLUMN].values,\n LEAD_TIME_KEY: numpy.full(\n num_storm_objects, lead_times_seconds[i], dtype=int)\n }\n\n list_of_target_point_tables[i] = pandas.DataFrame.from_dict(this_dict)\n if i == 0:\n continue\n\n list_of_target_point_tables[i] = list_of_target_point_tables[i].align(\n list_of_target_point_tables[0], axis=1\n )[0]\n\n target_point_table = pandas.concat(\n list_of_target_point_tables, axis=0, ignore_index=True)\n\n column_dict_old_to_new = {\n tracking_utils.VALID_TIME_COLUMN: INITIAL_TIME_COLUMN\n }\n\n return target_point_table.rename(\n columns=column_dict_old_to_new, inplace=False)", "title": "" }, { "docid": "01df15f52e6e9d55e9749aa9bf87a347", "score": "0.4444951", "text": "def _calculate(self):\n if not self._inbound.empty():\n input = self._inbound.get_nowait()\n\n if self._continuous:\n self._error = self._get_continuous_error(self._setpoint - input)\n else:\n self._error = self._setpoint - input\n \n if self._control_mode == \"Velocity\":\n self._outbound.put_nowait(self._velocity_calculation())\n else:\n self._outbound.put_nowait(self._position_calculation())\n \n self._prev_error = self._error\n self._on_target()\n \n else:\n self._outbound.put_nowait(self._prev_error)", "title": "" }, { "docid": "b068832e0fa02ecf6cf93fb5958b5af4", "score": "0.44426098", "text": "def transform_points(self, points):\n raise NotImplementedError()", "title": "" }, { "docid": "2acd9b957c4efde720bcac020c184c22", "score": "0.44363397", "text": "def main(**kwargs):\n\n max_time = session.query(func.max(kwargs['old_obj'].timestamp)).scalar().strftime('%Y-%m-%d %H:%M:%S')\n min_time = session.query(func.min(kwargs['old_obj'].timestamp)).scalar().strftime('%Y-%m-%d %H:%M:%S')\n tz = pytz.timezone('America/Los_Angeles')\n time_df = pd.date_range(start=min_time, end=max_time, freq='1H')\n time_list = sorted(list(set([tz.localize(x) for x in time_df])))\n kwargs['times'] = time_list\n print(len(time_list))\n\n if not kwargs['if_all_grids_meo']:\n\n target_aq_obj = kwargs['target_aq_obj']\n kwargs['locations'] = sorted([loc[0] for loc in session.query(target_aq_obj.gid).distinct().all()])\n\n \"\"\" !!! 
Be careful, create table would overwrite the original table \"\"\"\n create_table(kwargs['target_obj'])\n interpolate_space(**kwargs)", "title": "" }, { "docid": "0936978951612215455c8b90eddf13f1", "score": "0.4434935", "text": "def __init__(self, points):\n self.points = points", "title": "" }, { "docid": "df3dc69eb70d3e8d71ac624148f5acd5", "score": "0.44334844", "text": "def _evaluate(\n self,\n eval_points: np.ndarray,\n *,\n aligned: bool = True,\n ) -> np.ndarray:\n pass", "title": "" }, { "docid": "d78d481c3f5df75a5495af9add879414", "score": "0.44281045", "text": "def init_point_x_y_z(self):\n self.point_x = LazyLoadDict(get_metadata(\"point_x\"))\n self.point_x.set_lazy(\"data\", _point_data_factory(self, \"x\"))\n\n self.point_y = LazyLoadDict(get_metadata(\"point_y\"))\n self.point_y.set_lazy(\"data\", _point_data_factory(self, \"y\"))\n\n self.point_z = LazyLoadDict(get_metadata(\"point_z\"))\n self.point_z.set_lazy(\"data\", _point_data_factory(self, \"z\"))", "title": "" }, { "docid": "2f86abf7351596a7c2fcd4b3786e0cff", "score": "0.44265202", "text": "def build_live_intervals(p: Dict[str, x86.Program]) -> Tuple[Dict[str, x86.Program], Dict[str, List[LiveInterval]]]:\n\n def build_live_intervals_def(p: x86.Program) -> List[LiveInterval]:\n live_sets: List[Set[x86.Var]] = build_live_sets_def(p)\n live_intervals: List[LiveInterval] = []\n\n for line_num, live_set in enumerate(live_sets):\n if live_set is None:\n continue\n\n # For all vars in the live_set, update their endpoint in live_intervals\n # If they are not yet in live_intervals, add a new LiveIntervals for that var\n for var in live_set:\n for live_interval in live_intervals:\n if live_interval.var == var:\n live_interval.endpoint = line_num\n break # If this break is not reached, else \\/ will execute\n else:\n # This var is not yet in the live_set\n # Make new LiveInterval with startpoint and endpoint as current line_num\n live_intervals.append(LiveInterval(var, line_num, line_num))\n\n return live_intervals\n\n def build_live_sets_def(p: x86.Program) -> List[Set[x86.Var]]:\n assert len(p.blocks) == 1\n\n block: Tuple[str, List[x86.Instr]] = [(name, instrs) for name, instrs in p.blocks.items()][0]\n instrs: List[x86.Instr] = block[1]\n\n live_sets = []\n for instr in instrs:\n live_sets.append(build_live_sets_instr(instr))\n\n return live_sets\n\n def build_live_sets_instr(instr: x86.Instr) -> Set[x86.Var]:\n # TODO These types may need changing (?)\n if isinstance(instr, (x86.Movq, x86.Addq, x86.Cmpq, x86.Movzbq, x86.Xorq, x86.Leaq)):\n return build_live_sets_arg(instr.e1).union(build_live_sets_arg(instr.e2))\n elif isinstance(instr, (x86.Set, x86.Negq, x86.TailJmp, x86.IndirectCallq)):\n return build_live_sets_arg(instr.e1)\n\n def build_live_sets_arg(a: x86.Arg) -> Set[x86.Var]:\n if isinstance(a, (x86.Int, x86.Reg, x86.ByteReg, x86.GlobalVal, x86.Deref, x86.FunRef)):\n return set()\n elif isinstance(a, x86.Var):\n return {a}\n else:\n raise ValueError('live_sets_arg linear_scan_help', a)\n\n return p, {name: build_live_intervals_def(prog) for name, prog in p.items()}", "title": "" }, { "docid": "904f222fe2fe43f72c4406180a1d9ef6", "score": "0.44249663", "text": "def __init__(__self__, *,\n advanced_schedule: Optional[pulumi.Input['AdvancedScheduleArgs']] = None,\n creation_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n expiry_time: Optional[pulumi.Input[str]] = None,\n expiry_time_offset_minutes: Optional[pulumi.Input[float]] = None,\n frequency: 
Optional[pulumi.Input[Union[str, 'ScheduleFrequency']]] = None,\n interval: Optional[pulumi.Input[float]] = None,\n is_enabled: Optional[pulumi.Input[bool]] = None,\n last_modified_time: Optional[pulumi.Input[str]] = None,\n next_run: Optional[pulumi.Input[str]] = None,\n next_run_offset_minutes: Optional[pulumi.Input[float]] = None,\n start_time: Optional[pulumi.Input[str]] = None,\n time_zone: Optional[pulumi.Input[str]] = None):\n if advanced_schedule is not None:\n pulumi.set(__self__, \"advanced_schedule\", advanced_schedule)\n if creation_time is not None:\n pulumi.set(__self__, \"creation_time\", creation_time)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if expiry_time is not None:\n pulumi.set(__self__, \"expiry_time\", expiry_time)\n if expiry_time_offset_minutes is not None:\n pulumi.set(__self__, \"expiry_time_offset_minutes\", expiry_time_offset_minutes)\n if frequency is not None:\n pulumi.set(__self__, \"frequency\", frequency)\n if interval is not None:\n pulumi.set(__self__, \"interval\", interval)\n if is_enabled is None:\n is_enabled = False\n if is_enabled is not None:\n pulumi.set(__self__, \"is_enabled\", is_enabled)\n if last_modified_time is not None:\n pulumi.set(__self__, \"last_modified_time\", last_modified_time)\n if next_run is not None:\n pulumi.set(__self__, \"next_run\", next_run)\n if next_run_offset_minutes is not None:\n pulumi.set(__self__, \"next_run_offset_minutes\", next_run_offset_minutes)\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)\n if time_zone is not None:\n pulumi.set(__self__, \"time_zone\", time_zone)", "title": "" }, { "docid": "e2479eac143186f54f167af89d23851e", "score": "0.44247654", "text": "def calculate_from_parsed_field(self,\n parsed_field,\n destinations\n ):\n raise NotImplementedError()", "title": "" }, { "docid": "3f29446be9a008a42a2b52c316ebfa3c", "score": "0.441841", "text": "def _check_for_missing_scheduled_advices():\n LOGGER.info(\"Checking for missing scheduled pre-settlement advices...\")\n PreSettlementAdviceBusinessProcessCreator().create_missing_scheduled_advice_business_processes()", "title": "" }, { "docid": "2ac59d3c5566ed7688c4c165202fe6da", "score": "0.44145715", "text": "def prepare_services(self):\n monthly_data = self.Scenario['monthly_data']\n time_series = self.Scenario['time_series']\n dt = self.Scenario['dt']\n timeseries_columns = time_series.columns\n monthly_columns = monthly_data.columns\n freq = self.Scenario['frequency']\n\n if self.Deferral is not None:\n if 'Deferral Load (kW)' not in timeseries_columns:\n e_logger.error('Error: Please include a monthly fuel price.')\n raise Exception(\"Missing 'Deferral Load (kW)' from timeseries input\")\n self.POI.update({'deferral': time_series.loc[:, \"Deferral Load (kW)\"],\n 'deferral_growth': self.Deferral['growth']})\n self.Deferral[\"last_year\"] = self.Scenario[\"end_year\"]\n self.Deferral.update({'load': time_series.loc[:, 'Deferral Load (kW)']})\n\n if self.Volt is not None:\n if 'Vars Reservation (%)' not in timeseries_columns:\n e_logger.error('Error: Please include the vars reservation as percent (from 0 to 100) of inverter max.')\n raise Exception(\"Missing 'Vars Reservation (%)' from timeseries input\")\n # make sure that vars reservation inputs are between 0 and 100\n percent = time_series.loc[:, 'VAR Reservation (%)']\n if len([*filter(lambda x: x >= 30, percent.value)]):\n e_logger.error('Error: Please include the vars reservation as percent (from 0 to 100) of inverter 
max.')\n raise Exception(\"Value error within 'Vars Reservation (%)' timeseries\")\n self.Volt['percent'] = percent\n\n included_system_load = False\n\n if self.RA is not None:\n if 'active hours' in self.RA['idmode'].lower():\n # if using active hours, require the column\n if 'RA Active (y/n)' not in timeseries_columns:\n e_logger.error('Error: Please include when RA is active.')\n raise Exception(\"Missing 'RA Active (y/n)' from timeseries input\")\n self.RA['active'] = time_series.loc[:, 'RA Active (y/n)']\n if 'RA Capacity Price ($/kW)' not in monthly_columns:\n e_logger.error('Error: Please include monthly RA price.')\n raise Exception(\"Missing 'RA Price ($/kW)' from monthly CSV input\")\n\n if 'System Load (kW)' not in timeseries_columns:\n e_logger.error('Error: Please include a system load.')\n raise Exception(\"Missing 'System Load (kW)' from timeseries input\")\n included_system_load = True # flag so we dont check for system load again\n self.RA.update({'value': monthly_data.loc[:, 'RA Capacity Price ($/kW)'],\n 'default_growth': self.Scenario['def_growth'], # applied to system load if 'forecasting' load\n 'system_load': time_series.loc[:, \"System Load (kW)\"],\n 'active': time_series.loc[:, 'RA Active (y/n)'],\n 'dt': dt})\n\n if self.DR is not None:\n if 'DR Capacity Price ($/kW)' not in monthly_columns:\n e_logger.error('Error: Please include DR capcity prices.')\n raise Exception(\"Missing 'DR Capacity Price ($/kW)' from monthly input\")\n if 'DR Energy Price ($/kWh)' not in monthly_columns:\n e_logger.error('Error: Please include DR energy prices.')\n raise Exception(\"Missing 'DR Energy Price ($/kWh)' from monthly input\")\n if 'DR Months (y/n)' not in monthly_columns:\n e_logger.error('Error: Please include DR months.')\n raise Exception(\"Missing 'DR Months (y/n)' from monthly input\")\n if 'DR Capacity (kW)' not in monthly_columns:\n e_logger.error('Error: Please include a DR capacity.')\n raise Exception(\"Missing 'DR Capacity (kW)' from monthly input\")\n if 'Site Load (kW)' not in timeseries_columns:\n e_logger.error('Error: Please include a site load.')\n raise Exception(\"Missing 'Site Load (kW)' from timeseries input\")\n\n # self.Scenario['incl_site_load'] = 1\n\n # if we havent checked for a system load already, check now\n if not included_system_load and 'System Load (kW)' not in timeseries_columns:\n e_logger.error('Error: Please include a system load.')\n raise Exception(\"Missing 'System Load (kW)' from timeseries input\")\n self.DR.update({'cap_price': monthly_data.loc[:, 'DR Capacity Price ($/kW)'],\n 'ene_price': monthly_data.loc[:, 'DR Energy Price ($/kWh)'],\n 'dr_months': self.monthly_to_timeseries(freq, monthly_data.loc[:, ['DR Months (y/n)']]),\n 'dr_cap': self.monthly_to_timeseries(freq, monthly_data.loc[:, ['DR Capacity (kW)']]),\n 'cap_monthly': monthly_data.loc[:, 'DR Capacity (kW)'],\n 'default_growth': self.Scenario['def_growth'],\n 'system_load': time_series.loc[:, \"System Load (kW)\"],\n 'site_load': time_series.loc[:, \"Site Load (kW)\"],\n 'dt': dt})\n\n if included_system_load:\n self.POI.update({'system': time_series.loc[:, \"System Load (kW)\"]})\n\n if self.Backup is not None:\n if 'Backup Price ($/kWh)' not in monthly_columns:\n e_logger.error('Error: Please include Backup Price in monthly data csv.')\n raise Exception(\"Missing 'Backup Price ($/kWh)' from monthly data input\")\n if 'Backup Energy (kWh)' not in monthly_columns:\n e_logger.error('Error: Please include Backup Energy in monthly data csv.')\n raise Exception(\"Missing 
'Backup Energy (kWh)' from monthly data input\")\n self.Backup.update({'daily_energy': self.monthly_to_timeseries(freq, monthly_data.loc[:, ['Backup Energy (kWh)']]),\n 'monthly_price': monthly_data.loc[:, 'Backup Price ($/kWh)'],\n 'monthly_energy': monthly_data.loc[:, 'Backup Energy (kWh)']})\n\n if self.SR is not None:\n self.SR.update({'price': time_series.loc[:, 'SR Price ($/kW)'],\n 'dis_power': self.Battery['dis_max_rated']})\n\n if self.NSR is not None:\n self.NSR.update({'price': time_series.loc[:, 'NSR Price ($/kW)'],\n 'dis_power': self.Battery['dis_max_rated']})\n\n if self.DA is not None:\n self.DA.update({'price': time_series.loc[:, 'DA Price ($/kWh)']})\n\n if self.FR is not None:\n if self.FR['CombinedMarket']:\n self.FR.update({'regu_price': np.divide(time_series.loc[:, 'FR Price ($/kW)'], 2),\n 'regd_price': np.divide(time_series.loc[:, 'FR Price ($/kW)'], 2),\n 'energy_price': time_series.loc[:, 'DA Price ($/kWh)']})\n else:\n self.FR.update({'regu_price': time_series.loc[:, 'Reg Up Price ($/kW)'],\n 'regd_price': time_series.loc[:, 'Reg Down Price ($/kW)'],\n 'energy_price': time_series.loc[:, 'DA Price ($/kWh)']})\n\n if self.LF is not None:\n if self.LF['CombinedMarket']:\n self.LF.update({'lf_up_price': np.divide(time_series.loc[:, 'LF Price ($/kW)'], 2),\n 'lf_do_price': np.divide(time_series.loc[:, 'LF Price ($/kW)'], 2),\n 'lf_offset': time_series.loc[:, 'LF Offset (kW)'],\n 'energy_price': time_series.loc[:, 'DA Price ($/kWh)']})\n else:\n self.LF.update({'lf_up_price': time_series.loc[:, 'LF Up Price ($/kW)'],\n 'lf_do_price': time_series.loc[:, 'LF Down Price ($/kW)'],\n 'lf_offset': time_series.loc[:, 'LF Offset (kW)'],\n 'energy_price': time_series.loc[:, 'DA Price ($/kWh)']})\n\n if self.User is not None:\n # check to make sure the user included at least one of the custom constraints\n input_cols = ['Power Max (kW)', 'Power Min (kW)', 'Energy Max (kWh)', 'Energy Min (kWh)']\n if not time_series.columns.isin(input_cols).any():\n e_logger.error(\"User has inputted an invalid constraint for Storage. Please change and run again.\")\n raise Exception(\"User has inputted an invalid constraint for Storage. 
Please change and run again.\")\n self.User.update({'constraints': time_series.loc[:, list(np.intersect1d(input_cols, list(time_series)))]})\n\n if self.Scenario['customer_sided']:\n retail_prices = Financial.calc_retail_energy_price(self.Finance['customer_tariff'], self.Scenario['frequency'], self.Scenario['opt_years'])\n if self.retailTimeShift is not None:\n self.retailTimeShift.update({'price': retail_prices.loc[:, 'p_energy'],\n 'tariff': self.Finance['customer_tariff'].loc[\n self.Finance['customer_tariff'].Charge.apply((lambda x: x.lower())) == 'energy', :]})\n\n if self.DCM is not None:\n self.DCM.update({'tariff': self.Finance['customer_tariff'].loc[\n self.Finance['customer_tariff'].Charge.apply((lambda x: x.lower())) == 'demand', :],\n 'billing_period': retail_prices.loc[:, 'billing_period']})\n\n u_logger.info(\"Successfully prepared the value-stream (services)\")", "title": "" }, { "docid": "da75bcd9f7908014fb745a69c3ea7f58", "score": "0.4411088", "text": "def make_virtual_point_evaluator(self) -> VirtualPointEvaluator:\n normal = self.normal\n axis = self.axis\n size = self.grid.shape[self.axis]\n get_arr_1d = _make_get_arr_1d(self.grid.num_axes, self.axis)\n\n if size < 2:\n raise ValueError(\n f\"Need two support points along axis {self.axis} to apply conditions\"\n )\n\n # calculate necessary constants\n data = self.get_virtual_point_data()\n\n if self.homogeneous:\n\n @register_jitable\n def virtual_point(arr: np.ndarray, idx: Tuple[int, ...], args=None):\n \"\"\"evaluate the virtual point at `idx`\"\"\"\n arr_1d, _, _ = get_arr_1d(arr, idx)\n if normal:\n val1 = arr_1d[..., axis, data[2]]\n val2 = arr_1d[..., axis, data[4]]\n else:\n val1 = arr_1d[..., data[2]]\n val2 = arr_1d[..., data[4]]\n return data[0] + data[1] * val1 + data[3] * val2\n\n else:\n\n @register_jitable\n def virtual_point(arr: np.ndarray, idx: Tuple[int, ...], args=None):\n \"\"\"evaluate the virtual point at `idx`\"\"\"\n arr_1d, _, bc_idx = get_arr_1d(arr, idx)\n if normal:\n val1 = arr_1d[..., axis, data[2]]\n val2 = arr_1d[..., axis, data[4]]\n else:\n val1 = arr_1d[..., data[2]]\n val2 = arr_1d[..., data[4]]\n return data[0][bc_idx] + data[1][bc_idx] * val1 + data[3][bc_idx] * val2\n\n return virtual_point # type: ignore", "title": "" }, { "docid": "525af63e1e5f4cff355180d9da445faa", "score": "0.4408034", "text": "def __init__(self, schedule, steps_per_epoch, base_learning_rate):\n super().__init__()\n self._schedule = schedule\n self._steps_per_epoch = steps_per_epoch\n self._base_learning_rate = base_learning_rate", "title": "" }, { "docid": "8ab59bfbf459866e5e356b6805c1b3ac", "score": "0.4400647", "text": "def make_virtual_point_evaluator(self) -> VirtualPointEvaluator:\n dx = self.grid.discretization[self.axis]\n num_axes = self.grid.num_axes\n get_arr_1d = _make_get_arr_1d(num_axes, self.axis)\n bc_coords = self.grid._boundary_coordinates(axis=self.axis, upper=self.upper)\n bc_coords = np.moveaxis(bc_coords, -1, 0) # point coordinates to first axis\n assert num_axes <= 3\n\n if self._is_func:\n warn_if_time_not_set = False\n func = jit(self._func)\n else:\n warn_if_time_not_set = self._func.depends_on(\"t\")\n func = self._func.get_compiled()\n\n @register_jitable\n def virtual_point(arr: np.ndarray, idx: Tuple[int, ...], args=None) -> float:\n \"\"\"evaluate the virtual point at `idx`\"\"\"\n _, _, bc_idx = get_arr_1d(arr, idx)\n grid_value = arr[idx]\n coords = bc_coords[bc_idx]\n\n # extract time for handling time-dependent BCs\n if args is None or \"t\" not in args:\n if 
warn_if_time_not_set:\n raise RuntimeError(\n \"Require value for `t` for time-dependent BC. The value must \"\n \"be passed explicitly via `args` when calling a differential \"\n \"operator.\"\n )\n t = 0.0\n else:\n t = float(args[\"t\"])\n\n if num_axes == 1:\n return func(grid_value, dx, coords[0], t) # type: ignore\n elif num_axes == 2:\n return func(grid_value, dx, coords[0], coords[1], t) # type: ignore\n elif num_axes == 3:\n return func(grid_value, dx, coords[0], coords[1], coords[2], t) # type: ignore\n else:\n # cheap way to signal a problem\n return math.nan\n\n return virtual_point # type: ignore", "title": "" }, { "docid": "24c578a1e2dcc48ac274dba6d4d12fbf", "score": "0.43989366", "text": "def _compute_structure(self, points, dirarg, stepsize=False):\n pot_range_grid = [self.star.mesh.coords['pots'].min(), self.star.mesh.coords['pots'].max()]\n pots = self._compute_potentials(points, self.star.q, self._interp_funcs['bbT1'], self._interp_funcs['bbT2'], pot_range_grid)\n pots2 = pots / self.star.q + 0.5 * (self.star.q - 1) / self.star.q\n\n if stepsize:\n chis = np.zeros(len(pots))\n else:\n chis = np.zeros(len(pots))\n Ss = np.zeros(len(pots))\n Is = np.zeros(len(pots))\n\n if isinstance(self.star.mesh, SphericalMesh):\n \n grid_prim, grid_sec, le_prim, le_sec = self._compute_interp_regions(pots=pots,points=points,pot_range_grid=pot_range_grid)\n # thetas and phis are returned for all points (zeros out of grid)... maybe fix this?\n thetas, phis = self._compute_coords_for_interpolation(points, grid_prim=grid_prim, grid_sec=grid_sec)\n\n if stepsize:\n chis[grid_prim] = self._interp_funcs['chi1']((pots[grid_prim], thetas[grid_prim], phis[grid_prim]))\n chis[grid_sec] = self._interp_funcs['chi2']((pots[grid_sec], thetas[grid_sec], phis[grid_sec]))\n\n else:\n chis[grid_prim] = self._interp_funcs['chi1']((pots[grid_prim], thetas[grid_prim], phis[grid_prim]))\n chis[grid_sec] = self._interp_funcs['chi2']((pots[grid_sec], thetas[grid_sec], phis[grid_sec]))\n Ss[grid_prim] = self._interp_funcs['S1']((pots[grid_prim], thetas[grid_prim], phis[grid_prim]))\n Ss[grid_sec] = self._interp_funcs['S2']((pots[grid_sec], thetas[grid_sec], phis[grid_sec]))\n Is[grid_prim] = self._interp_funcs['I1'][dirarg]((pots[grid_prim], thetas[grid_prim], phis[grid_prim]))\n Is[grid_sec] = self._interp_funcs['I2'][dirarg]((pots[grid_sec], thetas[grid_sec], phis[grid_sec]))\n\n elif isinstance(self.star.mesh, CylindricalMesh):\n\n grid, le_prim, le_sec = self._compute_interp_regions(pots=pots,points=points,pot_range_grid=pot_range_grid)\n # here xnorms and thetas are only those pertaining to grid points\n xnorms, thetas = self._compute_coords_for_interpolation(points, grid=grid, pots=pots)\n \n if stepsize:\n chis[grid] = self._interp_funcs['chi']((pots[grid], xnorms, thetas))\n \n else:\n chis[grid] = self._interp_funcs['chi']((pots, xnorms, thetas))\n Ss[grid] = self._interp_funcs['S']((pots, xnorms, thetas))\n Is[grid] = self._interp_funcs['I'][dirarg]((pots, xnorms, thetas))\n\n else:\n raise ValueError('Geometry not supported with rt_method cobain')\n\n rhos1 = self._interp_funcs['bbrho1'](pots[le_prim])*self.star.structure.default_units['rho']\n rhos2 = self._interp_funcs['bbrho2'](pots2[le_sec])*self.star.structure.default_units['rho']\n Ts1 = self._interp_funcs['bbT1'](pots[le_prim])*self.star.structure.default_units['T']\n Ts2 = self._interp_funcs['bbT2'](pots2[le_sec])*self.star.structure.default_units['T']\n\n if stepsize:\n \n chis[le_prim] = 
self.star.atmosphere._compute_absorption_coefficient(rhos1, Ts1)\n chis[le_sec] = self.star.atmosphere._compute_absorption_coefficient(rhos2, Ts2)\n\n return chis\n\n else:\n\n chis[le_prim] = self.star.atmosphere._compute_absorption_coefficient(rhos1, Ts1)\n chis[le_sec] = self.star.atmosphere._compute_absorption_coefficient(rhos2, Ts2)\n Ss[le_prim] = Is[le_prim] = self.star.atmosphere._compute_source_function(Ts1)\n Ss[le_sec] = Is[le_sec] = self.star.atmosphere._compute_source_function(Ts2)\n \n return chis, Ss, Is", "title": "" }, { "docid": "16ecf5b6ad2a46f93b6a79ce75c77524", "score": "0.43955338", "text": "def point_to_point(\n sys, timepts, x0=0, u0=0, xf=0, uf=0, T0=0, cost=None, basis=None,\n trajectory_constraints=None, initial_guess=None, params=None, **kwargs):\n #\n # Make sure the problem is one that we can handle\n #\n x0 = _check_convert_array(x0, [(sys.nstates,), (sys.nstates, 1)],\n 'Initial state: ', squeeze=True)\n u0 = _check_convert_array(u0, [(sys.ninputs,), (sys.ninputs, 1)],\n 'Initial input: ', squeeze=True)\n xf = _check_convert_array(xf, [(sys.nstates,), (sys.nstates, 1)],\n 'Final state: ', squeeze=True)\n uf = _check_convert_array(uf, [(sys.ninputs,), (sys.ninputs, 1)],\n 'Final input: ', squeeze=True)\n\n # Process final time\n timepts = np.atleast_1d(timepts)\n Tf = timepts[-1]\n T0 = timepts[0] if len(timepts) > 1 else T0\n\n # Process keyword arguments\n if trajectory_constraints is None:\n # Backwards compatibility\n trajectory_constraints = kwargs.pop('constraints', None)\n\n minimize_kwargs = {}\n minimize_kwargs['method'] = kwargs.pop('minimize_method', None)\n minimize_kwargs['options'] = kwargs.pop('minimize_options', {})\n minimize_kwargs.update(kwargs.pop('minimize_kwargs', {}))\n\n if kwargs:\n raise TypeError(\"unrecognized keywords: \", str(kwargs))\n\n #\n # Determine the basis function set to use and make sure it is big enough\n #\n\n # If no basis set was specified, use a polynomial basis (poor choice...)\n if basis is None:\n basis = PolyFamily(2 * (sys.nstates + sys.ninputs))\n\n # If a multivariable basis was given, make sure the size is correct\n if basis.nvars is not None and basis.nvars != sys.ninputs:\n raise ValueError(\"size of basis does not match flat system size\")\n\n # Make sure we have enough basis functions to solve the problem\n ncoefs = sum([basis.var_ncoefs(i) for i in range(sys.ninputs)])\n if ncoefs < 2 * (sys.nstates + sys.ninputs):\n raise ValueError(\"basis set is too small\")\n elif (cost is not None or trajectory_constraints is not None) and \\\n ncoefs == 2 * (sys.nstates + sys.ninputs):\n warnings.warn(\"minimal basis specified; optimization not possible\")\n cost = None\n trajectory_constraints = None\n\n # Figure out the parameters to use, if any\n params = sys.params if params is None else params\n\n #\n # Map the initial and final conditions to flat output conditions\n #\n # We need to compute the output \"flag\": [z(t), z'(t), z''(t), ...]\n # and then evaluate this at the initial and final condition.\n #\n\n zflag_T0 = sys.forward(x0, u0, params)\n zflag_Tf = sys.forward(xf, uf, params)\n\n #\n # Compute the matrix constraints for initial and final conditions\n #\n # This computation depends on the basis function we are using. 
It\n # essentially amounts to evaluating the basis functions and their\n # derivatives at the initial and final conditions.\n\n # Compute the flags for the initial and final states\n M_T0 = _basis_flag_matrix(sys, basis, zflag_T0, T0)\n M_Tf = _basis_flag_matrix(sys, basis, zflag_Tf, Tf)\n\n # Stack the initial and final matrix/flag for the point to point problem\n M = np.vstack([M_T0, M_Tf])\n Z = np.hstack([np.hstack(zflag_T0), np.hstack(zflag_Tf)])\n\n #\n # Solve for the coefficients of the flat outputs\n #\n # At this point, we need to solve the equation M alpha = zflag, where M\n # is the matrix constraints for initial and final conditions and zflag =\n # [zflag_T0; zflag_tf].\n #\n # If there are no constraints, then we just need to solve a linear\n # system of equations => use least squares. Otherwise, we have a\n # nonlinear optimal control problem with equality constraints => use\n # scipy.optimize.minimize().\n #\n\n # Start by solving the least squares problem\n # TODO: add warning if rank is too small\n alpha, residuals, rank, s = np.linalg.lstsq(M, Z, rcond=None)\n if rank < Z.size:\n warnings.warn(\"basis too small; solution may not exist\")\n\n if cost is not None or trajectory_constraints is not None:\n # Make sure that we have enough timepoints to evaluate\n if timepts.size < 3:\n raise ControlArgument(\n \"There must be at least three time points if trajectory\"\n \" cost or constraints are specified\")\n\n # Search over the null space to minimize cost/satisfy constraints\n N = sp.linalg.null_space(M)\n\n # Precompute the collocation matrix the defines the flag at timepts\n Mt_list = []\n for t in timepts[1:-1]:\n Mt_list.append(_basis_flag_matrix(sys, basis, zflag_T0, t))\n\n # Define a function to evaluate the cost along a trajectory\n def traj_cost(null_coeffs):\n # Add this to the existing solution\n coeffs = alpha + N @ null_coeffs\n\n # Evaluate the costs at the listed time points\n costval = 0\n for i, t in enumerate(timepts[1:-1]):\n M_t = Mt_list[i]\n\n # Compute flag at this time point\n zflag = (M_t @ coeffs).reshape(sys.ninputs, -1)\n\n # Find states and inputs at the time points\n x, u = sys.reverse(zflag, params)\n\n # Evaluate the cost at this time point\n costval += cost(x, u) * (timepts[i+1] - timepts[i])\n return costval\n\n # If no cost given, override with magnitude of the coefficients\n if cost is None:\n traj_cost = lambda coeffs: coeffs @ coeffs\n\n # Process the constraints we were given\n traj_constraints = trajectory_constraints\n if traj_constraints is None:\n traj_constraints = []\n elif isinstance(traj_constraints, tuple):\n # TODO: Check to make sure this is really a constraint\n traj_constraints = [traj_constraints]\n elif not isinstance(traj_constraints, list):\n raise TypeError(\"trajectory constraints must be a list\")\n\n # Process constraints\n minimize_constraints = []\n if len(traj_constraints) > 0:\n # Set up a nonlinear function to evaluate the constraints\n def traj_const(null_coeffs):\n # Add this to the existing solution\n coeffs = alpha + N @ null_coeffs\n\n # Evaluate the constraints at the listed time points\n values = []\n for i, t in enumerate(timepts[1:-1]):\n # Calculate the states and inputs for the flat output\n M_t = Mt_list[i]\n\n # Compute flag at this time point\n zflag = (M_t @ coeffs).reshape(sys.ninputs, -1)\n\n # Find states and inputs at the time points\n states, inputs = sys.reverse(zflag, params)\n\n # Evaluate the constraint function along the trajectory\n for type, fun, lb, ub in traj_constraints:\n if type 
== sp.optimize.LinearConstraint:\n # `fun` is A matrix associated with polytope...\n values.append(fun @ np.hstack([states, inputs]))\n elif type == sp.optimize.NonlinearConstraint:\n values.append(fun(states, inputs))\n else:\n raise TypeError(\n \"unknown constraint type %s\" % type)\n return np.array(values).flatten()\n\n # Store upper and lower bounds\n const_lb, const_ub = [], []\n for t in timepts[1:-1]:\n for type, fun, lb, ub in traj_constraints:\n const_lb.append(lb)\n const_ub.append(ub)\n const_lb = np.array(const_lb).flatten()\n const_ub = np.array(const_ub).flatten()\n\n # Store the constraint as a nonlinear constraint\n minimize_constraints = [sp.optimize.NonlinearConstraint(\n traj_const, const_lb, const_ub)]\n\n # Process the initial condition\n if initial_guess is None:\n initial_guess = np.zeros(M.shape[1] - M.shape[0])\n else:\n raise NotImplementedError(\"Initial guess not yet implemented.\")\n\n # Find the optimal solution\n res = sp.optimize.minimize(\n traj_cost, initial_guess, constraints=minimize_constraints,\n **minimize_kwargs)\n alpha += N @ res.x\n\n # See if we got an answer\n if not res.success:\n warnings.warn(\n \"unable to solve optimal control problem\\n\"\n f\"scipy.optimize.minimize: '{res.message}'\", UserWarning)\n\n #\n # Transform the trajectory from flat outputs to states and inputs\n #\n\n # Create a trajectory object to store the result\n systraj = SystemTrajectory(sys, basis, params=params)\n if cost is not None or trajectory_constraints is not None:\n # Store the result of the optimization\n systraj.cost = res.fun\n systraj.success = res.success\n systraj.message = res.message\n\n # Store the flag lengths and coefficients\n # TODO: make this more pythonic\n coef_off = 0\n for i in range(sys.ninputs):\n # Grab the coefficients corresponding to this flat output\n coef_len = basis.var_ncoefs(i)\n systraj.coeffs.append(alpha[coef_off:coef_off + coef_len])\n coef_off += coef_len\n\n # Keep track of the length of the flat flag for this output\n systraj.flaglen.append(len(zflag_T0[i]))\n\n # Return a function that computes inputs and states as a function of time\n return systraj", "title": "" }, { "docid": "5735789ef59b12806064773b7a8c8a23", "score": "0.43941176", "text": "def date_preproc(df, params=None):\n\n\n feats = pd.DataFrame(df['Id'])\n \n \n # Extract the first measurement from each station\n for station in range(52):\n station_cols = [col for col in df.columns if 'S'+str(station) in col]\n feats['Station'+str(station)+'StartTime'] = df[station_cols].min(axis=1).values\n \n date_cols = [col for col in df.columns if col not in ['Id']]\n \n # Generate various date based features\n feats['FirstTime'] = get_first_non_null(df[date_cols])\n feats['LastTime'] = get_last_non_null(df[date_cols])\n feats['StartTime'] = df[date_cols].min(axis=1).values\n feats['EndTime'] = df[date_cols].max(axis=1).values\n feats['Duration'] = feats['EndTime'] - feats['StartTime']\n feats['NATimes'] = np.isnan(df[date_cols]).sum(axis=1)\n\n # A time increment of 0.1 likely corresponds to 1 hr\n feats['StartTimeOfDay'] = (feats['StartTime'].multiply(10)) % 24\n feats['StartTimeOfWeek'] = (feats['StartTime'].multiply(10)) % 168\n feats['EndTimeOfDay'] = (feats['EndTime'].multiply(10)) % 24\n feats['EndTimeOfWeek'] = (feats['EndTime'].multiply(10)) % 168\n \n date_cols = [col for col in df.columns if 'L3' in col]\n \n # Extracts the same statistics for Line 3 specifically\n feats['L3FirstTime'] = get_first_non_null(df[date_cols])\n feats['L3LastTime'] = 
get_last_non_null(df[date_cols])\n feats['L3StartTime'] = df[date_cols].min(axis=1).values\n feats['L3EndTime'] = df[date_cols].max(axis=1).values\n feats['L3Duration'] = feats['EndTime'] - feats['StartTime']\n feats['L3NATimes'] = np.isnan(df[date_cols]).sum(axis=1)\n feats['L3StartTimeOfDay'] = (feats['StartTime'].multiply(10)) % 24\n feats['L3StartTimeOfWeek'] = (feats['StartTime'].multiply(10)) % 168\n feats['L3EndTimeOfDay'] = (feats['EndTime'].multiply(10)) % 24\n feats['L3EndTimeOfWeek'] = (feats['EndTime'].multiply(10)) % 168\n \n del df\n gc.collect()\n \n return feats", "title": "" }, { "docid": "c87612ea22aaf393934617beb3c9e239", "score": "0.4392335", "text": "def prepare_q4(rdd):\n def midpoint(x):\n \"\"\"Midpoint: lambda x: (x[0] + (x[1] - x[0]) /2).hour) ,\n where x = (start, end)\n \"\"\"\n start = string_to_time(get(x, 'Trip Start Timestamp'))\n end = string_to_time(get(x, 'Trip End Timestamp'))\n return start, end, (start + (start - end) / 2).hour\n\n def avg_speed(x):\n try:\n return ((get(x, 'Trip Miles') /\n (get(x, 'Trip Seconds'))) * 60) * 60\n except ZeroDivisionError:\n return 0\n\n Prepared = namedtuple(\n 'Prepared',\n ['fare', 'tips', 'avg_speed', 'start', 'end', 'midpoint', 'miles'])\n return rdd.map(lambda x: Prepared(get(x, 'Fare'), get(\n x, 'Tips'), avg_speed(x), *midpoint(x), get(x, 'Trip Miles')))", "title": "" }, { "docid": "679a953b2f6fe0bc07953a0c8a4faa34", "score": "0.43897444", "text": "def test_empty_dict_to_live_points(empty_live_point):\n np.testing.assert_array_equal(\n empty_live_point, lp.dict_to_live_points({\"x\": [], \"y\": [], \"z\": []})\n )", "title": "" }, { "docid": "77d59fbd39faea858394d169e7bd6656", "score": "0.43892932", "text": "def wx_for_location():\n lat = float(request.args['lat'])\n lon = float(request.args['lon'])\n\n if lat > 90 or lat < -90 or lon > 180 or lon < -180:\n abort(400)\n\n requested_metrics = request.args.getlist('metrics', int)\n\n if requested_metrics:\n metric_ids = set(requested_metrics)\n else:\n metric_ids = Metric.query.with_entities(Metric.id)\n\n now = datetime.now(pytz.UTC)\n start = request.args.get('start', type=int)\n end = request.args.get('end', type=int)\n\n if start is None:\n start = now - timedelta(hours=1)\n else:\n start = datetime.utcfromtimestamp(start).replace(tzinfo=pytz.UTC)\n\n if not app.debug:\n if start < now - timedelta(days=1):\n start = now - timedelta(days=1)\n\n if end is None:\n end = now + timedelta(hours=12)\n else:\n end = datetime.utcfromtimestamp(end).replace(tzinfo=pytz.UTC)\n\n if not app.debug:\n if end > now + timedelta(days=7):\n end = now + timedelta(days=7)\n\n requested_source_fields = SourceField.query.filter(\n SourceField.metric_id.in_(metric_ids),\n SourceField.projection_id != None, # noqa: E711\n ).all()\n\n with tracing.start_span(\"load_data_points\") as span:\n span.set_attribute(\"start\", str(start))\n span.set_attribute(\"end\", str(end))\n span.set_attribute(\"source_fields\", str(requested_source_fields))\n data_points = load_data_points((lat, lon), start, end, requested_source_fields)\n\n # valid time -> data points\n datas = collections.defaultdict(list)\n\n for dp in data_points:\n datas[datetime2unix(dp.valid_time)].append({\n 'run_time': datetime2unix(dp.run_time),\n 'src_field_id': dp.source_field_id,\n 'value': dp.median(),\n 'raw_values': dp.values,\n })\n\n wx = {\n 'data': datas,\n 'ordered_times': sorted(datas.keys()),\n }\n\n return jsonify(wx)", "title": "" }, { "docid": "88499f1b04133221543d01b3c6f91973", "score": "0.43851417", "text": 
"def _doCreateGeom(self, instancing):\n\n pass", "title": "" }, { "docid": "1e4c85aac5d69bd08a56de094c98ffc1", "score": "0.43736938", "text": "def test_point_location_easy(self):\n rate_grid1 = RateGrid(self.limits, [POINT_SOURCE],\n area_discretisation=4.0)\n self.assertTupleEqual(\n rate_grid1._get_point_location(Point(14.95, 14.95)),\n (0, 0))\n self.assertTupleEqual(\n rate_grid1._get_point_location(Point(14.95, 15.05)),\n (0, 1))\n self.assertTupleEqual(\n rate_grid1._get_point_location(Point(15.05, 14.95)),\n (1, 0))\n self.assertTupleEqual(\n rate_grid1._get_point_location(Point(15.05, 15.05)),\n (1, 1))", "title": "" }, { "docid": "b77f030ca86d8b164074c7f18615a022", "score": "0.4372981", "text": "def _setup_data_points(self) -> None:\n self._data_points_canada_wildfire = \\\n plot.get_data_points_wild_fires(self._data.wild_fires, 'Canada')\n self._data_points_america_wildfire = \\\n plot.get_data_points_wild_fires(self._data.wild_fires, 'America')\n self._data_points_canada_carbon = \\\n plot.get_data_points_carbon(self._data.carbon_emissions, 0)\n self._data_points_america_carbon = \\\n plot.get_data_points_carbon(self._data.carbon_emissions, 1)\n self._data_points_temp_deviance = \\\n plot.get_data_points_temp(self._data.temperature_deviation)", "title": "" }, { "docid": "be1ea4385536d84fed3b1aea96715415", "score": "0.43725216", "text": "def _setup(self) -> None:\n self.measure_points = [Vec3(point) for point in self.measure_points]\n dimlineray = ConstructionRay(self.dimlinepos, angle=radians(self.angle))\n self.dimline_points = [\n self._get_point_on_dimline(point, dimlineray)\n for point in self.measure_points\n ]\n self.point_order = self._indices_of_sorted_points(self.dimline_points)\n self._build_vectors()", "title": "" }, { "docid": "548a2b5600f6f9ef81d3c4cf8bca8900", "score": "0.43696374", "text": "def parse_system_provenance_items(self, label, x, y, p, provenance_data):\n (tx_overflow, cb_overload, dma_overload, user_overload, tic_overruns,\n tic_overrun_max) = provenance_data[:self.N_SYSTEM_PROVENANCE_WORDS]\n\n # save provenance data items\n with ProvenanceWriter() as db:\n db.insert_core(\n x, y, p, self._TIMES_TRANSMISSION_SPIKES_OVERRAN, tx_overflow)\n if tx_overflow != 0:\n db.insert_report(\n f\"The transmission buffer for {label} was blocked on \"\n f\"{tx_overflow} occasions. \"\n f\" This is often a sign that the system is experiencing \"\n f\"back pressure from the communication fabric. \"\n \"Please either: \"\n \"1. spread the load over more cores, \"\n \"2. reduce your peak transmission load, or \"\n \"3. adjust your mapping algorithm.\")\n\n db.insert_core(\n x, y, p, self._TIMES_CALLBACK_QUEUE_OVERLOADED, cb_overload)\n if cb_overload != 0:\n db.insert_report(\n f\"The callback queue for {label} overloaded on \"\n f\"{cb_overload} occasions. \"\n f\"This is often a sign that the system is running \"\n \"too quickly for the number of neurons per core. \"\n \"Please increase the machine time step or \"\n \"time_scale_factor \"\n \"or decrease the number of neurons per core.\")\n\n db.insert_core(\n x, y, p, self._TIMES_DMA_QUEUE_OVERLOADED, dma_overload)\n if dma_overload != 0:\n db.insert_report(\n f\"The DMA queue for {label} overloaded on {dma_overload} \"\n \"occasions. \"\n \"This is often a sign that the system is running \"\n \"too quickly for the number of neurons per core. 
\"\n \"Please increase the machine time step or \"\n \"time_scale_factor \"\n \"or decrease the number of neurons per core.\")\n\n db.insert_core(\n x, y, p, self._TIMES_USER_QUEUE_OVERLOADED, user_overload)\n if user_overload != 0:\n db.insert_report(\n f\"The USER queue for {label} overloaded on \"\n f\"{user_overload} occasions. \"\n f\"This is often a sign that the system is running too \"\n f\"quickly for the number of neurons per core. Please \"\n f\"increase the machine time step or time_scale_factor \"\n \"or decrease the number of neurons per core.\")\n\n db.insert_core(\n x, y, p, self._TIMER_TICK_OVERRUN, tic_overruns)\n if tic_overruns != 0:\n db.insert_report(\n f\"A Timer tick callback in {label} was still executing \"\n f\"when the next timer tick callback was fired off \"\n f\"{tic_overruns} times. \"\n f\"This is a sign of the system being overloaded and \"\n f\"therefore the results are likely incorrect. Please \"\n f\"increase the machine time step or time_scale_factor \"\n f\"or decrease the number of neurons per core\")\n\n db.insert_core(\n x, y, p, self._MAX_TIMER_TICK_OVERRUN, tic_overrun_max)\n if tic_overrun_max > 0:\n db.insert_report(\n f\"The timer for {label} fell behind by up to \"\n f\"{tic_overrun_max} ticks. This is a sign of the system \"\n f\"being overloaded and therefore the results are likely \"\n f\"incorrect. Please increase the machine time step or \"\n f\"time_scale_factor \"\n f\"or decrease the number of neurons per core\")", "title": "" }, { "docid": "ea52bb5bc16dd4cdfa6646e524b26267", "score": "0.4369035", "text": "def _resolve_input_parameters(self, spiketrains):\n def get_n_bins():\n n_bins = (self._t_stop - self._t_start) / self._bin_size\n if isinstance(n_bins, pq.Quantity):\n n_bins = n_bins.simplified.item()\n n_bins = round_binning_errors(n_bins, tolerance=self.tolerance)\n return n_bins\n\n def check_n_bins_consistency():\n if self.n_bins != get_n_bins():\n raise ValueError(\n \"Inconsistent arguments: t_start ({t_start}), \"\n \"t_stop ({t_stop}), bin_size ({bin_size}), and \"\n \"n_bins ({n_bins})\".format(\n t_start=self.t_start, t_stop=self.t_stop,\n bin_size=self.bin_size, n_bins=self.n_bins))\n\n def check_consistency():\n if self.t_start >= self.t_stop:\n raise ValueError(\"t_start must be smaller than t_stop\")\n if not isinstance(self.n_bins, int) or self.n_bins <= 0:\n raise TypeError(\"The number of bins ({}) must be a positive \"\n \"integer\".format(self.n_bins))\n\n if not _check_neo_spiketrain(spiketrains):\n # a binned numpy matrix\n self.__resolve_binned(spiketrains)\n self.units = self._bin_size.units\n check_n_bins_consistency()\n check_consistency()\n self._t_start = self._t_start.rescale(self.units).item()\n self._t_stop = self._t_stop.rescale(self.units).item()\n self._bin_size = self._bin_size.rescale(self.units).item()\n return\n\n if self._bin_size is None and self.n_bins is None:\n raise ValueError(\"Either 'bin_size' or 'n_bins' must be given\")\n\n try:\n check_neo_consistency(spiketrains,\n object_type=neo.SpikeTrain,\n t_start=self._t_start,\n t_stop=self._t_stop,\n tolerance=self.tolerance)\n except ValueError as er:\n # different t_start/t_stop\n raise ValueError(er, \"If you want to bin over the shared \"\n \"[t_start, t_stop] interval, provide \"\n \"shared t_start and t_stop explicitly, \"\n \"which can be obtained like so: \"\n \"t_start, t_stop = elephant.utils.\"\n \"get_common_start_stop_times(spiketrains)\"\n )\n\n if self._t_start is None:\n self._t_start = spiketrains[0].t_start\n if 
self._t_stop is None:\n self._t_stop = spiketrains[0].t_stop\n # At this point, all spiketrains share the same units.\n self.units = spiketrains[0].units\n\n # t_start and t_stop are checked to be time quantities in the\n # check_neo_consistency call.\n self._t_start = self._t_start.rescale(self.units).item()\n self._t_stop = self._t_stop.rescale(self.units).item()\n\n start_shared = max(st.t_start.rescale(self.units).item()\n for st in spiketrains)\n stop_shared = min(st.t_stop.rescale(self.units).item()\n for st in spiketrains)\n\n tolerance = self.tolerance\n if tolerance is None:\n tolerance = 0\n if self._t_start < start_shared - tolerance \\\n or self._t_stop > stop_shared + tolerance:\n raise ValueError(\"'t_start' ({t_start}) or 't_stop' ({t_stop}) is \"\n \"outside of the shared [{start_shared}, \"\n \"{stop_shared}] interval\".format(\n t_start=self.t_start, t_stop=self.t_stop,\n start_shared=start_shared,\n stop_shared=stop_shared))\n\n if self.n_bins is None:\n # bin_size is provided\n self._bin_size = self._bin_size.rescale(self.units).item()\n self.n_bins = get_n_bins()\n elif self._bin_size is None:\n # n_bins is provided\n self._bin_size = (self._t_stop - self._t_start) / self.n_bins\n else:\n # both n_bins are bin_size are given\n self._bin_size = self._bin_size.rescale(self.units).item()\n check_n_bins_consistency()\n\n check_consistency()", "title": "" }, { "docid": "4b919c59e9c64104ff6b572260ee2d89", "score": "0.43676943", "text": "def _clean_pts_violations():\n\n filename = conf['temp_data_dir'] + \"/*Panda_Extract_STW_*.csv\"\n list_of_files = glob.glob(filename)\n latest_file = max(list_of_files, key=os.path.getmtime)\n logging.info(f\"Reading in {latest_file}\")\n\n ptsv = pd.read_csv(latest_file,names=['INSP_ID',\n 'ASSESSOR_PARCEL_10',\n 'LATITUDE',\n 'LONGITUDE',\n 'STREET_ADDRESS',\n 'INSP_TYPE_ID',\n 'INSP_TYPE_NM',\n 'INSP_RESULT_ID',\n 'INSP_RESULT_NM',\n 'PERFORMED_END_DT',\n 'PROJ_TITLE',\n 'SCOPE',\n 'LOCATION_NOTE',\n 'CONSTRUCTION_NOTE'\n ],dtype={'LONGITUDE':np.float64,\n 'LATITUDE':np.float64,\n 'PARCEL_APN':str\n })\n\n ptsv['PARCEL_APN'] = ptsv.ASSESSOR_PARCEL_10\n ptsv['LON'] = ptsv.LONGITUDE\n ptsv['LAT'] = ptsv.LATITUDE\n ptsv['SRC'] = 'DSD_PTS'\n ptsv['TYPE'] = ptsv.INSP_TYPE_NM\n ptsv['STATUS'] = ptsv.INSP_RESULT_NM\n ptsv['UUID'] = (ptsv['SRC'] + '_' + ptsv['INSP_ID'].astype(str).str.replace('-', '_')).str.lower()\n ptsv['ADDRESS'] = ptsv['STREET_ADDRESS'].astype(str)\n ptsv['ISSUE_DATE'] = ptsv['PERFORMED_END_DT']\n ptsv['VIOLATOR'] = ptsv['PROJ_TITLE']\n ptsv['ADDITIONAL_1'] = ptsv['SCOPE']\n ptsv['ADDITIONAL_2'] = ptsv['CONSTRUCTION_NOTE']\n ptsv['COMPLY_BY'] = ''\n\n\n ptsv = ptsv[['UUID',\n 'SRC',\n 'TYPE',\n 'STATUS',\n 'ISSUE_DATE',\n 'COMPLY_BY',\n 'PARCEL_APN',\n 'LON',\n 'LAT',\n 'ADDRESS',\n 'VIOLATOR',\n 'ADDITIONAL_1',\n 'ADDITIONAL_2']]\n\n return ptsv", "title": "" }, { "docid": "6e38def1b9e4cb6259bef1c5b0550c7d", "score": "0.4367231", "text": "def __init__(self, p, **kw):\n _p = p\n if not isinstance(p, point.Point):\n _p = point.Point(p)\n super(VCLine, self).__init__(**kw)\n self.__keypoint = _p\n _p.storeUser(self)\n _p.connect('moved', self.__movePoint)\n _p.connect('change_pending', self.__pointChangePending)\n _p.connect('change_complete', self.__pointChangeComplete)", "title": "" }, { "docid": "e300c3d8c8b26c4665bce3a4595e419d", "score": "0.43652317", "text": "def __init__(\n self,\n geometry_type='POINT',\n srid=4326,\n dimension=2,\n spatial_index=True,\n management=False\n ):\n super().__init__(geometry_type, srid, 
dimension, spatial_index, management)", "title": "" }, { "docid": "cf7ae0068f0f4e9cabcae76b9c4f6fd4", "score": "0.4364095", "text": "def unparse_point_sources(roi,point_sources,emin,emax,**kwargs):\n point_dict = {}\n for ps in point_sources:\n\n name = str(ps.name)\n point_dict[name]={'TS value':'%g' % roi.TS(which=ps,**kwargs)}\n point_dict[name].update(unparse_spectral(ps.model,scaling=False))\n if ps.model.has_errors():\n point_dict[name]['Flux']='%g +/- %g' % ps.model.i_flux(emin,emax,cgs=True,two_sided=False,error=True)\n else:\n point_dict[name]['Flux']='%g' % ps.model.i_flux(emin,emax,cgs=True,two_sided=False,error=False)\n return point_dict", "title": "" }, { "docid": "9f29a0c51abf8f90434d7a66efdbee1b", "score": "0.43583968", "text": "def _generate_properties(self, schedule):\n self.departed = True if schedule[0][\"depart_time_actual\"] else False\n self.arrived = True if schedule[-1][\"arrival_time_actual\"] else False\n \n self.num_stations = len(self.schedule)\n self.start_station_name = schedule[0][\"station_name\"]\n self.end_station_name = schedule[-1][\"station_name\"]\n self.current_station = self._get_current_train_location(schedule)\n self.current_station_name = self.current_station[\"station_name\"]", "title": "" }, { "docid": "e907fff1c4084f1f9e3970f0b2112430", "score": "0.43577218", "text": "def _compute_transform(self, data, cur_time):\n raise NotImplemented", "title": "" }, { "docid": "c60ba00d5696b58a13b043b2adc39aad", "score": "0.43482926", "text": "def test_point_label_assignment(self):\n\n nucleus_points = [26.15, 26.29, 26.46, 26.6201]\n\n phones_df = DataFrame(\n [\n (26.130000, \"h\", 25.8701),\n (26.160000, \"E\", 26.1301),\n (26.270000, \"_6\", 26.1601),\n (26.360000, \"k\", 26.2701),\n (26.420000, \"l\", 26.3601),\n (26.490000, \"E:\", 26.4201),\n (26.540000, \"_6\", 26.4901),\n (26.620000, \"t\", 26.5401),\n (26.740000, \"@\", 26.6201),\n ],\n columns=[\"end\", \"label\", \"start_est\"],\n )\n\n words_df = DataFrame(\n [\n (25.870000, \"bewirken\", 25.3501),\n (26.130000, \"[h]\", 25.8701),\n (26.740000, \"erklärte\", 26.1301),\n (27.460000, \"Außenminister\", 26.7401),\n ],\n columns=[\"end\", \"label\", \"start_est\"],\n )\n\n tones_df = DataFrame(\n [(25.87, \"<P>\", 22.0), (28.5, \"%\", 25.8701)],\n columns=[\"time\", \"label\", \"start_est\"],\n )\n\n accents_df = DataFrame(\n [\n (26.515000, \"LH*L\"),\n (27.623440, \"L*H\"),\n (27.995732, \"H*L\"),\n (28.682547, \"!H*L\"),\n ],\n columns=[\"time\", \"label\"],\n )\n\n assigned_df = find_syllable_nuclei.assign_points_labels(\n nucleus_points,\n phones=phones_df,\n words=words_df,\n tones=tones_df,\n accents=accents_df,\n )\n\n self.assertTrue(len(assigned_df) == 4)\n self.assertTrue(list(assigned_df[\"phone\"]) == [\"E\", np.nan, \"E:\", \"@\"])\n self.assertTrue(\n list(assigned_df[\"word\"])\n == [\"erklärte\", \"erklärte\", \"erklärte\", \"erklärte\"]\n )", "title": "" }, { "docid": "c148b0e5f3a7c325199e0c019e4eb63b", "score": "0.4347449", "text": "def setUp(self):\n\n per = Performance.objects.all()\n pla = Place.objects.all()\n from itertools import product\n\n for i, ii in product((0, 1), (0, 1)):\n Schedule.objects.create(\n performance=per[i],\n place=pla[ii],\n showtime=tz.localtime(tz.now()) + tz.timedelta(i)\n )", "title": "" }, { "docid": "075657d8d63064da44c10aa4ea6d9740", "score": "0.4339734", "text": "def __init__(self, assaySeriesDict):\n\n\t\tself.assaySeriesDict = assaySeriesDict\n\t\t# self.initialOffset = self.calculateInitialOffset()\n\t\t# self.otherOffsets = 
self.calculateOtherOffsets()\n\t\tself.assaySchedules = self.scheduleRiffle()", "title": "" }, { "docid": "dd6e00cf8494d0fbe3be0b44f7e348f4", "score": "0.43391156", "text": "def __init__(self):\n self.pose_sub = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n self.base_wp_sub = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)\n self.traffic_lights_sub = rospy.Subscriber('/vehicle/traffic_lights',\n TrafficLightArray,\n self.gt_traffic_cb)\n self.traffic_wp_sub = rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n\n self.lookahead_wps = rospy.get_param('~lookahead_wps', 200)\n self.max_decel = abs(rospy.get_param('~max_deceleration', 1.0))\n self.use_ground_truth = rospy.get_param('~use_ground_truth', False)\n self.dist_threshold = rospy.get_param('~dist_threshold', 2.0)\n self.stopping_dist = rospy.get_param('~stopping_dist', 4.0)\n self.nb_stopping_wp = rospy.get_param('~nb_stopping_wp', 0)\n self.cruise_velocity = rospy.get_param('~cruise_velocity', 4.47)\n self.update_period = rospy.Duration(rospy.get_param('~update_period', 0.2))\n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n self.lane = Lane()\n self.last_lane = Lane()\n\n self.traffic_lights = []\n\n self.state = WaypointUpdater.ACCELERATING\n\n self.traffic_light_wp_index = -1\n\n self.commanded_velocity = 0\n\n self.currpose = None\n self.curr_waypoints = None\n self.start_idx = 0\n\n self.last_time = rospy.Time(0)", "title": "" }, { "docid": "0abf512efe697a75b2a4fc8941baadff", "score": "0.43345532", "text": "def test_check_spatial_interp_method_good(self):\n\n interp.check_spatial_interp_method(interp.SPLINE_INTERP_METHOD)", "title": "" }, { "docid": "3227afd93c833fad61e842bd0eb665b9", "score": "0.43334472", "text": "def compute_apriori_ground_points(network, sensors):\n for point_id, group in network.groupby('id'):\n # Free points are type 2 for V2 and V5 control networks\n if group.iloc[0][\"pointType\"] != 2:\n continue\n positions = []\n look_vecs = []\n for measure_id, row in group.iterrows():\n measure = csmapi.ImageCoord(row[\"line\"], row[\"sample\"])\n locus = sensors[row[\"serialnumber\"]].imageToRemoteImagingLocus(measure)\n positions.append([locus.point.x, locus.point.y, locus.point.z])\n look_vecs.append([locus.direction.x, locus.direction.y, locus.direction.z])\n ground_pt, covar_mat = closest_approach(np.array(positions), np.array(look_vecs))\n covar_vec = [covar_mat[0,0], covar_mat[0,1], covar_mat[0,2],\n covar_mat[1,1], covar_mat[1,2], covar_mat[2,2]]\n network.loc[network.id == point_id, [\"aprioriX\", \"aprioriY\", \"aprioriZ\"]] = ground_pt\n network.loc[network.id == point_id, [\"adjustedX\", \"adjustedY\", \"adjustedZ\"]] = ground_pt\n network.loc[network.id == point_id, [\"adjustedX\", \"adjustedY\", \"adjustedZ\"]] = ground_pt\n # We have to do a separate loop to assign a list to a single cell\n for measure_id, row in group.iterrows():\n network.at[measure_id, 'aprioriCovar'] = covar_vec\n return network", "title": "" }, { "docid": "d5c4574725f00931ec69fe38e9916457", "score": "0.43218344", "text": "def _normalise(self, point):\n\n\t\traise NotImplementedError('Derived must overload.')", "title": "" }, { "docid": "5dc88b1b8fef59a41b35d48d56d23e90", "score": "0.43217802", "text": "def prepare_prediction(raw_data_points,\n para_selector, ps_threshold, text_length,\n ner_tagger,\n timer):\n\n graph_log = [0,0,0] # [total nodes, total connections, number of graphs]\n point_usage_log = [0,0] # [used points, unused points]\n\n ids = 
[]\n queries = []\n contexts = []\n graphs = []\n sent_lengths = []\n\n raw_data_points = [raw_data_points] if type(raw_data_points)!=list else raw_data_points\n for i, point in enumerate(raw_data_points):\n\n \"\"\" DATA PROCESSING \"\"\"\n # make a list[ list[str, list[str]] ] for each point in the batch\n context = para_selector.make_context(point,\n threshold=ps_threshold,\n context_length=text_length)\n timer.again(\"ParagraphSelector_prediction\")\n graph = EntityGraph.EntityGraph(context,\n context_length=text_length,\n tagger=ner_tagger)\n timer.again(\"EntityGraph_construction\")\n\n if graph.graph:\n ids.append(point[0])\n queries.append(point[2])\n contexts.append(context)\n graphs.append(graph)\n sent_lengths.append(utils.sentence_lengths(context, tokenizer))\n graph_log = [a + b # [total nodes, total connections, number of graphs]\n for a, b in\n zip(graph_log, [len(graph.graph),\n len(graph.relation_triplets()),\n 1])]\n point_usage_log[0] += 1\n else: # if the NER in EntityGraph doesn't find entities, the datapoint is useless.\n point_usage_log[1] += 1\n\n # update the batch to exclude useless data points\n if not ids:\n print(\"In eval_dfgn.prepare_prediction(): No usable data points; return value: None\")\n return None, None, None, None, None, \\\n timer, graph_log, point_usage_log\n else:\n return ids, queries, contexts, graphs, sent_lengths,\\\n timer, graph_log, point_usage_log", "title": "" }, { "docid": "1b374059c230f59ea26bbfd3f6515ad2", "score": "0.43124992", "text": "def _coerce_datapoints(self, datapoints_manager, datapoints_data):\n datapoints = []\n for data in datapoints_data:\n data['at'] = self._parse_datetime(data['at'])\n datapoint = datapoints_manager._coerce_datapoint(data)\n datapoints.append(datapoint)\n return datapoints", "title": "" }, { "docid": "c2a350975d8dd385e6af8293deb0e081", "score": "0.43124923", "text": "def engine_run():\n\n all_users = models.User.objects()\n\n # Get all schedulable users in a list\n schedulable_users = []\n\n for user in all_users:\n\n np_arr = user.to_np_arr()\n\n if np_arr is not None and user.can_schedule:\n candidate = Employee(np_arr,\n typecode=\"010\",\n pid=user.pid)\n schedulable_users.append(candidate)\n\n # get all locations that require scheduling\n sm = ScheduleManager()\n\n # iterate over the locations\n all_locations = models.Location.objects()\n\n for loc in all_locations:\n l = Location(\n typecode=1,\n scalarWeight=2,\n requirements=loc.to_np_arr())\n l.name = loc.code\n sm.add_location(l)\n\n for candidate in schedulable_users:\n sm.add_candidate(candidate)\n\n sm.run_schedule()\n\n loc_return_list = []\n\n for s in sm.locations:\n loc_return_list.append({\n \"schedule\": models.global_np_to_json_dict(s.schedule.astype(int)),\n \"code\": s.name\n })\n\n new_schedule = models.Schedule()\n new_schedule.created_on = datetime.datetime.utcnow()\n new_schedule.data = loc_return_list\n new_schedule.sid = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(4))\n new_schedule.save()\n\n return jsonify(loc_return_list)", "title": "" }, { "docid": "fc2fa4f55f8c54f3ecd14e62ee0a5520", "score": "0.4312429", "text": "def precalculate():\n \n pass", "title": "" }, { "docid": "ea4da241af5135351e1c3b1bb7abc6e3", "score": "0.43069625", "text": "def place_points():\n\n arcpy.CreateRoutes_lr(\"ssGravityMain\", \"FACILITYID\", routes)\n arcpy.MakeFeatureLayer_management(routes, \"Routes\")\n arcpy.MakeRouteEventLayer_lr(\"Routes\", \"FACILITYID\", event_table, \"AssetName POINT 
AtDistance\", \"RouteEvents\")\n arcpy.SpatialJoin_analysis(\"RouteEvents\", \"ssGravityMain\", pacp_observations_temp, \"JOIN_ONE_TO_ONE\", \"KEEP_ALL\")\n arcpy.MakeFeatureLayer_management(pacp_observations_temp, \"Connections\")", "title": "" }, { "docid": "185a7afa155cf3619d49c8f1efa7b77b", "score": "0.43063617", "text": "def init_start(self):\n\n self.model = self.dsparams()\n self.endpoint = npa(self.model.model.offset)[:self.model.model.dim/2]\n self.dT = self.model.model.dT\n\n cntl_dt = 1.0 / float(self.rateInt)\n self.tscale = cntl_dt / self.dT\n\n rospy.loginfo(\"model dt: %s cntl dt: %s tscale : %s\" % (self.dT, cntl_dt, self.tscale))\n rospy.loginfo(\"Dim %d\" % self.model.model.dim)\n rospy.loginfo(\"Using endpoint %s\" % str(self.endpoint))\n\n self.x = [self.sdata.x, self.sdata.y, self.sdata.z]\n self.newx = self.x", "title": "" }, { "docid": "233558db7f3c8ac97f4d16b2af39c827", "score": "0.42979124", "text": "def analyze(self, pt):\n x = self.xvar\n if pt is not None:\n self.localize(pt)\n varsPP = self.varsPP\n # expect only one fixed point for the linear system!\n vlim = 100\n xlim = 1\n i = 1\n fp_coords = None\n while i < 10:\n try:\n fp_coords = find_fixedpoints(self.lin, eps=self.tol,\n subdomain={'v': [-vlim, vlim],\n x: [-xlim, xlim]})[0]\n except IndexError:\n vlim += 200\n xlim += 2\n i += 1\n else:\n break\n if fp_coords is None:\n raise ValueError(\"No linear system fixed points found!\")\n else:\n fp = fixedpoint_2D(self.lin, Point(fp_coords), coords=varsPP, jac=self.jac_fn,\n eps=self.tol/10.)\n\n self.fp = fp\n\n eval_fast_ix = argmax(abs(fp.evals))\n eval_slow_ix = argmin(abs(fp.evals))\n self.eval_fast = fp.evals[eval_fast_ix]\n self.evec_fast = fp.evecs[eval_fast_ix]\n self.eval_slow = fp.evals[eval_slow_ix]\n self.evec_slow = fp.evecs[eval_slow_ix]\n\n # angles associated with eigenvectors\n alpha = abs(arctan2( self.evec_slow[x], self.evec_slow['v'] ))\n alphap = abs(arctan2( self.evec_fast[x], self.evec_fast['v'] ))\n self.s = sin(alphap) / sin(alpha+alphap)\n self.s2 = sin(alphap + arctan(self.lin.pars['D%sinf' % self.xvar])) / sin(alpha+alphap)\n self.alpha = alpha\n self.alphap = alphap\n\n # angles associated with nullclines\n self.theta = arctan(self.lin.pars['D%sinf' % self.xvar])\n self.gamma = arctan2(1, self.lin.pars['psi%s' % self.xvar])\n self.phi = self.gamma - self.theta", "title": "" }, { "docid": "838cd40205005a9777917a11ee7e45ae", "score": "0.42960775", "text": "def _validate_inputs(self):\r\n # Validate input numerical values\r\n if self.chunk_size < 1:\r\n err = \"Chunk size must be greater than 0.\"\r\n arcpy.AddError(err)\r\n raise ValueError(err)\r\n if self.max_processes < 1:\r\n err = \"Maximum allowed parallel processes must be greater than 0.\"\r\n arcpy.AddError(err)\r\n raise ValueError(err)\r\n if self.cutoff not in [\"\", None] and self.cutoff <= 0:\r\n err = \"Impedance cutoff must be greater than 0.\"\r\n arcpy.AddError(err)\r\n raise ValueError(err)\r\n if self.time_increment <= 0:\r\n err = \"The time increment must be greater than 0.\"\r\n arcpy.AddError(err)\r\n raise ValueError(err)\r\n\r\n # Validate origins, destinations, and barriers\r\n self._validate_input_feature_class(self.origins)\r\n self._validate_input_feature_class(self.destinations)\r\n self._validate_weight_field()\r\n for barrier_fc in self.barriers:\r\n self._validate_input_feature_class(barrier_fc)\r\n # If the barriers are layers, convert them to catalog paths so we can pass them to the subprocess\r\n self.barriers = 
[AnalysisHelpers.get_catalog_path(barrier_fc) for barrier_fc in self.barriers]\r\n\r\n # Validate network\r\n if not self.is_service and not arcpy.Exists(self.network_data_source):\r\n err = f\"Input network dataset {self.network_data_source} does not exist.\"\r\n arcpy.AddError(err)\r\n raise ValueError(err)\r\n if not self.is_service:\r\n # Try to check out the Network Analyst extension\r\n try:\r\n arcpy.CheckOutExtension(\"network\")\r\n except Exception as ex:\r\n err = \"Unable to check out Network Analyst extension license.\"\r\n arcpy.AddError(err)\r\n raise RuntimeError(err) from ex\r\n # If the network dataset is a layer, convert it to a catalog path so we can pass it to the subprocess\r\n self.network_data_source = AnalysisHelpers.get_catalog_path(self.network_data_source)\r\n\r\n # Validate OD Cost Matrix settings and convert travel mode to a JSON string\r\n self.travel_mode = self._validate_od_settings()\r\n\r\n # For a services solve, get tool limits and validate max processes and chunk size\r\n if self.is_service:\r\n self._get_tool_limits_and_is_agol()\r\n if self.is_agol and self.max_processes > AnalysisHelpers.MAX_AGOL_PROCESSES:\r\n arcpy.AddWarning((\r\n f\"The specified maximum number of parallel processes, {self.max_processes}, exceeds the limit of \"\r\n f\"{AnalysisHelpers.MAX_AGOL_PROCESSES} allowed when using as the network data source the ArcGIS \"\r\n \"Online services or a hybrid portal whose network analysis services fall back to the ArcGIS Online \"\r\n \"services. The maximum number of parallel processes has been reduced to \"\r\n f\"{AnalysisHelpers.MAX_AGOL_PROCESSES}.\"))\r\n self.max_processes = AnalysisHelpers.MAX_AGOL_PROCESSES\r\n self._update_max_inputs_for_service()\r\n if self.should_precalc_network_locations:\r\n arcpy.AddWarning(\r\n \"Cannot precalculate network location fields when the network data source is a service.\")\r\n self.should_precalc_network_locations = False", "title": "" } ]
e361c790d3f65e1e27c73dc6e33652cd
Save a dataset to an HDF5 file
[ { "docid": "539658809dd9ab621f6380de089c0177", "score": "0.84975827", "text": "def save_dataset(dataset, outfile):\n import h5py\n f = h5py.File(outfile, 'w')\n for key in dataset.keys():\n f.create_dataset(key, data=dataset[key])\n f.close()", "title": "" } ]
[ { "docid": "ffd982c0c98aa42c210018a9eaf184eb", "score": "0.82145315", "text": "def saveToHdf5(data, dataset_name, save_path, filename):\n hf = h5py.File(os.path.join(save_path, filename), \"w\")\n hf.create_dataset(dataset_name, data=data, dtype=np.uint16)\n hf.close()", "title": "" }, { "docid": "673fbb6590a0d5b4184593bff5bde9b5", "score": "0.8005646", "text": "def SaveHDF5(self, dict):\n data_set = self.data_set\n data_type = self.data_type\n file_index = self.file_index\n bds_output_dirname = self.bds_output_dirname\n\n dirname = bds_output_dirname\n if file_index is not None:\n basename = '%s_%s_%d.hdf5' % (data_set, data_type, file_index)\n else:\n basename = '%s_%s.hdf5' % (data_set, data_type)\n path = dirname + '/' + basename\n\n f = h5py.File(path, 'w')\n for grp_name in dict:\n grp = f.create_group(grp_name)\n for dset_name in dict[grp_name]:\n dset = grp.create_dataset(dset_name, data = dict[grp_name][dset_name])\n f.close()", "title": "" }, { "docid": "bf70f7204b2e57eec614a7100ae71c41", "score": "0.7952305", "text": "def save_hdf_dataset(dataset, fname, verbose=True):\n\n if splitext(fname)[1] == '.hdf5':\n pass\n else:\n fname += '.hdf5'\n\n with h5py.File(fname, 'w') as hdf:\n\n # set main attributes:\n hdf.attrs.create('redshift', dataset.redshift)\n hdf.attrs.create('velspan', dataset.velspan)\n if hasattr(dataset, 'name'):\n hdf.attrs.create('name', dataset.name)\n else:\n hdf.attrs.create('name', '')\n if hasattr(dataset, 'verbose'):\n hdf.attrs.create('verbose', dataset.verbose)\n else:\n hdf.attrs.create('verbose', True)\n\n # .data:\n data = hdf.create_group('data')\n for num, chunk in enumerate(dataset.data):\n spec = data.create_group('spec%i' % (num+1))\n spec.attrs.create('res', chunk['res'])\n spec.attrs.create('norm', chunk['norm'])\n spec.create_dataset('wl', data=chunk['wl'])\n spec.create_dataset('flux', data=chunk['flux'])\n spec.create_dataset('error', data=chunk['error'])\n\n # .regions:\n hdf_regions = hdf.create_group('regions')\n for num, reg in enumerate(dataset.regions):\n reg_group = hdf_regions.create_group('region%i' % (num+1))\n reg_group.attrs.create('velspan', reg.velspan)\n reg_group.attrs.create('res', reg.res)\n reg_group.attrs.create('normalized', reg.normalized)\n reg_group.attrs.create('cont_err', reg.cont_err)\n reg_group.attrs.create('new_mask', reg.new_mask)\n reg_group.attrs.create('specID', reg.specID)\n reg_group.attrs.create('kernel_fwhm', reg.kernel_fwhm)\n reg_group.attrs.create('label', reg.label)\n reg_group.create_dataset('kernel', data=reg.kernel)\n reg_group.create_dataset('wl', data=reg.wl)\n reg_group.create_dataset('flux', data=reg.flux)\n reg_group.create_dataset('mask', data=reg.mask)\n reg_group.create_dataset('error', data=reg.err)\n lines = reg_group.create_group('lines')\n for line in reg.lines:\n lines.create_group(line.tag)\n lines[line.tag].attrs.create('active', line.active)\n\n # .molecules:\n molecules = hdf.create_group('molecules')\n if hasattr(dataset, 'molecules'):\n for molecule, items in dataset.molecules.items():\n pre_array = [tuple(item) for item in items]\n band_data = np.array(pre_array,\n dtype=[('band', 'S8'), ('Jmax', 'i4')])\n molecules.create_dataset(molecule, data=band_data)\n\n # .components:\n components = hdf.create_group('components')\n for ion, comps in dataset.components.items():\n ion_group = components.create_group(ion)\n if len(comps) > 0:\n for cnum, comp in enumerate(comps):\n comp_group = ion_group.create_group(\"comp%i\" % (cnum+1))\n comp_group.create_dataset('z', data=comp[0])\n 
comp_group.create_dataset('b', data=comp[1])\n comp_group.create_dataset('logN', data=comp[2])\n for varname in ['z', 'b', 'N']:\n if varname == 'N':\n tie_constraint = comp[3]['tie_%s' % varname]\n tie_constraint = 'None' if tie_constraint is None else tie_constraint\n comp_group['logN'].attrs.create('tie_%s' % varname, tie_constraint)\n comp_group['logN'].attrs.create('var_%s' % varname, comp[3]['var_%s' % varname])\n else:\n tie_constraint = comp[3]['tie_%s' % varname]\n tie_constraint = 'None' if tie_constraint is None else tie_constraint\n comp_group[varname].attrs.create('tie_%s' % varname, tie_constraint)\n comp_group[varname].attrs.create('var_%s' % varname, comp[3]['var_%s' % varname])\n\n # .best_fit:\n if dataset.best_fit is not None:\n p_opt = dataset.best_fit\n best_fit = hdf.create_group('best_fit')\n for ion, comps in dataset.components.items():\n params = best_fit.create_group(ion)\n for n in range(len(comps)):\n param_group = params.create_group(\"comp%i\" % (n+1))\n param_group.create_dataset('z', data=p_opt['z%i_%s' % (n, ion)].value)\n param_group.create_dataset('b', data=p_opt['b%i_%s' % (n, ion)].value)\n param_group.create_dataset('logN', data=p_opt['logN%i_%s' % (n, ion)].value)\n\n param_group['z'].attrs.create('error', p_opt['z%i_%s' % (n, ion)].stderr)\n param_group['b'].attrs.create('error', p_opt['b%i_%s' % (n, ion)].stderr)\n param_group['logN'].attrs.create('error', p_opt['logN%i_%s' % (n, ion)].stderr)\n\n if verbose:\n print \"Successfully saved the dataset to file: \" + fname", "title": "" }, { "docid": "1be5a2892a1d2a09a1043cf95f3e266c", "score": "0.7865239", "text": "def save_h5(self, filename):\n self.save_hdf5(filename)", "title": "" }, { "docid": "37a4cd9eacd10002e4e1abf4959d3a79", "score": "0.78230304", "text": "def save_HDF5(hf_file, d_key, data, d_type=\"\"):\n if (d_type == 'json'):\n data = json.dumps(data)\n hf_file.create_dataset(d_key, data=data)", "title": "" }, { "docid": "ed02c102212aeb8374ebd338e227e3a3", "score": "0.7764018", "text": "def write_hdf5(data, labels, output_filename):\n\n x = data.astype(numpy.float32)\n y = labels.astype(numpy.float32)\n\n with h5py.File(output_filename, 'w') as h:\n h.create_dataset('data', data=x, shape=x.shape)\n h.create_dataset('label', data=y, shape=y.shape)\n # h.create_dataset()", "title": "" }, { "docid": "4acceb11237b92bb027cbc6919a53148", "score": "0.7742062", "text": "def writeH5(path, data, label):\r\n with h5py.File(path, 'w') as hf:\r\n hf.create_dataset('data', data=data, dtype='float64')\r\n hf.create_dataset('label', data=label)", "title": "" }, { "docid": "2f9fa8f5880e5e19361531267010fa05", "score": "0.77207375", "text": "def saveh(dset, path):\r\n\tf = h5py.File(path, 'w')\r\n\tf['dset'] = dset\r\n\tf.close()", "title": "" }, { "docid": "50a6e953aa3b841919c6cae053675589", "score": "0.77132845", "text": "def save(filename, data):\n\n f = h5py.File(filename, 'w')\n\n for key in data:\n f.create_dataset(key, data[key].shape, dtype=data[key].dtype)[...] 
= data[key]\n\n f.close()", "title": "" }, { "docid": "9550c595ebd4b131df67b996cf2f150d", "score": "0.7711351", "text": "def write_hdf5(data, labels, output_filename):\n\n\tx = data.astype(np.float32)\n\ty = labels.astype(np.float32)\n\n\twith h5py.File(output_filename, 'w') as h:\n\t\th.create_dataset('data', data=x, shape=x.shape)\n\t\th.create_dataset('target', data=y, shape=y.shape)\n\t\t# h.create_dataset()", "title": "" }, { "docid": "4933ab720a56af49abff712afe4c00e9", "score": "0.7698196", "text": "def saveh5(iteration, outputdir, edict):\n fname = os.path.join(outputdir, str(iteration)+\".hdf5\")\n with h5py.File(fname, \"w\") as f:\n for k,v in edict.items():\n dset=f.create_dataset(k, data=v)", "title": "" }, { "docid": "c673e410021853aa3bb4f8edd2edf5cd", "score": "0.7648026", "text": "def test_to_hdf5(self):\n self.save_and_check(\"hdf5\")", "title": "" }, { "docid": "c673e410021853aa3bb4f8edd2edf5cd", "score": "0.7648026", "text": "def test_to_hdf5(self):\n self.save_and_check(\"hdf5\")", "title": "" }, { "docid": "1ca497051fa20ddd1cb68c68251795d7", "score": "0.7629075", "text": "def save_hdf5(self, filename):\n self.__save_dict_to_hdf5__(self.to_dict(), filename)", "title": "" }, { "docid": "b7d986bbfbca2e5452a9a72cccf7dae2", "score": "0.76003057", "text": "def save_hdf5(tensor, weights, filename):\n tensor = tensor.cpu()\n ndarr = tensor.float().numpy()\n with h5py.File(filename, 'w') as f:\n f.create_dataset('data', data=ndarr, compression=\"gzip\")\n\tf.create_dataset('weights', data=weights.cpu().float().numpy(), compression='gzip')", "title": "" }, { "docid": "693320f8733237568c1172aa12015cfd", "score": "0.7584176", "text": "def save_data_hdf5(filename, data_path, data):\n with h5py.File(filename, 'a') as h5f:\n h5f.create_dataset(data_path, data=data, compression='gzip')\n return", "title": "" }, { "docid": "925b9ed5913c327a4364e090b7282d3e", "score": "0.7540416", "text": "def save_h5(data, filename):\n data.save('./data_cache/' + filename + '.h5')\n return", "title": "" }, { "docid": "61a693fdbcd1d17d4beec0444b8ac52b", "score": "0.75164205", "text": "def writeHDF5(image_path, dataset_path, array):\n\n with h5py.File(image_path, 'w') as f:\n dset = f.create_dataset(dataset_path, array.shape, array.dtype)\n dset[...] = array", "title": "" }, { "docid": "a0bf47d0e9f28c6ada1dc87646c49f9c", "score": "0.7480678", "text": "def save(filename, group):\n\n hdf5.save(filename, group)", "title": "" }, { "docid": "3d352afdb269c0422b8a0aa3f56e8fdb", "score": "0.74769944", "text": "def write_h5(h5dat, day, dset, dset_name):\n try: # check if dataset exists, if yes: ask if user wants to overwrite. 
If no, create it\n h5dat.create_dataset(str(day) + '/' + dset_name,\n data=dset, compression='gzip')\n except:\n # if we want to overwrite: delete old dataset and then re-create with\n # new data\n del h5dat[str(day) + '/' + dset_name]\n h5dat.create_dataset(str(day) + '/' + dset_name,\n data=dset, compression='gzip')", "title": "" }, { "docid": "dffeb47e820b785c8bdba341cff6e464", "score": "0.7430235", "text": "def save_h5(dict_to_save, filename):\n\n with h5py.File(filename, 'w') as f:\n for key in dict_to_save:\n f.create_dataset(key, data=dict_to_save[key])", "title": "" }, { "docid": "648da152253f95afa602bbbc4787408a", "score": "0.74204284", "text": "def write_dataset(self, path, data):\n self._h5file.create_dataset(path, data=data)", "title": "" }, { "docid": "d6f45f2f66946d1801a34db8776cf96b", "score": "0.7395943", "text": "def save(self, filename=None, verbose=False):\n if not filename:\n if self.name:\n filename = self.name\n else:\n print(\" [ERROR] - Must specify dataset.name [dataset.set_name('name')]\")\n print(\" or give filename [dataset.save(filename='filename')]\")\n hdf5_save.save_hdf_dataset(self, filename, verbose=verbose)", "title": "" }, { "docid": "f1b562f4e046ef7aa2f6aba9799bd85a", "score": "0.73618114", "text": "def saveh5(self,idx=0):\n\n filename = self.filename+'_'+str(idx)\n filenameh5=pyu.getlong(filename+'.h5',pstruc['DIRR3D'])\n\n\n\n # try/except to avoid loosing the h5 file if\n # read/write error\n try:\n f=h5py.File(filenameh5,'w')\n # keys not saved as attribute of h5py file\n notattr = ['I','B','B0','delays','dis']\n for a in self.__dict__.keys():\n if a not in notattr:\n f.attrs[a]=getattr(self,a)\n\n for k in self.keys():\n f.create_group(str(k))\n for kk in self[k].keys():\n if kk == 'sig2d':\n # Need to find an efficient way to save the signatures\n # 2d which have created the rays\n pass\n elif kk == 'nbrays':\n f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))\n else:\n f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])\n f.close()\n except:\n f.close()\n raise NameError('Rays: issue when writting h5py file')\n print(filenameh5)", "title": "" }, { "docid": "8fe56e0ce9a1e82b49273603c2caaf4d", "score": "0.73564285", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n k = []\n nodes = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n k.append(element.k)\n nodes.append([nid if nid is not None else 0 for nid in element.nodes])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('K', data=k)\n h5_file.create_dataset('nodes', data=nodes)", "title": "" }, { "docid": "dba51acdc3c3d974caca3c974e6b0294", "score": "0.73061275", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)", "title": "" }, { "docid": "c8f3bc66f02fd734a7d2804e8630f542", "score": "0.7272632", "text": "def save_hdf5(tensor, filename):\n tensor = tensor.cpu()\n # 这句效果跟下面的代码一样\n ndarr = tensor.mul(0.5).add(0.5).mul(255).byte().numpy() # .transpose(0,2).transpose(0,1).numpy()\n # ndarr = tensor.add(1).mul(0.5).mul(255).byte().numpy() # 
.transpose(0,2).transpose(0,1).numpy()\n with h5py.File(filename, 'w') as f:\n f.create_dataset('data', data=ndarr, dtype=\"i8\", compression=\"gzip\")", "title": "" }, { "docid": "ae78e3cdef0234737769722bff3231f0", "score": "0.726901", "text": "def save_step(outpaths, dsname, ds, elsize, axlab):\n\n try:\n h5path = outpaths[dsname]\n except KeyError:\n pass\n else:\n h5_write(ds, ds.shape, ds.dtype,\n h5path,\n element_size_um=elsize, axislabels=axlab)", "title": "" }, { "docid": "8cd135bebebdc418a22627ad0a4fd393", "score": "0.7236347", "text": "def write_hdf5(filename, array, attrs, fillvalue=None,\n dsetname=DEFAULT_DSETNAME):\n f = h5py.File(filename, 'w')\n dset = f.create_dataset(dsetname, data=array, fillvalue=fillvalue,\n **DEFAULT_DSETPARAMS)\n write_attrs(dset, attrs)\n f.close()", "title": "" }, { "docid": "899e56dc7a452bdc590ddc231df21290", "score": "0.7231322", "text": "def save_data_file(data, labels, affine=None, h5_file='data_file.h5'):\n f = tables.open_file(\n h5_file if h5_file.endswith('.h5') else f'{h5_file}.h5', 'w'\n )\n\n f.create_carray('/', 'data', obj=data)\n f.create_carray('/', 'truth', obj=labels)\n if affine is not None:\n f.create_carray('/', 'affine', obj=affine)\n f.close()", "title": "" }, { "docid": "382bcce2fe8df95381139b23f6e8bc43", "score": "0.7229086", "text": "def saveh5(filepath, mdict, mode='w'):\n\n f = h5py.File(filepath, mode)\n\n _create_group_dataset(f, mdict)\n\n f.close()\n return 0", "title": "" }, { "docid": "78cf949bb6ebde848b697ca79a36bcf7", "score": "0.72216475", "text": "def save(self, filename):\n with h5py.File(filename, 'w') as f:\n f.create_dataset('position', self.position.shape, data=self.position)\n f.create_dataset('nhat', self.nhat.shape, data=self.nhat)\n f.create_dataset('alpha', (1,), data=self.alpha)\n f.create_dataset('aperture', (1,), data=self.aperture)\n \n srange = np.linspace(self.spectrum[0], self.spectrum[1], self.spectrum[2])\n f.create_dataset('spectrum', srange.shape, data=srange)", "title": "" }, { "docid": "9eb8f505348344e6266647b508fd1168", "score": "0.7220386", "text": "def save(self, filename):\n with h5py.File(filename, 'x') as hf:\n dset_data = hf.create_dataset('emission_measure', data=self.as_array())\n dset_data.attrs['unit'] = self[0].meta['bunit']\n dset_temperature_bin_edges = hf.create_dataset(\n 'temperature_bin_edges', data=self.temperature_bin_edges.value)\n dset_temperature_bin_edges.attrs['unit'] = self.temperature_bin_edges.unit.to_string()\n meta_group = hf.create_group('meta')\n for key in self[0].meta:\n # Try-except because HDF5 cannot serialize some stuff, .e.g. 
dictionaries\n try:\n meta_group.attrs[key] = self[0].meta[key]\n except TypeError:\n warnings.warn(f'Could not save metadata for entry {key}')", "title": "" }, { "docid": "a37506640377bdbb4c305b4f3fe5fe8c", "score": "0.7215861", "text": "def save_data(data_fname, data):\n root, ext = os.path.splitext(data_fname)\n output_filename = data_fname\n hdf5_ext = '.hdf5'\n if ext.lower() != hdf5_ext:\n output_filename += hdf5_ext\n with h5py.File(output_filename, 'w') as fidout:\n fidout.create_dataset(os.path.basename(data_fname), data=data)\n gc.collect()", "title": "" }, { "docid": "750518d2f1f14ca957cce6de60cebd0b", "score": "0.72056836", "text": "def save_hdf5(self, filename):\n self._update_waterfall()\n self.waterfall.write_to_hdf5(filename)", "title": "" }, { "docid": "3c6c0683f720673113858a7505019ac7", "score": "0.71954876", "text": "def save(self, filepath):\n f = h5py.File(filepath, \"w\")\n f.create_dataset(\"trial_type\", data=\"RankDocket\")\n f.create_dataset(\"stimulus_set\", data=self.stimulus_set)\n f.create_dataset(\"n_select\", data=self.n_select)\n f.create_dataset(\"is_ranked\", data=self.is_ranked)\n f.close()", "title": "" }, { "docid": "e727ed525562820f57c180989ec04491", "score": "0.7194704", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n k = []\n ge = []\n s = []\n nodes = []\n components = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n k.append(element.k)\n ge.append(element.ge)\n s.append(element.s)\n nodes.append([nid if nid is not None else 0 for nid in element.nodes])\n components.append([element.c1, element.c2])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('K', data=k)\n h5_file.create_dataset('ge', data=ge)\n h5_file.create_dataset('s', data=s)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('components', data=components)", "title": "" }, { "docid": "7b0ce67cb8caf1443050f60e6c2fea33", "score": "0.7190684", "text": "def _save(mfractal, z, t, directory, title, res):\n with h5py.File(f\"data/{directory}/{title}_{res}.hdf5\", 'w') as hf:\n hf.create_dataset(\"time\", data=t)\n hf.create_dataset(\"mfractal\", data=mfractal)\n hf.create_dataset(\"z\", data=z)", "title": "" }, { "docid": "d4fe377963afacc674811f56fedfccb9", "score": "0.71757954", "text": "def save_hdf(self):\n pass", "title": "" }, { "docid": "616b79b1c90ee75c9352e032ce95749e", "score": "0.715004", "text": "def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n components = []\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append([nid if nid is not None else 0 for nid in element.nodes])\n components.append([element.c1, element.c2])\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('pid', data=pids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('components', data=components)", "title": "" }, { "docid": "3d65c4ddb11d52f83264f0efc57d6fea", "score": "0.7072501", "text": "def _saveh5(self,filenameh5,grpname):\n\n filenameh5=pyu.getlong(filenameh5,pstruc['DIRLNK'])\n # try/except to avoid loosing the h5 file if\n # read/write error\n #try:\n\n fh5=h5py.File(filenameh5,'a')\n if self.is3D:\n if not grpname in fh5['ray'].keys():\n fh5['ray'].create_group(grpname)\n else :\n print('ray/'+grpname +'already exists in '+filenameh5)\n f = 
fh5['ray/'+grpname]\n\n\n else:\n if not grpname in fh5['ray2'].keys():\n fh5['ray2'].create_group(grpname)\n else :\n print('ray2/'+grpname +'already exists in '+filenameh5)\n f = fh5['ray2/'+grpname]\n # keys not saved as attribute of h5py file\n notattr = ['I','B','B0','dis']\n for a in self.__dict__.keys():\n if a not in notattr:\n if type(a)==str:\n a.encode('utf-8')\n # print(\" \",a)\n f.attrs[a] = getattr(self,a)\n\n for k in self.keys():\n f.create_group(str(k))\n for kk in self[k].keys():\n if kk == 'sig2d':\n # Need to find an efficient way to save the signatures\n # 2d which have created the rays\n pass\n elif kk == 'nbrays':\n f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))\n else:\n f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])\n fh5.close()\n #except:\n # fh5.close()\n # raise NameError('Rays: issue when writting h5py file')", "title": "" }, { "docid": "c690355e4b447b4b9726539490a3695b", "score": "0.7070762", "text": "def test_hdf5_writer():\n mod = s3d.Ses3d('europe', os.path.join(TEST_DATA_DIR, 'japan'), components=['rho', 'vp', 'vsh', 'vsv'])\n mod.read()\n\n mod.write_to_hdf5()", "title": "" }, { "docid": "4dfe5f7a5d01d214a0330dc4935d7ef6", "score": "0.70649046", "text": "def saving_into_h5(saving_dir, data_dict, classification_dataset):\n\n def save_each_set(handle, name, data):\n single_set = handle.create_group(name)\n if classification_dataset:\n single_set.create_dataset(\"labels\", data=data[\"labels\"].astype(int))\n single_set.create_dataset(\"X\", data=data[\"X\"].astype(np.float32))\n if name in [\"val\", \"test\"]:\n single_set.create_dataset(\"X_hat\", data=data[\"X_hat\"].astype(np.float32))\n single_set.create_dataset(\n \"missing_mask\", data=data[\"missing_mask\"].astype(np.float32)\n )\n single_set.create_dataset(\n \"indicating_mask\", data=data[\"indicating_mask\"].astype(np.float32)\n )\n\n saving_path = os.path.join(saving_dir, \"datasets.h5\")\n with h5py.File(saving_path, \"w\") as hf:\n hf.create_dataset(\n \"empirical_mean_for_GRUD\",\n data=data_dict[\"train\"][\"empirical_mean_for_GRUD\"],\n )\n save_each_set(hf, \"train\", data_dict[\"train\"])\n save_each_set(hf, \"val\", data_dict[\"val\"])\n save_each_set(hf, \"test\", data_dict[\"test\"])", "title": "" }, { "docid": "e5c88b6e6d05add6aba8c963116248c4", "score": "0.70620584", "text": "def save_dataset(dataset: TablutDataset, path: Optional[str] = './'):\n with open(os.path.join(path, f'{dataset.name}.pkl'), 'wb') as f:\n pickle.dump(dataset, f)", "title": "" }, { "docid": "5e0e45bac7aa21042da8d4033962d07d", "score": "0.70519066", "text": "def to_hdf(self, outfile, suffix=\"\"):\n # Allow either a string or h5 file object ot be passed.\n is_path = False\n if isinstance(outfile, str):\n outfile = h5py.File(outfile, 'w')\n is_path = True\n\n # If no error is provided\n if self.serr is None:\n serr = np.empty_like(self.s)\n serr.fill(np.nan)\n else:\n serr = self.serr\n\n outfile.create_dataset('s' + suffix, data=self.s)\n outfile.create_dataset('serr' + suffix, data=serr)\n outfile.create_dataset('w' + suffix, data=self.w)\n outfile.create_dataset('mask' + suffix, data=self.mask)\n\n outfile.attrs['name' + suffix] = self.name\n outfile.attrs['header' + suffix] = str(self.header)\n\n for key in self.attrs.keys():\n outfile.attrs[key + suffix] = self.attrs[key]\n\n if is_path:\n outfile.close()", "title": "" }, { "docid": "51bf011d1e4e44b0eac2e5d881d9c58c", "score": "0.704029", "text": "def save_h5py(path: str, value: Any):\n # Need to go via a 
buffer for some filesystems...\n buf = io.BytesIO()\n h5 = h5py.File(buf, 'w')\n for p, v in tree.flatten_with_path(value):\n if v is not None:\n h5.create_dataset('/'.join(map(str, p)), data=np.array(v))\n h5.close()\n\n with epath.Path(path).open('wb') as f:\n f.write(buf.getvalue())", "title": "" }, { "docid": "37799e9f3139ba6b2e3eb38fbc4c7e00", "score": "0.7034918", "text": "def save(self, filepath):\n f = h5py.File(filepath, \"w\")\n f.create_dataset(\"trial_type\", data=\"RankObservations\")\n f.create_dataset(\"stimulus_set\", data=self.stimulus_set)\n f.create_dataset(\"n_select\", data=self.n_select)\n f.create_dataset(\"is_ranked\", data=self.is_ranked)\n f.create_dataset(\"group_id\", data=self.group_id)\n f.create_dataset(\"agent_id\", data=self.agent_id)\n f.create_dataset(\"session_id\", data=self.session_id)\n f.create_dataset(\"weight\", data=self.weight)\n f.create_dataset(\"rt_ms\", data=self.rt_ms)\n f.close()", "title": "" }, { "docid": "9416de616afe46132088ee2cbc00ed63", "score": "0.7030358", "text": "def save_hdf5(network, filepath):\n network = extract_network(network)\n data = save_dict(network)\n\n with h5py.File(filepath, mode='w') as f:\n layer_names = []\n\n for layer in data['layers']:\n layer_name = layer['name']\n layer_group = f.create_group(layer_name)\n\n for attrname, attrvalue in layer.items():\n if attrname != 'parameters':\n layer_group.attrs[attrname] = json.dumps(\n attrvalue, default=repr)\n\n for param_name, param in layer['parameters'].items():\n dataset = layer_group.create_dataset(\n param_name, data=param['value'])\n\n dataset.attrs['trainable'] = param['trainable']\n\n layer_names.append(layer_name)\n\n f.attrs['metadata'] = json.dumps(data['metadata'])\n f.attrs['graph'] = json.dumps(data['graph'])\n f.attrs['layer_names'] = json.dumps(layer_names)", "title": "" }, { "docid": "454dcc6bd3f4e664b1b3f6b24a0b898b", "score": "0.70006645", "text": "def write_dataset(hdf5_file, path, data, overwrite=False):\n with h5py.File(hdf5_file, mode=\"a\") as f:\n if (path in f) and (not overwrite):\n raise RuntimeError(\"Path {} exists, overwrite?\".format(path))\n elif overwrite:\n del f[path]\n f[path] = data\n f.flush()", "title": "" }, { "docid": "666259697e06db28a1c6cab57bca0645", "score": "0.69875336", "text": "def test_save_data_to_hdf5_3(tmp_path):\n fln = \"test.h5\"\n fpath = os.path.join(tmp_path, fln)\n data, metadata = _prepare_raw_dataset(N=5, M=10, K=4096)\n\n save_data_to_hdf5(fpath, data)\n data_loaded, metadata_loaded = read_data_from_hdf5(fpath)\n\n metadata_keys = [\n \"file_type\",\n \"file_format\",\n \"file_format_version\",\n \"file_created_time\",\n \"file_software\",\n \"file_software_version\",\n ]\n for k in metadata_keys:\n assert k in metadata_loaded", "title": "" }, { "docid": "384f10480acb33f0f5f3c58be036b2cc", "score": "0.69786227", "text": "def save(self, path):\n self.save_default_folders_to_measurements()\n self.measurements.flush()\n #\n # Note: shutil.copy and similar don't seem to work under Windows.\n # I suspect that there's some file mapping magic that's\n # causing problems because I found some internet postings\n # where people tried to copy database files and failed.\n # If you're thinking, \"He didn't close the file\", I did.\n # shutil.copy creates a truncated file if you use it.\n #\n hdf5src = self.measurements.hdf5_dict.hdf5_file\n hdf5dest = h5py.File(path, mode=\"w\")\n for key in hdf5src:\n obj = hdf5src[key]\n if isinstance(obj, h5py.Dataset):\n hdf5dest[key] = obj[()]\n else:\n hdf5src.copy(hdf5src[key], 
hdf5dest, key)\n for key in hdf5src.attrs:\n hdf5dest.attrs[key] = hdf5src.attrs[key]\n hdf5dest.close()", "title": "" }, { "docid": "cbc9201299a8bb40f974e0906760ea00", "score": "0.69763184", "text": "def _write_full_file(self, data, filename):\n with h5py.File(filename, 'w', libver='latest') as f:\n dset = f.create_dataset('array', shape=(data.shape),\n data = data,\n compression='gzip', \n compression_opts=9\n )\n f.close()", "title": "" }, { "docid": "acb78b6f3e86f7efa46dd7b7d7e8ef92", "score": "0.6949269", "text": "def test_save_data_to_hdf5_4(tmp_path):\n fln = \"test.h5\"\n fpath = os.path.join(tmp_path, fln)\n data, metadata = _prepare_raw_dataset(N=5, M=10, K=4096)\n\n application = \"Testing Application\"\n version = \"v4.5.6\"\n\n metadata = {\"file_software\": application, \"file_software_version\": version}\n\n save_data_to_hdf5(fpath, data, metadata=metadata)\n data_loaded, metadata_loaded = read_data_from_hdf5(fpath)\n\n assert metadata_loaded[\"file_software\"] == application\n assert metadata_loaded[\"file_software_version\"] == version", "title": "" }, { "docid": "99c293a720ae12a31ade70f43ee9ed76", "score": "0.6928311", "text": "def save_data(h5file, ddict):\n logging.debug('Saving data to the file...')\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for k, v in ddict.items():\n h5file.create_array('/', k, v)\n h5file.flush()", "title": "" }, { "docid": "0785602fb74a9f095e27d79849033fae", "score": "0.6922566", "text": "def save_bs_hdf5(bs, filename):\n if '.h5' not in filename:\n filename += '.h5'\n\n # Step 1 - Create hdf5 and hierarchy tree/grp\n hf = h5py.File(filename, 'w')\n\n grp_obs = hf.create_group('obs')\n grp_matrix = hf.create_group('matrix')\n grp_info = hf.create_group('infos')\n grp_mask = hf.create_group('mask')\n grp_hdr = hf.create_group('hdr')\n\n # Step 2 - Save observable (not in tree matrix, infos and mask).\n for key in bs:\n if key not in ['matrix', 'mask', 'infos']:\n grp_obs.create_dataset(key, data=bs[key])\n\n # Step 3 - Save matrix (contains all individual observable\n # frame by frame and statistics matrix (covariance, variance, etc.)).\n matrix = bs.matrix\n for mat in matrix:\n if matrix[mat] is not None:\n grp_matrix.create_dataset(mat, data=matrix[mat])\n else:\n grp_matrix.create_dataset(mat, data=[0])\n\n # Step 4 - Save mask (contains all mask informations) as\n # well as u1, v1, u2, v2 coordinates (for the CP).\n mask = bs.mask\n for key in mask:\n if (mask[key] is not None) and (key != 't3_coord'):\n grp_mask.create_dataset(key, data=mask[key])\n t3_coord = mask['t3_coord']\n for key in t3_coord:\n grp_mask.create_dataset(key, data=t3_coord[key])\n\n # Step 5 - Save informations (target, date, etc.).\n infos = bs.infos\n for i in infos:\n if i != 'hdr':\n grp_info.create_dataset(i, data=infos[i])\n\n # Step 6 - Save original header keywords.\n hdr = bs.infos.hdr\n for key in hdr:\n grp_hdr.create_dataset(key, data=hdr[key])\n\n # Last step - close hdf5\n hf.close()\n return None", "title": "" }, { "docid": "939de1aded08a9bc6ddf9f6a0d53b817", "score": "0.69216025", "text": "def write_data(databases, attributes, filename):\n\tprint('writinng data to experiments/{}.h5 ...'.format(filename))\n\tif not os.path.exists('experiments/'):\n\t\tos.makedirs('experiments/')\n\n\thf = h5py.File('experiments/'+filename+'.h5', \"w\")\n\n\t# Write the attributes to the .h5 dataset\n\tfor attribute in attributes:\n\t\tprint(attribute, ': ', attributes[attribute])\n\t\thf.attrs[attribute] = attributes[attribute]\n\n\t# Write the 
data to the database\n\tfor database in databases:\n\t\tprint(database, ': ', databases[database].shape)\n\t\thf.create_dataset(database, data=databases[database])\n\n\thf.close()", "title": "" }, { "docid": "65cb4face624ef36761ae1994309ebb3", "score": "0.68802", "text": "def write_dataset(split, data, args, label_range=None):\n caption_id = []\n images_path = []\n labels = []\n\n for img in data:\n assert len(img.captions_id) == 1\n caption_id.append(img.captions_id[0])\n labels.append(img.id)\n images_path.append(img.image_path)\n\n #N = len(images)\n data = {'caption_id':caption_id, 'labels':labels, 'images_path':images_path}\n \n if label_range is not None:\n data['label_range'] = label_range\n pickle_root = os.path.join(args.out_root, split + '_sort.pkl')\n else:\n pickle_root = os.path.join(args.out_root, split + '.pkl')\n # Write caption_id and labels as pickle form\n with open(pickle_root, 'wb') as f:\n pickle.dump(data, f)\n\n #h5py_root = os.path.join(args.out_root, split + '.h5')\n #f = h5py.File(h5py_root, 'w')\n #f.create_dataset('images', (N, 3, args.default_image_size, args.default_image_size), data=images)\n\n print('Save dataset')", "title": "" }, { "docid": "aeda1b9cbed1df0cbd56dd661825a7bc", "score": "0.6871627", "text": "def write_h5(\n save_file: PathLike,\n rmsds: List[float],\n point_clouds: np.ndarray,\n):\n with h5py.File(save_file, \"w\") as f:\n write_rmsd(f, rmsds)\n write_point_cloud(f, point_clouds)", "title": "" }, { "docid": "1fd62d1b3ffc6650f2f00597a330a811", "score": "0.68659973", "text": "def write_to_h5ds(h5ds, data, slices=None):\n\n if slices is None:\n\n h5ds[:] = data\n\n else:\n\n try:\n ndim = data.ndim\n except AttributeError:\n ndim = len(data.dims)\n\n if ndim == 1:\n h5ds[slices[0]] = data\n elif ndim == 2:\n h5ds[slices[0], slices[1]] = data\n elif ndim == 3:\n h5ds[slices[0], slices[1], slices[2]] = data\n elif ndim == 4:\n h5ds[slices[0], slices[1], slices[2], slices[3]] = data\n elif ndim == 5:\n h5ds[slices[0], slices[1], slices[2], slices[3], slices[4]] = data", "title": "" }, { "docid": "b20afb94a786227e6517fe98058e82da", "score": "0.68540424", "text": "def save_hdf5(filename, arrays_dict):\n def _save_dict_recursively(file_or_group, d):\n for name, array_or_dict in d.items():\n if issubclass(type(array_or_dict), dict):\n _save_dict_recursively(file_or_group.create_group(name), array_or_dict)\n else:\n array = np.asarray(array_or_dict)\n if np.issubdtype(array.dtype, np.str_):\n array = array.astype(np.string_) # Only fixed length strings are supported (np.string_, not np.str)\n assert array.size > 0, \"Cannot save an empty array.\"\n dset = file_or_group.create_dataset(name, shape=array.shape, dtype=array.dtype)\n dset.write_direct(array)\n\n f = h5py.File(filename, 'w')\n try:\n _save_dict_recursively(f, arrays_dict)\n finally:\n f.close()", "title": "" }, { "docid": "9e87248a009792866d15a8330b7500bf", "score": "0.6850539", "text": "def save_dict_h5py(data, fname):\n with h5py.File(fname, 'w') as hf:\n for key in data.keys():\n hf.create_dataset(key, data=data[key])", "title": "" }, { "docid": "b1da80a97590b9e2d365f70263fc60fa", "score": "0.68147224", "text": "def save_hdf5_images(data, dest_path):\n h5file = h5py.File(dest_path, mode=\"w\")\n h5file.create_dataset(\"image\", data=data, dtype=data.dtype)", "title": "" }, { "docid": "c78b6bf338c6afbab21b2a54324ddafb", "score": "0.681067", "text": "def saveh5(self,filename):\n filename=Path(filename)\n if filename.is_file():\n filename.unlink()\n\n\n vardict = vars(self)\n\n with 
tables.open_file(str(filename), mode = \"w\", title = \"IonoContainer out.\") as f:\n\n try:\n # XXX only allow 1 level of dictionaries, do not allow for dictionary of dictionaries.\n # Make group for each dictionary\n for cvar in vardict.keys():\n #group = f.create_group(posixpath.sep, cvar,cvar +'dictionary')\n if type(vardict[cvar]) ==dict: # Check if dictionary\n dictkeys = vardict[cvar].keys()\n group2 = f.create_group('/',cvar,cvar+' dictionary')\n for ikeys in dictkeys:\n f.create_array(group2,ikeys,vardict[cvar][ikeys],'Static array')\n else:\n if not(vardict[cvar] is None):\n f.create_array('/',cvar,vardict[cvar],'Static array')\n\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n raise NameError('Failed to write to h5 file.')", "title": "" }, { "docid": "13884a152e609000232b744ea8eb0bce", "score": "0.6810299", "text": "def save(self):\n # Create a temporary file and swap it to the original name in case\n # data needs to be loaded while saving\n tmp_name = uuid.uuid4()\n with h5py.File(\"{}.hdf5\".format(tmp_name), \"w\") as f:\n # Save geometry (atoms and positions need to be separate):\n d_geom = f.create_group(\"geometry\")\n if not isinstance(self.geometry, basestring):\n atoms = [numpy.string_(item[0]) for item in self.geometry]\n positions = numpy.array(\n [list(item[1]) for item in self.geometry])\n else:\n atoms = numpy.string_(self.geometry)\n positions = None\n d_geom.create_dataset(\"atoms\",\n data=(atoms if atoms is not None else False))\n d_geom.create_dataset(\n \"positions\",\n data=(positions if positions is not None else False))\n # Save basis:\n f.create_dataset(\"basis\", data=numpy.string_(self.basis))\n # Save multiplicity:\n f.create_dataset(\"multiplicity\", data=self.multiplicity)\n # Save charge:\n f.create_dataset(\"charge\", data=self.charge)\n # Save description:\n f.create_dataset(\"description\",\n data=numpy.string_(self.description))\n # Save name:\n f.create_dataset(\"name\", data=numpy.string_(self.name))\n # Save n_atoms:\n f.create_dataset(\"n_atoms\", data=self.n_atoms)\n # Save atoms:\n f.create_dataset(\"atoms\", data=numpy.string_(self.atoms))\n # Save protons:\n f.create_dataset(\"protons\", data=self.protons)\n # Save n_electrons:\n f.create_dataset(\"n_electrons\", data=self.n_electrons)\n # Save generic attributes from calculations:\n f.create_dataset(\"n_orbitals\",\n data=(self.n_orbitals\n if self.n_orbitals is not None else False))\n f.create_dataset(\n \"n_qubits\",\n data=(self.n_qubits if self.n_qubits is not None else False))\n f.create_dataset(\n \"nuclear_repulsion\",\n data=(self.nuclear_repulsion\n if self.nuclear_repulsion is not None else False))\n # Save attributes generated from SCF calculation.\n f.create_dataset(\n \"hf_energy\",\n data=(self.hf_energy if self.hf_energy is not None else False))\n f.create_dataset(\n \"canonical_orbitals\",\n data=(self.canonical_orbitals\n if self.canonical_orbitals is not None else False),\n compression=(\"gzip\"\n if self.canonical_orbitals is not None else None))\n f.create_dataset(\n \"overlap_integrals\",\n data=(self.overlap_integrals\n if self.overlap_integrals is not None else False),\n compression=(\"gzip\"\n if self.overlap_integrals is not None else None))\n f.create_dataset(\n \"orbital_energies\",\n data=(self.orbital_energies\n if self.orbital_energies is not None else False))\n # Save attributes generated from integrals.\n f.create_dataset(\n \"one_body_integrals\",\n data=(self.one_body_integrals\n if self.one_body_integrals is not None 
else False),\n compression=(\"gzip\"\n if self.one_body_integrals is not None else None))\n f.create_dataset(\n \"two_body_integrals\",\n data=(self.two_body_integrals\n if self.two_body_integrals is not None else False),\n compression=(\"gzip\"\n if self.two_body_integrals is not None else None))\n # Save attributes generated from MP2 calculation.\n f.create_dataset(\"mp2_energy\",\n data=(self.mp2_energy\n if self.mp2_energy is not None else False))\n # Save attributes generated from CISD calculation.\n f.create_dataset(\"cisd_energy\",\n data=(self.cisd_energy\n if self.cisd_energy is not None else False))\n f.create_dataset(\n \"cisd_one_rdm\",\n data=(self.cisd_one_rdm\n if self.cisd_one_rdm is not None else False),\n compression=(\"gzip\" if self.cisd_one_rdm is not None else None))\n f.create_dataset(\n \"cisd_two_rdm\",\n data=(self.cisd_two_rdm\n if self.cisd_two_rdm is not None else False),\n compression=(\"gzip\" if self.cisd_two_rdm is not None else None))\n # Save attributes generated from exact diagonalization.\n f.create_dataset(\"fci_energy\",\n data=(self.fci_energy\n if self.fci_energy is not None else False))\n f.create_dataset(\n \"fci_one_rdm\",\n data=(self.fci_one_rdm\n if self.fci_one_rdm is not None else False),\n compression=(\"gzip\" if self.fci_one_rdm is not None else None))\n f.create_dataset(\n \"fci_two_rdm\",\n data=(self.fci_two_rdm\n if self.fci_two_rdm is not None else False),\n compression=(\"gzip\" if self.fci_two_rdm is not None else None))\n # Save attributes generated from CCSD calculation.\n f.create_dataset(\"ccsd_energy\",\n data=(self.ccsd_energy\n if self.ccsd_energy is not None else False))\n f.create_dataset(\n \"ccsd_single_amps\",\n data=(self.ccsd_single_amps\n if self.ccsd_single_amps is not None else False),\n compression=(\"gzip\"\n if self.ccsd_single_amps is not None else None))\n f.create_dataset(\n \"ccsd_double_amps\",\n data=(self.ccsd_double_amps\n if self.ccsd_double_amps is not None else False),\n compression=(\"gzip\"\n if self.ccsd_double_amps is not None else None))\n\n # Save general calculation data\n key_list = list(self.general_calculations.keys())\n f.create_dataset(\"general_calculations_keys\",\n data=([numpy.string_(key) for key in key_list]\n if len(key_list) > 0 else False))\n f.create_dataset(\n \"general_calculations_values\",\n data=([self.general_calculations[key] for key in key_list]\n if len(key_list) > 0 else False))\n\n # Remove old file first for compatibility with systems that don't allow\n # rename replacement. 
Catching OSError for when file does not exist\n # yet\n try:\n os.remove(\"{}.hdf5\".format(self.filename))\n except OSError:\n pass\n\n shutil.move(\"{}.hdf5\".format(tmp_name), \"{}.hdf5\".format(self.filename))", "title": "" }, { "docid": "d6350646fa727dcb39ec7af4ee6c6ece", "score": "0.68004096", "text": "def dataset_to_hdf(fname):\n f = open(fname, 'rb')\n ds = pickle.load(f)\n f.close()\n f_base = basename(fname)\n root, ext = splitext(f_base)\n hdf_fname = root + '.vfit.h5'\n save_hdf_dataset(ds, hdf_fname)\n return hdf_fname", "title": "" }, { "docid": "2538a7e2385b02803d47f7574c45dc81", "score": "0.67987245", "text": "def create_hdf5(filename, shape, dtype, fillvalue=None,\n dsetname=DEFAULT_DSETNAME):\n f = h5py.File(filename, 'w')\n dset = f.create_dataset(dsetname, shape, dtype=dtype, fillvalue=fillvalue,\n **DEFAULT_DSETPARAMS)\n return dset", "title": "" }, { "docid": "413c473e43ec66c3360da2f793ed04e3", "score": "0.6793435", "text": "def create_hdf5_dataset(dataset_name, file_or_dir, ext, ):\r\n\r\n with h5py.File(dataset_name, 'w') as fp:\r\n\r\n # if we are dealing with only one file\r\n if isfile(file_or_dir):\r\n fname = os.path.split(file_or_dir)[1].split('.')[0]\r\n g = fp.create_group('root')\r\n events = load_dat_events(file_or_dir)\r\n g.create_dataset(f'{fname}', data=events, compression=8)\r\n\r\n # else we are dealing with directories\r\n else:\r\n _add_all_files(fp, file_or_dir, 'root', ext)\r\n\r\n # Navigate subdirectories\r\n sub_dirs = [f.name for f in os.scandir(file_or_dir) if f.is_dir()]\r\n if '.Ds_Store' in sub_dirs: sub_dirs.remove('.Ds_Store')\r\n\r\n # logging.info(f'Processing directories: {sub_dirs} ')\r\n # for each subdirectory add all_files\r\n for folder in sub_dirs:\r\n _add_all_files(fp, os.path.join(file_or_dir, folder), folder, ext)", "title": "" }, { "docid": "a077b5a808db77faa68f9819199d9acd", "score": "0.676816", "text": "def save(filename, d, write_mode='a', overwrite_dataset=False,\n resize=False, dict_label='', compression=None):\n try:\n f = h5py.File(filename, write_mode)\n except IOError:\n raise IOError(\"unable to create {filename} (File \"\n \"accessability: Unable to open \"\n \"file)\".format(filename=filename))\n else:\n try:\n if dict_label:\n base = f.require_group(dict_label)\n _dict_to_h5(f, d, overwrite_dataset, parent_group=base,\n compression=compression)\n else:\n _dict_to_h5(f, d, overwrite_dataset, compression=compression)\n finally: # make sure file is closed even if an exception is raised\n fname = f.filename\n f.close()\n if overwrite_dataset is True and resize is True:\n call(['h5repack', '-i', fname, '-o', fname + '_repack'])\n call(['mv', fname + '_repack', fname])", "title": "" }, { "docid": "cc82f92e287ba534651e0aead3847bb0", "score": "0.67634237", "text": "def to_hdf5(self, filename):\n import h5py\n\n with h5py.File(filename, \"w\") as f:\n ds = f.create_dataset(\"lattice\", (3, 3), dtype=\"float\")\n ds[...] = self.structure.lattice.matrix\n ds = f.create_dataset(\"Z\", (len(self.structure.species),), dtype=\"i\")\n ds[...] = np.array([sp.Z for sp in self.structure.species])\n ds = f.create_dataset(\"fcoords\", self.structure.frac_coords.shape, dtype=\"float\")\n ds[...] = self.structure.frac_coords\n dt = h5py.special_dtype(vlen=str)\n ds = f.create_dataset(\"species\", (len(self.structure.species),), dtype=dt)\n ds[...] = [str(sp) for sp in self.structure.species]\n grp = f.create_group(\"vdata\")\n for k in self.data:\n ds = grp.create_dataset(k, self.data[k].shape, dtype=\"float\")\n ds[...] 
= self.data[k]\n f.attrs[\"name\"] = self.name\n f.attrs[\"structure_json\"] = json.dumps(self.structure.as_dict())", "title": "" }, { "docid": "6a175115f44d4d444e952a3dfbebd3d7", "score": "0.6748319", "text": "def save(self, filename=\"\"):\n if filename == \"\":\n # use the same name as the HDF5 file\n filename = os.path.splitext(self._file.filename)[0] + \".txt\"\n\n # write to file\n with open(filename, \"w\") as of:\n with redirect_stdout(of):\n self.print()", "title": "" }, { "docid": "6b61232fffe2c74086c7ca0eace26947", "score": "0.6748067", "text": "def save_image_to_h5(image, h5_path, *args, **kwargs):\n with h5py.File(h5_path, \"w\") as f:\n f.create_group(\"data\").create_dataset(\"image\", data=image)", "title": "" }, { "docid": "244888975c154eece21100ca337aa21a", "score": "0.67463636", "text": "def _save_hdf5(self, filename, group = \"OrbitResponseMatrix\"):\n h5zip = None # 'gzip' works in default install\n f = h5py.File(filename, 'w')\n grp = f.create_group(group)\n\n str_type = h5py.new_vlen(str)\n m, n = np.shape(self.m)\n dst = grp.create_dataset('m', (m,n), data=self.m, compression=h5zip)\n #\n name, plane = zip(*self.bpm)\n name = [v.encode('ascii') for v in name]\n dst.attrs[\"bpm_name\"] = name\n dst.attrs[\"bpm_field\"] = plane\n name, plane = zip(*self.cor)\n dst.attrs[\"cor_name\"] = name\n dst.attrs[\"cor_field\"] = plane\n if self.bpm_pv:\n dst.attrs[\"bpm_pv\"] = self.bpm_pv\n if self.cor_pv:\n dst.attrs[\"cor_pv\"] = self.cor_pv\n if self.cor_pvrb:\n dst.attrs[\"cor_pvrb\"] = self.cor_pvrb\n\n f.close()", "title": "" }, { "docid": "0d3b1998cef76e29be24b00eeed9ac17", "score": "0.67428434", "text": "def save_hdf_file(file_path, idata, key_path='entry', overwrite=True):\n if \"\\\\\" in file_path:\n raise ValueError(\n \"Please use a file path following the Unix convention\")\n file_base, file_ext = os.path.splitext(file_path)\n if not ((file_ext == '.hdf') or (file_ext == '.h5')):\n file_ext = '.hdf'\n file_path = file_base + file_ext\n _create_folder(file_path)\n if not overwrite:\n file_path = _create_file_name(file_path)\n try:\n ofile = h5py.File(file_path, 'w')\n except IOError:\n print(\"Couldn't write file: {}\".format(file_path))\n raise\n grp = ofile.create_group(key_path)\n grp.create_dataset(\"data\", data=idata)\n ofile.close()\n return file_path", "title": "" }, { "docid": "1bb29d0b41b3674c97d7bf3a3a09e686", "score": "0.6740051", "text": "def save_hdf5(self, filename, force_overwrite=True):\n with HDF5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:\n f.write(coordinates=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit),\n time=self.time,\n cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit),\n cell_angles=self.unitcell_angles)\n f.topology = self.topology", "title": "" }, { "docid": "458fb38faec6fedc9eacb748b3d3ba76", "score": "0.67296785", "text": "def test_save_data_to_hdf5_2(tmp_path, sets_to_select, sets_to_save):\n fln = \"test.h5\"\n fpath = os.path.join(tmp_path, fln)\n data, metadata = _prepare_raw_dataset(N=5, M=10, K=4096)\n\n data_to_save = copy.deepcopy(data)\n if sets_to_select == \"sum\":\n # Leave only\n del data_to_save[\"det1\"]\n del data_to_save[\"det2\"]\n del data_to_save[\"det3\"]\n elif sets_to_select == \"channels\":\n # Leave only sum\n del data_to_save[\"det_sum\"]\n\n kwargs = {}\n if sets_to_save == \"all\":\n kwargs = {\"create_each_det\": True}\n elif sets_to_save == \"sum\":\n kwargs = {\"create_each_det\": False}\n\n save_data_to_hdf5(fpath, 
data_to_save, metadata=metadata, **kwargs)\n data_loaded, metadata_loaded = read_data_from_hdf5(fpath)\n\n metadata_selected = {_: metadata_loaded[_] for _ in metadata}\n assert metadata_selected == metadata\n\n assert data_loaded[\"pos_names\"] == data[\"pos_names\"]\n npt.assert_array_almost_equal(data_loaded[\"pos_data\"], data[\"pos_data\"])\n\n assert isinstance(data_loaded[\"det_sum\"], np.ndarray)\n npt.assert_array_almost_equal(data_loaded[\"det_sum\"], data[\"det_sum\"])\n if (sets_to_select == \"sum\") or (sets_to_save == \"sum\"):\n # Only 'sum' of all channels will be saved\n keys = set(data.keys())\n keys.remove(\"det1\")\n keys.remove(\"det2\")\n keys.remove(\"det3\")\n assert \"det1\" not in data_loaded\n assert \"det2\" not in data_loaded\n assert \"det3\" not in data_loaded\n assert set(data_loaded.keys()) == keys\n else:\n assert set(data_loaded.keys()) == set(data.keys())\n npt.assert_array_almost_equal(data_loaded[\"det1\"], data[\"det1\"])\n npt.assert_array_almost_equal(data_loaded[\"det2\"], data[\"det2\"])\n npt.assert_array_almost_equal(data_loaded[\"det3\"], data[\"det3\"])\n\n assert data_loaded[\"pos_names\"] == data[\"pos_names\"]\n npt.assert_array_almost_equal(data_loaded[\"pos_data\"], data[\"pos_data\"])\n assert data_loaded[\"scaler_names\"] == data[\"scaler_names\"]\n npt.assert_array_almost_equal(data_loaded[\"scaler_data\"], data[\"scaler_data\"])", "title": "" }, { "docid": "9e2a6d5704a1540d1a0f7bec297d82f5", "score": "0.67233026", "text": "def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True):\n # convert to numpy array\n write_data = np.array(write_data)\n\n # check folder existence\n folder_name, _ = os.path.split(hdf5_name)\n if not os.path.exists(folder_name) and len(folder_name) != 0:\n os.makedirs(folder_name)\n\n # check hdf5 existence\n if os.path.exists(hdf5_name):\n # if already exists, open with r+ mode\n hdf5_file = h5py.File(hdf5_name, \"r+\")\n # check dataset existence\n if hdf5_path in hdf5_file:\n if is_overwrite:\n print(\"Warning: data in hdf5 file already exists. 
recreate dataset in hdf5.\")\n hdf5_file.__delitem__(hdf5_path)\n else:\n print(\"ERROR: there is already dataset.\")\n print(\"if you want to overwrite, please set is_overwrite = True.\")\n hdf5_file.close()\n sys.exit(1)\n else:\n # if not exists, open with w mode\n hdf5_file = h5py.File(hdf5_name, \"w\")\n\n # write data to hdf5\n hdf5_file.create_dataset(hdf5_path, data=write_data)\n hdf5_file.flush()\n hdf5_file.close()", "title": "" }, { "docid": "a026d7d37cdaa5ea96e9aec6c2f293ed", "score": "0.6705257", "text": "def wH5data(fname, dname, data, reverseShapeAttr=False, verbose=False, overwrite=False, slicing=None, chunkCompress=False):\n f = None\n if slicing is not None:\n f = h5py.File(fname, 'a')\n else:\n if not overwrite and os.path.exists(fname):\n raise RuntimeError(\"path '%s' already exists\" % fname)\n try:\n f = h5py.File(fname, 'w')\n except:\n raise RuntimeError(\"could not open file '%s' for writing\" % fname)\n paths = dname.split(\"/\")\n ds = f\n for i,path in enumerate(paths):\n if i != len(paths)-1:\n ds = ds.create_group(path)\n else:\n if slicing is None:\n if chunkCompress:\n ds = ds.create_dataset(path, data=data, chunks=True, compression='gzip')\n else:\n ds = ds.create_dataset(path, data=data)\n if(reverseShapeAttr):\n ds.attrs.create(\"reverse-shape\", \"1\")\n else:\n ds = ds[path]\n ds[slicing] = data\n\n if verbose:\n print \"wrote '%s/%s' (shape=%r, dtype=%s)\" % (fname,dname,data.shape,data.dtype)\n f.close()", "title": "" }, { "docid": "9a1dd0a4c891e195d8c3de08bf3400d1", "score": "0.66942126", "text": "def toHDF5(self):\n for writable in self.writable_list:\n writable.toHDF5(self.h5path)", "title": "" }, { "docid": "fafaaa3ec321134fea0d4c06b272853e", "score": "0.66927093", "text": "def save_dataset(filename, dataset):\n\n with open(filename, mode='w', encoding='utf-8')as f:\n np.savetxt(filename, dataset)\n f.close()\n print(f\"{filename} saved successfully\")", "title": "" }, { "docid": "41860ed76663c7371eb5ea3c21d437c6", "score": "0.6686285", "text": "def writeH5Dataset(self, foldername, time, nameConvention=\"grid\"):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername, nameConvention, time)\n file = h5py.File(filename, 'w', driver='mpio', comm=self.global_comm)\n dset = file.create_dataset(\n \"dset\", self._layout.fullShape, dtype=self._f.dtype)\n slices = tuple([slice(s, e) for s, e in zip(\n self._layout.starts, self._layout.ends)])\n dset[slices] = self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data,\n (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()", "title": "" }, { "docid": "bad5ba2b9d602728954ad290c2b7b5f4", "score": "0.6684261", "text": "def save_arrays(array_list: tp.Iterable[np.ndarray], out_file):\n hdf5_file = h5py.File(str(out_file), \"w\")\n for i, array in enumerate(array_list):\n hdf5_file.create_dataset(f\"series{i}\", data=array)\n hdf5_file.close()", "title": "" }, { "docid": "d52ed80969fe3b0004531b733826f5af", "score": "0.66817296", "text": "def test_save_data_to_hdf5_1(tmp_path):\n fln = \"test.h5\"\n fpath = os.path.join(tmp_path, fln)\n data, metadata = _prepare_raw_dataset(N=5, M=10, K=4096)\n\n save_data_to_hdf5(fpath, data, metadata=metadata)\n\n # Check if the file exists\n assert os.path.isfile(fpath)\n\n # Check that the exception occurs in case of attempt to overwrite the file\n with pytest.raises(IOError, match=\"File .* already exists\"):\n save_data_to_hdf5(fpath, data, metadata=metadata)\n\n # File should be overwritten\n save_data_to_hdf5(fpath, data, 
metadata=metadata, file_overwrite_existing=True)\n\n # Different version of the file should be created\n save_data_to_hdf5(fpath, data, metadata=metadata, fname_add_version=True)\n save_data_to_hdf5(fpath, data, metadata=metadata, fname_add_version=True)\n\n # Check if the file exists\n assert os.path.isfile(fpath)\n assert os.path.isfile(os.path.join(tmp_path, \"test_v1.h5\"))\n assert os.path.isfile(os.path.join(tmp_path, \"test_v2.h5\"))", "title": "" }, { "docid": "e96658eab7aa65c23f1c0d226d042ee2", "score": "0.6678098", "text": "def write_hdf5_file(\n file_path, data, n_examples, chunks, dtype=\"i4\", compression=\"gzip\"\n):\n with h5py.File(file_path, mode='w') as h5_file:\n h5_file.attrs[\"n_examples\"] = n_examples\n h5_file.create_dataset(\n \"data\",\n data=data,\n dtype=dtype,\n chunks=chunks,\n compression=compression,\n )", "title": "" }, { "docid": "ed377e53496758f9212ff392cf69dc33", "score": "0.667726", "text": "def save_data(self):\n db = pd.HDFStore(self.path)\n try:\n db.remove('option/clean')\n except KeyError:\n pass\n\n for name, key in zip(names, keys):\n if name in self.df_list.keys():\n db.append('option/clean/%s' % key, self.df_list[name])\n print output % ('DATA', 'save into h5 df_%s: %d' % (key, len(self.df_list[name])))\n db.close()\n print '=' * 70", "title": "" }, { "docid": "dcd3bdfc29642e94bc3cc4d86be5e823", "score": "0.6672849", "text": "def save_dataframe(filename_or_h5group, dataset_name, dataframe):\n if isinstance(filename_or_h5group, str):\n with h5.File(filename_or_h5group, 'a') as f:\n _save_dataframe_to_h5group(f, dataset_name, dataframe)\n f.close()\n elif isinstance(filename_or_h5group, h5.Group):\n _save_dataframe_to_h5group(filename_or_h5group, dataset_name, dataframe)\n else:\n raise TypeError(\n 'Expected argument `filename_or_h5group` to be str '\n 'filename or instance of h5py.Group, got {} of type {} instead'.format(\n filename_or_h5group, type(filename_or_h5group)\n )\n )", "title": "" }, { "docid": "63a6bd135eea2959a464b50dc660050c", "score": "0.6672335", "text": "def export_hdf5_filename(self, hdf5_filename: str) -> None:\n from pyNastran.op2.op2_interface.hdf5_interface import export_op2_to_hdf5_filename\n export_op2_to_hdf5_filename(hdf5_filename, self)", "title": "" }, { "docid": "c3fd20d54fb98cb4ff91dd096a60eda3", "score": "0.6670214", "text": "def write(self, filename):\n with h5py.File(filename, 'a') as f:\n dset = f.create_dataset('data', data=self.ys)\n dset = f.create_dataset('ivars', data=self.ivars)\n dset = f.create_dataset('xs', data=self.xs)\n for attr in np.append(REQUIRED_1D, OPTIONAL_1D):\n if not getattr(self, attr).dtype.type is np.str_:\n dset = f.create_dataset(attr, data=getattr(self, attr))\n else:\n strings = [a.encode('utf8') for a in getattr(self, attr)] # h5py workaround\n dset = f.create_dataset(attr, data=strings)", "title": "" }, { "docid": "710102a51939863f8e92632bb6f5e3c0", "score": "0.6664588", "text": "def save_subset_big(filename, data, begin, end):\n\n o = h5py.File(filename, 'w')\n\n for key in data:\n shape = list(data[key].shape)\n shape[0] = end - begin\n o.create_dataset(key, shape, dtype=data[key].dtype,\n compression='gzip')[...] 
= data[key][begin:end]\n\n o.close()", "title": "" }, { "docid": "120350389b834b093d604b1d32a1ceb2", "score": "0.66642326", "text": "def save_orbit_hdf5(filename, obj):\n VERSION = '1.0'\n\n if os.path.splitext(filename) != '.hdf5':\n filename += '.hdf5'\n\n if obj.names['BPMx'] is None:\n names_BPMx = []\n names_BPMy = []\n names_CMx = []\n names_CMy = []\n else:\n names_BPMx = obj.names['BPMx']\n names_BPMy = obj.names['BPMy']\n names_CMx = obj.names['CMx']\n names_CMy = obj.names['CMy']\n\n with h5py.File(filename, 'w') as f:\n f.create_dataset('data/BPMx', data=obj.BPMx)\n f.create_dataset('data/BPMy', data=obj.BPMy)\n f.create_dataset('data/CMx', data=obj.CMx)\n f.create_dataset('data/CMy', data=obj.CMy)\n f.create_dataset('sampling_frequency', data=obj.sampling_frequency)\n f.create_dataset('names/BPMx', data=names_BPMx)\n f.create_dataset('names/BPMy', data=names_BPMy)\n f.create_dataset('names/CMx', data=names_CMx)\n f.create_dataset('names/CMy', data=names_CMy)\n f.attrs['data_structure'] = \"array[item, time_sample]\"\n f.attrs['measure_date'] = obj.measure_date.strftime(DATETIME_ISO)\n f.attrs['creation_date'] = datetime.now().strftime(DATETIME_ISO)\n f.attrs['__version__'] = VERSION", "title": "" }, { "docid": "a203d624508f26e38f4c422145d25942", "score": "0.6646201", "text": "def write_hdf5(self, file_name: str, *, metadata: typing.Optional[typing.Dict[str, typing.Any]] = None) -> None:\n assert isinstance(file_name, str), 'File name must be of type str'\n assert isinstance(metadata, dict) or metadata is None, 'Metadata must be a dict or None'\n\n if metadata is None:\n # Set default value\n metadata = {}\n else:\n assert isinstance(metadata, dict), 'Metadata must be of type dict'\n assert all(isinstance(k, str) for k in metadata), 'Keys of the metadata dict must be of type str'\n metadata = metadata.copy() # Copy metadata to make sure the dict is not mutated\n\n # Add default metadata\n metadata.update({\n 'artiq_version': _artiq_version,\n 'dax_version': _dax_version,\n })\n\n with h5py.File(file_name, mode='w') as f:\n # Write archive and datasets\n self.dataset_mgr.write_hdf5(f)\n # Write metadata\n for k, v in metadata.items():\n f[k] = v", "title": "" }, { "docid": "07d3a9b9ff2a1ffc331be5bdf2c0e582", "score": "0.6633718", "text": "def save(self, filename, df=None):\n\n\t\tdf.to_hdf(self.output_dir + filename, 'df', mode='w')", "title": "" }, { "docid": "7f72dc99454b1b974419a29860e737ed", "score": "0.6625079", "text": "def h5_write(data, shape, dtype,\n h5path_full, h5file=None,\n element_size_um=None, axislabels=None,\n chunks=True, compression=\"gzip\",\n comm=None,\n slices=None):\n\n if comm is not None:\n chunks = None\n compression = None\n\n if h5path_full:\n\n basepath, h5path_dset = h5path_full.split('.h5')\n if not isinstance(h5file, h5py.File):\n h5path_file = basepath + '.h5'\n if comm is not None:\n h5file = h5py.File(h5path_file, 'a',\n driver='mpio', comm=comm)\n else:\n h5file = h5py.File(h5path_file, 'a')\n\n if h5path_dset in h5file:\n h5ds = h5file[h5path_dset]\n else:\n if comm is not None:\n h5ds = h5file.create_dataset(h5path_dset,\n shape=shape,\n dtype=dtype)\n else:\n h5ds = h5file.create_dataset(h5path_dset,\n shape=shape,\n dtype=dtype,\n chunks=chunks,\n compression=compression)\n h5_write_attributes(h5ds, element_size_um, axislabels)\n\n if data is not None:\n\n write_to_h5ds(h5ds, data, slices)\n\n h5file.close()\n\n else:\n\n return h5file, h5ds\n\n else:\n\n return None, np.empty(shape, dtype)", "title": "" }, { "docid": 
"a9f8644c24ad85768c469773dc9443ad", "score": "0.66218895", "text": "def _save_dataframe_to_h5group(h5group, dataset_name, dataframe):\n h5group.create_dataset(\n dataset_name,\n data=np.asarray(dataframe.values, dtype=np.bytes_),\n dtype=h5.special_dtype(vlen=str),\n )\n h5group[dataset_name].attrs[COLNAMEATTR] = json.dumps(\n dataframe.columns.tolist()\n )\n h5group[dataset_name].attrs[COLTYPEATTR] = json.dumps(\n [t.str for t in dataframe.dtypes.tolist()]\n )", "title": "" }, { "docid": "8f8e581bc8c0a5bc0e04fe97ea40145a", "score": "0.6609155", "text": "def write_h5f(variables, variable_names, file_name):\n h5f = h5py.File(file_name, 'w')\n for (v, vn) in zip(variables, variable_names):\n h5f.create_dataset(vn, data=v)\n h5f.close()", "title": "" }, { "docid": "d53c5637e8f1c7854f8bf47e16545dd0", "score": "0.6600406", "text": "def write_dataset(dataset, save_path):\n if not dataset:\n logging.info('No dataset to write.')\n return\n logging.info(f'Writing dataset to {save_path}')\n for split_name, list_of_input_output_pairs in dataset.items():\n folder_name = os.path.join(save_path, split_name)\n if not os.path.exists(folder_name):\n os.makedirs(folder_name)\n encode_name = os.path.join(folder_name, f'{split_name}_encode.txt')\n decode_name = os.path.join(folder_name, f'{split_name}_decode.txt')\n skeleton_name = os.path.join(folder_name, f'{split_name}_skeleton.txt')\n with open(\n encode_name, 'w', encoding='utf8') as encode_f, open(\n decode_name, 'w', encoding='utf8') as decode_f, open(\n skeleton_name, 'w', encoding='utf8') as skeleton_f:\n for pair in list_of_input_output_pairs:\n encode_f.write(pair[0] + '\\n')\n decode_f.write(pair[1] + '\\n')\n skeleton_f.write(pair[2] + '\\n')\n logging.info(f'Dataset written to {save_path}')", "title": "" }, { "docid": "5e773b412bcb256a30bab6038b3d81d5", "score": "0.65975153", "text": "def to_hdf5(self, fname: Path, *, overwrite=False) -> None:\n mode = \"w\" if overwrite else \"w-\"\n with h5py.File(fname, mode) as fout:\n for station, sats in self.station_data.items():\n for prn, data in sats.items():\n fout.create_dataset(f\"data/{station}/{prn}\", data=data)\n for station, loc in self.station_locs.items():\n fout[f\"loc/{station}\"] = loc\n fout.attrs.update(\n {\n \"start_date\": self.start_date.timestamp(),\n \"duration\": self.duration.total_seconds(),\n }\n )", "title": "" }, { "docid": "aea9fb05f4b8fc1b67bfe0020e95392f", "score": "0.65876096", "text": "def test_saveToHdf5(self):\n for key in self.testCubes.keys():\n print \"Checking HDF5 loading of test cube {0!s}\".format(key)\n cube = self.testCubes[key]\n filename = os.path.normpath(self.dataPath+\"/test_{0!s}.hdf5\".format(key))\n cube.saveToHdf5(filename,overwrite = True)\n \n self.assert_(os.path.exists(filename),\"File {0!s} has not been created!\".format(filename))\n self.assert_(os.path.isfile(filename))\n \n restoredCube = Datacube()\n restoredCube.loadFromHdf5(filename)\n \n self.assert_(restoredCube.equal(cube),\"Error: Restored datacube does not match original one!\")", "title": "" }, { "docid": "aeba7462ac85327315291d6621c2bf86", "score": "0.6577293", "text": "def save_lh5(self, filename, force_overwrite=True):\n with LH5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f:\n f.write(coordinates=self.xyz)\n f.topology = self.topology", "title": "" }, { "docid": "71e60ba686397cad690344d5f73853d8", "score": "0.657645", "text": "def write_h5(self, savefile, warning=True):\n import h5py\n import pickle\n \n if os.path.isfile(savefile):\n _savefile, savefile = 
savefile, savefile.replace(\".h5\", f\"_{io.get_now()}.h5\")\n if warning:\n warn(f\"The file {_savefile} is already existing. Writing results in: {savefile}\")\n \n from prospect.io import write_results as writer\n writer.write_hdf5(hfile=savefile, run_params=self.run_params, model=self.model, obs=self.obs,\n sampler=self.fit_output[\"sampling\"][0], optimize_result_list=self.fit_output[\"optimization\"][0],\n tsample=self.fit_output[\"sampling\"][1], toptimize=self.fit_output[\"optimization\"][1])\n with h5py.File(savefile, \"a\") as _h5f:\n _h5f.create_dataset(\"model\", data=np.void(pickle.dumps(self.model)))\n _h5f.create_dataset(\"sps\", data=np.void(pickle.dumps(self.sps)))\n _h5f.flush()", "title": "" }, { "docid": "49cfebf0924c685f1951d8903c4304a2", "score": "0.65652305", "text": "def h5_to_ds(inpath, outpath):\n pass", "title": "" } ]
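For reference, a minimal, self-contained sketch of the h5py write pattern that the passages above revolve around; the file name, dataset path, and attribute keys here are illustrative assumptions, not taken from any passage:

import h5py
import numpy as np

data = np.arange(12, dtype=np.float64).reshape(3, 4)

# mode "w" truncates any existing file; "a" would append to an existing one
with h5py.File("example_output.h5", "w") as f:
    dset = f.create_dataset("group/values", data=data, compression="gzip")
    dset.attrs["units"] = "arbitrary"       # attributes travel with the dataset
    f.attrs["creation_date"] = "2000-01-01" # file-level metadata, as several passages store

# read it back to confirm the round trip
with h5py.File("example_output.h5", "r") as f:
    assert f["group/values"].shape == (3, 4)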
65c71fb8a4ab4f4cfe3284611a7507f9
Creates a database and deletes it if it already exists.
[ { "docid": "47b3a033fb75f986d7c6942aa34364bf", "score": "0.0", "text": "def syncdb(self):\n conn.execute('''DROP TABLE IF EXISTS CAMPGROUNDS''') # preventing \"sqlite3.OperationalError: table already exists\"\n\n conn.execute('''CREATE TABLE CAMPGROUNDS \n (ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n NAME TEXT NOT NULL,\n TYPE TEXT NOT NULL,\n LOCATION TEXT NOT NULL,\n CAPACITY INT NOT NULL,\n PARKING TEXT NOT NULL,\n INTERNET TEXT NOT NULL,\n RESTROOMS TEXT NOT NULL,\n SHOWERS TEXT NOT NULL,\n POOL TEXT NOT NULL,\n PET_FRIENDLY TEXT NOT NULL,\n FAMILY_FRIENDLY TEXT NOT NULL,\n WATER_HOOK_UP TEXT NOT NULL,\n SEWER_HOOK_UP TEXT NOT NULL,\n PICNIC_AREA TEXT NOT NULL);''') # creating the table and columns\n\n # populating the table:\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Rocky Bend Campground', 'Tent sites', 'Upper Nestucca River Rd, Beaver, OR 97108', 6, 'Yes', 'No', 'Vault toilet', 'No', 'No', 'Yes', 'No playground', 'No', 'No', 'No')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Deerwood RV Park', 'RV park', '35059 Seavey Loop Rd, Eugene, OR 97405', 50, 'Yes', 'Yes', 'Flush toilets', 'Yes', 'No', 'Yes', 'No playground', 'Yes', 'Yes', 'No')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Packard Creek Campground', 'Tent sites', 'NF-21, Westfir, OR 97492', 35, 'Yes', 'No', 'Vault toilet', 'No', 'No', 'Yes', 'No playground', 'No', 'No', 'Yes')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Mt Hood Village RV Resort', 'RV park', '65000 E. 
Hwy 26, Welches, OR 97067', 382, 'Yes', 'Yes', 'Flush toilets', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes', 'Yes')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Portland Fairview RV Park', 'RV park', '21401 NE Sandy Blvd, Fairview, OR 97024', 407, 'Yes', 'No', 'Flush toilets', 'Yes', 'Yes', 'Yes', 'No playground', 'Yes', 'Yes', 'No')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Barton Park', 'Tent sites', '19009 SE Barton Park Rd, Boring, OR 97009', 112, 'Yes', 'No', 'Vault toilets', 'No', 'No', 'Yes', 'No playground', 'No', 'No', 'Yes')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Dougan Creek Campground', 'Tent sites', 'Park Rd, Washougal, WA 98671', 7, 'Yes', 'No', 'Flush toilets', 'No', 'No', 'Yes', 'No playground', 'No', 'No', 'Yes')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Jantzen Beach RV Park', 'RV park', '1503 N Hayden Island Dr, Portland, OR 97217', 85, 'Yes', 'Yes, high-speed', 'Flush toilets', 'Yes', 'Yes', 'Yes', 'Playground, clubhouse, game room, basketball court', 'Yes', 'Yes', 'No')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Lower Falls Campground', 'Tent sites', '42218 NE Yale Bridge Rd, Amboy, WA 98601', 43, 'Yes', 'No', 'Accessible vault toilets', 'No', 'No', 'Yes', 'No playground', 'No', 'No', 'Yes')\");\n\n conn.execute(\"INSERT INTO CAMPGROUNDS (NAME,TYPE,LOCATION,CAPACITY,PARKING,INTERNET,RESTROOMS,SHOWERS,POOL,PET_FRIENDLY,FAMILY_FRIENDLY,WATER_HOOK_UP, SEWER_HOOK_UP,PICNIC_AREA) \\\n VALUES ('Promontory Park Campground', 'Tent sites', '40600 OR-224, Estacada, OR 97023', 46, 'Yes', 'No', 'Accessible restrooms', 'Yes', 'No', 'Yes', 'A fishing lake just for kids', 'No', 'No', 'Yes')\");\n\n conn.commit()", "title": "" } ]
[ { "docid": "12ad554f5bd2d013fe7a9d8a77a2f529", "score": "0.79566014", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.784458", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.784458", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.784458", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.784458", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.784458", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "3afd585c2246649a715145389c9ca04f", "score": "0.7764564", "text": "def create_db():\n create_database()", "title": "" }, { "docid": "097e739c149e33f13c0b8959ea369fa7", "score": "0.7731424", "text": "def _delete_database(self):\n self.engine.execute(\n \"DROP DATABASE IF EXISTS {};\".format(self.filename))", "title": "" }, { "docid": "048ddb67aceb203d2a20347fd01f71bc", "score": "0.76624", "text": "def create_db():\n with app.app_context():\n db.create_all()", "title": "" }, { "docid": "a212d8bbc9e805ceb4a115bf49646502", "score": "0.766156", "text": "def create_database(self):\n logger.debug(self.temp_conn())\n exists = self.cursor.fetchone()\n if not exists:\n self.cursor.execute('CREATE DATABASE {};'.format(self.dbName))\n print('Succesfully created database: {}'.format(self.dbName))\n else:\n print(f'Database {self.dbName} already exists')\n self.conn.close()\n self.cursor.close()", "title": "" }, { "docid": "1cf69f367b04bb69ba9cb971afb55a1d", "score": "0.75599015", "text": "def create_database(dbname):\n # TBD: Use shell when available\n if PSQL.database_exists(dbname):\n raise PSQLException(\"Database %s already exists\" %dbname)\n cmd = Command(name='drop database', cmdStr='createdb %s' %(dbname))\n tinctest.logger.debug(\"Creating database: %s\" %cmd)\n cmd.run(validateAfter = False)\n result = cmd.get_results()\n tinctest.logger.debug(\"Output - %s\" %result)\n if result.rc != 0 or result.stderr:\n return False\n return True", "title": "" }, { "docid": "96ba28b45a58727517c8b216d958cefd", "score": "0.75190765", "text": "def create_database(self, db_name):\n self.cur.execute('CREATE DATABASE {};'.format(db_name))", "title": "" }, { "docid": "89b752c16e740dc37967f5dd1b7d7317", "score": "0.7517836", "text": "def recreate_db():\n drop_db()\n create_db()", "title": "" }, { "docid": "89b752c16e740dc37967f5dd1b7d7317", "score": "0.7517836", "text": "def recreate_db():\n drop_db()\n create_db()", "title": "" }, { "docid": "7eace1e9a6621185b7b403e3ec1220f4", "score": "0.749302", "text": "def create_database(self, db_name):\n if db_name not in self.get_all_databases():\n self.db.execute(\"create database %s\" % db_name)\n else:\n print \"Warning: Database [%s] already exists\" % db_name", "title": "" }, { "docid": "709739c04af08a28fa8581c0ca24b3d5", "score": "0.7441495", "text": "def create_db(self, db_name):\n if not self.connection:\n raise error.NotConnected\n elif db_name == '':\n raise error.InvalidParameters\n else:\n query = \"CREATE DATABASE \" + db_name\n\n with self.connection.cursor() as cursor:\n cursor.execute(query)", "title": "" }, { "docid": "2804dc6a14580dbb73802c3f4100d1af", "score": "0.7369494", "text": "def create_db(self):\n # use blunt force to ensure a fresh test db each time\n if self.test and 
os.path.exists(self.test_db_path):\n os.remove(self.test_db_path)\n session = self.get_db_session()\n Base.metadata.create_all(self.engine)", "title": "" }, { "docid": "c7350385b3fb12f986a5ddcc5226c88d", "score": "0.73586375", "text": "def create_db():\n db.create_all()\n print(\"Tables created\")", "title": "" }, { "docid": "e8447081495bdbf81db3bbe73d9b30d1", "score": "0.73388135", "text": "def create_db():\n db = db_utils.get_db()\n\n with db:\n db.create_tables([\n models.Station,\n models.Network,\n models.Connection\n ])", "title": "" }, { "docid": "1e4f592616f7f826f8ca960b699a9a98", "score": "0.73384297", "text": "def create_db(database_url):\n engine = create_engine(database_url)\n Base.metadata.create_all(engine)\n print \"Database file created...\"", "title": "" }, { "docid": "28c87f7078d53af60688fc49760f2976", "score": "0.72963345", "text": "def create_database (instance, database, exc = True) :\n ins = get_list_database (instance)\n if database in ins :\n if exc : raise Exception (\"database %s already exists\" % database)\n else :\n cmd = 'sqlcmd -S %s -Q \"CREATE DATABASE %s\"' % (instance, database)\n out,err = run_cmd (cmd, wait = True, shell = True, log_error = False)\n if len (err) > 0 :\n raise Exception (\"error: %s\" % err)", "title": "" }, { "docid": "5e4432f906d74f24071ab24efdc9a374", "score": "0.7285172", "text": "def db_create(name, user=None, password=None, host=None, port=None):\r\n if db_exists(name):\r\n log.info('DB {0!r} already exists'.format(name))\r\n return False\r\n client = _client(user=user, password=password, host=host, port=port)\r\n return client.create_database(name)", "title": "" }, { "docid": "2ff30b1282333fef81ac6d6bf2f8e1fc", "score": "0.72840035", "text": "def empty_database(cls):\n # Create a custom connection not attached to the database, so we can\n # destroy and recreate the database itself\n connection = connect(cls.host, cls.user, cls.password)\n cursor = connection.cursor()\n cursor.execute(\n \"DROP DATABASE {};\".format(\n connection.escape_string(cls.database)))\n cursor.execute(\n \"CREATE DATABASE {};\".format(\n connection.escape_string(cls.database)))\n cursor.close()\n connection.close()", "title": "" }, { "docid": "4ef70f90bf24c4ab86302d2d8d958b4c", "score": "0.7265896", "text": "def create():\n db.create_all()", "title": "" }, { "docid": "4ef70f90bf24c4ab86302d2d8d958b4c", "score": "0.7265896", "text": "def create():\n db.create_all()", "title": "" }, { "docid": "e80b30eacfdcc937f27d0462c9e22912", "score": "0.725138", "text": "def create_database(self, dbname, **kwargs):\n new_db = self._DATABASE_CLASS(self, dbname)\n if new_db.exists():\n if kwargs.get('throw_on_exists', True):\n raise CloudantException(\n \"Database {0} already exists\".format(dbname)\n )\n new_db.create()\n super(CouchDB, self).__setitem__(dbname, new_db)\n return new_db", "title": "" }, { "docid": "f244b043e6e4cfb056f32cc653d179eb", "score": "0.72339725", "text": "def db_recreate(ctx):\n print(success('Recreating the database...'))\n manage(ctx, 'recreate_database', hide=os.environ.get('HIDE'))", "title": "" }, { "docid": "c5c96c8895fba533b21af7b05d4fdc61", "score": "0.72169113", "text": "def create_db(self):\n with self.db:\n self.db.drop_tables([models.Property, models.Category])\n self.db.create_tables([models.Property, models.Category])", "title": "" }, { "docid": "8ab58a2db491168f49ee614d47608577", "score": "0.72141707", "text": "def create_db():\n db.create_all(app=app)\n click.echo('Create and nationalize database')", "title": "" }, { "docid": 
"77763f24c74790a8c27520a73031e0b9", "score": "0.7197671", "text": "def drop_db():\n engine = SQLConfig().engine\n if database_exists(engine.url):\n db_session.commit()\n BaseModel.metadata.drop_all()\n drop_database(engine.url)", "title": "" }, { "docid": "6fed18057171bb9a615590000f293c27", "score": "0.7197606", "text": "def create_db(dbname):\r\n # Connect to the default database\r\n con = getopenconnection(dbname='postgres')\r\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\r\n cur = con.cursor()\r\n\r\n # Check if an existing database with the same name exists\r\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\r\n count = cur.fetchone()[0]\r\n if count == 0:\r\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\r\n else:\r\n print 'A database named {0} already exists'.format(dbname)\r\n\r\n # Clean up\r\n cur.close()\r\n con.close()", "title": "" }, { "docid": "b25228804bd8e656dcbcc6785ed427a1", "score": "0.7187221", "text": "def create_db(dbname):\n # Connect to the default database\n con = getopenconnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.close()", "title": "" }, { "docid": "a6fac02b438614eb904000983dfcc698", "score": "0.71845424", "text": "def createdb():\n db.create_all()", "title": "" }, { "docid": "4f1b43a824ab3dedbeb657a5354bf841", "score": "0.7165739", "text": "def createDB(dbname='ddsassignment3'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.commit()\n con.close()", "title": "" }, { "docid": "40073444654180ea669ed6146a5c8e97", "score": "0.7158237", "text": "def setup_database():\n os.makedirs(BASE + os.sep + \"db\", exist_ok=True)", "title": "" }, { "docid": "28ca5201eceaa34c05ce17fbe70c8bdb", "score": "0.71548826", "text": "def create_db(dbname):\n # Connect to the default database\n con = getopenconnection(dbname='dds_assgn1')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print 'A database named {0} already exists'.format(dbname)\n\n # Clean up\n cur.close()\n con.close()", "title": "" }, { "docid": "abfa092f8b9052e132b0da22a99f6e7b", "score": "0.7154821", "text": "def create_database(db_url, database=None, encoding=\"utf8\", template=None):\n dbm = DatabaseManager.make_manager(db_url, database)\n 
dbm.create(encoding, template)", "title": "" }, { "docid": "154ac92f68c9dba5cc576ec9a1ba590b", "score": "0.71486586", "text": "def init_db():\n engine = SQLConfig().engine\n if not database_exists(engine.url):\n logger.info(\"Database does not exist, creating database.\")\n create_database(engine.url)\n logger.info(\"Creating tables\")\n BaseModel.metadata.create_all()", "title": "" }, { "docid": "d55a3165eb70fabc1849838b063c1cf2", "score": "0.7140806", "text": "def create_new_database(self, path):\n os.mkdir(path)", "title": "" }, { "docid": "ad0665c3e0a179eb85ff5d9adf313968", "score": "0.71404654", "text": "def createDB(dbname='ddsassignment3'):\r\n # Connect to the default database\r\n con = getOpenConnection(dbname='postgres')\r\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\r\n cur = con.cursor()\r\n\r\n # Check if an existing database with the same name exists\r\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\r\n count = cur.fetchone()[0]\r\n if count == 0:\r\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\r\n else:\r\n print('A database named {0} already exists'.format(dbname))\r\n\r\n # Clean up\r\n cur.close()\r\n con.commit()\r\n con.close()", "title": "" }, { "docid": "0fcb3f362a9f687722ac91527b5b4ecb", "score": "0.7134745", "text": "def init_db():\n with app.app_context():\n db.session.commit()\n db.drop_all()\n db.create_all()", "title": "" }, { "docid": "e690df6977a8d0cd2ca97799d73dad66", "score": "0.712901", "text": "def createdb():\n from databus.models import db\n db.create_all()", "title": "" }, { "docid": "b44cda1ffe33e087ef94e33ab550f8a5", "score": "0.7128189", "text": "def __create_database(self):\n # build query and open cursor\n logging.info('Creating MySQL database if it does not exist')\n sql = f'CREATE DATABASE IF NOT EXISTS {self.dbname};'\n\n # create the database\n cursor = self.cnxn.cursor()\n cursor._defer_warnings = True\n cursor.execute(sql)\n self.cnxn.select_db(self.dbname)\n\n # cleanup\n cursor.close()\n self.cnxn.commit()", "title": "" }, { "docid": "8eb7b926578b552d34e96369baa5dcdf", "score": "0.7123573", "text": "def recreate_db():\n db.drop_all()\n db.create_all()\n db.session.commit()", "title": "" }, { "docid": "8eb7b926578b552d34e96369baa5dcdf", "score": "0.7123573", "text": "def recreate_db():\n db.drop_all()\n db.create_all()\n db.session.commit()", "title": "" }, { "docid": "a09cc027be18da5282d17353164e830e", "score": "0.7102976", "text": "def destroy_database(self):\n self.temp_conn()\n self.exists = self.cursor.fetchone()\n if self.exists:\n self.cursor.execute('DROP DATABASE {};'.format(self.dbName))\n print('Succesfully destroyed database: {}'.format(self.dbName))\n else:\n print(f'No such database, {self.dbName} exists')\n self.conn.close()\n self.cursor.close()", "title": "" }, { "docid": "b600c545b3cb2bab492a127f0f116c40", "score": "0.7086264", "text": "def recreate_db():\n\n db.drop_all()\n db.create_all()\n db.session.commit()", "title": "" }, { "docid": "fd96d2be1c3f328eeb51070a2d34c837", "score": "0.70653117", "text": "def create_databases(launch_args):\n cnx = connection.MySQLConnection(user=launch_args.dbusername,\n password=launch_args.dbpassword,\n host=launch_args.dbip)\n cursor = cnx.cursor()\n cursor.execute(\"DROP DATABASE IF EXISTS `{}`\". 
format(launch_args.dbname))\n cursor.execute(\"CREATE DATABASE `{}`\".format(launch_args.dbname))\n cnx.commit()\n cnx.close()", "title": "" }, { "docid": "6acba194fffc2dd56a550a1544022728", "score": "0.7054318", "text": "def create_database(self, database_name):\n\t\tself._connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n\t\tself.execute_query('CREATE DATABASE ' + database_name + ';', save=True)", "title": "" }, { "docid": "373013a0f7d2470df0f16facb337f018", "score": "0.70486075", "text": "def createDB(dbname='dds_assignment1'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print('A database named {0} already exists'.format(dbname))\n\n # Clean up\n cur.close()\n con.close()", "title": "" }, { "docid": "cd89e8fbfc6204a126034adfc90b928c", "score": "0.7042176", "text": "def create_database(db_name):\n engine_type = settings.DATABASES.get(\"default\").get(\"ENGINE\").split(\".\")[-1]\n\n if engine_type.find(\"postgresql\") != -1:\n command = [\"createdb\", \"-U\", \"postgres\", db_name, '-h', settings.DB_SERVER_ADDRESS]\n subprocess.call(command)\n\n elif engine_type.find(\"mysql\") != -1:\n import MySQLdb\n db = MySQLdb.connect(host=settings.DATABASES.get(\"default\").get(\"HOST\"),\n user=settings.DATABASES.get(\"default\").get(\"USER\"),\n passwd=settings.DATABASES.get(\"default\").get(\"PASSWORD\"))\n cur = db.cursor()\n cur.execute(\"CREATE DATABASE IF NOT EXISTS {}\".format(db_name))\n\n elif engine_type.find(\"sqlite\") != -1:\n pass\n else:\n logging.error(\"Database system %s is not available with data_versioning\")\n\n migrate_db(db_name)", "title": "" }, { "docid": "b7aa109fa919019c82e3e124f0856701", "score": "0.70190954", "text": "def db(app, request):\n test_db_file = app.config['SQLALCHEMY_DATABASE_URI'].split(\":///\")[1]\n\n if os.path.exists(test_db_file):\n os.unlink(test_db_file)\n\n def teardown():\n _db.drop_all()\n os.unlink(test_db_file)\n\n _db.app = app\n _db.create_all()\n\n request.addfinalizer(teardown)\n return _db", "title": "" }, { "docid": "18ce2f769f49c78de322a8e78e99b71e", "score": "0.70171726", "text": "def create_db():\n speaker.say(database.create_db())\n if todo.has_been_called:\n todo.has_been_called = False\n todo()\n elif add_todo.has_been_called:\n add_todo.has_been_called = False\n add_todo()\n return", "title": "" }, { "docid": "05e13723c94f616080755ce995f3086c", "score": "0.70141673", "text": "def recreate_db():\n db.drop_all()\n db.create_all()\n db.session.commit()\n print(\"recreated the database\")", "title": "" }, { "docid": "2260bdb2d87b3bc9e3d747a8eac290fc", "score": "0.69975406", "text": "def create_db(db_host, db_user, db_password, db_name):\n conn = psycopg2.connect(host = db_host,user = db_user,password = db_password, database = 'postgres')\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = conn.cursor()\n cur.execute(\"CREATE DATABASE %s ;\" % db_name)\n cur.close()", "title": "" }, { "docid": "bd49deac33b7251005810a9f5cb106ed", "score": "0.6995227", "text": "def create():\n db.create_all()\n print('Created all of the tables in the database.')", "title": "" }, { "docid": 
"bbae16e8e4527ee67dc7193a32066261", "score": "0.6992282", "text": "def recreate(self):\n try:\n os.remove(self.dbpath)\n except OSError:\n pass\n self.create()", "title": "" }, { "docid": "88d8518d1046c1742c863e66a95ad820", "score": "0.698289", "text": "def createdb():\n db.create_all()", "title": "" }, { "docid": "0f22a87f537734daf425012e5528a812", "score": "0.69634527", "text": "def create_database():\n logging.info(\"Creating CloudSQL database\")\n sql_wrapper.call_create_database(cloudsql_importer_constants)", "title": "" }, { "docid": "b7f08832e74f568e48906ca93942b5c8", "score": "0.69603914", "text": "def setup_database():\n try:\n with database:\n LOGGER.info('Attempting to create database')\n database.create_tables([\n Customer,\n CustomerContact\n ])\n LOGGER.info('Database created successfully.')\n except Exception as create_error:\n LOGGER.error(str(create_error))", "title": "" }, { "docid": "58ce0f933406dfe47e8b7d2911d9bb21", "score": "0.6937258", "text": "def createDB(dbname='dds_assignment'):\n # Connect to the default database\n con = getOpenConnection(dbname='postgres')\n con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n\n # Check if an existing database with the same name exists\n cur.execute('SELECT COUNT(*) FROM pg_catalog.pg_database WHERE datname=\\'%s\\'' % (dbname,))\n count = cur.fetchone()[0]\n if count == 0:\n cur.execute('CREATE DATABASE %s' % (dbname,)) # Create the database\n else:\n print('A database named {0} already exists'.format(dbname))\n # Clean up\n cur.close()\n con.close()", "title": "" }, { "docid": "a6d3e9b2be807ad655679a8b5495c29e", "score": "0.6931665", "text": "def db_init(db):\n\n # connect to the database\n mydb = mysql.connector.connect(\n host=db[\"host\"], user=db[\"user\"], passwd=db[\"passwd\"]\n )\n\n # set the database pointer\n cur = mydb.cursor()\n\n # delete an old database if exists\n sql = \"DROP DATABASE IF EXISTS \" + db[\"db_name\"]\n cur.execute(sql)\n\n # create a new database\n sql = \"CREATE DATABASE \" + db[\"db_name\"]\n cur.execute(sql)\n\n # close db and cursor\n mydb.close()\n cur.close()", "title": "" }, { "docid": "ef8067b0485c7855db3fac5f6aef73e1", "score": "0.693079", "text": "def create_new_database(self):\n try:\n req = sql_queries.CREATE_NEW_DATABASE\n self.cursor.execute(req)\n self.cursor.execute(TABLES['category'])\n self.cursor.execute(TABLES['product'])\n self.cursor.execute(TABLES['category_has_product'])\n\n except mysql.connector.Error as err:\n print(f\"Erreur lors de l'exécution de 'create_new_database'. 
\"\n f\"Détails de l'erreur : {err}\")", "title": "" }, { "docid": "10684a67cddbf51aad85a3f453250dbd", "score": "0.69268036", "text": "def create_empty_db(self) -> None:\n entries: list[_DBEntry] = [\n _DBEntry(fname=fname) for fname in _ensure_str(self.fnames)\n ]\n if self.db_fname.exists():\n self.db_fname.unlink()\n self._db = SimpleDatabase(self.db_fname)\n self._db.insert_multiple(entries)", "title": "" }, { "docid": "a3ac1dfb68cd7a9138626b2fbaa568af", "score": "0.6887185", "text": "def create_db():\n print(\"creating\")\n DemoBase.metadata.drop_all(engine)\n DemoBase.metadata.create_all(engine)\n global session\n create_test_data(session)", "title": "" }, { "docid": "bfa3f23319a563d94b0135de97d71d95", "score": "0.6885637", "text": "def drop_db_command():\n db.drop_all()\n db.create_all()\n\n click.echo(\"Initialized the database.\")", "title": "" }, { "docid": "0ad1fd95f2f6b62a4d6a474d9509f1b7", "score": "0.6882462", "text": "def create_db(args,engine=None):\n if engine is None:\n RDS = eval(args.RDS)\n logger.info(\"RDS:%s\"%RDS)\n engine = sql.create_engine(get_engine_string(RDS = RDS))\n\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n logging.info(\"database created\")", "title": "" }, { "docid": "108982df571cb56d24452c15ab0a3cfa", "score": "0.68802434", "text": "def _create_new_db(self, title=None):\n return self.create_new_db_cli(title)", "title": "" }, { "docid": "f774d5066be7a25a7b954d3317c0f313", "score": "0.68452847", "text": "def create_db(name=None):\n helpers.create_db(name, env.vagrant_host, env.vagrant_db_user, env.vagrant_db_pass)", "title": "" }, { "docid": "39d83b659fab626eb829568844ee91e1", "score": "0.68451107", "text": "def delete_db(self) -> None:\n pass", "title": "" }, { "docid": "9d251f7356161498e11df310088d5e35", "score": "0.67842245", "text": "def destroy_database():\n del DatabaseHandler.actualDatabase\n DatabaseHandler.actualDatabase = None\n if os.path.exists(DatabaseHandler.actualDatabasePath):\n shutil.rmtree(DatabaseHandler.actualDatabasePath)\n DatabaseHandler.actualDatabasePath = ''\n DatabaseHandler.dbIsTemp = False", "title": "" }, { "docid": "70b4ab129f81d02f65d3985b89ce8c66", "score": "0.6779434", "text": "def create_database_if_not_exists(self) -> None:\n conn_uri = nbgrader_format_db_url(self.course_id)\n\n if not database_exists(conn_uri):\n logger.debug(\"db not exist, create database\")\n create_database(conn_uri)", "title": "" }, { "docid": "4820eafc864eab269a96ac6e3539f4ce", "score": "0.6771848", "text": "def delete_database(self):\n if self.con is not None:\n logging.info(\"Deleting old database...\")\n # Closing will delete the database, as it's only stored in memory.\n # See https://stackoverflow.com/questions/48732439/deleting-a-database-file-in-memory\n self.con.close()\n self.con = None", "title": "" }, { "docid": "2305c0e565dfeed845d6e4acf1de29b3", "score": "0.6763552", "text": "def create_newdb(self):\n\n new_db_name = 'db_backup'\n \n command = \"CREATE DATABASE \" + new_db_name + \";\"\n \n conn = None\n try:\n # connect to the PostgreSQL server\n conn = psycopg2.connect(dbname = self.DBNAME, port = self.PORT, user = self.USER, password = self.PASSWORD, host = self.HOST)\n conn.autocommit = True\n cur = conn.cursor()\n # create database\n cur.execute(command)\n cur.execute(\"GRANT ALL PRIVILEGES ON DATABASE {} TO {} ;\".format(new_db_name, self.configuration_obj.DATABASE_USER))\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n logger.info('New 
database has been created!')\n except (Exception, psycopg2.DatabaseError) as error:\n logger.info(error)\n finally:\n if conn is not None:\n conn.close()\n return new_db_name", "title": "" }, { "docid": "e741134864193173eba0c647b33dc12e", "score": "0.6755503", "text": "def delete_db():\n db.drop_all()", "title": "" }, { "docid": "1be7e79b942478bce572061703b8f0a8", "score": "0.675323", "text": "def create_database(self):\n # If the database already exists...\n create_new_database = True\n\n if os.path.exists(self.database_location):\n if self.database_is_valid() and not self.overwrite:\n create_new_database = False\n elif self.overwrite: # We just totally scrap the file. We could just drop the reference table...\n self.delete_database()\n else:\n raise(sqlite3.DatabaseError(\"Database already exists, but is invalid for writing to.\"))\n\n if create_new_database:\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME,\n Value REAL,\n Debug INTEGER\n );\n \"\"\"\n with sqlite3.connect(self.database_location) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)", "title": "" }, { "docid": "916758ffc6bc750b93dbc4c3f1e12ff4", "score": "0.67465234", "text": "def create_db_statement(self):\n create_statement = \"CREATE DATABASE IF NOT EXISTS \" + self.database_name()\n return create_statement", "title": "" }, { "docid": "c124ae58d49262b5a81939a6e6d8b5b5", "score": "0.673364", "text": "def create_tracker_db(self):\n\n LOGGER.info('Running recreateTrackerDB')\n\n e = getEngine(local_config.tk_db_connection_string)\n tk_db.metadata.drop_all(e)\n tk_db.metadata.create_all(e)", "title": "" }, { "docid": "5a9d92322d7a26a219f117274032ed4e", "score": "0.6722217", "text": "def create_db(self, name):\n assert name\n path = os.path.join(self.root_path, name)\n os.mkdir(path)", "title": "" }, { "docid": "da6a51ff89db74600b4366eba24a8ea3", "score": "0.67139095", "text": "def init_db():\n db.create_all()\n if 'SQLALCHEMY_MIGRATE_URI' in app.config:\n if not exists(app.config['SQLALCHEMY_MIGRATE_PATH']):\n api.create(app.config['SQLALCHEMY_MIGRATE_PATH'], 'database repository')\n api.version_control(app.config['SQLALCHEMY_MIGRATE_URI'], app.config['SQLALCHEMY_MIGRATE_PATH'])\n else:\n api.version_control(app.config['SQLALCHEMY_MIGRATE_URI'], app.config['SQLALCHEMY_MIGRATE_PATH'],\n api.version(app.config['SQLALCHEMY_MIGRATE_PATH']))", "title": "" }, { "docid": "dbe097a4d9f38892b7b9bc73379d1542", "score": "0.66986114", "text": "def delete_database(self, dbname):\n db = self._DATABASE_CLASS(self, dbname)\n if not db.exists():\n raise CloudantException(\n \"Database {0} does not exist\".format(dbname)\n )\n db.delete()\n if dbname in self.keys():\n super(CouchDB, self).__delitem__(dbname)", "title": "" }, { "docid": "3e0fbf9f112d9edf72605d31689102d2", "score": "0.66964567", "text": "def create_db_schema():\n\n with Database() as db:\n db.create_db_schema()", "title": "" }, { "docid": "7152daa57e99c9b19a78272ad6e3a6c2", "score": "0.669564", "text": "def create_db():\n db.create_all()\n default_client = Client(fullname=\"Клиент\", phone=89100000000)\n default_client.save()\n user = User(username=\"admin\", password=\"123\")\n user.save()", "title": "" }, { "docid": "d13191666b8a6e279e6ba59b34d3f64e", "score": "0.6694252", "text": "def reset_database(dbname, retries = 5, sleep_interval = 5):\n if PSQL.database_exists(dbname):\n result = PSQL.drop_database(dbname, retries, sleep_interval)\n if not result:\n 
tinctest.logger.warning(\"Could not delete database %s\" %dbname)\n return False\n return PSQL.create_database(dbname)", "title": "" }, { "docid": "4c8404c6eda49c278a798e0b0e02012c", "score": "0.6692204", "text": "def test_create_new_database_if_none_exists(self):\n # Arrange\n # Remove the database file if it already exits\n if os.path.exists(database_file):\n os.remove(database_file)\n # Act\n build_database()\n # Assert\n self.assertTrue(os.path.exists(database_file))", "title": "" }, { "docid": "ecbf4e88ab4265049b3396c7710572cb", "score": "0.66872716", "text": "def database():\n _, file_name = tempfile.mkstemp()\n os.environ['DATABASE_NAME'] = file_name\n Article.create_table(database_name=file_name)\n yield\n os.unlink(file_name)", "title": "" }, { "docid": "c9e36faa551b5888daa96b719f59dac1", "score": "0.66855514", "text": "def reset_db():\n os.system('dropdb photos')\n os.system('createdb photos')\n \n with app.app_context():\n connect_to_db(app, echo=False)\n db.create_all()\n\n print('Reset db complete!')", "title": "" }, { "docid": "66ce0daf7c220208ae667efb71c35e0a", "score": "0.6684781", "text": "def delete_database(self, db_name):\n if db_name in self.get_all_databases():\n self.db.execute(\"drop database %s\" % db_name)\n else:\n print \"Warning: Database [%s] does not exists, deletion failure\" % db_name", "title": "" }, { "docid": "2534807019c90d5621fa518eb1546858", "score": "0.6673096", "text": "def create_database(self, database_name):\n\t\ttry:\n\t\t\tquery = f\"\"\"CREATE DATABASE {database_name};\"\"\"\n\t\t\tcursor = self.cursor()\n\t\t\tcursor.execute(query)\n\n\t\texcept (Exception, p.DatabaseError)\tas error:\n\t\t\treturn f\"failed to create {database_name}, because of {error}\"", "title": "" }, { "docid": "068657699cf63a1cfea041d10c77b9e8", "score": "0.6658844", "text": "def init_db():\n import model\n\n model.Base.metadata.drop_all(engine)\n model.Base.metadata.create_all(engine)", "title": "" }, { "docid": "2cac31f05e56832a627feb9beda89870", "score": "0.66577023", "text": "def db_init():\n if not os.path.isdir(constants.DB_DIR):\n os.mkdir(constants.DB_DIR)\n create_db()\n elif not os.path.exists(constants.DB_FILE):\n create_db()\n else:\n # Clearing the database insead of just deleting is so we don't have\n # to recreate the IEEE mac address table\n for model in [models.Station, models.Network, models.Connection]:\n model.delete().where(True).execute()", "title": "" }, { "docid": "05a4f5a154a2dbacb94d74c8a4ab5b0a", "score": "0.6653758", "text": "def db_remove(name, user=None, password=None, host=None, port=None):\r\n if not db_exists(name):\r\n log.info('DB {0!r} does not exist'.format(name))\r\n return False\r\n client = _client(user=user, password=password, host=host, port=port)\r\n return client.delete_database(name)", "title": "" }, { "docid": "24ed2f15da3bbe56f5e0a225780ef781", "score": "0.66458505", "text": "def initdb(exist_ok: bool = False, drop_existing: bool = False):\n db_url = engine.url\n typer.echo(f\"Using database at {db_url}\")\n\n if database_exists(db_url):\n if drop_existing:\n with wrap_echo(\"Dropping database\"):\n drop_database(db_url)\n elif not exist_ok:\n typer.echo(\n \"Database already exists, aborting.\\n\"\n \"Use --exist-ok if you are sure the database is uninitialized and contains no data.\\n\"\n \"Use --drop-existing if you want to recreate it.\",\n err=True,\n )\n return\n\n with wrap_echo(\"Creating database\"):\n create_database(db_url)\n pass\n\n with engine.connect() as con:\n with wrap_echo(\"Installing pgcrypto 
extension\"):\n con.execute(\"CREATE EXTENSION IF NOT EXISTS pgcrypto;\")\n pass\n\n with wrap_echo(\"Creating metadata\"):\n base.metadata.create_all()\n pass\n\n typer.echo(\"Database initialization complete.\")", "title": "" }, { "docid": "e853a9c5598238d7c32c2ed3a696c7b4", "score": "0.6645353", "text": "def createDb ( name = None, serveradress = None ):\n \n if not ( serveradress ) or ( serveradress == \"\" ) :\n raise DatabaseError ( \"CreateDb (): Serveradress doesn't exists\" % name )\n \n # Get Db Server\n server = couchdb.client.Server ( serveradress )\n \n # Check if db name already exist\n if name in server :\n raise DatabaseError ( \"CreateDb (): Database '%s' already exist\" % name )\n \n # Create DataBate \n db = server.create ( name )\n \n # Create predefined Database Views\n createDbViews ( db )\n \n return db", "title": "" }, { "docid": "2950dd1b02ef377fbcc36495abf73a07", "score": "0.66446006", "text": "def create_database():\n fab.local('n98-magerun.phar db:create')", "title": "" }, { "docid": "913dc008867db45aa9655a58c99b75ab", "score": "0.6634423", "text": "def db(app):\n with app.app_context():\n if not database_exists(str(_db.engine.url)) and \\\n app.config['SQLALCHEMY_DATABASE_URI'] != 'sqlite://':\n create_database(_db.engine.url)\n _db.create_all()\n\n yield _db\n\n # Explicitly close DB connection\n _db.session.close()\n _db.drop_all()", "title": "" }, { "docid": "13ce8a65e3b54c95657fafd22d80dca3", "score": "0.663284", "text": "def create_db():\n with contextlib.closing(get_db()) as db:\n cursor = db.cursor()\n with open('schema.sql', mode='r') as file:\n cursor.executescript(file.read())\n db.commit()", "title": "" }, { "docid": "c0753ef215d23f6812220e374cc951d5", "score": "0.6626555", "text": "def test_create_database(self):\n\n # Setup the tables\n CreateDatabase.run(app=self.app)\n engine = create_engine(TestManagePy.postgresql_url)\n connection = engine.connect()\n\n for model in [Users]:\n exists = engine.dialect.has_table(connection, model.__tablename__)\n self.assertTrue(exists)\n\n # Clean up the tables\n Base.metadata.drop_all(bind=engine)", "title": "" }, { "docid": "b47d5ce211abe4514c9f60d26436978f", "score": "0.6599476", "text": "def delete_database(self):\n os.remove(self.database_location)", "title": "" }, { "docid": "5b870c03fea67794f8e92421b1ae6e18", "score": "0.6587531", "text": "def delete_db(self):\n logger.info('BEGIN OLD DATABASE DELETE OPERATION')\n conn = None\n try: \n # connect to the PostgreSQL server\n conn = psycopg2.connect(dbname = \"db_backup\", port = self.PORT, user = self.USER, password = self.PASSWORD, host = self.HOST)\n conn.autocommit = True\n cur = conn.cursor()\n\n cur.execute(\"SELECT pg_terminate_backend( pid ) \"\n \"FROM pg_stat_activity \"\n \"WHERE pid <> pg_backend_pid( ) \"\n \"AND datname = '{}'\".format(self.configuration_obj.DATABASE_NAME))\n\n\n # delete old postgres db\n command = \"DROP DATABASE \" + self.configuration_obj.DATABASE_NAME + \";\"\n cur.execute(command)\n \n '''\n cur.execute(\"SELECT pg_terminate_backend( pid ) \"\n \"FROM pg_stat_activity \"\n \"WHERE datname = '{}'\".format(new_db_name))\n\n rename_command = \"ALTER DATABASE \" + new_db_name + \" RENAME TO \" + self.configuration_obj.DATABASE_NAME + \";\"\n cur.execute(rename_command)\n '''\n\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n logger.info('Old database has been dropped!')\n except (Exception, psycopg2.DatabaseError) as error:\n logger.info(error)\n 
logger.info('Error from old database delete.')\n finally:\n if conn is not None:\n conn.close()", "title": "" }, { "docid": "74135c5e1828130d3a7afff1ff613874", "score": "0.65596116", "text": "def create_empty_monitoring_db():\n\n if os.path.isfile(MONITORING_DB_NAME):\n os.remove(MONITORING_DB_NAME)\n create_monitoring_db()", "title": "" } ]
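A condensed sketch of the drop-then-recreate pattern that most of the snippets above implement; it assumes PostgreSQL reached through psycopg2, and the connection parameters and database name are placeholders:

import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

def recreate_database(dbname, host="localhost", user="postgres", password=""):
    # CREATE DATABASE / DROP DATABASE cannot run inside a transaction,
    # so connect to the maintenance database and switch to autocommit first.
    con = psycopg2.connect(dbname="postgres", host=host, user=user, password=password)
    con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = con.cursor()
    # identifiers cannot be bound as query parameters, so dbname must be a trusted value
    cur.execute("DROP DATABASE IF EXISTS %s" % (dbname,))
    cur.execute("CREATE DATABASE %s" % (dbname,))
    cur.close()
    con.close()

recreate_database("example_db")  # deletes example_db if it already exists, then creates it fresh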
050cb928284b6a9c9703074777173b5e
Submits a job to a cluster. Autonaming is currently not supported for this resource.
[ { "docid": "4ff55f313bedd8830cdedccd4c460f33", "score": "0.0", "text": "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n driver_scheduling_config: Optional[pulumi.Input[pulumi.InputType['DriverSchedulingConfigArgs']]] = None,\n hadoop_job: Optional[pulumi.Input[pulumi.InputType['HadoopJobArgs']]] = None,\n hive_job: Optional[pulumi.Input[pulumi.InputType['HiveJobArgs']]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n pig_job: Optional[pulumi.Input[pulumi.InputType['PigJobArgs']]] = None,\n placement: Optional[pulumi.Input[pulumi.InputType['JobPlacementArgs']]] = None,\n presto_job: Optional[pulumi.Input[pulumi.InputType['PrestoJobArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n pyspark_job: Optional[pulumi.Input[pulumi.InputType['PySparkJobArgs']]] = None,\n reference: Optional[pulumi.Input[pulumi.InputType['JobReferenceArgs']]] = None,\n region: Optional[pulumi.Input[str]] = None,\n request_id: Optional[pulumi.Input[str]] = None,\n scheduling: Optional[pulumi.Input[pulumi.InputType['JobSchedulingArgs']]] = None,\n spark_job: Optional[pulumi.Input[pulumi.InputType['SparkJobArgs']]] = None,\n spark_r_job: Optional[pulumi.Input[pulumi.InputType['SparkRJobArgs']]] = None,\n spark_sql_job: Optional[pulumi.Input[pulumi.InputType['SparkSqlJobArgs']]] = None,\n trino_job: Optional[pulumi.Input[pulumi.InputType['TrinoJobArgs']]] = None,\n __props__=None):\n ...", "title": "" } ]
[ { "docid": "794c19caee503fbf75f81f1a57f4860f", "score": "0.69753313", "text": "def submit_job(self, **kwargs):\n # | - submit_job\n kwargs = merge_two_dicts(self.default_sub_params, kwargs)\n\n # | - Checking if job has already been submitted\n if \"path_i\" in kwargs:\n path_i = kwargs[\"path_i\"]\n else:\n path_i = \".\"\n\n if self.is_job_submitted(path_i=path_i):\n return(None)\n # __|\n\n # | - Writing Job Submission Parameters\n sub_params_name = \".submission_params.json\"\n with open(os.path.join(path_i, sub_params_name), \"w\") as fle:\n json.dump(kwargs, fle, indent=2, skipkeys=True)\n # __|\n\n self.cluster.submit_job_clust(**kwargs)\n\n # | - Writing Cluster System Info to File\n if \"path_i\" in kwargs:\n path_i = kwargs[\"path_i\"]\n\n with open(os.path.join(path_i, \".cluster_sys\"), \"w\") as fle:\n # fle.write(self.cluster_sys)\n fle.write(self.cluster_sys + \"\\n\")\n # __|\n\n # __|", "title": "" }, { "docid": "da8eedd205faabc0da3989210ef89d50", "score": "0.68169165", "text": "def submit_job_clust(self, **kwargs):\n # | - submit_job\n print(\"submit_job_clust | DummyCluster\")\n print(\"Nothing happens!!\")\n # __|", "title": "" }, { "docid": "656a5a2cfb045d72cee35cfec2272847", "score": "0.6784938", "text": "def submit_job_clust(self, **kwargs):\n # | - submit_job\n\n # | - Merging Submission Parameters\n params = merge_two_dicts(self.default_sub_params, kwargs)\n\n path = params[\"path_i\"]\n # __|\n\n # | - Checking if job has already been submitted\n # if self.is_job_submitted():\n # return(None)\n # __|\n\n # | - Submit Job *******************************************************\n\n os.chdir(path)\n\n if params[\"job_name\"] == \"Default\":\n params[\"job_name\"] = os.getcwd()\n\n print(\"submitting job\")\n\n os.system(\"chmod 777 *\")\n\n # bash_command = \"/u/if/flores12/bin/qv model.py\"\n\n # bash_command = \"/afs/slac/g/suncat/bin/dobsub \"\n bash_command = \"dobsub \"\n bash_command += \"-q \" + str(params[\"queue\"]) + \" \"\n\n # Not sure why, but the -n flag is used to specify the numer of cores\n bash_command += \"-n \" + str(params[\"cpus\"]) + \" \"\n # bash_command += \"-n \" + str(params[\"nodes\"]) + \" \"\n # bash_command += \"-c \" + str(params[\"cpus\"]) + \" \"\n\n bash_command += \"-W \" + str(params[\"wall_time\"]) + \" \"\n bash_command += \"-o \" + str(params[\"out_file\"]) + \" \"\n bash_command += \"-e \" + str(params[\"err_file\"]) + \" \"\n\n # bash_command += \"-o job.out \"\n # bash_command += \"-e job.err \"\n\n # bash_command += \"-M \" + params[\"memory\"] + \" \"\n bash_command += \"-J \" + params[\"job_name\"] + \" \"\n bash_command += params[\"job_script\"]\n\n # | - FIXME Python 2 --> 3\n print(\"Python version info\")\n print(sys.version_info)\n if (sys.version_info > (3, 0)):\n try:\n output = subprocess.Popen(\n bash_command,\n stdout=subprocess.PIPE,\n shell=True,\n )\n sub_time = datetime.datetime.now().isoformat()\n\n\n except subprocess.CalledProcessError as e:\n print(\"Ping stdout output:\\n\", e.output)\n os.chdir(self.root_dir)\n print(\"JOB SKIPPED: \")\n return(None)\n\n else:\n try:\n output = subprocess.Popen(\n bash_command,\n stdout=subprocess.PIPE,\n shell=True,\n )\n sub_time = datetime.datetime.now().isoformat()\n\n # except subprocess.CalledProcessError, e:\n except subprocess.CalledProcessError as e:\n print(\"Ping stdout output:\\n\", e.output)\n # print \"Ping stdout output:\\n\", e.output\n os.chdir(self.root_dir)\n print(\"JOB SKIPPED: \")\n return(None)\n\n # | - __old\n # try:\n # output = 
subprocess.Popen(\n # bash_command,\n # stdout=subprocess.PIPE,\n # shell=True,\n # )\n # sub_time = datetime.datetime.now().isoformat()\n #\n # # except subprocess.CalledProcessError, e:\n # except subprocess.CalledProcessError as e:\n #\n # print \"Ping stdout output:\\n\", e.output\n #\n # os.chdir(self.root_dir)\n # print(\"JOB SKIPPED: \")\n # return(None)\n\n # __|\n\n # __|\n\n # __|\n\n # | - Parsing Output\n # out = output.communicate()[0]\n out = output.communicate()[0].decode()\n ind = out.find(\"Job <\")\n\n out1 = out[ind:]\n ind1 = out1.find(\"<\")\n ind2 = out1.find(\">\")\n\n jobid = out1[ind1 + 1:ind2]\n\n if jobid.isdigit():\n jobid = int(jobid)\n else:\n jobid = None\n # __|\n\n # | - Writing Files\n with open(\".SUBMITTED\", \"w\") as fle:\n fle.write(\"\\n\")\n\n with open(\".bash_comm\", \"w\") as fle:\n fle.write(str(bash_command) + str(\"\\n\"))\n\n with open(\".jobid\", \"w\") as fle:\n fle.write(str(jobid) + str(\"\\n\"))\n\n with open(\".sub_out\", \"w\") as fle:\n fle.write(out)\n # __|\n\n os.chdir(self.root_dir)\n\n return(out, jobid)\n # __|", "title": "" }, { "docid": "2d31dee2fdd9e00ebbf36ed3e16d1126", "score": "0.6650706", "text": "def submit(self, job_name: str):\n self._gateway_client.execute(job_name)\n # TODO return a JobSubmissionResult future", "title": "" }, { "docid": "5a038e47729df1ff969c0cf6173964e2", "score": "0.65861076", "text": "def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, \n log=None, required_output=[], nb_submit=0):\n raise NotImplemented, 'No implementation of how to submit a job to cluster \\'%s\\'' % self.name", "title": "" }, { "docid": "110fafb7d18bb57c495cab0eacfc01a4", "score": "0.64094824", "text": "def submit_job(job_name, block_name, command, host_mask=\"\"):\n import af\n from stalker_pyramid import cgru_working_directory\n\n block = af.Block(block_name, \"maya\")\n block.setCommand(\" \".join(command))\n block.setNumeric(1, 1, 1, 1)\n block.setWorkingDirectory(cgru_working_directory)\n\n job = af.Job(job_name)\n job.blocks = [block]\n if host_mask != \"\":\n host_mask = host_mask.replace('\"', \"\")\n job.setHostsMask(host_mask)\n status, data = job.send()\n\n if not status:\n RuntimeError(\"Something went wrong!\")", "title": "" }, { "docid": "3a78add56c1bdc7fbbae012e22af6c69", "score": "0.634293", "text": "def submit_job():\n \n owner = request.environ[\"HTTP_REMOTE_USER\"]\n is_admin = owner in (\"jarcher\", \"lafferty\", \"qinqing\", \"nseltzer\")\n from_user = request.args.get(\"repo\", \"\")[:-4]\n \n git_sha1 = request.args.get(\"sha1\", \"\")\n queue_name = request.args.get(\"queue_name\", \"stable\")\n file_name = request.args.get(\"file_name\", \"main.ipynb\")\n \n # Upload this commit to S3.\n prepare_submission(from_user, git_sha1)\n \n # Submit this SHA-1 to the backing cluster.\n submit_a_job(owner, from_user, git_sha1, queue_name, is_admin, file_name)\n return redirect(request.referrer or \"/\")", "title": "" }, { "docid": "0b71ddb36071cd01fb8004dc8fa8fd6b", "score": "0.6342338", "text": "def submit_job(self, context: Context):\n self.log.info(\n \"Running AWS Batch job - job definition: %s - on queue %s\",\n self.job_definition,\n self.job_queue,\n )\n\n if self.container_overrides:\n self.log.info(\"AWS Batch job - container overrides: %s\", self.container_overrides)\n if self.array_properties:\n self.log.info(\"AWS Batch job - array properties: %s\", self.array_properties)\n if self.node_overrides:\n self.log.info(\"AWS Batch job - node properties: %s\", self.node_overrides)\n\n args 
= {\n \"jobName\": self.job_name,\n \"jobQueue\": self.job_queue,\n \"jobDefinition\": self.job_definition,\n \"arrayProperties\": self.array_properties,\n \"parameters\": self.parameters,\n \"tags\": self.tags,\n \"containerOverrides\": self.container_overrides,\n \"nodeOverrides\": self.node_overrides,\n \"shareIdentifier\": self.share_identifier,\n \"schedulingPriorityOverride\": self.scheduling_priority_override,\n }\n\n try:\n response = self.hook.client.submit_job(**trim_none_values(args))\n except Exception as e:\n self.log.error(\n \"AWS Batch job failed submission - job definition: %s - on queue %s\",\n self.job_definition,\n self.job_queue,\n )\n raise AirflowException(e)\n\n self.job_id = response[\"jobId\"]\n self.log.info(\"AWS Batch job (%s) started: %s\", self.job_id, response)\n BatchJobDetailsLink.persist(\n context=context,\n operator=self,\n region_name=self.hook.conn_region_name,\n aws_partition=self.hook.conn_partition,\n job_id=self.job_id,\n )", "title": "" }, { "docid": "097e863b5cdc080d29a600197e70c691", "score": "0.6340748", "text": "def submit_job_clust(self, **kwargs):\n # | - submit_job\n time.sleep(1.5)\n\n # | - Merging Submission Parameters\n params = merge_two_dicts(self.default_sub_params, kwargs)\n\n path = params[\"path_i\"]\n # __|\n\n self.__make_run_vasp_script__(params)\n\n\n # | - Submit Job\n os.chdir(path)\n\n if params[\"job_name\"] == \"Default\":\n params[\"job_name\"] = os.getcwd()\n\n print(\"submitting job\")\n os.system(\"chmod 777 *\")\n # bash_command = \"/u/if/flores12/bin/qv model.py\"\n # __| **** TEMP\n\n\n # | - Bash Submisssion Command\n bash_command = \"/usr/bin/sbatch \"\n\n bash_command += \"-p \" + str(params[\"queue\"]) + \" \"\n bash_command += \"--nodes \" + str(params[\"nodes\"]) + \" \"\n bash_command += \"--ntasks-per-node \" + str(params[\"cpus\"]) + \" \"\n bash_command += \"--mem-per-cpu \" + str(params[\"memory\"]) + \" \"\n bash_command += \"--time \" + str(params[\"wall_time\"]) + \" \"\n bash_command += \"--job-name \" + str(params[\"job_name\"]) + \" \"\n bash_command += \"--qos \" + str(params[\"priority\"]) + \" \"\n bash_command += \"--mail-user \" + str(params[\"email\"]) + \" \"\n bash_command += \"--mail-type \" + str(params[\"email_mess\"]) + \" \"\n bash_command += \"--output \" + str(params[\"out_file\"]) + \" \"\n bash_command += \"--error \" + str(params[\"err_file\"]) + \" \"\n bash_command += \"-C CPU_GEN:HSW \" # COMBAK Formalize this cpu architecture filter\n\n bash_command += params[\"job_script\"]\n\n print(\"Bash Submission Command:\")\n print(bash_command)\n # __|\n\n try:\n\n if sys.version_info[0] < 3:\n # raise Exception(\"Must be using Python 3\")\n output = subprocess.Popen(\n bash_command,\n stdout=subprocess.PIPE,\n shell=True,\n # encoding=\"utf8\",\n )\n\n else:\n output = subprocess.Popen(\n bash_command,\n stdout=subprocess.PIPE,\n shell=True,\n encoding=\"utf8\",\n )\n\n sub_time = datetime.datetime.now().isoformat()\n\n # except subprocess.CalledProcessError, e:\n except subprocess.CalledProcessError as e:\n print(\"Ping stdout output:\\n\", e.output)\n\n os.chdir(self.root_dir)\n print(\"JOB SKIPPED: \")\n return(None)\n\n # | - Parsing Output\n out = output.communicate()[0]\n out_copy = copy.deepcopy(out)\n\n ind = out.find(\"job\")\n\n out = out[ind + 3:]\n\n jobid = re.sub(\"[^0-9]\", \"\", out)\n\n try:\n jobid = int(jobid)\n\n except:\n print(\"Couldn't parse for jobid | !@!!\")\n jobid = None\n pass\n\n if type(jobid) == int:\n jobid = jobid\n else:\n jobid = None\n # __|\n\n 
# | - Writing Files\n with open(\".SUBMITTED\", \"w\") as fle:\n fle.write(\"\\n\")\n\n with open(\".bash_comm\", \"w\") as fle:\n fle.write(str(bash_command) + str(\"\\n\"))\n\n with open(\".jobid\", \"w\") as fle:\n fle.write(str(jobid) + str(\"\\n\"))\n\n with open(\".sub_out\", \"w\") as fle:\n fle.write(out_copy)\n\n # | - Writing Job Submission Parameters\n with open(\".submission_params_2.json\", \"w\") as fle:\n json.dump(params, fle, indent=2, skipkeys=True)\n # __|\n\n # __|\n\n os.chdir(self.root_dir)\n\n return(out, jobid)\n\n # __|", "title": "" }, { "docid": "0d1dfa590ca3b7ff7a26ea8bc9d6c8d4", "score": "0.6314378", "text": "def submit(self):\n retval, msg = self._submitShell.execute( \\\n \"condor_submit -remote \" + self.server, self._generateSubmitString())\n if retval != 0:\n print(\"ERROR #\" + str(retval) + \":\", file=sys.stderr)\n print(\"WARNING: Since 'condor_submit' returned an error, your \" \\\n + \"job was probably not submitted. If your job submitted \" \\\n + \"after all, this object will still not be able to \" \\\n + \"monitor its status.\", file=sys.stderr)\n return None\n print(msg)\n clusterRE = re.search(\"(cluster )(\\d+)\", msg)\n if clusterRE is None: raise BadFormatError(\"condor_submit\")\n clusterStr = clusterRE.group(2)\n if not clusterStr.isdigit(): raise BadFormatError(\"condor_submit\")\n self.cluster = int(clusterStr)\n return self.cluster", "title": "" }, { "docid": "86595ccf5d3279681f371df8256e7046", "score": "0.6305849", "text": "def submit_jobs(self, cluster, force_local=False):\n if self._is_new:\n logger.info(\"Submit %s jobs for execution.\", self._config.get_num_jobs())\n logger.info(\"JADE version %s\", jade.version.__version__)\n registry = Registry()\n loggers = registry.list_loggers()\n logger.info(\"Registered modules for logging: %s\", \", \".join(loggers))\n self._save_repository_info(registry)\n\n ResultsAggregator.create(self._output)\n\n # If an events summary file exists, it is invalid.\n events_file = os.path.join(self._output, EVENTS_FILENAME)\n if os.path.exists(events_file):\n os.remove(events_file)\n\n event = StructuredLogEvent(\n source=\"submitter\",\n category=EVENT_CATEGORY_RESOURCE_UTIL,\n name=EVENT_NAME_SUBMIT_COMPLETED,\n message=\"job submission started\",\n num_jobs=self.get_num_jobs(),\n )\n log_event(event)\n else:\n self._handle_submission_groups()\n\n result = Status.IN_PROGRESS\n group = self._config.get_default_submission_group()\n groups = make_submission_group_lookup(cluster.config.submission_groups)\n self._hpc = HpcManager(groups, self._output)\n\n if self._hpc.hpc_type == HpcType.LOCAL or force_local:\n runner = JobRunner(self._config_file, output=self._output)\n num_processes = group.submitter_params.num_processes\n verbose = group.submitter_params.verbose\n result = runner.run_jobs(verbose=verbose, num_processes=num_processes)\n agg = ResultsAggregator.load(self._output)\n agg.process_results()\n is_complete = True\n else:\n is_complete = self._submit_to_hpc(cluster)\n\n if is_complete:\n result = self._handle_completion(cluster)\n\n return result", "title": "" }, { "docid": "a7059f430d44159130263f93c49922a0", "score": "0.6192596", "text": "def submit_job_clust(self, **kwargs):\n # | - submit_job\n time.sleep(1.5)\n\n # | - Merging Submission Parameters\n params = merge_two_dicts(self.default_sub_params, kwargs)\n\n path = params[\"path_i\"]\n\n\n # Fixing debug flag specification\n if params[\"priority\"] == \"debug\":\n params[\"queue\"] = \"debug\"\n\n if params[\"queue\"] == \"debug\":\n 
params[\"priority\"] = \"debug\"\n # __|\n\n # | - Submit Job\n os.chdir(path)\n\n if params[\"job_name\"] == \"Default\":\n params[\"job_name\"] = os.getcwd()\n\n print(\"submitting job\")\n os.system(\"chmod 777 *\")\n # bash_command = \"/u/if/flores12/bin/qv model.py\"\n # __| **** TEMP\n\n # | - Create vasp_run script\n os.system(\"cd $SLURM_SUBMIT_DIR\")\n os.system(\"export TMPDIR=$SLURM_SUBMIT_DIR\")\n os.system(\"export VASP_SCRIPT=./run_vasp.py\")\n\n os.system(\"echo import os > run_vasp.py\")\n\n exitcode_line = \"exitcode = os.system('srun -n \" + \\\n str(int(self.cores_per_node * int(params[\"nodes\"]))) + \\\n \" /project/projectdirs/m2997/special_edison')\"\n\n line_2 = 'echo ' + '\"' + exitcode_line + '\" >> run_vasp.py'\n os.system(line_2) # on edison\n # __|\n\n # | - Bash Submisssion Command\n bash_command = \"/usr/bin/sbatch \"\n\n # The -q flag is being used in place of the -p flag\n # Only the -q needs to be defined\n bash_command += \"-q \" + str(params[\"queue\"]) + \" \"\n # bash_command += \"-p \" + str(params[\"queue\"]) + \" \"\n\n bash_command += \"--nodes \" + str(params[\"nodes\"]) + \" \"\n bash_command += \"--time \" + str(params[\"wall_time\"]) + \" \"\n\n # bash_command += \"--qos \" + str(params[\"priority\"]) + \" \" # Didn't work\n\n bash_command += \"--output \" + str(params[\"out_file\"]) + \" \"\n bash_command += \"--error \" + str(params[\"err_file\"]) + \" \"\n bash_command += \"-C haswell \"\n\n bash_command += params[\"job_script\"]\n\n print(\"Bash Submission Command:\")\n print(bash_command)\n # __|\n\n try:\n output = subprocess.Popen(\n bash_command,\n stdout=subprocess.PIPE,\n shell=True,\n )\n sub_time = datetime.datetime.now().isoformat()\n # except subprocess.CalledProcessError, e:\n except subprocess.CalledProcessError as e:\n print(\"Ping stdout output:\\n\", e.output)\n\n os.chdir(self.root_dir)\n print(\"JOB SKIPPED: \")\n return(None)\n\n # | - Parsing Output\n # out, err = pickle.load(open(\"job_sub_output.pickle\", \"r\"))\n\n try:\n # job_id = int(out_list[-1])\n out, err = output.communicate()\n out_copy = copy.deepcopy(out)\n out = out.strip()\n out_list = out.split(\" \")\n job_id = int(out_list[-1])\n\n except:\n print(\"Couldn't parse for jobid\")\n job_id = None\n\n # out = output.communicate()[0]\n # out_copy = copy.deepcopy(out)\n #\n # ind = out.find(\"job\")\n # out = out[ind + 3:]\n #\n # jobid = re.sub(\"[^0-9]\", \"\", out)\n #\n # try:\n # jobid = int(jobid)\n #\n # except:\n # print(\"Couldn't parse for jobid | !@!!\")\n # jobid = None\n # pass\n #\n # if type(jobid) == int:\n # jobid = jobid\n # else:\n # jobid = None\n # __|\n\n # | - Writing Files\n with open(\".SUBMITTED\", \"w\") as fle:\n fle.write(\"\\n\")\n\n with open(\".bash_comm\", \"w\") as fle:\n fle.write(str(bash_command) + str(\"\\n\"))\n\n with open(\".jobid\", \"w\") as fle:\n fle.write(str(job_id) + str(\"\\n\"))\n\n if sys.version_info >= (3, 0):\n with open(\".sub_out\", \"wb\") as fle:\n fle.write(out_copy)\n\n else:\n with open(\".sub_out\", \"w\") as fle:\n fle.write(out_copy)\n # __|\n\n os.chdir(self.root_dir)\n\n # | - Save subprocess output for analysis\n # import pickle\n #\n # pickle.dump(\n # output.communicate(),\n # open(\"job_sub_output.pickle\", \"wb\"),\n # )\n # return(output)\n # __|\n\n # return(out, jobid)\n # __|", "title": "" }, { "docid": "5369fb9f58140c533c155833b0d72462", "score": "0.6140132", "text": "def SubmitJob(self, job_jar, class_name,\n job_script=None,\n job_poll_interval=None,\n job_stdout_file=None, 
job_arguments=None,\n job_type=SPARK_JOB_TYPE):\n pass", "title": "" }, { "docid": "92de16ec238813a2c33caa0b2dff90a4", "score": "0.6101407", "text": "def submit_job_clust(self, **kwargs):\n # | - submit_job_clust\n\n # | - Job Parameters\n params = merge_two_dicts(self.default_sub_params, kwargs)\n\n path = params[\"path_i\"]\n copy_PythonModules = params[\"copy_PythonModules\"]\n copy_PythonPackages = params[\"copy_PythonPackages\"]\n cpus = params[\"cpus\"]\n queue = params[\"queue\"]\n # __|\n\n root_dir = os.getcwd()\n if path is None:\n path = root_dir\n\n # | - Checking if job has already been submitted\n os.chdir(path)\n if os.path.isfile(\".SUBMITTED\"):\n print(\"Directory already submitted, will be skipped\")\n os.chdir(root_dir)\n return(None) # <-------- SKIP JOB --------------------------------\n else:\n os.chdir(root_dir)\n # __|\n\n self.__copy_pyth_mods_packs_to_job_dir__(\n path,\n copy_mods=copy_PythonModules,\n copy_packs=copy_PythonPackages,\n )\n\n # | - Submit Job\n # Args: path, root_dir, queue, cpus\n\n os.chdir(path)\n\n if os.path.isfile(\".SUBMITTED\"):\n print(\"Directory already submitted, will be skipped\")\n os.chdir(root_dir)\n return(None)\n else:\n print(\"submitting job\")\n aws_dir = self.aws_dir\n\n if cpus == \"default\":\n bash_command = aws_dir + \"/bin/trisub -q \" + queue\n else:\n\n # | - Checking that number of cpus is within allows\n if queue == \"medium\":\n if cpus > 4:\n print(\"Medium queue can't have more than 4 cpus\")\n print(\" setting cpus to 4\")\n cpus = 4\n # __|\n\n bash_command = aws_dir + \"/bin/trisub -c \" + str(cpus) + \\\n \" -q \" + queue\n\n # try:\n output = subprocess.check_output(\n bash_command,\n shell=True,\n universal_newlines=True, # CHANGED\n )\n\n sub_time = datetime.datetime.now().isoformat()\n\n # # except subprocess.CalledProcessError, e:\n # except subprocess.CalledProcessError as e:\n # print(\"Ping stdout output:\\n\", e.output)\n #\n # os.chdir(root_dir)\n # print(\"JOB SKIPPED: \")\n # return(None)\n # __|\n\n os.system(\"chmod 777 \" + path + \"/*\")\n os.system(\"chmod 777 \" + path)\n\n # | - Parsing Submission for Job ID\n output = output.splitlines()\n print(output) # CHANGED\n\n for line in output:\n if \"jobId\" in line:\n lst = line.split('\"')\n job_id_ind = (lst.index(\"jobId\") + 2)\n jobId = lst[job_id_ind]\n\n file = open(\".jobid\", \"w\")\n file.write(jobId + \"\\n\")\n # __|\n\n file = open(\".SUBMITTED\", \"w\")\n file.close()\n\n os.chdir(root_dir)\n\n # | - Querying AWS For Job Info\n job_queue_dict = self.job_info_batch(jobId)\n job_queue_dict[\"submit_time\"] = sub_time\n\n jobs_file_path = self.job_queue_dir + \"/jobs.csv\"\n\n df_new = pd.DataFrame([job_queue_dict])\n if os.path.isfile(jobs_file_path):\n df = pd.read_csv(jobs_file_path)\n df = df.append(df_new)\n else:\n df = df_new\n\n df.to_csv(jobs_file_path, index=False)\n # __|\n\n return job_queue_dict\n # __|", "title": "" }, { "docid": "1fec48f1b5cbef3cff6fa837e264f544", "score": "0.6067582", "text": "def submit(args, conf):\n workflow_dest = os.path.join(conf.get('cwl','cwl_workflows'), args.uid + os.path.splitext(args.workflow)[1])\n job_dest = os.path.join(conf.get('cwl','cwl_jobs'), constants.JOBS_NEW, args.uid + os.path.splitext(args.job)[1])\n os.rename(args.workflow, workflow_dest)\n os.rename(args.job, job_dest)\n return {\"uid\": args.uid,\n \"worflow\": workflow_dest,\n \"job\": job_dest}", "title": "" }, { "docid": "9c26772c7c8c77dd92f50871f2621be7", "score": "0.60383767", "text": "def execute(self, job_name: 
str):\n # TODO support block to job finish\n # job_submit_result = self.submit(job_name)\n # job_submit_result.wait_finish()\n raise Exception(\"Unsupported\")", "title": "" }, { "docid": "d42d445765bb60107f6756991d7c5527", "score": "0.60077286", "text": "def runJob(self, finjob, name):\n self.log.notice(\"############################################################\")\n self.log.notice(\" Running or submitting job: %s \" % name)\n self.log.notice(\"\\n\\n\")\n res = finjob.submit(self.diracInstance, mode=self.clip.submitMode)\n if not res[\"OK\"]:\n self.log.error(\"Failed job:\", res['Message'])\n return S_ERROR()\n return S_OK()", "title": "" }, { "docid": "79a1e590be5aeffb369f292c1aad3142", "score": "0.59945273", "text": "def submit_job():\n if json_pattern.match(request.headers['Content-Type']):\n request.json[\"metadata_uri\"] = app.config[\"METADATA_URI\"]\n app.logger.debug('Submitting metadata job %s', request.json)\n try:\n job = get_hive().create_job(app.analysis, request.json)\n except ValueError as e:\n raise HTTPRequestError(str(e), 404)\n results = {\"job_id\": job.job_id}\n email = request.json.get('email')\n email_notification = request.json.get('email_notification')\n if email != None and email != '' and email_notification != None:\n app.logger.debug('Submitting email request for %s', email)\n email_results = email_when_complete.delay(request.url_root + \"jobs/\" + str(job.job_id) + \"?format=email\",\n email)\n results['email_task'] = email_results.id\n return jsonify(results), 201\n else:\n app.logger.error('Could not handle input of type %s', request.headers['Content-Type'])\n raise HTTPRequestError('Could not handle input of type %s' % request.headers['Content-Type'])", "title": "" }, { "docid": "e2a2418392bed55e0fcf653ea0bdd901", "score": "0.5976321", "text": "def submit(self):\n name=\"J\"+str(self._uuid)[:10]\n if(self.name is not None):\n name=self.name\n import helix\n j = helix.QSub(command=self.command)\n (so,se) = j.submit(jobname=name,nodes=self.nodes,params=self.params)\n return so.strip(),se", "title": "" }, { "docid": "e05a0fe6c9c5eccb982df88520de413c", "score": "0.5913772", "text": "def submit_job(cook_url, pool=None, headers=None, **kwargs):\n if headers is None:\n headers = {}\n if 'name' not in kwargs:\n kwargs['name'] = DEFAULT_JOB_NAME_PREFIX + get_caller()\n uuids, resp = submit_jobs(cook_url, job_specs=[kwargs], pool=pool, headers=headers)\n return uuids[0], resp", "title": "" }, { "docid": "35df0c9d4e1802020dac874195313127", "score": "0.58090854", "text": "def submit_job(package_uris):\n logging.info('Submitting job.')\n\n time_stamp = datetime.datetime.now().strftime('_%Y_%m_%d_%H_%M_%S')\n JOB_NAME = FLAGS.job_name + time_stamp\n\n USR_PROJECT_ID = FLAGS.project_id\n BUCKET = FLAGS.bucket.split('/')[-1]\n JOB_DIR = os.path.join('gs://', BUCKET, 'mnist_demo')\n REGION = FLAGS.region\n DATA_PATH = os.path.join('gs://', BUCKET, 'data/mnist.npz')\n PACKAGE_URIS = package_uris\n\n training_inputs = {'packageUris': PACKAGE_URIS,\n 'pythonModule': 'cloudssifier.train',\n 'args': ['--data_path', DATA_PATH],\n 'region': REGION,\n 'jobDir': JOB_DIR,\n 'scaleTier': 'CUSTOM',\n 'masterType': 'standard_p100',\n 'runtimeVersion': '1.12',\n 'pythonVersion': '3.5',\n }\n\n\n job_spec = {'jobId': JOB_NAME, 'trainingInput': training_inputs}\n\n # Store your full project ID in a variable in the format the API needs.\n project_id = 'projects/{}'.format(USR_PROJECT_ID)\n\n # Build a representation of the Cloud ML API.\n ml = discovery.build('ml', 'v1')\n\n # Create 
a request to call projects.models.create.\n request = ml.projects().jobs().create(body=job_spec, parent=project_id)\n\n # Make the call.\n try:\n response = request.execute()\n logging.info(response)\n print('\\nTo stream logs run: gcloud ai-platform jobs stream-logs %s\\n'\n % JOB_NAME)\n except errors.HttpError as err:\n # Something went wrong, print out some information.\n logging.error('There was an error creating the model. Check the details:')\n logging.error(err._get_reason())", "title": "" }, { "docid": "b2e9883e921d6d8898f2e564247d60b1", "score": "0.575423", "text": "def recv_submit_job(self, task, unique, data):\n handle = 'H:lap:1'\n job = GearmanJob(self, handle, task, unique, data)\n request = GearmanJobRequest(job)\n self.job_created(handle)\n \n self.dispatcher.dispatch(request, task)", "title": "" }, { "docid": "404e393d0da414716cb9ae5b855a9bc8", "score": "0.5735101", "text": "def submit(self, data):\n jobid, username, ticket, status, sgeid, creation, errstatus = data\n try:\n # Get service command\n jvars = self.load_ticket(ticket)\n command = jvars[\"command\"]\n\n # Load the service template\n with codecs.open(os.path.join(self.config[\"services\"], command), \"r\", \"utf-8\") as f:\n template = f.read()\n\n # Replace variable names contained in the template\n now_name = self.temp_name()\n job_name = \"{}.{}\".format(username, now_name)\n template = template.replace(\"##JOB_NAME##\", job_name)\n real_dir = os.path.dirname(self.location_translate(ticket))\n template = template.replace(\"##ERR_OUT##\", \":{}\".format(real_dir))\n template = template.replace(\"##STD_OUT##\", \":{}\".format(real_dir))\n template = template.replace(\"##INSTANCE_TICKET##\", self.location_translate(ticket))\n template = template.replace(\"##SPEECH_SERVICES##\", self.config[\"services\"])\n template = template.replace(\"##DOCKER_PATH##\", self.config[\"DIR_TRANSLATE\"][0])\n template = template.replace(\"##REAL_PATH##\", self.config[\"DIR_TRANSLATE\"][1])\n\n script_name = os.path.join(real_dir, \"{}.sh\".format(job_name))\n with codecs.open(script_name, \"w\", \"utf-8\") as f:\n f.write(template)\n\n # SGE - qsub job\n sgeid = self.sge.qsub(script_name)\n\n # Populate the task info JSON file\n jvars[\"sgeid\"] = sgeid\n jvars[\"resultfile\"] = \"{}.result\".format(script_name)\n jvars[\"scriptname\"] = script_name\n self.update_ticket(jvars, ticket)\n self.logger.debug(\"SUBMIT: {}, {}, {}\".format(jobid, sgeid, script_name))\n\n # Update job\n with self.db as db:\n db.lock()\n db.update(\"status\", \"Q\", jobid)\n db.update(\"sgeid\", sgeid, jobid)\n\n except Exception as e:\n # Something went wrong\n self.set_error(e, \"Submit Error\", jobid, ticket)", "title": "" }, { "docid": "be5b5c9e7020e0913f9f248a18fc99e7", "score": "0.57223016", "text": "def submit_to_scheduler(self,batchfilename,unique_id):\n submit_exit_value = self.__scheduler.submit_job(batchfilename)\n write_job_id_exit_value = self.__scheduler.write_jobid_to_status(unique_id)\n return submit_exit_value and write_job_id_exit_value", "title": "" }, { "docid": "552018c8e4978d4fa411773fe5ccff13", "score": "0.5720857", "text": "def submit(self) -> str:\n\n # The submission is not currently tested, since it requires a slurm install\n # Perhaps a docker with slurm pre-installed is a good idea in this case\n sbatch_command = self._format_sbatch_command()\n sbatch_process = subprocess.Popen(\n sbatch_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n\n stdout, stderr = sbatch_process.communicate()\n\n if 
sbatch_process.returncode != 0:\n err_msg = \"Sbatch job submission failed with follow error:\\n{}\"\n raise RuntimeError(err_msg.format(stderr))\n job_number = None\n else:\n job_number = stdout.decode(\"utf-8\").strip().split(\":\")[0]\n return job_number", "title": "" }, { "docid": "2686eac44b819b5495037f5895f7112d", "score": "0.57187074", "text": "def send_job(self) -> Job:\n pass", "title": "" }, { "docid": "2686eac44b819b5495037f5895f7112d", "score": "0.57187074", "text": "def send_job(self) -> Job:\n pass", "title": "" }, { "docid": "379000fbd72b17fdec365c8f9337ee9b", "score": "0.57119715", "text": "def submit_job(job_path, jobname):\n # Submit a job\n proc = subprocess.Popen('qsub -N {} {}'.format(jobname, job_path),\n shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n # Get job ID\n stdout = [line.decode().rstrip() for line in proc.stdout]\n job_id = int(stdout[0].split('.')[0])\n proc.wait()\n\n # Wait till job finish\n finish = False\n while not finish:\n proc = subprocess.Popen('qstat -u {un}'.format(un=username),\n shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout = [line.decode().rstrip() for line in proc.stdout]\n running_jobs_ids = list()\n for i, status_line in enumerate(stdout):\n if i > 4:\n job_id_ = int(status_line.strip().split('.')[0])\n running_jobs_ids.append(job_id_)\n if job_id not in running_jobs_ids:\n finish = True\n time.sleep(30)\n # Clear job files\n subprocess.Popen(['rm {jobname}.o{ID} {jobname}.e{ID}'.format(jobname=jobname, ID=job_id)], shell=True)", "title": "" }, { "docid": "0263f8f5c213fad275f0991d29e53101", "score": "0.56978524", "text": "def test_submit():\n project_name = 'Example'\n src_name = 'example'\n example_project_path = Path(__file__).parent.parent / project_name\n worker = config.Remote.online_worker_names[0]\n runs_path = example_project_path / config.Constants.runs\n\n os.chdir(str(example_project_path))\n uploader = Uploader(example_project_path, src_name)\n \n job = Job(params.param2default)\n job.update_param_name(runs_path, num_new=0)\n job.update_job_name(0)\n job.param2val['project_path'] = config.WorkerDirs.research_data / project_name\n \n uploader.to_disk(job, worker)\n\n return job.is_ready()", "title": "" }, { "docid": "314c0fab2a6f54da01bf57a6c728b6bc", "score": "0.56928426", "text": "def SubmitJob(self, jar_file, class_name, job_poll_interval=None, # pytype: disable=signature-mismatch # overriding-default-value-checks\n job_stdout_file=None, job_arguments=None,\n job_type=SPARK_JOB_TYPE):\n if job_type == SPARK_JOB_TYPE:\n raise NotImplementedError()\n\n cmd_list = [posixpath.join(hadoop.HADOOP_BIN, 'hadoop'),\n 'jar', jar_file]\n if class_name:\n cmd_list.append(class_name)\n if job_arguments:\n cmd_list += job_arguments\n cmd_string = ' '.join(cmd_list)\n start_time = datetime.datetime.now()\n stdout, _ = self.leader.RemoteCommand(cmd_string)\n end_time = datetime.datetime.now()\n if job_stdout_file:\n with open(job_stdout_file, 'w') as f:\n f.write(stdout)\n return {SUCCESS: True,\n RUNTIME: (end_time - start_time).total_seconds()}", "title": "" }, { "docid": "5345a4f8815906681cb0b3f14c22d432", "score": "0.56730855", "text": "def main_submit(args):\n if args.verbosity>=1:\n print \">>> main_submit\", args\n \n verbosity = args.verbosity\n resubmit = args.subcommand=='resubmit'\n force = args.force\n dryrun = args.dryrun # prepare job and submit command, but do not submit\n testrun = args.testrun # only run a few test jobs\n queue = args.queue # queue option for the batch system (job 
flavor for HTCondor)\n time = args.time # maximum time for the batch system\n batchopts = args.batchopts # extra options for the batch system\n batch = getbatch(CONFIG,verb=verbosity+1)\n \n for jobcfg in preparejobs(args):\n jobid = None\n cfgname = jobcfg['cfgname']\n jobdir = jobcfg['jobdir']\n logdir = jobcfg['logdir']\n outdir = jobcfg['outdir']\n joblist = jobcfg['joblist'] # text file with list of tasks to be executed per job\n jobname = jobcfg['jobname']\n nchunks = jobcfg['nchunks']\n jkwargs = { # key-word arguments for batch.submit\n 'name': jobname, 'opt': batchopts, 'dry': dryrun,\n 'short': (testrun>0), 'queue':queue, 'time':time\n }\n if nchunks<=0:\n print \">>> Nothing to %ssubmit!\"%('re' if resubmit else '')\n continue\n if batch.system=='HTCondor':\n # use specific settings for KIT condor\n if 'etp' in GLOB._host:\n script = \"python/batch/submit_HTCondor_KIT.sub\"\n else:\n script = \"python/batch/submit_HTCondor.sub\"\n appcmds = [\"initialdir=%s\"%(jobdir),\n \"mylogfile='log/%s.$(ClusterId).$(ProcId).log'\"%(jobname)]\n jkwargs.update({ 'app': appcmds })\n elif batch.system=='SLURM':\n script = \"python/batch/submit_SLURM.sh\"\n logfile = os.path.join(logdir,\"%x.%A.%a.log\") # $JOBNAME.o$JOBID.$TASKID.log\n jkwargs.update({ 'log': logfile, 'array': nchunks })\n #elif batch.system=='SGE':\n #elif batch.system=='CRAB':\n else:\n LOG.throw(NotImplementedError,\"Submission for batch system '%s' has not been implemented (yet)...\"%(batch.system))\n \n # SUBMIT\n if args.prompt: # ask user confirmation before submitting\n while True:\n submit = raw_input(\">>> Do you want to submit %d jobs to the batch system? [y/n] \"%(nchunks))\n if any(s in submit.lower() for s in ['q','exit']): # quit this script\n print \">>> Quitting...\"\n exit(0)\n elif any(s in submit.lower() for s in ['f','all']):\n print \">>> Force submission...\"\n submit = 'y'\n args.prompt = False # stop asking for next samples\n if 'y' in submit.lower(): # submit this job\n jobid = batch.submit(script,joblist,**jkwargs)\n break\n elif 'n' in submit.lower(): # do not submit this job\n print \">>> Not submitting.\"\n break\n else:\n print \">>> '%s' is not a valid answer, please choose y/n.\"%submit\n else:\n jobid = batch.submit(script,joblist,**jkwargs)\n \n # WRITE JOBCONFIG\n if jobid!=None:\n jobcfg['jobids'].append(jobid)\n if verbosity>=1:\n print \">>> Creating config file '%s'...\"%(cfgname)\n with open(cfgname,'w') as file:\n json.dump(jobcfg,file,indent=2)", "title": "" }, { "docid": "1999990f3e23260083a967d5cd9a493a", "score": "0.5662804", "text": "def submit(self):\n self.keep_data = False\n ManagedJob.submit(self)", "title": "" }, { "docid": "b6adbd2c73e4f562bab790a6d444bf15", "score": "0.56421906", "text": "def submit_job(self, pilot_url, jd):\n if self.job_url==None:\n self.job_url=self.get_job_url(pilot_url) \n \n if self.pilot_url==None:\n self.pilot_url = pilot_url\n self.bj=pilot_url_dict[pilot_url] \n self.bj.add_subjob(jd, self.job_url, self.uuid)", "title": "" }, { "docid": "805fe822421e8e35935d9c74741d6947", "score": "0.564069", "text": "def submit_job(self, qry = \"\", db = \"MyDB\", taskname = \"pyjob\", estimate = 1):\n\n try:\n jobid = self.SubmitJob(wsid = self._wsid,\n pw = self._pw,\n qry = qry,\n context = db,\n taskname = taskname,\n estimate = estimate)\n except Exception:\n traceback.print_exc()\n raise Exception(\"CASJobs SOAP Error\")\n \n return jobid['SubmitJobResult']", "title": "" }, { "docid": "3c12dbb5c65f744e7e1c2c60dd68e070", "score": "0.56278163", "text": "def 
create_job(self):\n import re, json\n from dbacademy.dbrest import DBAcademyRestClient\n client = DBAcademyRestClient()\n\n config = self.get_job_config()\n print(f\"Creating the job {config.job_name}\")\n\n # Delete the existing pipeline if it exists\n client.jobs().delete_by_name(config.job_name, success_only=False)\n\n course_name = re.sub(\"[^a-zA-Z0-9]\", \"-\", DA.course_name)\n while \"--\" in course_name: course_name = course_name.replace(\"--\", \"-\")\n \n params = {\n \"name\": f\"{config.job_name}\",\n \"tags\": {\n \"dbacademy.course\": course_name,\n \"dbacademy.source\": course_name\n },\n \"email_notifications\": {},\n \"timeout_seconds\": 7200,\n \"max_concurrent_runs\": 1,\n \"format\": \"MULTI_TASK\",\n \"tasks\": [],\n \"job_clusters\": [{\n \"job_cluster_key\": \"shared_cluster\",\n \"new_cluster\": {\n \"num_workers\": 0,\n \"spark_version\": f\"{client.clusters().get_current_spark_version()}\",\n \"spark_conf\": { \"spark.master\": \"local[*]\" },\n },\n }]\n }\n \n for task in config.tasks:\n task_def = {\n \"task_key\": task.name,\n }\n params.get(\"tasks\").append(task_def)\n if task.cluster is not None: task_def[\"job_cluster_key\"] = task.cluster\n \n if task.pipeline_id is not None: \n task_def[\"pipeline_task\"] = {\"pipeline_id\": task.pipeline_id}\n else: \n task_def[\"notebook_task\"] = {\n \"notebook_path\": task.resource,\n \"base_parameters\": task.params\n }\n \n if len(task.depends_on) > 0:\n task_def[\"depends_on\"] = list()\n for key in task.depends_on: task_def[\"depends_on\"].append({\"task_key\":key})\n \n instance_pool_id = client.clusters().get_current_instance_pool_id()\n cluster = params.get(\"job_clusters\")[0].get(\"new_cluster\")\n if instance_pool_id:\n cluster[\"instance_pool_id\"] = instance_pool_id\n else:\n node_type_id = client.clusters().get_current_node_type_id()\n cluster[\"node_type_id\"] = node_type_id\n \n # print(json.dumps(params, indent=4))\n \n json_response = client.jobs().create(params)\n self.job_id = json_response[\"job_id\"]\n print(f\"Created job {self.job_id}\")", "title": "" }, { "docid": "dca6f3e1fdf6846de7f198726ee06133", "score": "0.562475", "text": "def SubmitJob(\n self,\n jarfile=None,\n classname=None,\n pyspark_file=None,\n query_file=None,\n job_poll_interval=None,\n job_stdout_file=None,\n job_arguments=None,\n job_files=None,\n job_jars=None,\n job_py_files=None,\n job_type=None,\n properties=None,\n ):\n job_manager_name = self._GetJobManagerName()\n job_properties = self.GetJobProperties()\n start_time = datetime.datetime.now()\n self.k8s_cluster.ApplyManifest(\n 'container/flink/flink-job-and-deployment.yaml.j2',\n job_manager_name=job_manager_name,\n job_manager_service=self.FLINK_JOB_MANAGER_SERVICE,\n classname=classname,\n job_arguments=','.join(job_arguments),\n image=self.image,\n task_manager_replicas=self.k8s_cluster.num_nodes - 1,\n task_manager_rpc_port=job_properties.get('taskmanager.rpc.port'),\n job_manager_rpc_port=job_properties.get('jobmanager.rpc.port'),\n blob_server_port=job_properties.get('blob.server.port'),\n queryable_state_proxy_ports=job_properties.get(\n 'queryable-state.proxy.ports'\n ),\n )\n stdout, _, _ = container_service.RunKubectlCommand(\n ['get', 'pod', f'--selector=job-name={job_manager_name}', '-o', 'yaml']\n )\n pods = yaml.safe_load(stdout)['items']\n if len(pods) <= 0:\n raise JobSubmissionError('No pod was created for the job.')\n container = container_service.KubernetesPod(pods[0]['metadata']['name'])\n self.flink_jobmanagers.append(container)\n try:\n 
container.WaitForExit()\n except container_service.ContainerException as e:\n raise JobSubmissionError() from e\n end_time = datetime.datetime.now()\n\n if job_stdout_file:\n with open(job_stdout_file, 'w') as f:\n f.write(container.GetLogs())\n\n return JobResult(run_time=(end_time - start_time).total_seconds())", "title": "" }, { "docid": "475e149174e8c995bf8ff69f171308c9", "score": "0.5599666", "text": "def submit(self):\n for job in self.jobs:\n print \"submitting job %s\" % str(job)\n self._recursiveSubmit(job)\n \n print self.submitLog", "title": "" }, { "docid": "5d5485b4bc1e99246be91de4b935e1a3", "score": "0.55915964", "text": "def submit_job(self, glidin_url, jd):\n print \"submit job: \" + str(glidin_url)\n if self.job_url==None:\n self.job_url=self.get_job_url(glidin_url)\n\n for i in range(0,3):\n try:\n print \"create job entry \"\n self.job_dir = saga.advert.directory(saga.url(self.job_url), \n saga.advert.Create | saga.advert.CreateParents | saga.advert.ReadWrite)\n print \"initialized advert directory for job: \" + self.job_url\n # put job description attributes to advert\n attributes = jd.list_attributes() \n for i in attributes: \n if jd.attribute_is_vector(i):\n self.job_dir.set_vector_attribute(i, jd.get_vector_attribute(i))\n else:\n logging.debug(\"Add attribute: \" + str(i) + \" Value: \" + jd.get_attribute(i))\n self.job_dir.set_attribute(i, jd.get_attribute(i))\n\n self.job_dir.set_attribute(\"state\", str(saga.job.Unknown))\n # return self object for get_state() query \n #logging.debug(\"Submission time (time to create advert entries): \" + str(time.time()-start) + \" s\")\n return self \n except:\n traceback.print_exc(file=sys.stdout)\n #time.sleep(2)\n #raise Exception(\"Unable to submit job\") ", "title": "" }, { "docid": "9c4b984a94e66ffc2f020c43b435c514", "score": "0.5570158", "text": "def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,\n required_output=[], nb_submit=0):\n \n text = \"\"\"Executable = %(prog)s\n output = %(stdout)s\n error = %(stderr)s\n log = %(log)s\n %(argument)s\n environment = CONDOR_ID=$(Cluster).$(Process)\n Universe = vanilla\n notification = Error\n Initialdir = %(cwd)s\n %(requirement)s\n getenv=True\n queue 1\n \"\"\"\n \n if self.cluster_queue not in ['None', None]:\n requirement = 'Requirements = %s=?=True' % self.cluster_queue\n else:\n requirement = ''\n\n if cwd is None:\n cwd = os.getcwd()\n if stdout is None:\n stdout = '/dev/null'\n if stderr is None:\n stderr = '/dev/null'\n if log is None:\n log = '/dev/null'\n if not os.path.exists(prog):\n prog = os.path.join(cwd, prog)\n if argument:\n argument = 'Arguments = %s' % ' '.join(argument)\n else:\n argument = ''\n \n\n dico = {'prog': prog, 'cwd': cwd, 'stdout': stdout, \n 'stderr': stderr,'log': log,'argument': argument,\n 'requirement': requirement}\n\n open('submit_condor','w').write(text % dico)\n a = misc.Popen(['condor_submit','submit_condor'], stdout=subprocess.PIPE)\n output = a.stdout.read()\n #Submitting job(s).\n #Logging submit event(s).\n #1 job(s) submitted to cluster 2253622.\n pat = re.compile(\"submitted to cluster (\\d*)\",re.MULTILINE)\n try:\n id = pat.search(output).groups()[0]\n except:\n raise ClusterManagmentError, 'fail to submit to the cluster: \\n%s' \\\n % output \n self.submitted += 1\n self.submitted_ids.append(id)\n return id", "title": "" }, { "docid": "2538d707aeac27bc047cbea7668fce2f", "score": "0.5565302", "text": "def submit(self, spec, fields):\n # generate uid for given spec/fields\n rep = 
json.dumps(dict(spec=spec, fields=fields))\n wmaid = wmaHash(rep)\n # submit spark job\n self.taskmgr.spawn(self.submit_spark, wmaid, spec, fields)\n # self.submit_spark(wmaid, spec, fields)\n # return wmaids of submitted job\n results = [wmaid]\n return results", "title": "" }, { "docid": "b41ea9e0529d3846ffb9b62418849ea4", "score": "0.5550214", "text": "def control_one_job(self, pid):\n raise NotImplemented, 'No implementation of how to control the job status to cluster \\'%s\\'' % self.name", "title": "" }, { "docid": "3995e8d087afb9041db890e5718c2b79", "score": "0.55284196", "text": "def SubmitJob(self,\n jarfile=None,\n classname=None,\n pyspark_file=None,\n query_file=None,\n job_poll_interval=None,\n job_stdout_file=None,\n job_arguments=None,\n job_files=None,\n job_jars=None,\n job_py_files=None,\n job_type=None,\n properties=None):\n if job_type != self.HADOOP_JOB_TYPE:\n raise NotImplementedError\n cmd_list = [hadoop.HADOOP_CMD]\n # Order is important\n if jarfile:\n cmd_list += ['jar', jarfile]\n # Specifying classname only works if jarfile is omitted or if it has no\n # main class.\n if classname:\n cmd_list += [classname]\n all_properties = self.GetJobProperties()\n all_properties.update(properties or {})\n cmd_list += ['-D{}={}'.format(k, v) for k, v in all_properties.items()]\n if job_arguments:\n cmd_list += job_arguments\n cmd_string = ' '.join(cmd_list)\n\n start_time = datetime.datetime.now()\n try:\n stdout, _ = self.leader.RobustRemoteCommand(cmd_string)\n except errors.VirtualMachine.RemoteCommandError as e:\n raise JobSubmissionError() from e\n end_time = datetime.datetime.now()\n\n if job_stdout_file:\n with open(job_stdout_file, 'w') as f:\n f.write(stdout)\n return JobResult(run_time=(end_time - start_time).total_seconds())", "title": "" }, { "docid": "2c61682b65e6da1853a651cf82e9620a", "score": "0.5526301", "text": "def addJob(self, payload, friendlyName = \"\", jobFamily = \"\", priority=1): \n return self.post(\"jobs\", { 'payload': payload, \"friendlyName\" : friendlyName, \"jobFamily\" : jobFamily})", "title": "" }, { "docid": "a892c6de7a78bf8c7c2cfae6a328740f", "score": "0.55237633", "text": "def submit(self, **kwargs):\n pwd = curdir\n wd = dirname(self.logFile)\n chdir(wd)\n d = OrderedDict()\n self.logging.warn(\"!FIXME! 
ugly hard coded values for memory & cpu time\")\n nCPU = int(defaults.get(\"numcores\",\"8\"))\n d['job-name'] = self.name\n d['nodes'] = 1\n d['time'] = \"24:00:00\" #defaults.get(\"cputime\",\"24:00:00\")\n d['partition'] = defaults.get('queue',\"normal\")\n d['constraint'] = defaults.get('constraint','mc') # no use of gpu\n #d['mem'] = \"60GB\" #defaults.get(\"memory\",\"4G\")\n d['output'] = op_join(wd,\"output.log\")\n d['error'] = op_join(wd,\"output.err\")\n\td['account'] = defaults.get('account','sm24')\n #d['ntasks-per-node']=nCPU\n img = defaults.get(\"image\",\"zimmerst85/dampesw-cscs:latest\")\n #d['image'] = defaults.get(\"image\",\"zimmerst85/dampesw-cscs:latest\")\n job_file = open(\"submit.sh\", \"w\")\n job_file.write(\"#!/bin/bash\\n\")\n data = [\"#SBATCH --%s=%s\\n\" % (k, v) for k, v in d.iteritems()]\n job_file.write(\"\".join(data))\n # now add CSCS specific stuff\n job_file.write(\"module load daint-{constraint}\\n\".format(constraint=d['constraint']))\n job_file.write(\"module load shifter-ng\\n\")\n job_file.write(\"export DAMPE_WORKFLOW_SERVER_URL=%s\\n\"%DAMPE_WORKFLOW_URL)\n job_file.write(\"export NTHREADS=%i\\n\"%nCPU)\n ### shifter_call = '\\nsrun -C gpu shifter --image={image} --volume={wd}:/workdir bash -c \"bash /workdir/script\"\\n'.format(image=d['image'],wd=wd)\n shifter_call = '\\nsrun -C {constraint} shifter run --mount=type=bind,source={wd},destination=/workdir --mount=type=bind,source=$HOME,destination=$HOME {image} bash -c \"bash /workdir/script\"\\n'.format(image=img,wd=wd,constraint=d['constraint'])\n job_file.write(shifter_call)\n job_file.close()\n #print 'we are in directory: ',wd\n output = self.__run__(\"sbatch --account=sm24 submit.sh\")\n print output\n chdir(pwd)\n return self.__regexId__(output)", "title": "" }, { "docid": "a5ccc796cab3ec537c2a5d3f2c796d60", "score": "0.5517031", "text": "def job_send(self, *args, **kwargs):\n\n pass", "title": "" }, { "docid": "4c0246122bafacf5b2b2143a9e3179df", "score": "0.5513336", "text": "def submit(self, **kwargs):\n return sasoptpy.util.submit(self, **kwargs)", "title": "" }, { "docid": "43773bfca7ba2b0ad8cd57f9b426428a", "score": "0.5512067", "text": "def submit(self, cluster_index, cluster_label):\n\n if cluster_index not in self._popped:\n raise ValueError(\"This item was not popped; you cannot label it.\")\n self.cluster_labels[cluster_index] = cluster_label", "title": "" }, { "docid": "eecfa4749d68d2a035618083c1740550", "score": "0.5511622", "text": "def submitJob(cls,command,**kwargs):\n sbatch = Command.fetch(\"sbatch\",jsonstr=Slurm.sbatchstr,defaults={\"partition\":\"serial_requeue\",\"time\":\"100\",\"output\":\"/dev/null\"})\n sbatch.command = command\n envs = {\"JT_SLURM_PARTITION\":\"partition\",\"JT_SLURM_TIME\":\"time\"}\n for env,arg in envs.iteritems():\n if env in os.environ:\n sbatch.setArgValue(arg,os.environ[env])\n \n for arg,value in kwargs.iteritems():\n if arg == \"scriptpath\":\n sbatch.scriptpath = value\n else:\n sbatch.setArgValue(arg,value)\n logger.info(\"sbatch command %s\" % sbatch)\n \n [returncode,stdout,stderr] = sbatch.run()\n \n # If sbatch fails, keep trying with increasing sleep times between\n # attempts.\n attempts = 1\n t1 = t2 = 5\n while returncode != 0 and attempts < SBATCH_MAX_ATTEMPTS:\n sleep(t2)\n [returncode,stdout,stderr] = sbatch.run()\n attempts += 1\n t1, t2 = t2, t1 + t2\n \n if returncode != 0:\n raise Exception(\"sbatch command failed after %d attempts: %s\" % (SBATCH_MAX_ATTEMPTS,stderr))\n \n jobid = stdout.split()[-1]\n return jobid", 
"title": "" }, { "docid": "af58e6dfb5776db623c0139ecca1f3d1", "score": "0.5501251", "text": "def submit(self, script, *args, **kwargs):\n # Only allow one submission per job\n if self._lock:\n logger.debug(\"This Job instance is locked, for further submissions create a new\")\n return\n\n # Define a directory if not already done\n if not('directory' in kwargs and kwargs['directory']):\n kwargs['directory'] = os.getcwd()\n\n # Quick check if all scripts are sound - Also keep copy of logs and scripts\n self._script, self._log = Job.check_script(script)\n\n # See if we need to prepare an array\n if len(self._script) > 1:\n if any(base.__name__ == \"ClusterPlatform\" for base in self._platform.__bases__):\n script, _ = prep_array_script(self._script, kwargs['directory'], self._platform.TASK_ID)\n kwargs[\"array\"] = [1, len(self._script),\n kwargs['max_array_jobs'] if 'max_array_jobs' in kwargs else len(self._script)]\n kwargs[\"shell\"] = \"/bin/sh\"\n kwargs[\"log\"] = os.devnull\n\n script = [script]\n else:\n script = self._script\n else:\n script = self._script\n\n # Get the submission function and submit the job\n self._pid = self._platform.sub(script, **kwargs)\n # Lock this Job so we cannot submit another\n self._lock = True", "title": "" }, { "docid": "b02d511979bfd99f123e56965a5b8778", "score": "0.54995674", "text": "def submit(job_row):\n fns = pipeline_utils.get_fns_for_jobid(job_row['id']) \n\n script = os.path.join(config.basic.pipelinedir, 'bin', '%s_search.py'%config.basic.survey)\n\n # Specify requested resources for job submission\n if job_row['task']=='rfifind':\n res = [4*60*60, 1024, 25]\n elif 'search' in job_row['task']:\n res = [165240, 1024, 28] # 45.9 hrs\n elif job_row['task']=='sifting': # Sifting should be quick\n res = [30*60, 256, 5]\n elif 'folding' in job_row['task']:\n res = [96*60*60, 3000, 28]\n #elif job_row['task']=='tidyup':\n # res = [30*60, 256, 5]\n options = job_row['task']\n \n try:\n SPAN512_job.presubmission_check(fns)\n outdir = SPAN512_job.get_output_dir(fns)\n # Attempt to submit the job\n queue_id = config.jobpooler.queue_manager.submit\\\n (fns, outdir, job_row['id'], resources=res, script=script, opts=options)\n except (FailedPreCheckError):\n # Error caught during presubmission check.\n exceptionmsgs = traceback.format_exception(*sys.exc_info())\n errormsg = \"Job ID: %d \" % job_row['id']\n errormsg += \"failed presubmission check!\\n\\n\"\n errormsg += \"\".join(exceptionmsgs)\n\n jobpool_cout.outs(\"Job ID: %d failed presubmission check!\\n\\t%s\\n\" % \\\n (job_row['id'], exceptionmsgs[-1])) \n \n if config.email.send_on_terminal_failures:\n # Send error email\n msg = \"Presubmission check failed!\\n\"\n msg += \"Job ID: %d\\n\\n\" % \\\n (job_row['id'])\n msg += errormsg\n msg += \"\\n*** Job has been terminally failed. ***\\n\"\n msg += \"*** Job will NOT be re-submitted! ***\\n\"\n if config.basic.delete_rawdata:\n jobpool_cout.outs(\"Job #%d will NOT be retried. \" \\\n \"Data files will be deleted.\" % job_row['id'])\n msg += \"*** Raw data files will be deleted. ***\\n\"\n else:\n jobpool_cout.outs(\"Job #%d will NOT be retried. 
\" % job_row['id'])\n notification = mailer.ErrorMailer(msg, \\\n subject=\"Job failed presubmission check - Terminal\")\n notification.send()\n\n if config.basic.delete_rawdata:\n pipeline_utils.clean_up(job_row['id'])\n\n queries = []\n arglist = []\n queries.append(\"INSERT INTO job_submits (\" \\\n \"job_id, \" \\\n \"status, \" \\\n \"created_at, \" \\\n \"updated_at, \" \\\n \"details) \" \\\n \"VALUES (%d, %s, '%s', '%s', %s)\" % ( job_row['id'], 'precheck_failed', \\\n jobtracker.nowstr(), jobtracker.nowstr(), \\\n errormsg) )\n queries.append(\"UPDATE jobs \" \\\n \"SET status='terminal_failure', \" \\\n \"details='Failed presubmission check', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\"% (jobtracker.nowstr(), job_row['id']) )\n jobtracker.query(queries)\n\n except (queue_managers.QueueManagerJobFatalError,\\\n datafile.DataFileError):\n # Error caught during job submission.\n exceptionmsgs = traceback.format_exception(*sys.exc_info())\n errormsg = \"Error while submitting job!\\n\"\n errormsg += \"\\tJob ID: %d\\n\\n\" % job_row['id']\n errormsg += \"\".join(exceptionmsgs)\n\n jobpool_cout.outs(\"Error while submitting job!\\n\" \\\n \"\\tJob ID: %d\\n\\t%s\\n\" % \\\n (job_row['id'], exceptionmsgs[-1])) \n \n queries = []\n arglist = []\n queries.append(\"INSERT INTO job_submits (\" \\\n \"job_id, \" \\\n \"status, \" \\\n \"created_at, \" \\\n \"updated_at, \" \\\n \"details) \" \\\n \"VALUES (%d, %s, '%s', '%s', %s)\" % ( job_row['id'], 'submission_failed', \\\n jobtracker.nowstr(), jobtracker.nowstr(), \\\n errormsg) )\n queries.append(\"UPDATE jobs \" \\\n \"SET status='failed', \" \\\n \"details='Error while submitting job', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % (jobtracker.nowstr(), job_row['id']) )\n jobtracker.execute(queries)\n except queue_managers.QueueManagerNonFatalError:\n # Do nothing. Don't submit the job. Don't mark the job as 'submitted'.\n # Don't mark the job as 'failed'. The job submission will be retried.\n pass\n except queue_managers.QueueManagerFatalError:\n # A fatal error occurred. 
Re-raise!\n raise\n else: \n # No error occurred\n msg = \"Submitted job to process:\\n\" \n msg += \"\\tJob ID: %d, Queue ID: %s\\n\" % (job_row['id'], queue_id) \n msg += \"\\tData file(s):\\n\" \n for fn in fns:\n msg += \"\\t%s\\n\" % fn\n jobpool_cout.outs(msg)\n queries = []\n queries.append(\"INSERT INTO job_submits (\" \\\n \"job_id, \" \\\n \"queue_id, \" \\\n \"output_dir, \" \\\n \"status, \" \\\n \"created_at, \" \\\n \"updated_at, \" \\\n \"details) \" \\\n \"VALUES (%d,'%s','%s','%s','%s','%s','%s')\" % \\\n (job_row['id'], queue_id, outdir, 'running', \\\n jobtracker.nowstr(), jobtracker.nowstr(), \\\n 'Job submitted to queue'))\n queries.append(\"UPDATE jobs \" \\\n \"SET status='submitted', \" \\\n \"details='Job submitted to queue', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), job_row['id']))\n jobtracker.query(queries)", "title": "" }, { "docid": "c784c75850079e6f51a75a0a25348916", "score": "0.549333", "text": "def perform_job(self, the_job):\n raise Exception(\"Not implemented by {} job\".format(self.job_name))", "title": "" }, { "docid": "9200c778cd44fe9999e98e5da64944c9", "score": "0.54869765", "text": "def create_job(self, employer_id, compensation, location, description, category_id, group_id):\n\n data_dict = {'employer_id': employer_id,\n 'group_id': group_id,\n 'compensation': compensation,\n 'location': location,\n 'description': description,\n 'category_id': str(category_id)}\n return self.app.post('/job_submit', data=data_dict, follow_redirects=True)", "title": "" }, { "docid": "759ef335b8f8b6f49cf7e660ad742df8", "score": "0.54786766", "text": "def run(self):\n if self.job_id is not None:\n raise Exception(_(\"Job design already submitted (Oozie job id %(id)s).\") % dict(id=(self.job_id,)))\n\n fs_defaultfs = self._fs.fs_defaultfs\n jobtracker = hadoop.cluster.get_cluster_addr_for_job_submission()\n\n try:\n wf_dir = self._get_and_create_deployment_dir()\n except Exception, ex:\n LOG.exception(\"Failed to access deployment directory\")\n raise PopupException(message=_(\"Failed to access deployment directory.\"),\n detail=str(ex))\n\n wf_xml = self._generate_workflow_xml(fs_defaultfs)\n self._do_as(self._username, self._copy_files, wf_dir, wf_xml)\n LOG.info(\"Prepared deployment directory at '%s' for %s\" % (wf_dir, self))\n LOG.info(\"Submitting design id %s to %s as `%s'\" % (self._design_obj.id, jobtracker, self._username))\n\n try:\n prev = get_oozie().setuser(self._username)\n self._job_id = get_oozie().submit_workflow(\n self._fs.get_hdfs_path(wf_dir),\n properties=self._get_properties(jobtracker))\n LOG.info(\"Submitted: %s\" % (self,))\n\n # Now we need to run it\n get_oozie().job_control(self.job_id, 'start')\n LOG.info(\"Started: %s\" % (self,))\n finally:\n get_oozie().setuser(prev)\n\n return self.job_id", "title": "" }, { "docid": "517cdb9f9857e0c33f2032af13da3059", "score": "0.54611164", "text": "def submit_task(self, dataloader_name, args): # -> None:\n ...", "title": "" }, { "docid": "06f3fb80d90a7b98b06e98c1dcc29c7e", "score": "0.5455409", "text": "def submit(executable, arguments=None, universe=\"vanilla\", log=LOG_FILE,\n outfile = OUTFILE_FMT % \"$(Cluster)\",\n errfile = ERRFILE_FMT % \"$(Cluster)\"):\n descparts = [\n \"Executable = %s\" % executable,\n \"Universe = %s\" % universe,\n \"Log = %s\" % log,\n \"output = %s\" % outfile,\n \"error = %s\" % errfile,\n ]\n if arguments:\n descparts.append(\"Arguments = %s\" % arguments)\n descparts.append(\"Queue\")\n\n desc = \"\\n\".join(descparts)\n return 
submit_text(desc)", "title": "" }, { "docid": "1b83feb60f79783fc66400bf3544a8b5", "score": "0.54553527", "text": "def transfer(self, jobname, to_server):\r\n config = self.get_config_xml(jobname)\r\n return self._http_post(self._other_url(to_server, NEWJOB),\r\n data=config,\r\n params={'name': jobname},\r\n headers={'Content-Type': 'application/xml'})", "title": "" }, { "docid": "8612d04d769732122159530406526e83", "score": "0.5451528", "text": "def _submit(command, conf):\n\n with tempfile.NamedTemporaryFile(suffix='.sh', mode='w') as sbatch:\n # Generate SLURM sbatch file contents to submit job\n sbatchstr = conf.sbatch(command)\n logger.debug(f'Generated sbatch file:', sbatchstr)\n # Write sbatch to file\n sbatch.write(sbatchstr)\n sbatch.flush()\n\n # Wait a bit to make sure that we don’t overload SLURM\n time.sleep(0.1)\n\n # Create a file to store the job output\n # It is created in the working dir (normal /tmp does not seem to work)\n out = tempfile.NamedTemporaryFile(suffix='.out', dir='.', delete=False)\n logger.debug(f'Created temporary output file {out.name} for command '\n f'\"{command}\"')\n out.close()\n\n # Run sbatch to start the job and specify job output file\n sbatch_command = ['sbatch', '-o', out.name, '--requeue', sbatch.name]\n try:\n sbatch_out = subprocess.check_output(sbatch_command, env=conf.env)\n except subprocess.CalledProcessError as e:\n raise JobSchedulingError(f'Submitting job \"{command}\" failed '\n f'with output: {e.output}')\n\n # Parse the task ID from the sbatch stdout\n task_id = re.match(r'Submitted batch job (\\d+)',\n sbatch_out.decode()).group(1)\n\n logger.debug(f'Submitted job {task_id}: \"{command}\"')\n\n return task_id, out.name", "title": "" }, { "docid": "eb6991220998473585049e0fe764884b", "score": "0.54325324", "text": "def run_job(job, engine=None, wait=True, jobname=None, display=True,\n _return_result=False):\n import pyccc\n\n # this is a hacky list of jobs that's mostly for debugging\n mdt._lastjobs[mdt._njobs] = job\n mdt._lastjobs[-1] = job\n mdt._njobs += 1\n\n if job.engine is None:\n engine = utils.if_not_none(engine, mdt.compute.get_engine())\n job.engine = engine\n if engine is None:\n raise ValueError('No compute engine configured! 
Configure MDT using '\n 'moldesign.compute.config')\n\n if isinstance(job.engine, pyccc.engines.Docker):\n check_pull_image(job.engine.client, job.image)\n\n job.submit()\n jobname = utils.if_not_none(jobname, job.name)\n if display:\n display_log(job.get_display_object(), jobname)\n\n if wait:\n job.wait()\n if _return_result: return job.result\n\n return job", "title": "" }, { "docid": "e88ae3ca1c2c6dfd3f66187e81421543", "score": "0.54248273", "text": "def dispatch(self, job_id, **kwargs):\n return self.agent.http.post(\n CB.json(index=False, allow_404=False),\n '/v1/job/%s/dispatch' % job_id, data=kwargs)", "title": "" }, { "docid": "b2f6ae90493067748f4b41a10c5236db", "score": "0.5424347", "text": "def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,\n required_output=[], nb_submit=0):\n \n me_dir = os.path.realpath(os.path.join(cwd,prog)).rsplit('/SubProcesses',1)[0]\n me_dir = misc.digest(me_dir)[-14:]\n if not me_dir[0].isalpha():\n me_dir = 'a' + me_dir[1:]\n \n text = \"\"\n command = ['bsub', '-J', me_dir]\n if cwd is None:\n cwd = os.getcwd()\n else: \n text = \" cd %s;\" % cwd\n if stdout and isinstance(stdout, str):\n command.extend(['-o', stdout])\n if stderr and isinstance(stdout, str):\n command.extend(['-e', stderr])\n elif stderr == -2: # -2 is subprocess.STDOUT\n pass\n if log is None:\n log = '/dev/null'\n \n text += prog\n if argument:\n text += ' ' + ' '.join(argument)\n \n if self.cluster_queue and self.cluster_queue != 'None':\n command.extend(['-q', self.cluster_queue])\n\n a = misc.Popen(command, stdout=subprocess.PIPE, \n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE, cwd=cwd)\n \n output = a.communicate(text)[0]\n #Job <nnnn> is submitted to default queue <normal>.\n try:\n id = output.split('>',1)[0].split('<')[1]\n except:\n raise ClusterManagmentError, 'fail to submit to the cluster: \\n%s' \\\n % output \n if not id.isdigit():\n raise ClusterManagmentError, 'fail to submit to the cluster: \\n%s' \\\n % output \n self.submitted += 1\n self.submitted_ids.append(id)\n return id", "title": "" }, { "docid": "e26085b4f2c7032214fe86ba76150db5", "score": "0.54236627", "text": "def submit(self, prog, argument=[], cwd=None, stdout=None, stderr=None, log=None,\n required_output=[], nb_submit=0):\n \n text = \"\"\n if cwd is None:\n cwd = os.getcwd()\n else: \n text = \" cd %s; bash \" % cwd\n if stdout is None:\n stdout = os.path.join(cwd, \"log.%s\" % prog.split('/')[-1])\n if stderr is None:\n stderr = os.path.join(cwd, \"err.%s\" % prog.split('/')[-1])\n elif stderr == -2: # -2 is subprocess.STDOUT\n stderr = stdout\n if log is None:\n log = '/dev/null'\n\n text += prog\n if argument:\n text += ' ' + ' '.join(argument)\n text += '\\n'\n tmp_submit = os.path.join(cwd, 'tmp_submit')\n open(tmp_submit,'w').write(text)\n\n a = misc.Popen(['qsub','-o', stdout,\n '-e', stderr,\n tmp_submit],\n stdout=subprocess.PIPE, \n stderr=subprocess.STDOUT,\n stdin=subprocess.PIPE, cwd=cwd)\n\n output = a.communicate()[0]\n #Your job 874511 (\"test.sh\") has been submitted\n pat = re.compile(\"Your job (\\d*) \\(\",re.MULTILINE)\n try:\n id = pat.search(output).groups()[0]\n except:\n raise ClusterManagmentError, 'fail to submit to the cluster: \\n%s' \\\n % output \n self.submitted += 1\n self.submitted_ids.append(id)\n return id", "title": "" }, { "docid": "35f7e9c8ca1b0e13527f0a280fd8652f", "score": "0.54109657", "text": "def _submit(assignment, user, _):\n submit.queue_for_testing(assignment, user, course_id)", "title": "" }, { "docid": 
"111a2f09254c059663c555e7112d9c80", "score": "0.54050744", "text": "def invoke(self, job_payload):\n executor_id = job_payload['executor_id']\n job_id = job_payload['job_id']\n runtime_name = job_payload['runtime_name']\n logger.debug(f'ExecutorID {executor_id} | JobID {job_id} - Putting job into localhost queue')\n\n self.start_manager()\n env = self.get_env(runtime_name)\n job_filename = env._prepare_job_file(job_payload)\n\n self.job_queue.put((job_payload, job_filename))", "title": "" }, { "docid": "3c781d5c97d372952fff5383d93e924a", "score": "0.5395302", "text": "def POST(self, **kwargs):\n job_data = list(\n self.exec_lowstate(\n client=\"local_async\", token=cherrypy.session.get(\"token\")\n )\n )\n\n cherrypy.response.status = 202\n return {\n \"return\": job_data,\n \"_links\": {\n \"jobs\": [{\"href\": \"/jobs/{}\".format(i[\"jid\"])} for i in job_data if i],\n },\n }", "title": "" }, { "docid": "645d3c9487a599673fbc6a42da544b93", "score": "0.5383967", "text": "def submit_jobs_to_cluster( assembly_file_names, cluster_dir_names, para_jobs, script_name ):\n\t\n\tIDs_to_check = []\n\tbatch_ID = str( datetime.datetime.now() )[-3:]\n\tfor idx, file_name in enumerate( assembly_file_names ):\n\t\tID = \"E_\" + batch_ID + '_' + str( idx ).zfill(4)\n\t\tIDs_to_check.append( ID )\n\t\tsh_file = cluster_dir_names[ idx ] + ID + '.sh'\n\t\tout_file = cluster_dir_names[ idx ] + ID + '.out'\n\t\terr_file = cluster_dir_names[ idx ] + ID + '.err'\n\t\t\n\t\tcmd = \"python \" + script_name + \" --in \" + assembly_file_names[ idx ] + \" --cluster_dir \" + cluster_dir_names[ idx ]\n\t\t\n\t\twith open( sh_file, \"w\" ) as out:\n\t\t\t\tout.write( \"#!/bin/bash\\n\" + \" \".join( [ \t\"echo \" + '\"',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tcmd + '\"',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"| qsub -cwd\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-pe multislot 8\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-N\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tID,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-l vf=5G\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-l arch=lx-amd64\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-P fair_share\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-l idle=1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-o\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tout_file,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"-e\",\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\terr_file\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t] ) + '\\n'\n\t\t\t\t\t\t\t)\n\t\tos.popen( \"chmod +x \" + sh_file )\n\t\tos.popen( sh_file )\n\t\ttime.sleep(1)\n\t\tos.remove( sh_file )\n\t\twaiting_status = True\n\t\twhile waiting_status:\n\t\t\tqstat = os.popen( \"qstat\" )\n\t\t\tcontent = qstat.read()\n\t\t\tqstat_IDs = re.findall( \"E_\" + batch_ID + \"_\\d{4}\", content )\n\t\t\tcounter = 0\n\t\t\tfor ID in qstat_IDs:\n\t\t\t\tif ID in IDs_to_check:\n\t\t\t\t\tcounter += 1\n\t\t\tif counter < para_jobs:\n\t\t\t\twaiting_status = False\n\t\t\telse:\n\t\t\t\ttime.sleep( 1 )\n\t\n\twaiting_status = True\n\twhile waiting_status:\n\t\tqstat = os.popen( \"qstat\" )\n\t\tcontent = qstat.read()\n\t\tqstat_IDs = re.findall( \"E_\" + batch_ID + \"_\\d{4}\", content )\n\t\twaiting_status = False\n\t\tfor ID in IDs_to_check:\n\t\t\tif ID in qstat_IDs:\n\t\t\t\twaiting_status = True\n\t\ttime.sleep( 120 )", "title": "" }, { "docid": "3d4ab9c7896937e28e96dfe7c820510c", "score": "0.53772116", "text": "def submit_job(self, dsl, config):\n post_data = {'job_dsl': dsl,\n 'job_runtime_conf': config}\n response = requests.post(\n \"/\".join([self.server_url, \"job\", \"submit\"]), json=post_data)\n\n return self.prettify(response)", "title": "" }, 
{ "docid": "f93e1afd60526b8d7ae650c1db14d08b", "score": "0.5367707", "text": "def submit(sequence_tag, **kwargs):\n set_log(kwargs)\n art_directory = os.path.dirname(os.path.realpath(sys.argv[0]))\n (nightly_release, project, platform, nightly_tag) = get_atlas_env()\n job_type = 'grid' if kwargs['type'] is None else kwargs['type']\n user = os.getenv('USER', 'artprod')\n inform_panda = user == 'artprod'\n package = kwargs['package']\n config = kwargs['config']\n no_action = kwargs['no_action']\n wait_and_copy = True\n exit(ArtGrid(art_directory, nightly_release, project, platform, nightly_tag, max_jobs=int(kwargs['max_jobs'])).task_list(job_type, sequence_tag, inform_panda, package, no_action, wait_and_copy, config))", "title": "" }, { "docid": "0b3ddaec58fbbbd7af704e9c83234d6d", "score": "0.53657585", "text": "def submit(command_line):\n from six import string_types\n if isinstance(command_line, string_types):\n command_line = [command_line]\n if not command_line[0].startswith('condor_submit'):\n command_line.insert(0, 'condor_submit')\n output = run_command(command_line, shell=True)\n\n for line in output:\n match = re.match(r'(\\d+) job\\(s\\) submitted to cluster (\\d+)\\.', line)\n if match:\n n_jobs = int(match.group(1))\n cluster_id = int(match.group(2))\n logger.info('Submitted %s jobs to cluster_id %s', n_jobs, cluster_id)\n return cluster_id, n_jobs, output", "title": "" }, { "docid": "8718a98817a7f453b7fc2fc24d1e897f", "score": "0.53594136", "text": "def enqueue(self, func, model_name=None, args=None, kwargs=None,\n priority=None, eta=None, max_retries=None, description=None):\n new_job = Job(func=func, model_name=model_name, args=args,\n kwargs=kwargs, priority=priority, eta=eta,\n max_retries=max_retries, description=description)\n new_job.user_id = self.session.uid\n if 'company_id' in self.session.context:\n company_id = self.session.context['company_id']\n else:\n company_model = self.session.env['res.company']\n company_model = company_model.sudo(new_job.user_id)\n company_id = company_model._company_default_get('queue.job').id\n new_job.company_id = company_id\n self.store(new_job)\n return new_job.uuid", "title": "" }, { "docid": "6e123cf15a09780fb92e90f0ea3dfa61", "score": "0.5357236", "text": "def send_job(self, out_format:str=None, job_options:Dict=None, **format_options) -> RESTJob:\n pass", "title": "" }, { "docid": "91fd476f95aa02c5db3fd1cc7311f7cf", "score": "0.53546226", "text": "def Submission(self, queue=None, jobname=None, \\\n nnodes=None, ncores=None, walltime=None, \\\n bindir=None, vasp5=None, vasp6=None):\n\n self.queue = queue\n\n os.chdir(self.objdir)\n\n if nnodes is None: nnodes = 1\n if ncores is None: ncores = mp.cpu_count()\n\n if queue.upper() == \"LOCAL\":\n runjob = open('runjob', 'w')\n runjob.write('#!/bin/bash \\n')\n runjob.write(\"\\n\\n\")\n #runjob.write(\"export PATH=$HOME/ANACONDA/bin:$PATH \\n\")\n # if vasp5: runjob.write(\"export PATH=$HOME/VASP.5.4.4/bin:$PATH \\n\")\n # if vasp6: runjob.write(\"export PATH=$HOME/VASP.6.1.1/bin:$PATH \\n\")\n # runjob.write(\"\\n\\n\")\n # runjob.write(\"module unload compiler mpi \\n\")\n # runjob.write(\"module load compiler mpi/openmpi/4.0.2 \\n\")\n # runjob.write(\"\\n\\n\")\n if bindir is None:\n runjob.write(\"export PATH=$HOME/VASP2WANN/bin:$PATH \\n\")\n runjob.write(\"\\n\\n\")\n if self.lsoc:\n runjob.write('mpirun -np %g vasp_ncl \\n' % ncores)\n else:\n runjob.write('mpirun -np %g vasp_std \\n' % ncores)\n if bindir is not None:\n runjob.write('mpirun -np %g %s \\n' % (ncores, bindir))\n 
runjob.close()\n\n if queue.upper() == \"SLURM\":\n ntasks = int(nnodes * ncores)\n runjob = open('runjob', 'w')\n runjob.write('#!/bin/bash \\n')\n runjob.write('#SBATCH --job-name={} \\n'.format(jobname))\n runjob.write('#SBATCH -o {}.out \\n'.format(jobname))\n runjob.write('#SBATCH -e {}.err \\n'.format(jobname))\n runjob.write('#SBATCH --nodes={} \\n'.format(nnodes))\n runjob.write('#SBATCH --ntasks={} \\n'.format(ntasks))\n runjob.write('#SBATCH --time {} \\n'.format(walltime))\n runjob.write(\"\\n\\n\")\n #runjob.write(\"export PATH=$HOME/ANACONDA/bin:$PATH \\n\")\n runjob.write(\"export PATH=$HOME/VASP2WANN/bin:$PATH \\n\")\n runjob.write(\"export PATH=$HOME/WANNIER90/V3.1:$PATH \\n\")\n runjob.write(\"\\n\\n\")\n runjon.write(\"module load slurm_setup \\n\")\n if vasp5: runjob.write(\"module load vasp \\n\")\n if vasp6: runjob.write(\"module load vasp/6.1.2 \\n\")\n runjob.write(\"\\n\\n\")\n if LSORBIT == \".TRUE.\":\n if vasp5: runjob.write(\"vasp5 -n $SLURM_NTASKS -s full \\n\")\n if vasp6: runjob.write(\"vasp6 -n $SLURM_NTASKS -s ncl \\n\")\n else:\n if vasp5: runjob.write(\"vasp5 -n $SLURM_NTASKS \\n\")\n if vasp6: runjob.write(\"vasp6 -n $SLURM_NTASKS \\n\")\n # runjob.write('cd $SLURM_SUBMIT_DIR \\n')\n # runjob.write('module load intel/2016.0.109 \\n')\n # runjob.write('module load openmpi/1.10.1 \\n')\n # runjob.write('module load vasp/5.4.1 \\n')\n # runjob.write('mpirun {} \\n'.format(binary))\n runjob.close()\n\n if self.queue.upper() == \"LOCAL\": os.system(\"bash runjob\")\n if self.queue.upper() == \"SLURM\": os.system(\"sbatch runjob\")\n\n os.chdir(self.rootdir)", "title": "" }, { "docid": "e95b5ca809e97001d006f987f4684de3", "score": "0.5349994", "text": "def afanasy_job_submitter(cls):\n from anima.dcc.mayaEnv import afanasy\n\n ui = afanasy.UI()\n ui.show()", "title": "" }, { "docid": "8b226b582ce5f4cf6abf3885abf0e41d", "score": "0.5347589", "text": "def _SubmitInstanceCreationJob(instance, disk_template=None):\n if disk_template is None:\n disk_template = qa_config.GetDefaultDiskTemplate()\n try:\n cmd = ([\"gnt-instance\", \"add\", \"--submit\", \"--opportunistic-locking\",\n \"--os-type=%s\" % qa_config.get(\"os\"),\n \"--disk-template=%s\" % disk_template] +\n GetGenericAddParameters(instance, disk_template))\n cmd.append(instance.name)\n\n instance.SetDiskTemplate(disk_template)\n\n return _ExecuteJobSubmittingCmd(cmd)\n except:\n instance.Release()\n raise", "title": "" }, { "docid": "d28bc6be580c4ad85d6b38841e76da59", "score": "0.53113323", "text": "def SubmitJob(self,\n jarfile=None,\n classname=None,\n pyspark_file=None,\n query_file=None,\n job_poll_interval=None,\n job_stdout_file=None,\n job_arguments=None,\n job_files=None,\n job_jars=None,\n job_py_files=None,\n job_type=None,\n properties=None):\n cmd = self.GetSparkSubmitCommand(\n jarfile=jarfile,\n classname=classname,\n pyspark_file=pyspark_file,\n job_arguments=job_arguments,\n job_files=job_files,\n job_jars=job_jars,\n job_py_files=job_py_files,\n job_type=job_type,\n properties=properties)\n start_time = datetime.datetime.now()\n try:\n stdout, _ = self.leader.RobustRemoteCommand(' '.join(cmd))\n except errors.VirtualMachine.RemoteCommandError as e:\n raise JobSubmissionError() from e\n end_time = datetime.datetime.now()\n\n if job_stdout_file:\n with open(job_stdout_file, 'w') as f:\n f.write(stdout)\n return JobResult(run_time=(end_time - start_time).total_seconds())", "title": "" }, { "docid": "0c06859ba818093d9f0a0ccf3af420ed", "score": "0.5297819", "text": "def submit(self, task):\n 
pass", "title": "" }, { "docid": "e0ca25f0b913662291a7b23de26a87ca", "score": "0.5294962", "text": "def submit_under_jobcontrol(args, script_args):\n # Launch the backend under job control:\n\n prog_name = \"Residue Mutation\"\n\n scriptlauncher = launcher.Launcher(\n script=BACKEND,\n jobname=args.jobname,\n runtoplevel=True, # FIXME should be False\n # FIXME It just does not seem right to have runtoplevel\n # be set to True. Does this really fix remote jobs?????\n # It may have negitive side-effects. The GUI runs the\n # backend via $SCHRODINGER/run anyway.\n prog=prog_name,\n wait=args.WAIT,\n no_redirect=False,\n )\n # NOTE: -SAVE is passed from toplevel to jlaunch via\n # SCHRODINGER_SAVE_JOBDIR.\n\n # Add input file to jlaunch input file list:\n scriptlauncher.addInputFile(args.structures)\n\n # NOTE: Output file will be added at run-time\n\n if args.res_file:\n scriptlauncher.addInputFile(args.res_file)\n if args.muts_file:\n scriptlauncher.addInputFile(args.muts_file)\n if args.residue_structure:\n scriptlauncher.addInputFile(args.residue_structure)\n\n # Add script arguments:\n # Since the backend will already be running under job control.\n script_args.append(\"-NOJOBID\")\n\n # NOTE: We should NOT remove the -LOCAL argument from the\n # args list, because the backend will need to use it.\n\n # Tell the driver where to run the subjobs (if the user did not specify\n # -subhost):\n if args.NJOBS > 1 and not args.subhost:\n host_list = jobcontrol.get_command_line_host_list()\n host_string = jobcontrol.host_list_to_str(host_list)\n script_args += [\"-subhost\", host_string]\n\n scriptlauncher.addScriptArgs(script_args)\n\n scriptlauncher.launch()", "title": "" }, { "docid": "08a7f77198174b69b5034e552fbc032a", "score": "0.5289645", "text": "def createJob(self, payload):\n\n url = self.base_url + \"/jobs\"\n headers = {'Content-Type':'application/json'}\n\n return self._post(url, \"Create job\", headers, payload)", "title": "" }, { "docid": "5d76ddcc7cbe933c9fd5c7dd3f217458", "score": "0.5285005", "text": "def submit(self, path_to_job_script: os.PathLike) -> str:\n replicate_path, _ = self.os.split(path_to_job_script)\n stdout_file = self.os.join(replicate_path, self.STDOUT_FILENAME)\n stderr_file = self.os.join(replicate_path, self.STDERR_FILENAME)\n self.os.command(f'chmod +r {path_to_job_script}')\n self.os.command(f'sh {path_to_job_script} > {stdout_file} 2> {stderr_file} &')\n return None #TODO", "title": "" }, { "docid": "e040f88cb603459d8a3b3244809d295e", "score": "0.5255483", "text": "def submit(job_id=None):\n form = JobSubmissionForm()\n\n # Fill resource dropdown list choices\n form.resource.choices = [(h.url, h.name) for h in Resource.query.all()]\n\n # Temporary directory to store uploaded files before job object creation\n # We use a simple protocol here. 
The script file with start with `script_'\n # in file name and input files will start with `input_' in their names.\n # This will help us not to pass around lots of parameters, only upload\n # directory would be enough.\n upload_dir = tempfile.mkdtemp()\n\n if form.validate_on_submit():\n # Save script file to upload directory\n script_file = request.files.get('script_file')\n\n # Make sure file name is safe\n script_safe_filename = secure_filename(script_file.filename)\n\n # Recognize type of the script\n script_type = _recognize_script_type(script_safe_filename)\n\n # TODO: recognize she bang in scripts\n script_file.save(os.path.join(upload_dir,\n 'script_' + script_safe_filename))\n\n # Save input files to upload directory\n for f in request.files.getlist('input_files'):\n if f.filename not in (None, ''):\n # Remove unsupported characters from filename\n safe_filename = secure_filename(f.filename)\n # Save file to upload temp directory\n f.save(os.path.join(upload_dir, 'input_' + safe_filename))\n\n # Assing the default value\n resource_url = 'localhost'\n\n # Check if user has filled `new_resource' field\n if form.resource.data != 'None':\n resource_url = form.resource.data\n # New url field and priority\n if form.new_resource.data not in (None, ''):\n resource_url = form.new_resource.data\n\n try:\n # Submit the job\n job_id = \\\n job_services.submit(resource_url,\n upload_dir,\n script_type,\n **form.data)\n # Redirect to list\n return redirect(url_for('.detail', job_id=job_id))\n except Exception, ex:\n # Delete temporary directory with its contents\n if os.path.exists(upload_dir):\n shutil.rmtree(upload_dir)\n # This is a workaround to recognize the error, regarding cases,\n # when user's login page produces a lot of text upon SSH, which\n # confuses sftp. 
This is a well known issue on the net and\n # this is this is a trick to recognize the error.\n raise\n if \"message too long\" in str(ex):\n flash(\"sftp error: Make sure you can do sftp/scp to the\"\n \" remote host and there are no echo statements in the\"\n \" .bashrc of the remote machine.\", category='error')\n else:\n flash(str(ex), category='error')\n\n return render_template('job/job_submit.html', form=form)", "title": "" }, { "docid": "7f07754509b671c0f454874cbee859dc", "score": "0.5238377", "text": "def Args(parser):\n parser.add_argument(\n '--cluster',\n required=True,\n help='The Dataproc cluster to submit the job to.')", "title": "" }, { "docid": "799a6c8edec810c551db9961676a8f1b", "score": "0.52359915", "text": "def putJob(self, job, block=True, timeout=None):\n assert isinstance(job, Job)\n # don't reuse old work requests\n assert not getattr(job, 'exception', None)\n self._jobs_queue.put(job, block, timeout)\n self.workJobs[job.jobID] = job", "title": "" }, { "docid": "600dd8f3ca1736c0036f358761e4495d", "score": "0.522854", "text": "def post_cluster(\n host,\n token,\n process_request_id,\n cluster_index,\n method=METHOD['https']):\n\n url = '%s%s%s' % (method, host, URLS['CLUSTERS'])\n headers = {'Authorization': \"Token %s\" % token}\n\n # process_request and index are required\n data = {\n 'process_request': process_request_id,\n 'index': cluster_index\n }\n\n try:\n response = requests.post(\n url,\n headers=headers,\n data=data,\n verify=False)\n except Exception as e:\n print(e)\n return {'status': None, 'reason': 'No response', 'data': ''}\n\n if response.status_code == 201:\n try:\n data = response.json()\n except Exception as e:\n data = response.text()\n print(e)\n else:\n data = response.text\n\n return {\n 'status': response.status_code,\n 'reason': response.reason,\n 'data': data,\n }", "title": "" }, { "docid": "20d07bff2baf4d928a31c44f85b191b0", "score": "0.5223693", "text": "def post_job(args):\n if os.path.exists(args.request) == False:\n print \"Invalid request filepath\"\n return\n \n datafile = open(args.request, \"r\")\n request = datafile.read()\n datafile.close()\n doc = None\n \n if args.job_data == None or args.job_data == \"\":\n doc = resources.post_job(request, None)\n else:\n if os.path.exists(args.job_data) == False:\n print \"Invalid job data filepath\"\n return\n \n doc = resources.post_job(args.request, args.job_data)\n \n print_result(doc)", "title": "" }, { "docid": "c6d4cd627e52c6142b6dfbd57122a0f5", "score": "0.5216971", "text": "def argo_submit(params):\n\n message = {\n 'action': 'argo-submit',\n 'params': params,\n }\n queue_client = QueueClient.from_connection_string(\n os.getenv('AZ_SB_CON_KICKER'), os.getenv('AZ_SB_QUEUE') )\n\n with queue_client.get_sender() as sender:\n message = Message(json.dumps(message).encode('utf8'))\n logging.info(f'Sending message: {message}')\n ret = sender.send(message)", "title": "" }, { "docid": "443cdde6933740210763b485a9523617", "score": "0.5216698", "text": "def cluster_join(username, hostname):\n ret = {\"comment\": \"\", \"success\": False}\n\n cmd = __execute_cmd(\"riak-admin\", \"cluster join {}@{}\".format(username, hostname))\n\n if cmd[\"retcode\"] != 0:\n ret[\"comment\"] = cmd[\"stdout\"]\n else:\n ret[\"comment\"] = cmd[\"stdout\"]\n ret[\"success\"] = True\n\n return ret", "title": "" }, { "docid": "0594bfcfe6e0785360310bad23a65d9d", "score": "0.5207396", "text": "def run_submit_jobs(config_file, output, params, pipeline_stage_num=None):\n os.makedirs(output, exist_ok=True)\n\n mgr = 
JobSubmitter.create(config_file, params, output=output)\n cluster = Cluster.create(\n output,\n mgr.config,\n pipeline_stage_num=pipeline_stage_num,\n )\n\n local = params.hpc_config.hpc_type == HpcType.LOCAL\n ret = 1\n try:\n status = mgr.submit_jobs(cluster, force_local=local)\n if status == Status.IN_PROGRESS:\n check_cmd = f\"jade show-status -o {output}\"\n if not params.dry_run:\n print(f\"Jobs are in progress. Run '{check_cmd}' for updates.\")\n ret = 0\n else:\n ret = status.value\n finally:\n cluster.demote_from_submitter()\n if local:\n # These files were not used in this case.\n cluster.delete_files_internal()\n\n return ret", "title": "" }, { "docid": "b3160e79d99c6bf5469e3b1f4a9dcb99", "score": "0.5188561", "text": "def submit(self):\n raise NotImplementedError", "title": "" }, { "docid": "015b365872cb5bca5b0d79271c891eee", "score": "0.51828617", "text": "def create_and_submit_batch_job(self, job_list_string, job_time, job_mem):\n\t\tself._create_submission_job(job_list_string,job_time,job_mem)\n\t\tself._submit_job()", "title": "" }, { "docid": "4eb9d088d0ea55aecffe6e8d98d44e84", "score": "0.5178808", "text": "def submit(self, *args):\n self.submit_button.start()\n self.refresh_button.enable(False)\n self.enable_watcher(False)\n maya.check_box(self.watch_job, edit=True, enable=False)\n watcher = maya.check_box(self.watch_job, query=True, value=True)\n if watcher and not self.selected_dir:\n maya.warning(\"You must select a download directory \"\n \"if you wish to watch this job.\")\n else:\n self.base.submit(watcher, self.selected_dir)\n maya.check_box(self.watch_job, edit=True, enable=True)\n self.enable_watcher(True)\n self.submit_button.finish()\n self.refresh_button.enable(True)", "title": "" }, { "docid": "56d21a1b9e72d8ca26ca67d9b144e07d", "score": "0.517861", "text": "def job(self, job):\n\n self._job = job", "title": "" }, { "docid": "8bcb627c38239243f090755528374dae", "score": "0.5177769", "text": "def SubmitJob(self,\n jarfile: Optional[str] = None,\n classname: Optional[str] = None,\n pyspark_file: Optional[str] = None,\n query_file: Optional[str] = None,\n job_poll_interval: Optional[float] = None,\n job_stdout_file: Optional[str] = None,\n job_arguments: Optional[List[str]] = None,\n job_files: Optional[List[str]] = None,\n job_jars: Optional[List[str]] = None,\n job_py_files: Optional[List[str]] = None,\n job_type: Optional[str] = None,\n properties: Optional[Dict[str, str]] = None) -> JobResult:\n pass", "title": "" }, { "docid": "22c1ea46592d54697e747d0b283742b0", "score": "0.51756656", "text": "def resubmit_job(client, args):\n job_id = args.get('job_id')\n\n result = client.resubmit_job(job_id=job_id)\n # readable output will be in markdown format\n readable_output = '## Resubmitted Job'\n readable_output += tableToMarkdown('Twinwave Submissions', result, headers=['JobID'])\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix=\"Twinwave.Submissions\",\n outputs_key_field='JobID',\n outputs=result,\n raw_response=result # raw response - the original response\n )", "title": "" }, { "docid": "2bdfe82e090777ae73dac6d651d30500", "score": "0.51655895", "text": "def requires_submit(func):\n\n @wraps(func)\n def _wrapper(self, *args, **kwargs):\n if self._future is None:\n raise JobError(\"Job not submitted yet!. 
You have to .submit() first!\")\n return func(self, *args, **kwargs)\n\n return _wrapper", "title": "" }, { "docid": "c8350df4cf5f224549025894c35e4850", "score": "0.51577526", "text": "def run(jobname):\n # Get path of working directory\n path = os.getcwd()\n # Get job file path\n job_path = os.path.join(path, 'job.sh')\n # Submit a job to do a scf calculation\n submit_job(job_path, 'vasp_'+jobname)", "title": "" }, { "docid": "c15fd0710cccca3da5094ed0a42bf7aa", "score": "0.51519114", "text": "def submit_script(script, **kwargs):\n filename = local_filename('condorpy.jobscript.%s')\n with open(filename, 'w') as f:\n f.write(script)\n os.chmod(filename, 0o755)\n return submit(filename, **kwargs), filename", "title": "" }, { "docid": "8c4788e8c47c637f02dc3e9732ffda3b", "score": "0.51454127", "text": "def job(self, job: JobDefinition):\n\n self._job = job", "title": "" }, { "docid": "3a3af5424dd668aa5a50ebd920faa3e8", "score": "0.51372564", "text": "def enqueue_job(request, job, jumppoint = None, stoppoint = None):\n\n set_last_user_command(job.id, \"start\")\n\n if job.checkpoint == -1:\n job.checkpoint = 0\n job.save()\n\n \"\"\" Update S3 \"\"\"\n job.generate_exe(jumppoint=jumppoint, stoppoint=stoppoint)\n set_job_exe(job.id, job.exe)\n print \"36:\", job.exe\n \n \"\"\"\n to-do: the following file uploads are taking sweet time.\n Maybe we could move the uploads to a separate thread?\n \"\"\"\n push_jobfile_to_s3(job.id, job.settings.original_aa_file.aaseq_path._get_path() )\n \n \"\"\"How many taxa and sites in the amino acid sequence collection? -- we'll use this information\n later (in the job daemon) to launch an EC2 instance of the appropriate size.\"\"\"\n fin = open( job.settings.original_aa_file.aaseq_path._get_path(), \"r\" )\n ntaxa = 0\n nsites = 0\n lastseq = \"\"\n for l in fin.xreadlines():\n if l.startswith(\">\"):\n lastseqlen = lastseq.__len__()\n if nsites > lastseqlen:\n nsites = lastseqlen\n lastseq = \"\"\n ntaxa += 1\n else:\n lastseq += l.strip()\n fin.close()\n set_ntaxa(job.id, ntaxa)\n set_seqlen(job.id, nsites)\n \n if job.settings.has_codon_data != False:\n push_jobfile_to_s3(job.id, job.settings.original_codon_file.codonseq_path._get_path() )\n if job.settings.constraint_tree_file:\n push_jobfile_to_s3(job.id, job.settings.constraint_tree_file.constrainttree_path._get_path() )\n if job.settings.user_msas:\n for aa in job.settings.user_msas.all():\n push_jobfile_to_s3(job.id, aa.attachment.path)\n configfile = job.generate_configfile()\n push_jobfile_to_s3(job.id, configfile)\n setup_slave_startup_script(job.id)\n js = get_job_status(job.id)\n if js == \"Stopped\" or js == None:\n set_job_status(job.id, \"Starting, waiting for cloud resources to spawn\")\n # else, the job is already running, so don't override its current status\n\n \"\"\" Add to the SQS \"\"\"\n sqs_start(job.id)\n\n \"\"\"At this point, we're done. 
We wait for the PhyloBot job daemon\n to read the SQS queue and take action.\"\"\"\n return", "title": "" }, { "docid": "64e99c1f4ba379b23b273ff9ee9f59aa", "score": "0.51334363", "text": "def mantidjobs(*kargs, **kwargs):\n\n job=kwargs['job']\n if job=='create experimental Nexus files':\n pass\n else:\n print 'job not recognized, nothing to calculate'", "title": "" }, { "docid": "e58bb87c1fd95f608cc0915c9872cb48", "score": "0.5131561", "text": "def submit_conformer(self, conformer, restart=False):\n assert conformer, \"Please provide a conformer to submit a job\"\n\n self.calculator.conformer = conformer\n ase_calculator = self.calculator.get_conformer_calc()\n label = conformer.smiles + f\"_{conformer.index}\"\n file_path = os.path.join(ase_calculator.scratch, label)\n # for testing\n os.environ[\"FILE_PATH\"] = file_path\n\n attempted = False\n if os.path.exists(file_path + \".log\"):\n attempted = True\n if not restart:\n logging.info(\n \"It appears that this job has already been run, not running it a second time.\")\n\n if restart or not attempted:\n # Getting the number of processors needed for a child job\n number_of_atoms = conformer.rmg_molecule.get_num_atoms() - conformer.rmg_molecule.get_num_atoms('H')\n if number_of_atoms >= 4:\n nproc = 2\n elif number_of_atoms >= 7:\n nproc = 4\n elif number_of_atoms >= 9:\n nproc = 6\n else:\n nproc = 8\n ase_calculator.nprocshared = nproc\n self.write_input(conformer, ase_calculator)\n if restart:\n logging.info(\n f\"Restarting calculations for {conformer}.\"\n )\n else:\n logging.info(f\"Starting calculations for {conformer}\")\n\n command = [\n \"\"\"sbatch\"\"\", \n f\"\"\"--job-name=\"{label}\" \"\"\", \n f\"\"\"--output=\"{label}.slurm.log\" \"\"\", \n f\"\"\"--error=\"{label}.slurm.log\" \"\"\",\n \"\"\"-N 1\"\"\",\n f\"\"\"-n {nproc}\"\"\",\n \"\"\"-t 24:00:00\"\"\",\n f\"--mem {self.calculator.settings['mem']}\"\n ]\n # Building on the remaining commands\n if self.partition:\n command.append(f\"\"\"-p {self.partition}\"\"\")\n if self.exclude:\n if isinstance(self.exclude, str):\n command.append(f\"\"\"--exclude={self.exclude}\"\"\")\n elif isinstance(self.exclude, list):\n exc = \"\"\n for e in self.exclude:\n exc += e\n exc += \",\"\n exc = exc[:-1]\n command.append(f\"\"\"--exclude={exc}\"\"\")\n if self.account:\n command.append(f\"\"\"-A {self.account}\"\"\")\n \n command.append(f\"\"\"--wrap=\"{self.calculator.command} '{file_path}.com' > '{file_path}.log'\" \"\"\")\n exe = \"\"\n for c in command:\n exe += c + \" \" #combining the command into a single string, this makes submit go faster.\n if self.check_complete(label): #checking to see if the job is already in the queue\n # if it's not, then we're gona submit it\n self.submit(exe)\n else:\n # it's currently in the queue, so not actually submitting it\n return label\n return label", "title": "" }, { "docid": "a42327c94d0aac6cf0cffc6f9d33de26", "score": "0.5131294", "text": "def submit_jobs():\n jobs = []\n jobs.extend(jobtracker.query(\"SELECT * FROM jobs \" \\\n \"WHERE status='retrying' \" \\\n \"ORDER BY updated_at ASC\"))\n jobs.extend(jobtracker.query(\"SELECT * FROM jobs \" \\\n \"WHERE status='new'\" \\\n \"ORDER BY updated_at ASC\"))\n for job in jobs:\n if config.jobpooler.queue_manager.can_submit():\n submit(job)\n if config.jobpooler.submit_sleep:\n time.sleep(config.jobpooler.submit_sleep)\n else:\n break", "title": "" }, { "docid": "fe4bc6e43b8270364d6f86e1c8e3f2e7", "score": "0.51236796", "text": "def submit_one_coadd_job(tile, parenttask, mode, parameters, 
location,\n exclude_pointing_jobs=False,\n science_obs_only=False,\n never_update=False,\n dryrun=True, priority=0,\n output_task=None, pointings_only=False):\n # Generate tag, task name, and filter.\n if not output_task:\n coadd_task = generate_hpx_coadd_task(parenttask)\n else:\n coadd_task = output_task\n\n tag = generate_hpx_coadd_tag(tile, coadd_task)\n filt = create_hpx_filter(tile, parenttask)\n\n db = get_database()\n\n # Check what current parent values should be.\n try:\n parent_jobs = get_parents(tile, parenttask,\n exclude_pointing_jobs=exclude_pointing_jobs,\n pointings_only=pointings_only,\n science_obs_only=science_obs_only)\n except NoRowsError:\n parent_jobs = []\n\n # Perform upsert operation and return job ID (or None).\n return add_upd_del_job(\n db=db,\n tag=tag,\n location=location,\n mode=mode,\n parameters=parameters,\n task=coadd_task,\n priority=priority,\n parent_jobs=parent_jobs,\n filters=([filt] * len(parent_jobs)),\n tilelist=[tile],\n allow_upd=(not never_update),\n allow_del=(not never_update),\n description='coadd for tile {}'.format(tile),\n dry_run=dryrun)", "title": "" } ]
c1df52cabfc7189ecec5abca901b75c2
Draws the bounding boxes and lines on the given image. people is a list of dicts representing the people in the image, formatted like the output of sd_measure.measure_locations().
[ { "docid": "c847b55f5353e15a031ad357b5a9fb0f", "score": "0.7856473", "text": "def drawBoxesAndLines(image, people):\n for d in people:\n topLeft = (d['bbox'][3], d['bbox'][2])\n bottomRight = (d['bbox'][1], d['bbox'][0])\n center = tuple(coord // 2 for coord in tuple(map(operator.add, topLeft, bottomRight)))\n # Draw bbox around person\n if len(d['too_close']) == 0:\n color = (0, 255, 0)\n else:\n color = (0, 0, 255)\n image = cv2.rectangle(image, topLeft, bottomRight, color, 2)\n\n for idx in d['too_close']:\n coordsOfOtherPerson = people[idx]['bbox']\n centerOfOtherPerson = ((coordsOfOtherPerson[1] + coordsOfOtherPerson[3]) // 2,\n (coordsOfOtherPerson[0] + coordsOfOtherPerson[2]) // 2)\n image = cv2.line(image, center, centerOfOtherPerson, (0, 0, 255), 2)\n return image", "title": "" } ]
[ { "docid": "cff4babb9963b0845d5f694602d07442", "score": "0.7160496", "text": "def draw_bounding_boxes_on_image(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n raise ValueError('Input must be of size [N, 4]')\n for i in range(boxes_shape[0]):\n display_str_list = ()\n if display_str_list_list:\n display_str_list = display_str_list_list[i]\n draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1],\n boxes[i, 2], boxes[i, 3], color, thickness,\n display_str_list)", "title": "" }, { "docid": "d7e3b6356bf5ba3eb647c6251b0899d7", "score": "0.71047497", "text": "def draw_bounding_boxes_on_image_array(image,\n boxes,\n color='red',\n thickness=4,\n display_str_list_list=()):\n image_pil = Image.fromarray(image)\n draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,\n display_str_list_list)\n np.copyto(image, np.array(image_pil))", "title": "" }, { "docid": "a6cc667f551124f39415e72dd3453ae6", "score": "0.70775753", "text": "def draw_bounding_box_on_image(self, image, ymin, xmin, ymax, xmax, color, font, thickness=4, display_str_list=()):\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)\n draw.line([(left, top), (left, bottom), (right, bottom), (right, top), (left, top)], width=thickness, fill=color)\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = top + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle([(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom)], fill=color)\n draw.text((left + margin, text_bottom - text_height - margin), display_str, fill=\"black\", font=font)\n text_bottom -= text_height - 2 * margin", "title": "" }, { "docid": "32e2a9d595539ece4eaff8d36c147341", "score": "0.7075286", "text": "def draw_boundingboxes(image, this_frame_boundingboxes):\n v_res, h_res, c = image.shape\n\n for i, boundingbox in enumerate(this_frame_boundingboxes['boundingbox']): \n left = int(boundingbox.left * h_res)\n top = int(boundingbox.top * v_res)\n right = int(boundingbox.right * h_res)\n bottom = int(boundingbox.bottom * v_res)\n \n cv2.rectangle(image, (left,top), (right,bottom), (0,255,0), 2)\n\n sorted_attributes = []\n for attribute in this_frame_boundingboxes['attributes'][i]:\n if(attribute.confidence >= 0.6):\n sorted_attributes.append(attribute.name)\n\n sorted_attributes.sort()\n \n if sorted_attributes: \n for i, attribute in enumerate(sorted_attributes):\n cv2.putText(image, attribute, (left, (bottom+20)+20*i), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, cv2.LINE_AA)\n\n return image", "title": "" }, { "docid": "7b6ab644bf5873f38aaf804b3c5dd867", "score": "0.70150644", "text": "def draw_bounding_box_on_image(\n image, ymin, xmin, ymax, xmax, color, font, thickness=4, display_str_list=()\n):\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n (left, right, top, bottom) = (\n xmin * im_width,\n xmax * im_width,\n ymin * im_height,\n ymax * im_height,\n )\n draw.line(\n [(left, top), (left, bottom), (right, bottom), (right, 
top), (left, top)],\n width=thickness,\n fill=color,\n )\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = top + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [\n (left, text_bottom - text_height - 2 * margin),\n (left + text_width, text_bottom),\n ],\n fill=color,\n )\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill=\"black\",\n font=font,\n )\n text_bottom -= text_height - 2 * margin", "title": "" }, { "docid": "7f73741abf26f21e9b03410eefdaf3aa", "score": "0.6968699", "text": "def display_img(self, img, bndboxes=None):\n\n for bndbox in bndboxes:\n cv2.rectangle(\n img,\n bndbox[0],\n bndbox[1],\n (0, 255, 0),\n thickness=1,\n lineType=cv2.LINE_AA,\n )\n\n plt.imshow(img)\n plt.show()", "title": "" }, { "docid": "09b895176c192abf0c9f5f39ce74c5a4", "score": "0.695145", "text": "def draw_bounding_box_on_image(image,\n ymin,\n xmin,\n ymax,\n xmax,\n clss=None,\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True,\n label_font_size=16): \n if clss is None:\n color = COLORS[1]\n else:\n color = COLORS[int(clss) % len(COLORS)]\n\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n draw.line([(left, top), (left, bottom), (right, bottom),\n (right, top), (left, top)], width=thickness, fill=color)\n\n try:\n font = ImageFont.truetype('arial.ttf', label_font_size)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin), (left + text_width,\n text_bottom)],\n fill=color)\n\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n\n text_bottom -= (text_height + 2 * margin)", "title": "" }, { "docid": "2228c59d03d10b2e3a9623ec012a4f71", "score": "0.69426435", "text": "def draw_bounding_boxes_on_image(image,\n boxes,\n classes,\n thickness=4,\n display_strs=()):\n\n boxes_shape = boxes.shape\n if not boxes_shape:\n return\n if len(boxes_shape) != 2 or boxes_shape[1] != 4:\n # print('Input must be of size [N, 4], but is ' + 
str(boxes_shape))\n return # no object detection on this image, return\n for i in range(boxes_shape[0]):\n if display_strs:\n display_str_list = display_strs[i]\n draw_bounding_box_on_image(image,\n boxes[i, 0], boxes[i, 1], boxes[i, 2], boxes[i, 3],\n classes[i],\n thickness=thickness, display_str_list=display_str_list)", "title": "" }, { "docid": "f2f90c5bbcded3f9ba9b941bf6f91154", "score": "0.69232655", "text": "def draw_debug_rects(self, image):\n\n if utils.is_gray(image):\n face_color = 255\n left_eye_color = 255\n right_eye_color = 255\n nose_color = 255\n mouth_color = 255\n\n else:\n face_color = (255, 255, 255) # white\n left_eye_color = (0, 0, 255) # red\n right_eye_color = (0, 255, 255) # yellow\n nose_color = (0, 255, 0) # green\n mouth_color = (255, 0, 0) # blue\n\n for face in self.faces:\n rects.outline_rect(image, face.face_rect, face_color)\n rects.outline_rect(image, face.left_eye_rect, left_eye_color)\n rects.outline_rect(image, face.right_eye_rect, right_eye_color)\n rects.outline_rect(image, face.nose_rect, nose_color)\n rects.outline_rect(image, face.mouth_rect, mouth_color)", "title": "" }, { "docid": "7c099179f0d2c371162a8027d6f980b7", "score": "0.69191575", "text": "def draw_bounding_box_on_image(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=()):\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n if thickness > 0:\n draw.line([(left, top), (left, bottom), (right, bottom), (right, top),\n (left, top)],\n width=thickness,\n fill=color)\n try:\n font = ImageFont.truetype('arial.ttf', 24)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle(\n [(left, text_bottom - text_height - 2 * margin), (left + text_width,\n text_bottom)],\n fill=color)\n draw.text(\n (left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n text_bottom -= text_height - 2 * margin", "title": "" }, { "docid": "9a249ee752e5274409e38bb3ca11f502", "score": "0.6895307", "text": "def draw_bounding_box_on_image(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n else:\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n draw.line([(left, top), (left, bottom), (right, bottom), (right, top),\n (left, top)],\n width=thickness,\n fill=color)\n try:\n font = ImageFont.truetype('arial.ttf', 24)\n except IOError:\n font = ImageFont.load_default()\n\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings 
below the bounding box\n # instead of above.\n display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)\n\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n for display_str in display_str_list[::-1]:\n text_width, text_height = font.getsize(display_str)\n margin = np.ceil(0.05 * text_height)\n draw.rectangle([(left, text_bottom - text_height - 2 * margin),\n (left + text_width, text_bottom)],\n fill=color)\n draw.text((left + margin, text_bottom - text_height - margin),\n display_str,\n fill='black',\n font=font)\n text_bottom -= text_height - 2 * margin", "title": "" }, { "docid": "6def47cb6575cfe43b764f647f5cc418", "score": "0.6836986", "text": "def draw_boxes(self, img, detections):\n\n # ----- While loop: drawing -----------------------------------------------------\n for detect in detections:\n # get coordinates\n (x, y, w, h) = detect[1]\n top_left = (int(x), int(y))\n bottom_right = (int(x + w), int(y + h))\n color = (0, 255, 0) # green\n thick = 2\n\n # draw\n cv.rectangle(img, top_left, bottom_right, color, thick, 1)", "title": "" }, { "docid": "06bf288f1e916ce6ee3678a96bcb4650", "score": "0.6804539", "text": "def draw(img, boxes):\n image = Image.fromarray(np.floor(img).astype('uint8'))\n thickness = (image.size[0] + image.size[1]) // 600\n for box in boxes:\n p1 = (box.x1, box.y1)\n p2 = (box.x2, box.y2)\n label = '{} {:.2f}%'.format(box.cls, box.score * 100)\n draw = ImageDraw.Draw(image)\n # font = ImageFont.truetype(font='./FiraMono-Medium.otf', encoding='ADOB')\n label_size = draw.textsize(label)\n text_origin = np.array([p1[0], p1[1] + 1])\n color = np.random.randint(0, 255, [3])\n color = np.array([0, 255, 0])\n\n for i in range(thickness):\n draw.rectangle([p1[0] + i, p1[1] + i, p2[0] - i, p2[1] - i], outline=tuple(color))\n\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill=tuple(color))\n draw.text(tuple(text_origin), label, fill=(0, 0, 0), label_size=2)\n\n del draw\n return image", "title": "" }, { "docid": "477dd2badda2e119b20cc6ddc4313a02", "score": "0.67918557", "text": "def draw_bbox(self, image):\n image = image.copy()\n \n kwargs = {'img': image, 'color': (255,0,0), 'thickness': 5}\n cv2.line(pt1=(self.bbox['xmin'], self.bbox['ymin']),\n pt2=(self.bbox['xmin'], self.bbox['ymax']),\n **kwargs)\n cv2.line(pt1=(self.bbox['xmin'], self.bbox['ymin']),\n pt2=(self.bbox['xmax'], self.bbox['ymin']),\n **kwargs)\n cv2.line(pt1=(self.bbox['xmax'], self.bbox['ymax']),\n pt2=(self.bbox['xmin'], self.bbox['ymax']),\n **kwargs)\n cv2.line(pt1=(self.bbox['xmax'], self.bbox['ymax']),\n pt2=(self.bbox['xmax'], self.bbox['ymin']),\n **kwargs)\n preview(image)", "title": "" }, { "docid": "ae6cec03596ad5eb1fb4ad4163af1b04", "score": "0.6734276", "text": "def draw_bboxes(image, locations):\n #print(locations)\n for left,top,right,bottom,confidence in locations:\n if confidence==0:\n continue\n cv2.rectangle(image,(left,top),(right,bottom),(255,0,0),3)\n #cv2.imwrite('bbox.png',image)#test on a single image\n return image", "title": "" }, { "docid": "f46457b45e1eb3296cceb982198e030a", "score": "0.6723932", "text": "def plot_boxes(img, boxes):\n if boxes is None:\n return img\n\n draw = ImageDraw.Draw(img)\n for box in boxes:\n draw.polygon([box[0], box[1], box[2], box[3], box[4], box[5], box[6], box[7]], 
outline=(0, 255, 0))\n return img", "title": "" }, { "docid": "ba902b8e509f81122e2296b229c9937a", "score": "0.66956997", "text": "def draw_bounding_box_on_image_array(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=()):\n \n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,\n thickness, display_str_list)\n np.copyto(image, np.array(image_pil))", "title": "" }, { "docid": "cb156d03ad1bf275e059cd21b30ceb2e", "score": "0.66225964", "text": "def render_image(frame, image_wise_bboxes, output_image_file, box_color, linewidth=3):\n image = frame.load_image()\n draw = ImageDraw.Draw(image)\n for annotations in image_wise_bboxes:\n class_name = annotations.category\n box = annotations.box\n outline_color = (box_color[class_name].R,\n box_color[class_name].G,\n box_color[class_name].B)\n if (box[2] - box[0]) >= 0 and (box[3] - box[1]) >= 0:\n draw.rectangle(box, outline=outline_color)\n for i in range(linewidth):\n x1 = max(0, box[0] - i)\n y1 = max(0, box[1] - i)\n x2 = min(frame.width, box[2] + i)\n y2 = min(frame.height, box[3] + i)\n draw.rectangle(box, outline=outline_color)\n image.save(output_image_file)", "title": "" }, { "docid": "f900d56ab54d8372f1b8ee27ebc43915", "score": "0.658467", "text": "def draw_boxes(self, img, bboxes, color=(0, 0, 255), thick=6):\n\n imcopy = np.copy(img)\n for bbox in bboxes:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n return imcopy", "title": "" }, { "docid": "b57bcb5216405c7eb873ee092304c957", "score": "0.65805", "text": "def draw_bboxes(self, image, bboxes, confidences, class_ids):\n\n for bb, conf, cid in zip(bboxes, confidences, class_ids):\n clr = [int(c) for c in self.bbox_colors[cid]]\n cv.rectangle(image, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]), clr, 2)\n label = \"{}:{:.4f}\".format(self.object_names[cid], conf)\n (label_width, label_height), baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 2)\n y_label = max(bb[1], label_height)\n cv.rectangle(image, (bb[0], y_label - label_height), (bb[0] + label_width, y_label + baseLine),\n (255, 255, 255), cv.FILLED)\n cv.putText(image, label, (bb[0], y_label), cv.FONT_HERSHEY_SIMPLEX, 0.5, clr, 2)\n return image", "title": "" }, { "docid": "b827a48875a5d16b8d463cad6f193c6d", "score": "0.65717643", "text": "def draw_box(image, markers, thickness=1):\n \n #print ('markers in image' + str(markers))\n cv2.line(image,markers[0],markers[1],(0,255,0),thickness)\n cv2.line(image,markers[1],markers[3],(0,255,0),thickness)\n cv2.line(image,markers[3],markers[2],(0,255,0),thickness)\n cv2.line(image,markers[2],markers[0],(0,255,0),thickness)\n \n \n return image", "title": "" }, { "docid": "8f2584177390a760670bf4e7c153e8be", "score": "0.65304774", "text": "def draw_obj_bbox(image, bboxes, classes, probs, dataset_type):\n draw = image.copy()\n label_list = get_labels(dataset_type)\n for bbox, cls, prob in zip(bboxes, classes, probs):\n label = \"{}:{:.6f}\".format(label_list[cls], prob)\n # map -> list\n inner_color = np.random.uniform(0, 255, 3).astype(np.uint8)\n color = (int(inner_color[0]), int(inner_color[1]), int(inner_color[2]))\n cv2.rectangle(\n draw,\n pt1=(int(bbox[0]), int(bbox[1])),\n pt2=(int(bbox[2]), int(bbox[3])),\n color=color,\n thickness=1)\n t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]\n pt2 = int(bbox[0]) + t_size[0] + 3, int(bbox[1]) + t_size[1] + 4\n cv2.rectangle(\n draw,\n pt1=(int(bbox[0]), int(bbox[1])),\n 
pt2=tuple(pt2),\n color=color,\n thickness=-1)\n cv2.putText(draw, label, (int(bbox[0]), t_size[1] + 4 + int(bbox[1])),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)\n return draw", "title": "" }, { "docid": "2e6f1235516687656566cf06225d178d", "score": "0.6519187", "text": "def draw_bounding_boxes(image, faces, face_emotion_prediction_dictionary):\n for i, face in enumerate(faces):\n x, y, w, h = getxywh(face)\n image = cv2.rectangle(image, (x, y), (x + w, y + h), (255, 165, 0), 2)\n face_emotion = face_emotion_prediction_dictionary[i][0]\n font_scale = 0.9\n font = cv2.FONT_HERSHEY_PLAIN\n rectangle_bgr = (255, 255, 255)\n text = str(i+1) + \". \" + face_emotion[\"emotion\"] + \": \" + face_emotion[\"probability\"]\n (text_width, text_height) = cv2.getTextSize(text, font, fontScale=font_scale, thickness=1)[0]\n text_offset_x = x\n text_offset_y = y - 1\n box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height - 2))\n cv2.rectangle(image, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n cv2.putText(image, text, (text_offset_x, text_offset_y), font, fontScale=font_scale, color=(255, 165, 0), thickness=1)\n return image", "title": "" }, { "docid": "c7eaad04602b990cee19f31242a7dc18", "score": "0.6489813", "text": "def drawBoundingBox(image, bbox, lblid, lblconf=None):\n ymin,xmin,ymax,xmax = [int(f) for f in bbox]\n lblcolor = LBL_COLORS[lblid]\n lblname = LBL_NAMES[lblid]\n image = cv.rectangle(image, (xmin, ymin), (xmax, ymax), lblcolor, 1)\n image = writeBBLabelText(image, (xmin, ymin), (xmax, ymax), lblname, lblcolor, lblconf)\n return image", "title": "" }, { "docid": "fad069a0c1ebd0f1b97143013ac632bb", "score": "0.6485699", "text": "def draw_annotated_box(image, box_list_list, label_list, color_list):\n\n font_path = os.path.join(os.path.dirname(__file__), 'keras_yolo3/font/FiraMono-Medium.otf')\n font = ImageFont.truetype(font = font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))\n thickness = (image.size[0] + image.size[1]) // 300\n\n draw = ImageDraw.Draw(image)\n\n for box_list, label, color in zip(box_list_list, label_list, color_list):\n if not isinstance(color, tuple):\n color = tuple(color)\n for box in box_list:\n # deal with empty predictions\n if len(box)<4:\n continue\n\n # if score is also passed, append to label\n thelabel = '{}'.format(label)\n if len(box)>4:\n thelabel += ' {:.2f}'.format(box[-1])\n label_size = draw.textsize(thelabel, font)\n\n xmin, ymin, xmax, ymax = box[:4]\n ymin = max(0, np.floor(ymin + 0.5).astype('int32'))\n xmin = max(0, np.floor(xmin + 0.5).astype('int32'))\n ymax = min(image.size[1], np.floor(ymax + 0.5).astype('int32'))\n xmax = min(image.size[0], np.floor(xmax + 0.5).astype('int32'))\n\n if ymin - label_size[1] >= 0:\n text_origin = np.array([xmin, ymin - label_size[1]])\n else:\n text_origin = np.array([xmin, ymax])\n\n\n for i in range(thickness):\n draw.rectangle([xmin + i, ymin + i, xmax - i, ymax - i], outline=color)\n draw.rectangle([tuple(text_origin), tuple(text_origin + label_size)], fill = color)\n draw.text(text_origin, thelabel, fill=(0, 0, 0), font=font)\n\n del draw\n\n return image", "title": "" }, { "docid": "87821eed671fced90b2f5da56ce3a55b", "score": "0.64568996", "text": "def draw_boxes(img, boxes):\n for box in boxes:\n A = (box[0], box[1])\n B = (box[6], box[7])\n cv2.rectangle(img, A, B, (0, 255, 0, 0.5), 3)\n return img", "title": "" }, { "docid": "7b8c3f130524469ad4a6516b2d3175d7", "score": "0.64454657", "text": "def 
show_image(image, boxes, original_width=500, original_height=500):\n source_img = Image.fromarray(image.astype('uint8'))\n width, height = source_img.size[0], source_img.size[1]\n font = ImageFont.truetype(font='font/FiraMono-Medium.otf',\n size=np.floor(3e-2 * height + 0.5).astype('int32'))\n thickness = (width + height) // 200\n\n class_list = utils.label_names_as_list()\n\n draw = ImageDraw.Draw(source_img)\n for box in boxes:\n class_index = box.get_classindex()\n bb_color = get_random_color(class_index)\n\n label = '{} {:.2f}'.format(class_list[class_index], box.class_probs[class_index])\n label_size = list(draw.textsize(label, font))\n\n box_x_ratio, box_y_ratio, box_width_ratio, box_height_ratio = box.get_coordinates()\n left = min(max(width * (box_x_ratio - box_width_ratio / 2), 0), width)\n top = min(max(height * (box_y_ratio - box_height_ratio / 2), 0), height)\n right = min(max(width * (box_x_ratio + box_width_ratio / 2), 0), width)\n bottom = min(max(height * (box_y_ratio + box_height_ratio / 2), 0), height)\n if top - label_size[1] >= 0:\n text_origin = [left, top - label_size[1]]\n else:\n text_origin = [left, top + 1]\n for i in range(thickness):\n draw.rectangle([left + i, top + i, right - i, bottom - i],\n outline=bb_color)\n draw.rectangle([tuple(text_origin), text_origin[0] + label_size[0], text_origin[1] + label_size[1]],\n fill=bb_color)\n draw.text(text_origin, label, fill=(0, 0, 0), font=font)\n del draw\n source_img = source_img.resize((original_width, original_height), Image.ANTIALIAS)\n imshow(\"Predicted\", np.array(source_img))", "title": "" }, { "docid": "d9a9d9adc202d6d19c8656e3004795dc", "score": "0.64094853", "text": "def draw_boxes_and_label_on_image_cv2(img, class_label_map, class_boxes_map):\r\n for c, boxes in class_boxes_map.items():\r\n # print(\"boxes in visualize.py:{}\".format(boxes))\r\n for box in boxes:\r\n # assert len(box) == 5, 'class_boxes_map every item must be [bb_left, bb_top, bb_right, bb_bottom, prob]'\r\n # checking box order is bb_left, bb_top, bb_right, bb_bottom\r\n # make sure all box should be int for OpenCV\r\n bb_left = int(box[0])\r\n bb_top = int(box[1])\r\n bb_right = int(box[2])\r\n bb_bottom = int(box[3])\r\n\r\n # prob will round 2 digits\r\n prob = round(box[4], 2)\r\n index = int(box[5])\r\n unique_color = _create_unique_color_uchar(index)\r\n cv2.rectangle(img, (bb_left, bb_top), (bb_right, bb_bottom), unique_color, 1)\r\n\r\n # text_label = '{} {}'.format(class_label_map[c], prob)\r\n text_label1 = '{}'.format(class_label_map[str(index)])\r\n # text_label2 = '{}'.format(prob)\r\n\r\n (ret_val, base_line) = cv2.getTextSize(text_label1, cv2.FONT_HERSHEY_COMPLEX, 0.4, 1)\r\n text_org1 = (bb_left - 2, bb_top + 5)\r\n # text_org2 = (bb_left, bb_top + bb_bottom - 0)\r\n\r\n # cv2.rectangle(img, (text_org[0] - 5, text_org[1] + base_line - 5),\r\n # (text_org[0] + ret_val[0] + 5, text_org[1] - ret_val[1] + 5), unique_color, 1)\r\n # this rectangle for fill text rect\r\n # print(ret_val)\r\n cv2.rectangle(img, (text_org1[0], text_org1[1] + base_line - 3),\r\n (text_org1[0] + ret_val[0] - 1, text_org1[1] - ret_val[1]),\r\n unique_color, -1)\r\n cv2.putText(img, text_label1, text_org1, cv2.FONT_HERSHEY_DUPLEX, 0.4, (255, 255, 255), 1)\r\n # cv2.putText(img, text_label2, text_org2, cv2.FONT_HERSHEY_DUPLEX, 0.2, (150, 150, 150), 1)\r\n\r\n return img", "title": "" }, { "docid": "4c4f91d31362a64fa8524ecbf123e920", "score": "0.6401002", "text": "def visualize_bbox(img_file, yolo_ann_file, label_dict, figure_size=(6, 8)):\n\n img = 
mpimg.imread(img_file)\n fig, ax = plt.subplots(1, 1, figsize=figure_size)\n ax.imshow(img)\n\n im_height, im_width, _ = img.shape\n\n palette = mcolors.TABLEAU_COLORS\n colors = [c for c in palette.keys()]\n with open(yolo_ann_file, \"r\") as fin:\n for line in fin:\n cat, center_w, center_h, width, height = line.split()\n cat = int(cat)\n category_name = label_dict[cat]\n left = (float(center_w) - float(width) / 2) * im_width\n top = (float(center_h) - float(height) / 2) * im_height\n width = float(width) * im_width\n height = float(height) * im_height\n\n rect = plt.Rectangle(\n (left, top),\n width,\n height,\n fill=False,\n linewidth=2,\n edgecolor=colors[cat],\n )\n ax.add_patch(rect)\n props = dict(boxstyle=\"round\", facecolor=colors[cat], alpha=0.5)\n ax.text(\n left,\n top,\n category_name,\n fontsize=14,\n verticalalignment=\"top\",\n bbox=props,\n ) \n plt.show()", "title": "" }, { "docid": "38e47be6e981e19ef3150c5ef0cc2051", "score": "0.6368204", "text": "def draw_bounding_box_on_image_array(image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=4,\n display_str_list=(),\n use_normalized_coordinates=True):\n image_pil = Image.fromarray(np.uint8(image)).convert('RGB')\n draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,\n thickness, display_str_list,\n use_normalized_coordinates)\n np.copyto(image, np.array(image_pil))", "title": "" }, { "docid": "9c0e07efbf580212693e3ec33c3102f2", "score": "0.63677627", "text": "def draw_boxes(image_and_detections):\n image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections,\n tf.uint8)\n return image_with_boxes", "title": "" }, { "docid": "d429ce0bad7e489e85ccf13c61684f46", "score": "0.6367289", "text": "def render_detection_bounding_boxes(detections, image, label_map={},\n classification_label_map={},\n confidence_threshold=0.8, thickness=4,\n classification_confidence_threshold=0.3,\n max_classifications=3):\n\n display_boxes = []\n display_strs = [] # list of lists, one list of strings for each bounding box (to accommodate multiple labels)\n classes = [] # for color selection\n\n for detection in detections:\n\n score = detection['conf']\n if score > confidence_threshold:\n \n x1, y1, w_box, h_box = detection['bbox']\n display_boxes.append([y1, x1, y1 + h_box, x1 + w_box])\n clss = detection['category']\n label = label_map[clss] if clss in label_map else clss\n displayed_label = ['{}: {}%'.format(label, round(100 * score))]\n \n if 'classifications' in detection:\n \n # To avoid duplicate colors with detection-only visualization, offset\n # the classification class index by the number of detection classes\n clss = len(bbox_categories) + int(detection['classifications'][0][0])\n classifications = detection['classifications']\n if len(classifications) > max_classifications:\n classifications = classifications[0:max_classifications]\n for classification in classifications:\n p = classification[1]\n if p < classification_confidence_threshold:\n continue\n class_key = classification[0]\n if class_key in classification_label_map:\n class_name = classification_label_map[class_key]\n else:\n class_name = class_key\n displayed_label += ['{}: {:5.1%}'.format(class_name.lower(), classification[1])]\n \n # ...if we have detection results\n display_strs.append(displayed_label)\n classes.append(clss)\n\n # ...if the confidence of this detection is above threshold\n \n # ...for each detection\n \n display_boxes = np.array(display_boxes)\n\n draw_bounding_boxes_on_image(image, display_boxes, classes,\n 
display_strs=display_strs, thickness=thickness)", "title": "" }, { "docid": "12b00dd2e2bec867b11930583f6fa7d2", "score": "0.63468635", "text": "def draw_bbox(img_name, result, threshold=0.5, save_name='res.jpg'):\n img = Image.open(img_name).convert('RGB')\n draw = ImageDraw.Draw(img)\n for res in result:\n cat_id, score, bbox = res[0], res[1], res[2:]\n print(cat_id, score, bbox)\n if score < threshold:\n continue\n xmin, ymin, xmax, ymax = bbox\n draw.line([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),\n (xmin, ymin)],\n width=2,\n fill=(255, 0, 0))\n print('category id is {}, bbox is {}'.format(cat_id, bbox))\n img.save(save_name, quality=95)", "title": "" }, { "docid": "8b5595a997abf4312560a097a16cccea", "score": "0.6344608", "text": "def draw_rects(img, rects, color, thick):\r\n for rect in rects:\r\n startpt = (rect[0], rect[1])\r\n endpt = (rect[2], rect[3])\r\n img = cv2.rectangle(img, startpt, endpt, color, thick)\r\n return img", "title": "" }, { "docid": "cf6663da8fdae294e82e80eb31d9ad50", "score": "0.6338574", "text": "def draw_boxes(img, box_set, color=(0, 0, 255), thick=6):\n # Make a copy of the image\n imcopy = np.copy(img)\n # Iterate through the bounding boxes\n for bboxes in box_set:\n for bbox in bboxes:\n # Draw a rectangle given bbox coordinates\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n # Return the image copy with boxes drawn\n return imcopy", "title": "" }, { "docid": "7d725cfd45817db874d95eca1475cad5", "score": "0.63206965", "text": "def draw_bbox(file_name): \n with open(file_name, 'r') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n #print(row)\n file_name = row['img_path']\n img = cv2.imread(file_name)\n\n top_left = (int(row['topleft_x']), int(row['topleft_y']))\n bottom_right = (int(row['bottomright_x']), int(row['bottomright_y']))\n print(row['img_path'])\n img = cv2.rectangle(img,top_left,bottom_right,(0,255,0),3)\n cv2.imshow('image', img)\n cv2.waitKey(0)", "title": "" }, { "docid": "8c7abe54c81b3d478926afbacaa9dd84", "score": "0.6313703", "text": "def draw_bboxes(img, bboxes, color):\n\n for bbox in bboxes:\n bbox = bbox.astype(int)\n img = cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color, 2)\n return img", "title": "" }, { "docid": "d97c23e5eb85cf2103306977a70bbacf", "score": "0.6310389", "text": "def DrawImage(self):\n self.image[:,:]=self.colorDict[\"black\"]\n # road, walls and center line\n for cl, lw, rw in zip(self.cl_segments, self.lw_segments, self.rw_segments):\n self.DrawPolygon([lw.p1, lw.p2, rw.p2, rw.p1], \"black\")\n self.DrawSegment(rw, \"yellow\")\n self.DrawSegment(lw, \"yellow\")\n self.DrawSegment(cl, \"white\")\n # start and end lines\n self.DrawSegment(self.start_segment, \"white\")\n self.DrawSegment(self.end_segment, \"red\")", "title": "" }, { "docid": "62a23e156a61d0f82182344e5d8d63ad", "score": "0.6305977", "text": "def draw_bounding_box_pil_image(\n image: Image,\n ymin: float,\n xmin: float,\n ymax: float,\n xmax: float,\n text: AnyStr = \"\",\n use_normalized_coordinates: bool = True,\n color: AnyStr = BOUNDING_BOX_COLOR,\n) -> None:\n draw = ImageDraw.Draw(image)\n im_width, im_height = image.size\n line_thickness = 3 * int(np.ceil(0.001 * max(im_width, im_height)))\n (left, right, top, bottom) = (xmin, xmax, ymin, ymax)\n if use_normalized_coordinates:\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height)\n lines = [(left, top), (left, bottom), (right, bottom), (right, top), (left, top)]\n draw.line(xy=lines, 
width=line_thickness, fill=color)\n if text:\n text_line_list = text.splitlines()\n scaled_font = scale_bounding_box_font(image, text_line_list, left, right)\n # If the total height of the display strings added to the top of the bounding box\n # exceeds the top of the image, stack the strings below the bounding box instead of above.\n text_height = sum([scaled_font.getsize(text_line)[1] for text_line in text_line_list])\n text_height_with_margin = (1 + 2 * 0.05) * text_height # Each line has a top and bottom margin of 0.05x\n text_bottom = top\n if top < text_height_with_margin:\n text_bottom += text_height_with_margin\n # Reverse list and print from bottom to top.\n for text_line in text_line_list[::-1]:\n text_width, text_height = scaled_font.getsize(text_line)\n margin = int(np.ceil(0.05 * text_height))\n rectangle = [(left, text_bottom - text_height - 2 * margin), (left + text_width, text_bottom + 2 * margin)]\n draw.rectangle(xy=rectangle, fill=color)\n draw.text(\n xy=(left + margin, text_bottom - text_height - margin), text=text_line, fill=\"black\", font=scaled_font\n )\n text_bottom -= text_height - 2 * margin", "title": "" }, { "docid": "88b3cde23c84b9541a4a11683375bd5e", "score": "0.6302675", "text": "def drawBoxes(img, bBoxes, color=(0, 255, 0), thickness=4):\n ######################## Your code #############################\n\n\n ######################## End of your code ######################", "title": "" }, { "docid": "37b343bd6ea25a2d080ecf2ca71ffcde", "score": "0.62359625", "text": "def showboxes(image, boxes, output_figure_path=None):\n color_list = [\"red\", \"blue\", \"yellow\", \"orange\", \"purple\", \"gold\", \"silver\", \"magenta\",\n \"cyan\", \"black\", \"white\", \"pink\"]\n figure = plt.figure()\n axis = figure.add_subplot(111, aspect='equal')\n plt.imshow(image)\n # color = \"yellow\"\n for box in boxes:\n axis.add_patch(patches.Rectangle(box[:2],\n box[2] - box[0],\n box[3] - box[1],\n fill=None,\n ec=color_list[box[4].astype(int)],\n lw=2))\n\n if output_figure_path is not None:\n plt.savefig(output_figure_path)", "title": "" }, { "docid": "25d846b821c9c2346e940ce5da8e3eb6", "score": "0.6232027", "text": "def plot_boxes(self, img, classes, scores, boxes, file_name, line_width=1, visualize=False):\n img_ax = pyplot.imshow(img)\n colors = dict()\n for i in range(classes.shape[0]):\n cls_id = int(classes[i])\n if cls_id >= 0:\n score = scores[i]\n if cls_id not in colors:\n colors[cls_id] = (random(), random(), random())\n ymin = int(numpy.clip(boxes[i, 0], 0, self.image_shape[0]))\n xmin = int(numpy.clip(boxes[i, 2], 0, self.image_shape[1]))\n ymax = int(numpy.clip(boxes[i, 1], ymin, self.image_shape[0]))\n xmax = int(numpy.clip(boxes[i, 3], xmin, self.image_shape[1]))\n rect = pyplot.Rectangle((xmin, ymin), width=xmax - xmin, height=ymax - ymin, fill=False, edgecolor=colors[cls_id], linewidth=line_width)\n pyplot.gca().add_patch(rect)\n class_name = str(cls_id)\n pyplot.gca().text(xmin + 5, max(ymin - 5, 0), '{:s}|{:.2f}'.format(class_name, score), bbox=dict(facecolor=colors[cls_id], alpha=0.5), fontsize=8, color='white')\n img_ax.figure.savefig(file_name, dpi=500)\n if visualize:\n pyplot.show()\n pyplot.close()", "title": "" }, { "docid": "c138525a84b21c7a4d31ca596b32dda0", "score": "0.62131137", "text": "def draw_all_result(self, image):\n for facebox, conf in self.detection_result:\n cv2.rectangle(image, (facebox[0], facebox[1]),\n (facebox[2], facebox[3]), (0, 255, 0))\n label = \"face: %.4f\" % conf\n label_size, base_line = cv2.getTextSize(\n label, 
cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n\n cv2.rectangle(image, (facebox[0], facebox[1] - label_size[1]),\n (facebox[0] + label_size[0],\n facebox[1] + base_line),\n (0, 255, 0), cv2.FILLED)\n cv2.putText(image, label, (facebox[0], facebox[1]),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))", "title": "" }, { "docid": "b2bef76af7bffbf8f72548f93f2ee1d7", "score": "0.6202989", "text": "def draw_bbox(img, bbox):\n x1 = int(bbox[0])\n y1 = int(bbox[1])\n x2 = int(bbox[2])\n y2 = int(bbox[3])\n\n img = cv2.rectangle(img, (x1, y1), (x2, y2), (255, 255, 255))\n return img", "title": "" }, { "docid": "c7a561728c507dc595c48629e0d9ebc7", "score": "0.6189948", "text": "def annotate_image(self):\n cv2.imshow(\"Img\", self.img)\n cv2.setMouseCallback(\"Img\", self.bounding)\n # Var to keep undo stack in order\n deleting = False\n while 1:\n key = cv2.waitKey(1) & 0xFF\n if key == ord('z'):\n if len(self.boxes):\n self.box_stack.append(self.boxes[-1])\n del self.boxes[-1]\n deleting = True\n if key == ord('y'):\n if deleting:\n deleting = False\n self.boxes.append(self.box_stack[-1])\n del self.box_stack[-1]\n if key == ord('q'):\n utils.append_to_json(self.json_path, self.boxes, 0)\n cv2.destroyAllWindows()", "title": "" }, { "docid": "eed441b35960f273840034f081947e57", "score": "0.6189792", "text": "def draw_boxes(img, boxes,labels, thickness=1):\n \n for box,label in zip(boxes,labels):\n box = [int(x) for x in box]\n if label == 2:\n color = (0,225,0) # green\n elif label == 1:\n color = (0,0,225) # red\n cv2.rectangle(img, (box[0],box[1]),(box[2],box[3]),color,thickness)\n return img", "title": "" }, { "docid": "e205256fb19d12dcfc2e7eddf5902bb7", "score": "0.61831754", "text": "def draw(self, image):\n\t\tim = np.copy(image)\n\n\t\t# rectangle around detection\n\t\tim = cv2.line(im, self.points[0], self.points[1], (0, 0xff, 0), 2)\n\t\tim = cv2.line(im, self.points[1], self.points[2], (0, 0, 0xff), 2)\n\t\tim = cv2.line(im, self.points[2], self.points[3], (0xff, 0, 0), 2)\n\t\tim = cv2.line(im, self.points[3], self.points[0], (0xff, 0, 0), 2)\n\n\t\tfontface = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX\n\t\ttextsize, _ = cv2.getTextSize (str(self.id), fontface, 1.0, 2)\n\t\tcenter = (\n\t\t\t\tint(self.center[0] - textsize[0] / 2),\n\t\t\t\tint(self.center[1] + textsize[1] / 2))\n\t\t# text with ID in the center of detection\n\t\tim = cv2.putText(im, str(self.id), center, fontface, 1.0, (0xff, 0x99, 0), 2)\n\n\t\treturn im", "title": "" }, { "docid": "4b09daa0254fbcfa0834d64936f590e9", "score": "0.6179993", "text": "def draw(self, image, color):\n for i in range(nb_l):\n if i not in [6,9]:\n l = self.limbs[i]\n if l.valid():\n cv.line(image, tuple([l.p1.x, l.p1.y]), tuple([l.p2.x, l.p2.y]), color, 2)\n if i == 12:\n radius = l.length() * 0.6\n center = tuple([self.pts[0].x, self.pts[0].y])\n cv.circle(image, center, int(radius), color, 2)\n \n b_limbs = self.get_body_limbs()\n if self.all_limbs_valid(b_limbs):\n b_points = list()\n for bl in b_limbs:\n b_points.append([bl.p2.x, bl.p2.y])\n cv.polylines(image, np.int32([b_points]), True, color, 2)", "title": "" }, { "docid": "004d03e4672b89c95c351a167f3d640c", "score": "0.6164947", "text": "def overlay_boxes(self, image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n \n colors = self.compute_colors_for_labels(labels).tolist()\n\n for box, color in zip(boxes, colors):\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), 
tuple(color), 3\n )\n\n return image", "title": "" }, { "docid": "a35251feba9a7491dc759a3a05db66f4", "score": "0.6148471", "text": "def plt_bboxes(img, classes, scores, bboxes, figsize=(10,10), linewidth=1.5):\r\n height = img.shape[0]\r\n width = img.shape[1]\r\n colors = dict()\r\n for _ in range(classes.shape[0]):\r\n cls_id = int(classes[i])\r\n if cls_id >= 0:\r\n score = scores[i]\r\n if cls_id not in colors:\r\n colors[cls_id] = (random.random(), random.random(), random.random())\r\n\r\n xmin = int(bboxes[i, 0] * width)\r\n ymin = int(bboxes[i, 1] * height)\r\n xmax = int(bboxes[i, 2] * width)\r\n ymax = int(bboxes[i, 3] * height)\r\n img = cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0))\r\n return img", "title": "" }, { "docid": "9175129ada058f76aaaf008cd88184fa", "score": "0.6147569", "text": "def display_bboxes(image, stats, color=(0, 0, 255), width=3):\n image_to_show = image.copy() # TODO: combine display_bboxes() and save_bboxes\n for stat in stats:\n p_1 = (stat[0], stat[1])\n p_2 = (stat[0] + stat[2], stat[1] + stat[3])\n cv2.rectangle(image_to_show, p_1, p_2, color, width)\n utils.show_image(image_to_show)", "title": "" }, { "docid": "88e68c25ce386cb7e147448cc2f6dca8", "score": "0.6139505", "text": "def _py_draw_rectangles(image,\n boxes,\n scores,\n labels,\n color=GREEN,\n thickness=1,\n fontscale=1.0):\n height, width, _ = image.shape\n\n canvas = image.copy()\n for i, (box, score, label) in enumerate(zip(boxes, scores, labels)):\n label = label.decode('UTF8') if isinstance(label, bytes) else label\n if label and score > -1000:\n text = '%s: %.3lf' % (label, score)\n elif score > -1000:\n text = '%.3lf' % (score)\n else:\n text = label\n (text_w, text_h), baseline = cv2.getTextSize(text, _FONTFACE, fontscale,\n thickness)\n\n ymin, xmin, ymax, xmax = box\n ymin, xmin, ymax, xmax = (int(height * ymin + 0.5), int(width * xmin + 0.5),\n int(height * ymax + 0.5), int(width * xmax + 0.5))\n\n cv2.rectangle(\n canvas,\n pt1=(xmin, ymin),\n pt2=(xmax, ymax),\n color=color,\n thickness=thickness)\n if text:\n cv2.rectangle(\n canvas,\n pt1=(xmin + thickness, ymin + thickness),\n pt2=(xmin + thickness + text_w, ymin + thickness + text_h),\n color=color,\n thickness=-1)\n text_color = BLACK if color != BLACK else WHITE\n cv2.putText(\n canvas,\n text,\n org=(xmin, ymin + text_h),\n fontFace=_FONTFACE,\n fontScale=fontscale,\n color=text_color,\n thickness=thickness)\n return canvas", "title": "" }, { "docid": "9255a5ff8714c4972fb8541137752bb1", "score": "0.61385214", "text": "def draw_rectangles(img, rectangles: Iterable[Rectangle], color=(0, 0, 255), thick=6) -> ndarray:\n imcopy = np.copy(img)\n for bbox in rectangles:\n cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)\n return imcopy", "title": "" }, { "docid": "4b94b816a655bb393761b0a2c29311cb", "score": "0.6130028", "text": "def plot_detections(im, boxes, gt_bounds=[],\n scores=[], classes=[], outfile='', plot_thresh=0.3,\n color_dict={},\n gt_color = (0, 255, 255),\n plot_line_thickness=2, show_labels=True,\n label_alpha_scale=0.85, compression_level=9,\n alpha_scaling=True, show_plots=False, skip_empty=False,\n test_box_rescale_frac=1,\n label_txt=None,\n draw_rect=True, draw_circle=False,\n verbose=False, super_verbose=False):\n\n ##################################\n # label settings\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_size = 0.3\n font_width = 1\n display_str_height = 3\n # upscale plot_line_thickness\n plot_line_thickness *= test_box_rescale_frac\n ##################################\n\n if 
verbose:\n print(\"color_dict:\", color_dict)\n output = im\n h, w = im.shape[:2]\n nboxes = 0\n\n # scale alpha with prob can be extremely slow since we're overlaying a\n # a fresh image for each box, need to bin boxes and then plot. Instead,\n # bin the scores, then plot\n\n # if alpha scaling, bin by scores\n if alpha_scaling:\n # if alpha scaling, bin by scores\n if verbose:\n print(\"Binning scores in plot_rects()...\")\n # https://docs.scipy.org/doc/numpy/reference/generated/numpy.digitize.html\n # define a step between 0.25 and 0.95\n bins = np.linspace(0.2, 0.95, 7)\n # bins = np.linspace(0, 1.0, 11) # define a step of 0.1 between 0 and 1?\n # clip scores to highest value of bins\n scores_clip = np.clip(scores, 0, np.max(bins))\n # bin that each element belongs to\n inds = np.digitize(scores_clip, bins, right=True)\n unique_inds = np.sort(np.unique(inds))\n for bin_ind in unique_inds:\n\n # overlay for boxes and labels, respectively\n overlay = np.zeros(im.shape).astype(\n np.uint8) # overlay = im_raw.copy()\n overlay1 = np.zeros(im.shape).astype(np.uint8)\n\n alpha_val = bins[bin_ind]\n\n boxes_bin = boxes[bin_ind == inds]\n scores_bin = scores_clip[bin_ind == inds]\n classes_bin = classes[bin_ind == inds]\n\n if verbose:\n print(\"bin_ind:\", bin_ind)\n print(\"alpha_val:\", alpha_val)\n print(\"scores_bin.shape:\", scores_bin.shape)\n\n alpha = alpha_val\n\n # for labels, if desired, make labels a bit dimmer\n alpha_prime = max(min(bins), label_alpha_scale * alpha)\n # add boxes\n for box, score, classy in zip(boxes_bin, scores_bin, classes_bin):\n\n if score >= plot_thresh:\n nboxes += 1\n [xmin, ymin, xmax, ymax] = box\n # [ymin, xmin, ymax, xmax] = box # orig from github, don't know why we reversed order!...\n left, right, top, bottom = xmin, xmax, ymin, ymax\n\n # check boxes\n if (left < 0) or (right > (w-1)) or (top < 0) or (bottom > (h-1)):\n print(\"box coords out of bounds...\")\n print(\" im.shape:\", im.shape)\n print(\" left, right, top, bottom:\",\n left, right, top, bottom)\n return\n\n if (right < left) or (bottom < top):\n print(\"box coords reversed?...\")\n print(\" im.shape:\", im.shape)\n print(\" left, right, top, bottom:\",\n left, right, top, bottom)\n return\n\n # get label and color\n classy_str = str(classy) + ': ' + \\\n str(int(100*float(score))) + '%'\n color = color_dict[classy]\n\n if super_verbose:\n #print (\" box:\", box)\n print(\" left, right, top, bottom:\",\n left, right, top, bottom)\n print(\" classs:\", classy)\n print(\" score:\", score)\n print(\" classy_str:\", classy_str)\n print(\" color:\", color)\n\n # add rectangle to overlay\n if draw_rect:\n cv2.rectangle(\n overlay, (int(left), int(bottom)),\n (int(right), int(top)), color,\n plot_line_thickness,\n lineType=1) # cv2.CV_AA)\n if draw_circle:\n d = max(abs(left-right), abs(top-bottom))\n r = int(d/2.0)\n cx, cy = int((left+right)/2.0), int((top+bottom)/2.0)\n cv2.circle(overlay, (cx, cy), r, color, plot_line_thickness, lineType=2)\n\n\n # plot categories too?\n if show_labels:\n # adapted from visuatlizion_utils.py\n # get location\n display_str = classy_str # or classy, whch is '1 = airplane'\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n #display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n total_display_str_height = (\n 1 + 2 * 0.05) * display_str_height\n if top > 
total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n (text_width, text_height), _ = cv2.getTextSize(\n display_str, fontFace=font, fontScale=font_size,\n thickness=font_width) # 5, 5#font.getsize(display_str)\n margin = np.ceil(0.1 * text_height)\n\n # get rect and text coords,\n rect_top_left = (int(left - (plot_line_thickness - 1) * margin),\n int(text_bottom - text_height - (plot_line_thickness + 3) * margin))\n rect_bottom_right = (int(left + text_width + margin),\n int(text_bottom - (plot_line_thickness * margin)))\n text_loc = (int(left + margin),\n int(text_bottom - (plot_line_thickness + 2) * margin))\n\n # plot\n # if desired, make labels a bit dimmer\n if draw_rect:\n cv2.rectangle(overlay1, rect_top_left, rect_bottom_right,\n color, -1)\n cv2.putText(overlay1, display_str, text_loc,\n font, font_size, (0, 0, 0), font_width,\n # cv2.CV_AA)\n cv2.LINE_AA)\n\n # for the bin, combine overlay and original image\n overlay_alpha = (alpha * overlay).astype(np.uint8)\n if verbose:\n print(\"overlay.shape:\", overlay.shape)\n print(\"overlay_alpha.shape:\", overlay_alpha.shape)\n print(\"overlay.dtype:\", overlay.dtype)\n print(\"min, max, overlay\", np.min(overlay), np.max(overlay))\n #print (\"output.shape:\", output.shape)\n #print (\"output.dtype:\", output.dtype)\n # simply sum the two channels?\n # Reduce the output image where the overaly is non-\n # to use masks, see https://docs.opencv.org/3.1.0/d0/d86/tutorial_py_image_arithmetics.html\n overlay_gray = cv2.cvtColor(overlay, cv2.COLOR_BGR2GRAY)\n yup = np.nonzero(overlay_gray)\n output_tmp = output.astype(float)\n output_tmp[yup] *= (1.0 - alpha)\n output = cv2.add(output_tmp.astype(np.uint8), overlay_alpha)\n\n # add labels, if desired\n if show_labels:\n overlay_alpha1 = (alpha_prime * overlay1).astype(np.uint8)\n overlay_gray1 = cv2.cvtColor(overlay1, cv2.COLOR_BGR2GRAY)\n yup = np.nonzero(overlay_gray1)\n output_tmp = output.astype(float)\n output_tmp[yup] *= (1.0 - alpha_prime)\n output = cv2.add(output_tmp.astype(np.uint8), overlay_alpha1)\n\n # no alpha scaling, much simpler to plot\n else:\n \n for box, score, classy in zip(boxes, scores, classes):\n \n if score >= plot_thresh:\n nboxes += 1\n [xmin, ymin, xmax, ymax] = box\n # [ymin, xmin, ymax, xmax] = box\n left, right, top, bottom = xmin, xmax, ymin, ymax\n\n # get label and color\n classy_str = str(classy) + ': ' + \\\n str(int(100*float(score))) + '%'\n color = color_dict[classy]\n\n if verbose:\n #print (\" box:\", box)\n print(\" left, right, top, bottom:\",\n left, right, top, bottom)\n print(\" classs:\", classy)\n print(\" score:\", score)\n\n # add rectangle\n if draw_rect:\n cv2.rectangle(output, (int(left), int(bottom)), (int(right),\n int(top)), color,\n plot_line_thickness)\n if draw_circle:\n d = max(abs(left-right), abs(top-bottom))\n r = int(d/2.0)\n cx, cy = int((left+right)/2.0), int((top+bottom)/2.0)\n cv2.circle(output, (cx, cy), r, color, plot_line_thickness, lineType=2)\n \n # plot categories too?\n if show_labels:\n # adapted from visuatlizion_utils.py\n # get location\n display_str = classy_str # or classy, whch is '1 = airplane'\n # If the total height of the display strings added to the top of the bounding\n # box exceeds the top of the image, stack the strings below the bounding box\n # instead of above.\n #display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]\n # Each display_str has a top and bottom margin of 0.05x.\n 
total_display_str_height = (\n 1 + 2 * 0.05) * display_str_height\n if top > total_display_str_height:\n text_bottom = top\n else:\n text_bottom = bottom + total_display_str_height\n # Reverse list and print from bottom to top.\n (text_width, text_height), _ = cv2.getTextSize(display_str,\n fontFace=font,\n fontScale=font_size,\n thickness=font_width) # 5, 5#font.getsize(display_str)\n margin = np.ceil(0.1 * text_height)\n\n # get rect and text coords,\n rect_top_left = (int(left - (plot_line_thickness - 1) * margin),\n int(text_bottom - text_height - (plot_line_thickness + 3) * margin))\n rect_bottom_right = (int(left + text_width + margin),\n int(text_bottom - (plot_line_thickness * margin)))\n text_loc = (int(left + margin),\n int(text_bottom - (plot_line_thickness + 2) * margin))\n\n # annoying notch between label box and bounding box,\n # caused by rounded lines, so if\n # alpha is high, move everything down a smidge\n if (not alpha_scaling) or ((alpha > 0.75) and (plot_line_thickness > 1)):\n rect_top_left = (rect_top_left[0], int(\n rect_top_left[1] + margin))\n rect_bottom_right = (rect_bottom_right[0], int(\n rect_bottom_right[1] + margin))\n text_loc = (text_loc[0], int(text_loc[1] + margin))\n\n if draw_rect:\n cv2.rectangle(output, rect_top_left, rect_bottom_right,\n color, -1)\n cv2.putText(output, display_str, text_loc,\n font, font_size, (0, 0, 0), font_width,\n # cv2.CV_AA)\n cv2.LINE_AA)\n \n # plot gt if desired \n if len(gt_bounds) > 0:\n # print(\"plotting gt bounds:\",)\n plot_line_thickness_gt = 1 # plot_line_thickness\n for gt_bound in gt_bounds:\n # print(\"gt_bound:\", gt_bound)\n [ymin, xmin, ymax, xmax] = gt_bound\n left, right, top, bottom = xmin, xmax, ymin, ymax\n # add rectangle\n if draw_rect:\n cv2.rectangle(output, (int(left), int(bottom)), (int(right),\n int(top)), gt_color,\n plot_line_thickness_gt)\n if draw_circle:\n d = max(abs(left-right), abs(top-bottom))\n r = int(d/2.0)\n cx, cy = int((left+right)/2.0), int((top+bottom)/2.0)\n cv2.circle(output, (cx, cy), r, gt_color, plot_line_thickness_gt, lineType=4)\n\n # resize, if desired\n if test_box_rescale_frac != 1:\n height, width = output.shape[:2]\n output = cv2.resize(output, (width/test_box_rescale_frac, height/test_box_rescale_frac),\n interpolation=cv2.INTER_CUBIC)\n\n # add label if desired\n if label_txt:\n text_loc_label = (10, 20)\n cv2.putText(output, label_txt, text_loc_label,\n font, 2*font_size, (0, 0, 0), font_width,\n # cv2.CV_AA)\n cv2.LINE_AA) \n \n if skip_empty and nboxes == 0:\n return\n else:\n if verbose:\n print(\"Saving plot to:\", outfile)\n cv2.imwrite(outfile, output, [\n cv2.IMWRITE_PNG_COMPRESSION, compression_level])\n\n if show_plots:\n # plt.show()\n cmd = 'eog ' + outfile + '&'\n os.system(cmd)\n\n return", "title": "" }, { "docid": "c5c93e1e16426ff49d3926b3eec4e54a", "score": "0.6129289", "text": "def drawObjects(image, objects, color=COLOR_YELLOW, thickness=2):\n\n if len(objects) == 0:\n return image\n exit()\n\n image = Image.fromarray(image)\n draw = ImageDraw.Draw(image)\n for (x1, y1, x2, y2) in objects:\n for i in range(thickness):\n draw.rectangle([x1+i, y1+i, x2-i, y2-i], outline=color)\n\n del draw\n return np.asarray(image)", "title": "" }, { "docid": "a3cd4b6e683bc29955a52df662c892f6", "score": "0.6124588", "text": "def draw_labeled_bboxes(self, img, labels):\n\n for car_number in range(1, labels[1]+1):\n #Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n #Identify x and y values of those pixels\n nonzeroy = 
np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n #Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n #Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)\n #Return the image\n return img", "title": "" }, { "docid": "d9eded078236b9baf2e90d9e7b711a6f", "score": "0.6119242", "text": "def _draw_bounding_boxes(self):\n for object in self.env_objects:\n object.draw_bounding_box()", "title": "" }, { "docid": "f9eea14d220c02f2aad0fa2a6fce8ad0", "score": "0.61156416", "text": "def paint_boxes(file: [BufferedReader, bytes], face_boxes: dict) -> BytesIO:\n conf = DrawConfig\n font_file: str = conf.FONT_FILE\n font_size: int = conf.FONT_SIZE\n colour_f: tuple = conf.COLOUR_FONT\n colour_r: tuple = conf.COLOUR_RECTANGLE\n x = conf.FONT_X_STEP_POS\n y = conf.FONT_Y_STEP_POS\n img = Image.open(file) if isinstance(file, BufferedReader) else Image.open(BytesIO(file))\n draw = ImageDraw.Draw(img)\n font = ImageFont.truetype(font=font_file, size=font_size)\n for face_box in face_boxes[\"faceBoxes\"]:\n face_number = str(face_box[\"i\"])\n height = face_box[\"h\"]\n _l = face_box[\"l\"]\n t = face_box[\"t\"]\n draw.rectangle(((_l, t), (_l + height, t + height)), outline=colour_r, width=3)\n draw.text(((_l*2+height + x)/2, t+height + y), str(face_number), fill=colour_f, font=font)\n img_byte_arr = BytesIO()\n img.save(img_byte_arr, format=\"png\")\n del conf\n return img_byte_arr", "title": "" }, { "docid": "48b6bb0f7f237cb0aafaa055e8fcf5c3", "score": "0.6111508", "text": "def render_bounding_boxes(boxes,scores,classes,inputFileNames,outputFileNames=[],confidenceThreshold=0.9):\n\n nImages = len(inputFileNames)\n iImage = 0\n\n for iImage in range(0,nImages):\n\n inputFileName = inputFileNames[iImage]\n\n if iImage >= len(outputFileNames):\n outputFileName = ''\n else:\n outputFileName = outputFileNames[iImage]\n\n if len(outputFileName) == 0:\n name, ext = os.path.splitext(inputFileName)\n outputFileName = \"{}.{}{}\".format(name,'_detections',ext)\n\n image = mpimg.imread(inputFileName)\n iBox = 0; box = boxes[iImage][iBox]\n dpi = 100\n s = image.shape; imageHeight = s[0]; imageWidth = s[1]\n figsize = imageWidth / float(dpi), imageHeight / float(dpi)\n\n fig = plt.figure(figsize=figsize)\n ax = plt.axes([0,0,1,1])\n \n # Display the image\n ax.imshow(image)\n ax.set_axis_off()\n \n # plt.show()\n for iBox,box in enumerate(boxes[iImage]):\n\n score = scores[iImage][iBox]\n if score < confidenceThreshold:\n continue\n\n # top, left, bottom, right \n #\n # x,y origin is the upper-left\n topRel = box[0]\n leftRel = box[1]\n bottomRel = box[2]\n rightRel = box[3]\n \n x = leftRel * imageWidth\n y = topRel * imageHeight\n w = (rightRel-leftRel) * imageWidth\n h = (bottomRel-topRel) * imageHeight\n \n # Location is the bottom-left of the rect\n #\n # Origin is the upper-left\n iLeft = x\n iBottom = y\n rect = patches.Rectangle((iLeft,iBottom),w,h,linewidth=2,edgecolor='r',facecolor='none')\n \n # Add the patch to the Axes\n ax.add_patch(rect) \n\n # ...for each box\n\n # This is magic goop that removes whitespace around image plots (sort of) \n plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)\n plt.margins(0,0)\n ax.xaxis.set_major_locator(ticker.NullLocator())\n ax.yaxis.set_major_locator(ticker.NullLocator())\n ax.axis('tight')\n ax.set(xlim=[0,imageWidth],ylim=[imageHeight,0],aspect=1)\n plt.axis('off') \n\n # plt.savefig(outputFileName, 
bbox_inches='tight', pad_inches=0.0, dpi=dpi, transparent=True)\n plt.savefig(outputFileName, dpi=dpi, transparent=True)\n # os.startfile(outputFileName)\n\n # ...for each image", "title": "" }, { "docid": "743b798dd2e2deb1901fbbc514c4b222", "score": "0.6093209", "text": "def __display_lines(self, image, lines, rectangle):\n\n line_image = np.zeros_like(image)\n # Draw lines\n if lines is not None:\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n cv2.line(line_image, (x1, y1), (x2, y2), (0, 255, 0), 4)\n\n # Draw a point at the current position (in blue)\n line_image = cv2.circle(\n line_image, (self.current_position, 20), 5, (255, 0, 0), -1)\n\n # Draw a rectangle around the stop-sign if it is found\n if rectangle is not None:\n cv2.rectangle(line_image, rectangle[0], rectangle[1], 255, 3)\n\n return line_image", "title": "" }, { "docid": "ef3ae7b45cc63958240d3e333df68f7b", "score": "0.60901845", "text": "def draw_box(image, boxes, box_color=(255, 255, 255)):\n for box in boxes:\n cv2.rectangle(image,\n (box[0], box[1]),\n (box[2], box[3]), box_color, 3)", "title": "" }, { "docid": "e99b540210cdfac23bb57b9c0005e16d", "score": "0.6081111", "text": "def draw_boxes(image,box_array,labels,net_w,net_h):\n for ind_class in range(len(labels)):\n box_array_class = box_array[ind_class]\n if box_array_class.shape[0]:\n for ind_col in range(box_array_class.shape[0]):\n # draw anchors box\n cv2.rectangle(image, (int(box_array_class[ind_col,0]*net_w),\n int(box_array_class[ind_col,1]*net_h)),\n (int(box_array_class[ind_col,2]*net_w),\n int(box_array_class[ind_col,3]*net_h)),\n (0,255,0), 1)\n # draw class name and probability\n cv2.putText(image,\n labels[ind_class] + ' ' + \"{:.3f}\".format(box_array_class[ind_col,4]*100) + '%',\n (int(box_array_class[ind_col,0]*net_w),\n int(box_array_class[ind_col,1]*net_h) - 5),\n cv2.FONT_HERSHEY_SIMPLEX,\n 2e-3 * image.shape[0],\n (0,255,0), 1)\n else:\n continue\n return image", "title": "" }, { "docid": "d935218ec8cf002d2ad0c0fde7d55587", "score": "0.60751", "text": "def drawtextrectangle(json_response, image):\n\n texts = json_response['responses'][0]['textAnnotations']\n\n for text in texts:\n box = [(v.get('x', 0.0), v.get('y', 0.0)) for v in text['boundingPoly']['vertices']]\n cv2.rectangle(image, box[0], box[2], (0, 255, 0), 2)\n\n return image", "title": "" }, { "docid": "67c40b967882cf24c53caf4e389481dc", "score": "0.6073217", "text": "def image_with_bounding_boxes(self, file_id: int) -> \"PIL.Image\":\n file_bytes = self._cognite_client.files.download_bytes(id=file_id)\n\n try:\n import numpy as np\n from bounding_box import bounding_box as bb\n from pdf2image import convert_from_bytes\n from PIL import Image\n except (ImportError, ModuleNotFoundError) as e:\n warnings.warn(\n f\"Module {e.name} missing, 'pip install Pillow numpy bounding_box pdf2image' for advanced visualization of results\"\n )\n raise\n\n def draw_bbox(pnid_img):\n img_arr = np.array(pnid_img)\n height, width = img_arr.shape[:-1]\n img_arr_copy = img_arr[:, :, ::-1].copy()\n for detected_item in self.data:\n bbox = detected_item.bounding_box\n label = detected_item.text\n bb.add(\n img_arr_copy,\n int(bbox[\"xMin\"] * width),\n int(bbox[\"yMin\"] * height),\n int(bbox[\"xMax\"] * width),\n int(bbox[\"yMax\"] * height),\n label,\n \"red\",\n )\n return Image.fromarray(img_arr_copy[:, :, ::-1])\n\n try:\n return draw_bbox(convert_from_bytes(file_bytes)[0])\n except Exception:\n return None", "title": "" }, { "docid": "505bab484880329e5aa1a0ab481555be", "score": "0.6034857", 
"text": "def draw_labeled_bboxes(img, labels):\n for car_number in range(1, labels[1]+1):\n nonzero = (labels[0] == car_number).nonzero()\n nonzero_y = np.array(nonzero[0])\n nonzero_x = np.array(nonzero[1])\n\n bbox = ((np.min(nonzero_x), np.min(nonzero_y)), (np.max(nonzero_x), np.max(nonzero_y)))\n area = (bbox[1][0] - bbox[0][0]) * (bbox[1][1] - bbox[0][1])\n if area > 3200:\n cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)\n\n return img", "title": "" }, { "docid": "2f8960bd5d46c113dd3160ae941a3587", "score": "0.6032789", "text": "def draw_boxes(img: np.ndarray,\n boxes: np.ndarray) -> List[np.ndarray]:\n to_merge = []\n for box in boxes:\n if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(\n box[3] - box[0]) < 5:\n continue\n\n x = int(box[0])\n y = int(box[1])\n w = int(box[6]) - int(box[0])\n h = int(box[7]) - int(box[1])\n custombox = Box(x, y, w, h)\n to_merge.append(custombox)\n\n vertical_threshold = round(img.shape[0] * 0.05)\n horizontal_threshold = round(img.shape[1] * 0.05)\n merged_boxes = merge_boxes(to_merge,\n v_thresh=vertical_threshold,\n h_thresh=horizontal_threshold)\n\n cropped_images = []\n for custombox in merged_boxes:\n custombox = custombox.scale(fx=1.15, fy=1.15, max_height=img.shape[0],\n max_width=img.shape[1])\n cropped = img[\n custombox.top_left_y:custombox.top_left_y + custombox.height,\n custombox.top_left_x:custombox.top_left_x + custombox.width]\n cropped_images.append(cropped)\n\n return cropped_images", "title": "" }, { "docid": "aa6b8a1a18d5208665e67c9c86b5672c", "score": "0.6022178", "text": "def draw_boxes_and_label_on_image_cv2(img, class_label_map, class_boxes_map):\n # for c, boxes in class_boxes_map.items():\n # for box in boxes:\n # assert len(box) == 5, 'class_boxes_map every item must be [bb_left, bb_top, bb_width, bb_height, prob]'\n # # checking box order is bb_left, bb_top, bb_width, bb_height\n # # make sure all box should be int for OpenCV\n # bb_left = int(box[0])\n # bb_top = int(box[1])\n # bb_width = int(box[2])\n # bb_height = int(box[3])\n #\n # # 类标签得到不一样的颜色\n # unique_color = _create_unique_color_uchar(c)\n # #画边界\n # cv2.rectangle(img, (bb_left, bb_top), (bb_width, bb_height), unique_color, 2)\n # #类别+概率\n # prob = round(box[4], 2)\n # text_label = '{} {}'.format(class_label_map[c], prob)\n # text_org = (bb_left, bb_top - 0)\n # #加文字\n # cv2.putText(img, text_label, text_org, cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n\n for c,box in enumerate(class_boxes_map):\n assert len(box) == 4, 'class_boxes_map every item must be [bb_left, bb_top, bb_width, bb_height]'\n # checking box order is bb_left, bb_top, bb_width, bb_height\n # make sure all box should be int for OpenCV\n bb_left = int(box[0])\n bb_top = int(box[1])\n bb_width = int(box[2])\n bb_height = int(box[3])\n\n # 类标签得到不一样的颜色\n unique_color = _create_unique_color_uchar(c)\n # 画边界\n cv2.rectangle(img, (bb_left, bb_top), (bb_width, bb_height), unique_color, 2)\n return img", "title": "" }, { "docid": "9f4fa3a4677cc69a5aa2271bfc4459f6", "score": "0.60137194", "text": "def draw( im_path, dets, thresh=0.1):\n im = Image.open(im_path)\n if len(dets) == 0:\n return im\n draw = ImageDraw.Draw(im)\n for i in dets:\n if i[-1]<thresh:continue\n bbox = i[:4]\n score = i[-1]\n draw.rectangle(list(bbox.astype(np.int32)))\n draw.text(list(bbox[:2].astype(int)), str(score))\n return im", "title": "" }, { "docid": "1939559f1337102c33271bb08c335c1e", "score": "0.6013055", "text": "def main():\n #takephoto() # First take a picture\n\n credentials = 
GoogleCredentials.get_application_default()\n service = discovery.build('vision', 'v1', credentials=credentials)\n images = {}\n\n\n with open('title_crop.png', 'rb') as image:\n image_content = base64.b64encode(image.read())\n service_request = service.images().annotate(body={\n 'requests': [{\n 'image': {\n 'content': image_content.decode('UTF-8')\n },\n 'features': [{\n 'type': 'TEXT_DETECTION',\n 'maxResults': 10\n }]\n }]\n })\n\n\n responses = service_request.execute()\n #\n # print json.dumps(response, indent=1, sort_keys=True)\t#Print it out and make it somewhat pretty.\n #print responses['responses']\n\n ocr_response = []\n for response in responses['responses']:\n if 'fullTextAnnotation' in response:\n ocr_response= response['fullTextAnnotation']\n else:\n ocr_response = []\n\n #drill down to the fisrt textAnnotation element, description filed\n\n #print json.dumps(text_response,indent=1,sort_keys=True)\n\n print ocr_response['text']\n\n #y coordinate of bounding box\n print ocr_response['pages'][0]['blocks'][0]['boundingBox']['vertices'][0]['y']\n print ocr_response['pages'][0]['blocks'][0]['boundingBox']['vertices'][0]['x']\n\n '''\n # access x coordinate of bouinding polygon\n #print text_response[0]['boundingPoly']['vertices'][0]['x']\n\n x1=text_response[0]['boundingPoly']['vertices'][0]['x']\n y1=text_response[0]['boundingPoly']['vertices'][0]['y']\n\n print x1,y1\n\n x2 = text_response[0]['boundingPoly']['vertices'][1]['x']\n y2 = text_response[0]['boundingPoly']['vertices'][1]['y']\n\n print x2,y2\n '''", "title": "" }, { "docid": "2120aecd7727efab6578fd643da388ed", "score": "0.60103846", "text": "def draw_boxes_and_write(image_path, boxes, classname, thickness_box=4):\n\n image = Image.open(image_path)\n\n draw = ImageDraw.Draw(image)\n\n font = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 12)\n\n for i in range(len(boxes)):\n bot, left, top, right = boxes[i, ...]\n class_id = int(classes[i])\n color = COLOR_LIST[class_id]\n draw.line([(left, top), (left, bot), (right, bot), (right, top), (left, top)], width=thickness_box, fill=color)\n\n\n# draw.text((0, 0), str(int(scores[i] * 100)) + \": \" + classname[i] ,(255,255,255), font=font)\n\n draw.text((left, bot), str(int(scores[i] * 100)) + \"%: \" + classname[i] ,color, font=font)\n\n output_file_path = DEBUGDIR + \"/\" + image_path.split(\"/\")[-1]\n print(\"output debug \" + output_file_path)\n #plt.imsave(fname=output_file_path, arr=image, format=\"png\")\n\n image.close()", "title": "" }, { "docid": "80d3fdbb92cf3aa42a368ea8a0d4dbf1", "score": "0.60097766", "text": "def _draw(self, frame, boxes, probs, landmarks,image_name,Dest_Folder):\n # try:\n count = 0\n for box, prob, ld in zip(boxes, probs, landmarks):\n count += 1\n self.crop_eyes(ld,frame,count,image_name,Dest_Folder)\n\n # Draw rectangle on frame\n # cv2.rectangle(frame,\n # (box[0], box[1]),\n # (box[2], box[3]),\n # (0, 0, 255),\n # thickness=2)\n #\n # # Show probability\n # cv2.putText(frame, str(\n # prob), (box[2], box[3]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)\n #\n # # Draw landmarks\n # cv2.circle(frame, tuple(ld[0]), 5, (0, 0, 255), -1)\n # cv2.circle(frame, tuple(ld[1]), 5, (0, 0, 255), -1)\n # cv2.circle(frame, tuple(ld[2]), 5, (0, 0, 255), -1)\n # cv2.circle(frame, tuple(ld[3]), 5, (0, 0, 255), -1)\n # cv2.circle(frame, tuple(ld[4]), 5, (0, 0, 255), -1)\n\n\n\n\n\n # except:\n # pass\n\n return frame", "title": "" }, { "docid": "8a09d91c045f3e12f9a6cbe3ce0349ac", "score": "0.6005847", "text": "def 
draw_rectangles(image, rectangles, x, y, winScaleW, winScaleH, eddytype='cyclone'):\n if eddytype=='cyclone': color = (0, 76, 217)\n else: color = (217, 83, 25)\n for r in rectangles:\n cv2.rectangle(image, (r[0], r[1]), (r[2], r[3]), color, 2)\n ctr = ( int( (r[0]+(r[2]-r[0])/2)/winScaleW ), int( (r[1]+(r[3]-r[1])/2)/winScaleH ) )\n if x.ndim > 1: textLabel = \"{} ({:.2f},{:.2f})\".format(eddytype,x[ctr],y[ctr])\n else: textLabel = \"{} ({:.2f},{:.2f})\".format(eddytype,x[ctr[0]],y[ctr[1]])\n (retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,0.5,1)\n textOrg = (r[0], r[1])#+baseLine+15)\n cv2.rectangle(image, (textOrg[0] - 3, textOrg[1]+baseLine - 3), (textOrg[0]+retval[0] + 3, textOrg[1]-retval[1] - 3), (0, 0, 0), 2)\n cv2.rectangle(image, (textOrg[0] - 3, textOrg[1]+baseLine - 3), (textOrg[0]+retval[0] + 3, textOrg[1]-retval[1] - 3), (255, 255, 255), -1)\n cv2.putText(image, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 0.5, (0, 0, 0), 1)", "title": "" }, { "docid": "6c343a5211e5da12bc9ffa3fc28a0239", "score": "0.59925765", "text": "def render_to_image(self, image, thickness=2, circles=True):\n for region_layout in self.regions:\n image = draw_lines(\n image,\n [line.baseline for line in region_layout.lines if line.baseline is not None], color=(0,0,255), circles=(circles, circles, False), thickness=thickness)\n image = draw_lines(\n image,\n [line.polygon for line in region_layout.lines if line.polygon is not None], color=(0,255,0), close=True, thickness=thickness)\n image = draw_lines(\n image,\n [region_layout.polygon], color=(255, 0, 0), circles=(circles, circles, circles), close=True, thickness=thickness)\n return image", "title": "" }, { "docid": "d29061debd1a01a9bdc57e5e33664d48", "score": "0.5992053", "text": "def draw_bbox(image, bbox, label=None, color=None, thickness=5):\n\n xmin = bbox[0]\n ymin = bbox[1]\n xmax = bbox[2]\n ymax = bbox[3]\n\n assert xmin < xmax and ymin < ymax\n\n # Width and height of bbox\n #width = xmax - xmin\n #height = ymax - ymin\n\n text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5)\n text_width, text_height = text_size[0][0], text_size[0][1]\n\n # Draw the rectangle\n region = np.array([[xmin-3, ymin],\n [xmin-3, ymin-text_height-26],\n [xmin+text_width+13, ymin-text_height-26],\n [xmin+text_width+13, ymin]], dtype='int32')\n\n cv2.fillPoly(img=image, pts=[region], color=color)\n cv2.rectangle(img=image, pt1=(xmin, ymin), pt2=(xmax, ymax),\n color=color, thickness=thickness)\n\n # Put the text\n cv2.putText(img=image, text=label, org=(xmin+13, ymin-13),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1e-3 * image.shape[0],\n color=(0,0,0), thickness=2)\n\n return image", "title": "" }, { "docid": "03ca1ddf288921b48c2f7830fe31feb9", "score": "0.5971691", "text": "def _py_draw_rectangles_v2(image,\n total,\n boxes,\n scores,\n labels,\n color=GREEN,\n thickness=1,\n fontscale=1.0,\n show_score=True):\n height, width, _ = image.shape\n\n canvas = image.copy()\n for i, (box, score, label) in enumerate(zip(boxes, scores, labels)):\n if i >= total: break\n\n label = label.decode('UTF8') if isinstance(label, bytes) else label\n text = '%s: %.0lf%%' % (label, score * 100) if show_score else label\n\n (text_w, text_h), baseline = cv2.getTextSize(text, _FONTFACE, fontscale,\n thickness)\n\n ymin, xmin, ymax, xmax = box\n ymin, xmin, ymax, xmax = (int(height * ymin + 0.5), int(width * xmin + 0.5),\n int(height * ymax + 0.5), int(width * xmax + 0.5))\n\n cv2.rectangle(\n canvas,\n pt1=(xmin, ymin),\n 
pt2=(xmax, ymax),\n color=color,\n thickness=thickness * 2)\n cv2.rectangle(\n canvas,\n pt1=(xmin, ymin),\n pt2=(xmin + 2 * thickness + text_w, ymin + 2 * thickness + text_h),\n color=color,\n thickness=-1)\n text_color = BLACK if color != BLACK else WHITE\n cv2.putText(\n canvas,\n text,\n org=(xmin, ymin + text_h),\n fontFace=_FONTFACE,\n fontScale=fontscale,\n color=text_color,\n thickness=thickness)\n return canvas", "title": "" }, { "docid": "90757a30a9fe69e28a77b5e182224f61", "score": "0.59703785", "text": "def get_bbox(im):\n PADDING_SIZE = 0\n info = {}\n list_dict = ['address_line_1','address_line_2', 'birthday', 'hometown_line_1', 'hometown_line_2', 'id', 'name', 'nation', 'sex', 'passport']\n\n # check input\n if im is None:\n return []\n\n target = inference(im)\n \n pts = polygon_from_corners(target)\n # pts = pts.astype(int)\n \n # infs = increase_border(pts, PADDING_SIZE)\n \n for i, inf in enumerate(pts):\n info[list_dict[i]] = inf\n\n # in = [(int(p[0]), int(p[1])) for p in corners]\n \n return info", "title": "" }, { "docid": "ac69d161993942f5e365addf66538109", "score": "0.5969174", "text": "def vis_detections(im, class_name, dets, thresh=0.1, rcnn_boxes):\n inds = np.where(dets[:, -1] >= thresh)[0]\n #inds = np.where(dets[:, -1])[0]\n if len(inds) == 0:\n return\n im = im[:, :, (2, 1, 0)]\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds: #more than one box per image\n bbox = dets[i, :4]\n rcnn_boxes.append(bbox)\n score = dets[i, -1]\n #bbox contain the values we are after\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='red', linewidth=3.5)\n )\n ax.text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n ax.set_title(('{} detections with '\n 'p({} | box) >= {:.1f}').format(class_name, class_name,\n thresh),\n fontsize=14)\n plt.axis('off')\n plt.tight_layout()\n plt.draw()\n return rcnn_boxes", "title": "" }, { "docid": "761db9ca6c4f1035ded287d01613bd7c", "score": "0.59617823", "text": "def render_bounding_boxes(boxes, scores, classes, input_file_names, output_file_names=[],\n confidence_threshold=DEFAULT_CONFIDENCE_THRESHOLD, \n linewidth=DEFAULT_LINE_WIDTH):\n\n n_images = len(input_file_names)\n iImage = 0\n\n for iImage in range(0,n_images):\n\n input_file_name = input_file_names[iImage]\n\n if iImage >= len(output_file_names):\n output_file_name = ''\n else:\n output_file_name = output_file_names[iImage]\n\n if len(output_file_name) == 0:\n name, ext = os.path.splitext(input_file_name)\n output_file_name = \"{}{}{}\".format(name,DETECTION_FILENAME_INSERT,ext)\n\n image = PIL.Image.open(input_file_name).convert(\"RGB\")\n detections = []\n \n for iBox in range(0,len(boxes)):\n \n # Boxes are input to this function as:\n #\n # top, left, bottom, right \n #\n # x,y origin is the upper-left\n #\n # normalized\n #\n # ...and our rendering function needs:\n #\n # left, top, w, h\n #\n # x,y origin is the upper-left\n #\n # normalized\n \n bbox_in = boxes[iImage][iBox]\n bbox = [bbox_in[1],\n bbox_in[0], \n bbox_in[3]-bbox_in[1],\n bbox_in[2]-bbox_in[0]]\n \n detections.append({'category':str(classes[iImage][iBox]),\n 'conf':scores[iImage][iBox],\n 'bbox':bbox})\n \n # ...for each detection\n \n render_detection_bounding_boxes(detections, image,\n confidence_threshold=confidence_threshold, \n thickness=linewidth,\n label_map=bbox_category_str_id_to_name)\n 
image.save(output_file_name)\n \n # ...for each image", "title": "" }, { "docid": "fbe5ad45b269337400d611a63d2f41c3", "score": "0.5960681", "text": "def draw_tracking_box(box, image):\n (x, y, w, h) = [int(v) for v in box]\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)", "title": "" }, { "docid": "aa3de1dc244a64c1f6fff380fc5e6f4e", "score": "0.59551674", "text": "def vis_detections(imgid, im, dets, thresh=0.5):\n class_name = 'face'\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return\n im = im[:, :, (2, 1, 0)]\n print (len(inds))\n fig, ax = plt.subplots(figsize=(12, 12))\n ax.imshow(im, aspect='equal')\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n ax.add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='green', linewidth=2.5)\n )\n '''\n ax.text(bbox[0], bbox[1] - 5,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=10, color='white')\n '''\n #ax.set_title(('{} detections with '\n # 'p({} | box) >= {:.1f}').format(class_name, class_name,\n # thresh),\n # fontsize=10)\n plt.axis('off')\n plt.tight_layout()\n plt.savefig('./val_pic_res/'+str(imgid), dpi=fig.dpi)", "title": "" }, { "docid": "9543e3f18423c63fd66baaa7c521eac6", "score": "0.59452343", "text": "def draw(image):\n\n img = image\n\n cv.line(img, (0, 0), (512, 512), (0, 0, 0), 3)\n cv.arrowedLine(img, (0, 512), (200, 300), (255, 255, 255), 3, tipLength=0.1)\n cv.rectangle(img, (200, 200), (300, 300), (0, 255, 0), 3)\n cv.circle(img, (250, 250), 50, (255, 0, 0), 3)\n cv.ellipse(img, (250, 250), (50, 25), 0, 0, 360, (0, 0, 255), 3)\n pts = np.array([[200, 220],\n [200, 280],\n [220, 300],\n [280, 300],\n [300, 280],\n [300, 220],\n [280, 200],\n [220, 200],\n ],\n dtype=np.int32)\n pts = np.reshape(pts, (-1, 1, 2))\n cv.polylines(img, [pts], True, (255, 255, 255), 3)\n font = cv.FONT_HERSHEY_SIMPLEX\n size, _ = cv.getTextSize('TEXT', font, 1, 2)\n x = 250 - size[0]//2\n y = 250 + size[1]//2\n cv.putText(img, 'TEXT', (x, y), font, 1, (255, 255, 255), 2, cv.LINE_AA)", "title": "" }, { "docid": "38003e38f4a309885f6c555b963854be", "score": "0.59435993", "text": "def visualize_detection(self, img_msg, bounding_boxes):\n return draw_labeled_boxes_img_msg(self._cv_bridge, img_msg, bounding_boxes)", "title": "" }, { "docid": "b2f29b24bcb69e8bbfb2e63aaedbc810", "score": "0.59356344", "text": "def draw_bbox(img, box, cls=None, color=(255,0,0) ):\n h,w = img.shape[:2]\n yxyx = box\n yxyx = np.multiply(yxyx, [h,w,h,w])\n yxyx = np.round(yxyx).astype(np.int32)\n y0,x0,y1,x1 = yxyx\n cv2.rectangle(img, (x0,y0), (x1,y1), color, thickness=2)\n if cls is not None:\n tx, ty = max(x0,0), max(y0-(0.1*(y1-y0)), 0)\n txt_box = (tx, ty, 0.5*(x1-x0), 0.1*(y1-y0))\n # org = ( max(x0,0), min(y1,h) )\n put_text(img, txt_box, cls,\n color=color,\n thickness=1 # TODO : auto-thickness\n )\n # org = ( max(x0,0), min(y1,h) )\n # cv2.putText(img, cls, org, \n # cv2.FONT_HERSHEY_SIMPLEX, 1.0, color,\n # 1, cv2.LINE_AA\n # )", "title": "" }, { "docid": "18ebb81abcda9112c4002ebbcd9f1a68", "score": "0.59338385", "text": "def draw_bboxes(img_name,\n bboxes,\n labels,\n context,\n colors,\n width=None,\n class_names=None,\n score_thr=0.5,\n out_file=None):\n assert bboxes.ndim == 2\n assert labels.ndim == 1\n assert bboxes.shape[0] == labels.shape[0]\n assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5\n \n img = imread(img_name)\n img = img.copy()\n \n ori_size = img.shape\n\n width = ori_size[0]\n ratio = 1\n pr 
= ori_size[0]/800\n scores = bboxes[:, -1]\n\n if score_thr > 0.0:\n assert bboxes.shape[1] == 5\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds]\n scores = scores[inds]\n\n pred_num = labels.shape[0]\n context[\"numofPredictions\"] = pred_num\n\n ABC = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n i = 0\n \n for bbox, label, score in zip(bboxes, labels, scores):\n \n pred_cls = class_names[label]\n color = colors[pred_cls]\n box_id = ABC[i]\n \n bbox = bbox*ratio\n bbox_int = bbox.astype(np.int32)\n\n det = {\"bboxId\":box_id, \"score\": float(score) ,\n \"xmin\":int(bbox_int[0]), \"ymin\":int(bbox_int[1]),\n \"xmax\":int(bbox_int[2]), \"ymax\":int(bbox_int[3]) }\n\n context[\"pestTable\"][pred_cls].append(det)\n \n left_top = (bbox_int[0], bbox_int[1])\n right_bottom = (bbox_int[2], bbox_int[3])\n \n cv2.rectangle(img, (left_top[0], left_top[1]),\n (right_bottom[0], right_bottom[1]), color, int(4*pr))\n text_size, baseline = cv2.getTextSize(box_id,\n cv2.FONT_HERSHEY_SIMPLEX, int(1.3*pr), int(2*pr))\n p1 = (left_top[0], left_top[1] + text_size[1])\n cv2.rectangle(img, tuple(left_top), (p1[0] + text_size[0], p1[1]+1 ), color, -1)\n cv2.putText(img, box_id, (p1[0], p1[1]),\n cv2.FONT_HERSHEY_SIMPLEX, int(1.3*pr), (255,255,255), int(2*pr), 8)\n \n i += 1\n \n \n print('done '+ str(out_file))\n \n imwrite(img, out_file)\n \n return context", "title": "" }, { "docid": "8cb9f5ebcea8e5c25591ec42f39c68e5", "score": "0.5932406", "text": "def draw_face_bbox(image, bboxes, probs, points=None):\n draw = image.copy()\n if points is None:\n points = [None] * len(bboxes)\n for bbox, prob, point in zip(bboxes, probs, points):\n cv2.putText(draw, str(prob)[:8], (int(bbox[0]), int(bbox[1]-2)), \\\n cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 255, 0), 1)\n cv2.rectangle(\n draw,\n pt1=(int(bbox[0]), int(bbox[1])),\n pt2=(int(bbox[2]), int(bbox[3])),\n color=(255, 0, 0), # (B, G, R)\n thickness=2)\n if point is not None:\n for idx in range(5):\n cv2.circle(draw, (int(point[idx]), int(point[idx+5])), \\\n 2, (0, 0, 255))\n return draw", "title": "" }, { "docid": "6be9ae88f3eed101217d3ae61525ea7d", "score": "0.5924276", "text": "def visualize_boxes_and_labels_on_image_array(\n image,\n boxes,\n classes,\n scores,\n category_index,\n instance_masks=None,\n instance_boundaries=None,\n keypoints=None,\n use_normalized_coordinates=False,\n max_boxes_to_draw=20,\n min_score_thresh=.9,\n agnostic_mode=False,\n line_thickness=3,\n groundtruth_box_visualization_color='red',\n skip_scores=False,\n skip_labels=False):\n # Create a display string (and color) for every box location, group any boxes\n # that correspond to the same location.\n box_to_display_str_map = collections.defaultdict(list)\n box_to_color_map = collections.defaultdict(str)\n box_to_instance_masks_map = {}\n box_to_instance_boundaries_map = {}\n box_to_keypoints_map = collections.defaultdict(list)\n if not max_boxes_to_draw:\n max_boxes_to_draw = boxes.shape[0]\n for i in range(min(max_boxes_to_draw, boxes.shape[0])):\n if scores is None or scores[i] > min_score_thresh:\n box = tuple(boxes[i].tolist())\n #print(\"610:box:\",box)\n if instance_masks is not None:\n box_to_instance_masks_map[box] = instance_masks[i]\n if instance_boundaries is not None:\n box_to_instance_boundaries_map[box] = instance_boundaries[i]\n if keypoints is not None:\n box_to_keypoints_map[box].extend(keypoints[i])\n if scores is None:\n box_to_color_map[box] = groundtruth_box_visualization_color\n else:\n display_str = ''\n if not skip_labels:\n if not agnostic_mode:\n 
if classes[i] in category_index.keys():\n class_name = category_index[classes[i]]['name']\n else:\n class_name = 'N/A'\n display_str = str(class_name)\n if not skip_scores:\n if not display_str:\n display_str = '{}%'.format(int(100 * scores[i]))\n else:\n display_str = '{}: {}%'.format(display_str,\n int(100 * scores[i]))\n box_to_display_str_map[box].append(display_str)\n if agnostic_mode:\n box_to_color_map[box] = 'DarkOrange'\n else:\n box_to_color_map[box] = STANDARD_COLORS[\n classes[i] % len(STANDARD_COLORS)]\n\n # Draw all boxes onto image.\n for box, color in box_to_color_map.items():\n ymin, xmin, ymax, xmax = box\n if instance_masks is not None:\n draw_mask_on_image_array(image,\n box_to_instance_masks_map[box],\n color=color)\n if instance_boundaries is not None:\n draw_mask_on_image_array(image,\n box_to_instance_boundaries_map[box],\n color='green',\n alpha=0.5)\n draw_bounding_box_on_image_array(\n image,\n ymin,\n xmin,\n ymax,\n xmax,\n color='red',\n thickness=line_thickness,\n display_str_list=\n [], #box_to_display_str_map[box], #CHEN: remove the string for better visualize on hotpsot task\n use_normalized_coordinates=use_normalized_coordinates)\n if keypoints is not None:\n draw_keypoints_on_image_array(\n image,\n box_to_keypoints_map[box],\n color=color,\n radius=line_thickness / 2,\n use_normalized_coordinates=use_normalized_coordinates)\n\n return image", "title": "" }, { "docid": "55685a773db97b0adb7bd6f0587a6405", "score": "0.5923672", "text": "def vis_detections(im, bbox, prob, save):\n if save:\n if not osp.exists(cfg.ROOT_DIR + '/result'):\n os.mkdir(cfg.ROOT_DIR + '/result')\n IMAGE_DIR = cfg.ROOT_DIR + '/result/image'\n if not osp.exists(IMAGE_DIR):\n os.mkdir(IMAGE_DIR)\n TXT_DIR = cfg.ROOT_DIR + '/result/position'\n if not osp.exists(TXT_DIR):\n os.mkdir(TXT_DIR)\n\n plt.cla()\n# plt.figure(figsize=(15,15))\n if im.ndim == 3:\n im = im[:,:,0]\n plt.imshow(im)\n for i in xrange(len(bbox)):\n plt.gca().add_patch(plt.Rectangle((bbox[i][1], bbox[i][2]), (bbox[i][3]-bbox[i][1]), (bbox[i][4]-bbox[i][2]), fill=False, edgecolor='g', linewidth=3) ) \n plt.text(bbox[i][1], bbox[i][2] - 2,'{:.3f}'.format(float(prob[i,1])),fontdict={'size': 12, 'color': 'y'})\n if save:\n with open(osp.join(TXT_DIR, str(cfg.IMAGE_NUMBER)+'.txt'),\"a\") as f:\n new_con = str(np.int_(bbox[i][1])) + ' ' + str(np.int_(bbox[i][2])) + ' ' + str(np.int_(bbox[i][3])) + ' ' + str(np.int_(bbox[i][4])) + '\\n'\n f.write(new_con)\n if save:\n plt.savefig(osp.join(IMAGE_DIR, str(cfg.IMAGE_NUMBER)+'.jpg'),bbox_inches='tight')\n else:\n plt.show()\n print bbox", "title": "" }, { "docid": "aa555c92076a63caabf424e46c0b17a9", "score": "0.59228545", "text": "def set_rects_for_images(self):\r\n for i, digit in enumerate(self.digit_list):\r\n digit.rect = digit.image.get_rect()\r\n digit.rect.x = self.x + (i * 10)\r\n digit.rect.y = self.y", "title": "" }, { "docid": "be3765894ab7507a3de9ef86b8bfbf10", "score": "0.5907353", "text": "def show_detection(image, faces):\n\n for (x, y, w, h) in faces:\n cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 5)\n return image", "title": "" }, { "docid": "1c0a34286ef93f1ed25571569d9f4c9f", "score": "0.590132", "text": "def debug_image(image):\n contours = Vision.find_contours(image)\n bounding_boxes = Vision.group_contours(contours)\n\n for bounding_box in bounding_boxes:\n x, y, w, h = bounding_box\n cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n for contour in contours:\n cv2.drawContours(image, 
[np.int0(cv2.boxPoints(cv2.minAreaRect(contour)))], 0, (0, 0, 255), 2)\n\n return image", "title": "" }, { "docid": "a89e4df6bffbcbed81555606ad70169a", "score": "0.5883086", "text": "def visualize_bbox(img, bbox, w=0, h=0, class_name=None, color=BOX_COLOR, thickness=2, IsNormalize=True):\n if IsNormalize :\n x, y, width, height = bbox\n\n x_min = int((x - width)*w)\n y_min = int((y - height)*h)\n x_max = int((x + width)*w)\n y_max = int((y + height)*h) \n \n else :\n# print(type(bbox))\n# bbox[:,2:] /= 2\n# bbox[:,:2] += bbox[:,2:] \n x_min, y_min, x_max, y_max = list(map(int, bbox))\n# print(x_min, y_min, x_max, y_max)\n\n img = cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)\n \n# ((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1) \n# cv2.rectangle(img, (y_min - int(1.3 * text_height), x_min ), (y_min, x_min + text_width ), BOX_COLOR, -1)\n# cv2.putText(\n# img,\n# text=class_name,\n# org=(x_min, y_min - int(0.3 * text_height)),\n# fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n# fontScale=0.35, \n# color=TEXT_COLOR, \n# lineType=cv2.LINE_AA,\n# )\n return img", "title": "" }, { "docid": "bdf8aa7e92396dcb87de9f7f9d6ce67f", "score": "0.58807", "text": "def plot(image, classified_boxes, window_size):\n fig1 = plt.figure(dpi=400)\n ax1 = fig1.add_subplot(1,1,1) \n ax1.imshow(image, cmap=plt.cm.gray)\n ax1.axis('off')\n for box in classified_boxes:\n x_min, y_min, x_max, y_max = box[0]-.5, box[1]-.5, box[0]+window_size[0]-.5, box[1]+window_size[1]-.5\n prediction, predict_score = box[2], box[3]\n ax1.text(x_min, y_min-3, \"%s %d%%\" % (prediction, predict_score*100), color=\"red\", fontsize=3)\n x = [x_max, x_max, x_min, x_min, x_max]\n y = [y_max, y_min, y_min, y_max, y_max]\n line, = ax1.plot(x,y,color=\"red\")\n line.set_linewidth(.5)\n fig1.savefig(\"classification.png\")\n return", "title": "" }, { "docid": "74a1a65992c4e1220700133260c02902", "score": "0.58757967", "text": "def visualization(image_path, points, label, vis_color = (255,255,255)):\r\n points = np.asarray(points, dtype=np.int32)\r\n points = np.reshape(points, [-1,2])\r\n image = cv2.imread(image_path)\r\n cv2.polylines(image, [points], 1, (0,255,0), 2)\r\n image = Image.fromarray(image)\r\n FONT = ImageFont.truetype(font_path, 20, encoding='utf-8') \r\n DRAW = ImageDraw.Draw(image) \r\n \r\n DRAW.text(points[0], label, vis_color, font=FONT)\r\n return np.array(image)", "title": "" }, { "docid": "dcead320754fab0022c0e8ebf70dec60", "score": "0.5872691", "text": "def _mark_bounding_boxes(self):\n if self._dbg is None:\n return\n\n img = self.working_copy.copy()\n for bbox in self.bounding_boxes:\n x0, y0, x1, y1 = bbox[1]\n img[y0,x0:x1+1] = 0.5\n img[y1,x0:x1+1] = 0.5\n img[y0:y1+1,x0] = 0.5\n img[y0:y1+1,x1] = 0.5\n return img", "title": "" }, { "docid": "ceb4304957c3f44d5480aedb2a1b9d38", "score": "0.5871588", "text": "def draw_labeled_bboxes(img, labels, colors):\n for car_number in range(1, labels[1]+1):\n # Find pixels with each car_number label value\n nonzero = (labels[0] == car_number).nonzero()\n # Identify x and y values of those pixels\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Define a bounding box based on min/max x and y\n bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))\n # Draw the box on the image\n cv2.rectangle(img, bbox[0], bbox[1], colors[car_number - 1], 6)\n # Return the image\n return img", "title": "" }, { "docid": "82fe5ae9011ceb04c3701107ae594ae6", "score": 
"0.5865134", "text": "def show_image_with_boxes(img, objects, calib, show3d=True):\n img1 = np.copy(img) # for 2d bbox\n img2 = np.copy(img) # for 3d bbox\n for obj in objects: # process each object in an image\n if obj.type == 'DontCare': continue # remove 'DontCare' class\n # draw 2d bbox\n cv2.rectangle(img1, (int(obj.xmin), int(obj.ymin)),\n (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)\n\n # calculate 3d bbox for left color cam from P: P2\n box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)\n print(\"********************, \", obj.type)\n # print(\"********************, \", box3d_pts_2d.shape)\n img2 = utils.draw_projected_box3d(img2, box3d_pts_2d)\n\n #  Image.fromarray(img1).show()\n cv2.imshow('2D bounding box in image', cv2.cvtColor(img1, cv2.COLOR_BGR2RGB))\n if show3d:\n # Image.fromarray(img2).show()\n cv2.imshow('3D bounding box in image', cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "title": "" }, { "docid": "a5a10dfa26945d787b0934addd9b6b8f", "score": "0.5861558", "text": "def plot_boxes(img,boxes,scores,classes,class_names,args):\n for i in range(len(boxes)):\n x1y1 = boxes[i][0:2][::-1]\n x2y2 = boxes[i][2:4][::-1]\n plot_one_box(img,[x1y1[0],x1y1[1],x2y2[0],x2y2[1]],(255,0,255),label=str(\"%s:%0.2f\" %(class_names[classes[i]],scores[i])))\n return img", "title": "" }, { "docid": "8d22108a117b5458196c9bde864d7469", "score": "0.5861478", "text": "def process_image(image):\n result = draw_lane_lines(image)\n print(result.shape)\n return result", "title": "" }, { "docid": "51f39a20e56d30c68386a96d014f7603", "score": "0.5860227", "text": "def get_image(img, lines):\n for x1, y1, x2, y2 in lines[0]:\n cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 2)\n return img", "title": "" }, { "docid": "73b9282cd6c3532ef5475a05a1b852fa", "score": "0.5857586", "text": "def visualize_results(image, output_dict):\n vis_util.visualize_boxes_and_labels_on_image_array(\n image,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n instance_masks=output_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=4)\n \n return image", "title": "" } ]
4d2d4a6fadcc40cdc77b1c55d2aa3696
Returns coarse grid resolution in Z direction
[ { "docid": "7058b97866df3c35a7390d99f2047246", "score": "0.82881844", "text": "def coarseResolutionZ(self):\n return self.params.coarseLengthZ/float(self.params.gridPointsZ-1)", "title": "" } ]
[ { "docid": "564e191f1313e6283343e82773a2f304", "score": "0.7533923", "text": "def fineResolutionZ(self):\n return self.params.fineLengthZ/float(self.params.gridPointsZ-1)", "title": "" }, { "docid": "2dcb68c291bb907c221b1fd53248cc41", "score": "0.695249", "text": "def coarseResolutionX(self):\n return self.params.coarseLengthX/float(self.params.gridPointsX-1)", "title": "" }, { "docid": "5c0098fe12993b38670fdf4c9e4f5eb1", "score": "0.69233876", "text": "def coarseResolutionY(self):\n return self.params.coarseLengthY/float(self.params.gridPointsY-1)", "title": "" }, { "docid": "ea986f165731daeeb91bd9c8574a8d82", "score": "0.65984195", "text": "def get_ground_resolution(self) -> Tuple[float, float]:\n\n graze = numpy.deg2rad(self.SCPCOA.GrazeAng)\n twist = numpy.deg2rad(self.SCPCOA.TwistAng)\n row_ss = self.Grid.Row.SS\n col_ss = self.Grid.Col.SS\n\n row_ground = abs(float(row_ss / numpy.cos(graze)))\n col_ground = float(numpy.sqrt((numpy.tan(graze) * numpy.tan(twist) * row_ss)**2 + (col_ss/numpy.cos(twist))**2))\n return row_ground, col_ground", "title": "" }, { "docid": "aa8a3aeabccc082f281d432ab62a956d", "score": "0.656452", "text": "def grid_resolution(self):\n distances = distance.cdist(self.grid, self.grid)\n np.fill_diagonal(distances, np.inf)\n return np.min(distances) * self.unit", "title": "" }, { "docid": "f6b0e43174bbc132aa03ad077cc57dbc", "score": "0.6255525", "text": "def fineResolutionY(self):\n return self.params.fineLengthY/float(self.params.gridPointsY-1)", "title": "" }, { "docid": "e65c8e8be80930a43157adedefcc0569", "score": "0.6227308", "text": "def get_grid_size(grid_obj):\n z_len = grid_obj.z['data'][-1] - grid_obj.z['data'][0]\n x_len = grid_obj.x['data'][-1] - grid_obj.x['data'][0]\n y_len = grid_obj.y['data'][-1] - grid_obj.y['data'][0]\n z_size = z_len / (grid_obj.z['data'].shape[0] - 1)\n x_size = x_len / (grid_obj.x['data'].shape[0] - 1)\n y_size = y_len / (grid_obj.y['data'].shape[0] - 1)\n return np.array([z_size, y_size, x_size])", "title": "" }, { "docid": "f1ec29862f5409e96c2ac5407f944a66", "score": "0.60934645", "text": "def grid_resolution(self):\n return min(self.dax0, self.dax1, self.dax2)", "title": "" }, { "docid": "62e2a3a49c33beafabe41f4ad6320e08", "score": "0.60723877", "text": "def grid (self, z):\n width = (self.bbox[2] - self.bbox[0]) / (self.resolutions[z] * self.size[0])\n height = (self.bbox[3] - self.bbox[1]) / (self.resolutions[z] * self.size[1])\n return (width, height)", "title": "" }, { "docid": "7ac3cb780247340d8d9d5c9d78a40d3f", "score": "0.60331", "text": "def get_grid_alt(grid_size, alt_meters=1500):\n return np.int32(np.ceil(alt_meters/grid_size[0]))", "title": "" }, { "docid": "a47b1e4700d85b8bde28c0e26e1e3b24", "score": "0.60149544", "text": "def fineResolutionX(self):\n return self.params.fineLengthX/float(self.params.gridPointsX-1)", "title": "" }, { "docid": "c02084550b58f1bffb754f4fd79fa56e", "score": "0.5951176", "text": "def resolution(self):\n geo_coords = self.to_geographic()\n resolution = abs(_initialresolution * math.cos(geo_coords.lat * _pi_180) / (2**self.zoom))\n return resolution", "title": "" }, { "docid": "e47828185dc1c8299602be25a9d8e67b", "score": "0.58325493", "text": "def totalGridPoints(self):\n return self.params.gridPointsX*self.params.gridPointsY*\\\n self.params.gridPointsZ", "title": "" }, { "docid": "f940036633cc9ed9ac404f50783c0500", "score": "0.57711804", "text": "def highest_z(self) -> float:", "title": "" }, { "docid": "9f91a55d99ac2533c800adf976668d84", "score": "0.5762031", "text": "def 
get_grid_z(self, grid_id = None):\n return self._anuga.grid_z", "title": "" }, { "docid": "f72e422b5f12b28eea1a421fade77d77", "score": "0.5696824", "text": "def get_resolution(self):\n return self.axis_resolution", "title": "" }, { "docid": "0d086daf669bcaa19fa79afb94cc7c14", "score": "0.56940645", "text": "def get_zone(coord):\n # There are 60 longitudinal projection zones numbered 1 to 60 starting at 180W\n # So that's -180 = 1, -174 = 2, -168 = 3\n zone = ((coord - -180) / 6.0)\n return int(math.ceil(zone))", "title": "" }, { "docid": "644fd1584070fae74d1e663d2068381e", "score": "0.56645054", "text": "def resolution(self):\n return self.mip_resolution(self.mip)", "title": "" }, { "docid": "4eb68cb1398041a72ede3c4e3bc82818", "score": "0.5645508", "text": "def ground_resolution(self) -> float:\n return None", "title": "" }, { "docid": "6abaa662bdfbf515e28fd88a477d8e3a", "score": "0.56422126", "text": "def calc_surface_area_in_grid( res='1x1', debug=False ):\n logging.info('called calc surface area in grid')\n\n # Get latitudes and longitudes in grid \n lon_e, lat_e, NIU = get_latlonalt4res( res=res, centre=False, debug=debug ) \n lon_c, lat_c, NIU = get_latlonalt4res( res=res, centre=True, debug=debug ) \n\n # Set variables values\n PI_180 = pi/ 180.0\n Re = np.float64( 6.375E6 ) # Radius of Earth [m] \n lon_dim = get_dims4res(res=res)[0]\n lon_degrees = float( lon_dim )\n\n # Loop lats and calculate area \n A1x1 = []\n for n, lat_ in enumerate( lat_e[:-1] ):\n\n # Lat at S and N edges of 1x1 box [radians]\n S = PI_180 * lat_e[n]\n N = PI_180 * lat_e[n+1] \n\n # S to N extent of grid box [unitless]\n RLAT = np.sin( N ) - np.sin( S )\n\n # 1x1 surface area [m2] (see [GEOS-Chem] \"grid_mod.f\" for algorithm)\n A1x1 += [ 2.0 * pi * Re * Re / lon_degrees * RLAT ]\n\n A1x1 = np.array( A1x1 )\n if debug:\n print(A1x1)\n print([ (i.shape, i.min(),i.max() ) for i in [ A1x1 ] ])\n\n # convert to 2D array / apply to all longitudes \n A1x1 = np.array( [ list( A1x1 ) ] * int(lon_dim) )\n\n return A1x1", "title": "" }, { "docid": "cce9e0fd23d6383a8d542cf89562529f", "score": "0.560488", "text": "def get_raster_units_per_pixel(self):\n return self.wfd.get_units_per_pixel()", "title": "" }, { "docid": "5620b4dd749bb73e87d7e870b97b87e9", "score": "0.5596778", "text": "def calc_coarse_bounds(self):\n x_positions = self.hdf_files[0][\"MAPS\"][\"x_axis\"][:]\n y_positions = self.hdf_files[0][\"MAPS\"][\"y_axis\"][:]\n self.coarse_positions = []\n for i in range(len(self.theta)):\n\n x_left = round(x_positions[int(self.bounds[0][i])], 5)\n x_right = round(x_positions[int(self.bounds[1][i])], 5)\n x_center = round((x_right + x_left) / 2, 5)\n x_width = round(x_right - x_left, 5)\n y_top = round(y_positions[int(self.bounds[2][i])], 5)\n y_bottom = round(y_positions[int(self.bounds[3][i])], 5)\n y_center = round((y_top + y_bottom) / 2, 5)\n y_width = round(y_bottom - y_top, 5)\n x_pos_temp = list([self.theta[i], x_center, x_width, y_center, y_width])\n self.coarse_positions.append(x_pos_temp) \n\n return self.coarse_positions", "title": "" }, { "docid": "63e1021a72776ebcb6b058bb3977ca9b", "score": "0.5581639", "text": "def tile_resolution(zoom, latitude):\n return 156543.034 * math.cos(math.radians(latitude)) / (2 ** zoom)", "title": "" }, { "docid": "c5b05492f9640b5dac1ada1856030a89", "score": "0.55756134", "text": "def regrid_wfdei(res):\n\n if not (0 < res < 180):\n raise ValueError(\"Resolution must be in range [1, 180).\")\n elif (180 % res != 0):\n raise ValueError(\"Resolution must be a factor of 180 
(res=%d).\" % res)\n\n wfdei = read_grid_wfdei()\n\n # get 2d array in which the value is the landpoint number\n WFDEI_2d = wfdei.expand(np.arange(len(wfdei)))\n WFDEI_2d.fill_value = IMDI\n\n ###########################################################################\n # Define the new, coarser grid based on the 0.5deg WFDEI grid.\n ###########################################################################\n width = int(res/DLON_WFDEI)\n height = int(res/DLAT_WFDEI)\n\n vv = list(range(0, (wfdei.nr+1), width))\n hh = list(range(0, (wfdei.nc+1), height))\n lp_min = 0.3*width*height\n\n lons = [0.5*(wfdei.lons[h1] + wfdei.lons[h2-1])\n for h1, h2 in zip(hh[:-1], hh[1:])]\n\n lats = [0.5*(wfdei.lats[v1] + wfdei.lats[v2-1])\n for v1, v2 in zip(vv[:-1], vv[1:])]\n\n lons = np.array(lons)\n lats = np.array(lats)\n\n ###########################################################################\n # List of tile location H,V and landpoints in HV.\n ###########################################################################\n ListH = []\n ListV = []\n ListLP = []\n\n for H in range(360//res):\n for V in range(180//res):\n cols = slice(hh[H], hh[H+1])\n rows = slice(vv[V], vv[V+1])\n TILE = WFDEI_2d[rows, cols]\n if TILE.count() > lp_min:\n ListH.append(H)\n ListV.append(V)\n ListLP.append(TILE.compressed().tolist())\n\n coarse_grid = LandGrid(None, lons, lats, [ListV, ListH])\n\n return coarse_grid, ListLP", "title": "" }, { "docid": "a4d8efacb1df047c9ca375030a62e9a0", "score": "0.5565868", "text": "def main(grid):\n dimension = len(grid)\n\n # dict where:\n # Key - tuple (x, y)\n # Value - height of tower\n xyz_dict = {}\n for i in range(dimension):\n for j in range(dimension):\n xyz_dict[(i, j)] = grid[i][j]\n\n # for xy_area we simply look at the\n # presence/absence of a tower. we don't\n # care about the height itself. 
if a tower\n # is present, then add 1 to the xy_area\n xy_area = 0\n for val in xyz_dict.values():\n if val != 0:\n xy_area += 1\n\n yz_dict = make_dict(xyz_dict, 1)\n yz_area = calc_area(yz_dict)\n\n xz_dict = make_dict(xyz_dict, 0)\n xz_area = calc_area(xz_dict)\n\n total_projection = xy_area + yz_area + xz_area\n print(\"Total projection:\", total_projection)\n # return total_projection", "title": "" }, { "docid": "8e701bb6036b8a82f7aa212f4abd8198", "score": "0.5531701", "text": "def surfaceTolerance(self):\n return float()", "title": "" }, { "docid": "8e701bb6036b8a82f7aa212f4abd8198", "score": "0.55311775", "text": "def surfaceTolerance(self):\n return float()", "title": "" }, { "docid": "f8b7ebacd310c1290835f884231052b2", "score": "0.55192566", "text": "def getRS( self ) -> float:\n return self.__z", "title": "" }, { "docid": "a388e20d1b2389d28d543485e0fb7de7", "score": "0.55129313", "text": "def calcDepth(self,zeta=None):\r\n #h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()\r\n if self.gridtype == 'rho':\r\n h = self.h\r\n elif self.gridtype == 'psi':\r\n h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])\r\n elif self.gridtype == 'u':\r\n h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])\r\n elif self.gridtype == 'v':\r\n h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])\r\n \r\n return get_depth(self.S,self.C,self.hc,h,zeta=zeta, Vtransform=self.Vtransform).squeeze()", "title": "" }, { "docid": "1b410094b074eca378f6e8c36c99f0f2", "score": "0.5476696", "text": "def z_max(self) -> float:\n return self.Nz * self.dz", "title": "" }, { "docid": "4bbcd38d63aefbf7c228f46d05170ef8", "score": "0.54685163", "text": "def _get_physical_coordinates_of_z_plane(zrange: Tuple[float, float]):\n physical_z = (zrange[1] - zrange[0]) / 2 + zrange[0]\n return physical_z", "title": "" }, { "docid": "9221931b71dcd78b70ef1c726ab269b6", "score": "0.5465155", "text": "def get_nc_Grid_HYCOM2(grdfile, name='GLBv0.08_Arctic4'):\n\n nc = netCDF4.Dataset(grdfile)\n lon = nc.variables['lon'][:]\n lat = nc.variables['lat'][:]\n depth = nc.variables['z'][:]\n ssh = nc.variables['ssh'][0,:,:]\n var = nc.variables['temp'][0,:,:,:]\n nc.close()\n\n lon_t, lat_t = np.meshgrid(lon, lat)\n\n lonv = 0.5 * (lon[1:] + lon[:-1])\n lonv = np.insert(lonv, 0, 2*lon[0] - lonv[0])\n lonv = np.append(lonv, 360.1)\n\n latv = 0.5 * (lat[1:] + lat[:-1])\n latv = np.insert(latv, 0, [2*lat[0] - latv[0]])\n latv = np.append(latv, [2*lat[-1] - latv[-1]])\n\n lon_vert, lat_vert = np.meshgrid(lonv, latv)\n\n mask_t = np.array(~var[:].mask, dtype='int')\n\n z_t = np.tile(depth,(mask_t.shape[2],mask_t.shape[1],1)).T\n\n depth_bnds = np.zeros(len(depth)+1)\n for i in range(1,len(depth)):\n depth_bnds[i] = 0.5 * (depth[i-1] + depth[i])\n depth_bnds[-1] = 5750\n\n bottom = pyroms.utility.get_bottom(var[::-1,:,:], mask_t[0], spval=var.fill_value)\n nlev = len(depth)\n bottom = (nlev-1) - bottom\n h = np.zeros(mask_t[0,:].shape)\n for i in range(mask_t[0,:].shape[1]):\n for j in range(mask_t[0,:].shape[0]):\n if mask_t[0,j,i] == 1:\n h[j,i] = depth_bnds[int(bottom[j,i])+1]\n\n angle = np.zeros((lat.shape[0], lon.shape[0]))\n\n# geod = pyproj.Geod(ellps='WGS84')\n# az_forward, az_back, dx = geod.inv(lon_vert[:,:-1], lat_vert[:,:-1], lon_vert[:,1:], lat_vert[:,1:])\n# angle = 0.5 * (az_forward[1:,:] + az_forward[:-1,:])\n# angle = (90 - angle) * np.pi/180.\n\n return Grid_HYCOM(lon_t, lat_t, lon_vert, lat_vert, mask_t, z_t, h, angle, name)", "title": "" }, { "docid": "e7aa75054c3bf735a112c8e5971f04fa", "score": "0.5462959", "text": 
"def surface(self):\n return self.long * self.larg", "title": "" }, { "docid": "0c07f2f59f8303279ddaee4c34dd119c", "score": "0.53951466", "text": "def photzp(self):\n # What is the calibration shift applied in DR2?\n mycaldb = get_caldb()\n try:\n shift = mycaldb.shifts[self.run]\n except KeyError:\n shift = 0.0\n # The zeropoint in the metadata file is corrected for extinction\n # but not re-calibrated and not corrected for PERCORR.\n # In accordance with the ESO standard, photzp absorbs the scaling\n # with exposure time.\n photzp = (METADATA[self.run]['zeropoint_precalib']\n - self.percorr\n + shift\n + 2.5*np.log10(self.exptime))\n return np.round(photzp, 4)", "title": "" }, { "docid": "297d4da3c3a3f3bdaa7fc243611ee3aa", "score": "0.53671426", "text": "def getRes(self, surface):\r\n\t\treturn surface.get_size()", "title": "" }, { "docid": "e78a919a54d0676f6affc4ada1b5b53e", "score": "0.53632295", "text": "def getZFar(self):\n return _osg.Depth_getZFar(self)", "title": "" }, { "docid": "d3f31fd4eec8f28a9717883986465ed6", "score": "0.5363179", "text": "def _get_gridY(self):\n return self.rootStyle['gridY']", "title": "" }, { "docid": "828600c8c234c67dbcf67bac4a98bf2a", "score": "0.53617746", "text": "def getZChannel(self) -> int:\n ...", "title": "" }, { "docid": "bbaa21371a76da509610665fc06ea92c", "score": "0.5343565", "text": "def surface(self):\n return math.pi * self.rayon ** 2", "title": "" }, { "docid": "138897760678be311c88beccb8cf81e9", "score": "0.5331006", "text": "def getGridCost(self):\n net_load = self.system.net_load\n\n import_E = net_load >= 0\n export_E = net_load < 0\n cost_profile = np.zeros((len(net_load),1))\n cost_profile[import_E] = self.mip[import_E]\n cost_profile[export_E] = self.export_rate\n cost = net_load * cost_profile\n self.grid_cost = cost\n return cost", "title": "" }, { "docid": "3190e9828f371792859c9fbccb5d2d56", "score": "0.53241545", "text": "def getGridDisplacements(self):\n origin = (\n old_div(self.options[\"Grid\"][\"origin\"], self.options[\"Image\"][\"resampling\"])\n ).astype(int)\n x = []\n for i in range(2):\n x.append(\n np.linspace(\n origin[i] * self.ref_img.GetSpacing()[i],\n (\n origin[i]\n + (self.options[\"Grid\"][\"size\"][i] - 1)\n * self.options[\"Grid\"][\"spacing\"][i]\n )\n * self.ref_img.GetSpacing()[i],\n self.options[\"Grid\"][\"size\"][i] * self.options[\"Grid\"][\"upsampling\"]\n - (self.options[\"Grid\"][\"upsampling\"] - 1),\n endpoint=False,\n )\n )\n grid = np.meshgrid(x[0], x[1])\n self.results[\"Coordinates\"] = np.zeros((grid[0].size, 2))\n self.results[\"Displacement\"] = np.zeros((grid[0].size, 2))\n\n cnt = 0\n for i in range(grid[0].shape[0]):\n for j in range(grid[0].shape[1]):\n p = np.array([grid[0][i, j], grid[1][i, j]])\n self.results[\"Coordinates\"][cnt, :] = p\n self.results[\"Displacement\"][cnt, :] = (\n self.transform.TransformPoint(p) - p\n )\n cnt += 1", "title": "" }, { "docid": "955a9a578c8b858bf0c1e159c31986f6", "score": "0.53154606", "text": "def get_buffer_lengths_z(theta_zs, shape_grid, res):\n\n # buffer_lengths_z = torch.sin(theta_zs) * max(shape_grid[:2]) * res\n buffer_lengths_z = torch.sin(theta_zs) * (math.sqrt(shape_grid[0]**2 + shape_grid[1]**2)) * res\n\n return buffer_lengths_z", "title": "" }, { "docid": "a2eee01b654ef989c15aaf9b2d8dd5ef", "score": "0.5304275", "text": "def spatial_resolution(self) -> Optional[float]:\n return self.properties.get(\"spatial_resolution\")", "title": "" }, { "docid": "81d417aed07bb8a5e2748ad43b952f98", "score": "0.5295278", "text": "def 
getPixelResolution(lat, lon, shape, units=\"km\"):\n \n if str(lat.size) == '4':\n col = ([lat[0], lon[0]], [lat[2], lon[2]])\n row = ([lat[0], lon[0]], [lat[3], lon[3]])\n else:\n col = ([lat[0,0], lon[0,0]], [lat[0,-1], lon[0,-1]])\n row = ([lat[0,0], lon[0,0]], [lat[-1,0], lon[-1,0]])\n\n PxResRow = getDistanceLL(col[0], col[1])\n PxResCol = getDistanceLL(row[0], row[1])\n PxResRow = PxResRow/shape[1]\n PxResCol = PxResCol/shape[0]\n\n if units == \"deg\":\n PxResRow = min(getCoordinateDiffForDistance(lat[0], lon[0], PxResRow, units=\"km\"))\n PxResCol = min(getCoordinateDiffForDistance(lat[0], lon[0], PxResCol, units=\"km\"))\n\n return PxResCol, PxResRow", "title": "" }, { "docid": "36da8e9b1666223f338bbac76ddecbd8", "score": "0.5292986", "text": "def getBestShift(img):\n cy,cx = ndimage.measurements.center_of_mass(img)\n print cy,cx\n\n rows,cols = img.shape\n shiftx = np.round(cols/2.0-cx).astype(int)\n shifty = np.round(rows/2.0-cy).astype(int)\n\n return shiftx,shifty", "title": "" }, { "docid": "d493c8bdb740556b91888a963a610c0f", "score": "0.528777", "text": "def evaluate_grid(self, grid, lat, lon) :\r\n idx_2d = self.calc_2d_index(lat,lon)\r\n return grid[idx_2d]", "title": "" }, { "docid": "b2949928a43cc2f6e60f3f30cd657c26", "score": "0.5281831", "text": "def get_grid_sizes(self) -> torch.LongTensor:\n return self.locator.get_grid_sizes()", "title": "" }, { "docid": "790fff9db2b5d9f2982998b3aff76b46", "score": "0.5276462", "text": "def calc_zone_r( r_in, r_out ):\n return ( 0.5*(r_out**3 + r_in**3) )**(1./3);", "title": "" }, { "docid": "377f4a6f2295d4709c3ac9ad250d38f7", "score": "0.52533746", "text": "def getSurfaceDip(surface):\n\taverage_dip = 0.0\n\tfor i in range(surface.shape[1] - 1):\n\t\tp1 = surface[0,i]\n\t\tp2 = surface[0,i + 1]\n\t\tp3 = surface[1,i]\n\t\taverage_dip = average_dip + getPlaneDip(p1,p2,p3)\n\treturn average_dip / (surface.shape[1] - 1)", "title": "" }, { "docid": "523f4a92ffb0eb7366c59fca6f100cc0", "score": "0.5251375", "text": "def _resolution_estimate(ordered_pair_list, cutoff):\n\n x = []\n y = []\n\n for o in ordered_pair_list:\n x.append(o[0])\n y.append(o[1])\n\n if max(y) < cutoff:\n # there is no point where this exceeds the resolution\n # cutoff\n return -1.0\n\n # this means that there is a place where the resolution cutof\n # can be reached - get there by working backwards\n\n x.reverse()\n y.reverse()\n\n if y[0] >= cutoff:\n # this exceeds the resolution limit requested\n return x[0]\n\n j = 0\n while y[j] < cutoff:\n j += 1\n\n resolution = x[j] + (cutoff - y[j]) * (x[j - 1] - x[j]) / (y[j - 1] - y[j])\n\n return resolution", "title": "" }, { "docid": "9f15571010d5a1a58b5f38e0da12cbba", "score": "0.52384526", "text": "def get_fixed_grid_edges(y, x, grid_res):\n # These values represent the distance from the center of a grid cell to the\n # edges. Obtained from the GOES-16 PUG Vol. 3 Sec. 
5.1.2\n res_vals = {\n '0.5' : 0.7e-05,\n '1.0' : 1.4e-05,\n '2.0' : 2.8e-05,\n '4.0' : 5.6e-05,\n '10.0': 14.0e-05\n }\n\n # More useful than getting a KeyError\n if (grid_res not in res_vals.keys()):\n raise ValueError((\"Invalid grid resolution '{}'.\"\n \"Valid values: ['0.5', '1.0', '2.0', '4.0', '10.0']\".format(resolution)))\n\n # Get the edge offset value for the given grid cell resolution\n edge_offset = res_vals[grid_res]\n\n # Subtract the edge offset from each grid cell centroid value, which yields\n # the left edge of every cell (and the right edge for every cell in x[n-1])\n #\n # Add the offset value to the last centroid in the array to obtain\n # it's right edge\n x_edges = x - edge_offset\n x_edges = np.append(x_edges, x[-1] + edge_offset)\n\n y_edges = y - edge_offset\n y_edges = np.append(y_edges, y[-1] + edge_offset)\n\n return (y_edges, x_edges)", "title": "" }, { "docid": "0556983640e7fa68463e310851235bc4", "score": "0.5238078", "text": "def getResolution (self, (minx, miny, maxx, maxy)):\n return max( float(maxx - minx) / self.size[0],\n float(maxy - miny) / self.size[1] )", "title": "" }, { "docid": "996fade952f2cfd18f6a095dacedc1f3", "score": "0.5233027", "text": "def resolution(self, energies_in_kev):\n return 0.6 * np.sqrt(energies_in_kev)", "title": "" }, { "docid": "02fda2e40b080b71c42f68155e854e8a", "score": "0.5223785", "text": "def eazy_stats(zgrid, pzi):\n # p(z) weighted\n zphot = np.sum(zgrid * pzi) / np.sum(pzi)\n # Uncertainty\n cum_pzi = np.cumsum(pzi) / np.sum(pzi)\n l68 = zgrid[np.argmin(np.abs(cum_pzi - 0.16))]\n u68 = zgrid[np.argmin(np.abs(cum_pzi - (1 - 0.16)))]\n #\n return zphot, (u68-l68)/2.", "title": "" }, { "docid": "e910c3738baeda740b9e6137e347c4b1", "score": "0.52216476", "text": "def grid_parameters(\n x_lim: list = None, y_lim: list = None, z_lim=None, grf: float = 1\n) -> (np.array, int, int):\n if y_lim is None:\n y_lim = [0, 1000]\n else:\n y_lim = y_lim\n if x_lim is None:\n x_lim = [0, 1500]\n else:\n x_lim = x_lim\n if z_lim is None:\n z_lim = [0, grf]\n else:\n z_lim = z_lim\n\n grf = grf # Cell dimension\n nrow = int(np.diff(y_lim) / grf) # Number of rows\n ncol = int(np.diff(x_lim) / grf) # Number of columns\n nlay = int(np.diff(z_lim) / grf) # Number of layers\n\n if nlay == 1:\n array = np.ones((nrow, ncol)) # Dummy array\n # Centroids of dummy array\n xys = get_centroids(array, grf) + np.min([x_lim, y_lim], axis=1)\n else:\n array = np.ones((nlay, nrow, ncol))\n xys = get_centroids(array, grf) + np.min([x_lim, y_lim, z_lim], axis=1)\n\n return xys, nrow, ncol, nlay", "title": "" }, { "docid": "b888d6e88497b771aa9c4dc21f4956ba", "score": "0.5217395", "text": "def g(z: Zecording, x, y=3):\n return z.n_cells", "title": "" }, { "docid": "1067bb1a838eda38c9010cbce270ef14", "score": "0.52100515", "text": "def create_grid2(cloud):\n minx, maxx = cloud[LAT].min(), cloud[LAT].max()\n miny, maxy = cloud[LONG].min(), cloud[LONG].max()\n minz, maxz = cloud[ELE].min(), cloud[ELE].max()\n im_range = (1+maxx - minx, 1+maxy - miny, 1+maxz-minz)\n cell_size = .10 # .1 meters = 10 cms\n\n # create blank images\n cells = np.zeros((np.around(im_range[0]/.1)+1, np.around(im_range[1]/.1)+1),dtype=np.float32)\n counts = np.zeros((np.around(im_range[0]/.1)+1, np.around(im_range[1]/.1)+1),dtype=np.float32)\n light = np.zeros((np.around(im_range[0]/.1)+1, np.around(im_range[1]/.1)+1),dtype=np.float32)\n\n mz = 0\n print (\"Grid size: {} wide x {} tall \".format( im_range[0] / .1, im_range[1] / .1,im_range[2] / .1))\n ## loop through the data putting 
points in cells\n for x, y, z, i in zip(cloud[LAT], cloud[LONG], cloud[ELE], cloud[INT]):\n xc = int(np.round((x - minx) / cell_size))\n yc = int(np.round((y - miny) / cell_size))\n zc = int(np.round((z - minz) / cell_size))\n mz = max(mz, zc)\n # if zc <= ground: ## we are interested on only the ground level\n if i > 80: # and zc < 100: ## we are interested on only the ground level\n cells[xc,yc] += z\n light[xc,yc] += i\n counts[xc,yc] += 1 ## to compute promedio\n counts[counts == 0] = 1 ## to avoid NaN\n print \"maxz: %s \" % mz\n return cells/counts, light/counts, minx, miny, minz", "title": "" }, { "docid": "a66def1f9882a68a44d3b196b54794fe", "score": "0.5202959", "text": "def get_gc_alt(alt):\n alt_c = gchemgrid('c_km_geos5_r')\n return find_nearest( alt_c, alt )", "title": "" }, { "docid": "27e687b60142af02921c643b00a87321", "score": "0.51998943", "text": "def get_tracer_zminmax(tracer):\n assert_tracer(tracer)\n config = get_qa_config()\n zmin = config[\"tile_qa_plot\"][\"tracers\"][tracer][\"zmin\"]\n zmax = config[\"tile_qa_plot\"][\"tracers\"][tracer][\"zmax\"]\n return zmin, zmax", "title": "" }, { "docid": "73fb1840feffa9fc6941f2be77565e09", "score": "0.5192703", "text": "def gridGen(self,central=[4.11,4.11,1],width=1.5,res=3):", "title": "" }, { "docid": "d4b193f43fd7778ee14530ae7daa4b57", "score": "0.5191971", "text": "def getResolution(self):\n return self.resolution", "title": "" }, { "docid": "8a1b6fbc1d753c9551e78610550a00d8", "score": "0.51911587", "text": "def healpix_resolution_calculator(nodes):\n resolution = int(math.sqrt(nodes / 12))\n return resolution", "title": "" }, { "docid": "73bde016614643edc33c73ab451b79ea", "score": "0.5185269", "text": "def get_map_resolution(self):\n return self.map_resolution", "title": "" }, { "docid": "bd28b6a1361f15616fcaa83732c2d7ab", "score": "0.5185054", "text": "def perc(x,y):\n # value of z at origin: Try zmin = 80 for shoreline or 250 for no shore\n nxpoints = 11\n nypoints = 11\n nclass = 2\n endpoint = 80000 #endpoint\n percentage = zeros((nxpoints,nypoints,nclass))\n for i in range(nxpoints):\n for j in range(nypoints):\n for k in range (nclass):\n if (i > 5):\n percentage[i,j,0] = 1.0\n percentage[i,j,1] = 0.0\n else:\n percentage[i,j,0] = 0.0\n percentage[i,j,1] = 1.0\n return percentage", "title": "" }, { "docid": "281404838dd2d099cee9b867ee3ef423", "score": "0.51812005", "text": "def _generate_grid(self):\n # Center bounding volume.\n min = -self.size * 0.5\n max = self.size * 0.5\n \n # generate grid\n x = np.linspace(min[0], max[0], self.resolution[0])\n y = np.linspace(min[1], max[1], self.resolution[1])\n z = np.linspace(min[2], max[2], self.resolution[2])\n x, y, z = np.meshgrid(x, y, z)\n return x, y, z", "title": "" }, { "docid": "df2cff398fd10cfdc684d6869ebc00a5", "score": "0.5176759", "text": "def ncpl(self):\n if self._grid_type == \"DISV\":\n ncpl = self._datadict[\"NCPL\"]\n if self._grid_type == \"DIS\":\n ncpl = self.nrow * self.ncol\n else:\n None\n return ncpl", "title": "" }, { "docid": "2ffc372400e0c65855fe61e30bfb4e01", "score": "0.5172572", "text": "def test_0077_chlocal_H2O(self):\n import os\n dname = os.path.dirname(os.path.abspath(__file__))\n nao = nao_c(label='water', cd=dname)\n g = nao.build_3dgrid_ae(level=3)\n int_chlocal = (g.weights*nao.vna(g.coords, sp2v=nao.ao_log.sp2chlocal)).sum()\n self.assertAlmostEqual(int_chlocal, -7.9999819496898787)", "title": "" }, { "docid": "224b0b555b1ed8482a50c34163470e1e", "score": "0.51685804", "text": "def get_corners(x, y, x_grid, y_grid, z):\n x_order 
= np.argsort(np.abs(np.subtract(x, x_grid)))\n x1 = np.where(x_grid == x_grid[x_order][0])\n x2 = np.where(x_grid == x_grid[x_order][1])\n \n y_order = np.argsort(np.abs(np.subtract(y, y_grid)))\n y1 = np.where(y_grid == y_grid[y_order][0])\n y2 = np.where(y_grid == y_grid[y_order][1])\n \n x_vals = np.sort(x_grid[np.concatenate((x1,x2),1).flatten() ])\n y_vals = np.sort(y_grid[np.concatenate((y1,y2),1).flatten() ])\n z_vals = np.concatenate((z[x1,y1],z[x1,y2],z[x2,y1],z[x2,y2]),0)\n return x_vals, y_vals, z_vals", "title": "" }, { "docid": "244e9f22020618c9e25ea758245a6419", "score": "0.51664793", "text": "def topo2grid(topo,lat,lon):\n\n output_data=np.zeros(lat.shape)\n output_n=np.zeros(lat.shape)\n\n max_dlat=np.max(lat[1:,:]-lat[:-1,:])/2.0\n max_dlon=np.max(lon[:,1:]-lon[:,:-1])/2.0\n\n hi_dlat=np.max(topo.lat[1:,:]-topo.lat[:-1,:])/2.0\n hi_dlon=np.max(topo.lon[:,1:]-topo.lon[:,:-1])/2.0\n ratio=max(hi_dlat/max_dlat, hi_dlon/max_dlon)\n window=max(2,2*int(np.floor(ratio)))\n\n ny,nx=topo.data.shape\n nyl,nxl=lat.shape\n\n print(\"Converting topographic grid\")\n if (nyl>100):\n print(\"for large grids, this can take a while\")\n for i in range(ny):\n if (nyl>100):\n if (i%20)==0:\n print(\"\\r{0:4.0f}%\".format(100.0*i/float(ny)), end=\"\")\n sys.stdout.flush()\n dists=(lat-topo.lat[i,0])**2 + (lon-topo.lon[i,0])**2\n cury,curx=np.unravel_index(np.argmin(dists),dists.shape)\n for j in range(nx):\n xmin=max(0,curx-window)\n xmax=min(nxl,curx+window)\n ymin=max(0,cury-window)\n ymax=max(nyl,cury+window)\n\n dists=(lat[ymin:ymax,xmin:xmax]-topo.lat[i,j])**2 + (lon[ymin:ymax,xmin:xmax]-topo.lon[i,j])**2\n cury,curx=np.unravel_index(np.argmin(dists),dists.shape)\n cury+=ymin\n curx+=xmin\n if ((np.abs(lat[cury,curx]-topo.lat[i,j]) < max_dlat)\n and (np.abs(lon[cury,curx]-topo.lon[i,j]) < max_dlon)) :\n\n output_data[cury,curx]+=topo.data[i,j]\n output_n[cury,curx]+=1\n\n\n output_n[output_n==0]=1\n\n print(\" Finished\")\n print(output_n.min(), output_n.max())\n return Bunch(topo=output_data/output_n, lat=lat, lon=lon)", "title": "" }, { "docid": "c9283cdb547b059b4fb40c7072b18065", "score": "0.5160822", "text": "def surface_from_grid(grid_surface, nuv):\n approx = SurfaceApprox.approx_from_grid_surface(grid_surface)\n return approx.compute_approximation(nuv=nuv)", "title": "" }, { "docid": "b3cd05624b6b017d5e39335d91a178ae", "score": "0.51595616", "text": "def solar_noon_local(LonDegE):\n return 12.", "title": "" }, { "docid": "c859365c4c4102461c445243b12db9e4", "score": "0.51561815", "text": "def resolution(self):\n return int(self.transform[0, 0]), int(self.transform[1, 1])", "title": "" }, { "docid": "bee3e7d96a88e86e8d7a99084e730034", "score": "0.5152065", "text": "def highest_z(self) -> float:\n return self._highest_z", "title": "" }, { "docid": "e0b4ace61d77342f917402cd9ba1ba45", "score": "0.5149172", "text": "def highz_coldens_profile(frb):\n x = np.linspace(-36.59,36.41,524)\n xL,yL = np.meshgrid(xL,xL)\n rp = radial_data(np.log10(frb),x=xL,y=yL)\n return rp", "title": "" }, { "docid": "28910d111d6bfd0f1cd1bc76f432867e", "score": "0.51479024", "text": "def get_gc_res( wd, filename='ctm.nc' ):\n # create NetCDf if not created.\n fname = wd+ '/'+filename\n if not os.path.isfile(fname):\n from .bpch2netCDF import convert_to_netCDF\n convert_to_netCDF( wd, filename=filename )\n\n # \"open\" NetCDF + extract time\n with Dataset( fname, 'r' ) as rootgrp:\n lon = rootgrp['longitude']\n lat = rootgrp['latitude']\n# lvls = rootgrp['model_level_number']\n lat, lon = [ np.array(i) for i in 
(lat, lon) ]\n\n # compare with dictionary to get resoslution \n dims = (len(lon),len(lat))\n res = get_dims4res( r_dims=True, just2D=True )[dims]\n if isinstance(res, type(None)):\n logging.error(\"Could not find resolution for run in {wd}\".format(wd=wd))\n\n return res", "title": "" }, { "docid": "6c53ff5a7624a53dc82d400497110b9b", "score": "0.5147388", "text": "def size(self):\n\n return int(self.grid.size ** (1/2))", "title": "" }, { "docid": "9ced5e96cc232e3f1461bb0ef56c9f49", "score": "0.5143831", "text": "def resolution(self, energies_in_kev):\n return 0.7 * np.sqrt(energies_in_kev)", "title": "" }, { "docid": "e29a4de8d97f4d8207fd29266cda9169", "score": "0.5127743", "text": "def get_coverage_grid_extent(instance):\n instance_wcs = get_wcs_record(instance)\n grid = instance_wcs.grid\n return [(int(h) - int(l) + 1) for\n h, l in zip(grid.highlimits, grid.lowlimits)]", "title": "" }, { "docid": "74cd6f00ada3c616f4e067b90aeec164", "score": "0.5126945", "text": "def find_dimension_numerical(self, tol=1e-10):\n V = self.get_coeffs(gr=1, Mset=self.M0(), Yset=self.Y())\n rk = V.numerical_rank(tol=tol)\n numdim = len(self._norm['Vals'][0])\n return V.ncols() - rk - numdim + 1", "title": "" }, { "docid": "18369f6a04ac27fd34d9486fc31c0dba", "score": "0.512592", "text": "def edges_z(self):\n if getattr(self, \"_edges_z\", None) is None and self.dim == 3:\n N = self.reshape(self.gridN, \"N\", \"N\", \"M\")\n XYZ = [mkvc(0.5 * (n[:, :, :-1] + n[:, :, 1:])) for n in N]\n self._edges_z = np.c_[XYZ[0], XYZ[1], XYZ[2]]\n return self._edges_z", "title": "" }, { "docid": "6d3efcc5c78be405921a67e440fd6a17", "score": "0.51250625", "text": "def calc_Rg(track):\n x,y,z = track[:,0],track[:,1],track[:,2]\n r_mean = [np.mean(x),np.mean(y),np.mean(z)]\n rsq = (x - r_mean[0])**2 + (y - r_mean[1])**2 + (z - r_mean[2])**2\n return np.sqrt(np.sum(rsq)/len(rsq))", "title": "" }, { "docid": "786a718149fe77af7886a6bb9aa70b44", "score": "0.5118747", "text": "def _get_pzf(self): # Padding z-axis front\n d = self.d\n base = dict(base=d, em=self.em) # In case relative units, use this as base.\n return units(self.rootStyle.get('pzf', 0), base=base)", "title": "" }, { "docid": "52443543df8956921c4800eb3388eb55", "score": "0.51120406", "text": "def getWarpGrid(self):\n return self.warp.getvolumecoordinates(self.warp.griddata.warpmeshdof)", "title": "" }, { "docid": "a0f470b13efb160fc6367a4353d4c988", "score": "0.5112013", "text": "def E_z(z):\n return H_z(z) / const.c.to(u.km / u.s)", "title": "" }, { "docid": "d0449628a95ac3add4a2f596bac8fd1b", "score": "0.51099366", "text": "def _compute_zcdp(self, noise_stddev):\n zcdp = 1 / (2*noise_stddev**2)\n return zcdp", "title": "" }, { "docid": "7b31e8aef3b78a85a623eb779b7484d8", "score": "0.51022625", "text": "def GlobalRadiation(ra, z):\n\n rg = ra * (0.75 + 2.0e-5*z)\n\n return rg", "title": "" }, { "docid": "9e9a23d6566f25d53947961a07cbce8f", "score": "0.50994194", "text": "def horz_dpi(self):\r\n return self._horz_dpi", "title": "" }, { "docid": "9e9a23d6566f25d53947961a07cbce8f", "score": "0.50994194", "text": "def horz_dpi(self):\r\n return self._horz_dpi", "title": "" }, { "docid": "8845908d3265c2ac823cd6f39e64638c", "score": "0.50950843", "text": "def getCommonGrid(self):\n return self.warp.getcommonvolumecoordinates(self.warp.griddata.commonmeshdof)", "title": "" }, { "docid": "2962088b7696757e54df4324189909d5", "score": "0.5090387", "text": "def surfacedensity(self):\n nh2 = self.gridded['dens'] / 1e6\n if self.ygrid.min() < 0:\n return np.trapz(nh2, 
x=self.ygrid*sc.au*1e2, axis=0)\n return 2. * np.trapz(nh2, x=self.ygrid*sc.au*1e2, axis=0)", "title": "" }, { "docid": "aecd32ff69f4af6c69d9d8a81b2beedc", "score": "0.5087554", "text": "def __get_origin_resolution(self):\n if self.__cam is not None:\n\n if self.__override_camera_res:\n res_scale = self.__res_fraction\n\n if res_scale == \"specific\":\n return self.__res_override\n else:\n return (int(self.__cam.parmTuple(\"res\").eval()[0] * float(res_scale)),\n int(self.__cam.parmTuple(\"res\").eval()[1] * float(res_scale)))\n\n return self.__cam.parmTuple(\"res\").eval()\n else:\n return tuple((0, 0))", "title": "" }, { "docid": "1d884f5695312ffbeb671b035621c363", "score": "0.50864655", "text": "def get_problem_grid_shape(h, xlim, zlim):\n x_pts = int(xlim[1]/h + 1)\n z_pts = int(zlim[1]/h + 1)\n \n return x_pts, z_pts", "title": "" }, { "docid": "2a7ea7b64f852dccc5cc7ee0647547b0", "score": "0.50848097", "text": "def compute_surface_disp_point(sources, nu, x, y, compute_depth=0):\n u_disp, v_disp, w_disp = 0, 0, 0;\n\n for source in sources:\n dx, dy, dz = compute_disps_from_one_mogi(source, x, y, nu, compute_depth);\n # Update the displacements from all sources\n u_disp = u_disp + dx;\n v_disp = v_disp + dy;\n w_disp = w_disp + dz; # vertical\n return u_disp, v_disp, w_disp;", "title": "" }, { "docid": "be045f6cfed7700577a2091c78a8741b", "score": "0.50830024", "text": "def getZoneLocFromGrid(gridCol, gridRow):\r\n locX = chr(ord('A') + gridCol)\r\n locY = str(gridRow + 1)\r\n return locX + locY", "title": "" }, { "docid": "0cd8dba0d82816b2d64da2a2d4a776ff", "score": "0.5082587", "text": "def level_zero_resolution(self) -> float:\n return self._map_height / self._tile_height", "title": "" }, { "docid": "90f7f3d3c3fb7ee99ffccc54d3180690", "score": "0.5077346", "text": "def getCO2coldensity_HCN(hcn_surfpressure_target, z0, guess):\n\timport scipy.optimize\n\tdef func(colden):\n\t\treturn ComputeHCNAbundance(colden, z0, 0)-hcn_surfpressure_target\n\t\n\treturn scipy.optimize.newton(func, guess)", "title": "" }, { "docid": "cf3c81d44fc69a51ba9b07e321d96777", "score": "0.5075957", "text": "def coordinates2(npixel: int):\n return (numpy.mgrid[0:npixel, 0:npixel] - npixel // 2) / npixel", "title": "" }, { "docid": "57c6a437e08b377ec34e3341156d2479", "score": "0.507481", "text": "def get_grid_dimensions(**kwargs):\n gridfile = get_file(**kwargs)\n ff = pf.open(gridfile)\n teffs = []\n loggs = []\n for mod in ff[1:]:\n teffs.append(float(mod.header['TEFF']))\n loggs.append(float(mod.header['LOGG']))\n ff.close()\n return np.array(teffs),np.array(loggs)", "title": "" }, { "docid": "57c6a437e08b377ec34e3341156d2479", "score": "0.507481", "text": "def get_grid_dimensions(**kwargs):\n gridfile = get_file(**kwargs)\n ff = pf.open(gridfile)\n teffs = []\n loggs = []\n for mod in ff[1:]:\n teffs.append(float(mod.header['TEFF']))\n loggs.append(float(mod.header['LOGG']))\n ff.close()\n return np.array(teffs),np.array(loggs)", "title": "" }, { "docid": "cf76b81a266dcf982a2ae9e27a14afb8", "score": "0.50676733", "text": "def get_resolution(phys_y : tuple,\n phys_x : tuple,\n phys_z : tuple):\n # Conversion factors to nm, these are based off of supported Bioformats length units\n UNITS = {'m': 10**9,\n 'cm': 10**7,\n 'mm': 10**6,\n 'µm': 10**3,\n 'nm': 1,\n 'Å': 10**-1}\n\n if None in phys_y:\n phys_y = 325\n else:\n phys_y = phys_y[0] * UNITS[phys_y[1]]\n if None in phys_x:\n phys_x = 325\n else:\n phys_x = phys_x[0] * UNITS[phys_x[1]]\n if None in phys_z:\n phys_z = (phys_x + phys_y)/2\n else:\n phys_z = 
phys_z[0] * UNITS[phys_z[1]]\n \n return [phys_y, phys_x, phys_z]", "title": "" } ]
1c3e6eba438cb0b7f88a9298306938e7
Compress the data of the vasprun.xml file to a JSON file.
[ { "docid": "24184ad8ae62b939c1e2eb460220943b", "score": "0.0", "text": "def data(vasprun_file):\n from pybat.cli.commands.util import data\n\n data(vasprun_file=vasprun_file)", "title": "" } ]
[ { "docid": "80205bd7bb1a38c089e49e6edd257744", "score": "0.6087916", "text": "def xmltojson_cmd(infile, savedir):\n xmltojson(infile, savedir)", "title": "" }, { "docid": "6fd9ff7841d3a1c8d8e8b65fe5e9ebaa", "score": "0.58871293", "text": "def writeToJSON(portscan):\n with open(\"portscan.json\", \"w\") as outfile:\n json.dump(portscan, outfile)", "title": "" }, { "docid": "4879d588850771dc6b2ccd90c6e13dca", "score": "0.5833612", "text": "def dump(self, filename):\n target = os.getcwd() + '/../data'\n if not os.path.isdir(target):\n os.mkdir(target)\n output = os.path.join(target, filename)\n with open(output, 'wb') as f:\n f.write(json.dumps(self.data))\n f.close()", "title": "" }, { "docid": "9a5897128b2e4cd4fb16dba89a68ee36", "score": "0.5830213", "text": "def write_json(self):\n # Handling file handler\n if self._ofile:\n if self._compress:\n ofh = gzip.open(self._ofile, 'wb', compresslevel=9)\n else:\n ofh = open(self._ofile, 'w')\n else:\n ofh = sys.stdout\n # Big string\n content = '[\\n' + ','.join(self._json) + '\\n]' + '\\n'\n # Handling compression\n if self._ofile and self._compress:\n ofh.write(content.encode())\n else:\n ofh.write(content)\n ofh.close()", "title": "" }, { "docid": "69af34613f28760b9fa6ed650e447180", "score": "0.58268976", "text": "def save_json(result, filename) -> None:\n try:\n with open(filename,'w') as f:\n result[\"schema\"] = \"orquestra-v1-data\"\n f.write(json.dumps(result, indent=2)) \n\n except IOError:\n print(f'Error: Could not open {filename}')", "title": "" }, { "docid": "7fb6b84a755c89a99e8a6117af441278", "score": "0.57726663", "text": "def to_json(self, filepath):\n with open(filepath, 'w+') as f:\n json.dump(self.data, f, cls=DataEncoder)", "title": "" }, { "docid": "f6b398f02367d8ede9a2337eda73b66c", "score": "0.5719761", "text": "def gzip_xml(fname):\n with open(fname, 'r') as f:\n fdata = f.read()\n with gzip.open(fname + '.gz', 'w') as gf:\n gf.write(fdata)\n os.remove(fname)\n print fname + '.gz successfully archived'", "title": "" }, { "docid": "98fa8c87cafa9e0a975b08f85468f7ca", "score": "0.57172585", "text": "def save(self, filename):\n\n l = []\n\n for node in self:\n d = {\n 'name': node.name,\n 'state': node.state,\n 'variables': node.variables \n }\n\n if node.store_result and hasattr(node, '_result'):\n d['result'] = node.result\n\n l.append(d)\n\n j = json.dumps(l, indent=2)\n\n with open(filename, 'w') as f:\n f.write(j)", "title": "" }, { "docid": "2a0ebe3e871141fe3c5b5e1ed094d1e9", "score": "0.56946695", "text": "def output_file(filename: str, compress: bool, data: dict):\n print('Outputting file.')\n data = json.dumps(data, separators=(',', ':'))\n if compress:\n with open(filename + '.json.bz2', 'wb') as fp:\n data_bytes = bytes(data, encoding='utf-8')\n fp.write(bz2.compress(data_bytes))\n else:\n with open(filename + '.json', 'w') as fp:\n fp.write(data)", "title": "" }, { "docid": "494845d0badbcf985b49999790ee1f96", "score": "0.5693416", "text": "def to_file(self,path):\n\t\twith open(path,'w') as output:\n\t\t\toutput.write(self.json())", "title": "" }, { "docid": "7221f723a2932119c83a9f036ddea831", "score": "0.567452", "text": "def save(filename,name,archive):\n filename = filename\n data = {name: [v.tolist() for v in archive]}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "title": "" }, { "docid": "b08eda0bea901bab24d4b0787035cc44", "score": "0.5649405", "text": "def to_json(self, file_name=\"pipesystem.json\"):\n with open(file_name, \"w\") as json_file:\n json.dump(self.to_config(), json_file, 
indent=4)", "title": "" }, { "docid": "050e7fd79e03ffe19a5153f14af7d861", "score": "0.56261146", "text": "def dump(self):\n json_document = {\"records\": [rec.raw for rec in self.records],\n \"relationships\": [rel.to_json_dict() for rel in self.relationships]}\n with open(os.path.join(self.source_dir, sina_filename), 'w') as outfile:\n json.dump(json_document, outfile)", "title": "" }, { "docid": "2778ea97fc6c67c17631b1a4bfe304fe", "score": "0.56188035", "text": "def to_json(self, filepath):\n raise NotImplementedError", "title": "" }, { "docid": "6b292b07d1f5f27217706612bfb5b94c", "score": "0.5615729", "text": "def temp_file_to_json(self):\n if self.domain_search_outfile:\n self.convert_domain_search_output()\n if self.name_search_outfile:\n self.convert_name_search_output()\n try:\n os.remove(self.temp_domain_search_file)\n except OSError:\n pass", "title": "" }, { "docid": "3d7c6a5ea75f6e75a344e0561b5f6e56", "score": "0.55664045", "text": "def write_krun_results_file(results, filename):\n\n with bz2.BZ2File(filename, 'wb') as file_:\n file_.write(json.dumps(results, indent=4))", "title": "" }, { "docid": "a1668b1c9cd9bc3f24b38b628e0bd3c7", "score": "0.55483013", "text": "def compress():", "title": "" }, { "docid": "13816d9326172068157336a735e23076", "score": "0.5525693", "text": "def save_json(self, filename):\n contents = self.to_json()\n file = open(filename, \"w\")\n file.write(contents)\n file.close()", "title": "" }, { "docid": "70ba5b984ce7479ea7588549db5d45c9", "score": "0.5523142", "text": "def to_JSON(self, entrypoint, out_filename):\n \n entrypoint.to_JSON(out_filename)", "title": "" }, { "docid": "0f253d59c415124b5134b245c76fc398", "score": "0.5522204", "text": "def export_values_json(self, filename, data):\n \n with open(filename, 'w') as outfile:\n json.dump(data, outfile, indent=4);", "title": "" }, { "docid": "97a98e0c29f04ef3dbcb6b2b058f448f", "score": "0.5521432", "text": "def save_as_JSON(self, filename):\n \n outputdata = self.generate_dict()\n\n with open(filename, 'w') as outputjson:\n json.dump(outputdata, outputjson, indent=4)", "title": "" }, { "docid": "f2d4a991c129115c5383b17551ed0d23", "score": "0.55147225", "text": "def dump_json(filename, stage, igv, rotor, stator):\n os.chdir('Output')\n with open(filename, 'w') as json_dump:\n json.dump(json.loads(json.dumps([[stage.root.__dict__,\n stage.mean.__dict__,\n stage.tip.__dict__],\n [igv.root.__dict__,\n igv.mean.__dict__,\n igv.tip.__dict__],\n [rotor.root.__dict__,\n rotor.mean.__dict__,\n rotor.tip.__dict__],\n [stator.root.__dict__,\n stator.mean.__dict__,\n stator.tip.__dict__]]),\n parse_float=lambda x: round(float(x), 3)),\n json_dump, indent=2)\n os.chdir('..')", "title": "" }, { "docid": "85663c3df753096f9e0fd7357bd9c020", "score": "0.55130184", "text": "def make_json(filename):\n with open(\"data.json\", \"w\") as f:\n json.dump(data, f)", "title": "" }, { "docid": "f6656f7c27157301b931c71cd242bc4b", "score": "0.55120814", "text": "def to_json_file(self, filepath):\n with open(filepath, 'w') as f:\n json.dump(self._data, f)", "title": "" }, { "docid": "3a986f02afbd050b20ff588796966d2d", "score": "0.55039585", "text": "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases]}\n f = open(filename, \"w\")\n json.dump(data,f)\n f.close()", "title": "" }, { "docid": "eb33d957375ea18b83f5db2ced19be40", "score": "0.548352", "text": "def compress(context, data, name):\n return 
CompressorNode(nodelist=TextNode(data), kind=name, mode='file').render(context=context)", "title": "" }, { "docid": "3be4594eb76888adab76009e683cd113", "score": "0.54792225", "text": "def _set_data_json(self):\n with open(self._file_name, 'w') as json_file:\n json.dump(self._config_data, json_file)", "title": "" }, { "docid": "8674274abda73932ea0c5fa0bbafb2cf", "score": "0.54718894", "text": "def write(self):\n with open(self.path, 'wt') as f:\n f.write(jsonEnc(self._getContents(), sort_keys=True, indent=3))", "title": "" }, { "docid": "3783bde540b8869d0d2246169d21c25c", "score": "0.54610837", "text": "def generateJSON(xml, jsonName):\n # Generates an unformatted JSON file from an XML file stripping unwanted strings\n os.system(\"met/xml2json.py -t xml2json -o \" + jsonName + \" \" + xml +\n \" --strip_text\")", "title": "" }, { "docid": "9a4d1778fcc9ee65a0853a11dd9d0dfc", "score": "0.54196304", "text": "def write_json_file(self, filename):\n with open(filename, \"w\") as f:\n f.write(self.to_json())", "title": "" }, { "docid": "a90a9af31cb8a394db8434816e1d6a34", "score": "0.5412421", "text": "def write_json_file(self, fname, content):\n fpath = pathlib.Path(self.src_dir).joinpath(fname)\n if not fpath.parent.exists():\n fpath.parent.mkdir(parents=True, exist_ok=True)\n with open(str(fpath.absolute()), 'w') as json_fileobj:\n return json.dump(content, json_fileobj)", "title": "" }, { "docid": "0a22bed694a4b8d7f20628c9767c4bcd", "score": "0.5407927", "text": "def save(self, filename):\n with open(filename, \"w\") as dst:\n json_obj = [v.json() for v in self.__vectors]\n dst.write(json.dumps(json_obj))", "title": "" }, { "docid": "c242190e978940b1813d64a1956ae776", "score": "0.53840137", "text": "def writejson(self, filename, data):\r\n with open(filename, 'w') as outfile:\r\n json.dump(data, outfile, sort_keys=True,\r\n indent=4, ensure_ascii=False)", "title": "" }, { "docid": "5a9fbcdb325c45eaf8f0a41395481c62", "score": "0.53653395", "text": "def write_json(self, data, filename): # Function to add to JSON\n\n with open(filename, 'w') as f:\n json.dump(data, f)", "title": "" }, { "docid": "bbdca52434276030b4b7faf44843bc85", "score": "0.5360251", "text": "def process_file(f):\n return compress([f])", "title": "" }, { "docid": "d494fecd65dcd2e754a440fa26404308", "score": "0.5358751", "text": "def _save_json(serializable, file_name):\n fn = os.path.abspath(file_name)\n DataView.create_dir(fn)\n\n with codecs.open(fn, 'w', encoding='utf-8') as f:\n json.dump(serializable, f, separators=(',\\n', ': '))", "title": "" }, { "docid": "d3771cf58803f369edf6d44f42c9c643", "score": "0.53542185", "text": "def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases],\n \"cost\": str(self.cost.__name__)}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()", "title": "" }, { "docid": "553249d6007fd236946631251568e5b2", "score": "0.5351722", "text": "def create_json(self) -> None:\r\n json_dump = json.dumps(self.final)\r\n json_str = json.loads(json_dump, parse_int=str)\r\n final_json = json.dumps(json_str, indent=4)\r\n with open('data.json', 'w') as outfile:\r\n outfile.write(final_json)\r\n logging.info(\"Json file successfully created.\")", "title": "" }, { "docid": "a7181a998c9c2d847b5c03b21f0ac13b", "score": "0.53472877", "text": "def fto_dump(data, the_path):\n with open(the_path, 'w') as f:\n json.dump(data, f)", "title": "" }, { "docid": "aa99e4260ff7dc480dd49632e23a96f4", "score": "0.5343564", 
"text": "def data_to_json_file(path, data):\n f = open(path, \"w\")\n print(data, file=f)\n f.close()", "title": "" }, { "docid": "ce91e48535fa4f3da09350a49f6b6966", "score": "0.53223234", "text": "def _save_json(self, file, file_name):\n return file", "title": "" }, { "docid": "14dba2417ad48948c5911338f3d2738e", "score": "0.53213364", "text": "def write_doujinshi_json(directory,filename,data):\r\n logger.info(\"Writing json output\")\r\n filepath = os.path.join(directory,filename)\r\n \r\n \r\n if not filename.endswith(\".json\"):\r\n filename = filename + \".json\"\r\n \r\n logger.debug(f\"Filepath:{filepath}\")\r\n \r\n tags = Tag_count()\r\n \r\n for doujinshi in data:\r\n if doujinshi is None:\r\n continue\r\n \r\n for tag in doujinshi.tags:\r\n tags.Insert(tag)\r\n \r\n data.append(tags.tags)\r\n \r\n try:\r\n create_path(directory)\r\n \r\n with open(filepath,\"w\") as json_file:\r\n json_file.write(\r\n json.dumps(data, default=lambda o: o.__dict__, \r\n sort_keys=True, indent=4))\r\n \r\n except OSError as error:\r\n logger.error(repr(error))\r\n exit(1)\r\n \r\n else:\r\n logger.info(\"Writing finished\")", "title": "" }, { "docid": "0c7ddd113a8cd4ed8432039c9a861845", "score": "0.5320808", "text": "def write_json_file(self, filename, contents):\n self.s3_resource.Object(self.bucket_name, filename).put(Body=json.dumps(contents))\n return None", "title": "" }, { "docid": "848c0549c00a905206e60378eff28776", "score": "0.53202945", "text": "def gzip_assets():\n logger.info(\"Starting to optimize and gzip assets\")\n p = Path(os.path.join(DATA_PATH, WWLANG))\n for fn in tqdm(list(p.glob(\"*.json\")) + list(p.glob(\"i18n/*.json\"))):\n logger.info(f\"Optimizing and gzipping '{fn.name}'\")\n # if fn.stat().st_size > 1400:\n file_path = str(fn)\n with open(file_path) as f:\n data = json.load(f)\n\n if \"conjugations.json\" == fn.name:\n longest = 0\n longest_key = \"root\"\n for k, v in data[0][\"input\"].items():\n if len(v) >= longest:\n longest = len(v)\n longest_key = k\n data.sort(key=lambda o: o[\"input\"][longest_key], reverse=True)\n assert all(set(o.keys()) == {\"input\", \"output\"} for o in data)\n\n with gzip.open(file_path + \".gz\", \"wt\") as zipfile:\n json.dump(\n data, zipfile, separators=(\",\", \":\"), ensure_ascii=False, sort_keys=True\n )", "title": "" }, { "docid": "2d19c527e744e4fa502881874fc7e0c8", "score": "0.53096056", "text": "def export_json(file_name, obj):\n\twith open(file_name, 'w') as output_stream:\n\t\tjson.dump(obj, output_stream, indent=2, default=_json_serializer)", "title": "" }, { "docid": "d254f62899ea51a4e27ccfd9dd11aa0e", "score": "0.53060395", "text": "def to_json(result_dict, filename):\n with open(filename, 'w') as f:\n content = json.dumps(result_dict)\n f.write(content)", "title": "" }, { "docid": "28c273918b7dae28f1d612dab9673827", "score": "0.53059274", "text": "def write_json(self, filename: str):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n file.write(self.model_dump_json())", "title": "" }, { "docid": "28c273918b7dae28f1d612dab9673827", "score": "0.53059274", "text": "def write_json(self, filename: str):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n file.write(self.model_dump_json())", "title": "" }, { "docid": "2dd401313f4d6bfa5fe44313de0492fe", "score": "0.5305066", "text": "def generateJSON(self, file_name, time_series):\n try:\n data = time_series\n data.get('result')['timeline'] = [str(date_str) for date_str in time_series.get('result').get(\"timeline\")]\n with open(file_name, 'w') as outfile:\n 
json.dump(data, outfile)\n except FileNotFoundError:\n pass", "title": "" }, { "docid": "54e39c93bfea99b8412990e772687d37", "score": "0.5301949", "text": "def dumpgeoipdata(filename, data):\n import json\n with open(filename, 'w') as f:\n json.dump(data, f)", "title": "" }, { "docid": "14fa1cb7f65deaa8ba99c14d66adc473", "score": "0.529736", "text": "def save( self, fname ):\n data = {}\n data[\"pairs\"] = self.pairs\n data[\"n_sym_funcs_per_pair\"] = self.n_sym_funcs_per_pair\n data[\"sym_func_width\"] = self.sym_func_width\n data[\"Rcut\"] = self.Rcut\n data[\"Rmin\"] = self.Rmin\n data[\"n_hidden\"] = len(self.output_weights)\n data[\"weights\"] = self.get_weights()\n with open(fname,'w') as outfile:\n json.dump(data,outfile)\n print (\"Results written to {}\".format(fname))", "title": "" }, { "docid": "e7c77f90b3fae0e74cd78b58031f2ade", "score": "0.5292759", "text": "def write_to_json(results, filename):\n \n with open(filename, 'w') as f:\n out_result = []\n for approach in results:\n result = approach.serialize()\n result['neo'] = approach.neo.serialize_neo()\n out_result.append(result)\n json.dump(out_result, f, indent=4)", "title": "" }, { "docid": "1d5747aaa3e86c33c2e7c9c26bdaf52d", "score": "0.52820367", "text": "def save(self):\n with open(\"release.info\", 'w') as f:\n data = self.jsonize()\n json.dump(data, fp=f, indent=4)", "title": "" }, { "docid": "cc76683e23f455283b899a89e1431281", "score": "0.52802646", "text": "def to_file(self, fp, sort_keys=False):\n dict_ = self.serialize()\n with open_file(fp, mode='w') as writer:\n json.dump(dict_, writer, indent=2, sort_keys=sort_keys)", "title": "" }, { "docid": "3291a7e557570fe746f01f36e68b160e", "score": "0.52786493", "text": "def create_json(path: str, rez: dict):\n with open(path, 'w') as f:\n json.dump(rez, f, ensure_ascii=False, indent=2)", "title": "" }, { "docid": "4755191c2d1f76cb432e89d5d0197953", "score": "0.5276012", "text": "def dumpJson(self, filename: str) -> None:\n d = self._convertForJson(self._getSerializable())\n with open(self.outdir / filename, \"w\") as f:\n json.dump(d, f)", "title": "" }, { "docid": "f36996471e0c175243cb5f2facd5d047", "score": "0.5272114", "text": "def test_convert_data_xml_to_json(self):\n pass", "title": "" }, { "docid": "5d22753ff20f08483bd30064892cec2d", "score": "0.52654535", "text": "def create_zip_with_json_files(data_per_file,output_path):\n\n INDENTATION_CHARS = 4\n\n zip = ZipFile(output_path,'w')\n\n for filename, data in data_per_file.items():\n\n if isinstance(data,list) or isinstance(data,dict):\n output = json.dumps(data,indent=INDENTATION_CHARS)\n zip.writestr(filename+'.json',output)", "title": "" }, { "docid": "cf3b02d1f7f54266a6fbb80b597dca6d", "score": "0.5263186", "text": "def save_to_file(self, file_name):\n with open(file_name + '.json', 'w', encoding='utf8') as f:\n json.dump(self.dict_flats, f, ensure_ascii=False)", "title": "" }, { "docid": "571001f1c7f9716fe99e7fcd32875037", "score": "0.5262575", "text": "def _write_json(self, filepath, filename, writemode, content, indent):\n with open(os.path.join(filepath, filename), writemode) as fil:\n json.dump(content, fil, indent=indent)\n self.logger.info(\"Wrote content to file {}\".format(filename))", "title": "" }, { "docid": "0593448a57f64ceb28eacc2e8006eeda", "score": "0.52580416", "text": "def to_json_file(self, json_file_path):\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())", "title": "" }, { "docid": "6faccc5aa91175d4ff2a15be83b3c3bf", "score": "0.52536666", 
"text": "def to_json(self):\n filename = f\"{self.coin}_{self.filename}.json\"\n relative_path = os.path.join(self.filepath, filename)\n absolute_path = os.path.join(os.getcwd(), relative_path)\n df = self._subset_columns()\n filtered_data = df.to_dict('records')\n try:\n with open(absolute_path, 'w', encoding='utf-8') as file:\n json.dump(filtered_data, file, ensure_ascii=False, indent=4)\n except Exception as e:\n print(e)\n else:\n print(\"Data successfully saved to the JSON file\")", "title": "" }, { "docid": "5e18760ab8028b05564937552a15ed21", "score": "0.52528226", "text": "def ConvertInfoToJSON(self):\n jsonFile = jsonPath + filename + \".json\"\n try:\n fin = open(filePath, \"r\")\n with open(jsonFile, \"w\")as fout:\n while True:\n line = fin.readline()\n if line == \"\":\n break\n json.dump(line, fout, indent=1)\n\n fin.close() \n return 0\n\n except Exception:\n return -1", "title": "" }, { "docid": "a437fc00e1046704480edc951c04ee52", "score": "0.52522707", "text": "def to_json_file(self, json_file_path):\n with open(json_file_path, \"w\", encoding='utf-8') as writer:\n writer.write(self.to_json_string())", "title": "" }, { "docid": "a437fc00e1046704480edc951c04ee52", "score": "0.52522707", "text": "def to_json_file(self, json_file_path):\n with open(json_file_path, \"w\", encoding='utf-8') as writer:\n writer.write(self.to_json_string())", "title": "" }, { "docid": "c3d2e1dfce741f817a860811fe7eaade", "score": "0.5248234", "text": "def save_as_json(obj, dest_file_name):\n LOG.debug(\"Saving to json at: \" + dest_file_name)\n j = json.dumps(obj)\n f = open(dest_file_name, 'w')\n f.write(j + \"\\n\")\n f.close()", "title": "" }, { "docid": "59c0924e679cb8c65522a65d88c35b1a", "score": "0.5241253", "text": "def _save_json(self, filename, data):\n with open(filename, 'w') as file:\n for d in data:\n del d['data']\n file.write(json.dumps(d))\n file.write('\\n')", "title": "" }, { "docid": "01af0cdd2009c68b8c88b952bf159cb4", "score": "0.52409256", "text": "def export_to_json(self):\n pass", "title": "" }, { "docid": "1d071d86d1998998082469395d2c2210", "score": "0.5238958", "text": "def to_json(self, filepath):\n\n json_string = json.dumps(self.__dict__, indent=4)\n filepath = Path(filepath)\n with open(filepath, \"w\") as f:\n f.write(json_string)", "title": "" }, { "docid": "159ae38cea1e3d4280f129deb7b7a6d0", "score": "0.523077", "text": "def toJson(self, fname):\n # dumps as list to retain order through default iterator encoding\n # that buggers things otherwise\n listify = ['hipercam.MccdWin'] + list(\n ((key,['hipercam.CcdWin']+list(val.items())) \\\n for key, val in self.items())\n )\n with open(fname, 'w') as fp:\n json.dump(listify, fp, cls=_Encoder, indent=2)", "title": "" }, { "docid": "14e187ea3e1d40db2a76c1dd84de9ee6", "score": "0.5229163", "text": "def save_data_as_json_file(self, data: List[Dict[str, Any]], file: str) -> None:\n with open(f\"{self.json_dir}/{file}\", \"w\") as outfile:\n json.dump(data, outfile)", "title": "" }, { "docid": "fe68c07837170e8206502bff116f169b", "score": "0.52183884", "text": "def serializeToFile(self, fname, annotations):\n with open(fname, 'w') as f:\n json.dump(annotations, f, indent=4, separators=(',', ': '), sort_keys=True)\n f.write(\"\\n\")", "title": "" }, { "docid": "262a548d38a366ed19d9ffa0d74f3f48", "score": "0.51935935", "text": "def writeJSON(filename, data):\n with open(filename, \"w\") as f:\n json.dump(data, f)", "title": "" }, { "docid": "ce9e5fdbff187ad8fcc49e5b5f2872b5", "score": "0.5192609", "text": "def json_file(filename, 
file_content):\n\n json_filename = filename.replace('*','-')\n json_file_content = json.dumps(json.loads(file_content),\n sort_keys=True,indent=4,\n separators=(',',':'))\n directory=os.getcwd()\n location=directory+\"/output/json-files\"\n if not os.path.exists(\"output/json-files\"):\n os.makedirs(\"output/json-files\")\n full_json_filename = location + \"/\" + json_filename \n file_object = open(full_json_filename,'w')\n file_object.write(json_file_content)\n file_object.close()\n print((\"JSON file Downloaded at %s\" %(full_json_filename)))", "title": "" }, { "docid": "0d21b8ea81c81425bbf71f62c0a42fb8", "score": "0.5192532", "text": "def write_file(filename, data):\n with open(filename, 'w') as output:\n json.dump(data, output)", "title": "" }, { "docid": "7183f75d8de58df209db00139a93c810", "score": "0.51891184", "text": "def save_to_json(data, filename, indent=4):\n with open(filename, 'w') as json_file:\n json.dump(data, json_file, indent=indent)", "title": "" }, { "docid": "9ab75227aed32b6efa44c82d496206aa", "score": "0.51864725", "text": "def toJson(self, fname):\n\n # dumps as list to retain order through default iterator encoding that\n # buggers things otherwise\n listify = ['hipercam.CcdWin'] + list(self.items())\n with open(fname,'w') as fp:\n json.dump(listify, fp, cls=_Encoder, indent=2)", "title": "" }, { "docid": "069838a27b3b2dc60135e4a6f992b464", "score": "0.5174381", "text": "def write_to_json(results, filename):\n json_data = list()\n for element in results:\n if element.neo.name is None:\n element.neo.name = ''\n json_data.append(\n {\n 'datetime_utc': datetime_to_str(element.time),\n 'distance_au': element.distance,\n 'velocity_km_s': element.velocity,\n 'neo': {\n 'designation': element.neo.designation,\n 'name': element.neo.name,\n 'diameter_km': element.neo.diameter,\n 'potentially_hazardous': element.neo.hazardous\n }\n }\n )\n\n with open(filename, 'w') as outfile:\n outfile.write(json.dumps(json_data, indent='\\t'))", "title": "" }, { "docid": "bb9e0a74cc169deb7981ca9a827fef70", "score": "0.51704663", "text": "def save_json_file(self):\n with open(self.json_file_name, 'w') as outfile:\n json.dump(self.results_dict,\n outfile,\n sort_keys=True,\n indent=4,\n ensure_ascii=False)", "title": "" }, { "docid": "1d79babd29a8fed7625c57b6c560f708", "score": "0.5167704", "text": "def write_json(self, filename: str):\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n sentiment = []\n\n for iv in self.segments.all_intervals:\n data_dict = iv.data.model_dump()\n data_dict[\"begin\"] = iv.begin\n data_dict[\"end\"] = iv.end\n sentiment.append(data_dict)\n\n json.dump(sentiment, file, allow_nan=True)", "title": "" }, { "docid": "485e8fd8dc869c109f678d1507a0160b", "score": "0.5152793", "text": "def save(self, filename):\n with open(filename, \"w\") as build_info:\n json.dump(self.info, build_info, skipkeys=True, indent=2)", "title": "" }, { "docid": "d4c48249e3519591609c7ce17ff5894c", "score": "0.51466936", "text": "def yaml_2_json(self, yaml_file_path=0):", "title": "" }, { "docid": "58e52f3d2b6e167e972db511ad32e7b6", "score": "0.5145426", "text": "def write_xml(filename, data, root_tag):\n if (len(data) == 0):\n\treturn\n\n xml_dir = os.path.normpath(os.path.join(results_dir, \"xml\"))\n \n if not os.path.isdir(xml_dir):\n os.makedirs(xml_dir)\n\n filename = os.path.join(xml_dir,filename)\n\n xml_document = minidom.Document()\n try:\n # se c'e' gia' qualcosa di scritto correttamente sul file xml lo mantengo\n document_tree = minidom.parse(filename).documentElement\n 
if booldebug:\n print(\"Struttura file recuperata da \"+filename)\n except:\n # altrimenti creo la nuova struttura\n document_tree = xml_document.createElement(root_tag)\n if booldebug:\n print(\"Creata struttura del file, root tag: \"+root_tag)\n\n xml_document.appendChild(document_tree)\n\n id_index = 1\n\n time_stamp = datetime.datetime.now()\n\n for element in data:\n elementChild = xml_document.createElement(\"element\")\n # imposta l'id con numero identificatore e data del log\n elementChild.setAttribute(\"id\", str(id_index) + \" \" + str(time_stamp))\n id_index += 1\n\n for feature_k, feature_v in element.items():\n featureChild = xml_document.createElement(str(feature_k))\n featureChild.appendChild(xml_document.createTextNode(str(feature_v)))\n elementChild.appendChild(featureChild)\n\n if booldebug:\n print(\"Aggiunto su contenuto del file: \" + feature_k)\n\n document_tree.appendChild(elementChild)\n\n xml_content = xml_document.toprettyxml(indent=\"\\t\").replace(\"\\t\\t\\n\", \"\").replace(\"\\t\\n\", \"\")\n\n file_out = open(filename, \"w\")\n file_out.write(xml_content)\n file_out.close()\n\n if booldebug:\n print(\"Terminata scrittura su file xml\")", "title": "" }, { "docid": "86e7906d96b835d3ca8215f7143da6d3", "score": "0.51433897", "text": "def _output_stuff(self):\n with open(os.getcwd()+\"/tmp/output.json\", \"wb\") as l:\n con = json.dumps(self.output, indent=4, sort_keys=True, encoding=\"ASCII\")\n l.write(con)", "title": "" }, { "docid": "f0d775bd68034512f945006597c5a906", "score": "0.51360154", "text": "def create_xml(self) -> None:\r\n xml = dicttoxml.dicttoxml(self.final, attr_type=False)\r\n final_xml = parseString(xml)\r\n with open('data.xml', 'w') as outfile:\r\n outfile.write(final_xml.toprettyxml())\r\n logging.info(\"XML file successfully created.\")", "title": "" }, { "docid": "04c8c61e2f86996e8cc9ba4b10410827", "score": "0.5131111", "text": "def dump_to_json_file(filepath, data, **extra_options):\n json_bytes = dump_to_json_bytes(data, **extra_options)\n with open(filepath, \"wb\") as f:\n f.write(json_bytes)\n return json_bytes", "title": "" }, { "docid": "6f26ff55e340e76f8caa59f83858c217", "score": "0.5129651", "text": "def output_file():\r\n output_file = open(\"data.js\", \"w\") \r\n print(\"var title =\",vartitle+\";\",\"\\nvar json_data=\\n\",varjson_data+\";\",file=output_file)\r\n output_file.close()", "title": "" }, { "docid": "308eef7d3fa4244eca08d22a0c7446f2", "score": "0.5127227", "text": "def write_json_dataset(self, features_df, filename):\n symbols = features_df['sym'].unique()\n with open(filename, 'wb') as f:\n\n for idx, sym in enumerate(symbols):\n sym_df = features_df[features_df['sym'] == sym]\n sym_df = sym_df.drop(columns='sym')\n\n json_obj = {\"start\": str(sym_df['time'].iloc[0]), \n \"target\": list(sym_df[self.target]), \n \"cat\":[idx], \n \"dynamic_feat\":[list(sym_df[column]) for column in self.features]}\n\n json_line = json.dumps(json_obj) + '\\n'\n json_line = json_line.encode('utf-8')\n\n f.write(json_line)\n\n self.mapping[sym] = idx\n print('JSON file created at ' + filename)", "title": "" }, { "docid": "60a2a48f55d2ef04ca40ed8d87387906", "score": "0.5118856", "text": "def save_vectorizer(self, vectorizer_filepath):\n print(f'vectorizer path is {vectorizer_filepath}')\n with open(vectorizer_filepath, 'w') as fp:\n json.dump(self.vectorizer.to_serializable(), fp)", "title": "" }, { "docid": "bb74c849c8c0b17042987fbfea0ecd6d", "score": "0.5114134", "text": "def process_map(file_in, pretty = False):\n file_out = 
\"{0}.json\".format(file_in)\n data = []\n with codecs.open(file_out, \"w\") as fo:\n for _, element in ET.iterparse(file_in):\n el = shape_element(element)\n if el:\n data.append(el)\n if pretty:\n fo.write(json.dumps(el, indent=2, ensure_ascii=False).encode('utf-8')+\"\\n\")\n else:\n fo.write(json.dumps(el, ensure_ascii=False).encode('utf-8') + \"\\n\")\n\n return data", "title": "" }, { "docid": "298de7c5a4da56a0f0d6b91c845559e5", "score": "0.5107435", "text": "def serialize(self, root):", "title": "" }, { "docid": "298de7c5a4da56a0f0d6b91c845559e5", "score": "0.5107435", "text": "def serialize(self, root):", "title": "" }, { "docid": "b90afaf42906267c25140cf5ec9fc3cc", "score": "0.51066726", "text": "def save(self):\n with open(self.filename, 'w') as fp:\n json.dump(self.values, fp, indent=' ')", "title": "" }, { "docid": "c2a99d6fde0022e3f466114eb3fd714f", "score": "0.5106615", "text": "def getJSON(self, filename):\r\n\t\tself.send_pkt('-g')\r\n\t\tdata = self.recvall()\r\n\t\t#print ('Received message:',data.decode())\r\n\t\tprint('Received message length : ', len(data))\r\n\t\twith open(filename, 'w') as f:\r\n\t\t\tf.write(data.decode())", "title": "" }, { "docid": "f21216504e31e26c929ae10abd3fe9ba", "score": "0.51059467", "text": "def save_library(self, filename):\r\n j_builds = []\r\n for bld in self._builds:\r\n j_builds.append(bld.serialize())\r\n\r\n with open(filename, 'w') as bch_fl:\r\n json.dump(j_builds, bch_fl)", "title": "" }, { "docid": "2164868680fa00c8a5d3cc788e7316da", "score": "0.509989", "text": "def write_in_file(self) :\n\n# Write the mesh and the flux\n np.savez(self.filename,x=self.x,y=self.y,\n flux_moments=self.flux_moments,p1sa_flxm=self.p1sa_flxm,\n mip_flxm=self.mip_flxm)\n# Write the flux_moments in a compressed files\n np.savetxt(self.filename+'.gz',self.flux_moments)", "title": "" }, { "docid": "db0e82e0a3406364ad44495a267a18ab", "score": "0.5097044", "text": "def process_map(file_in, pretty = False):\n \n file_out = \"{0}.json\".format(file_in)\n data = []\n with codecs.open(file_out, \"w\", encoding='utf-8') as fo:\n for _, element in ET.iterparse(file_in):\n el = shape_element(element)\n el = additional_cleaning(el)\n if el: \n data.append(el)\n if pretty:\n fo.write(json.dumps(el, indent=2, ensure_ascii=False)+\"\\n\")\n else:\n fo.write(json.dumps(el, ensure_ascii=False) + \"\\n\")\n return data", "title": "" }, { "docid": "e636269b9190c3d8085609cba4046865", "score": "0.50866544", "text": "def _save_json_cache(self, fname):\n import json\n cache = self._cache\n with _my_open(fname, 'w') as fd:\n json.dump([(k, cache[k].fileinfos) for k in cache],\n fd,\n indent=2,\n sort_keys=True)\n return", "title": "" }, { "docid": "46a7df8b6cce119cf24b12b8dccf8d2f", "score": "0.5086523", "text": "def write_geo_data_in_file(fout, geo_data):\n with open(fout, 'w') as fo:\n fo.write(json.dumps(geo_data, indent=4))", "title": "" }, { "docid": "0fbcc32dd2955f26fad6186fa6357741", "score": "0.5082315", "text": "def compress_file(file_name):\n\n with open(file_name, \"r\") as f:\n printout = f.read()\n printout = printout.encode(\"utf-8\")\n with gzip.open(file_name + \".gz\", \"wb\") as f:\n f.write(printout)", "title": "" }, { "docid": "551839de8c56d7a7c63ff9d2abaeaa2a", "score": "0.50814563", "text": "def saveGraph(self, file_name):\n with open(\"./data/\" + file_name, 'w') as outfile:\n json.dump(self.convertGraphToJSON(), outfile, indent=4, sort_keys=True)", "title": "" } ]
728861cd78198f1beb0a2d35a4cda8c4
Stream file to S3
[ { "docid": "a438e54a32e721581578a4a892c2ea3c", "score": "0.7667747", "text": "def stream_file_to_s3(upload_name, reader, is_certified=False):\n path, file_name = upload_name.rsplit('/', 1)\n logger.debug({\n 'message': 'Streaming file to S3',\n 'message_type': 'ValidatorDebug',\n 'file_name': file_name if file_name else path\n })\n\n if is_certified:\n handler = S3Handler.create_file_path(upload_name, CONFIG_BROKER[\"certified_bucket\"])\n else:\n handler = S3Handler.create_file_path(upload_name)\n\n with smart_open.smart_open(handler, 'w') as writer:\n while True:\n chunk = reader.read(CHUNK_SIZE)\n if chunk:\n writer.write(chunk)\n else:\n break", "title": "" } ]
[ { "docid": "b5c9b9311e5316eb730da379a6a9a78e", "score": "0.80118376", "text": "def memory_to_s3(bucket, object_path, file_stream):\n s3 = boto3.resource('s3')\n object = s3.Object(bucket, object_path)\n object.put(Body=file_stream)\n print('%s uploaded' % object_path)", "title": "" }, { "docid": "2e3a7cbd90f3cce392996769aaf5613d", "score": "0.72189814", "text": "def save_to_s3(obj, bucket_name, key_name):\n\n fb = BytesIO()\n joblib.dump(obj, fb)\n fb.seek(0)\n\n s3_client = boto3.client('s3')\n s3_client.upload_fileobj(fb, bucket_name, key_name)", "title": "" }, { "docid": "d59720843de137ad86ebfbd7eb004ed0", "score": "0.7077825", "text": "def put(self, filename, data):\n url = 'http://' + self.bucket + '.s3.amazonaws.com/' + filename\n return requests.put(url, data=data, auth=S3Auth(self.access_key, self.secret_key))", "title": "" }, { "docid": "ef07eceab8307c6bc5d79d10b7457d11", "score": "0.70468676", "text": "def _store_file_in_s3(request, filename: str) -> None:\n be_s3_bucket_name = settings.BULK_ENROLLMENT_TOOL_SETTINGS['bulk_enrollment_s3_bucket']\n\n # Stream content into S3 bucket.\n s3 = boto3.resource('s3')\n response = s3.Object(be_s3_bucket_name, filename).put(\n Body=request.FILES['bulkEnrollmentFile'].file.getvalue())\n\n logger.debug(\n f'Store bulk enrollment file in S3. File ID: {filename}', extra=response)\n\n return None", "title": "" }, { "docid": "8a904192c8082e99c6605e945f40ab4d", "score": "0.6991479", "text": "def put(sds_file):\n bucket = _get_bucket()\n new_object = bucket.Object(sds_file.s3_key)\n new_object.upload_file(sds_file.filepath,\n ExtraArgs={\"Metadata\": {\"checksum\": str(sds_file.checksum)}})", "title": "" }, { "docid": "e70222ada319eff89dce9c2bea541ec6", "score": "0.6985986", "text": "def upload_to_s3(bucket, file_name, value, mimetype):\n from boto.s3.key import Key\n AWS_HEADERS = {\n 'Cache-Control':'max-age=31556926,public'\n }\n k = Key(bucket)\n k.key = file_name\n k.content_type = mimetype\n k.set_contents_from_string(value, headers=AWS_HEADERS)\n k.set_acl('public-read')", "title": "" }, { "docid": "b96d2bcf9346e0249a816a8648742837", "score": "0.6947398", "text": "def upload_s3(file, key_name, content_type, bucket_name):\n # create connection\n conn = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)\n\n # upload the file after getting the right bucket\n bucket = conn.get_bucket(bucket_name)\n obj = S3Key(bucket)\n obj.name = key_name\n obj.content_type = content_type\n obj.set_contents_from_string(file.getvalue())\n obj.set_acl('public-read')\n\n # close stringio object\n file.close()\n\n return obj.generate_url(expires_in=0, query_auth=False)", "title": "" }, { "docid": "15b863f863621534342d9749f59fbc71", "score": "0.69276077", "text": "def upload_s3():\n try:\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(BUCKET_NAME)\n resp = bucket.upload_file(\n DATA_FILE, KEY, ExtraArgs={\n 'ACL': 'public-read'})\n except Exception as e:\n print(\"Error in uploading file to S3: \", e)", "title": "" }, { "docid": "52cb77866f1cd14bad88aa6a88c25aba", "score": "0.6913304", "text": "def amazon_s3(file_name):\n temp_file = tempfile.NamedTemporaryFile()\n s3 = boto3.client(\n \"s3\",\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n )\n s3.download_fileobj(\n settings.AWS_STORAGE_BUCKET_NAME, \"media/\" + file_name, temp_file\n )\n\n return temp_file", "title": "" }, { "docid": "081cc8b4f63f9ded94731e9c3c50b5b4", "score": "0.69026524", "text": "def stage_file_to_s3(\n s3,\n source_url,\n 
destination_bucket,\n destination_path):\n s3 = boto3.client('s3')\n file_name = os.path.basename(source_url)\n req = requests.get(source_url, stream=True)\n buffer = io.BytesIO()\n for chunk in req.iter_content(chunk_size=1024):\n if chunk:\n buffer.write(chunk)\n \n buffer.seek(0)\n s3.upload_fileobj(\n Fileobj=buffer,\n Bucket=destination_bucket,\n Key=destination_path)\n print(f'[ok] {file_name}')", "title": "" }, { "docid": "fe71d15c2ca89d2bd413b6d07b6d3964", "score": "0.6884302", "text": "def write_bytes_to_s3(bytes_:BytesIO, s3_uri:str):\n bytes_.seek(0)\n # Do it\n parsed_s3 = urlparse(s3_uri)\n s3.meta.client.upload_fileobj(Fileobj=bytes_, \n Bucket=parsed_s3.netloc, \n Key=parsed_s3.path[1:])", "title": "" }, { "docid": "0db5ed6c0a2a4f16f955a8248d3df74e", "score": "0.6864006", "text": "def save_to_s3(self, directory: str, bucket: str, s3=None) -> None:", "title": "" }, { "docid": "c45de5c659746ad6582710f6b6352db0", "score": "0.6863431", "text": "def put_object_s3(bucket, filename, filedata, content_type):\n bucket.put_object(\n Body=filedata,\n Key=filename,\n ContentType=content_type,\n ACL='public-read'\n )", "title": "" }, { "docid": "e035038102130f4515c8076a27718119", "score": "0.68391335", "text": "def copy_s3(self) -> None:\n bucket = self.base[\"s3-bucket\"]\n client = boto3.client(\n \"s3\",\n aws_access_key_id=os.getenv(\"S3_KEY\"),\n aws_secret_access_key=os.getenv(\"S3_SECRET\"),\n endpoint_url=os.getenv(\"S3_URL\"),\n )\n files = client.list_objects(Bucket=bucket)\n\n for path in files[\"Contents\"]:\n f = path[\"Key\"]\n makedirs(join(*f.split(\"/\")[:-1]), exist_ok=True)\n client.download_file(bucket, f, f)", "title": "" }, { "docid": "72c02e46e4d9ead52a6035493af6bd83", "score": "0.6760381", "text": "def store_in_s3(self):\n\n zip_file = '{}.zip'.format(self.tenant_name)\n\n s3 = resource(\n 's3',\n aws_access_key_id=environ.get('AWS_ACCESS_KEY_ID'),\n aws_secret_access_key=environ.get('AWS_SECRET_ACCESS_KEY')\n )\n\n if (s3.Bucket(environ.get('S3_BUCKET')) not in s3.buckets.all()):\n bucket = s3.create_bucket(Bucket=environ.get('S3_BUCKET'))\n else:\n bucket = s3.Bucket(environ.get('S3_BUCKET'))\n\n file_path = getcwd() + '/' + zip_file\n s3.Object(environ.get('S3_BUCKET'), zip_file).put(Body=open(file_path, 'rb'), ContentType='application/zip')\n\n s3_client = s3.meta.client\n\n url = s3_client.generate_presigned_url(\n ClientMethod='get_object',\n Params={\n 'Bucket': environ.get('S3_BUCKET'),\n 'Key': zip_file\n }, ExpiresIn=FILE_EXPIRATION_TIME)\n\n return url", "title": "" }, { "docid": "461cb176ab1f68272a9c3c72a08dd22b", "score": "0.6728785", "text": "def s3_resource(session):\n #print type(session)\n s3 = session.resource('s3')", "title": "" }, { "docid": "e954f151d829c6d31c5bf5ba25be11ab", "score": "0.67179686", "text": "def uploadToS3(self, filePath):\n with open(filePath, 'rb') as data:\n fileName = os.path.basename(filePath)\n self.S3Bucket.Object(fileName).put(Body=data, ACL='public-read')\n return self.getPublicLink(fileName)", "title": "" }, { "docid": "58fbb080124a1cfb0a50475597555765", "score": "0.6694606", "text": "def upload(file_: BinaryIO, path: str) -> str:\n # IMPORTANT: move the cursor to the begin of the file\n file_.seek(0, 0)\n\n s3 = _get_s3_client()\n s3.upload_fileobj(file_, AWS_S3_BUCKET_NAME, path)\n\n return os.path.join(AWS_S3_URL, path)", "title": "" }, { "docid": "7e425fd977004b69bbc0d7f0a0b8f02f", "score": "0.6681896", "text": "def _s3put(self, s3url, payload):\n rsp = requests.put(s3url, data=payload, headers={'Content-Type': 
'application/octet-stream'})\n if rsp.status_code == 200:\n logging.info(f\"s3 upload success\")\n else:\n logging.warning(f\"s3 upload fail: {rsp.status_code}\")", "title": "" }, { "docid": "b409e90287ff649c6079effd6b81fd61", "score": "0.66430396", "text": "def s3_get(url: str, temp_file: IO) ->None:\n s3_resource = boto3.resource('s3')\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "title": "" }, { "docid": "e493d331730748f881a5829aad563885", "score": "0.6633998", "text": "def store_file_aws(self, complete_file_path):\n # Store the file in AWS S3\n data = open(complete_file_path, 'rb')\n folder_name = date.today()\n\n s3 = boto3.resource(\n 's3',\n aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_access_secret_key,\n config=ConfigAWS(signature_version='s3v4')\n )\n try:\n s3.Bucket(aws_bucket_name).upload_file(complete_file_path, '%s/%s' % (folder_name, self.filename))\n s3Link = \"s3://\" + aws_bucket_name + \"/\" + str(folder_name) + \"/\" + self.filename\n return s3Link\n except FileNotFoundError:\n print(\"Error: The file was not found\")\n return \"\"\n except NoCredentialsError:\n print(\"Error: Credentials not available\")\n return \"\"", "title": "" }, { "docid": "73dcad126d1861f8a440c486feae77f3", "score": "0.66317457", "text": "def put_data_file_over_s3(_file):\n s3 = boto3.resource('s3')\n today = datetime.date.today()\n data_url_path = \"channelstats/rsyslog/twitter/public_data_of_page\" \\\n \"/%s/%s/%s/\" % (today.year, today.strftime('%m'), today.strftime('%d'))\n\n s3.Bucket('lambda-inin').upload_file(_file,\n data_url_path + _file.split(\"/\")[-1],\n ExtraArgs={'ContentType': 'text'})", "title": "" }, { "docid": "288942454ff5895a8a8638d26279a188", "score": "0.66154116", "text": "def s3_upload_data(s3_path, body):\n s3_bucket.put_object(Key=s3_path, Body=body)", "title": "" }, { "docid": "908aa6d8627857dfebff383b36c8a2f1", "score": "0.66130745", "text": "def update_content_file(s3_bucket, s3_key):\n content = json.loads(S3.get_object(Bucket=s3_bucket, Key=CONTENT_FILE_KEY)['Body'].read().decode('utf-8'))\n content.insert(0, {\"filename\": s3_key, \"thumbnail\": THUMBNAIL_PREFIX + s3_key})\n S3.put_object(Body=json.dumps(content), Bucket=s3_bucket, Key=CONTENT_FILE_KEY, ContentType=\"application/json\")", "title": "" }, { "docid": "e88e0912f766f22732bb30aab6fda623", "score": "0.66050625", "text": "def save_to_s3(filename, contents):\n s3 = boto3.resource('s3')\n obj = s3.Object(BUCKET_NAME, S3_PATH.format(filename))\n obj.put(Body=contents)", "title": "" }, { "docid": "8da5a296e356abefb7a18b35150477aa", "score": "0.6586423", "text": "def import_data_to_S3(self):\n\n subprocess.run([\"wget\", self.src_link])\n zipfile = \"log{Date}.zip\".format(Date = self.date)\n csvfile = \"log{Date}.csv\".format(Date = self.date)\n subprocess.run([\"unzip\", zipfile])\n subprocess.run([\"rm\", zipfile])\n subprocess.run([\"rm\", \"README.txt\"])\n subprocess.run([\"gzip\", csvfile])\n gzfile = \"log{Date}.csv.gz\".format(Date = self.date)\n subprocess.run([\"aws\", \"s3\", \"cp\", gzfile, \"s3://{Bucket}/\".format(Bucket = self.bucket)])\n subprocess.run([\"rm\", gzfile])", "title": "" }, { "docid": "66d9ad096e2f1da5f4a6a89b02c0d781", "score": "0.6585686", "text": "def upload_s3_file(filename: str):\n\n client = boto3.client(\"s3\")\n client.upload_file(filename, \"finm33150\", filename)", "title": "" }, { "docid": "16db60ca48c94c8505a83c1b997aca00", "score": "0.65854406", "text": "def 
get_image_stream_from_s3(path):\n s3client = boto3.client(\n 's3'\n )\n bucketname = \"solarnet-data\"\n file_to_read = path\n fileobj = s3client.get_object(\n Bucket=bucketname,\n Key=file_to_read\n )\n # img = load_img(io.BytesIO(fileobj['Body'].read()))\n return fileobj['Body'].read()", "title": "" }, { "docid": "93bc370b5a3e6de1f7b455904a4b664f", "score": "0.6584281", "text": "def download_s3_object(bucket, key, destination_path):\n s3_client = boto3.client('s3')\n s3_client.download_file(bucket, key, destination_path)\n return", "title": "" }, { "docid": "150de2623653e82530eb5386d345cbff", "score": "0.657318", "text": "def upload_file_aws(self, local_path, s3_path):\n\n try:\n\n s3_resource = boto3.resource(\n \"s3\",\n region_name=self.aws_cred[\"AWS_REGION\"],\n aws_access_key_id=self.aws_cred[\"ACCESS_KEY_ID\"],\n aws_secret_access_key=self.aws_cred[\"SECRET_ACCESS_KEY\"],\n )\n\n s3_resource.Bucket(self.aws_cred[\"BUCKET_NAME\"]).put_object(\n Key=s3_path, Body=open(local_path, \"rb\")\n )\n\n except ClientError as e:\n logging.error(e)\n return False\n return True", "title": "" }, { "docid": "b1a21e28b063157ab75fdcd60f62e0c2", "score": "0.6562699", "text": "def make_upload_s3(bucket_name, aws_access_key_id, aws_secret_access_key):\r\n import boto\r\n from boto.s3.key import Key\r\n def upload(arcpath):\r\n log.info('S3: Uploading %s to bucket %s.', arcpath, bucket_name)\r\n # connect to the bucket\r\n conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)\r\n bucket = conn.get_bucket(bucket_name)\r\n # create a key to keep track of our file in the storage\r\n k = Key(bucket)\r\n k.key = os.path.basename(arcpath)\r\n k.set_contents_from_filename(arcpath)\r\n log.info('S3: Upload finished.')\r\n return upload", "title": "" }, { "docid": "c4c104bec83137e655a9a46255ba9a7c", "score": "0.6540125", "text": "def s3_upload_file(file):\n S3_CLIENT.upload_file(OUTPUT_DIR + file,\n OUTPUT_BUCKET_NAME, OUTPUT_DIR + file)", "title": "" }, { "docid": "85389d598160b76b1bb1e319c99a157b", "score": "0.65378976", "text": "def write_file(self, filehandle):\n filepath = os.path.join(self.S3_DIRECTORY, filehandle.name)\n self.bucket.put_file(filepath, filehandle)", "title": "" }, { "docid": "df0dc4818c4ac05ff0e50ab6cb9b553e", "score": "0.6507062", "text": "def upload2s3(file_name, object_name):\n filename = \"{}/{}\".format(app.config['UPLOAD_FOLDER'],file_name)\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(filename, app.config['BUCKET_NAME'], object_name)\n\n return response", "title": "" }, { "docid": "17d84b8add147a59aaa8fd2c9aa1b6d7", "score": "0.6505637", "text": "def s3_handle(fname):\n import boto\n\n class S3Handle:\n def __init__(self, key):\n self._key = key\n self._iter = self._line_iter()\n def _line_iter(self):\n \"\"\"From mrjob: https://github.com/Yelp/mrjob/blob/master/mrjob/util.py\n \"\"\"\n buf = \"\"\n search_offset = 0\n for chunk in self._chunk_iter():\n buf += chunk\n start = 0\n while True:\n end = buf.find(\"\\n\", start + search_offset) + 1\n if end: # if find() returned -1, end would be 0\n yield buf[start:end]\n start = end\n # reset the search offset\n search_offset = 0\n else:\n # this will happen eventually\n buf = buf[start:]\n # set search offset so we do not need to scan this part of the buffer again\n search_offset = len(buf)\n break\n if buf:\n yield buf + '\\n'\n def _chunk_iter(self):\n dec = zlib.decompressobj(16 | zlib.MAX_WBITS) if self._key.name.endswith(\".gz\") else None\n for chunk in self._key:\n if dec:\n chunk = 
dec.decompress(chunk)\n if chunk:\n yield chunk\n def __enter__(self):\n return self\n def __exit__(self, *args):\n self.close()\n def __iter__(self):\n return self\n def read(self, size):\n return self._key.read(size)\n def next(self):\n return self._iter.next()\n def close(self):\n self._key.close(fast=True)\n\n bucket, key = s3_bucket_key(fname)\n s3 = boto.connect_s3()\n s3b = s3.get_bucket(bucket)\n s3key = s3b.get_key(key)\n return S3Handle(s3key)", "title": "" }, { "docid": "9defd26d07ac3b32e7c89131e332b45e", "score": "0.64973027", "text": "def _download_file_from_s3(bucket, object_key):\r\n\r\n # Create an S3 client for downloading files from S3\r\n s3_region = config['aws']['region_name']\r\n s3_client = boto3.client('s3', region_name=s3_region)\r\n\r\n # Download the object and decode it from bytes to a string\r\n return s3_client.get_object(Bucket=bucket, Key=object_key)['Body'].read().decode('utf-8')", "title": "" }, { "docid": "c4d3fe759272d614b149c5bc6d4dbb6a", "score": "0.6488309", "text": "def upload_to_s3(aws_access_key_id, aws_secret_access_key, file, bucket, key, callback=None, md5=None, reduced_redundancy=False, content_type=None):\n try:\n size = os.fstat(file.fileno()).st_size\n except:\n # Not all file objects implement fileno(),\n # so we fall back on this\n file.seek(0, os.SEEK_END)\n size = file.tell()\n\n conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)\n bucket = conn.get_bucket(bucket, validate=True)\n k = Key(bucket)\n k.key = key\n if content_type:\n k.set_metadata('Content-Type', content_type)\n sent = k.set_contents_from_file(file, cb=callback, md5=md5, reduced_redundancy=reduced_redundancy, rewind=True)\n\n # Rewind for later use\n file.seek(0)\n\n if sent == size:\n return True\n return False", "title": "" }, { "docid": "d1a9234a881f9466d6d3e8d2145e39b8", "score": "0.64808047", "text": "def standard_s3_file_transfer(bucket, s3_key_name, transfer_file, file_size_MB, use_rr):\n\n new_s3_file = bucket.new_key(s3_key_name)\n if transfer_file.endswith('.README'):\n new_s3_file.content_type = 'text/plain'\n new_s3_file.set_contents_from_filename(\n transfer_file, reduced_redundancy=use_rr, cb=upload_cb, num_cb=10)\n new_s3_file.set_acl(\"public-read\")", "title": "" }, { "docid": "b6a20f16209d171a6c9ad0da97dce439", "score": "0.64766496", "text": "def store_in_s3(self, log_file_name, log_path):\n conn = boto.connect_s3(self.AWSAccessKeyId, self.AWSSecretKey)\n #conn = boto.connect_s3(self.access_key, self.secret_key)\n bucket = conn.get_bucket(self.bucket)\n k = Key(bucket)\n k.key = log_file_name\n k.set_contents_from_filename(log_path)", "title": "" }, { "docid": "fb49bd3a11011f343fd622c078a6d1d5", "score": "0.64765656", "text": "async def _publish(self, content, s3_object_key):\n\n async with self.session.create_client(\n 's3',\n aws_secret_access_key=self.bookstore_settings.s3_secret_access_key,\n aws_access_key_id=self.bookstore_settings.s3_access_key_id,\n endpoint_url=self.bookstore_settings.s3_endpoint_url,\n region_name=self.bookstore_settings.s3_region_name,\n ) as client:\n self.log.info(f\"Processing published write to {s3_object_key}\")\n try:\n obj = await client.put_object(\n Bucket=self.bookstore_settings.s3_bucket,\n Key=s3_object_key,\n Body=json.dumps(content),\n )\n except ClientError as e:\n status_code = e.response['ResponseMetadata'].get('HTTPStatusCode')\n raise web.HTTPError(status_code, e.args[0])\n self.log.info(f\"Done with published write to {s3_object_key}\")\n\n return obj", "title": "" }, { "docid": 
"f1b543edf976f73668f16e480db2dad5", "score": "0.647195", "text": "def s3_upload_file(local_path, s3_path):\n s3_client.upload_file(local_path, S3_BUCKET_NAME, s3_path)", "title": "" }, { "docid": "cf53467b1b97cebc8da6e1539928db2b", "score": "0.64659226", "text": "def boto_s3_resource():\n return boto3.resource(\"s3\")", "title": "" }, { "docid": "5c7cf3de182dd2b5c32fb9c31f968629", "score": "0.6455249", "text": "def save_s3_product(s3obj, path):\n obj = s3obj\n s3path, filename = os.path.split(obj.key)\n outfile = os.path.join(path, filename)\n print(filename)\n with open(outfile, 'wb') as f:\n data = obj.get()['Body'].read()\n f.write(data)\n return outfile", "title": "" }, { "docid": "e0ebec07d90b728a234b9967810daa31", "score": "0.6440417", "text": "def uploadToS3(file_obj:Path, prefix:str='hr_bk_files')->None:\n\n from os.path import basename\n from datetime import datetime\n from pytz import timezone\n import boto3\n\n bucket = '' # Add your bucket here\n \n #s3_client = boto3.client('s3')\n s3_client = boto3.client('s3', aws_access_key_id='', aws_secret_access_key='')\n \n time = datetime.now(timezone('US/Eastern')).strftime('%Y%m%d')\n\n object_name = \"{prefix}/{year}/{month}/{day}/{name}\".format(prefix=prefix, year=time[0:4], month=time[4:6], day=time[6:], name=basename(file_obj))\n \n with open(file_obj, \"rb\") as f:\n s3_client.upload_fileobj(f, bucket, object_name)", "title": "" }, { "docid": "e04a3795bdcf0d4655ac9eadf4ee64a1", "score": "0.64345163", "text": "def upload_cache_file_to_s3(dataset_id, cache_file_as_dict):\n print(os.environ[\"S3_BUCKET_NAME\"])\n s3_object = s3.Object(os.environ[\"S3_BUCKET_NAME\"], f\"{dataset_id}_sample.json\")\n s3_object.put(Body=(bytes(json.dumps(cache_file_as_dict).encode(\"UTF-8\"))))\n update_cache_table(dataset_id)", "title": "" }, { "docid": "5a2b4cad0342e9ca9c32d6e02ac85fe1", "score": "0.6428141", "text": "def upload_file(file_name, bucket, object_name):\n s3_client = boto3.client('s3')\n response = s3_client.upload_file(file_name, bucket, object_name)\n\n return response", "title": "" }, { "docid": "7aab0c82735184cb836f9aefcf5e1ea5", "score": "0.6418069", "text": "def storage(s3_resource,\n bucket_name,\n file_name):\n\n _object = s3_resource.Object(\n bucket_name,\n file_name\n )\n\n _object.upload_file(\n file_name,\n ExtraArgs={\n 'ServerSideEncryption': 'AES256',\n 'StorageClass': 'STANDARD_IA'}\n )\n\n _object.reload()\n print(f\"\\t Reloading storage class: {_object.storage_class}\")", "title": "" }, { "docid": "cd9c5ff0e1c2a03a5bbb9a7296e6e9ec", "score": "0.64046115", "text": "def upload_to_s3(bucket, local_file, s3_file):\r\n s3 = boto3.client('s3', endpoint_url= host,aws_access_key_id=ACCESS_KEY,\r\n aws_secret_access_key=SECRET_KEY,config=Config(signature_version='s3v4'),\r\n region_name='US')\r\n print(' Uploading ' +local_file + ' as ' + bucket + '\\\\' +s3_file)\r\n try:\r\n s3.upload_file(local_file, bucket, s3_file)\r\n print(' '+s3_file + \": Upload Successful\")\r\n print(' ---------')\r\n return True\r\n except NoCredentialsError:\r\n print(\"Credentials not available\")\r\n return False", "title": "" }, { "docid": "ea4ad25ce348dc628b602923477d621a", "score": "0.64020735", "text": "def get(self, filename):\n url = 'http://' + self.bucket + '.s3.amazonaws.com/' + filename\n return requests.get(url, auth=S3Auth(self.access_key, self.secret_key))", "title": "" }, { "docid": "ab2d2cac4c8bfb7dba58f71c8f661412", "score": "0.63965064", "text": "def upload_file(file_name, bucket, content):\n # If S3 object_name was not 
specified, use file_name\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.resource('s3')\n response = s3_client.Object(bucket, object_name).put(Body=content)\n return response", "title": "" }, { "docid": "1dd53ff3b01e24ca82ff5dc6b84a8efe", "score": "0.6382369", "text": "def _cache_s3_call(self, file_name: str, objects: dict):\n with open(file_name, 'w') as outfile:\n json.dump(objects, outfile, default=json_serial, sort_keys=True, indent=2)", "title": "" }, { "docid": "b469ba87b704ce73087dd78c83dd592b", "score": "0.6373419", "text": "def upload_to_s3(file_):\n now_ = now()\n\n file_name = 'export-wins/{year}/{month}/{timestamp}.csv'.format(\n year=now_.year,\n month=now_.month,\n timestamp=now_.isoformat()\n )\n\n s3 = boto3.client(\n 's3',\n aws_access_key_id=settings.CSV_UPLOAD_AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.CSV_UPLOAD_AWS_SECRET_ACCESS_KEY,\n region_name=settings.CSV_AWS_REGION\n )\n s3.upload_fileobj(\n file_,\n settings.CSV_UPLOAD_AWS_BUCKET,\n file_name,\n ExtraArgs={'ServerSideEncryption': \"AES256\"}\n )\n return 's3://{bucket_name}/{key}'.format(\n bucket_name=settings.CSV_UPLOAD_AWS_BUCKET,\n key=file_name\n )", "title": "" }, { "docid": "fbc2f7cb0422464846b53e202525e17c", "score": "0.637075", "text": "def _upload(self):\n self.s3.upload_file(self.processed_path, S3Config.bucket, self.upload_path)", "title": "" }, { "docid": "033e7cfc96d19f1061a07f0b438ed54f", "score": "0.63704413", "text": "def _upload_file(filename):\n abort(500, \"Bucket not available\") if not s3_bucket else True\n response = s3_client.upload_file(\n Bucket = s3_bucket,\n Filename=filename,\n Key=filename\n )\n return response", "title": "" }, { "docid": "85e58d89fbd1e0a8c6efabf2d172f9f3", "score": "0.6362893", "text": "def load_to_bytes(s3_uri:str):\n parsed_s3 = urlparse(s3_uri)\n f = BytesIO()\n s3.meta.client.download_fileobj(parsed_s3.netloc, \n parsed_s3.path[1:], f)\n f.seek(0)\n return f", "title": "" }, { "docid": "5190fc25623aff968a01ca04e6747565", "score": "0.6360162", "text": "def file(layer, config, path=None):\n full_path = os.path.join(layer.basedir, path)\n s3bucket, s3key = _get_s3path(layer, config, full_path)\n s3 = S3(config)\n s3.cp(full_path, s3bucket, s3key)\n layer.logger.info(\"{} -> {}/{}\".format(full_path, s3bucket, s3key))\n if layer.type == \"sam\":\n return os.path.join(\"s3://\", s3bucket, s3key)\n else:\n return {'s3bucket': s3bucket, 's3key': s3key}", "title": "" }, { "docid": "aecede868cc61d9194df5764f15ff59e", "score": "0.63542867", "text": "def execute(self, context):\n S3DownloadAndUploadOperator.stage_file_to_s3(\n s3=self.s3,\n source_url=self.http_source_url,\n destination_bucket=self.s3_destination_bucket,\n destination_path=self.s3_destination_path)", "title": "" }, { "docid": "727f3d04a29892eb0a67289b676d9e04", "score": "0.6331013", "text": "def write_s3_file(filename, s3_data):\n this_key = '{}/{}.json'.format(AWS_S3_EVENT_PATH, filename)\n\n try:\n s3_client = boto3.client('s3')\n response = s3_client.put_object(\n Bucket=AWS_S3_EVENT_BUCKET,\n Body=json.dumps(s3_data),\n Key=this_key,\n ContentType='application/json'\n )\n except Exception as exc:\n print(exc)\n return\n\n return False if response['ResponseMetadata']['HTTPStatusCode'] != 200 else True", "title": "" }, { "docid": "db168b380329487ac9e11fcea5952ef2", "score": "0.6322121", "text": "def _s3get(self, s3url):\n with requests.get(s3url, stream=True) as rsp:\n if rsp.status_code == 200:\n getfile = tempfile.TemporaryFile()\n shutil.copyfileobj(rsp.raw, getfile)\n 
logging.info(f\"s3 download success\")\n getfile.seek(0)\n return getfile\n else:\n logging.warning(f\"s3 download fail: {rsp.status_code}\")\n return None", "title": "" }, { "docid": "4a1a407e1600f75a8cb5da82ba3c5605", "score": "0.63045406", "text": "def download_s3_file(filename: str):\n\n client = boto3.client(\"s3\", config=Config(signature_version=UNSIGNED))\n client.download_file(\"finm33150\", filename, filename)", "title": "" }, { "docid": "63bef8d40086d528d068302c14932e8f", "score": "0.6303911", "text": "def process_file(s3_bucket, s3_key):\n print(\"processing: s3://\" + s3_bucket + \"/\" + s3_key)\n base_id = hashlib.sha1(s3_key.encode('utf-8')).hexdigest()\n with tempfile.TemporaryDirectory() as tmpdir:\n srcFile = os.path.join(tmpdir, \"elb_log.txt\")\n s3.Bucket(s3_bucket).download_file(s3_key, srcFile)\n recnum = 0;\n batch = []\n with open(srcFile, \"r\") as src:\n for s in src:\n recnum += 1\n batch += process_record(base_id, recnum, s)\n if recnum % batch_size == 0:\n do_upload(batch, recnum)\n batch = []\n do_upload(batch, recnum)", "title": "" }, { "docid": "9347dc4fb7da3242fdf8c8639c54d955", "score": "0.6303541", "text": "def upload(fname, file):\n try:\n if len(fname) > 0:\n print(\"Publishing %s\" % (PATH_BUCKET + fname + '/' + fname + '.csv'))\n\n get_s3(SESSION).Object(BUCKET, PATH_BUCKET + fname + '/' + fname + '.csv').put(Body=file)\n except Exception as err:\n # Print error and raise it to stop loop for upload\n print(err)\n raise Exception(err)", "title": "" }, { "docid": "2611a0550b188629599c7ca85e17ec80", "score": "0.6293063", "text": "def s3_bucket_download(remote_file, local_file):\r\n s3client.download_file(recommendation_bucket, remote_file, local_file)", "title": "" }, { "docid": "af0514f9525764db2291c623bd1486c3", "score": "0.62914336", "text": "def multi_part_upload_s3(file_path, bucket_name, s3_key_name=None, use_rr=True, make_public=True):\n\n # connect to s3\n s3_connection = boto.connect_s3(S3_ACCESS_KEY, S3_SECRET_KEY)\n s3_bucket = s3_connection.get_bucket(bucket_name)\n if s3_key_name is None:\n s3_key_name = os.path.basename(file_path)\n\n # get file size\n file_size_MB = os.path.getsize(file_path) / 1e6\n if file_size_MB < 5000:\n standard_s3_file_transfer(\n s3_bucket, s3_key_name, file_path, file_size_MB, use_rr)\n\n else:\n multipart_s3_file_transfer(\n s3_bucket, s3_key_name, file_path, file_size_MB, use_rr)\n\n s3_key = s3_bucket.get_key(s3_key_name)\n if make_public:\n s3_key.set_acl(\"public-read\")", "title": "" }, { "docid": "adf084f5a8f21c00d8367c112201d7be", "score": "0.6280152", "text": "def pandas_to_s3(df, client, bucket, key):\n # Write DF to string stream\n csv_buffer = StringIO()\n df.to_csv(csv_buffer, index=False)\n\n # Reset stream position\n csv_buffer.seek(0)\n # Create binary stream\n gz_buffer = BytesIO()\n\n # Compress string stream using gzip\n with gzip.GzipFile(mode='w', fileobj=gz_buffer) as gz_file:\n gz_file.write(bytes(csv_buffer.getvalue(), 'utf-8'))\n\n # Write stream to S3\n response = client.put_object(Bucket=bucket, Key=key, Body=gz_buffer.getvalue())\n\n return response", "title": "" }, { "docid": "7b231089f0992379dd8f5fb7a2baee74", "score": "0.62800795", "text": "def upload_file_s3(s3_client, file, bucket):\n try:\n response = s3_client.upload_file(file, bucket, file)\n logging.info(f\"File {file} was uploaded in {bucket}.\")\n except ClientError as e:\n logging.error(f\"Failed in upload the file {file} - {e}\")", "title": "" }, { "docid": "2b729b5e7c381ec62e2cefaa8126a388", "score": "0.62788904", "text": 
"def download_file(s3_resource,\n bucket_name,\n file_name\n ):\n s3_resource.Object(bucket_name,\n file_name\n ).download_file(file_name)\n print(f\"\\tDownloaded File: {file_name} from bucket {bucket_name}\")", "title": "" }, { "docid": "9e2212ab822ab1f7a2835a0c7469d91d", "score": "0.62765026", "text": "def upload_file_with_signed_url(\n self,\n file: IOBase,\n signed_write_url: str,\n headers: Optional[Dict] = None,\n session: Optional[requests.Session] = None,\n ) -> Response:\n\n # check to see if server side encryption for S3 is desired\n # see https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingServerSideEncryption.html\n # see https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html\n lightly_s3_sse_kms_key = os.environ.get(LIGHTLY_S3_SSE_KMS_KEY, \"\").strip()\n # Only set s3 related headers when we are talking with s3\n if (\n utils.get_signed_url_destination(signed_write_url) == DatasourceType.S3\n and lightly_s3_sse_kms_key\n ):\n if headers is None:\n headers = {}\n # don't override previously set SSE\n if \"x-amz-server-side-encryption\" not in headers:\n if lightly_s3_sse_kms_key.lower() == \"true\":\n # enable SSE with the key of amazon\n headers[\"x-amz-server-side-encryption\"] = \"AES256\"\n else:\n # enable SSE with specific customer KMS key\n headers[\"x-amz-server-side-encryption\"] = \"aws:kms\"\n headers[\n \"x-amz-server-side-encryption-aws-kms-key-id\"\n ] = lightly_s3_sse_kms_key\n\n # start requests session and make put request\n sess = session or requests\n if headers is not None:\n response = sess.put(signed_write_url, data=file, headers=headers)\n else:\n response = sess.put(signed_write_url, data=file)\n response.raise_for_status()\n return response", "title": "" }, { "docid": "567271258694ef7d8daf90ecdeb95f5f", "score": "0.627581", "text": "def upload_file_to_s3(file, presigned_urls, max_chunk_size):\n echo(\n style(\n \"Uploading the file...\",\n fg=\"green\",\n bold=False,\n )\n )\n\n try:\n parts = []\n index = 0\n file_size = Path(file.name).stat().st_size\n for chunk_size in tqdm(range(0, file_size, max_chunk_size)):\n presigned_url_object = presigned_urls[index]\n part = presigned_url_object[\"partNumber\"]\n url = presigned_url_object[\"url\"]\n file_data = file.read(max_chunk_size)\n response = requests.put(url, data=file_data)\n if response.status_code != HTTPStatus.OK:\n response.raise_for_status()\n\n etag = response.headers[\"ETag\"]\n parts.append({\"ETag\": etag, \"PartNumber\": part})\n index += 1\n\n response = {\"success\": True, \"parts\": parts}\n except Exception as err:\n echo(style(\"\\nThere was an error while uploading the file: {}\".format(err), fg=\"red\", bold=True))\n response = {\n \"success\": False,\n \"parts\": []\n }\n return response", "title": "" }, { "docid": "d82211bc9d3f1c272c5a168e4200350c", "score": "0.6268786", "text": "def s3_fetch(url: str,\n s3: MaybeS3 = None,\n range: Optional[ByteRange] = None, # pylint: disable=redefined-builtin\n **kwargs) -> bytes:\n return s3_open(url, s3=s3, range=range, **kwargs).read()", "title": "" }, { "docid": "8c6d54e0ec063da25c7b52daa6b1e795", "score": "0.6260389", "text": "def download_file_from_s3(src: str, dest: str, content_type: Optional[str] = None) -> None:\n try:\n # Modules to talk to S3 might not always be available so we import things here.\n from toil.lib.aws.utils import get_object_for_url\n except ImportError:\n raise RuntimeError(\"Cannot access S3 as AWS modules are not available\")\n\n with open(dest, 'wb') as out_stream:\n obj = 
get_object_for_url(urlparse(src), existing=True)\n obj.download_fileobj(out_stream)", "title": "" }, { "docid": "645711671674e2cebbae521c9334b81b", "score": "0.6258981", "text": "def get_read_stream(self, uid):\n key = self.prep_key(uid)\n try:\n obj = self.my_s3.Object(self.bucket_name, key)\n raw = obj.get()['Body']\n except botocore.exceptions.ClientError as problem:\n logging.info('Got error in s3 operation: %s', str(problem))\n response = getattr(problem, 'response', None)\n if response is None:\n raise\n code = response.get('Error', {}).get('Code', None)\n if code is None:\n raise\n if code == 'NoSuchKey':\n raise KeyError(key)\n raise\n\n return raw", "title": "" }, { "docid": "f7dfeeaca54d2101bbb4bf205c1d1464", "score": "0.6254984", "text": "def store_on_s3(data, bucket, prefix):\n # Pickle data\n data = pickle.dumps(data)\n\n # s3 setup\n s3 = boto3.resource(\"s3\")\n # timestamp = str(time.time()).replace('.', '')\n filename = f\"{prefix}.pickle\"\n obj = s3.Object(bucket, filename)\n obj.put(Body=data)\n\n return filename", "title": "" }, { "docid": "f8a578f93bfb6d7692a3272b91a50395", "score": "0.62549686", "text": "def save_to_s3(dat, path, bucket, **kwargs):\n tmp_file = mk_tmp_file()\n\n ## Save to tmp file\n try:\n dat.to_csv(\n path_or_buf=tmp_file,\n sep='|',\n header=False,\n index=False,\n compression='gzip',\n **kwargs\n )\n except Exception as e:\n logging.exception('Failuring saving output to tmp file')\n raise e\n\n ## Copy to S3\n try:\n s3_resource = boto3.resource('s3')\n s3_object = s3_resource.Object(bucket.replace('s3://', ''), path)\n s3_object.upload_file(tmp_file)\n except Exception as e:\n logging.exception('Failure copying data to S3 bucket')\n raise e\n finally:\n os.remove(tmp_file)", "title": "" }, { "docid": "fab9e9f31372d227811b2876bf9b9d51", "score": "0.6254699", "text": "def upload_report_s3(path, bucketName, filename):\r\n s3 = boto3.resource('s3')\r\n try:\r\n s3.meta.client.upload_file(path, bucketName, filename)\r\n except ClientError as e:\r\n print(e)", "title": "" }, { "docid": "34d60f95e3b95c80cc9c9c03a4ffbb49", "score": "0.62456214", "text": "def upload_to_s3(aws_access_key_id, aws_secret_access_key, file, bucket, key, sub_directory, callback=None, md5=None, reduced_redundancy=False, content_type=None):\n try:\n size = os.fstat(file.fileno()).st_size\n except:\n # Not all file objects implement fileno(),\n # so we fall back on this\n file.seek(0, os.SEEK_END)\n size = file.tell()\n\n conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)\n bucket = conn.get_bucket(bucket, validate=True)\n k = Key(bucket)\n if sub_directory != \"\":\n k.key = os.path.join(sub_directory, key)\n else:\n k.key = key\n if content_type:\n k.set_metadata('Content-Type', content_type)\n sent = k.set_contents_from_file(file, cb=callback, md5=md5, reduced_redundancy=reduced_redundancy, rewind=True)\n\n # Rewind for later use\n file.seek(0)\n\n if sent == size:\n return True\n return False", "title": "" }, { "docid": "c74548be29a83b41113a879f6b822fad", "score": "0.62258816", "text": "def download_file(bucket_name, path, target, sagemaker_session):\n path = path.lstrip('/')\n boto_session = sagemaker_session.boto_session\n\n s3 = boto_session.resource('s3')\n bucket = s3.Bucket(bucket_name)\n bucket.download_file(path, target)", "title": "" }, { "docid": "d69381000dcb4727f83a23f42a2f015e", "score": "0.62181526", "text": "def upload_s3(filename, data, bucket_name=None):\n s3_instance = aws_init()\n if bucket_name is None:\n bucket_name = 
settings.AWS_DATA_BUCKET\n bucket = s3_instance.Bucket(bucket_name)\n if isinstance(data, pd.DataFrame):\n #If the data passed is a pandas dataframe\n #data['lastUpdateDate'] = datetime.datetime.now().date()\n csv_buffer = StringIO()\n data.to_csv(csv_buffer, encoding='utf-8-sig', index=False)\n filedata = csv_buffer.getvalue()\n content_type = 'text/csv'\n put_object_s3(bucket, filename, filedata, content_type)\n else:\n content_type = 'text/html'\n filedata = data\n put_object_s3(bucket, filename, filedata, content_type)\n\n report_url = f\"https://{bucket_name}.s3.{settings.AWS_REGION}.amazonaws.com/{filename}\"\n\n return report_url", "title": "" }, { "docid": "735f233492a61cc06239dd6851c3f7cb", "score": "0.6214688", "text": "def s3_share( self, s3_bucket_file ):\n os.system( \"s3cmd -P setacl %s\" % s3_bucket_file )", "title": "" }, { "docid": "2b71975ef8dbbc1d895e2ebb7804c1b5", "score": "0.6207866", "text": "def upload_to_s3():\n timestamp = strftime(\"%Y-%m-%d\", gmtime())\n bucket = S3_BUCKET_ROOT + '-backup-' + timestamp\n local('s3cmd mb %s' % bucket)\n for localfile in env.backupfiles:\n local('s3cmd put %s %s' % (localfile, bucket))\n env.backupfiles = []", "title": "" }, { "docid": "016a5c4a6d699ea17892b3b7af9f8e2b", "score": "0.6201882", "text": "def upload_file(file_name, bucket, object_name):\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.generate_presigned_url(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "title": "" }, { "docid": "1dd8d3d3b34c5b9cda0f501f4d09269e", "score": "0.6195654", "text": "def s3_read(s3_path):\n for line in open(s3_path, 'rb', encoding='utf-8'):\n print(line.decode('utf8'))", "title": "" }, { "docid": "f84c6bc7fd6446c793859c469f15873a", "score": "0.61955583", "text": "async def download_stream(\n self, bucket: str, object_name: str, *,\n headers: Optional[Dict[str, Any]] = None,\n timeout: int = DEFAULT_TIMEOUT,\n session: Optional[Session] = None,\n ) -> StreamResponse:\n return await self._download_stream(\n bucket, object_name,\n headers=headers, timeout=timeout,\n params={'alt': 'media'},\n session=session,\n )", "title": "" }, { "docid": "8f31c39f45573d7f5cae25db32da6d0e", "score": "0.61923546", "text": "def s3_dump(data: Union[bytes, str, IO],\n url: str,\n s3: MaybeS3 = None,\n **kwargs):\n\n s3 = s3 or s3_client()\n bucket, key = s3_url_parse(url)\n\n r = s3.put_object(Bucket=bucket, # type: ignore[attr-defined]\n Key=key,\n Body=data,\n **kwargs)\n code = r['ResponseMetadata']['HTTPStatusCode']\n return 200 <= code < 300", "title": "" }, { "docid": "5965757f7c10d25953b41a274a06ef47", "score": "0.6189247", "text": "def s3_bucket_upload(local_file, remote_file):\r\n s3client.upload_file(local_file, recommendation_bucket, remote_file)", "title": "" }, { "docid": "6c3cf9f92e85b9ba7af09bbded0d4fee", "score": "0.6180995", "text": "def open_s3(self):\r\n conn = boto.connect_s3(\r\n self.AWS_ACCESS_KEY_ID,\r\n self.AWS_SECRET_ACCESS_KEY,\r\n **self.get_s3connection_kwargs())\r\n try:\r\n bucket = conn.get_bucket(self.AWS_BUCKET_NAME)\r\n except boto.exception.S3ResponseError:\r\n bucket = conn.create_bucket(self.AWS_BUCKET_NAME)\r\n return bucket, boto.s3.key.Key(bucket)", "title": "" }, { "docid": "923f087e13cf423620fd7ea46650904d", "score": "0.6180162", "text": "def download_s3_file(bucket, key):\n filename = os.path.join(\"/tmp\", key)\n S3.download_file(Bucket=bucket, Key=key, Filename=filename)\n return filename", "title": "" }, { "docid": 
"3f258c09dde22df6cae62c495d23c616", "score": "0.6177846", "text": "def s3_dump(data, url, s3=None, **kwargs):\n\n s3 = s3 or make_s3_client()\n bucket, key = s3_url_parse(url)\n\n r = s3.put_object(Bucket=bucket,\n Key=key,\n Body=data,\n **kwargs)\n code = r['ResponseMetadata']['HTTPStatusCode']\n return 200 <= code < 300", "title": "" }, { "docid": "73195555fc605fd720527bca19b35449", "score": "0.61642385", "text": "def put_object_pickled_gzip(self, s3_bucket: str, s3_key: str, obj: Any) -> bool:\n assert isinstance(s3_bucket, str)\n assert isinstance(s3_key, str)\n assert object is not None\n\n # Convert Object to Pickled GZIP File\n gz_buffer = io.BytesIO()\n with gzip.GzipFile(mode='w', fileobj=gz_buffer) as gz_file:\n pickle.dump(obj, gz_file) # Saves 50% of space!\n\n response = self._client.put_object(\n Bucket=s3_bucket, Body=gz_buffer.getvalue(), Key=s3_key)\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200", "title": "" }, { "docid": "df91239d525ab3b755bd87ca1586d27e", "score": "0.6161259", "text": "def download_object(self, s3_dict):\n\n bucket_name = s3_dict['bucket']['name']\n object_key = s3_dict['object']['key']\n object_path, object_name = os.path.split(object_key)\n\n download_path = os.path.join(self.target_dir, object_path)\n if self.flatten:\n download_path = self.target_dir\n\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n\n if s3_dict['object']['size']:\n full_download = os.path.join(download_path, object_name)\n\n logging.info(\"Downloading '%s' from bucket '%s' to '%s'...\" %\n (object_key, bucket_name, full_download))\n\n start_time = time.time()\n self.s3.download_file(bucket_name, object_key, full_download)\n elapsed_time = time.time() - start_time\n file_size = os.path.getsize(full_download)\n logging.info('Downloading complete. 
%s MiBs downloaded in %s seconds at %s MiB/s' %\n (file_size / MiB, elapsed_time, file_size / MiB / elapsed_time))\n else:\n logging.warning('Skipping directory or 0 byte file: %s' % object_key)", "title": "" }, { "docid": "49e249ed055b984bb53e7340c938da70", "score": "0.61553293", "text": "def upload_file_to_bucket(json_path = 'json', filename_local, filename_s3, bucket_name):\n # Read in the contents of the json file from the local filesystem\n data = []\n with open(json_path+'\\'+filename_local) as json_data:\n data = json.load(json_data)\n\n # Write the contents to a json file in the S3 bucket\n s3 = boto3.resource('s3')\n obj = s3.Object(bucket_name,filename_s3)\n obj.put(Body=json.dumps(data))", "title": "" }, { "docid": "26bf5382f91596800a6350550be94aa1", "score": "0.61536217", "text": "def upload_to_s3(local_path, s3_path, bucket=\"betfair-exchange-qemtek\"):\n # Upload the file\n try:\n s3_client.upload_file(local_path, bucket, s3_path)\n except ClientError as e:\n logging.error(e)\n print(e)", "title": "" }, { "docid": "3bf7e6e949176c050731742222440e66", "score": "0.6139956", "text": "def download_from_s3(local_path, s3_path, bucket=\"betfair-exchange-qemtek\"):\n try:\n s3_client.download_file(Bucket=bucket, Key=s3_path, Filename=local_path)\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise", "title": "" }, { "docid": "cf664d72947e2b497101864240dde27a", "score": "0.6139688", "text": "def upload():\n s3_client = boto3.client(\"s3\")\n\n # Generate a random S3 key name\n audio_file_uuid = uuid.uuid4().hex\n\n fields = None\n conditions = None\n print(\"**** %s.mp3\"%(audio_file_uuid))\n try:\n\n # Generate the presigned URL for put requests\n # presigned_url = s3_client.generate_presigned_url(ClientMethod='put_object',\n # Params={\"Bucket\": S3_BUCKET_NAME, \"Key\": upload_key}, ExpiresIn=EXPIRATION)\n presigned_url = s3_client.generate_presigned_post(S3_BUCKET_NAME,\n \"%s.mp3\"%(audio_file_uuid),\n Fields=fields,\n Conditions=conditions,\n ExpiresIn=EXPIRATION)\n except ClientError as e:\n print(\"[-] Error: %s\"%(e))\n return Response(status_code=400,\n headers={'Content-Type': 'application/json'},\n body={'status': 'error',\n 'response': \"Error generating pre-signed S3 URL\"})\n\n return Response(status_code=200,\n headers={'Content-Type': 'application/json'},\n body={'status': 'success',\n 'response': presigned_url})", "title": "" }, { "docid": "25472af51148b13b777759c1247a88b1", "score": "0.61377054", "text": "def upload_to_s3(bucket, artefact, bucket_key):\n try:\n client = boto3.client('s3')\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n try:\n client.put_object(\n Body=open(artefact, 'rb'),\n Bucket=bucket,\n Key=bucket_key\n )\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n return True", "title": "" }, { "docid": "b3766a565551be9554e0f26a1eac0395", "score": "0.61372674", "text": "def upload_file_to_bucket(s3_client, file_obj, bucket, folder, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n \n rec = (\"course_video\"+str(gid))\n object_name = rec\n\n # Upload the file\n try:\n # with open(\"files\", \"rb\") as f:\n upload = s3_client.upload_fileobj(file_obj, bucket, f\"{folder}/{object_name}\")\n print(upload)\n #return 
upload\n except ClientError as e:\n logging.error(e)\n return False\n return True", "title": "" }, { "docid": "0c4871033376b15c8ed3d160be0cb61a", "score": "0.61370075", "text": "def upload_file(cls, file_path, bucket_name=os.environ.get('AWS_STORAGE_BUCKET_NAME'), filename=None):\n try:\n s3 = client('s3', aws_secret_access_key=cls.__ACCESS_KEY,\n aws_access_key_id=cls.__ACCESS_ID)\n if not os.path.isfile(file_path):\n raise Exception('Invalid file path')\n\n filename = filename if filename else os.path.basename(file_path)\n s3.upload_file(file_path, bucket_name, filename, ExtraArgs={'ACL': 'public-read'})\n return '%s/%s/%s' % (cls.__S3_BASE_URL, bucket_name, filename)\n except Exception, e:\n raise e", "title": "" }, { "docid": "98ca4ec78c4e15c687beb1f0d7a9f31f", "score": "0.61182433", "text": "def open_s3_object(\n path: str,\n mode: str,\n version_id: Optional[str] = None,\n use_threads: Union[bool, int] = False,\n s3_additional_kwargs: Optional[Dict[str, str]] = None,\n s3_block_size: int = -1, # One shot download\n boto3_session: Optional[boto3.Session] = None,\n newline: Optional[str] = \"\\n\",\n encoding: Optional[str] = \"utf-8\",\n) -> Iterator[Union[_S3ObjectBase, io.TextIOWrapper]]:\n s3obj: Optional[_S3ObjectBase] = None\n text_s3obj: Optional[io.TextIOWrapper] = None\n try:\n s3obj = _S3ObjectBase(\n path=path,\n s3_block_size=s3_block_size,\n mode=mode,\n version_id=version_id,\n use_threads=use_threads,\n s3_additional_kwargs=s3_additional_kwargs,\n boto3_session=boto3_session,\n encoding=encoding,\n newline=newline,\n )\n if \"b\" in mode: # binary\n yield s3obj\n else: # text\n text_s3obj = io.TextIOWrapper(\n buffer=cast(BinaryIO, s3obj),\n encoding=encoding,\n newline=newline,\n line_buffering=False,\n write_through=False,\n )\n yield text_s3obj\n finally:\n if text_s3obj is not None and text_s3obj.closed is False:\n text_s3obj.close()\n if s3obj is not None and s3obj.closed is False:\n s3obj.close()", "title": "" }, { "docid": "5e0367107e54086f477fe322fcb41df5", "score": "0.6114526", "text": "def download_file(sds_file, dest_path):\n bucket = _get_bucket()\n bucket.Object(sds_file.s3_key).download_file(dest_path)", "title": "" }, { "docid": "edbeb3e89ff8389e7db48620a6376f2f", "score": "0.6111297", "text": "def download_s3(remote_fname, local_fname, bucket_name=\"alpenglowoptics\"):\n if not os.path.exists(local_fname):\n s3 = boto3.resource('s3')\n b = s3.Bucket(bucket_name)\n b.download_file(remote_fname, local_fname)", "title": "" } ]
e8a02ce5f26fa8cc2d306060dd37be47
sort the car models (values) and return the resulting cars dict
[ { "docid": "404e0b2973bc4b852495dda2a2ade319", "score": "0.8415727", "text": "def sort_car_models(cars=cars):\n sorted_cars = {}\n for brand, models in cars.items():\n models.sort()\n sorted_cars[brand] = models\n return sorted_cars", "title": "" } ]
[ { "docid": "c8ac09c6b760c42b3fc69469c5426c6b", "score": "0.6511038", "text": "def sorted_model(self):\n return sorted(self.model.items(), key=operator.itemgetter(1))", "title": "" }, { "docid": "b55d91bcb4c51b57eee168deb7e6743d", "score": "0.63506234", "text": "def sort(self):\n sorted_models = []\n concrete_models = set()\n models = list(self.data)\n while len(sorted_models) < len(models):\n found = False\n for model in models:\n if model in sorted_models:\n continue\n deps = self.dependencies.get(model._meta.concrete_model)\n if not (deps and deps.difference(concrete_models)):\n sorted_models.append(model)\n concrete_models.add(model._meta.concrete_model)\n found = True\n if not found:\n return\n self.data = {model: self.data[model] for model in sorted_models[::-1]}", "title": "" }, { "docid": "f58daa283911becf1e133aa23357c21f", "score": "0.60691667", "text": "def smart_sort_models(self, app):\n app_config = apps.get_app_config(app['app_label'])\n models = list(app_config.get_models())\n if all([hasattr(x, 'model_order') for x in models]):\n # inject ordering into dictionary\n for m in models:\n ml = next((x for x in app['models'] if x['object_name'] == m.__name__))\n ml['model_order'] = m.model_order\n app['models'] = sorted(app['models'], key=lambda x: x['model_order'])", "title": "" }, { "docid": "0ac73de11540470c79147f20977d401f", "score": "0.6003834", "text": "def sort(self):\n self.cartas.sort()", "title": "" }, { "docid": "7d25b502fc58b56966cd4bb86b6e3fe2", "score": "0.585631", "text": "def get_cars_data():\n cars = db.get_all_cars()\n cars_list = []\n for car in cars:\n cars_list.append(display_car(car))\n cars_dict = {i: cars_list[i] for i in range(len(cars_list))}\n return cars_dict", "title": "" }, { "docid": "c5056364d117b8b92710cbaecfc5e303", "score": "0.56478745", "text": "def get_all_matching_models(cars=cars, grep=\"trail\"):\n all_models = []\n for models in cars.values():\n all_models += models\n grep = grep.lower()\n matching = []\n for model in all_models:\n if grep in model.lower():\n matching.append(model)\n matching.sort()\n return matching", "title": "" }, { "docid": "e257ff3713d6e66f59d42a4947b82533", "score": "0.5615401", "text": "def get_car_pool(car_models_info):\n return [Car(info['file path'], info['main object name'], info['scaling factor'], info['camera_pos'])\n for info in car_models_info]", "title": "" }, { "docid": "8382c9805b9b09fe1e1e72d74eb4faca", "score": "0.5574958", "text": "def update_position_of_cars(self):\n self.cars = dict()\n for car_id in self.cars_info.keys():\n self.cars[car_id] = Car(car_id, self)", "title": "" }, { "docid": "2071fe6a983c1dfc4b744db837ea408d", "score": "0.55441517", "text": "def _group_models(self):\n od = {}\n\n # Add models to appropriate list. 
We only aggregate models which\n # completed successfully and have a valid AIC and BMD.\n for i, model in enumerate(self.models):\n output = getattr(model, \"output\", {})\n if output.get(\"AIC\") and output.get(\"BMD\") and output[\"BMD\"] > 0:\n key = \"{}-{}\".format(output[\"AIC\"], output[\"BMD\"])\n if key in od:\n od[key].append(model)\n else:\n od[key] = [model]\n else:\n od[i] = [model]\n\n # Sort each list by the number of parameters\n def _get_num_params(model):\n return (\n len(model.output[\"parameters\"])\n if hasattr(model, \"output\") and \"parameters\" in model.output\n else 0\n )\n\n for key, _models in od.items():\n _models.sort(key=_get_num_params)\n\n return list(od.values())", "title": "" }, { "docid": "b19e8ce42e67faad6d3219397f8cf91d", "score": "0.5500205", "text": "def sortCar(sim_time: int, car_list: List):\n for car in car_list:\n car.path_time = car.getLength()\n\n cars_sorted = sorted(car_list)\n cars_sorted = [c for c in cars_sorted if c.path_time <= sim_time]\n\n return cars_sorted", "title": "" }, { "docid": "7d51c3179a2aad7311c27cec21092d42", "score": "0.54927796", "text": "def all_cars(request):\n\n cars = Car.objects.filter(for_sale=True)\n query = None\n brand = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'brand':\n sortkey = 'brand__brand_name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n cars = cars.order_by(sortkey)\n\n if 'brand' in request.GET:\n brands = request.GET['brand'].split(\",\")\n cars = cars.filter(brand__brand_name__in=brands)\n brands = Brand.objects.filter(brand_name__in=brands)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria\")\n return redirect(reverse('cars'))\n\n queries = Q(make__icontains=query) | Q(model__icontains=query) \\\n | Q(year__icontains=query) \\\n | Q(transmission__icontains=query) \\\n | Q(fuel_type__icontains=query)\n\n cars = cars.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'cars': cars,\n 'search_term': query,\n 'current_brands': brand,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'cars/cars.html', context)", "title": "" }, { "docid": "74386c580cbfda0050be966172a51ad5", "score": "0.5482412", "text": "def sort(self):\n pass", "title": "" }, { "docid": "be93864afb3a12c4c28da1ec907952e5", "score": "0.54293966", "text": "def dataCollection():\n words = findWords(titles)\n dicts = sort(words)\n return dicts", "title": "" }, { "docid": "f5bed525b143f095de48ee715bd0f173", "score": "0.5394283", "text": "def get_queryset(self):\n\n carmake_pk = self.request.query_params.get('carmake_pk', None)\n carmodel_pk = self.request.query_params.get('carmodel_pk', None)\n cartrim_pk = self.request.query_params.get('cartrim_pk', None)\n\n print(carmake_pk, carmodel_pk, cartrim_pk)\n\n if cartrim_pk is not None and cartrim_pk != '':\n queryset = CarResult.objects.filter(cartrim_pk=cartrim_pk).order_by(\n '-price').order_by('miles').order_by('-year')\n elif carmodel_pk is not None and carmodel_pk != '':\n queryset = CarResult.objects.filter(carmodel_pk=carmodel_pk).order_by(\n '-price').order_by('miles').order_by('-year')\n elif carmake_pk is not None and carmake_pk != '':\n queryset = CarResult.objects.filter(carmake_pk=carmake_pk).order_by(\n '-price').order_by('miles').order_by('-year')\n else:\n queryset = 
CarResult.objects.all().order_by('year')\n return queryset", "title": "" }, { "docid": "c17f826fb51765d8d3fc96dd8be2b95e", "score": "0.5353031", "text": "def all_vehicles(request):\n\n vehicles = Vehicle.objects.all()\n query = None\n categories = None\n sort = None\n direction = None\n\n if request.GET:\n if 'sort' in request.GET:\n sortkey = request.GET['sort']\n sort = sortkey\n if sortkey == 'name':\n sortkey = 'lower_name'\n vehicles = vehicles.annotate(lower_name=Lower('name'))\n if sortkey == 'category':\n sortkey = 'category__name'\n if 'direction' in request.GET:\n direction = request.GET['direction']\n if direction == 'desc':\n sortkey = f'-{sortkey}'\n vehicles = vehicles.order_by(sortkey)\n\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n vehicles = vehicles.filter(category__name__in=categories)\n categories = Category.objects.filter(name__in=categories)\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria\")\n return redirect(reverse('vehicles'))\n\n queries = Q(name__icontains=query\n ) | Q(description__icontains=query)\n vehicles = vehicles.filter(queries)\n\n current_sorting = f'{sort}_{direction}'\n\n context = {\n 'vehicles': vehicles,\n 'search_term': query,\n 'current_categories': categories,\n 'current_sorting': current_sorting,\n }\n\n return render(request, 'vehicles/vehicles.html', context)", "title": "" }, { "docid": "64cd4dfae69c5a72b7df0165bb2b3c11", "score": "0.5346408", "text": "def get_cars(request):\n lat = request.GET.get('lat')\n lng = request.GET.get('lng')\n km_range = request.GET.get('km_range')\n car_choice = request.GET.get('car_choice')\n db_cars = []\n if car_choice == 'pink':\n db_cars = Cabs.objects.filter(car_booked=False, car_pink=True).values('cab_number', 'latitude', 'longitude')\n else:\n db_cars = Cabs.objects.filter(car_booked=False).values('cab_number', 'latitude', 'longitude')\n cars_in_range = 0\n nearest_car = {}\n min_dist = km_range\n user_loc = Point(str(lat) + \", \"+ str(lng))\n for car in db_cars:\n car_loc = Point(str(car['latitude']) + \", \" + str(car['longitude']))\n dist = distance.distance(user_loc, car_loc).kilometers\n if dist <= int(km_range):\n cars_in_range += 1\n if dist < min_dist:\n nearest_car = car\n to_json = {}\n to_json['cars_available'] = cars_in_range\n to_json['nearest_car'] = nearest_car\n return HttpResponse(json.dumps(to_json), content_type=\"application/json\")", "title": "" }, { "docid": "0b9bfb2595ad3ae1e3c1b30c1db11bbb", "score": "0.5346088", "text": "def sort_objects(self, request, objects, sort_attr, descending):\r\n return objects", "title": "" }, { "docid": "22fc2527af7ea87710917080fcc97075", "score": "0.53268653", "text": "def sort_movies(self, movies):", "title": "" }, { "docid": "1935effa510cd5ca5639a57c449f94bd", "score": "0.531316", "text": "def make_car(manufacturar, model, **extra_info):\n car_dict = {}\n car_dict['manufacturar_name'] = manufacturar.title()\n car_dict['model_name'] = model.title()\n for key, value in extra_info.items():\n car_dict[key] = value\n return car_dict", "title": "" }, { "docid": "134560aa240d53fd6f1ddc69793e6a1e", "score": "0.5312566", "text": "def process_data(data):\n #Ílocale.setlocale(locale.LC_ALL, 'en_US.UTF8')\n max_revenue = {\"revenue\": 0}\n sales = {\"total_sales\": 0}\n best_car = {}\n\n for item in data:\n # Calculate the revenue generated by this model (price * total_sales)\n # We need to convert the price from \"$1234.56\" to 1234.56\n 
item_price = locale.atof(item[\"price\"].strip(\"$\"))\n item_revenue = item[\"total_sales\"] * item_price\n if item_revenue > max_revenue[\"revenue\"]:\n item[\"revenue\"] = item_revenue\n max_revenue = item\n\n # TODO: also handle max sales\n if item['total_sales'] > sales['total_sales']:\n sales = item\n\n if not item['total_sales'] in best_car.keys():\n best_car[item['car']['car_year']] = item['total_sales']\n else:\n best_car[item['car']['car_year']] += item['total_sales']\n\n all_values = best_car.values()\n max_values = max(all_values)\n max_key = max(best_car, key=best_car.get)\n\n # TODO: also handle most popular car_year\n\n summary = [\n \"The {} generated the most revenue: ${}\".format(\n format_car(max_revenue[\"car\"]), max_revenue[\"revenue\"]),\n \"The {} had the most sales: {}\".format(sales['car']['car_model'],sales['total_sales']),\n \"The most popular year was {} with {} sales\".format(max_key, max_values),\n ]\n\n return summary", "title": "" }, { "docid": "ff827e6f837e9e97c20fbdab0a51de80", "score": "0.5308433", "text": "def sort_stored_products() -> None:\n product_records.sort(key=_product_price)", "title": "" }, { "docid": "e927638b0c7d1321eef41277600d6fe5", "score": "0.53031987", "text": "def sort_objects(self):\n\n def sortkey(x):\n if isinstance(x, Pin):\n return 4\n elif isinstance(x, Text):\n return 5\n elif hasattr(x, \"fill\") and x.fill == \"F\":\n return 2\n elif hasattr(x, \"fill\") and x.fill == \"f\":\n return 0\n else:\n return 1\n\n self.objects.sort(key=sortkey)", "title": "" }, { "docid": "107fb8ad9ebf880c86711a1c55a1750d", "score": "0.5286801", "text": "def sort_gene_values():\n pass", "title": "" }, { "docid": "a26c583fb4959da826803959deea016f", "score": "0.5283987", "text": "def ordenar_objetos(self):\n self.objetos.sort(key=lambda objeto: objeto.z)", "title": "" }, { "docid": "775d93dd629881aef7128c7f909efff2", "score": "0.52460384", "text": "def build_sort(sort):\n sort_obj = []\n if sort:\n for k, v in sort.items():\n if k == 'raiting' or k == 'rating':\n sort_obj.append({'rating': 'desc'})\n else:\n sort_obj.append({k: 'desc'})\n\n return sort_obj", "title": "" }, { "docid": "56230132c396b3712e1ca60a2b9bc6ff", "score": "0.5235259", "text": "def get_first_model_each_manufacturer(cars=cars):\n return [models[0] for models in cars.values()]", "title": "" }, { "docid": "5a40a2437de9643cd0ad88162a87c223", "score": "0.52208847", "text": "def army_names():\n names = {}\n result=[]\n for book in books :\n book_str = \"Book \" + book\n for army in data['armies'][book_str] :\n # Ignore same army with plain vs models\n k = key_army_name(army)\n plain = str(k)\n \n if plain not in names :\n names[plain] = 1\n result.append(army)\n result.sort(key=key_army_name)\n return result", "title": "" }, { "docid": "ad9543ad8cd7cdcb5685b498816bc538", "score": "0.52078664", "text": "def _get_dict(self):\n result = collections.OrderedDict()\n result[\"nature\"] = \"library\"\n result[\"objects\"] = self.dic_obj\n result[\"relations\"] = self.dic_rlt\n return result", "title": "" }, { "docid": "31105dcc16cceed79ce3e00dda50b969", "score": "0.51773256", "text": "def vehicles(self):\n return copy.deepcopy(self._vehiclesDict)", "title": "" }, { "docid": "300a1814f162b20cdf39ce4e226c0a32", "score": "0.5158343", "text": "def getVehiclesAmount(self, amount, orderBy, invert=False):\n vehicles = {}\n if ge(amount,0):\n if ge(amount, self.getSize()):\n return self.vehicles\n\n #keyOrder = lambda vehicle: sum(vehicle._id)/len(vehicle._id)\n #keyOrder = \"_id\" 
sum(vehicle._id)/len(vehicle._id)\n keyOrder = attrgetter(\"_id\")\n #TODO: Arrumar isso\n if(eq(orderBy,VEHICLE_SPEED)):\n keyOrder = lambda vehicle: vehicle.average(VEHICLE_SPEED)\n\n key = 0\n for vehicle in (sorted(self.vehicles.values(), key=keyOrder, reverse=invert)):\n if ge(key, amount):\n break\n vehicles[len(vehicles)] = vehicle\n key += 1\n\n return copy.deepcopy(vehicles)", "title": "" }, { "docid": "0ae2f369a3c76f6c190a9ccffb73a6ef", "score": "0.51170415", "text": "def display_dict_models(dict_models, sort_by='test_score'):\n cls = [key for key in dict_models.keys()]\n test_s = [dict_models[key]['test_score'] for key in cls]\n training_s = [dict_models[key]['train_score'] for key in cls]\n training_t = [dict_models[key]['train_time'] for key in cls]\n df_ = pd.DataFrame(data=numpy.zeros(shape=(len(cls),4)), columns = ['classifier', 'train_score', 'test_score', 'train_time'])\n for ii in range(0,len(cls)):\n df_.loc[ii, 'classifier'] = cls[ii]\n df_.loc[ii, 'train_score'] = training_s[ii]\n df_.loc[ii, 'test_score'] = test_s[ii]\n df_.loc[ii, 'train_time'] = training_t[ii]\n print(df_.sort_values(by=sort_by, ascending=False))", "title": "" }, { "docid": "a17c9303c5d39d986a0bc630c3e5249f", "score": "0.51133925", "text": "def sort_finds(self, method = None ):\r\n if not method:\r\n method = self.sort\r\n\r\n if method == 'rating':\r\n finds = Found.objects.filter(search = self.pk).order_by('-rating')\r\n self.sort = 'rating'\r\n else:\r\n finds = Found.objects.filter(search = self.pk).order_by('-found_on')\r\n self.sort = 'date'\r\n\r\n # This causes the finds to be DeReferenced \r\n self.finds = [ find for find in finds ]\r\n return self.finds", "title": "" }, { "docid": "b60676e3cd8ad5fb73ae16dfa08463a5", "score": "0.5111921", "text": "def sort(self):\r\n self.cards.sort()", "title": "" }, { "docid": "d83f74df17d9c4ee0198894c396bca97", "score": "0.51100534", "text": "def sort(self):\n self.cards.sort()", "title": "" }, { "docid": "1300125d496aee35e805eb1d04f101f9", "score": "0.5102367", "text": "async def sort_many(attributes: dict) -> list:\n\n return [\n mongo_to_dict(equipment)\n async for equipment in equipment_collection.find(attributes)\n ]", "title": "" }, { "docid": "100155bb479737cf842791dcca4b378b", "score": "0.5098958", "text": "def sorted(self):\n return sorted(self.pairings, key=lambda pairing: pairing.name)", "title": "" }, { "docid": "e55ed3abbc7315280cc85620e7d5a871", "score": "0.5092274", "text": "def make_car(manufacturer, model, **options):\n\tcar_dict = {\n\t\t'manufacturer': manufacturer.title(),\n\t\t'model': model.title()\n}\n\tfor key, value in options.items():\n\t\tcar_dict[key] = value\n\t\n\treturn car_dict", "title": "" }, { "docid": "b4c1321cd593ffeb1ab9123a2f32e545", "score": "0.5090754", "text": "def get_models_by_phenotype(models, min_value=0):\n\n allphens = get_phenotype_universe(models) \n result = dict()\n for phen in allphens:\n result[phen] = set()\n for key, model in models.items():\n for x in model.data:\n if x.value >= min_value: \n result[x.phenotype].add(key)\n return result", "title": "" }, { "docid": "0db1fefb81724a138d84cfb11b191a3c", "score": "0.5088249", "text": "def sortMatches(predictionDict):\n\n genes = ('fliC', 'flnaA', 'fllA', 'fmlA', 'flkA', 'gnd', 'wzx', 'wzy', 'wzm', 'wzt')\n\n logging.info(\"Sorting matches from {}\".format(predictionDict))\n sortedMatchDict = {}\n\n for genome_name, value in predictionDict.iteritems():\n\n tempDict = collections.defaultdict(list)\n if value == 'NA':\n sortedMatchDict[genome_name] = 
'NA'\n continue\n\n for title, hsp in value.iteritems():\n productPercentage = 0 if hsp[1] == 'NA' else getProductPercentage(hsp[1], hsp[0])\n\n for gene in genes:\n if re.search(gene, title):\n\n d = {'title': title, 'hsp': hsp[0],\n 'length': hsp[1], 'percentage': productPercentage}\n\n tempDict[gene].append(d)\n\n break\n\n #Sorting the lists in the resulting dictionary\n for gene in tempDict:\n\n tempDict[gene] = sorted(tempDict[gene], key=lambda k: k['percentage'], reverse = True)\n\n # convert to regular `dict` to avoid any downstream weirdness\n sortedMatchDict[genome_name] = dict(tempDict)\n\n return sortedMatchDict", "title": "" }, { "docid": "63aefb213cd0e71477fa62f91393c409", "score": "0.508692", "text": "def sortLists(self):\r\n\t\tself.immunities.sort()\r\n\t\tself.resistances.sort()\r\n\t\tself.weaknesses.sort()", "title": "" }, { "docid": "26bb72dff4386e880306eda1c6820120", "score": "0.50464875", "text": "def get_vehicles():\r\n vehicles = Vehicle.objects.all()\r\n return vehicles", "title": "" }, { "docid": "f8294b62400e5e7beecf31e8355cf570", "score": "0.5043857", "text": "def sort(self):\n return OrderedDict(sorted([(grade, tuple(sorted(students))) for\n grade, students in self.__db.items()]))", "title": "" }, { "docid": "692473129433fd678a96dde586da904e", "score": "0.5043295", "text": "def filter_sort(details):\n return sorted(details)", "title": "" }, { "docid": "0745fd88e9a8ef9fe8c03776bf813760", "score": "0.5031241", "text": "def sortinfo(self):\n d = dict(self.properties)\n if self.type is not None:\n d[CVARSORT] = self.type\n return d", "title": "" }, { "docid": "e2bf0b8fbdc52351cb12cf8a3fbbb2e7", "score": "0.5023946", "text": "def order_objects(self, objs):\n orm_ordering = self.order_by()\n ordering = []\n for order in orm_ordering:\n if order[0] == \"-\":\n field = order[1:]\n desc = True\n else:\n field = order\n desc = False\n ordering.append(\n (lambda obj: get_value(obj, field), desc)\n )\n return sort_multiple(objs, *ordering)", "title": "" }, { "docid": "f7caebc7f2524499f20869b59e6f214a", "score": "0.50234836", "text": "def get_models(automaker, year):\n filtered = filter(lambda car: car.get('automaker') == automaker and car.get('year') == year, data)\n return set([car.get('model') for car in filtered])", "title": "" }, { "docid": "030a43133632b16416d50d05ec851d98", "score": "0.50222653", "text": "def get_ranked_object_list(self):\n return sorted(self.objects.items(), key=lambda item: item[1], reverse=True)", "title": "" }, { "docid": "41d61c02e487bf3abb066de88f807f3e", "score": "0.5005075", "text": "def sort_clashes(self,\n sort_vdW = False,\n sort_model_distance = False,\n sort_overlap = False,\n sort_symmetry = False):\n from collections import OrderedDict\n options = [sort_vdW, sort_model_distance, sort_overlap, sort_symmetry]\n if (options.count(True) == 0):\n sort_overlap = True\n elif (options.count(True) > 1):\n raise Sorry('Can only sort by one value.')\n if sort_model_distance: key = 0\n if sort_vdW: key = 1\n if sort_overlap: key = 2\n if sort_symmetry: key = 4\n self._clashes_dict = OrderedDict(\n sorted(self._clashes_dict.items(), key=lambda x: x[1][key]))", "title": "" }, { "docid": "78bf9c76836fbdf443dd1dabeeabe8e4", "score": "0.5002873", "text": "def apply_sorting(self, obj_list, options=None):\n return obj_list", "title": "" }, { "docid": "e1fef0e572570aa9e12c19a6fdce3eee", "score": "0.49867544", "text": "def get_all_rockauto_models(make, model_year):\r\n\r\n make_split = make.split(\" \")\r\n make_join = \"+\".join(make_split)\r\n\r\n url = 
f\"http://www.rockauto.com/en/catalog/{make_join},{model_year}\"\r\n req = requests.get(url)\r\n src = req.content\r\n soup = BeautifulSoup(src, \"lxml\")\r\n\r\n divs_normal = soup.findAll(\"a\", {\"class\": \"navlabellink nvoffset nnormal\"})\r\n models = [div_normal.string for div_normal in divs_normal][2:]\r\n\r\n return sorted(models)", "title": "" }, { "docid": "110d039cdb6bace16c147d8988e000f6", "score": "0.49653724", "text": "def sort_treadles(self):\n raise NotImplementedError", "title": "" }, { "docid": "bd81469ff911b8940b5959ce4de52382", "score": "0.4964206", "text": "def _sort_basis_dict(bs):\n\n keyorder = [\n 'molssi_bse_magic', 'basisSetName', 'basisSetDescription', 'basisSetRevisionDescription', 'basisSetRole',\n 'basisSetReferences', 'basisSetNotes', 'basisSetElements', 'elementReferences',\n 'elementECPElectrons', 'elementElectronShells', 'elementECP', 'elementComponents', 'elementEntry',\n 'shellFunctionType', 'shellHarmonicType', 'shellRegion', 'shellAngularMomentum', 'shellExponents',\n 'shellCoefficients', 'potentialECPType', 'potentialAngularMomentum', 'potentialRExponents',\n 'potentialGaussianExponents', 'potentialCoefficients'\n ]\n\n # Add integers for the elements\n keyorder.extend(list(range(150)))\n\n bs_sorted = sorted(bs.items(), key=lambda x: keyorder.index(x[0]))\n bs_sorted = collections.OrderedDict(bs_sorted)\n\n for k, v in bs_sorted.items():\n if isinstance(v, dict):\n bs_sorted[k] = _sort_basis_dict(v)\n elif k == 'elementElectronShells':\n bs_sorted[k] = [_sort_basis_dict(x) for x in v]\n\n return bs_sorted", "title": "" }, { "docid": "787d4c942c073fad1d15767e5a07b4d0", "score": "0.49615175", "text": "def sort_formulas_materials(self, formulas):\n for materials in formulas.values():\n materials.sort()\n return formulas", "title": "" }, { "docid": "e4dac3527a1476c00a4267fb984064dc", "score": "0.49532148", "text": "def __init__(self):\n self._cars = {}", "title": "" }, { "docid": "c8b47a892a907577afec0058c4383460", "score": "0.4953139", "text": "def data_for_sorting():\n raise NotImplementedError", "title": "" }, { "docid": "c8b47a892a907577afec0058c4383460", "score": "0.4953139", "text": "def data_for_sorting():\n raise NotImplementedError", "title": "" }, { "docid": "ee84e2975fe60b1b584fba1ba32ac24d", "score": "0.49525264", "text": "def sort_by_rate(self,data):", "title": "" }, { "docid": "89a2ddeec11a1e1e270b33f9f5b5835f", "score": "0.49493638", "text": "def SortSims(self):\n\n self.sim_list.sort(key=lambda x: x['cache'].config_name)", "title": "" }, { "docid": "1e5180896393d6180620ce190fe2a211", "score": "0.49474245", "text": "def queryset_to_resort_dictionary(selected_resorts):\r\n\r\n resorts_list = [x for x in selected_resorts.values('id', 'name', 'website')]\r\n\r\n for resort in resorts_list:\r\n resort_object = selected_resorts.get(pk=resort['id'])\r\n resort['our_take'] = resort_object.our_take\r\n\r\n try:\r\n resort['state'] = resort_object.address.locality.state.name\r\n resort['address'] = resort_object.address.formatted\r\n except:\r\n resort['address'] = resort_object.address.raw \r\n\r\n return(resorts_list)", "title": "" }, { "docid": "401c66371cce3be28e6ca8245837685c", "score": "0.49412897", "text": "def sort(request, form):\n criterion_dict = {'book_name': Book.sort_by_book_name,\n 'author': Book.sort_by_author,\n 'estimation': Book.sort_by_estimation}\n\n category = Category.objects.get(id=form.cleaned_data['category'])\n books_count = Book.objects.filter(id_category=category).count()\n\n if form.cleaned_data['criterion'] == 
'most_readable':\n books = Book.sort_by_readable(request.user, category, books_count)\n else:\n books = criterion_dict[form.cleaned_data['criterion']](request.user, category)\n\n paginator = Paginator(books, settings.BOOKS_PER_PAGE)\n page = paginator.page(form.cleaned_data['page'])\n\n context = {\n 'category': category.id,\n 'criterion': form.cleaned_data['criterion'],\n 'books': page.object_list,\n 'has_next': page.has_next(),\n 'next_page': page.next_page_number() if page.has_next() else paginator.num_pages\n }\n\n return HttpResponse(json.dumps(context), content_type='application/json')", "title": "" }, { "docid": "425f781d99ba6867ab5cef171c8728e9", "score": "0.49370405", "text": "def make_car(manufacturer, model, **options):\n car_dict = {\n 'manufacturer': manufacturer.title(),\n 'model': model.title(),\n }\n for option, value in options.items():\n car_dict[option] = value\n\n return car_dict", "title": "" }, { "docid": "3e711c1ae2b6b6a06a3120904c1898db", "score": "0.49249533", "text": "def make_car(manufacturer, model_name, **car_info):\n\tcar = {}\n\tcar['first_name'] = manufacturer\n\tcar['last_name'] = model_name\n\tfor key, value in car_info.items():\n\t\tcar[key] = value\n\treturn car", "title": "" }, { "docid": "0f2b84159b64871dcb4903ae95e4e386", "score": "0.49177736", "text": "def fetch_car(self, maker: str, name: str, year: str = \"\") -> List[Car]:\n if not self._makers:\n self.fetch_cars(maker)\n maker_id = self._makers.get(maker)\n if not maker_id:\n raise FipeAPIError(f\"Inexistent maker: {maker}\")\n car_id = None\n try:\n car_id = self._cars[maker][name]\n except KeyError:\n raise FipeAPIError(f\"Inexistent car: {name}\")\n r = requests.get(self._car_url.format(maker_id, car_id))\n _raise_on_failure(r, [200])\n models = {}\n for item in r.json():\n key = item[\"id\"]\n year_ = key.split(\"-\")[0]\n if year_ == \"32000\":\n year_ = f\"{datetime.datetime.now().year} (zero KM)\"\n models[year_] = key\n if year:\n models = {year: models[year]}\n list_models = []\n for k, v in models.items():\n url = self._car_model_url.format(maker_id, car_id, v)\n r = requests.get(url)\n log.debug(f\"getting url {url}\")\n # This sleep is needed due to the limit of requests per sec of the API.\n time.sleep(2)\n _raise_on_failure(r, [200, 201])\n resp = r.json()\n if resp[\"ano_modelo\"] == \"32000\":\n resp[\"ano_modelo\"] = f\"{datetime.datetime.now().year} (zero KM)\"\n list_models.append(\n Car(\n fipe_id=resp[\"fipe_codigo\"],\n name=resp[\"name\"].lower(),\n maker=maker,\n year=resp[\"ano_modelo\"],\n price=year_str_to_int(resp[\"preco\"]),\n currency=\"BRL\",\n fuel=resp[\"combustivel\"],\n pub_date=resp[\"referencia\"],\n )\n )\n return list_models", "title": "" }, { "docid": "a0cec00e072ce5b242457fcfcdad0a67", "score": "0.49119473", "text": "def sortGs(gs):\n gs[\"white\"].sort()\n gs[\"black\"].sort()", "title": "" }, { "docid": "2ffd723453943fb75cfc6a09cf16faa1", "score": "0.4908673", "text": "def sort_list(cls, v):\n v.sort(key=lambda x: x.name)\n return v", "title": "" }, { "docid": "fca140bdd841ece7093a3cbdf8efb3bb", "score": "0.49074504", "text": "def sort_facets(self):\n for f in self.facets:\n f.sort_vertices()\n self.facets.sort()", "title": "" }, { "docid": "c7b26651fe006f4ddc6f981eca64f424", "score": "0.49019098", "text": "def BuildModel(data, label):\n labellist = defaultdict(list)\n for i in range(len(data)):\n labellist[data[i]].append(label[i])\n model = dict()\n for d, l in labellist.items():\n sortedlabels = sorted(Counter(l).items(), key=operator.itemgetter(1), 
reverse=True)\n model[d] = [x[0] for x in sortedlabels]\n return model", "title": "" }, { "docid": "789c4abf57a86e3f46f18cef51f78486", "score": "0.48988795", "text": "async def sort(room_id: str) -> list:\n\n return [\n mongo_to_dict(equipment)\n async for equipment in equipment_collection.find({\"room_id\": str(room_id)})\n ]", "title": "" }, { "docid": "83a30cf27ce7d5d7d9bbbe1cf3b48f97", "score": "0.4895603", "text": "def sorted_items(self):\n return canonsort_items(self, self.canonical_order)", "title": "" }, { "docid": "053ed8559a6b34f6ba4d9380b6466b64", "score": "0.4891673", "text": "def create_data_model(list_of_vehicles, list_of_products):\n data = {}\n data['distance_matrix'] =[\n [\n 0, 18.5, 12.5, 10.5, 5.3, 34\n ],\n [\n 18.5, 0, 8.4, 10.9, 24.7, 20\n ],\n [\n 12.5, 8.4, 0, 4.5, 17.6, 22.2\n ],\n [\n 10.5, 10.9, 4.5, 0, 15.2, 24.6\n ],\n [\n 5.3, 24.7, 17.6, 15.2,0, 34.4\n ],\n [ 34, 20, 22.2, 24.6, 34.4, 0\n\n ],\n ]#map matrix\n #list_of_vheight = []\n #list_of_vwidth = []\n list_of_vlarge = []\n list_of_vweight = []\n for element in list_of_vehicles:\n #list_of_vheight.append(element..__trunk_dimension.__height)\n #list_of_vhwidht.append(element.__model.__trunk_dimension.__width)\n list_of_vlarge.append(element.GetModel().GetTrunkDimension().GetLarge())\n list_of_vweight.append(element.GetModel().GetTrunkDimension().GetWeight())\n\n #list_height = []\n #list_width = []\n list_large = [0]\n list_weight = [0]\n for element in list_of_products:\n #list_height.append(element.__product_dimension.__height)\n #list_width.append(element.__product_dimension.__width)\n list_large.append(element.GetProductDimension().GetLarge())\n list_weight.append(element.GetProductDimension().GetWeight())\n\n\n data['num_vehicles'] = len(list_of_vehicles) #number of vehicles\n data['depot'] = 0\n #data['vehicle_capacities_height'] = list_of_vheight\n #data['vehicle_capacities_width'] = list_of_vwidth\n data['vehicle_capacities_large'] = list_of_vlarge\n data['vehicle_capacities_weight'] = list_of_vweight\n #data['demand_height'] = list_height\n #data['demand_width'] = list_width\n data['demand_large'] = list_large\n data['demand_weight'] = list_weight\n return data", "title": "" }, { "docid": "e4f1f1e12fbf7fde989456812f3f2263", "score": "0.48893028", "text": "def make_car(manufacturer, model, **options):\r\n car_dict = {\r\n 'manufacturer': manufacturer.title(),\r\n 'model': model.title(),\r\n }\r\n for option, value in options.items():\r\n car_dict[option] = value\r\n return car_dict", "title": "" }, { "docid": "1cfe9f4c6aa8062d87fb3c1652f39be5", "score": "0.48859173", "text": "def get_template_models(service, force_refresh=False):\n if not force_refresh:\n obj = get_cache()\n if obj:\n return obj['sorted_models']\n\n models = []\n svc_models, service_version = get_models_json(service)\n\n for svc_model in svc_models:\n for model in models:\n if model['name'] == svc_model['name']:\n model['versions'].append({\n 'name': svc_model['version'],\n 'release_level': svc_model['release']['level'],\n 'release_color':\n RELEASE_COLORS[svc_model['release']['level']]\n })\n break\n else:\n models.append({\n 'pretty': PRETTY_MODELS.get(svc_model['name'],\n svc_model['name']),\n 'name': svc_model['name'],\n 'versions': [{\n 'name': svc_model['version'],\n 'release_level': svc_model['release']['level'],\n 'release_color':\n RELEASE_COLORS[svc_model['release']['level']]\n }]\n })\n\n for model in models:\n model['versions'] = sorted(model['versions'], key=lambda k: k['name'])\n\n sorted_models = sorted(models, 
key=lambda k: k['pretty'].lower())\n set_cache({'sorted_models': sorted_models,\n 'service_version': service_version})\n\n return sorted_models", "title": "" }, { "docid": "3e041dc6e08f2c8a92fb9d3db4b3cfbb", "score": "0.48850513", "text": "def cars_dict_to_table(car_data):\n table_data = [[\"ID\", \"Car\", \"Price\", \"Total Sales\"]]\n for item in car_data:\n table_data.append([item[\"id\"], format_car(item[\"car\"]), item[\"price\"], item[\"total_sales\"]])\n return table_data", "title": "" }, { "docid": "3e041dc6e08f2c8a92fb9d3db4b3cfbb", "score": "0.48850513", "text": "def cars_dict_to_table(car_data):\n table_data = [[\"ID\", \"Car\", \"Price\", \"Total Sales\"]]\n for item in car_data:\n table_data.append([item[\"id\"], format_car(item[\"car\"]), item[\"price\"], item[\"total_sales\"]])\n return table_data", "title": "" }, { "docid": "8205e2a6b41fab6fd03beea501be3ff7", "score": "0.48752037", "text": "def get_UA_models(models, category):\n \n result = dict() \n for key, model in models.items():\n if model.category != category:\n continue\n desc = model.description\n if desc[\"sex\"]==\"U\" and desc[\"neg_phenotypes\"]:\n result[key] = model\n return result", "title": "" }, { "docid": "f0856fc17688e51612e58ac1a3fb70c0", "score": "0.4863599", "text": "def get_models(automaker, year):\n return set([\n car[\"model\"]\n for car in data\n if car[\"year\"] == year and car[\"automaker\"] == automaker\n ])", "title": "" }, { "docid": "624fed20277a00080f3db46251a5cdc3", "score": "0.48553243", "text": "def sort_cms_models(cms_models):\r\n cms_models.sort(key=lambda model: (\r\n get_cms_model_order(model['name']) if is_cms_app(model['app_name']) else 999,\r\n model['app_name'],\r\n model['title']\r\n ))", "title": "" }, { "docid": "0e8db0ed872ef80878173f4be120837e", "score": "0.48542315", "text": "def make_car(manufacturer, model_name, **optional):\n\tcar = {}\n\tcar['manufacturer'] = manufacturer\n\tcar['model_name'] = model_name\n\tfor key, value in optional.items():\n\t\tcar[key] = value\n\tprint(car)", "title": "" }, { "docid": "8c0eba0a28a6736cb0e9735e7e26683e", "score": "0.48488554", "text": "def items_by_vendor(self):\n result = []\n cache = {}\n vendor = None\n for item in self.items.all().order_by(\"product__vendor\"):\n if item.product.vendor != vendor:\n if cache:\n result.append(cache)\n cache = {'vendor': item.product.vendor, 'items': []}\n vendor = item.product.vendor\n cache['items'].append(item)\n if cache:\n result.append(cache)\n return result", "title": "" }, { "docid": "3da8b248e213c5fa31f44540e83eab2b", "score": "0.48436084", "text": "def order_criteria(self):\n return self.sort", "title": "" }, { "docid": "18d699e8c5486cbe77efb5ac17e828e2", "score": "0.48359346", "text": "def solution(data):\n\t\treturn list(data.keys()).sort()", "title": "" }, { "docid": "d904a8bcc71c76d99267627cb21aca8b", "score": "0.48298615", "text": "def sort(self, by, key=None, reverse=False):\n\t\tsorter = _data_sorter(self._dict, by).sort(key=key, reverse=reverse)\n\t\tsorted_out = {}\n\t\tfor i in sorter.mapper:\n\t\t\tsorted_out.update({i:self._dict[sorter.mapper[i]]})\n\t\t\t\n\t\treturn Data(sorted_out)", "title": "" }, { "docid": "324a49af59e033c694b4460eb9d2b53a", "score": "0.48136103", "text": "def available_models():\n return {title: model.desc() for title, model in _MODELS.items()}", "title": "" }, { "docid": "2188a9a595ad57a3e995a10f2d0a5bc5", "score": "0.481115", "text": "def make_car(make, model, **car_model):\n car_model['manufacturer'] = make\n car_model['model'] = model\n return 
car_model", "title": "" }, { "docid": "64e848e11438e79c193b96419ab6ab86", "score": "0.48042303", "text": "def sort(self):\n\n return self._replaceRdd(self._jvmRdd.sort())", "title": "" }, { "docid": "35d82b6148e589b3f5272924e40b9b6a", "score": "0.47964212", "text": "def sort_by_price():\n\n items = Item.objects.order_by('-price')\n\n return render_template(\"base.html\", items = items)", "title": "" }, { "docid": "29dc1017a777f2eee7c59d951c8f5746", "score": "0.47876558", "text": "def parks_by_city():\n\n park = db.session.query(Park.park_name, Park.city).filter(Park.city\n != None).order_by(Park.city, Park.park_name).distinct().all()\n\n parks = {}\n\n for p in park:\n a = str(p[0])\n b = str(p[1])\n if b not in parks:\n parks[b] = []\n parks[b].append(a)\n else:\n parks[b].append(a)\n print parks\n\n return render_template(\"parks-by-city.html\", parks=parks)", "title": "" }, { "docid": "81ec932eba571f6dda23f1b00a39a527", "score": "0.47872648", "text": "def _parseCars(r):\n cars = json_normalize(r.json()).drop(columns='eventClasses')\n classes = json_normalize(r.json(), 'eventClasses', meta='entryId')\n return (cars, classes)", "title": "" }, { "docid": "897b09826da2c97f869c3ed67d51c182", "score": "0.4777315", "text": "def getTopPlacesTypes(places_to_details, query, model):\n rst = {}\n \n for place in places_to_details: \n types = places_to_details[place]['types']\n score = computeScore(query, types, model)\n rst[place] = score\n\n \n return sorted(rst.items(), key=lambda item : item[1], reverse=False)", "title": "" }, { "docid": "d32f17ef01d1b44313d6f138721e774d", "score": "0.4770072", "text": "def _sort(self, m1, m2):\r\n return cmp(m1.get_id(), m2.get_id())", "title": "" }, { "docid": "4626dbdc7730ca5dcf453086288f80b0", "score": "0.4767069", "text": "def sortResult(self, result_list):\n rmap = {}\n classes = []\n for n, t, o, e in result_list:\n cls = t.__class__\n if not cls in rmap:\n rmap[cls] = []\n classes.append(cls)\n rmap[cls].append((n, t, o, e))\n r = [(cls, rmap[cls]) for cls in classes]\n return r", "title": "" }, { "docid": "a914b9ea6cbead96f9974c0c0b45fe6d", "score": "0.4762955", "text": "def get_sorted_characteristics(self):\n to_sort = []\n for charac in self.entropyDict.keys():\n [charac_entropy,charac_gain,indiv_totals,entropies] = self.entropyDict[charac]\n to_sort.append( (charac_gain, [charac,charac_entropy,charac_gain,indiv_totals,entropies]) )\n\n \n to_sort.sort()\n #Here to_sort is sorted, duh!\n sorted_tuples = to_sort\n #the returned list contains elements like:\n #[charac_name, char_entropy,char_gain,indiv_totals, entropies]\n return [list for gain,list in sorted_tuples]", "title": "" }, { "docid": "406954cb76714e52bc5b8adc131be712", "score": "0.47625342", "text": "def sortNumUses(multbricks):\n sortedBricks = sorted(multbricks.items(),key=operator.itemgetter(1))\n sortedBricks.reverse()\n return sortedBricks", "title": "" }, { "docid": "b79acf891a77a23f9481f61ef928fc8c", "score": "0.4760209", "text": "def sort_films(self):\n self.FILMS_SORTED = True\n self.films = sorted(self.films, key=lambda f_id: self.get_film(f_id).get_release_date())", "title": "" }, { "docid": "c57a6511de846c1f120a76def473d0a6", "score": "0.47530392", "text": "def arrange_data(data):\n new_list = []\n for page in data:\n for dictionary in page:\n # save the values we want for each title\n new_dictionary = {}\n for k, v in dictionary.items():\n if k == 'id':\n new_dictionary[k] = v\n if k == 'title':\n new_dictionary[k] = v\n if k == 'popularity':\n new_dictionary[k] = v\n if k == 
'release_date':\n new_dictionary[k] = v\n if k == 'vote_count':\n new_dictionary[k] = v\n if k == 'vote_average':\n new_dictionary[k] = v\n if k == 'genre_ids':\n new_dictionary[k] = v\n new_list.append(new_dictionary)\n return new_list", "title": "" }, { "docid": "479887a96f2bc259f2bf28513c64aa44", "score": "0.4746503", "text": "def test_sort(self):\n lst_key = 'customer'\n lst_value = ['jim', 'peter', 'mary']\n\n if self.r.exists(lst_key):\n self.r.delete(lst_key)\n\n self.r.rpush(lst_key, lst_value[0], lst_value[1], lst_value[2])\n\n print('sort {}'.format(self.r.sort(lst_key, alpha=True)))\n\n map_score = {'score:jim': 1, 'score:peter': 3, 'score:mary': 5}\n self.r.mset(map_score)\n\n print('sort by {}'.format(self.r.sort(lst_key, by='socre:*', get=['score:*', '#']\n )))\n #print('result {}'.format(self.r.hget('result')))", "title": "" }, { "docid": "5e960101a1291a7d119f768e9c454142", "score": "0.4738602", "text": "def set_vvdic_key_order(vv: Vividict) -> list:\n ro_list = []\n feat_list_1d = []\n n = 0\n for k1 in vv.keys():\n for k2 in vv[k1].keys():\n ro_list.append([k1, k2, n])\n feat_list_1d.append(k2) # just the 2nd layer keys: features\n n += 1\n return ro_list, feat_list_1d", "title": "" }, { "docid": "ccbd5504911be762f82d452923b62fdd", "score": "0.47383884", "text": "def sort_values(artist_values):\n\n # sort the values into a list\n artist_values_sorted_list = sorted(\n artist_values.items(), key=itemgetter(1), reverse=False\n )\n\n # convert list into dict\n artist_values_sorted = {}\n for i in artist_values_sorted_list:\n artist_values_sorted[i[0]] = i[1]\n return artist_values_sorted", "title": "" }, { "docid": "3e4e56a47aec5e967739ecb5b6893a17", "score": "0.47380736", "text": "def make_car(manufacturer, model, **car_info):\n car_profile = {}\n car_profile['manufacturer_name'] = manufacturer\n car_profile['model_name'] = model\n for key, value in car_info.items():\n car_profile[key] = value\n return car_profile", "title": "" }, { "docid": "1dd6fb4c4a05c194c67b9ab59140ef20", "score": "0.4734421", "text": "def REGIONS_LIST_SORTED_BY_NAME():\n\n # Avoid circular import.\n from mkt.regions.utils import remove_accents\n\n by_name = sorted([v for k, v in DEFINED if v.id and v.weight > -1],\n key=lambda v: remove_accents(unicode(v.name)))\n by_name.append(RESTOFWORLD)\n return by_name", "title": "" } ]
ebd980a0f02eb074c6f9f0f9147380f9
Calculate hydrophobic interactions. ODDT generates hydrophobic interactions for all pairs of atoms that are within the specification, which results in multiple interactions between the same hydrophobic region of the ligand and protein. We reduce those down to a single interaction: the one that is the shortest. ODDT also seems to generate hydrophobic interactions that are complete duplicates, and the deduplication process removes these as well.
[ { "docid": "496c4a21f0933581c691b52f4f7a00a8", "score": "0.65272385", "text": "def calc_hydrophobic_interactions(protein, mol, key_inters_defs, mol_key_inters):\n inters = {}\n protein_atoms, ligand_atoms = oddt.interactions.hydrophobic_contacts(protein, mol)\n for p, l in zip(protein_atoms, ligand_atoms):\n c = get_canonical_residue(p)\n dist = spatial.distance(np.array([p['coords']]), np.array([l['coords']]))[0][0]\n p_coords = (p['coords'][0].item(), p['coords'][1].item(), p['coords'][2].item())\n l_coords = (l['coords'][0].item(), l['coords'][1].item(), l['coords'][2].item())\n t = interactions.Interaction(c, p_coords, l_coords, dist, l['id'].item())\n if c in inters:\n current_value = inters[c]\n if dist < current_value.distance:\n inters[c] = t\n else:\n inters[c] = t\n if key_inters_defs and c in key_inters_defs:\n mol_key_inters.append(interactions.I_TYPE_HYDROPHOBIC + ':' + c)\n\n if inters:\n # print(' found', len(inters), 'hydrophobics')\n return interactions.InteractionType(interactions.I_NAME_HYDROPHOBIC, list(inters.values()))\n else:\n return None", "title": "" } ]
[ { "docid": "d0953c0ffc6dd735c0c162d85a6996a9", "score": "0.6387586", "text": "def build_hydrogens(self):\n # TODO assumes only one continuous chain (and 1 set of N & C terminals)\n coords = coord_generator(self.coords, NUM_COORDS_PER_RES, remove_padding=True)\n new_coords = []\n prev_res_atoms = None\n for i, (aa, crd) in enumerate(zip(self.seq, coords)):\n # Add empty hydrogen coordinates for missing residues\n if crd.sum() == 0:\n new_coords.append(self.ones((NUM_COORDS_PER_RES_W_HYDROGENS, 3))*0)\n prev_res_atoms = None\n continue\n # Create an organized mapping from atom name to Catesian coordinates\n d = {name: xyz for (name, xyz) in zip(self.atom_map[aa], crd)}\n atoms = AtomHolder(d, default=None) # default=self.empty_coord would allow building hydrogens from missing coords\n # Generate hydrogen positions as array/tensor\n hydrogen_positions = self.get_hydrogens_for_res(\n aa,\n atoms,\n prev_res_atoms,\n n_terminal=i == 0,\n c_terminal=i == len(self.seq) - 1)\n # Append Hydrogens immediately after heavy atoms, followed by PADs to L=24\n new_coords.append(self.concatenate((crd, hydrogen_positions)))\n prev_res_atoms = atoms\n self.reduced_coords = self.concatenate(new_coords)\n return self.reduced_coords", "title": "" }, { "docid": "71b9f4bdea01b0b843ac1da739ade1d8", "score": "0.5912552", "text": "def compute_hydrogen_bonds(self):\n\n # Select the backbone atoms for each aminoacid.\n caMask = np.logical_and(self.atomNames_== \"CA\", self.atomTypes_ == 5)\n cMask = np.logical_and(self.atomNames_== \"C\", self.atomAminoIds_ >= 0)\n nMask = np.logical_and(self.atomNames_== \"N\", self.atomAminoIds_ >= 0)\n oMask = np.logical_and(self.atomNames_== \"O\", self.atomAminoIds_ >= 0)\n \n caPos = self.atomPos_[0, caMask]\n cPos = self.atomPos_[0, cMask]\n nPos = self.atomPos_[0, nMask]\n oPos = self.atomPos_[0, oMask]\n\n nIds = np.nonzero(nMask)[0]\n oIds = np.nonzero(oMask)[0]\n\n if len(caPos) != len(cPos) or len(caPos) != len(nPos) or len(caPos) != len(oPos):\n\n caAminoIds = self.atomAminoIds_[caMask]\n cAminoIds = self.atomAminoIds_[cMask]\n nAminoIds = self.atomAminoIds_[nMask]\n oAminoIds = self.atomAminoIds_[oMask]\n cAminoIter = 0\n nAminoIter = 0\n oAminoIter = 0\n newCPos = []\n newNPos = []\n newOPos = []\n newNIds = []\n newOIds = []\n for curAminoIter, curAminoId in enumerate(caAminoIds):\n if oAminoIter < len(oAminoIds):\n curOAminoId = oAminoIds[oAminoIter]\n else:\n curOAminoId = -1\n if cAminoIter < len(cAminoIds):\n curCAminoId = cAminoIds[cAminoIter]\n else:\n curCAminoId = -1\n if nAminoIter < len(nAminoIds):\n curNAminoId = nAminoIds[nAminoIter]\n else:\n curNAminoId = -1\n\n if curAminoId == curOAminoId:\n newOPos.append(oPos[oAminoIter])\n newOIds.append(oIds[oAminoIter])\n oAminoIter += 1\n else:\n newOPos.append(caPos[curAminoIter])\n newOIds.append(-1)\n \n if curAminoId == curCAminoId:\n newCPos.append(cPos[cAminoIter])\n cAminoIter += 1\n else:\n newCPos.append(caPos[curAminoIter])\n\n if curAminoId == curNAminoId:\n newNPos.append(nPos[nAminoIter])\n newNIds.append(nIds[nAminoIter])\n nAminoIter += 1\n else:\n newNPos.append(caPos[curAminoIter])\n newNIds.append(-1)\n\n cPos = np.array(newCPos)\n nPos = np.array(newNPos)\n oPos = np.array(newOPos)\n nIds = np.array(newNIds)\n oIds = np.array(newOIds)\n\n # Get the position of the previous C atom in the backbone for each aminoacid.\n cPrev = []\n oPrev = []\n for curIter in range(len(caPos)):\n startIndex = 0\n if curIter > 0:\n startIndex = self.aminoNeighsSIndices_[curIter-1]\n endIndex = 
self.aminoNeighsSIndices_[curIter]\n selCPos = np.array(cPos[curIter])\n selOPos = np.array(oPos[curIter])\n for curNeighIter in range(endIndex-startIndex):\n curNeigh = self.aminoNeighs_[curNeighIter + startIndex, 0]\n if curNeigh < curIter:\n selCPos = cPos[curNeigh]\n selOPos = oPos[curNeigh]\n cPrev.append(selCPos)\n oPrev.append(selOPos)\n \n # Compute the position of the hydrogen atom.\n cPrev = np.array(cPrev)\n oPrev = np.array(oPrev)\n prevVec = cPrev - oPrev \n prevVec = prevVec / (np.linalg.norm(prevVec, axis = 1, keepdims=True) + 1e-9)\n hPos = nPos + prevVec\n\n # Compute the hydrogen bonds.\n distON = oPos.reshape((-1, 1, 3)) - nPos.reshape((1, -1, 3))\n distCH = cPos.reshape((-1, 1, 3)) - hPos.reshape((1, -1, 3))\n distOH = oPos.reshape((-1, 1, 3)) - hPos.reshape((1, -1, 3))\n distCN = cPos.reshape((-1, 1, 3)) - nPos.reshape((1, -1, 3))\n distON = np.linalg.norm(distON, axis = -1)\n distCH = np.linalg.norm(distCH, axis = -1)\n distOH = np.linalg.norm(distOH, axis = -1)\n distCN = np.linalg.norm(distCN, axis = -1)\n distON = 1.0 / (distON + 1e-9) \n distCH = 1.0 / (distCH + 1e-9)\n distOH = 1.0 / (distOH + 1e-9)\n distCN = 1.0 / (distCN + 1e-9)\n\n U = (0.084 * 332.0) * (distON + distCH - distOH - distCN)\n for curIter in range(len(caPos)):\n U[curIter, curIter] = 0.0\n startIndex = 0\n if curIter > 0:\n startIndex = self.aminoNeighsSIndices_[curIter-1]\n endIndex = self.aminoNeighsSIndices_[curIter]\n for curNeighIter in range(endIndex-startIndex):\n curNeigh = self.aminoNeighs_[curNeighIter + startIndex, 0]\n U[curIter, curNeigh] = 0.0\n\n minIndex = np.argmin(U, axis=1)\n EMin = np.zeros_like(U)\n for i in range(U.shape[0]):\n EMin[i, minIndex[i]] = np.amin(U[i, :])\n maskHBonds = EMin < -0.5\n maskHBondsVals = maskHBonds.astype(np.int32)\n hBondsIndexs = np.transpose(np.nonzero(maskHBonds))\n\n # Create a new graph for the covalent bonds and hydrogen bonds together.\n self.covBondListHB_ = []\n self.atomCovBondSIndicesHB_ = []\n curStartIndex = 0\n for curIter, curEndIndex in enumerate(self.atomCovBondSIndices_):\n\n for curNeighbor in self.covBondList_[curStartIndex:curEndIndex]:\n self.covBondListHB_.append(curNeighbor)\n\n curAminoIndex = self.atomAminoIds_[curIter]\n if curAminoIndex >= 0:\n\n if self.atomNames_[curIter] == \"O\" and np.sum(maskHBondsVals[curAminoIndex, :]) > 0:\n curAminoNeigh = np.nonzero(maskHBonds[curAminoIndex, :])[0][0]\n if nIds[curAminoNeigh] >= 0:\n self.covBondListHB_.append([nIds[curAminoNeigh], curIter])\n elif self.atomNames_[curIter] == \"N\" and np.sum(maskHBondsVals[:, curAminoIndex]) > 0:\n curAminoNeigh = np.nonzero(maskHBonds[:, curAminoIndex])[0][0]\n if oIds[curAminoNeigh] >= 0:\n self.covBondListHB_.append([oIds[curAminoNeigh], curIter])\n\n self.atomCovBondSIndicesHB_.append(len(self.covBondListHB_))\n curStartIndex = curEndIndex\n\n self.covBondListHB_ = np.array(self.covBondListHB_)\n self.atomCovBondSIndicesHB_ = np.array(self.atomCovBondSIndicesHB_)\n\n # Create a new graph for the peptide bonds and hydrogen bonds together.\n self.aminoNeighsHB_ = []\n self.aminoNeighsSIndicesHB_ = []\n curStartIndex = 0\n for curIter, curEndIndex in enumerate(self.aminoNeighsSIndices_):\n\n for curNeighbor in self.aminoNeighs_[curStartIndex:curEndIndex]:\n self.aminoNeighsHB_.append(curNeighbor)\n\n if np.sum(maskHBondsVals[curIter, :]) > 0:\n curAminoNeigh = np.nonzero(maskHBonds[curIter, :])[0][0]\n self.aminoNeighsHB_.append([curAminoNeigh, curIter])\n if np.sum(maskHBondsVals[:, curIter]) > 0:\n curAminoNeigh = np.nonzero(maskHBonds[:, 
curIter])[0][0]\n self.aminoNeighsHB_.append([curAminoNeigh, curIter])\n\n self.aminoNeighsSIndicesHB_.append(len(self.aminoNeighsHB_))\n curStartIndex = curEndIndex\n\n self.aminoNeighsHB_ = np.array(self.aminoNeighsHB_)\n self.aminoNeighsSIndicesHB_ = np.array(self.aminoNeighsSIndicesHB_)", "title": "" }, { "docid": "418d514bfcb16e9b41e957202d3226ff", "score": "0.5901551", "text": "def generalizations(examples_so_far, h):\n hypotheses = []\n\n # Delete disjunctions\n disj_powerset = powerset(range(len(h)))\n for disjs in disj_powerset:\n h2 = h.copy()\n for d in reversed(list(disjs)):\n del h2[d]\n\n if check_all_consistency(examples_so_far, h2):\n hypotheses += h2\n\n # Delete AND operations in disjunctions\n for i, disj in enumerate(h):\n a_powerset = powerset(disj.keys())\n for attrs in a_powerset:\n h2 = h[i].copy()\n for a in attrs:\n del h2[a]\n\n if check_all_consistency(examples_so_far, [h2]):\n h3 = h.copy()\n h3[i] = h2.copy()\n hypotheses += h3\n\n # Add OR operations\n if hypotheses == [] or hypotheses == [{}]:\n hypotheses = add_or(examples_so_far, h)\n else:\n hypotheses.extend(add_or(examples_so_far, h))\n\n shuffle(hypotheses)\n return hypotheses", "title": "" }, { "docid": "0aff4a167860bc0bdabcee3c2932f140", "score": "0.5772034", "text": "def main_hydrogen(model,\n terminate_all_N_terminals=False,\n terminate_all_C_terminals=False,\n cap_all_terminals=False,\n append_to_end_of_model=False,\n verbose=False):\n #\n # make sure all pdb_interpretation parameters have been done\n #\n hierarchy = model.get_hierarchy()\n # should be automatic\n model.process(make_restraints=True)\n geometry_restraints_manager = model.get_restraints_manager().geometry\n atoms = hierarchy.atoms()\n\n #ready_set_utils.add_main_chain_atoms(hierarchy, geometry_restraints_manager)\n if (terminate_all_N_terminals or\n terminate_all_C_terminals or\n cap_all_terminals\n ):\n add_terminal_hydrogens(\n hierarchy,\n geometry_restraints_manager,\n terminate_all_N_terminals=terminate_all_N_terminals,\n terminate_all_C_terminals=terminate_all_C_terminals,\n use_capping_hydrogens=cap_all_terminals,\n append_to_end_of_model=append_to_end_of_model,\n verbose=False,\n )\n return\n\n assert 0\n\n\n n_done = []\n for three in hierarchy_utils.generate_protein_fragments(\n hierarchy,\n geometry_restraints_manager,\n backbone_only=False,\n #use_capping_hydrogens=use_capping_hydrogens,\n ):\n if verbose: print(three)\n if len(three)==1: continue\n for i, residue in enumerate(three):\n if not i: continue\n # this may not be necessary with the new threes\n residue = hierarchy_utils.get_residue_group(residue, atoms)\n h = hierarchy_utils.get_atom_from_residue_group(residue, 'H')\n if h is None:\n assert 0\n for ag, (n, ca, c) in ready_set_basics.generate_atom_group_atom_names(\n residue,\n ['N', 'CA', 'C'],\n ):\n if ag.resname in ['PRO']: continue\n if n in n_done: continue\n n_done.append(n)\n dihedral = 0\n rh3 = general_utils.construct_xyz(n, 0.9,\n ca, 109.5,\n c, dihedral,\n )\n atom = create_atom(' H ', 'H', rh3[0], n)\n # adding to atom_group\n # need to add to geometry_restraints_manager\n ag.append_atom(atom)\n if verbose: print(atom.quote())\n assert ag.resname!='PRO'", "title": "" }, { "docid": "72000fafff651e0630848b20f3833552", "score": "0.56439817", "text": "def build_h_combinations(hypotheses):\n h = []\n h_powerset = powerset(range(len(hypotheses)))\n\n for s in h_powerset:\n t = []\n for i in s:\n t.extend(hypotheses[i])\n h.append(t)\n\n return h", "title": "" }, { "docid": 
"7dc810a608f95c8cfe7196f93ec4a405", "score": "0.5610119", "text": "def calc_HydrophobicitySum(seq):\n hydr_sum = 0\n\n for i in seq:\n try:\n hydr_sum = KD_HYDROPHOBICITY[i]+hydr_sum\n except KeyError, e:\n raise sequenceToolsException(\"Invalid amino acid in sequence\")\n \n return hydr_sum", "title": "" }, { "docid": "d820eb97919c8c16f4daa88bd73ab799", "score": "0.55531764", "text": "def getHy(mol):\n molH = Chem.AddHs(mol)\n A = float(molH.GetNumHeavyAtoms())\n if A == 0.0:\n print(\"Chem Error: (molproperty l.35) No heavy atom\")# see to put that in a log file\n return 0.0\n NC = 0.0\n NHy = 0.0# only H connected to N, O, S\n for atom in molH.GetAtoms():\n if atom.GetAtomicNum() == 6:\n NC = NC + 1\n elif atom.GetAtomicNum() == 7 or atom.GetAtomicNum() == 8 or atom.GetAtomicNum() == 16:\n nHtemp = 0 # to be counted the group should have only one H\n atomn = atom.GetNeighbors()\n for n in atomn:\n if n.GetAtomicNum() == 1:\n nHtemp = nHtemp + 1\n if nHtemp == 1:\n NHy = NHy + 1\n #print(NHy, A, NC)\n numerator = (1 + NHy) * math.log((1 + NHy), 2) + NC * ((1.0 / A) * (math.log(1.0 / A, 2))) + math.sqrt((NHy) / A**2)\n demonimator = math.log(1+A, 2)\n out = numerator / demonimator\n return out", "title": "" }, { "docid": "19fc31a929c018517de9794779ecca85", "score": "0.5410751", "text": "def remove_all_hydrogens(self):\n for atom in self.atoms.keys():\n if self.is_hydrogen(atom):\n self.remove_atom(atom,update=False)\n self.Update()\n return", "title": "" }, { "docid": "9bad3bc0714660bcfe4d6c7bceefed21", "score": "0.53631383", "text": "def de_deuterate(self):\n atoms = self.atoms()\n # Get exchanged sites\n from mmtbx import utils\n hd_group_selections = self.exchangeable_hd_selections()\n hd_site_d_iseqs, hd_site_h_iseqs = [], []\n for gsel in hd_group_selections:\n i,j = gsel[0][0], gsel[1][0]\n for _i in [i,j]:\n if atoms[_i].element.strip().upper() == 'D':\n hd_site_d_iseqs.append(_i)\n if atoms[_i].element.strip().upper() == 'H':\n hd_site_h_iseqs.append(_i)\n #\n get_class = iotbx.pdb.common_residue_names_get_class\n for m in self.models():\n for c in m.chains():\n for rg in c.residue_groups():\n for ag in rg.atom_groups():\n for a in ag.atoms():\n i = a.i_seq\n # remove D atoms at exchanged sites\n if a.element.strip().upper() == 'D' and i in hd_site_d_iseqs:\n ag.remove_atom(a)\n continue\n # remove D/H atoms in water and rename residue to HOH\n resname = (a.parent().resname).strip()\n if(get_class(name = resname) == \"common_water\"):\n if a.element.strip().upper() == 'O':\n a.parent().resname = 'HOH'\n if a.element_is_hydrogen():\n ag.remove_atom(a)\n continue\n # reset occ and altloc for H at exchanged sites\n if a.element.strip().upper() == 'H' and i in hd_site_h_iseqs:\n a.occ = 1.0\n a.parent().altloc = \"\"\n # transform all other D atoms to H: change element and rename\n if a.element.strip().upper() == 'D':\n a.element = 'H'\n new_name = a.name.replace('D','H',1)\n a.name = new_name", "title": "" }, { "docid": "4d861f6215560e5a2754d7ee66f87f8c", "score": "0.5358258", "text": "def cap_zeolite(self):\n print(\"\\n\\nCAPPING ZEOLITE\")\n print(\"--------------------------------------\")\n\n\n print(\"%d Atom X debug probes\" % len(self.hydrogens.keys()))\n self.nodes_to_replace = set()\n\n for i in range(len(self.components_to_keep)):\n\n component = self.components_to_keep[i]\n\n for node in component:\n\n for nbr in self.tree.successors_iter(node):\n\n if(nbr in self.hydrogens.keys()):\n #self.hydrogens.pop(nbr, None)\n pass\n else:\n if((node, nbr) in 
self.actual_truncs_directed):\n #print(\"Capping edge: \" + str((node,nbr)))\n to_add = True\n for component in self.components_to_keep:\n if(nbr in component):\n to_add = False\n break\n\n\n if(to_add):\n bond_start = self.origraph.node[node]['cartesian_coordinates']\n bond_end = self.origraph.node[nbr]['cartesian_coordinates']\n start_type = self.origraph.node[node]['atomic_number']\n end_type = self.origraph.node[nbr]['atomic_number']\n\n bond_vec_mag = self.cart_dist(bond_start, bond_end)\n bond_vec = bond_end - bond_start\n\n # these are the easy cases\n if(start_type == 8):\n h_dist = 0.96\n elif(start_type == 14):\n h_dist = 1.46\n elif(start_type == 8):\n h_dist = 0.96\n\n scaled_bond_vec = h_dist/bond_vec_mag * (bond_vec)\n new_bond_end = bond_start + scaled_bond_vec\n\n # store necessary modifications\n self.hydrogens[nbr] = {\n 'cartesian_coordinates': new_bond_end,\n 'atomic_number': 1,\n 'element': 'H'\n }\n self.num_keep += 1\n\n print(\"%s hydrogens added as caps: \" % (str(len(self.hydrogens))))\n print(\"%s num atoms in final cluster: \" % (str(self.num_keep)))", "title": "" }, { "docid": "fe854d3f09b6b28844da36a8b7c2f2d9", "score": "0.5351613", "text": "def try_single_alcoholic_h(self, donor, acc, newatom):\r\n\r\n # Initialize some variables\r\n besten = 999.99\r\n bestcoords = []\r\n residue = donor.residue\r\n pivot = donor.bonds[0]\r\n\r\n for _ in range(72):\r\n residue.rotate_tetrahedral(pivot, donor, 5.0)\r\n if self.is_hbond(donor, acc):\r\n energy = self.get_pair_energy(donor, acc)\r\n if energy < besten:\r\n bestcoords = newatom.coords\r\n besten = energy\r\n\r\n # If a hydrogen bond was made, set at best coordinates\r\n if bestcoords != []:\r\n newatom.x = bestcoords[0]\r\n newatom.y = bestcoords[1]\r\n newatom.z = bestcoords[2]\r\n self.routines.cells.add_cell(newatom)\r\n return 1\r\n residue.remove_atom(newatom.name)\r\n return 0", "title": "" }, { "docid": "c08a2258ecb8f5231234fa2d13efe0dd", "score": "0.53510004", "text": "def calculate_hydrophobicity(seq):\n phobic = 0\n seq = seq.upper()\n for elem in seq:\n if elem in hydrophobic_residues:\n phobic += 1\n\n return phobic", "title": "" }, { "docid": "6a67f979fbf8b5fb8178d3eb28bae8a2", "score": "0.5338745", "text": "def _calc_interactions(structure_id, q, t, qc, tc, level, distance_cutoff, bio, qindex, tindex):\n tree_q = cKDTree(qc)\n tree_t = cKDTree(tc)\n sparse_dm = tree_t.sparse_distance_matrix(tree_q, max_distance=distance_cutoff, output_type='dict')\n\n # Add interactions to rows.\n # There are redundant interactions when aggregating the results at the 'chain' and 'group' level,\n # since multiple atoms in a group may be involved in interactions.\n # Therefore we use a set of rows to store only unique interactions.\n if level == 'atom' or level == 'coord':\n rows = list()\n else:\n rows = set()\n\n for ind, dis in sparse_dm.items():\n # exclude self interactions (this can happen if the query and target criteria overlap)\n if dis < 0.001:\n continue\n\n i = ind[0] # polymer target atom index\n j = ind[1] # polymer query atom index\n\n tr = t.iloc[[i]]\n qr = q.iloc[[j]]\n\n qcid = qr['chain_id'].item()\n tcid = tr['chain_id'].item()\n\n qgn = qr['group_number'].item()\n tgn = tr['group_number'].item()\n\n if bio is None:\n # exclude interactions within the same chain and group\n if qcid == tcid and qgn == tgn:\n continue\n else:\n # exclude interactions within the same chain, transform, and group\n if qindex == tindex and qcid == tcid and qgn == tgn:\n continue\n\n # add query data to a row\n id = 
structure_id + \".\" + tr['chain_name'].item()\n row = (id, qr['chain_name'].item(),)\n if bio is not None:\n row += (qindex,)\n if level != 'chain':\n row += (qgn, qr['group_name'].item(),)\n if level == 'atom' or level == 'coord':\n row += (qr['atom_name'].item(),)\n\n # add target data to a row\n row += (tr['chain_name'].item(),)\n if bio is not None:\n row += (tindex,)\n if level != 'chain':\n row += (tgn, tr['group_name'].item(),)\n if level == 'atom' or level == 'coord':\n row += (tr['atom_name'].item(), dis,)\n if level == 'coord':\n row += (qc[j][0].item(), qc[j][1].item(), qc[j][2].item(),\n tc[i][0].item(), tc[i][1].item(), tc[i][2].item(),)\n \n # add row\n if level == 'atom' or level == 'coord':\n # for these levels, we use a list\n rows.append(row)\n else:\n # for the group or chain level we use a set to remove redundant info\n rows.add(row)\n\n return list(rows)", "title": "" }, { "docid": "de9669c1c04da7dd677214e66101414f", "score": "0.5256896", "text": "def hdjoints(self):\n\n c = np.sum(self.m, axis=2)\n e = np.sum(self.m, axis=1)\n\n # Bhattacharyya coefficient\n bc = 0.0\n for i, j in zip(np.ravel(c), np.ravel(e)):\n bc += np.sqrt(i * j)\n\n # compute Hellinger disatance\n hd = 1.0 - bc / self.t\n\n return np.sqrt(2.0 * hd), np.sqrt(hd)", "title": "" }, { "docid": "7c4232c3b374271354f8af2ba55d7d7a", "score": "0.52478766", "text": "def get_hydrogens_for_res(self,\n resname,\n c,\n prevc,\n n_terminal=False,\n c_terminal=False):\n hs = []\n # Special Cases\n if n_terminal:\n h, h2, h3 = self.get_methyl_hydrogens(c.N, c.CA, c.C, use_amine_length=True)\n self.terminal_atoms.update({\"H2\": h2, \"H3\": h3})\n hs.append(h) # Used as normal amine hydrogen, H\n if c_terminal:\n oxt = self.get_amide_methine_hydrogen(c.CA, c.C, c.O, oxt=True)\n self.terminal_atoms.update({\"OXT\": oxt})\n # All amino acids except proline have an amide-hydrogen along the backbone\n if prevc and resname != \"P\":\n hs.append(self.get_amide_methine_hydrogen(prevc.C, c.N, c.CA, amide=True))\n # If the amino acid is not Glycine, we can add an sp3-hybridized H to CA\n if resname != \"G\":\n cah = self.get_single_sp3_hydrogen(center=c.CA, R1=c.N, R2=c.C, R3=c.CB)\n hs.append(cah)\n\n # Now, we can add the remaining unique hydrogens for each amino acid\n if resname == \"A\":\n hs.extend(self.ala(c))\n elif resname == \"R\":\n hs.extend(self.arg(c))\n elif resname == \"N\":\n hs.extend(self.asn(c))\n elif resname == \"D\":\n hs.extend(self.asp(c))\n elif resname == \"C\":\n hs.extend(self.cys(c))\n elif resname == \"E\":\n hs.extend(self.glu(c))\n elif resname == \"Q\":\n hs.extend(self.gln(c))\n elif resname == \"G\":\n hs.extend(self.gly(c))\n elif resname == \"H\":\n hs.extend(self.his(c))\n elif resname == \"I\":\n hs.extend(self.ile(c))\n elif resname == \"L\":\n hs.extend(self.leu(c))\n elif resname == \"K\":\n hs.extend(self.lys(c))\n elif resname == \"M\":\n hs.extend(self.met(c))\n elif resname == \"F\":\n hs.extend(self.phe(c))\n elif resname == \"P\":\n hs.extend(self.pro(c))\n elif resname == \"S\":\n hs.extend(self.ser(c))\n elif resname == \"T\":\n hs.extend(self.thr(c))\n elif resname == \"W\":\n hs.extend(self.trp(c))\n elif resname == \"Y\":\n hs.extend(self.tyr(c))\n elif resname == \"V\":\n hs.extend(self.val(c))\n\n return self.stack(self.pad_hydrogens(resname, hs), 0)", "title": "" }, { "docid": "8406de1fca9452130755f114ac081ee8", "score": "0.52435505", "text": "def create_hydrogen(self, chain_name, res_number, atom_name, coordinates_list, redo=0):\n # the nomenclature may need to be 
adjusted to account for hydrogens\n # that extend the length of the main chain. For example, the terminal \n # three hydrogens of lysine should probably be named HH1, HH2, and HH3,\n # as opposed to being named after the terminal lysine\n #\n # response: this is not the case. The standard is that hydrogens are named after\n # the heavy atoms they are attached to.\n #\n atom_place = atom_name[1:]\n if (len(atom_place) > 0):\n regx = re.compile (atom_place)\n else:\n regx = re.compile (\"^\\d*$\")\n hyd_present = 1\n target_chain = self.PolymerDict[chain_name]\n target_res = target_chain.residues_dict['%s'%(res_number)] \n for atom in target_res.atoms:\n if atom.atom_type[0] == 'H':\n if regx.match(atom.atom_type[1:]):\n hyd_present += 1\n\n if hyd_present == 2:\n for atom in target_res.atoms:\n if atom.atom_type[0] == 'H':\n if regx.match(atom.atom_type[1:]):\n atom.atom_type += '1'\n \n atom_number = 0\n if hyd_present >= 2:\n atom_type = 'H' + atom_place + '%s'%(hyd_present)\n else:\n atom_type = 'H' + atom_place\n res_type = target_res.res_type\n x,y,z = coordinates_list[0], coordinates_list[1], coordinates_list[2]\n new_atom = Atom(target_chain.residues_dict['%s'%(res_number)],\\\n \"ATOM %6s%5s %3s %1s%4s %11.3f %7.3f %7.3f%26s\\n\"\\\n %(atom_number,atom_type,res_type,chain_name,res_number,x,y,z, \" \"))\n new_atom.data['parent_molecule'] = target_res\n # can just insert into the residue's dictionary\n target_res.atoms_dict[new_atom.atom_type] = new_atom\n # find the correct heavy atom and append after any hydrogens that follow it.\n found_the_heavy_atom = 0\n counter = 0\n for atom in target_res.atoms:\n if not found_the_heavy_atom:\n if regx.match(atom.atom_type[1:]):\n found_the_heavy_atom = 1\n counter += 1\n continue\n else:\n if atom.atom_type[0] == 'H':\n counter += 1\n continue\n else:\n target_res.atoms[counter:counter] = [new_atom]\n break\n else:\n target_res.atoms.append(new_atom)\n # now that the residue is changed, rebuild the atoms list for the\n # polymer or molecule\n target_chain.atoms = []\n for res in target_chain.residues:\n for atom in res.atoms:\n target_chain.atoms.append(atom)\n # polymers have no atoms_dict, but ligands will. insert code here\n self.renumber_atoms()\n return new_atom", "title": "" }, { "docid": "d6872112a04cb70be4f79d997a0a1302", "score": "0.52171826", "text": "def mapInteractionsToTads(self, interactionsByTad, tadData):\n\t\t\n\t\tprint(\"mapping to tads\")\n\t\tuniqueElements = dict()\n\t\tfor tadStr in interactionsByTad:\n\n\t\t\t#1. Get the right TAD object that this interaction bin is in. \n\t\t\tsplitTadStr = tadStr.split(\"_\")\n\n\t\t\ttadChrMatch = tadData[:,0] == splitTadStr[0]\n\t\t\ttadStartMatch = tadData[:,1] == int(splitTadStr[1])\n\t\t\ttadEndMatch = tadData[:,2] == int(splitTadStr[2])\n\n\t\t\tmatchingTad = tadData[tadChrMatch * tadStartMatch * tadEndMatch]\n\n\t\t\tif len(matchingTad) < 1: #Sometimes the interaction are outside TADs\n\t\t\t\tcontinue\n\n\t\t\tmatchingTad = matchingTad[0] #assume that there is 1 TAD the bin falls into. 
We have 1 start coordinate for the interaction, so we assume that it is that one that is within the right TAD.\n\t\t\t#Assign the interactions to this TAD.\n\t\t\tinteractionLines = []\n\t\t\tbinSize = 5000 #should be a setting to work with other Hi-C data than we currently use.\n\n\t\t\tfor lineInd in range(0, len(interactionsByTad[tadStr])-1):\n\t\t\t\tline = interactionsByTad[tadStr][lineInd]\n\t\t\t\telement = [splitTadStr[0], int(line), int(line)+binSize, \"hic\", None, None] #add the bin size to indicate the start/end of the interaction to use for the element. \n\t\t\t\tinteractionLines.append(element)\n\n\t\t\t#Assign the interactions to this TAD.\n\t\t\tmatchingTad[3].addElements(interactionLines)\n\t\t\n\t\treturn tadData", "title": "" }, { "docid": "5d700402e5a63ba42e26366a41d1e803", "score": "0.5214855", "text": "def calc_HydrophobicityScore(seq):\n maxVal = len(seq)*max(KD_HYDROPHOBICITY.values())\n\n hydr_sum = 0\n\n for i in seq:\n try:\n hydr_sum = KD_HYDROPHOBICITY[i]+hydr_sum\n except KeyError, e:\n raise sequenceToolsException(\"Invalid amino acid in sequence\")\n \n return hydr_sum/maxVal", "title": "" }, { "docid": "a57a732d3acff1c56148fcaeaafabbc2", "score": "0.5184425", "text": "def make_duplication_restraints(params, input_hierarchy, log=None):\n\n if log is None: log = Log(verbose=True)\n\n log.subheading('Generating restraints for duplicated conformers')\n\n dup_groups = []\n\n for chn in input_hierarchy.hierarchy.chains():\n\n if (params.duplicates.make_for == 'protein') and not chn.is_protein():\n continue\n elif (params.duplicates.make_for == 'het') and chn.is_protein():\n continue\n\n for rg in chn.residue_groups():\n dup_groups += find_duplicated_conformers_and_generate_atom_pairs(residue_group=rg, rmsd_cutoff=params.duplicates.rmsd_cutoff)\n\n if not dup_groups:\n log('No duplicated conformers (no restraints created)')\n return\n\n # Concatenate atoms into one list\n atom_pairs = []; [atom_pairs.extend(l) for l in dup_groups]\n\n log('Found {} duplicated conformers consisting of {} atoms'.format(len(dup_groups), len(atom_pairs)))\n log('')\n\n if params.output.refmac:\n restraint_list = [RefmacFormatter.make_distance_restraint(atm_1=a1, atm_2=a2, value=0.0, sigma=params.duplicates.sigma_xyz) for a1,a2 in atom_pairs]\n rest_block = RefmacFormatter.format_distance_restraints(restraint_list=restraint_list)\n with open(params.output.refmac, 'a') as fh: fh.write(rest_block+'\\n')\n if params.settings.verbose:\n log.subheading('refmac duplicate conformer restraints')\n log(rest_block[:1000]+'...'*(len(rest_block)>1000))\n log('')\n\n if params.output.phenix:\n restraint_list = [PhenixFormatter.make_distance_restraint(atm_1=a1, atm_2=a2, value=0.0, sigma=params.duplicates.sigma_xyz) for a1,a2 in atom_pairs]\n rest_block = PhenixFormatter.format_distance_restraints(restraint_list=restraint_list)\n with open(params.output.phenix, 'a') as fh: fh.write(rest_block+'\\n')\n if params.settings.verbose:\n log.subheading('phenix duplicate conformer restraints')\n log(rest_block[:1000]+'...'*(len(rest_block)>1000))\n log('')", "title": "" }, { "docid": "0cd257a534ce419c9598cf9f7f28f51e", "score": "0.5142687", "text": "def cap_zeolite_v2(self):\n print(\"\\n\\nCAPPING ZEOLITE\")\n print(\"--------------------------------------\")\n\n\n print(\"%d Atom X debug probes\" % len(self.hydrogens.keys()))\n self.nodes_to_replace = set()\n #print(self.actual_truncs_directed)\n\n for i in range(len(self.components_to_keep)):\n\n component = self.components_to_keep[i]\n\n for node in 
component:\n\n for nbr in self.tree.successors_iter(node):\n\n if(nbr in self.hydrogens.keys()):\n #self.hydrogens.pop(nbr, None)\n pass\n else:\n if((node, nbr) in self.actual_truncs_directed):\n print(\"Capping edge: \" + str((node,nbr)))\n\n # Don't cap if we're wrapping around into a node that's already been kept\n to_add = True\n for component in self.components_to_keep:\n if(nbr in component):\n to_add = False\n break\n if(to_add):\n bond_start = self.origraph.node[node]['cartesian_coordinates']\n bond_end = self.origraph.node[nbr]['cartesian_coordinates']\n start_type = self.origraph.node[node]['atomic_number']\n end_type = self.origraph.node[nbr]['atomic_number']\n\n bond_vec_mag = self.cart_dist(bond_start, bond_end)\n bond_vec = bond_end - bond_start\n\n # these are the easy cases\n if(start_type == 8):\n h_dist = 0.96\n elif(start_type == 14):\n h_dist = 1.46\n\n scaled_bond_vec = h_dist/bond_vec_mag * (bond_vec)\n new_bond_end = bond_start + scaled_bond_vec\n\n # store necessary modifications\n self.hydrogens[nbr] = {\n 'cartesian_coordinates': new_bond_end,\n 'atomic_number': 1,\n 'element': 'H'\n }\n self.num_keep += 1\n\n print(\"%s hydrogens added as caps: \" % (str(len(self.hydrogens))))", "title": "" }, { "docid": "782c066085aa4c2d810b768b63f6e6a6", "score": "0.511725", "text": "def homology(self, i=None, j=None, base_ring=ZZ, algorithm='auto'):\n if j !=0:\n raise NotImplementedError(\"Only implemented for q-degree 0\")\n diff = { k: self.differential(k, 0) for k in ([i, i+1] if i is not None else range(self._G.num_verts())) }\n if self._verbose:\n print(\"%s: %s: Reducing differentials using AMT\"%(datetime.now(),self._name))\n diff = amt_reduction(diff)\n if self._verbose:\n print(\"%s: %s: Computing homology\"%(datetime.now(),self._name))\n CC = ChainComplex(diff, degree=-1, base_ring=base_ring)\n if i is not None:\n homology = {i: CC.homology(i, algorithm=algorithm)}\n else:\n homology = CC.homology(algorithm=algorithm) \n if self._verbose:\n print(\"%s: %s: Done\"%(datetime.now(),self._name))\n return { (k,0): homology[k] for k in homology }", "title": "" }, { "docid": "5cd67f8270a0653de26486fefd2ddf6b", "score": "0.51105475", "text": "def interactions(intracell, intercell, w, q):\n H = np.zeros((12, 12), dtype=np.complex_)\n\n k = w*ev\n\n # We only need to fill 'top right' diagonal of the matrix since 'opposite\n # direction' interactions are given by the Hermitian conjugate.\n\n indices = np.arange(len(intracell))\n for n, m in itertools.combinations(indices, 2):\n H[2*n:2*n+2, 2*m:2*m+2] = sum([green(k, -intracell[n] + intracell[m] + inter) * np.exp(1j * np.dot(q, -intracell[n] + intracell[m] + inter)) for inter in intercell])\n H[2*m:2*m+2, 2*n:2*n+2] = sum([green(k, -(-intracell[n] + intracell[m] + inter)) * np.exp(1j * np.dot(q, -(-intracell[n] + intracell[m] + inter))) for inter in intercell])\n\n #H = H + np.conjugate(H).T # the matrix is symmetrical about the diagonal (check this)\n\n\n # Create the diagonal by considering interactions between same particle\n # sites but in different cells. 
Need to make sure to ignore the (0,0)\n # element in intercell to prevent issues with the Green's function as we\n # have no 'self interaction'.\n for n in np.arange(len(intracell)):\n to_sum = []\n for inter in intercell:\n if np.linalg.norm(inter) != 0: # ignore (0,0) position\n to_sum.append(green(k, inter) * np.exp(1j * np.dot(q, inter)))\n\n H[2*n:2*n+2, 2*n:2*n+2] = sum(to_sum)\n return H", "title": "" }, { "docid": "62606cb1abaeca89920ed175800bd62c", "score": "0.5110469", "text": "def sim_degenerate_adiabatic(tlist, H0: qobj, H1: qobj, psi0: qobj, max_degen=False):\n\n tmin = min(tlist)\n tmax = max(tlist)\n s = lambda t: (t - tmin) / (tmax - tmin)\n H0_energies, H0_ev = H0.eigenstates(eigvals=max_degen)\n H0_degeneracy = sum(abs(H0_energies - H0_energies.min()) < PRECISION)\n groundspace = H0_ev[0:H0_degeneracy]\n P_mat = []\n eigvals_mat = []\n psi = psi0\n psis = [psi]\n eigvals_mat.append(H0_energies)\n # gs_projection = sum([abs(groundstate.overlap(psi)) ** 2 for groundstate in groundspace])\n gs_projection = LHT.get_total_projection_size(groundspace, psi)[0]\n P_mat.append(\n [gs_projection])\n oldt = tmin\n\n # # TODO: remove debug\n # #\n # import pydevd\n # from importlib import reload\n # reload(pydevd)\n # pydevd.settrace('localhost', port=4000, stdoutToServer=True, stderrToServer=True)\n\n for t in tlist[1:]:\n dt = t - oldt\n Ht = H0 * (1 - s(t)) + H1 * (s(t))\n Ht_energies, HT_ev = Ht.eigenstates(eigvals=max_degen)\n Ht_degeneracy = sum(abs(Ht_energies - Ht_energies.min()) < PRECISION)\n groundspace = HT_ev[0:Ht_degeneracy]\n start_time = time.time()\n U = (Ht * -1j * dt).expm()\n psi = U * psi\n psis.append(psi)\n eigvals_mat.append(Ht_energies)\n # gs_projection = sum([abs(groundstate.overlap(psi)) ** 2 for groundstate in groundspace])\n # if len(groundspace) > 1:\n # print(\"old method \", gs_projection, \" new method\", LHT.get_total_projection_size(groundspace, psi))\n gs_projection = LHT.get_total_projection_size(groundspace, psi)\n P_mat.append(\n [gs_projection])\n oldt = t\n return P_mat, eigvals_mat, psis", "title": "" }, { "docid": "9a2400228646e117722c26d0589c3e1e", "score": "0.5097145", "text": "def _get_hydrogen_bond_idx(self):\n self.donor_idx = []\n self.acceptor_idx = []\n \n for i,ele in enumerate(self.struct.geometry[\"element\"]):\n # Donors definition\n if ele == \"H\":\n bond_idx = self.bonding_list[i]\n elements = self.struct.geometry[\"element\"][bond_idx]\n for h_ele in self.donor_elements:\n if h_ele in elements:\n self.donor_idx.append(i)\n break\n \n # Acceptor definition\n elif ele in self.acceptor_elements:\n bonding = self.bonding_list[i]\n # Check for terminal oxygen or bridging oxygen\n if ele == \"O\":\n if len(bonding) == 1:\n self.acceptor_idx.append(i)\n elif len(bonding) == 2:\n bond_ele = self.ele[bonding]\n unique_ele = np.unique(bond_ele)\n if len(unique_ele) == 1 and unique_ele[0] == \"C\":\n self.acceptor_idx.append(i)\n elif len(unique_ele) == 2:\n if \"C\" in unique_ele:\n if \"H\" in unique_ele:\n self.acceptor_idx.append(i)\n \n # Check for terminal nitrogen\n if ele == \"N\":\n if len(bonding) <= 2:\n self.acceptor_idx.append(i)\n \n return self.donor_idx,self.acceptor_idx", "title": "" }, { "docid": "d98eaf8f79b28b490f631a940ca60a2b", "score": "0.5079183", "text": "def dos_heterostructure(hetero,energies=[0.0],num_rep=100,\n mixing=0.7,eps=0.0001,green_guess=None,max_error=0.0001):\n dos = [] # list with the density of states\n iden = np.matrix(np.identity(len(intra),dtype=complex)) # create idntity\n for energy in 
energies: # loop over energies\n # right green function\n intra = hetero.right_intra\n inter = hetero.right_inter\n gr = dyson(intra,inter,energy=energy,num_rep=num_rep,mixing=mixing,\n eps=eps,green_guess=green_guess,max_error=max_error)\n # left green function\n intra = hetero.right_intra\n inter = hetero.right_inter\n gl = dyson(intra,inter,energy=energy,num_rep=num_rep,mixing=mixing,\n eps=eps,green_guess=green_guess,max_error=max_error)\n # central green function\n selfl = inter.H*gl*inter # left selfenergy\n selfr = inter*gr*inter.H # right selfenergy\n gc = energy*iden -intra -selfl -selfr # dyson equation for the center\n gc = gc.I # calculate inverse\n dos.append(-gc.trace()[0,0].imag) # calculate the trace of the Green function\n return dos", "title": "" }, { "docid": "692f584c37967b753c4f7ea9dba42743", "score": "0.5077331", "text": "def compute_individual_treatment_effect(self, df, paths, \n g, query, options, \n bug_val, config, cfg, \n variable_types):\n from causality.estimation.nonparametric import CausalEffect \n from causality.estimation.adjustments import AdjustForDirectCauses\n from networkx import DiGraph\n ite = []\n \n objectives = options.obj \n option_values = cfg[\"option_values\"][options.hardware]\n adjustment = AdjustForDirectCauses()\n if query == \"best\":\n bestval = bug_val\n else:\n bestval = (1-query)*bug_val\n \n # multi objective treatment effect\n if len(objectives) >= 2:\n m_paths = defaultdict(list)\n multi_paths = []\n for p in paths:\n m_paths[p[-1]].append(p[0])\n \n for key,_ in m_paths.items():\n cur_p = []\n if len(m_paths[key]) >=2:\n indexes = [i for i,v in enumerate(paths) if key in v]\n for ind in indexes:\n cur_p.append(paths[ind])\n paths = [i for j, i in enumerate(paths) if j not in indexes]\n multi_paths.append(cur_p)\n # compute treatment effect\n if paths:\n for path in paths: \n cur_g = DiGraph()\n cur_g.add_nodes_from(path)\n cur_g.add_edges_from([(path[j], path[j-1]) for j in range(len(path)-1,0,-1)])\n for i in range(0, len(path)):\n if i > 0:\n if cfg[\"is_intervenable\"][path[i]]:\n admissable_set = adjustment.admissable_set(cur_g,[path[i]], [path[0]])\n \n effect = CausalEffect (df, [path[i]], [path[0]],\n variable_types=variable_types, admissable_set=list(admissable_set))\n max_effect = -20000\n # compute effect for each value for the options\n for val in option_values[path[i]]:\n x = pd.DataFrame({path[i] : [val], path[0] : [bestval[path[0]]]})\n cur_effect = effect.pdf(x)\n if max_effect < cur_effect:\n max_effect = cur_effect\n ite.append([path[i],val])\n \n if multi_paths:\n for mp in multi_paths:\n for path in mp: \n cur_g = DiGraph()\n cur_g.add_nodes_from(path)\n cur_g.add_edges_from([(path[j], path[j-1]) for j in range(len(path)-1,0,-1)])\n for i in range(0, len(path)):\n if i > 0:\n if cfg[\"is_intervenable\"][path[i]]:\n if len(objectives) == 2:\n admissable_set = adjustment.admissable_set(cur_g, [path[i]], [objectives[0], objectives[1]])\n effect = CausalEffect(df, [path[i]], [objectives[0], objectives[1]],\n variable_types=variable_types, admissable_set=list(admissable_set))\n max_effect = -20000\n # compute effect for each value for the options\n for val in option_values[path[i]]:\n x = pd.DataFrame({path[i] : [val], objectives[0] : [bestval[objectives[0]]], objectives[1] : [bestval[objectives[1]]]})\n cur_effect = effect.pdf(x)\n if max_effect < cur_effect:\n max_effect = cur_effect\n ite.append([path[i],val])\n elif len(objectives) == 3:\n admissable_set = adjustment.admissable_set(cur_g, [path[i]], 
[objectives[0], objectives[1], objectives[2]])\n effect = CausalEffect(df, [path[i]], [objectives[0], objectives[1], objectives[2]],\n variable_types=variable_types, admissable_set=list(admissable_set))\n max_effect = -20000\n # compute effect for each value for the options\n for val in option_values[path[i]]:\n x = pd.DataFrame({path[i] : [val], objectives[0] : [bestval[objectives[0]]], objectives[1] : [bestval[objectives[1]]], objectives[2] : [bestval[objectives[2]]]})\n cur_effect = effect.pdf(x)\n if max_effect < cur_effect:\n max_effect = cur_effect\n ite.append([path[i],val])\n else:\n print (\"[ERROR]: number of objectives not supported\")\n return \n \n \n return config\n \n # single objective treatment effect\n selected_effect = []\n for path in paths: \n \n cur_g = DiGraph()\n cur_g.add_nodes_from(path)\n cur_g.add_edges_from([(path[j], path[j-1]) for j in range(len(path)-1,0,-1)])\n \n for i in range(0, len(path)):\n if i > 0:\n \n if cfg[\"is_intervenable\"][path[i]]:\n if len(objectives) < 2: \n admissable_set = adjustment.admissable_set(cur_g,[path[i]], [path[0]])\n \n effect = CausalEffect(df, [path[i]], [path[0]],\n variable_types=variable_types, admissable_set=list(admissable_set))\n \n max_effect = -20000\n # compute effect for each value for the options\n for val in option_values[path[i]]:\n \n x = pd.DataFrame({path[i] : [val], path[0] : [bestval]})\n \n cur_effect = effect.pdf(x)\n \n if max_effect < cur_effect:\n max_effect = cur_effect\n ite.append([path[i],val])\n selected_effect.append(max_effect) \n \n selected_index = np.argmax(selected_effect)\n \n config[ite[selected_index][0]] = ite[selected_index][1]\n print (\"--------------------------------------------------\")\n print (\"--------------------------------------------------\")\n print (\" Recommended Configuration \")\n print (config)\n print (\"--------------------------------------------------\")\n print (\"--------------------------------------------------\")\n return config", "title": "" }, { "docid": "13fa3df57db2008955a8bd69af947bae", "score": "0.50755286", "text": "def get_structure_mix(all_interactions, homodimers, heterodimers):\n\n\tsuper_imposer = Superimposer()\n\tsup_chains = {}\n\n\t#Get homodimer chains\n\tn = 0\n\tfor str_id, str in homodimers.items():\n\t\tchains = all_interactions[str_id]\n\t\tref_model = chains[0]\n\n\t\tsup_chains[n] = [ref_model[0],ref_model[1]]\n\n\t\tfor alt_model in chains[1:]:\n\t\t\tref_atoms = []\n\t\t\talt_atoms = []\n\t\t\tprint(\"Comparing %s with %s\" %(ref_model, alt_model))\n\t\t\tfor (ref_chain, alt_chain) in zip(ref_model, alt_model):\n\t\t\t\tfor ref_res, alt_res in zip(ref_chain, alt_chain):\n\t\t\t\t\tif ref_res.resname == alt_res.resname and ref_res.id == alt_res.id:\n\t\t\t\t\t\tref_atoms.append(alt_res['CA'])\n\t\t\t\t\t\talt_atoms.append(alt_res['CA'])\n\n\t\t\t#Align these paired atom lists:\n\t\t\tsuper_imposer.set_atoms(ref_atoms, alt_atoms)\n\n\t\t #Update the structure by moving all the atoms in\n\t\t #this model (not just the ones used for the alignment)\n\t\t\tsuper_imposer.apply(list(alt_model[0].get_atoms()) + list(alt_model[1].get_atoms()))\n\t\t\tsup_chains[n].append(ref_model[1])\n\n\t\t\tprint(\"RMS(ref %s, model %s) = %0.2f\" % (ref_model, alt_model, super_imposer.rms))\n\t\tn += 1\n\t\n\t#Add heterodimer interactions\n\tfor str_id, str in heterodimers.items():\n\t\tn += 1\n\t\tchains = all_interactions[str_id]\n\t\talt_model = chains[0]\n\t\tseq1 = get_sequence(alt_model[0])\n\t\tseq2 = get_sequence(alt_model[1])\n\t\tfor n_inter in 
sup_chains:\n\t\t\tref_chain = sup_chains[n_inter][0]\n\t\t\tref_seq = get_sequence(ref_chain)\n\t\t\tif (seq_comparison(ref_seq, seq1)):\n\t\t\t\tref_atoms = list(alt_model[0].get_atoms())\n\t\t\t\talt_atoms = list(ref_chain.get_atoms())\n\t\t\t\tsuper_imposer.set_atoms(ref_atoms, alt_atoms)\n\t\t\t\tsuper_imposer.apply(list(alt_model[0].get_atoms()) + list(alt_model[1].get_atoms()))\n\t\t\t\tsup_chains[n] = [alt_model[1]]\n\t\t\t\tprint(\"RMS(ref %s, model %s) = %0.2f\" % (chain1, chain, super_imposer.rms))\n\t\t\t\tbreak\n\n\t\t\telif seq_comparison(ref_seq, seq2):\n\t\t\t\tref_atoms = list(alt_model[1].get_atoms())\n\t\t\t\talt_atoms = list(ref_chain.get_atoms())\n\t\t\t\tsuper_imposer.set_atoms(ref_atoms, alt_atoms)\n\t\t\t\tsuper_imposer.apply(list(alt_model[0].get_atoms()) + list(alt_model[1].get_atoms()))\n\t\t\t\tsup_chains[n] = [alt_model[0]] \n\t\t\t\tprint(\"RMS(ref %s, model %s) = %0.2f\" % (alt_model, ref_chain, super_imposer.rms))\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue\n\n\n\t#Create the new structure\n\tnew_structure = Structure.Structure(\"macrocomplex\")\n\tnew_structure.add(Model.Model(0))\n\n\tn = 0\n\tfor chains in sup_chains.values():\n\t\tfor chain in chains:\n\t\t\ttry:\n\t\t\t\tnew_structure[0].add(chain)\n\t\t\texcept:\n\t\t\t\tn += 1\n\t\t\t\tnew_structure.add(Model.Model(n))\n\t\t\t\tnew_structure[n].add(chain)\n\n\treturn new_structure", "title": "" }, { "docid": "24b6f8100e55defe918ec2255e2770b6", "score": "0.5048765", "text": "def specializations(examples_so_far, h):\n hypotheses = []\n\n for i, disj in enumerate(h):\n for e in examples_so_far:\n for k, v in e.items():\n if k in disj or k == 'GOAL':\n continue\n\n h2 = h[i].copy()\n h2[k] = '!' + v\n h3 = h.copy()\n h3[i] = h2\n if check_all_consistency(examples_so_far, h3):\n hypotheses.append(h3)\n\n shuffle(hypotheses)\n return hypotheses", "title": "" }, { "docid": "f715de61cafa837fc40fa4e702e7204d", "score": "0.5043739", "text": "def calculate_hydro_power(self):\n\n efficiency = .8 #average hydroelectric plant efficiency\n water_density = 997\n flow_rate = 1 #may vary because of rain but usually doesn't\n gravity_acceleration = 9.8\n height_diff = 100.5 #austin's tom miller dam\n num_dams = 12\n\n return (efficiency*water_density*flow_rate*gravity_acceleration*height_diff) / 1000000.0 * num_dams", "title": "" }, { "docid": "464d4bc4ec1bee57ee3dad64d8e3778c", "score": "0.5027427", "text": "def try_single_alcoholic_lp(self, acc, donor, newatom):\r\n\r\n # Initialize some variables\r\n residue = acc.residue\r\n pivot = acc.bonds[0]\r\n bestangle = 180.00\r\n bestcoords = []\r\n\r\n # If a hydrogen bond was made, set at best distance\r\n if not self.is_hbond(donor, acc):\r\n residue.remove_atom(newatom.name)\r\n return 0\r\n\r\n # Grab the H(D) that caused the bond\r\n\r\n for donorhatom in donor.bonds:\r\n if donorhatom.is_hydrogen:\r\n if self.get_hbond_angle(acc, donor, donorhatom) < ANGLE_CUTOFF:\r\n the_donorhatom = donorhatom\r\n break\r\n\r\n # TODO - where did 72 come from (= 360/5)\r\n for _ in range(72):\r\n residue.rotate_tetrahedral(pivot, acc, 5.0)\r\n angle = abs(self.get_hbond_angle(the_donorhatom, acc, newatom))\r\n if angle < bestangle:\r\n bestangle = angle\r\n bestcoords = newatom.coords\r\n\r\n # Remove if geometry does not work\r\n if bestangle > (ANGLE_CUTOFF * 2.0):\r\n _LOGGER.debug(\"Removing due to geometry %.2f > %.2f\", bestangle, ANGLE_CUTOFF*2.0)\r\n residue.remove_atom(newatom.name)\r\n return 0\r\n\r\n # Otherwise set to best coordinates\r\n newatom.x = 
bestcoords[0]\r\n newatom.y = bestcoords[1]\r\n newatom.z = bestcoords[2]\r\n self.routines.cells.add_cell(newatom)\r\n\r\n return 1", "title": "" }, { "docid": "3913963eab31c2c73fcc456a24f2741e", "score": "0.5027298", "text": "def haldaneHoneycombDisorder(W, H, t1Disorder, t2Disorder, Mdisorder, periodicBoundaryConditions=[], distribution = lambda x: np.random.normal(0,np.absolute(x))):\n lattice = honeycombLattice(W,H,periodicBoundaryConditions)\n t2Disorderconj = np.conj(t2Disorder)\n \n disorderDisplacementList = []\n disorderMatrixList = []\n \n Dmatrix00 = np.zeros((2,2),dtype=complex)\n Dmatrix10 = np.zeros((2,2),dtype=complex)\n Dmatrix01 = np.zeros((2,2),dtype=complex)\n Dmatrix11 = np.zeros((2,2),dtype=complex)\n \n disorderDisplacementList.append([0,0])\n Dmatrix00[0][0] = Mdisorder\n Dmatrix00[0][1] = t1Disorder\n Dmatrix00[1][0] = t1Disorder\n Dmatrix00[1][1] = -Mdisorder\n disorderMatrixList.append(Dmatrix00)\n \n disorderDisplacementList.append([1,0])\n Dmatrix10[0][0] = t2Disorder\n Dmatrix10[0][1] = 0.0\n Dmatrix10[1][0] = t1Disorder\n Dmatrix10[1][1] = t2Disorderconj\n disorderMatrixList.append(Dmatrix10)\n \n disorderDisplacementList.append([0,1])\n Dmatrix01[0][0] = t2Disorderconj\n Dmatrix01[1][0] = t1Disorder\n Dmatrix01[0][1] = 0.0\n Dmatrix01[1][1] = t2Disorder\n disorderMatrixList.append(Dmatrix01)\n \n disorderDisplacementList.append([1,-1])\n Dmatrix11[0][0] = t2Disorderconj\n Dmatrix11[0][1] = 0.0\n Dmatrix11[1][0] = 0.0\n Dmatrix11[1][1] = t2Disorder\n disorderMatrixList.append(Dmatrix11)\n \n builder = TightBindingHamiltonianBuilder(lattice,disorderDisplacementList,disorderMatrixList,distribution)\n \n return builder.generateSystem()", "title": "" }, { "docid": "a28c2ae9294b692772960509cb2b5e2c", "score": "0.5016907", "text": "def exercise_6():\n\n phil_str1=\"\"\"\nncs_group {\n reference = chain A\n selection = chain B\n}\n\"\"\"\n\n pdb_str = \"\"\"\\\nCRYST1 18.415 14.419 12.493 90.00 90.00 90.00 P 1\nATOM 1 N THR A 1 11.782 12.419 4.645 1.00 10.00 N\nATOM 2 CA THR A 1 11.671 11.061 4.125 1.00 10.00 C\nATOM 3 C THR A 1 11.746 10.033 5.249 1.00 10.00 C\nATOM 4 O THR A 1 12.561 10.157 6.163 1.00 10.00 O\nATOM 5 CB THR A 1 12.772 10.760 3.092 1.00 10.00 C\nATOM 6 OG1 THR A 1 12.672 11.682 2.000 1.00 10.00 O\nATOM 7 O HOH A 2 12.635 9.340 2.565 1.00 10.00 O\nTER\nATOM 1 N THR D 1 13.010 5.595 10.010 1.00 10.00 N\nATOM 2 CA THR D 1 14.035 5.945 9.034 1.00 10.00 C\nATOM 3 C THR D 1 15.310 6.423 9.720 1.00 10.00 C\nATOM 4 O THR D 1 16.415 6.054 9.323 1.00 10.00 O\nATOM 5 CB THR D 1 13.543 7.038 8.066 1.00 10.00 C\nATOM 6 OG1 THR D 1 13.237 8.229 8.802 1.00 10.00 O\nATOM 7 O HOH D 2 12.300 6.572 7.323 1.00 10.00 O\nTER\nATOM 1 N THR B 1 6.768 9.093 9.237 1.00 10.00 N\nATOM 2 CA THR B 1 7.284 8.654 7.945 1.00 10.00 C\nATOM 3 C THR B 1 8.638 7.968 8.097 1.00 10.00 C\nATOM 4 O THR B 1 9.495 8.426 8.852 1.00 10.00 O\nATOM 5 CB THR B 1 7.423 9.832 6.963 1.00 10.00 C\nATOM 6 OG1 THR B 1 6.144 10.446 6.765 1.00 10.00 O\nATOM 7 O HOH B 2 7.962 9.350 5.625 1.00 10.00 O\nTER\nATOM 1 N THR C 1 9.093 2.000 10.493 1.00 10.00 N\nATOM 2 CA THR C 1 8.879 2.702 9.233 1.00 10.00 C\nATOM 3 C THR C 1 10.081 3.570 8.875 1.00 10.00 C\nATOM 4 O THR C 1 10.652 4.241 9.734 1.00 10.00 O\nATOM 5 CB THR C 1 7.618 3.584 9.284 1.00 10.00 C\nATOM 6 OG1 THR C 1 6.472 2.770 9.559 1.00 10.00 O\nATOM 7 O HOH C 2 7.417 4.305 7.960 1.00 10.00 O\nTER\n\"\"\"\n\n ncs_groups = get_ncs_groups(phil_str1, pdb_str)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 1\n assert 
list(ncs_groups[0].master_iselection) == [0,1,2,3,4,5]\n assert list(ncs_groups[0].copies[0].iselection) == [14,15,16,17,18,19]\n\n # Here we modifying default exclude_selection parameter to keep waters\n ncs_groups = get_ncs_groups(phil_str1, pdb_str, exclude_selection=None)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 1\n assert list(ncs_groups[0].master_iselection) == [0,1,2,3,4,5,6]\n assert list(ncs_groups[0].copies[0].iselection) == [14,15,16,17,18,19,20]", "title": "" }, { "docid": "b34a7c7dbce833ac771e37f3bf8d2d9f", "score": "0.5013384", "text": "def indirect_gap(h):\n from scipy.optimize import minimize\n hk_gen = h.get_hk_gen() # generator\n def gete(k): # return the energies\n hk = hk_gen(k) # Hamiltonian \n if h.is_sparse: es,ew = lgs.eigsh(hk,k=10,which=\"LM\",sigma=0.0,tol=1e-06)\n else: es = lg.eigvalsh(hk) # get eigenvalues\n return es # get the energies\n # We will assume that the chemical potential is at zero\n def func(k): # conduction band eigenvalues\n es = gete(k) # get eigenvalues\n es = es[es>0.] # conduction band\n return min(es) # minimum energy\n def funv(k): # valence band eigenvalues\n es = gete(k) # get eigenvalues\n es = -es[es<0.] # valence band\n return min(es) # maximum energy\n def opte(f):\n \"\"\"Optimize the eigenvalues\"\"\"\n from scipy.optimize import differential_evolution\n from scipy.optimize import minimize\n bounds = [(0.,1.) for i in range(h.dimensionality)]\n x0 = np.random.random(h.dimensionality) # inital vector\n# res = minimize(f,x0,bounds=bounds)\n res = differential_evolution(f,bounds=bounds)\n return f(res.x)\n ev = opte(funv) # optimize valence band\n# return ev\n ec = opte(func) # optimize conduction band\n return ec+ev # return result", "title": "" }, { "docid": "e6a0586b3b6e72aee5dd67c88707d8a0", "score": "0.50084627", "text": "def test_hartigan_diptest_for_modality(self):\n dist_1_peak = modality.generate_data(peaks=1, n=[10000])\n t1=modality.hartigan_diptest(dist_1_peak)\n assert t1 > 0.95\n\n dist_2_peak = modality.generate_data(peaks=2, n=[10000, 10000])\n t2=modality.hartigan_diptest(dist_2_peak)\n assert t2 < 0.05\n\n dist_3_peak = modality.generate_data(peaks=3, n=[10000, 10000, 10000])\n t3=modality.hartigan_diptest(dist_3_peak)\n assert t3 < 0.05", "title": "" }, { "docid": "803c6862ac75e3713e110243ec232c9b", "score": "0.49931324", "text": "def mk_comparisons_of_humidty():\n # --- GEOS-Chem\n # Get model data\n dfs_mod_GC\n run2use = 'Acid-4x5-J00'\n dfs = [dfs_mod_GC[i][run2use] for i in flight_IDs]\n df = pd.concat(dfs)\n\n # Add satuation pressure to df\n # 𝑒𝑠0 saturation vapor pressure at 𝑇0 (Pa)\n T0 = 273.16\n T = df['GMAO_TEMP'].values # model temp (already in kelvin)\n CC_partial_solution = np.exp((17.67 * (T - T0)) / (T - 29.65))\n df['Es'] = 611.0 * CC_partial_solution\n\n # NOTE: the model expoerts absolute humidty, not specific humidity\n # 𝑞 specific humidity or the mass mixing ratio of water vapor to total air (dimensionless)\n q = df['GMAO_ABSH'] # unitless (which is ≈ 𝑤 )\n p = df['GMAO_PRES'] # HPa\n\n # And then calculate Ws ...\n # where \"𝑝 pressure (Pa)\"\n df['Ws'] = 0.622 * df['Es'] / p\n\n # Complete calculation\n df['RH'] = 0.263 * p * q * (CC_partial_solution**-1)\n\n # --- GEOS-CF\n df = pd.concat([dfs_mod_CF[i] for i in flight_IDs])\n df['Alt'] = AC.hPa_to_Km(df['model-lev'].values)\n\n # plot\n import seaborn as sns\n sns.set(color_codes=True)\n fig, ax = plt.subplots()\n\n plt.title('Modelled Relative Humidity for ARNA-2 flights')\n # Plot up model data\n 
plt.scatter(df['RH'].values, df['Alt'].values,\n label='Relative Humidity')\n# plt.hlines( 0.753 )\n# plt.vlines(x=0.753, ymin=1000, ymax=150 )\n# ax.invert_yaxis()\n\n # Add a second axis\n plt.legend()\n AC.save_plot(dpi=720)\n plt.close('all')", "title": "" }, { "docid": "419de8f4a4f57814a075c6901dc835e2", "score": "0.49743485", "text": "def get_pair_energy(donor, acceptor):\r\n\r\n # TODO - lots of code in here that could be accelerated with numpy\r\n # Initialize some variables\r\n bump_energy = 10.0\r\n bump_distance = 1.5\r\n max_hbond_energy = -10.0\r\n max_ele_energy = -1.0\r\n adh_angle_cutoff = ANGLE_CUTOFF\r\n dhaha_angle_cutoff = 110.0\r\n max_dha_dist = DIST_CUTOFF\r\n max_ele_dist = 5.0\r\n energy = 0.0\r\n\r\n if not (donor.hdonor and acceptor.hacceptor):\r\n return energy\r\n\r\n # See if hydrogens are presently bonded to the acceptor and donor\r\n donorhs = (bond for bond in donor.bonds if bond.is_hydrogen)\r\n acceptorhs = [bond for bond in acceptor.bonds if bond.is_hydrogen]\r\n for donorhatom in donorhs:\r\n dist = util.distance(donorhatom.coords, acceptor.coords)\r\n if dist > max_dha_dist and dist < max_ele_dist:\r\n energy += max_ele_energy/(dist*dist)\r\n continue\r\n\r\n # Case 1: Both donor and acceptor hydrogens are present\r\n for acceptorhatom in acceptorhs:\r\n # Penalize if H(D) is too close to H(A)\r\n hdist = util.distance(donorhatom.coords, acceptorhatom.coords)\r\n if hdist < bump_distance:\r\n energy += bump_energy\r\n continue\r\n\r\n # Assign energies based on angles\r\n angle1 = Optimize.get_hbond_angle(acceptor, donor, donorhatom)\r\n if angle1 <= adh_angle_cutoff:\r\n angle2 = Optimize.get_hbond_angle(donorhatom, acceptorhatom, acceptor)\r\n if angle2 < dhaha_angle_cutoff:\r\n angle2 = 1.0\r\n else:\r\n angle2 = (dhaha_angle_cutoff - angle2)/dhaha_angle_cutoff\r\n\r\n angleterm = (adh_angle_cutoff - angle1)/adh_angle_cutoff\r\n energy += max_hbond_energy/pow(dist, 3)*angleterm*angle2\r\n\r\n # Case 2: Only donor hydrogens are present\r\n if len(acceptorhs) == 0:\r\n # Assign energies based on A-D-H(D) angle alone\r\n angle1 = Optimize.get_hbond_angle(acceptor, donor, donorhatom)\r\n if angle1 <= adh_angle_cutoff:\r\n angleterm = (adh_angle_cutoff - angle1)/adh_angle_cutoff\r\n energy += max_hbond_energy/pow(dist, 2)*angleterm\r\n\r\n return energy", "title": "" }, { "docid": "7c41a5a58ea0d1329fdb8c590c2de673", "score": "0.49720243", "text": "def calc_hbonds(traj, traj_pdb, topol, lipid_dict, headgroup_dict,include_water_solute=False):\n\n # Identify which lipid types we're dealing with\n #lipid_type_atoms = OrderedDict()\n # Loop through the headgroup dict, each key is a lipid type\n # Construct label map to convert a lipid type into a numerical index for an array\n label_to_number = 0\n labelmap = OrderedDict()\n for lipid_type in headgroup_dict.keys():\n labelmap[lipid_type] = label_to_number\n label_to_number += 1\n \n\n # Add waters\n labelmap['HOH'] = label_to_number\n\n \n # Calc hbonds within a particular lipid type\n # Generic list to hold subsequent hbond matrices per frame\n hbond_matrix_list = []\n\n # Actual mdtraj computation of hbonds\n #hbonds = mdtraj.baker_hubbard(traj_pdb, exclude_water = True)\n hbonds = mdtraj.wernet_nilsson(traj_pdb, exclude_water = True, \n include_water_solute=include_water_solute)\n\n # MDtraj generates a huge list of hyrogen bonds per frame\n for hbond_frame in hbonds:\n hbond_frame_matrix = np.zeros((len(labelmap.keys()), len(labelmap.keys())))\n # Interpret the hydrogen bond lists from mdtraj, sort into 
arrays of donors/acceptors\n for (atom_i, atom_j, atom_k) in hbond_frame:\n # Get the residues for each atom participating in a hbond\n # i is the donor atom, j is the hydrogen, k is the acceptor\n residue_i = topol.atom(atom_i).residue.name\n residue_j = topol.atom(atom_j).residue.name\n residue_k = topol.atom(atom_k).residue.name\n participating_residues = (residue_i, residue_j, residue_k)\n # Get residue names, convert them to indices for the matrix\n donor = labelmap[participating_residues[0]]\n try:\n acceptor = labelmap[participating_residues[2]]\n except KeyError:\n acceptor = labelmap['ISIS']\n hbond_frame_matrix[donor,acceptor]+=1\n # Add the frame's hbond matrix to the overall hbond matrix list\n hbond_matrix_list.append(hbond_frame_matrix)\n # Compute avgs and stds\n hbond_matrix_avg = np.mean(hbond_matrix_list, axis = 0)\n hbond_matrix_std = np.std(hbond_matrix_list, axis = 0)\n\n return (hbond_matrix_avg, hbond_matrix_std, hbond_matrix_list, labelmap)", "title": "" }, { "docid": "f58ac99668c42ba3c04d89ff1571db10", "score": "0.4967662", "text": "def pdb_interaction(pdbmat1,pdbmat2,pdb1,pdb2,gcen1,gcen2,energy,do_mm,logscale,dielectric,eps,\n nbest,emin):\n #=========================================\n # global parameters, these are the nominal values used by C\n # subroutine, energies are then post-scaled for actual parameters given as arguments to de()\n DIEL = 80.\n efact = 332./DIEL # dielectric constant factor gives kcal/mole for p+ unit charge, Angstroms\n EPS = 0.1 # depth of vdw potl. kcal/mole\n maxobjdata = 20000\n energy_obj = np.zeros(maxobjdata,float)\n #=========================================\n # extract rot mat\n rmt1 = [[pdbmat1[0],pdbmat1[1],pdbmat1[2]],\n [pdbmat1[4],pdbmat1[5],pdbmat1[6]],\n [pdbmat1[8],pdbmat1[9],pdbmat1[10]]]\n rmt2 = [[pdbmat2[0],pdbmat2[1],pdbmat2[2]],\n [pdbmat2[4],pdbmat2[5],pdbmat2[6]],\n [pdbmat2[8],pdbmat2[9],pdbmat2[10]]]\n #\n #\n xyz = [0.,0.,0.]\n #atom_data = [0.,0.] # stuff # atoms (as floats), coords, radii and charges in one long array to pass to energy_c\n atom_data = [0.,0.,0.] 
# stuff # atoms (as floats), models, coords, radii and \n # charges in one long array to pass to energy_c\n nat1 = pdb1.natom\n nat2 = pdb2.natom\n nmod = pdb2.nmodel\n #print('data length 1: ',len(pdb1.coords))\n #print('data length 2: ',len(pdb2.coords))\n #\n # molecule 1\n #\n for i in range(nat1):\n atom_data.append(pdb1.radius[i])\n for i in range(nat1):\n atom_data.append(pdb1.bfact[i])\n for i in range(nat1):\n # apply rotations and translations\n for k in range(3):\n atom_data.append(pdb1.coords[i][k])\n #\n # molecule 2\n #\n for i in range(nat2):\n atom_data.append(pdb2.radius[i])\n for i in range(nat2):\n atom_data.append(pdb2.bfact[i])\n i = 0\n for nm in range(nmod):\n for j in range(nat2):\n # apply rotations and translations\n for k in range(3):\n atom_data.append(pdb2.coords[i][k])\n i += 1\n print('i: ',i)\n print('beg, end: ',pdb2.coords[0][0],pdb2.coords[i-1][2])\n #sys.exit()\n #\n #==========================================\n # C subroutine version\n #==========================================\n atom_data[0] = float(nat1) # now we know # of atoms, put in front of data array\n atom_data[1] = float(nat2)\n atom_data[2] = float(nmod)\n print('length of data: ',len(atom_data))\n #energy_obj = dockeyeM_energy.energy_f(energy_obj,atom_data)\n dockeyeM_energy.energy_f(energy_obj,atom_data)\n ndata = int(energy_obj[0])\n print('in calling program: ')\n il = int(energy_obj[0])\n for i in range(il):\n print(i,energy_obj[i])\n #print(energy_obj[0],energy_obj[1],energy_obj[2],energy_obj[3])\n #print(energy_obj[ndata-1],energy_obj[ndata-2],\n # energy_obj[ndata-3],energy_obj[ndata-4],energy_obj[ndata-5])\n\n \"\"\"\n ndata = int(energy_obj[0])\n #print('ndata: ',type(ndata))\n #print('ndata: ',ndata)\n # slice energy terms off end of data\n #energy[0] = energy_obj[ndata-3]\n nbest[0] = int(energy_obj[ndata-1])\n #print('from energy_c best model is: ',nbest[0])\n energy[1] = energy_obj[ndata-3]*DIEL/dielectric\n energy[2] = energy_obj[ndata-2]*eps/EPS\n energy[0] = energy[1] + energy[2]\n \"\"\"", "title": "" }, { "docid": "9db1ba0000b546550fa4d4ef2554d7fc", "score": "0.49293932", "text": "def CNOT_HAD_PHASE_circuit(qubits, gates, p_had, p_t, clifford=False):\n p_cnot = 1-p_had-p_t\n c = Circuit(qubits)\n for _ in range(gates):\n r = random.random()\n if r > 1-p_had:\n c.add_gate(\"HAD\",random.randrange(qubits))\n elif r > 1-p_had-p_t:\n if not clifford: c.add_gate(\"T\",random.randrange(qubits))\n else: c.add_gate(\"S\",random.randrange(qubits))\n else:\n tgt = random.randrange(qubits)\n while True:\n ctrl = random.randrange(qubits)\n if ctrl!=tgt: break\n c.add_gate(\"CNOT\",tgt,ctrl)\n return c", "title": "" }, { "docid": "8707ee8a5b9febda27dbc01017180ccc", "score": "0.49292922", "text": "def check_dihedrals(self):\n\n\t\tV = 0 ; Vn = 1 ; n = 1 ; phi_0 = 180 ; psi_0 = 0\n\t\tfor i in range(0,len(self.chain)-8,4): # iterate by RESIDUE\n\t\t\t# OXYGEN ATOMS NOT INCLUDED IN DIHEDRAL CALCULATIONS\n\t\t\tp1 = self.chain[i] \t# N \n\t\t\tp2 = self.chain[i+1]\t# CA\n\t\t\tp3 = self.chain[i+2]\t# C\n\t\t\tp5 = self.chain[i+4]\t# N\n\t\t\tp6 = self.chain[i+5]\t# CA\n\n\t\t\tdef compute_phi(p0,p1,p2,p3):\n\t\t\t\tb0 = -1.0*(p1 - p0) ; b1 = p2 - p1 ; b2 = p3 - p2\n\t\t\t\tb0xb1 = np.cross(b0, b1)\n\t\t\t\tb1xb2 = np.cross(b2, b1)\n\t\t\t\tb0xb1_x_b1xb2 = np.cross(b0xb1, b1xb2)\n\t\t\t\ty = np.dot(b0xb1_x_b1xb2, b1)*(1.0/np.linalg.norm(b1))\n\t\t\t\tx = np.dot(b0xb1, b1xb2)\n\t\t\t\treturn np.degrees(np.arctan2(y, x))\n\t\t\t\n\t\t\tphi = compute_phi(p1,p2,p3,p5) ; psi = 
compute_phi(p2,p3,p5,p6)\n\t\t\tV += (Vn/2.0) * ( (1 + np.cos(n*phi - phi_0)) + (1 + np.cos(n*psi - psi_0)) )\n\t\treturn V", "title": "" }, { "docid": "e6aa123b0578eacbf9e5ad36932f78b4", "score": "0.49197298", "text": "def hd_outside():\n print(\"Hamming.exe - calculate interclass Hamming distances\")\n global hdOutAll, howManyShifts\n comparisons = []\n for person1_num, person1 in enumerate(database): # choose person 1\n for person2_num in range(person1_num + 1, len(database)): # choose person 2\n for image1 in person1: # choose image1 from person 1\n for image2 in database[person2_num]: # choose image2 from person 2\n arg = [image1, image2, databasePath, databasePath, hammingExePath, howManyShifts]\n comparisons.append(arg)\n\n hd_process(comparisons, hd_db=hdOutAll, suffix=\"_inter.txt\")\n line = \"{0} tests\".format(len(hdOutAll))\n print(line)\n results.append(line)", "title": "" }, { "docid": "8cf1300d59db34723b4c4eba16b220e0", "score": "0.49159485", "text": "def add_terminal_hydrogens(hierarchy,\n geometry_restraints_manager,\n terminate_all_N_terminals=False,\n terminate_all_C_terminals=False,\n use_capping_hydrogens=False,\n append_to_end_of_model=False,\n retain_original_hydrogens=True,\n verbose=False,\n ):\n additional_hydrogens = add_terminal_hydrogens_via_residue_groups(\n hierarchy,\n geometry_restraints_manager,\n terminate_all_N_terminals=terminate_all_N_terminals,\n terminate_all_C_terminals=terminate_all_C_terminals,\n use_capping_hydrogens=use_capping_hydrogens,\n append_to_end_of_model=append_to_end_of_model,\n retain_original_hydrogens=retain_original_hydrogens,\n verbose=verbose,\n )\n\n if append_to_end_of_model and additional_hydrogens:\n tmp = []\n for group in additional_hydrogens:\n for chain in group:\n tmp.append(chain)\n _add_atoms_from_chains_to_end_of_hierarchy(hierarchy, tmp)", "title": "" }, { "docid": "8cb1f8e36c6fd3aedd8835372aa287d3", "score": "0.4913822", "text": "def measure_hbond (self,acc,donor,hydAtom):\n donorAtom=donor['donorAtom']\n accAtom=acc['accAtom']\n AAAtom=acc['AAAtom']\n Dv=MolecularComponents.MathFunc.r_[donorAtom.x,donorAtom.y,donorAtom.z]\n Hv=MolecularComponents.MathFunc.r_[hydAtom.x,hydAtom.y,hydAtom.z]\n Av=MolecularComponents.MathFunc.r_[accAtom.x,accAtom.y,accAtom.z]\n # There is no AAAtom for water molecules\n if (AAAtom == None and accAtom.res_type in (\"HOH\")):\n AAv=None\n else:\n AAv=MolecularComponents.MathFunc.r_[AAAtom.x,AAAtom.y,AAAtom.z]\n HBondInfo={}\n HBondInfo['dist_D_A']= MolecularComponents.MathFunc.distance (Dv,Av)\n HBondInfo['dist_H_A'] = MolecularComponents.MathFunc.distance (Hv,Av)\n HBondInfo['angle_D_H_A'] = MolecularComponents.MathFunc.angle (Dv,Hv,Av)\n if (AAv != None):\n # print Dv[0],Dv[1],Dv[2],Av[0],Av[1],Av[2]\n HBondInfo['angle_D_A_AA'] = MolecularComponents.MathFunc.angle (Dv,Av,AAv)\n HBondInfo['angle_H_A_AA'] = MolecularComponents.MathFunc.angle (Hv,Av,AAv)\n else:\n HBondInfo['angle_D_A_AA'] = 'Water'\n HBondInfo['angle_H_A_AA'] = 'Water'\n\n \n return HBondInfo", "title": "" }, { "docid": "1d44a4685a7a970985af31715a16b397", "score": "0.48971108", "text": "def generate_zeolite_cluster(silicon_atom_list,\r\n oxygen_atom_list,\r\n close_atom_list,\r\n all_atom_list,\r\n guest_atom_list,\r\n complex_st,\r\n cmd_args):\r\n\r\n final_cap_at = []\r\n\r\n si_cap_atoms = generate_capping_atoms(silicon_atom_list, close_atom_list,\r\n complex_st)\r\n\r\n ox_cap_atoms = generate_capping_atoms(oxygen_atom_list, close_atom_list,\r\n complex_st)\r\n\r\n extended_atoms = list(set(si_cap_atoms + 
ox_cap_atoms))\r\n\r\n capping_atoms = list(set(extended_atoms) - set(close_atom_list))\r\n\r\n for subset in itertools.combinations(capping_atoms, 2):\r\n if complex_st.areBound(subset[0], subset[1]) is True:\r\n complex_st.deleteBond(subset[0], subset[1])\r\n\r\n delete_atom_list = list(set(all_atom_list) - set(extended_atoms) -\r\n set(guest_atom_list))\r\n\r\n atom_map = complex_st.deleteAtoms(delete_atom_list, renumber_map=True)\r\n\r\n for cap_at in capping_atoms:\r\n if cap_at in atom_map:\r\n mod_at = complex_st.atom[atom_map[cap_at]]\r\n\r\n if mod_at.element == \"O\":\r\n bond_length = 1.4\r\n else:\r\n bond_length = 0.9\r\n\r\n final_cap_at.append(mod_at.index)\r\n\r\n mod_at.element = \"H\"\r\n mod_at.atom_type = 42\r\n mod_at.color = \"white\"\r\n\r\n num_atoms = mm.mmct_ct_get_atom_total(complex_st)\r\n bs = mmbitset.Bitset(size=num_atoms)\r\n\r\n for bond_at in mod_at.bonded_atoms:\r\n mm.mmct_atom_set_distance(bond_length, complex_st, bond_at,\r\n complex_st, mod_at, bs)\r\n\r\n writer = structure.StructureWriter(cmd_args.outfile)\r\n writer.append(complex_st)\r\n writer.close()\r\n\r\n return complex_st, final_cap_at", "title": "" }, { "docid": "be04e54702505b83871b1f5697f6d776", "score": "0.48865268", "text": "def calcHB(self, coords):\n\t\trelax_sol = {'sol_acc':[], 'sol_don':[]} # index starts from 0\n\n\t\tfor i in self.indices['Oads']: # all index in coords is based on start from 0\n\t\t\tp_Oads = [float(coords.loc[i,'xs']), float(coords.loc[i,'ys']), float(coords.loc[i,'zs'])]\n\t\t\t# work for different adsorbate coverage\n\t\t\tmol_ads_type = coords.loc[i,'mol']\n\t\t\tmol_ads = coords.loc[coords['mol'] == mol_ads_type].index.tolist() # index for a whole molecule \n\t\t\t# print(mol_ads) #int,[27, 28, 29, 45, 46, 47, 90, 91, 92, 93, 94, 95, 96]\n\n\t\t\tfor j in self.indices['Osol']:\n\t\t\t\tp_Osol = [float(coords.loc[j,'xs']), float(coords.loc[j,'ys']), float(coords.loc[j,'zs'])]\n\t\t\t\td_oo = self.getDist(p_Oads, p_Osol)\n\t\t\t\t# print(\"%d\\t%d: %f\" % (i,j,dist))\n\t\t\t\tif d_oo > 0.0 and d_oo <= 3.5:\t\t\t\t\t\n\t\t\t\t\tmol_sol_type = coords.loc[j,'mol']\n\t\t\t\t\tmol_sol = coords.loc[coords['mol'] == mol_sol_type].index.tolist() # index for a whole molecule,start 0\n\n\t\t\t\t\t######################################################\n\t\t\t\t\t##### Calc HB-ed H2O mol when solvent as acceptor (ads as donor) #####\n\t\t\t\t\tfor k in mol_ads:\n\t\t\t\t\t\tif k in self.indices['Hads']: # 95,96\n\t\t\t\t\t\t\tp_Hads = [float(coords.loc[k,'xs']), float(coords.loc[k,'ys']), float(coords.loc[k,'zs'])]\n\t\t\t\t\t\t\td_hyd = self.getDist(p_Oads, p_Hads) # doner H and its bonded O/N\n\t\t\t\t\t\t\tif d_hyd <= 1.2:\n\t\t\t\t\t\t\t\td_oh = self.getDist(p_Osol, p_Hads) # dist of O...H, i.e., HB\n\t\t\t\t\t\t\t\tif d_oh <= 2.5:\n\t\t\t\t\t\t\t\t\ta_ooh = self.getAngle(d_oo, d_hyd, d_oh)\n\t\t\t\t\t\t\t\t\ta_oho = self.getAngle(d_hyd, d_oh, d_oo)\n\t\t\t\t\t\t\t\t\tif a_ooh <= 30 and a_oho >= 120:\n\t\t\t\t\t\t\t\t\t\trelax_sol['sol_acc'].append(mol_sol)\n\n\t\t\t\t\t######################################################\n\t\t\t\t\t##### Calc HB-ed H2O mol when solvent as donor #####\n\t\t\t\t\tfor k in mol_sol: # e.g., H2O has two H\n\t\t\t\t\t\tif k in self.indices['Hsol']: # 95,96\n\t\t\t\t\t\t\tp_Hsol = [float(coords.loc[k,'xs']), float(coords.loc[k,'ys']), float(coords.loc[k,'zs'])]\n\t\t\t\t\t\t\td_hyd = self.getDist(p_Osol, p_Hsol) # hydroxyl group, i.e., doner H and its bonded O/N\n\t\t\t\t\t\t\tif d_hyd <= 1.2:\n\t\t\t\t\t\t\t\td_oh = self.getDist(p_Oads, 
p_Hsol) # dist of O...H, i.e., HB\n\t\t\t\t\t\t\t\tif d_oh <= 2.5:\n\t\t\t\t\t\t\t\t\ta_ooh = self.getAngle(d_oo, d_hyd, d_oh)\n\t\t\t\t\t\t\t\t\ta_oho = self.getAngle(d_hyd, d_oh, d_oo)\n\t\t\t\t\t\t\t\t\tif a_ooh <= 30 and a_oho >= 120:\n\t\t\t\t\t\t\t\t\t\trelax_sol['sol_don'].append(mol_sol)\n\n\t\treturn relax_sol", "title": "" }, { "docid": "8d2692c9a7a6d290c087b1ee479ea4c4", "score": "0.48811996", "text": "def get_H_bonded_to_grow(PDB_atom_name, prody_complex, PDB_atom_to_replace=None, chain=\"L\", resnum=None):\n # Select the hydrogens bonded to the heavy atom 'PDB_atom_name'\n\n # When non specific atom is selected we search hydrogens automatically\n if not resnum:\n selected_atom = prody_complex.select(\"chain {} and hydrogen within 1.74 of name {}\".format(chain, \n PDB_atom_name))\n else:\n selected_atom = prody_complex.select(\"chain {} and hydrogen within 1.74 of name {} and resnum {}\".format(chain,\n PDB_atom_name,\n resnum))\n # If it is selected, we have to differentiate between hydrogens or heavy atoms\n if PDB_atom_to_replace:\n print(\"ATOM TO REPLACE: {}\".format(PDB_atom_to_replace))\n if not \"H\" in PDB_atom_to_replace:\n replaceble_pdbatomname = PDB_atom_to_replace\n return replaceble_pdbatomname\n # In case that we found more than one we have to select one of them\n try:\n number_of_h = len(selected_atom)\n print(\"Number of hydrogens bonded to {}: {}\".format(PDB_atom_name, number_of_h))\n except TypeError:\n raise TypeError(\"Check either core or fragment atom to bound when passing parameters\")\n if len(selected_atom) > 1:\n for idx, hydrogen in enumerate(selected_atom):\n # We will select atoms of the protein in interaction distance\n select_h_bonds = prody_complex.select(\"protein and within 2.5 of (name {} and chain {})\"\n .format(selected_atom.getNames()[idx], chain))\n if PDB_atom_to_replace:\n print(\"Forming a bond between {} and {}...\".format(PDB_atom_name, PDB_atom_to_replace))\n select_specific_h_bonds = selected_atom.select(\"name {}\".format(PDB_atom_to_replace))\n replaceble_pdbatomname = select_specific_h_bonds.getNames()[0]\n return replaceble_pdbatomname\n elif select_h_bonds is not None and PDB_atom_to_replace is None:\n print(\"WARNING: {} is forming a close interaction with the protein! 
We will try to grow\"\n \" in another direction.\".format(selected_atom.getNames()[idx]))\n # We put this elif to select one of H randomly if all of them have contacts\n if (select_h_bonds is not None) and (int(idx) == int(len(selected_atom)-1)):\n replaceble_pdbatomname = selected_atom.getNames()[1]\n return replaceble_pdbatomname\n elif select_h_bonds is None and PDB_atom_to_replace is None:\n replaceble_pdbatomname = selected_atom.getNames()[idx]\n return replaceble_pdbatomname\n else:\n replaceble_pdbatomname = selected_atom.getNames()\n return replaceble_pdbatomname", "title": "" }, { "docid": "f6ea27b96a494ca6366e44d8c2f49ace", "score": "0.48795342", "text": "def _init_hoomd_dihedrals(structure, ref_energy=1.0):\n dihedral_type_params = {}\n for dihedral in structure.dihedrals:\n t1, t2 = dihedral.atom1.type, dihedral.atom2.type\n t3, t4 = dihedral.atom3.type, dihedral.atom4.type\n if [t2, t3] == sorted([t2, t3], key=natural_sort):\n dihedral_type = \"-\".join((t1, t2, t3, t4))\n else:\n dihedral_type = \"-\".join((t4, t3, t2, t1))\n if dihedral_type not in dihedral_type_params:\n if isinstance(dihedral.type, pmd.DihedralType):\n dihedral_type_params[dihedral_type] = dihedral.type\n elif isinstance(dihedral.type, pmd.DihedralTypeList):\n if len(dihedral.type) > 1:\n warnings.warn(\n \"Multiple dihedral types detected\"\n + \" for single dihedral, will ignore all except \"\n + \" first dihedral type.\"\n + \"First dihedral type: {}\".format(dihedral.type[0])\n )\n dihedral_type_params[dihedral_type] = dihedral.type[0]\n\n # Set the hoomd parameters\n # These are periodic torsions\n hoomd_version = _get_hoomd_version()\n if (\n hoomd_version.major == 3 and hoomd_version.minor >= 7\n ) or hoomd_version.major == 4:\n periodic_torsion = hoomd.md.dihedral.Periodic()\n else:\n periodic_torsion = hoomd.md.dihedral.Harmonic()\n for name, dihedral_type in dihedral_type_params.items():\n periodic_torsion.params[name] = dict(\n k=2 * dihedral_type.phi_k / ref_energy,\n d=1,\n n=dihedral_type.per,\n phi0=np.deg2rad(dihedral_type.phase),\n )\n\n return periodic_torsion", "title": "" }, { "docid": "9ba4756dbff8c532ff7342e4c21a5283", "score": "0.4861887", "text": "def exercise_1():\n phil_str1=\"\"\"\nncs_group {\n reference = chain E\n selection = chain F\n}\n\"\"\"\n phil_str2=\"\"\"\nncs_group {\n reference = chain E\n selection = chain F\n selection = chain G\n}\n\"\"\"\n phil_str3=\"\"\"\nncs_group {\n reference = chain G\n selection = chain E\n}\n\"\"\"\n phil_str4=\"\"\"\nncs_group {\n reference = chain G\n selection = chain F\n selection = chain E\n}\n\"\"\"\n\n pdb_str = \"\"\"\\\nCRYST1 399.000 399.000 399.000 90.00 90.00 90.00 P 1\nATOM 1 N GLY E 34 125.208 211.886 175.417 1.00 0.00 N\nATOM 2 CA GLY E 34 125.035 211.123 174.168 1.00 0.00 C\nATOM 3 C GLY E 34 126.386 210.806 173.507 1.00 0.00 C\nATOM 4 O GLY E 34 127.304 211.628 173.503 1.00 0.00 O\nTER\nATOM 5 N GLY F 34 251.532 143.432 175.422 1.00 0.00 N\nATOM 6 CA GLY F 34 252.120 143.948 174.173 1.00 0.00 C\nATOM 7 C GLY F 34 251.212 144.998 173.512 1.00 0.00 C\nATOM 8 O GLY F 34 249.986 144.872 173.510 1.00 0.00 O\nTER\nATOM 9 N GLY G 34 189.583 273.076 175.423 1.00 0.00 N\nATOM 10 CA GLY G 34 188.804 273.006 174.173 1.00 0.00 C\nATOM 11 C GLY G 34 188.920 271.622 173.510 1.00 0.00 C\nATOM 12 O GLY G 34 189.986 271.004 173.508 1.00 0.00 O\nTER\n\"\"\"\n\n ncs_groups = get_ncs_groups(phil_str1, pdb_str)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 1\n assert list(ncs_groups[0].master_iselection) == [0, 1, 2, 
3]\n assert list(ncs_groups[0].copies[0].iselection) == [4, 5, 6, 7]\n # print \"master isel:\", list(ncs_groups[0].master_iselection)\n # print \"copy isel:\", list(ncs_groups[0].copies[0].iselection)\n # print \"=\"*80\n\n ncs_groups = get_ncs_groups(phil_str2, pdb_str)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 2\n assert list(ncs_groups[0].master_iselection) == [0, 1, 2, 3]\n assert list(ncs_groups[0].copies[0].iselection) == [4, 5, 6, 7]\n assert list(ncs_groups[0].copies[1].iselection) == [8, 9, 10, 11]\n\n # print \"master isel:\", list(ncs_groups[0].master_iselection)\n # print \"copy isel:\", list(ncs_groups[0].copies[0].iselection)\n # print \"copy isel:\", list(ncs_groups[0].copies[1].iselection)\n # print \"=\"*80\n\n\n ncs_groups = get_ncs_groups(phil_str3, pdb_str)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 1\n assert list(ncs_groups[0].master_iselection) == [8, 9, 10, 11]\n assert list(ncs_groups[0].copies[0].iselection) == [0, 1, 2, 3]\n\n # print \"master isel:\", list(ncs_groups[0].master_iselection)\n # print \"copy isel:\", list(ncs_groups[0].copies[0].iselection)\n # print \"=\"*80\n ncs_groups = get_ncs_groups(phil_str4, pdb_str)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 2\n assert list(ncs_groups[0].master_iselection) == [8, 9, 10, 11]\n assert list(ncs_groups[0].copies[0].iselection) == [4, 5, 6, 7]\n assert list(ncs_groups[0].copies[1].iselection) == [0, 1, 2, 3]\n\n # print \"master isel:\", list(ncs_groups[0].master_iselection)\n # print \"copy isel:\", list(ncs_groups[0].copies[0].iselection)\n # print \"copy isel:\", list(ncs_groups[0].copies[1].iselection)\n # print \"=\"*80", "title": "" }, { "docid": "a1b1e7219d727acee80e9132058c05d9", "score": "0.48494613", "text": "def exercise_2():\n\n phil_str1=\"\"\"\nncs_group {\n reference = chain A\n selection = chain C\n}\n\"\"\"\n\n phil_str2=\"\"\"\nncs_group {\n reference = chain A\n selection = chain B\n selection = chain C\n}\n\"\"\"\n\n pdb_str = \"\"\"\\\nCRYST1 399.000 399.000 399.000 90.00 90.00 90.00 P 1\nATOM 1 N GLY A 34 125.208 211.886 175.417 1.00 0.00 N\nATOM 2 CA GLY A 34 125.035 211.123 174.168 1.00 0.00 C\nATOM 3 C GLY A 34 126.386 210.806 173.507 1.00 0.00 C\nATOM 4 O GLY A 34 127.304 211.628 173.503 1.00 0.00 O\nTER\nATOM 5 N GLY B 34 251.532 143.432 175.422 1.00 0.00 N\nATOM 6 CA GLY B 34 252.120 143.948 174.173 1.00 0.00 C\nATOM 7 C GLY B 34 251.212 144.998 173.512 1.00 0.00 C\nATOM 8 O GLY B 34 249.986 144.872 173.510 1.00 0.00 O\nTER\nATOM 9 N TYR C 34 189.583 273.076 175.423 1.00 0.00 N\nATOM 10 CA TYR C 34 188.804 273.006 174.173 1.00 0.00 C\nATOM 11 C TYR C 34 188.920 271.622 173.510 1.00 0.00 C\nATOM 12 O TYR C 34 189.986 271.004 173.508 1.00 0.00 O\nTER\n\"\"\"\n ncs_groups = get_ncs_groups(phil_str1, pdb_str)\n assert len(ncs_groups) == 0\n\n ncs_groups = get_ncs_groups(phil_str2, pdb_str)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 1\n assert list(ncs_groups[0].master_iselection) == [0, 1, 2, 3]\n assert list(ncs_groups[0].copies[0].iselection) == [4, 5, 6, 7]", "title": "" }, { "docid": "e7a5df55e12f530450f912948b3ab3e7", "score": "0.48413152", "text": "def conv_junc_to_exon():\n def overlap(start, end, start2, end2):\n return not (start > end2 or end < start2)\n\n cons_exons = []\n overlaps = 0\n prev_astart, prev_aend = juncs[0][-2], juncs[0][-1]\n prev_jstart, prev_jend = juncs[0][2], juncs[0][3]\n for i, (geneid, chrom, jstart, jend, dstart, dend, astart, aend) in enumerate(juncs, 
1):\n if (prev_astart, prev_aend) == (dstart, dend):\n cons_exons.append((geneid, chrom, dstart, dend))\n if overlap(prev_jstart, prev_jend, jstart, jend):\n overlaps += 1\n # if prev_aend == dend:\n # cons_exons.append((geneid, chrom, jstart, jend, dstart, dend, astart, aend))\n prev_astart, prev_aend = astart, aend\n prev_jstart, prev_jend = jstart, jend\n\n print(f'Number of constitutive exons found this way: {len(cons_exons)}')\n print(f'Number of overlapping (supposedly constitutive) junctions: {overlaps}')\n\n with open('constitutive_exons.tsv', 'w') as f:\n for (geneid, chrom, dstart, dend) in cons_exons:\n f.write(f'{geneid}\\t{chrom}\\t{dstart}\\t{dend}\\n')", "title": "" }, { "docid": "ef236847c2e7c0009dfe2c12b7187da7", "score": "0.48411715", "text": "def hydrogen_harm(cds):\n massH = Constants.mass('H', to_AU=True)\n mass = massH\n omega = Constants.convert(3600., 'wavenumbers', to_AU=True)\n return np.squeeze(0.5 * mass * omega ** 2 * cds ** 2)", "title": "" }, { "docid": "de1d9085b524a3634b815f82fae85801", "score": "0.48342413", "text": "def jx_ends_in_cds(junction, cds_tree, id_name_dict):\n no_overlap = 0\n one_overlap = 0\n both_same = 0\n both_diff = 0\n left_genes = []\n right_genes = []\n left_names = []\n right_names = []\n chrom, left, right, strand = junction.split(';')\n left = int(left)\n right = int(right)\n try:\n jx_start = cds_tree[chrom][strand].overlaps(left)\n jx_stop = cds_tree[chrom][strand].overlaps(right)\n except KeyError:\n return (no_overlap, one_overlap, both_diff, both_same, left_genes,\n right_genes, left_names, right_names)\n\n if jx_start or jx_stop:\n for start_set in list(cds_tree[chrom][strand][left]):\n if start_set[2] not in left_genes:\n left_genes.append(start_set[2])\n name = id_name_dict[start_set[2]]\n if name not in left_names:\n left_names.append(name)\n for stop_set in list(cds_tree[chrom][strand][right]):\n if stop_set[2] not in right_genes:\n right_genes.append(stop_set[2])\n name = id_name_dict[stop_set[2]]\n if name not in right_names:\n right_names.append(name)\n if jx_start and jx_stop:\n num_same_genes = len(set(left_genes) & set(right_genes))\n if num_same_genes > 0:\n both_same = 1\n if ((len(right_genes) - num_same_genes > 0) or\n (len(left_genes) - num_same_genes > 0)):\n both_diff = 1\n else:\n one_overlap = 1\n\n if strand == '+':\n fivepr_genes = ','.join(left_genes)\n threepr_genes = ','.join(right_genes)\n fivepr_names = ','.join(left_names)\n threepr_names = ','.join(right_names)\n else:\n fivepr_genes = ','.join(right_genes)\n threepr_genes = ','.join(left_genes)\n fivepr_names = ','.join(right_names)\n threepr_names = ','.join(left_names)\n\n return (no_overlap, one_overlap, both_diff, both_same, fivepr_genes,\n threepr_genes, fivepr_names, threepr_names)", "title": "" }, { "docid": "661194ec87c259adab044e814819e200", "score": "0.4833686", "text": "def exercise_ca_absent():\n # should raise Sorry\n tst_pdb_1 = \"\"\"\\\nHELIX 2 2 GLY A 100 TRP A 101 1 2\nATOM 866 N GLY A 100 -128.903-139.939-104.460 1.00 27.58 N\nATOM 867 CA GLY A 100 -128.223-140.890-103.603 1.00 27.58 C\nATOM 868 C GLY A 100 -128.381-142.356-103.959 1.00 27.58 C\nATOM 869 O GLY A 100 -129.283-142.755-104.686 1.00 27.58 O\nATOM 870 H GLY A 100 -129.862-139.777-104.337 1.00 36.09 H\nATOM 871 N TRP A 101 -127.491-143.163-103.397 1.00 25.83 N\nATOM 873 C TRP A 101 -128.824-145.274-103.752 1.00 25.83 C\nATOM 874 O TRP A 101 -129.091-145.962-104.734 1.00 25.83 O\nATOM 875 CB TRP A 101 -126.659-145.240-102.480 1.00 42.04 C\nATOM 876 CG TRP A 101 
-126.207-146.635-102.729 1.00 42.04 C\nATOM 877 CD1 TRP A 101 -126.180-147.300-103.922 1.00 42.04 C\nATOM 878 CD2 TRP A 101 -125.677-147.528-101.760 1.00 42.04 C\nATOM 879 NE1 TRP A 101 -125.663-148.557-103.751 1.00 42.04 N\nATOM 880 CE2 TRP A 101 -125.347-148.723-102.429 1.00 42.04 C\nATOM 881 CE3 TRP A 101 -125.446-147.437-100.384 1.00 42.04 C\nATOM 882 CZ2 TRP A 101 -124.799-149.820-101.767 1.00 42.04 C\nATOM 883 CZ3 TRP A 101 -124.899-148.528 -99.725 1.00 42.04 C\nATOM 884 CH2 TRP A 101 -124.581-149.703-100.415 1.00 42.04 C\nATOM 885 H TRP A 101 -126.863-142.797-102.745 1.00 42.04 H\n\"\"\"\n pdb_inp = iotbx.pdb.input(source_info=None, lines=tst_pdb_1)\n ann = pdb_inp.extract_secondary_structure()\n model = mmtbx.model.manager(\n model_input = pdb_inp)\n model.process(make_restraints=True)\n model.set_ss_annotation(ann)\n try:\n rm = ssb.substitute_ss(\n model)\n rm.run()\n except Sorry as e:\n assert e.args[0].startswith(\"C, CA or N\")\n except Exception:\n assert 0, \"Error: This test failed\"\n else:\n assert 0, \"Error: This test failed\"", "title": "" }, { "docid": "2153b8c4e0540a853118ab9e5694b2e1", "score": "0.483173", "text": "def test_attributes_and_number_of_terms(self):\n Hd = rydberg_interaction(register=atom_coordinates, wires=wires, interaction_coeff=1)\n settings = RydbergSettings(atom_coordinates, 1)\n\n assert isinstance(Hd, HardwareHamiltonian)\n assert Hd.wires == Wires(wires)\n N = len(wires)\n num_combinations = N * (N - 1) / 2 # number of terms on the rydberg_interaction hamiltonian\n assert len(Hd.ops) == num_combinations\n assert Hd.pulses == []\n assert Hd.settings == settings", "title": "" }, { "docid": "aa0839e6287668d20a644338897b73d9", "score": "0.48307574", "text": "def calc_TDI(self):\n \n Wave = self.Wavelet\n Orbit = self.Orbit \n\n t = np.arange(0.0, Orbit.Tobs, Orbit.dt) # Todo: Don't need Orbit, its in Wavelet\n N = len(t)\n \n f_min = Wave.f_min\n f_max = Wave.f_max\n\n N_lo = int(np.floor(f_min*Orbit.Tobs))\n N_hi = int(np.ceil (f_max*Orbit.Tobs))\n \n p12 = td.Phase(1,2, np.zeros(1), np.zeros(1))\n p21 = td.Phase(2,1, np.zeros(1), np.zeros(1))\n p13 = td.Phase(1,3, np.zeros(1), np.zeros(1))\n p31 = td.Phase(3,1, np.zeros(1), np.zeros(1))\n p23 = td.Phase(2,3, np.zeros(1), np.zeros(1))\n p32 = td.Phase(3,2, np.zeros(1), np.zeros(1))\n \n p12.phi_FT = np.zeros(1, dtype=np.complex_)\n p21.phi_FT = np.zeros(1, dtype=np.complex_)\n p13.phi_FT = np.zeros(1, dtype=np.complex_)\n p31.phi_FT = np.zeros(1, dtype=np.complex_)\n p23.phi_FT = np.zeros(1, dtype=np.complex_)\n p32.phi_FT = np.zeros(1, dtype=np.complex_)\n \n if (self.comp_id == 0): # Optical Path glitch, 1->2\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n p12.phi_FT = wv.get_Psi_FT(p12.freqs, Wave)\n \n elif (self.comp_id == 1): # Optical Path glitch, 2->1\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs # Yes, p12... (B/C TDI construction)\n p21.phi_FT = wv.get_Psi_FT(p12.freqs, Wave)\n \n elif (self.comp_id == 2): # Optical Path glitch, 1->3\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs # Yes, p12... (B/C TDI construction)\n p13.phi_FT = wv.get_Psi_FT(p12.freqs, Wave)\n \n elif (self.comp_id == 3): # Optical Path glitch, 3->1\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs # Yes, p12... (B/C TDI construction)\n p31.phi_FT = wv.get_Psi_FT(p12.freqs, Wave)\n \n elif (self.comp_id == 4): # Optical Path glitch, 2->3\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs # Yes, p12... 
(B/C TDI construction)\n p23.phi_FT = wv.get_Psi_FT(p12.freqs, Wave)\n\n elif (self.comp_id == 5): # Optical Path glitch, 3->2\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs # Yes, p12... (B/C TDI construction)\n p32.phi_FT = wv.get_Psi_FT(p12.freqs, Wave)\n \n elif (self.comp_id == 6): # Acceleration glitch, 1->2\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n # construct a wavelet whose central time is shifted to t0-L\n wave_temp = wv.Wavelet(Wave.A, Wave.f0, Wave.tau, \\\n Wave.t0 + Orbit.L/l.Clight, Wave.phi0 - Wave.f0/l.fstar, Orbit)\n \n p12.phi_FT = -wv.get_Psi_FT(p12.freqs, Wave) \n p21.phi_FT = wv.get_Psi_FT(p12.freqs, wave_temp) \n\n elif (self.comp_id == 7): # Acceleration glitch, 2->1\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n # construct a wavelet whose central time is shifted to t0-L\n wave_temp = wv.Wavelet(Wave.A, Wave.f0, Wave.tau, \\\n Wave.t0 + Orbit.L/l.Clight, Wave.phi0 - Wave.f0/l.fstar, Orbit)\n \n p21.phi_FT = -wv.get_Psi_FT(p12.freqs, Wave) \n p12.phi_FT = wv.get_Psi_FT(p12.freqs, wave_temp) \n\n elif (self.comp_id == 8): # Acceleration glitch, 1->3\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n # construct a wavelet whose central time is shifted to t0-L\n wave_temp = wv.Wavelet(Wave.A, Wave.f0, Wave.tau, \\\n Wave.t0 + Orbit.L/l.Clight, Wave.phi0 - Wave.f0/l.fstar, Orbit)\n \n p13.phi_FT = -wv.get_Psi_FT(p12.freqs, Wave) \n p31.phi_FT = wv.get_Psi_FT(p12.freqs, wave_temp) \n\n elif (self.comp_id == 9): # Acceleration glitch, 3->1\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n # construct a wavelet whose central time is shifted to t0-L\n wave_temp = wv.Wavelet(Wave.A, Wave.f0, Wave.tau, \\\n Wave.t0 + Orbit.L/l.Clight, Wave.phi0 - Wave.f0/l.fstar, Orbit)\n \n p31.phi_FT = -wv.get_Psi_FT(p12.freqs, Wave) \n p13.phi_FT = wv.get_Psi_FT(p12.freqs, wave_temp) \n\n elif (self.comp_id == 10): # Acceleration glitch, 2->3\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n # construct a wavelet whose central time is shifted to t0-L\n wave_temp = wv.Wavelet(Wave.A, Wave.f0, Wave.tau, \\\n Wave.t0 + Orbit.L/l.Clight, Wave.phi0 - Wave.f0/l.fstar, Orbit)\n \n p23.phi_FT = -wv.get_Psi_FT(p12.freqs, Wave) \n p32.phi_FT = wv.get_Psi_FT(p12.freqs, wave_temp) \n\n elif (self.comp_id == 11): # Acceleration glitch, 3->2\n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n # construct a wavelet whose central time is shifted to t0-L\n wave_temp = wv.Wavelet(Wave.A, Wave.f0, Wave.tau, \\\n Wave.t0 + Orbit.L/l.Clight, Wave.phi0 - Wave.f0/l.fstar, Orbit)\n \n p32.phi_FT = -wv.get_Psi_FT(p12.freqs, Wave) \n p23.phi_FT = wv.get_Psi_FT(p12.freqs, wave_temp) \n \n p12.freqs = np.arange(N_lo, N_hi, 1)/Orbit.Tobs\n \n self.TDI = td.TDI(p12, p21, p13, p31, p23, p32, Orbit)\n self.TDI.f_min = Wave.f_min\n self.TDI.f_max = Wave.f_max \n \n return", "title": "" }, { "docid": "74d208444fa59388de8c05458b48d8c8", "score": "0.48218632", "text": "def constructH(self): \n Htot = self.atoms[0].H\n HJ = self.constructCouplingHJ()\n HLeads = self.constructHLeads()\n for i in range(1,self.N):\n Htot = sp.kron(Htot,np.eye(self.atoms[i].dim)) + sp.kron(sp.eye(Htot.shape[0]),self.atoms[i].H)\n self.H = Htot + HJ + HLeads", "title": "" }, { "docid": "a8d590da75fed258473ea43ebd8b50ab", "score": "0.48186755", "text": "def isoform_analysis():\n\n with open(REFERENCE_PATH + PAS_DATASET + \"/pas_data_dict.pkl\", 'rb') as pas_data_in:\n pas_data_dict = pkl.load(pas_data_in)\n # with open(REFERENCE_PATH + PAS_DATASET + \"/pas_function.pkl\", 'rb') as pas_function_in:\n # 
pas_function = pkl.load(pas_function_in)\n pas_overlap_file_path = INPUT_FILE_PATH.replace(\".bed.gz\", \".bed\")\\\n .replace(OVERLAP_PATH, PAS_DATASET + \"/assignedpas/\")\n pas_overlap_file = open(pas_overlap_file_path, 'wt')\n\n outlier_count = 0\n file_input = gzip.open(INPUT_FILE_PATH, 'rt')\n reader = csv.reader(file_input, delimiter='\\t')\n cell = 'first_cell'\n cell_age = 0\n cell_cluster = 0\n cell_trajectory = 0\n cell_subtrajectory = 0\n cell_dict = {}\n gene_dict = {}\n coverage_dict = {}\n locus_dict = {}\n for chrom in pas_data_dict:\n if chrom not in locus_dict:\n locus_dict[chrom] = {}\n for row in reader:\n chrom = row[0]\n strand = row[5]\n gene = row[9]\n if gene not in GENE_DATA_DICT or chrom not in pas_data_dict or strand not in pas_data_dict[chrom]\\\n or gene not in pas_data_dict[chrom][strand]:\n continue\n\n new_cell = row[3]\n if new_cell not in CELL_DATA_DICT:\n continue\n # This makes our first cell id and cell dictionary.\n if cell == 'first cell':\n cell_age = CELL_DATA_DICT[new_cell][0]\n cell_cluster = CELL_DATA_DICT[new_cell][2]\n cell_trajectory = CELL_DATA_DICT[new_cell][8]\n cell_subtrajectory = CELL_DATA_DICT[new_cell][16]\n # This executes each time the script encounters a new cell.\n elif cell != new_cell:\n # First, we format the previous cell's data for output.\n for gene_id in cell_dict:\n cell_gene_median = np.median(cell_dict[gene_id])\n if gene_id not in gene_dict:\n gene_dict[gene_id] = [[], {}, {}]\n if cell_cluster not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_cluster] = []\n if cell_trajectory not in gene_dict[gene_id][1]:\n # Splitting this into two different indices of the head dict greatly simplifies later algorithms.\n gene_dict[gene_id][1][cell_trajectory] = []\n if cell_subtrajectory not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_subtrajectory] = []\n # This looks like it should not be necessary. 
It is.\n if cell_trajectory not in gene_dict[gene_id][2]:\n # This will hold our data for trajectories at certain ages.\n gene_dict[gene_id][2][cell_trajectory] = {}\n if cell_age not in gene_dict[gene_id][2][cell_trajectory]:\n gene_dict[gene_id][2][cell_trajectory][cell_age] = []\n if cell_age not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_age] = []\n gene_dict[gene_id][0].append(cell_gene_median)\n gene_dict[gene_id][1][cell_cluster].append(cell_gene_median)\n gene_dict[gene_id][1][cell_trajectory].append(cell_gene_median)\n gene_dict[gene_id][1][cell_age].append(cell_gene_median)\n gene_dict[gene_id][1][cell_subtrajectory].append(cell_gene_median)\n gene_dict[gene_id][2][cell_trajectory][cell_age].append(cell_gene_median)\n # Then, we reset our cell ID and cell dictionary for the next cell's entries.\n cell_age = CELL_DATA_DICT[new_cell][0]\n cell_cluster = CELL_DATA_DICT[new_cell][2]\n cell_trajectory = CELL_DATA_DICT[new_cell][8]\n cell_subtrajectory = CELL_DATA_DICT[new_cell][16]\n cell_dict = {}\n cell = new_cell\n\n # By default, we set the read as an outlier, which will change if we can attach it to an annotated isoform.\n # otherwise we scan forwards/backwards strand-wise\n gene_id = GENE_DATA_DICT[gene][0]\n pas_length = 30001\n pas_hits = []\n pas_counts = []\n pas_loci = []\n if strand == \"+\":\n locus = int(row[2])\n for pas_data in pas_data_dict[chrom][strand][gene]:\n pas = pas_data[0]\n pas_count = pas_data[3]\n pas_dist = locus - pas\n if -300 <= pas_dist <= 20:\n pas_length = pas_data[1]\n pas_hits.append((pas_dist, pas_length))\n pas_loci.append(pas)\n pas_counts.append(pas_count)\n else:\n continue\n else:\n locus = int(row[1])\n for pas_data in pas_data_dict[chrom][strand][gene]:\n pas = pas_data[0]\n pas_count = pas_data[3]\n pas_dist = pas - locus\n if -300 <= pas_dist <= 20:\n pas_length = pas_data[1]\n pas_hits.append((pas_dist, pas_length))\n pas_loci.append(pas)\n pas_counts.append(pas_count)\n else:\n continue\n\n # If no PAS overlaps are found, or the read's total length is an outlier, it is discarded.\n if pas_length > 30000:\n outlier_count = outlier_count + 1\n # Records the read's raw length for our coverage map.\n for exon in GENE_DATA_DICT[gene][1]:\n if exon[0] <= locus <= exon[1]:\n if strand == \"+\":\n raw_length = locus - exon[0] + exon[2]\n else:\n raw_length = exon[1] - locus + exon[2]\n if gene not in coverage_dict:\n coverage_dict[gene] = {'unfiltered': {raw_length: 1}}\n elif raw_length in coverage_dict[gene]['unfiltered']:\n coverage_dict[gene]['unfiltered'][raw_length] += 1\n else:\n coverage_dict[gene]['unfiltered'][raw_length] = 1\n continue\n else:\n # Writes the read and gene data to the pas assignment file.\n pas_overlap_file.write('\\t'.join(row) + '\\t')\n\n # If the read overlaps multiple PAS, we choose the nearest PAS and attach the read to it.\n if len(pas_hits) > 1:\n max_pas_idx = pas_counts.index(max(pas_counts))\n final_dist = pas_hits[max_pas_idx][0]\n final_length = pas_hits[max_pas_idx][1]\n locus = pas_loci[max_pas_idx]\n # Returns a list of tuples of pas hit indices and their corresponding probabilities\n # pas_weights = [(idx, pas_function[((pas_hit[0] + 25) // 5)]) for idx, pas_hit in enumerate(pas_hits)]\n # Sorts the list of tuples by probabilities, takes the index of the entry with the greatest probability,\n # then references that index in pas_hits to retrieve the corresponding PAS length as our final length.\n # final_pas = pas_hits[max(pas_weights, key=lambda t: t[1])[0]]\n # final_dist = 
final_pas[0]\n # final_length = final_pas[1]\n else:\n final_dist = pas_hits[0][0]\n final_length = pas_hits[0][1]\n locus = pas_loci[0]\n\n # Writes the PAS data to the pas assignment file.\n pas_overlap_file.write('\\t'.join([str(locus), str(final_dist), str(final_length)]) + '\\n')\n\n # Adds one to this locus' read assignment count\n if locus not in locus_dict[chrom]:\n locus_dict[chrom][locus] = 1\n else:\n locus_dict[chrom][locus] += 1\n\n raw_length = final_length + final_dist\n\n # Builds a dictionary of PAS lengths and their corresponding coverages, post-filtration.\n if gene not in coverage_dict:\n coverage_dict[gene] = {'filtered': {final_length: 1}, 'unfiltered': {raw_length: 1}}\n if 'filtered' not in coverage_dict[gene]:\n coverage_dict[gene] = {'filtered': {final_length: 1}}\n if 'unfiltered' not in coverage_dict[gene]:\n coverage_dict[gene] = {'unfiltered': {raw_length: 1}}\n if gene in coverage_dict and 'filtered' in coverage_dict[gene] and 'unfiltered' in coverage_dict[gene]:\n if final_length in coverage_dict[gene]['filtered']:\n coverage_dict[gene]['filtered'][final_length] += 1\n else:\n coverage_dict[gene]['filtered'][final_length] = 1\n if raw_length in coverage_dict[gene]['unfiltered']:\n coverage_dict[gene]['unfiltered'][raw_length] += 1\n else:\n coverage_dict[gene]['unfiltered'][raw_length] = 1\n\n if gene_id not in cell_dict:\n cell_dict[gene_id] = []\n cell_dict[gene_id].append(final_length)\n pas_overlap_file.close()\n\n # Executes on the last cell dataset of the file.\n for gene_id in cell_dict:\n cell_gene_median = np.median(cell_dict[gene_id])\n if gene_id not in gene_dict:\n gene_dict[gene_id] = [[], {}, {}]\n if cell_cluster not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_cluster] = []\n if cell_trajectory not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_trajectory] = []\n if cell_subtrajectory not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_subtrajectory] = []\n if cell_trajectory not in gene_dict[gene_id][2]:\n gene_dict[gene_id][2][cell_trajectory] = {}\n if cell_age not in gene_dict[gene_id][2][cell_trajectory]:\n gene_dict[gene_id][2][cell_trajectory][cell_age] = []\n if cell_age not in gene_dict[gene_id][1]:\n gene_dict[gene_id][1][cell_age] = []\n gene_dict[gene_id][0].append(cell_gene_median)\n gene_dict[gene_id][1][cell_cluster].append(cell_gene_median)\n gene_dict[gene_id][1][cell_trajectory].append(cell_gene_median)\n gene_dict[gene_id][1][cell_age].append(cell_gene_median)\n gene_dict[gene_id][2][cell_trajectory][cell_age].append(cell_gene_median)\n gene_dict[gene_id][1][cell_subtrajectory].append(cell_gene_median)\n\n file_input.close()\n\n print(outlier_count)\n\n return gene_dict, coverage_dict, locus_dict", "title": "" }, { "docid": "5496c99fac1112f08992f1dd053ab45d", "score": "0.48185143", "text": "def cap_3D_organic(self):\n print(\"\\n\\nCAPPING 3D ORGANIC\")\n print(\"--------------------------------------\")\n\n print(self.num_keep)\n all_nodes_to_keep = set()\n for component in self.components_to_keep:\n for node in component:\n all_nodes_to_keep.add(node)\n\n for i in range(len(self.components_to_keep)):\n\n component = self.components_to_keep[i]\n\n for node in component:\n\n #for nbr in self.tree.successors_iter(node):\n for nbr in self.origraph.neighbors(node):\n\n #if((node, nbr) in self.actual_truncs_directed):\n if(((node, nbr) in self.actual_truncs_directed and \\\n nbr not in all_nodes_to_keep)):\n #((nbr, node) in self.actual_truncs_directed and \\\n # node not in all_nodes_to_keep)):\n\n # 
NOTE this makes sure we don't place a cap somewhere\n # that constitutes an atom we want to keep\n\n\n print((node, nbr))\n\n #print(\"Capping edge: \" + str((node,nbr)))\n bond_start = self.origraph.node[node]['cartesian_coordinates']\n bond_end = self.origraph.node[nbr]['cartesian_coordinates']\n start_type = self.origraph.node[node]['atomic_number']\n end_type = self.origraph.node[nbr]['atomic_number']\n\n bond_vec_mag = self.cart_dist(bond_start, bond_end)\n bond_vec = bond_end - bond_start\n\n # these are the easy cases\n if(start_type == 6):\n h_dist = 1.09\n elif(start_type == 7):\n h_dist = 1.00\n elif(start_type == 8):\n h_dist = 0.96\n\n scaled_bond_vec = h_dist/bond_vec_mag * (bond_vec)\n new_bond_end = bond_start + scaled_bond_vec\n\n # store necessary modifications\n self.hydrogens[len(self.hydrogens.keys())] = {\n 'cartesian_coordinates': new_bond_end,\n 'atomic_number': 1,\n 'element': 'H'\n }\n self.num_keep += 1\n\n print(\"%s hydrogens added as caps: \" % (str(len(self.hydrogens))))\n print(\"%s num atoms in final cluster: \" % (str(self.num_keep)))", "title": "" }, { "docid": "4593ec61cedf3723b7096255d5241296", "score": "0.48178828", "text": "def threonine_h(ires,i,resNo,atoms2,atmpos2,resNo2,resseq2,nextres=None):\n atoms,coord= ires[1], ires[2]\n pos_N = coord[atoms.index('N')]\n pos_CA = coord[atoms.index('CA')]\n pos_C = coord[atoms.index('C')]\n pos_O = coord[atoms.index('O')]\n pos_CB = coord[atoms.index('CB')]\n pos_CG2 = coord[atoms.index('CG2')]\n pos_OG1 = coord[atoms.index('OG1')]\n\n if nextres !=None:\n nextres_CA = nextres[2][1]\n nextres_N = nextres[2][0]\n H = LinearAlgebra.class5(nextres_CA,nextres_N,pos_C,1.01)\n HA = LinearAlgebra.class3(pos_CB,pos_N,pos_CA)\n HG1 = LinearAlgebra.thr_HG1(pos_OG1,pos_CB,pos_CG2)\n HB = LinearAlgebra.class3(pos_CA,pos_OG1,pos_CB)\n HG12 = LinearAlgebra.calcCoordinate(pos_OG1,pos_CB,pos_CG2,1.09,60.5,109.4)\n HG22 = LinearAlgebra.calcCoordinate(pos_OG1,pos_CB,pos_CG2,1.09,-179.5,109.5)\n HG23 = LinearAlgebra.calcCoordinate(pos_OG1,pos_CB,pos_CG2,1.09,-59.5,109.5)\n di = LinearAlgebra.calcTorsionAngle(pos_CA,pos_CB,pos_OG1,HG1)\n xyz = [pos_CA,pos_CB,pos_OG1,HG1,0.96,di,0.41,0.0,0.0]\n HG1 = PotentialEnergy.optmizeH(resNo,atoms2,atmpos2,resNo2,resseq2,xyz)\n\n atoms_copy = copy.deepcopy(atoms)\n coord_copy = copy.deepcopy(coord)\n atoms_copy.extend(['HG1','HA','HB','1HG2','2HG2','3HG2'])\n coord_copy.extend([HG1,HA,HB,HG12,HG22,HG23])\n return coord_copy,[H,i], atoms_copy\n\n\n HA = LinearAlgebra.class3(pos_CB,pos_N,pos_CA)\n HG1 = LinearAlgebra.thr_HG1(pos_OG1,pos_CB,pos_CG2)\n HB = LinearAlgebra.class3(pos_CA,pos_OG1,pos_CB)\n HG12 = LinearAlgebra.calcCoordinate(pos_OG1,pos_CB,pos_CG2,1.09,60.5,109.4)\n HG22 = LinearAlgebra.calcCoordinate(pos_OG1,pos_CB,pos_CG2,1.09,-179.5,109.5)\n HG23 = LinearAlgebra.calcCoordinate(pos_OG1,pos_CB,pos_CG2,1.09,-59.5,109.5)\n di = LinearAlgebra.calcTorsionAngle(pos_CA,pos_CB,pos_OG1,HG1)\n xyz = [pos_CA,pos_CB,pos_OG1,HG1,0.96,di,0.41,0.0,0.0]\n HG1 = PotentialEnergy.optmizeH(resNo,atoms2,atmpos2,resNo2,resseq2,xyz)\n\n atoms_copy = copy.deepcopy(atoms)\n coord_copy = copy.deepcopy(coord)\n atoms_copy.extend(['HG1','HA','HB','1HG2','2HG2','3HG2'])\n coord_copy.extend([HG1,HA,HB,HG12,HG22,HG23])\n return coord_copy, atoms_copy", "title": "" }, { "docid": "f5fce5ac270a2e1de649832043bca166", "score": "0.48146203", "text": "def recursive_hommola(aligned_otu_seqs, host_subtree, host_dm, otu_tree, sample_names,\n taxon_names, otu_data, permutations=1000, recurse=False):\n\n # print 
\"Performing recursive Hommola et al cospeciation test...\"\n\n # calculate pairise distances between OTUs\n\n dist_calc = TN93Pair(DNA, alignment=aligned_otu_seqs)\n dist_calc.run()\n\n otu_dists = dist_calc.getPairwiseDistances()\n\n otu_dm = cogent_dist_to_qiime_dist(otu_dists)\n\n # convert pw distances (and tree distances for hosts) to numpy arrays with same\n # column/row headings as host/OTU positions in OTU table numpy array.\n\n #hdd = dist2Dict2D(host_dist,sample_names)\n #hdn = numpy.array(hdd.toLists())\n #sdd = dist2Dict2D(otu_dists,taxon_names)\n #sdn = numpy.array(sdd.toLists())\n\n # print \"got here\"\n\n # print host_dm\n # print sample_names\n # print otu_dm\n # print taxon_names\n\n host_dm = sort_dm_by_sample(host_dm, sample_names)\n otu_dm = sort_dm_by_sample(otu_dm, taxon_names)\n\n # convert OTU table to binary array, throwing out all OTUs below a given\n # thresh.\n\n interaction = otu_data.clip(0, 1)\n\n # traverse OTU tree and test each node\n\n # initialize our output lists\n s_nodes, h_nodes, p_vals, s_tips, h_tips, r_vals, r_distro_vals = [\n ], [], [], [], [], [], []\n # print \"just before loop\"\n # iterate over the tree of child OTUs\n for node in otu_tree.traverse(self_before=True, self_after=False):\n\n # get just OTUs in this node\n otu_subset = node.getTipNames()\n\n # subset dms and interaction matrix to just this node\n otu_dm_sub, host_dm_sub, interaction_sub = \\\n filter_dms(otu_dm, host_dm, interaction, otu_subset)\n\n # Make sure we have at least 3 hosts and symbionts represented\n if len(host_dm_sub[0]) > 2 and len(otu_dm_sub[0]) > 2 \\\n and host_dm_sub[1].sum() != 0 and otu_dm_sub[1].sum() != 0:\n\n # print node.asciiArt()\n\n # append symbiont nodes and host subtrees as tree objects\n s_nodes.append(node)\n h_nodes.append(host_subtree.getSubTree(host_dm_sub[0]))\n\n # append number of symbionts and hosts for this node\n s_tips.append(len(otu_dm_sub[0]))\n h_tips.append(len(host_dm_sub[0]))\n # calculate pemutation p value for hommola test for this node\n p, r, r_distro = hommola_cospeciation_test(host_dm_sub[1], otu_dm_sub[1],\n interaction_sub, permutations)\n # append to results list\n p_vals.append(p)\n r_vals.append(r)\n r_distro_vals.append(r_distro)\n\n # print node.asciiArt()\n # print p\n\n # If only testing top-level node, break out of tree traverse.\n if not recurse:\n break\n # else:\n # print \"Less than three hosts\"\n # s_nodes.append(node)\n # h_nodes.append(host_subtree.getSubTree(h_names))\n # s_tips.append(len(s_vec))\n # h_tips.append(len(h_vec))\n # p_vals.append('NA')\n\n # DEBUG:\n \"\"\"\n for i in range(len(p_vals)):\n if p_vals[i] < 0.1:\n print s_nodes[i].asciiArt()\n print h_nodes[i].asciiArt()\n print p_vals[i]\n pause = raw_input(\"\")\n \"\"\"\n\n # print \"finished recursive Hommola\"\n\n results_dict = {'p_vals': p_vals, 's_tips': s_tips,\n 'h_tips': h_tips, 's_nodes': s_nodes, 'h_nodes': h_nodes}\n acc_dict = {'r_vals': r_vals}\n # suppressed: return the distribution of r values\n # 'r_distro_vals':r_distro_vals\n return (results_dict, acc_dict)", "title": "" }, { "docid": "f033628bc462cf6f76f7a3057ea9a114", "score": "0.48122752", "text": "def syn_hyperhypo(word):\n\trelated = set()\n\tsyns = wn.synsets(word)\n\tfor synset in syns:\n\t\tprint(\"word={} lemmas={}\".format(word, synset.lemmas()))\n\t\tprint(\"word={} hypernyms={}\".format(word, synset.hypernyms()))\n\t\tprint(\"word={} hyponyms={}\".format(word, synset.hyponyms()))\n\t\tprint(\"word={} member_holonyms={}\".format(word, 
synset.member_holonyms()))\n\t\tfor lemma in synset.lemmas():\n\t\t\trelated.add(lemma.name())\n\t\tfor hypernym in synset.hypernyms():\n\t\t\trelated.add(hypernym.name())\n\t\tfor hyponym in synset.hyponyms():\n\t\t\trelated.add(hyponym.name())\n\t\tfor holonym in synset.member_holonyms():\n\t\t\trelated.add(holonym.name())\n\treturn related", "title": "" }, { "docid": "7581540395b20d6d661941fbc8c5ca2d", "score": "0.4805417", "text": "def test_from_openeye_implicit_hydrogen(self):\n from openeye import oechem\n\n smiles_impl = \"C#C\"\n oemol_impl = oechem.OEMol()\n oechem.OESmilesToMol(oemol_impl, smiles_impl)\n molecule_from_impl = Molecule.from_openeye(oemol_impl)\n\n assert molecule_from_impl.n_atoms == 4\n\n smiles_expl = \"HC#CH\"\n oemol_expl = oechem.OEMol()\n oechem.OESmilesToMol(oemol_expl, smiles_expl)\n molecule_from_expl = Molecule.from_openeye(oemol_expl)\n assert molecule_from_expl.to_smiles() == molecule_from_impl.to_smiles()", "title": "" }, { "docid": "fa768ad9f82222a425f2932fda1af770", "score": "0.48032895", "text": "def calculate_co_occurrence(self):\n for data in self.tokenized:\n abstract = ' '.join(data['abstract'])\n for combination in self.combinations:\n gene_space = f' {combination[0]} '\n gene_parentheses = f'({combination[0]})'\n phenotype = combination[1].lower()\n if self.in_title:\n self.__check_part(gene_space, gene_parentheses,\n phenotype, data['title'],\n 10, data['pmid'])\n\n if self.in_sentence:\n for sentence in data['abstract']:\n self.__check_part(gene_space, gene_parentheses,\n phenotype, sentence, 5, data['pmid'])\n if self.in_abstract:\n self.__check_part(gene_space, gene_parentheses,\n phenotype, abstract, 2, data['pmid'])\n\n if self.in_multiple_abstracts:\n all_abstracts = [' '.join(data['abstract']) for data in\n self.tokenized]\n for combination in self.combinations:\n amount = 0\n gene_space = f' {combination[0]} '\n gene_parentheses = f'({combination[0]})'\n phenotype = combination[1].lower()\n for abstract in all_abstracts:\n if (gene_space in abstract\n or gene_parentheses in abstract) \\\n and phenotype in abstract:\n amount += 1\n if amount > 1:\n gene = gene_space.strip()\n if f'{gene}, {phenotype}' not in self.co_occurrence:\n self.co_occurrence[f'{gene}, {phenotype}'] = {}\n self.co_occurrence[f'{gene}, {phenotype}'][\n 'options'] = self.options\n try:\n self.co_occurrence[f'{gene}, {phenotype}'][\n 'amount'] += 1\n except KeyError:\n self.co_occurrence[f'{gene}, {phenotype}'][\n 'amount'] = 1", "title": "" }, { "docid": "80bc34d5ec5c868f36a2f0af8600f6fb", "score": "0.479793", "text": "def pro(self, c):\n hs = []\n # Methylene x6: [HB2, HB3], [HD2, HD3], [HG2, HG3]\n hs.extend(self.get_methylene_hydrogens(c.CA, c.CB, c.CG))\n hs.extend(self.get_methylene_hydrogens(c.N, c.CD, c.CG))\n hs.extend(self.get_methylene_hydrogens(c.CD, c.CG, c.CB))\n return hs", "title": "" }, { "docid": "594c4cef28b17c778143813a03c51ed1", "score": "0.4795068", "text": "def compute_topographical_entropy_AEM(arg_hostDataContainer, arg_selector=\"all\", arg_outFile=None, arg_verbose=3):\n\tUtils.hbar(60)\n\tUtils.printflush(\"{:^60}\".format(\"Topographical entropy of residue side chains \\ncomputed using all the dihedrals with AEM method\"))\n\tUtils.hbar(60)\n\tif arg_outFile != None:\n\t\tUtils.printOut(arg_outFile,'-'*60)\n\t\tUtils.printOut(arg_outFile,\"{:^60}\".format(\"Topographical entropy of residue side chains \\ncomputed using all the dihedrals with AEM method\"))\n\t\tUtils.printOut(arg_outFile,'-'*60)\n\n\tallSel = 
arg_hostDataContainer.universe.select_atoms(arg_selector)\n\n\t# number of frames\n\tnumFrames = len(arg_hostDataContainer.trajSnapshots)\n\n\t# log of number of frames (a constant)\n\tlogNumFrames = nmp.log(numFrames)\n\n\t# total SC entropy\n\ttotalTopogEntropySC = 0.\n\t\n\n\t# browse through each residue in the system and get their dihedrals\n\tfor resindices in allSel.residues.resindices:\n\t\tUtils.printflush('-'*10,end='')\n\t\tUtils.printflush('Working on resid : {} ({})'.format(arg_hostDataContainer.universe.residues.resids[resindices], arg_hostDataContainer.universe.residues.resnames[resindices]), end='')\n\t\tUtils.printflush('-'*10)\n\n\t\tresid = arg_hostDataContainer.universe.residues.resids[resindices]\n\n\t\t# build a binary tree that will hold unique dihedrals \n\t\t# uniqueness is defined based on 2-3 atom indexes\n\t\tdiheds_in_rid = CustomDataTypes.BinaryTree()\n\t\tiAtom_in_rid = nmp.flip(allSel.select_atoms(f\"resid {resid}\").atoms.indices)\n\t\tfor idx in iAtom_in_rid:\n\n\t\t\tfor iDih in arg_hostDataContainer.dihedralTable[idx]:\n\t\t\t\t# see if it is a side chain dihedral exclusive to this resid \n\t\t\t\tif iDih.is_from_same_residue() == resid and iDih.is_heavy():\n\t\t\t\t\tdihNode = CustomDataTypes.TreeNode(None, None, iDih)\n\t\t\t\t\tdiheds_in_rid.add_node(dihNode)\n\n\t\tUtils.printflush('Found {} exclusive dihedrals in residue {}{}'.\\\n\t\t\tformat(len(diheds_in_rid), arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]))\n\n\t\t# create an object of Class ConformationEntity corresponding to this residue\n\t\tnewEntity = CONF.ConformationEntity(arg_order = len(diheds_in_rid), arg_numFrames = numFrames)\n\n\t\t# also initialize a string array that will store the state in each frame as a distinct string\n\t\t# made from coalesced character cast of numeric arrays\n\t\tridDecimalReprArray = []\n\n\t\t# for each dihedral identified, get the state vector\n\t\tfor i, iDih in enumerate(diheds_in_rid.list_in_order()):\n\t\t\tstateTS = iDih.get_state_ts(arg_verbose = arg_verbose)\n\t\t\tnewEntity.timeSeries[i,:] = stateTS\n\n\t\t# Now coalesce integer labels of the constituent dihedrals in each time point to get \n\t\t# an expression of the conformation at that time.\n\t\tfor iFrame in range(numFrames):\n\t\t\tridDecimalReprArray.append(Utils.coalesce_numeric_array(newEntity.timeSeries[:,iFrame]))\n\n\n\t\t# for each of the unique state get their count and compute the topographical entropy for this residue\n\t\tsetOfstates = set(ridDecimalReprArray)\n\t\tUtils.printflush('Found {} dihedrals which collectively acquire {} unique conformers'.format(len(diheds_in_rid), len(setOfstates)))\n\n\t\t# print(ridDecimalReprArray)\n\n\t\t# total SC entropy at the topographical level of this residue\n\t\tridTopogEntropy = 0.\n\n\t\tfor iState in setOfstates:\n\t\t\tiCount = ridDecimalReprArray.count(iState) \n\n\t\t\t# p Log(p) for this state\n\t\t\tiPlogP = iCount * (nmp.log(iCount) - logNumFrames)\n\t\t\tridTopogEntropy += iPlogP;\n\n\t\tridTopogEntropy /= numFrames;\n\t\tridTopogEntropy *= -CONST.GAS_CONST #(R)\n\n\t\t# Final residue SC information \n\t\tUtils.printflush('{:<40s} : {:.4f}'.format('Residue Topographical Entropy from AEM ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))\n\t\tif arg_outFile != None:\n\t\t\tUtils.printOut(arg_outFile, '{:<40s} : {:.4f}'.format('Residue Topographical 
Entropy from AEM ({} {})'.format(arg_hostDataContainer.universe.residues.resnames[resindices], arg_hostDataContainer.universe.residues.resids[resindices]), ridTopogEntropy))\n\n\t\t# add this residue's SC entropy to the total SC entropy\n\t\ttotalTopogEntropySC += ridTopogEntropy\n\t\t\t\n\t# total SC topographical entropy\n\tUtils.hbar(60)\n\tUtils.printflush('{:<40} : {:>15.3f}'.format('Total Topog. Entropy (AEM) ', totalTopogEntropySC))\n\tUtils.hbar(60)\n\tif arg_outFile != None:\n\t\tUtils.printOut(arg_outFile, '_'*60)\n\t\tUtils.printOut(arg_outFile, '{:<40} : {:>15.3f}'.format('Total Topog. Entropy (AEM)', totalTopogEntropySC))\n\t\tUtils.printOut(arg_outFile, '-'*60)\n\n\treturn totalTopogEntropySC", "title": "" }, { "docid": "37deb90c2b0b25ce8cf0f455987c909c", "score": "0.4781648", "text": "def exercise_10():\n pdb_str_15 = \"\"\"\nATOM 1 N1 XXX A 34 125.208 211.886 175.417 1.00 0.00 N\nATOM 2 CT XXX A 34 125.035 211.123 174.168 1.00 0.00 C\nATOM 3 C XXX A 34 126.386 210.806 173.507 1.00 0.00 C\nATOM 4 K XXX A 34 127.304 211.628 173.503 1.00 0.00 K\nATOM 1 O HOH A 35 135.208 211.886 175.417 1.00 0.00 O\nATOM 2 O HOH A 36 135.035 211.123 174.168 1.00 0.00 O\nATOM 3 O HOH A 37 136.386 210.806 173.507 1.00 0.00 O\nATOM 4 O HOH A 38 137.304 211.628 173.503 1.00 0.00 O\nTER\nATOM 5 N1 XXX B 34 251.532 143.432 175.422 1.00 0.00 N\nATOM 6 CT XXX B 34 252.120 143.948 174.173 1.00 0.00 C\nATOM 7 C XXX B 34 251.212 144.998 173.512 1.00 0.00 C\nATOM 8 K XXX B 34 249.986 144.872 173.510 1.00 0.00 K\nATOM 5 O HOH B 35 271.532 143.432 175.422 1.00 0.00 O\nATOM 6 O HOH B 36 272.120 143.948 174.173 1.00 0.00 O\nATOM 7 O HOH B 37 271.212 144.998 173.512 1.00 0.00 O\nATOM 8 O HOH B 38 279.986 144.872 173.510 1.00 0.00 O\nTER\n\"\"\"\n phil_str=\"\"\"\nncs_group {\n reference = chain A\n selection = chain B\n}\n\"\"\"\n ### user-supplied\n ncs_groups = get_ncs_groups(phil_str,pdb_str_15, exclude_selection=None)\n # group 1\n assert len(ncs_groups)==1\n assert len(ncs_groups[0].copies) == 1\n # chain A\n assert list(ncs_groups[0].master_iselection) == [0, 1, 2, 3, 4, 5, 6, 7]\n # chain B\n assert list(ncs_groups[0].copies[0].iselection) == [8,9,10,11,12,13,14,15]", "title": "" }, { "docid": "b13637e1cdd822d0ac6c69e772f16c2e", "score": "0.47809562", "text": "def remove_hd(self, reset_i_seq=False):\n n_removed = 0\n for pdb_model in self.models():\n for pdb_chain in pdb_model.chains():\n for pdb_residue_group in pdb_chain.residue_groups():\n for pdb_atom_group in pdb_residue_group.atom_groups():\n for pdb_atom in pdb_atom_group.atoms():\n if (pdb_atom.element.strip().upper() in [\"H\",\"D\"]):\n pdb_atom_group.remove_atom(pdb_atom)\n n_removed += 1\n if (pdb_atom_group.atoms_size() == 0):\n pdb_residue_group.remove_atom_group(pdb_atom_group)\n if (pdb_residue_group.atom_groups_size() == 0):\n pdb_chain.remove_residue_group(pdb_residue_group)\n if (pdb_chain.residue_groups_size() == 0):\n pdb_model.remove_chain(pdb_chain)\n if (pdb_model.chains_size() == 0):\n self.remove_model(pdb_model)\n if (reset_i_seq):\n self.atoms().reset_i_seq()\n return n_removed", "title": "" }, { "docid": "f9da09b78433575b5d4aa46b1cd320a7", "score": "0.477323", "text": "def build_complex_driver(inputDict1,in_metal=False):\n conf_dict,inputDict,core_preprocess_time,symmetry_preprocess_time,int_time1 = complex_driver(inputDict1=inputDict1,in_metal=in_metal)\n if len(conf_dict) == 0:\n if inputDict['parameters']['debug']:\n print('No possible geometries for the input ligand/coreType(s) combination.')\n ordered_conf_dict = 
conf_dict\n else:\n ordered_conf_dict = OrderedDict()\n xtb_energies = []\n energy_sorted_inds = []\n keys = []\n structs = []\n mol2strings = []\n init_mol2strings = []\n for key,val in conf_dict.items():\n xtb_energies.append(val.calculator.energy)\n keys.append(key)\n val.swap_metals_back(in_metal=in_metal)\n structs.append(val)\n if inputDict['parameters']['save_init_geos']:\n if (inputDict['parameters'].get('full_spin_nonxtb',None) is not None):\n val.initMol.uhf = inputDict['parameters']['full_spin_nonxtb']\n init_mol2strings.append(val.initMol.write_mol2('{}'.format(key), writestring=True))\n else:\n init_mol2strings.append(None)\n energy_sorted_inds.append(val.index) # Save energy sorted index for reference.\n if (inputDict['parameters'].get('full_spin_nonxtb',None) is not None):\n val.complexMol.uhf = inputDict['parameters']['full_spin_nonxtb']\n mol2strings.append(val.complexMol.write_mol2('{}'.format(key), writestring=True))\n order = np.argsort(xtb_energies)\n # Iterate through all structures and check/remove duplicate structures.\n # Remove extra classes that we don't need to persist\n del inputDict['core_geo_class']\n del inputDict['parameters']['ase_db'] \n for ind,i in enumerate(order):\n iscopy = False\n if (ind > 0) and (not inputDict['parameters']['skip_duplicate_tests']): # Check for copies\n for key,val in ordered_conf_dict.items():\n # if ('_init_only' in key) or ('_init_only' in keys[i]): # Do not do duplicate test on init_only structures.\n # continue\n # else:\n _, rmsd_full, _ = io_align_mol.calc_rmsd(mol2strings[i], val['mol2string'],\n coresize=10, override=True)\n if (rmsd_full < 0.5):\n iscopy = True\n break\n rmsd_core, _, _ = io_align_mol.calc_rmsd(mol2strings[i], val['mol2string'],\n override=True)\n if (rmsd_core < 0.7) and np.isclose(val['energy'],xtb_energies[i],atol=0.1):\n iscopy = True\n break\n if (not iscopy):\n ordered_conf_dict[keys[i]] = {'ase_atoms':structs[i].complexMol.ase_atoms,\n 'total_charge':int(structs[i].complexMol.charge),\n 'xtb_n_unpaired_electrons': structs[i].complexMol.xtb_uhf,\n 'xtb_total_charge':int(structs[i].complexMol.xtb_charge),\n 'calc_n_unpaired_electrons': structs[i].complexMol.uhf,\n 'metal_ox':inputDict['parameters']['metal_ox'],\n 'init_energy':structs[i].calculator.init_energy,\n 'energy':xtb_energies[i],\n 'mol2string':mol2strings[i], 'init_mol2string':init_mol2strings[i],\n 'energy_sorted_index': energy_sorted_inds[i],\n 'inputDict':inputDict\n }\n else:\n ordered_conf_dict[keys[i]] = {'ase_atoms':structs[i].complexMol.ase_atoms, \n 'total_charge':int(structs[i].complexMol.charge),\n 'xtb_n_unpaired_electrons': structs[i].complexMol.xtb_uhf,\n 'calc_n_unpaired_electrons': structs[i].complexMol.uhf,\n 'xtb_total_charge':int(structs[i].complexMol.xtb_charge),\n 'metal_ox':inputDict['parameters']['metal_ox'],\n 'init_energy':structs[i].calculator.init_energy,\n 'energy':xtb_energies[i],\n 'mol2string':mol2strings[i], 'init_mol2string':init_mol2strings[i],\n 'energy_sorted_index': energy_sorted_inds[i],\n 'inputDict':inputDict\n }\n if (not iscopy) and inputDict['parameters']['return_timings']:\n tdict = ordered_conf_dict[keys[i]]\n fin_time2 = time.time()\n tdict.update({'core_preprocess_time':core_preprocess_time,\n 'symmetry_preprocess_time':symmetry_preprocess_time,\n 'total_liggen_time':np.sum([x.total_liggen_time for x in structs[i].ligandList]),\n 'total_complex_assembly_time':structs[i].assemble_total_time,\n 'final_relaxation_time':structs[i].final_eval_total_time,\n 'sum_total_conformer_time_spent':fin_time2 
- int_time1\n })\n ordered_conf_dict[keys[i]] = tdict\n if (not iscopy) and inputDict['parameters']['return_full_complex_class']: # Return whole complex class (all ligand geometries!)\n tdict = ordered_conf_dict[keys[i]]\n tdict.update({'full_complex_class':structs[i]\n })\n ordered_conf_dict[keys[i]] = tdict\n return ordered_conf_dict", "title": "" }, { "docid": "cc38e34a4e767bc4c91ccdda3f1c6c51", "score": "0.47608888", "text": "def tyr(self, c):\n hs = []\n # Methylene: [HB2, HB3]\n hs.extend(self.get_methylene_hydrogens(c.CA, c.CB, c.CG))\n # Methine x4: HD1, HD2, HE1, HE2\n hs.append(self.get_amide_methine_hydrogen(c.CG, c.CD1, c.CE1, amide=False))\n hs.append(self.get_amide_methine_hydrogen(c.CG, c.CD2, c.CE2, amide=False))\n hs.append(self.get_amide_methine_hydrogen(c.CD1, c.CE1, c.CZ, amide=False))\n hs.append(self.get_amide_methine_hydrogen(c.CD2, c.CE2, c.CZ, amide=False))\n # Hydroxyl: HH\n hs.append(self.get_thiol_hydrogen(c.OH, c.CZ, c.CE1, thiol=False))\n return hs", "title": "" }, { "docid": "62569a77de7d61c62d303c750416bc8d", "score": "0.47589082", "text": "def adiabatic_correction(self): # PREVIOUS VERSION; NOT IN USE!\n # Get Gamma values between 0 and 1 for all distances of interest\n gamma_left = self.left.morse_norm(self.r_corr - self.left.d_Heq) # 0-->0.66\n gamma_right = self.right.morse_norm(self.right.d_Heq - self.r_corr) # 0.66-->0\n\n # Morse values\n E_stretch_left = (gamma_left - 1) * self.left.De\n E_stretch_right = (gamma_right - 1) * self.right.De\n\n # E_hyb\n self.adia_left = E_stretch_left - gamma_left * (1-gamma_right) * (self.right.De + E_stretch_left)\n self.adia_right = E_stretch_right - gamma_right * (1-gamma_left) * (self.left.De + E_stretch_right)\n\n yts_ad = [min(self.adia_left[i],self.adia_right[i]) for i, _ in enumerate(self.adia_right)]\n self.yint_ad, idx = max((val, idx) for (idx, val) in enumerate(yts_ad))\n self.xint_ad = self.r_corr[idx]\n\n self.Ea_ad_left = self.yint_ad + self.left.De\n self.Ea_ad_right = self.yint_ad + self.right.De\n\n if max([self.Ea_ad_left, self.Ea_ad_right]) < np.abs(self.dG): ##Jiang corrected in the following\n if self.Ea_ad_left > self.Ea_ad_right:\n self.Ea_ad_left = np.abs(self.dG)\n self.Ea_ad_right = 0.\n elif self.Ea_ad_left < self.Ea_ad_right:\n self.Ea_ad_left = 0.\n self.Ea_ad_right = np.abs(self.dG)\n\n return self.Ea_ad_left, self.Ea_ad_right", "title": "" }, { "docid": "e1803af0533f96e62a004c90fcf98d38", "score": "0.47572786", "text": "def compute_hydrogen_bonds(traj_frag_molid, frame_idx, index_to_label, solvent_resn, sele_id, ligand=None,\n HBOND_CUTOFF_DISTANCE=3.5, HBOND_CUTOFF_ANGLE=70):\n itype = \"hb\"\n if ligand:\n itype = \"lhb\"\n donors, acceptors = calc_ligand_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id, ligand,\n HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE)\n else:\n donors, acceptors = calc_donor_acceptor_pairs(traj_frag_molid, frame_idx, solvent_resn, sele_id,\n HBOND_CUTOFF_DISTANCE, HBOND_CUTOFF_ANGLE)\n\n donors, acceptors = filter_duplicates(donors, acceptors)\n\n hbonds = []\n for idx, donor in enumerate(donors):\n acceptor = acceptors[idx]\n donor_label, acceptor_label = index_to_label[donor], index_to_label[acceptor]\n hbonds.append([frame_idx, donor_label, acceptor_label, itype])\n\n # Perform post processing on hbonds list to stratify into different subtypes\n if itype == \"hb\":\n hbond_subtypes = stratify_hbond_subtypes(hbonds, solvent_resn)\n elif itype == \"lhb\":\n hbond_subtypes = stratify_ligand_hbond_subtypes(hbonds, solvent_resn, ligand)\n\n 
return hbond_subtypes", "title": "" }, { "docid": "10d9ca3d77b328651eeddbcceca420e6", "score": "0.475608", "text": "def hommola_cospeciation(host_dist, par_dist, interaction, permutations=999):\n host_dist = DistanceMatrix(host_dist)\n par_dist = DistanceMatrix(par_dist)\n interaction = np.asarray(interaction, dtype=bool)\n\n num_hosts = host_dist.shape[0]\n num_pars = par_dist.shape[0]\n\n if num_hosts < 3 or num_pars < 3:\n raise ValueError(\"Distance matrices must be a minimum of 3x3 in size.\")\n if num_hosts != interaction.shape[1]:\n raise ValueError(\"Number of interaction matrix columns must match \"\n \"number of hosts in `host_dist`.\")\n if num_pars != interaction.shape[0]:\n raise ValueError(\"Number of interaction matrix rows must match \"\n \"number of parasites in `par_dist`.\")\n if permutations < 0:\n raise ValueError(\"Number of permutations must be greater than or \"\n \"equal to zero.\")\n if interaction.sum() < 3:\n raise ValueError(\"Must have at least 3 host-parasite interactions in \"\n \"`interaction`.\")\n\n # shortcut to eliminate nested for-loops specifying pairwise interaction\n # partners as randomizeable indices\n pars, hosts = np.nonzero(interaction)\n pars_k_labels, pars_t_labels = _gen_lists(pars)\n hosts_k_labels, hosts_t_labels = _gen_lists(hosts)\n\n # get a vector of pairwise distances for each interaction edge\n x = _get_dist(hosts_k_labels, hosts_t_labels, host_dist.data,\n np.arange(num_hosts))\n y = _get_dist(pars_k_labels, pars_t_labels, par_dist.data,\n np.arange(num_pars))\n\n # calculate the observed correlation coefficient for these hosts/symbionts\n corr_coeff = pearsonr(x, y)[0]\n\n # now do permutatitons. initialize index lists of the appropriate size\n mp = np.arange(num_pars)\n mh = np.arange(num_hosts)\n\n # initialize list of shuffled correlation vals\n perm_stats = np.empty(permutations)\n\n if permutations == 0 or np.isnan(corr_coeff):\n p_value = np.nan\n perm_stats.fill(np.nan)\n else:\n for i in range(permutations):\n # generate a shuffled list of indexes for each permutation. 
this\n # effectively randomizes which host is associated with which\n # symbiont, but maintains the distribution of genetic distances\n np.random.shuffle(mp)\n np.random.shuffle(mh)\n\n # get pairwise distances in shuffled order\n y_p = _get_dist(pars_k_labels, pars_t_labels, par_dist.data, mp)\n x_p = _get_dist(hosts_k_labels, hosts_t_labels, host_dist.data, mh)\n\n # calculate shuffled correlation coefficient\n perm_stats[i] = pearsonr(x_p, y_p)[0]\n\n p_value = ((perm_stats >= corr_coeff).sum() + 1) / (permutations + 1)\n\n return corr_coeff, p_value, perm_stats", "title": "" }, { "docid": "2a2cb1f551f70c1ec9a6dfcb6f4ee5f8", "score": "0.4755362", "text": "def trp(self, c):\n hs = []\n # Methylene: [HB2, HB3]\n hs.extend(self.get_methylene_hydrogens(c.CA, c.CB, c.CG))\n # Methine: HD1\n hs.append(self.get_amide_methine_hydrogen(c.CG, c.CD1, c.NE1, amide=False))\n # Amide/methine: HE1\n hs.append(self.get_amide_methine_hydrogen(c.CD1, c.NE1, c.CE2, amide=True))\n # Methine: HE3\n hs.append(self.get_amide_methine_hydrogen(c.CD2, c.CE3, c.CZ3, amide=False))\n # Methine: HH2\n hs.append(self.get_amide_methine_hydrogen(c.CZ3, c.CH2, c.CZ2, amide=False))\n # Methine HZ2\n hs.append(self.get_amide_methine_hydrogen(c.CE2, c.CZ2, c.CH2, amide=False))\n # Methine: HZ3\n hs.append(self.get_amide_methine_hydrogen(c.CE3, c.CZ3, c.CH2, amide=False))\n return hs", "title": "" }, { "docid": "d6f37a20259f948a6cae5e4f6a49103e", "score": "0.4748523", "text": "def get_structure_homodimer(structure):\n\tsup = Superimposer()\n\tnew_structure = structure.copy()\n\tref_model = new_structure[0]\n\n\t#Iterate over all interactions\n\tfor alt_model in new_structure:\n\t\t#Build paired lists of c-alpha atoms:\n\t\tref_atoms = []\n\t\talt_atoms = []\n\t\tfor (ref_chain, alt_chain) in zip(ref_model, alt_model):\n\t\t\tfor ref_res, alt_res in zip(ref_chain, alt_chain):\n\t\t\t\tif ref_res.resname == alt_res.resname and ref_res.id == alt_res.id:\n\t\t\t\t\tref_atoms.append(alt_res['CA'])\n\t\t\t\t\talt_atoms.append(alt_res['CA'])\n\n\t\t#Align these paired atom lists:\n\t\tsup.set_atoms(ref_atoms, alt_atoms)\n\n\t\tif ref_model.id == alt_model.id :\n\t #Check for self/self get zero RMS, zero translation\n\t #and identity matrix for the rotation.\n\t\t\tassert numpy.abs(sup.rms) < 0.0000001\n\t\t\tassert numpy.max(numpy.abs(sup.rotran[1])) < 0.000001\n\t\t\tassert numpy.max(numpy.abs(sup.rotran[0]) - numpy.identity(3)) < 0.000001\n\n\t\telse :\n\t #Update the structure by moving all the atoms in\n\t #this model (not just the ones used for the alignment)\n\t\t\tsup.apply(alt_model.get_atoms())\n\t\t\talt_model.detach_child(list(alt_model.get_chains())[0].id)\n\n\t\tprint(\"RMS(first model, model %i) = %0.2f\" % (alt_model.id, sup.rms))\n\n\treturn new_structure", "title": "" }, { "docid": "4670289dbd9133bf614d5004f0482c4b", "score": "0.47474384", "text": "def get_OH_HO2( ctm=None, t_p=None, a_m=None, vol=None, \\\n wd=None, HOx_weight=False, res='4x5', scale=1E5, trop_limit=True, \\\n molec_weight=True, time_averaged=True, debug=False ):\n\n if debug:\n print(('get_OH_HO2 called for ', wd))\n\n # --- Set specs, get constants, and extract output variables\n specs = ['OH','HO2']\n AVG= constants('AVG')\n RMM_air=constants('RMM_air')\n # Load arrays if not provided...\n if not isinstance(t_p, np.ndarray):\n if pygchem.__version__ == '0.2.0':\n t_p = get_gc_data_np( ctm, spec='TIMETROP', category='TIME-TPS', \\\n debug=debug)\n else:\n print('WARNING!!! - provide time in trop diag. 
') \n t_p = get_GC_output( wd, vars=['TIME_TPS__TIMETROP'], \\\n trop_limit=trop_limit ) \n\n if not isinstance(a_m, np.ndarray):\n if pygchem.__version__ == '0.2.0':\n a_m = get_air_mass_np( ctm, debug=debug )\n else:\n print('WARNING!!! - provide air mass diag. ') \n a_m = get_GC_output( wd, vars=['BXHGHT_S__AD'], \\\n trop_limit=trop_limit, dtype=np.float64)\n if not isinstance(vol, np.ndarray):\n if pygchem.__version__ == '0.2.0':\n vol = get_volume_np( ctm, res=res, debug=debug)\n else:\n print('WARNING!!! - provide vol diag. ') \n vol = get_volume_np( wd=wd, res=res, trop_limit=trop_limit, \\\n debug=debug)\n\n # --- Extract OH and HO2 Data ( OH in [molec/cm3], HO2 in [v/v])\n if pygchem.__version__ == '0.2.0':\n OH, HO2 = [get_gc_data_np( ctm, \\\n i, category='CHEM-L=$') for i in specs ] \n else:\n OH, HO2 = get_GC_output( wd, trop_limit=trop_limit, \\\n vars=['CHEM_L_S__'+i for i in specs], r_list=True )\n\n # Mask for troposphere. \n OH, HO2 = mask4troposphere( [OH, HO2 ], t_ps=t_p, \\\n use_time_in_trop=True, multiply_method=True) \n \n # --- Process data\n molecs = ( ( (a_m*1E3) / RMM_air ) * AVG ) # molecules\n moles = a_m*1E3 / RMM_air # mols \n\n # Only consider troposphere\n print([ i.shape for i in (molecs, a_m, vol, moles) ])\n molecs, a_m, vol, moles = mask4troposphere( \\\n [molecs, a_m, vol, moles ], t_ps=t_p, \\\n use_time_in_trop=True, multiply_method=True) \n\n if debug:\n print([ (i.shape, np.mean(i) ) for i in [ OH, HO2, molecs, moles, a_m, \\\n vol ]])\n\n # convert HO2 [v/v] to [molec/cm3]\n HO2 = convert_v_v_2_molec_cm3( HO2, a_m=a_m, vol=vol, mols=moles )\n\n # Remove invalid values\n HO2, OH = [ np.ma.masked_invalid( i) for i in (HO2, OH) ]\n HO2, OH = [ np.ma.masked_where( i<0, i) for i in (HO2, OH) ]\n \n # Average over provided timesteps ( 4th dimension )\n if time_averaged:\n HO2, OH, molecs, vol = [ i.mean(axis=-1) for i in (HO2, OH, molecs, vol) ]\n\n if debug:\n print([ ( np.ma.sum(i), np.ma.mean(i) ) for i in (HO2, OH) ])\n print(('volume weighted: ', [ np.ma.sum( i *vol) / np.ma.sum(vol) \\\n for i in (HO2, OH) ])) \n\n if HOx_weight: # weigh by [HO]+[HO2] molecules\n HOx = HO2 + OH\n HO2, OH = [ np.ma.sum( ( i* HOx )) /np.ma.sum(HOx) for i in (HO2, OH) ]\n if debug:\n print([ i.shape for i in (OH, HO2, moles, vol, HOx) ])\n print(('HOx weighted: ',HO2, OH))\n\n elif molec_weight: # weight by # of molecules\n HO2, OH = [ ( i*molecs).sum() /molecs.sum() for i in (HO2, OH) ]\n \n else: # Volume weight\n# HO2, OH = [ np.ma.sum( i *vol) / np.ma.sum(vol) for i in HO2, OH ] \n print('Please specify weighting in get_OH_HO2() ')\n sys.exit()\n\n # Scale to set value ( e.g. 
1E6 )\n HO2, OH = [i/scale for i in (HO2, OH) ]\n\n return OH, HO2", "title": "" }, { "docid": "7334ffdd4f4216cf075c30cab24603ea", "score": "0.47438785", "text": "def diag_coor_medium(self):\n main_diag = 0\n alter_diag = -1\n x_main = []\n x_alter = []\n o_main = []\n o_alter = []\n\n for lines in range(len(self.state_table)):\n if self.state_table[lines][main_diag] == 'X':\n x_main.append([main_diag + 1, main_diag + 1])\n\n elif self.state_table[lines][main_diag] == 'O':\n o_main.append([main_diag + 1, main_diag + 1])\n # print(f'x_main {x_main}')\n if len(x_main) == 2:\n if x_main[1][1] == 3:\n new_coor = [(x_main[1][0] - x_main[0][0]) - 1, (x_main[1][1] - x_main[0][1]) - 1]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n else:\n new_coor = [x_main[1][0] + x_main[0][0] - 1, (x_main[1][1] + x_main[0][1]) - 1]\n # print(f'this is{new_coor}')\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n elif len(o_main) == 2:\n if o_main[1][1] == 3:\n new_coor = [(o_main[1][0] - o_main[0][0]) - 1, (o_main[1][1] - o_main[0][1]) - 1]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n else:\n new_coor = [o_main[1][0] + o_main[0][0] - 1, (o_main[1][1] + o_main[0][1]) - 1]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n\n if self.state_table[lines][alter_diag] == 'X':\n x_alter.append([lines + 1, alter_diag])\n elif self.state_table[lines][alter_diag] == 'O':\n o_alter.append([lines + 1, alter_diag])\n\n if len(x_alter) == 2:\n if x_alter[1][1] == -3:\n new_coor = [x_alter[1][0] - x_alter[0][0] - 1, x_alter[1][1] - x_alter[0][1]]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n else:\n new_coor = [x_alter[1][0] + x_alter[0][0] - 1, x_alter[1][1] + x_alter[0][1]]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n\n elif len(o_alter) == 2:\n if o_alter[1][1] == -3:\n new_coor = [o_alter[1][0] - o_alter[0][0] - 1, o_alter[1][1] - o_alter[0][1]]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n else:\n new_coor = [o_alter[1][0] + o_alter[0][0] - 1, o_alter[1][1] + o_alter[0][1]]\n if self.state_table[new_coor[0]][new_coor[1]] == '_':\n return new_coor\n main_diag += 1\n alter_diag -= 1", "title": "" }, { "docid": "314ba017cad6c6b8fccc11d694ba2dd6", "score": "0.47323367", "text": "def get_methyl_hydrogens(self, carbon, prev1, prev2, use_amine_length=False):\n length = METHYL_LEN if not use_amine_length else AMINE_LEN\n\n # Define local vectors extending from CA\n N = prev2 - prev1\n CB = carbon - prev1\n\n # Define perpendicular vector\n PV = self.cross(CB, N)\n R109 = self.M(PV, np.deg2rad(METHYL_ANGLE)) # Rotate abt PV by 109.5 (tetrahed.)\n\n # Define Hydrogen extending from carbon\n H1 = self.dot(R109, -CB) # Place Hydrogen by rotating C along perpendicular axis\n H1 = self.scale(H1, length)\n\n R120 = self.M(CB, 2 * np.pi / 3)\n H2 = self.dot(R120, H1) # Place 2nd Hydrogen by rotating previous H 120 deg\n H3 = self.dot(R120, H2) # Place 3rd Hydrogen by rotating previous H 120 deg\n\n H1 += prev1 + CB\n H2 += prev1 + CB\n H3 += prev1 + CB\n\n return [H1, H2, H3]", "title": "" }, { "docid": "eedc3e8d49cb8be5d70f8e4205997553", "score": "0.4729259", "text": "def test_max_distance(self):\n # This threshold will remove interactions between atoms more than 5 micrometers away from each other\n max_distance = 5\n coords = [[0, 0], [2.5, 0], [5, 0], [6, 6]]\n h_wires = [1, 0, 2, 3]\n\n # Set interaction_coeff to one for easier 
comparison\n # factor (2 * np.pi) to convert between angular and standard frequency\n H_res = rydberg_interaction(\n register=coords,\n wires=h_wires,\n interaction_coeff=1 / (2 * np.pi),\n max_distance=max_distance,\n )\n H_exp = rydberg_interaction(\n register=coords[:3], wires=h_wires[:3], interaction_coeff=1 / (2 * np.pi)\n )\n\n # Only 3 of the interactions will be non-negligible\n assert H_res.coeffs == [2.5**-6, 5**-6, 2.5**-6]\n assert qml.equal(H_res([], t=5), H_exp([], t=5))", "title": "" }, { "docid": "f1d595c19ce8ded1b6bd96250aff4894", "score": "0.47250444", "text": "def twoPlasmidAnalysis(restrictionbatch, gene_sequence_two, gene_sequence_one):\n\n plasmid_WithoutInsert_analysis = Analysis(restrictionbatch, gene_sequence_one)\n plasmid_WithInsert_analysis = Analysis(restrictionbatch, gene_sequence_two)\n\n #This eliminates all the enzymes that cut the same in both plasmids\n temp_dict1 = plasmid_WithoutInsert_analysis.full()\n temp_dict2 = plasmid_WithInsert_analysis.full()\n enzymeEliminator(temp_dict1, temp_dict2, restrictionbatch)\n\n #TODO: See if we can get away with deleting the bottom line\n restrictionbatch2 = RestrictionBatch(enzyme_list) #Updates the restrictionbatch just in case our code didn't before\n\n final_enzymes = []\n for n in range(1, max_enzymes+1):\n combination_list = list(itertools.combinations(enzyme_list, n))\n\n for x in combination_list:\n if((differentiability_determiner(x, gene_sequence_one) and differentiability_determiner(x, gene_sequence_two)) and \\\n (size_band_eliminator(x, gene_sequence_two) and size_band_eliminator(x, gene_sequence_one)) ):\n \n final_enzymes.append(x)\n\n #Outputs the three sets of enzymes that produce the most cuts \n first_place = 0 \n second_place = 0\n third_place = 0\n\n first_pick = None\n second_pick = None\n third_pick = None\n\n for x in final_enzymes:\n total_num_sites = number_of_cuts(x, gene_sequence_one) + number_of_cuts(x, gene_sequence_two)\n\n if(total_num_sites > first_place):\n third_place = second_place\n third_pick = second_pick\n second_place = first_place\n second_pick = first_pick\n first_place = total_num_sites\n first_pick = x\n elif(total_num_sites > second_place):\n third_place = second_place\n third_pick = second_pick\n second_place = total_num_sites\n second_pick = x\n elif(total_num_sites > third_place):\n third_place = total_num_sites\n third_pick = x\n\n count = 0\n min_enzyme_list = []\n for x in final_enzymes:\n if(count < 3):\n min_enzyme_list.append(x)\n count += 1\n\n print(\"\\n\")\n print(\"The ideal set of restriction enzymes to digest this plasmid to produce maximum cuts are: {}, {}, and {}\" .format(first_pick, second_pick, third_pick))\n print(\"\\n\")\n print(\"The ideal set of restriction enzymes to digest this plasmid in the minimum number of enzymes are:\")\n for x in range(0, len(min_enzyme_list)):\n print(min_enzyme_list[x],)\n print(\"\\n\")\n print(\"The entire set of restriction enzymes to digest this plasmid based on the parameters given are:\") \n print(final_enzymes)\n print(\"\\n\")", "title": "" }, { "docid": "0e63512a8150410ab013c12ee988909c", "score": "0.4722264", "text": "def generate_hamiltonian(self):\n dimensions, ivectors = dimensions_spinvectors(self.cluster, central_spin=None)\n\n totalh = Hamiltonian(dimensions, vectors=ivectors)\n\n for ivec, n in zip(ivectors, self.cluster):\n hsingle = expanded_single(ivec, n.gyro, self.magnetic_field, n['Q'], n.detuning)\n\n if self.others is not None and self.other_states is not None:\n hsingle += 
overhauser_bath(ivec, n['xyz'], n.gyro, self.others.gyro,\n self.others['xyz'], self.other_states)\n\n hf = conditional_hyperfine(n['A'], ivec, self.projections)\n\n totalh.data += hsingle + hf\n\n totalh.data += bath_interactions(self.cluster, ivectors)\n\n return totalh\n # ham = total_hamiltonian(self.cluster, self.magnetic_field, self.zfs, others=self.others,\n # other_states=self.other_states, central_gyro=self.gyro, central_spin=self.spin)\n # return ham", "title": "" }, { "docid": "3ec21feebc862139e298d3cbba59f294", "score": "0.47175032", "text": "def all_hypotheses(examples):\n values = values_table(examples)\n h_powerset = powerset(values.keys())\n hypotheses = []\n for s in h_powerset:\n hypotheses.extend(build_attr_combinations(s, values))\n\n hypotheses.extend(build_h_combinations(hypotheses))\n\n return hypotheses", "title": "" }, { "docid": "3b987fddf8450550c07afed7737eba8e", "score": "0.4716402", "text": "def fix_mobile_h(mol, inchi, u1, u2):\n\n mobile_hydrogens = inchiutil.parse_H_layer(inchi)\n\n if mobile_hydrogens:\n # WIP: only consider the first system of mobile hydrogens:\n mobile_hydrogens = mobile_hydrogens[0]\n\n #find central atom:\n central, original, new_partner = util.swap(mobile_hydrogens, [u1, u2])\n\n central, original, new_partner = \\\n mol.atoms[central - 1], mol.atoms[original - 1], mol.atoms[new_partner - 1]\n\n # search hydrogen atom and bond\n hydrogen = None\n for at, bond in original.bonds.iteritems():\n if at.number == 1:\n hydrogen = at\n mol.removeBond(bond)\n break\n\n new_h_bond = Bond(new_partner, hydrogen, order='S')\n mol.addBond(new_h_bond)\n \n mol.getBond(central, new_partner).decrementOrder()\n\n central.radicalElectrons += 1\n original.radicalElectrons += 1\n return True\n\n return False", "title": "" }, { "docid": "641601c744ea707e8941c63958498150", "score": "0.47104195", "text": "def exercise_7():\n pdb_str1 = \"\"\"\nCRYST1 399.000 399.000 399.000 90.00 90.00 90.00 P 1\nATOM 1 N GLY A 34 125.208 211.886 175.417 1.00 0.00 N\nATOM 2 CA GLY A 34 125.035 211.123 174.168 1.00 0.00 C\nATOM 3 C GLY A 34 126.386 210.806 173.507 1.00 0.00 C\nATOM 4 O GLY A 34 127.304 211.628 173.503 1.00 0.00 O\nTER\nATOM 5 N GLY B 34 251.532 143.432 175.422 1.00 0.00 N\nATOM 6 CA GLY B 34 252.120 143.948 174.173 1.00 0.00 C\nATOM 7 C GLY B 34 251.212 144.998 173.512 1.00 0.00 C\nATOM 8 O GLY B 34 249.986 144.872 173.510 1.00 0.00 O\nTER\nATOM 9 N GLY C 34 189.583 273.076 175.423 1.00 0.00 N\nATOM 10 CA GLY C 34 188.804 273.006 174.173 1.00 0.00 C\nATOM 11 C GLY C 34 188.920 271.622 173.510 1.00 0.00 C\nATOM 12 O GLY C 34 189.986 271.004 173.508 1.00 0.00 O\nTER\n\"\"\"\n pdb_str2 = \"\"\"\nCRYST1 399.000 399.000 399.000 90.00 90.00 90.00 P 1\nATOM 1 O HOH S 1 109.583 203.076 175.423 1.00 0.00 O\nTER\nATOM 1 N GLY A 34 125.208 211.886 175.417 1.00 0.00 N\nATOM 2 CA GLY A 34 125.035 211.123 174.168 1.00 0.00 C\nATOM 3 C GLY A 34 126.386 210.806 173.507 1.00 0.00 C\nATOM 4 O GLY A 34 127.304 211.628 173.503 1.00 0.00 O\nTER\nATOM 5 N GLY B 34 251.532 143.432 175.422 1.00 0.00 N\nATOM 6 CA GLY B 34 252.120 143.948 174.173 1.00 0.00 C\nATOM 7 C GLY B 34 251.212 144.998 173.512 1.00 0.00 C\nATOM 8 O GLY B 34 249.986 144.872 173.510 1.00 0.00 O\nTER\nATOM 9 N GLY C 34 189.583 273.076 175.423 1.00 0.00 N\nATOM 10 CA GLY C 34 188.804 273.006 174.173 1.00 0.00 C\nATOM 11 C GLY C 34 188.920 271.622 173.510 1.00 0.00 C\nATOM 12 O GLY C 34 189.986 271.004 173.508 1.00 0.00 O\nTER\nATOM 9 O TYR D 4 189.583 273.076 175.423 1.00 0.00 O\nTER\n\"\"\"\n phil_str1 = 
\"\"\"\nncs_group {\n reference = chain A\n selection = chain B\n selection = chain C\n}\n\"\"\"\n ncs_groups = get_ncs_groups(phil_str1, pdb_str1)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 2\n assert list(ncs_groups[0].master_iselection) == [0,1,2,3]\n assert list(ncs_groups[0].copies[0].iselection) == [4,5,6,7]\n assert list(ncs_groups[0].copies[1].iselection) == [8,9,10,11]\n\n ncs_groups = get_ncs_groups(phil_str1, pdb_str2)\n assert len(ncs_groups) == 1\n assert len(ncs_groups[0].copies) == 2\n assert list(ncs_groups[0].master_iselection) == [1,2,3,4]\n assert list(ncs_groups[0].copies[0].iselection) == [5,6,7,8]\n assert list(ncs_groups[0].copies[1].iselection) == [9,10,11,12]", "title": "" }, { "docid": "f70922c0ff270215248467bb51eb1aef", "score": "0.4710278", "text": "def edit_distance(ref,hyp):\n n = len(ref)\n m = len(hyp)\n\n ins = dels = subs = corr = 0\n \n D = np.zeros((n+1,m+1))\n\n D[:,0] = np.arange(n+1)\n D[0,:] = np.arange(m+1)\n\n for i in xrange(1,n+1):\n for j in xrange(1,m+1):\n if ref[i-1] == hyp[j-1]:\n D[i,j] = D[i-1,j-1]\n else:\n D[i,j] = min(D[i-1,j],D[i,j-1],D[i-1,j-1])+1\n\n i=n\n j=m\n while i>0 and j>0:\n if ref[i-1] == hyp[j-1]:\n corr += 1\n elif D[i-1,j] == D[i,j]-1:\n ins += 1\n j += 1\n elif D[i,j-1] == D[i,j]-1:\n dels += 1\n i += 1\n elif D[i-1,j-1] == D[i,j]-1:\n subs += 1\n i -= 1\n j -= 1\n\n ins += i\n dels += j\n\n return D[-1,-1]", "title": "" }, { "docid": "18f01029f93596627f4b0226bb1fa862", "score": "0.47087944", "text": "def create_supercells_with_displacements_using_phono3py(structure, ph_settings, cutoff):\n from phono3py.phonon3 import Phono3py\n\n from aiida_phonopy.workchains.phonon import phonopy_bulk_from_structure\n\n # Generate phonopy phonon object\n phono3py = Phono3py(phonopy_bulk_from_structure(structure),\n supercell_matrix=ph_settings.dict.supercell,\n primitive_matrix=ph_settings.dict.primitive,\n symprec=ph_settings.dict.symmetry_precision,\n log_level=1)\n\n if float(cutoff) == 0:\n cutoff = None\n else:\n cutoff = float(cutoff)\n\n phono3py.generate_displacements(distance=ph_settings.dict.distance,\n cutoff_pair_distance=cutoff)\n\n cells_with_disp = phono3py.get_supercells_with_displacements()\n\n # Transform cells to StructureData and set them ready to return\n data_sets = phono3py.get_displacement_dataset()\n data_sets_object = ForceSetsData(data_sets3=data_sets)\n\n disp_cells = {'data_sets': data_sets_object}\n for i, phonopy_supercell in enumerate(cells_with_disp):\n if phonopy_supercell is None:\n print ('structure_{} cutoff skip'.format(i))\n continue\n supercell = StructureData(cell=phonopy_supercell.get_cell())\n for symbol, position in zip(phonopy_supercell.get_chemical_symbols(),\n phonopy_supercell.get_positions()):\n supercell.append_atom(position=position, symbols=symbol)\n disp_cells['structure_{}'.format(i)] = supercell\n\n return disp_cells", "title": "" }, { "docid": "9278da28cbc2455027cd03e6d89d11b0", "score": "0.4698808", "text": "def testAngularDiameterDistance(self):\n\n H0 = 56.0\n universe=CosmologyObject()\n for Om0 in numpy.arange(start=0.15, stop=0.56, step=0.2):\n for Ok0 in numpy.arange(start=-0.1, stop=0.11, step=0.2):\n for w0 in numpy.arange(start=-1.1, stop=-0.85, step=0.2):\n for wa in numpy.arange(start=-0.1, stop=0.11, step=0.2):\n\n universe = CosmologyObject(H0=H0, Om0=Om0, Ok0=Ok0, w0=w0, wa=wa)\n\n sqrtkCurvature = numpy.sqrt(numpy.abs(universe.OmegaCurvature()))*universe.H()/self.speedOfLight\n Og0 = universe.OmegaPhotons()\n Onu0 = 
universe.OmegaNeutrinos()\n Ode0 = universe.OmegaDarkEnergy()\n\n for zz in numpy.arange(start=0.1, stop=4.2, step=2.0):\n angularControl = universe.angularDiameterDistance(redshift=zz)\n comovingDistance = self.speedOfLight*scipy.integrate.quad(comovingDistanceIntegrand, 0.0, zz,\n args=(H0, Om0, Ode0, Og0, Onu0, w0, wa))[0]\n\n if universe.OmegaCurvature()<0.0:\n nn =sqrtkCurvature*comovingDistance\n nn = numpy.sin(nn)\n angularTest = nn/sqrtkCurvature\n elif universe.OmegaCurvature()>0.0:\n nn = sqrtkCurvature*comovingDistance\n nn = numpy.sinh(nn)\n angularTest = nn/sqrtkCurvature\n else:\n angularTest = comovingDistance\n angularTest /= (1.0+zz)\n self.assertAlmostEqual(angularControl/angularTest,1.0,4)", "title": "" }, { "docid": "a38635ceedac0736fb5380b1707439ab", "score": "0.46962267", "text": "def homogeneity(self):\n import nipy.neurospin.eda.dimension_reduction as dr \n coord = self.discrete_features['position']\n size = self.get_size()\n h = np.zeros(self.k)\n for k in range(self.k):\n edk = dr.Euclidian_distance(coord[k]) \n h[k] = edk.sum()/(size[k]*(size[k]-1))\n return h", "title": "" }, { "docid": "4551ad25d29b476c0d09cc0b943691ef", "score": "0.4692553", "text": "def make_exon_junction_adjacencies(self, metadata, db):\n exon_junction_adjacencies = adjacencies.ExonJunctionAdjacencies(\n metadata, db, max_de_novo_exon_length=self.max_de_novo_exon_length)\n\n novel_exons_gtf = os.path.join(self.gtf_folder, 'novel_exons.gtf')\n if self.maybe_overwrite(novel_exons_gtf):\n util.progress('Detecting de novo exons based on gaps between '\n 'junctions ...')\n exon_junction_adjacencies.detect_exons_from_junctions()\n util.done()\n\n novel_exons_gtf = os.path.join(self.gtf_folder, 'novel_exons.gtf')\n novel_exons = exon_junction_adjacencies.db.features_of_type(\n outrigger.common.NOVEL_EXON)\n n_novel_exons = sum(1 for _ in novel_exons)\n util.progress('Writing {n} novel exons to {gtf} ...'.format(\n n=n_novel_exons, gtf=novel_exons_gtf))\n exon_junction_adjacencies.write_de_novo_exons(novel_exons_gtf)\n util.done()\n\n csv = os.path.join(self.index_folder,\n 'exon_direction_junction.csv')\n if not os.path.exists(csv) or self.force:\n util.progress('Getting junction-direction-exon triples for graph '\n 'database ...')\n junction_exon_triples = \\\n exon_junction_adjacencies.upstream_downstream_exons()\n util.done()\n\n util.progress('Writing junction-exon-direction triples'\n ' to {}...'.format(csv))\n junction_exon_triples.to_csv(csv, index=False)\n util.done()\n elif self.resume:\n junction_exon_triples = pd.read_csv(csv,\n low_memory=self.low_memory)\n else:\n raise ValueError(\"Found existing junction-exon-triples file \"\n \"({csv}) but don't \"\n \"know whether you want me to continue where I \"\n \"stopped ('--resume') or force restart from \"\n \"scratch ('--force')! 
Exiting.\"\n \".\".format(csv=csv))\n\n return junction_exon_triples", "title": "" }, { "docid": "1089a6eb6a64b2e4b9087593bb3ef9fd", "score": "0.4692301", "text": "def ala(self, c):\n # HB1, HB2, HB3\n hs = self.get_methyl_hydrogens(carbon=c.CB, prev1=c.CA, prev2=c.N)\n return hs", "title": "" }, { "docid": "8e131e012c260c55409060910f514764", "score": "0.46881604", "text": "def homophily(G, chars, IDs):\n num_same_ties, num_ties = 0, 0\n for n1 in G.nodes():\n for n2 in G.nodes():\n\n nodes = [n1, n2]\n ties, same_ties = check_for_homophily(nodes, G, chars, IDs)\n\n num_ties += ties\n num_same_ties += same_ties\n\n\n return (num_same_ties / num_ties)", "title": "" }, { "docid": "9568eea6c5efa9d58b37508b970f8b71", "score": "0.4679919", "text": "def _get_improper_dihedral_types(\n structure, atom_types, epsilon_conversion_factor, imp_dih_precision=3\n):\n lj_unit = 1 / epsilon_conversion_factor\n improper_dihedrals = []\n for dihedral in structure.dihedrals:\n if dihedral.improper:\n dih_type = dihedral.type\n phase = abs(int(round(dih_type.phase, 0)))\n if not (phase == 0 or phase == 180):\n raise ValueError(\"Improper dihedral phase must be 0 or 180\")\n if phase:\n d = -1\n else:\n d = 1\n improper_dihedrals.append(\n (\n round(dih_type.phi_k * lj_unit, imp_dih_precision),\n d,\n int(round(dih_type.per, 0)),\n round(dih_type.scee, 1),\n round(dih_type.scnb, 1),\n dihedral.atom1.type,\n dihedral.atom2.type,\n dihedral.atom3.type,\n dihedral.atom4.type,\n )\n )\n unique_imp_dihedral_types = dict(enumerate(OrderedSet(*improper_dihedrals)))\n magnitude = np.ceil(np.log10(len(atom_types)))\n dihedral_tuples = [\n (x[1], x[5:]) for x in unique_imp_dihedral_types.values()\n ]\n ordered_dihedral_tuples = dihedral_tuples.copy()\n ordered_dihedral_tuples.sort(\n key=lambda x: atom_types.index(x[1][0]) * 10 ** (4 * magnitude)\n + atom_types.index(x[1][1]) * 10 ** (3 * magnitude)\n + atom_types.index(x[1][2]) * 10 ** (2 * magnitude)\n + atom_types.index(x[1][3]) * 10**magnitude\n + x[0]\n )\n\n unique_imp_dihedral_types = OrderedDict(\n [\n (unique_imp_dihedral_types[dihedral_tuples.index(x)], i + 1)\n for i, x in enumerate(ordered_dihedral_tuples)\n ]\n )\n imp_dihedral_types = [\n unique_imp_dihedral_types[dihedral_info]\n for dihedral_info in improper_dihedrals\n ]\n\n return imp_dihedral_types, unique_imp_dihedral_types", "title": "" }, { "docid": "84dd8d2e60a43a10a1d71410db36a6f2", "score": "0.46774772", "text": "def cliffords(qubits, depth, no_hadamard=False,t_gates=False,backend=None):\n\n #randomness parameters\n p_two_qubit = 0.4 #whether to add a edge between two qubits\n p_cnot = 1 #0.4 # whether to CNOT or to CZ\n p_phase = 0.6 #probability of adding a phase to a node\n p_had = 0.2 # probability of adding a hadamard on a qubit\n\n # initialise and add input row\n\n q = list(range(qubits)) # qubit index, initialised with input\n r = 1 # current rank\n ty = [0] * qubits # types of vertices\n qs = list(range(qubits)) # tracks qubit indices of vertices\n rs = [0] * qubits # tracks rank of vertices\n v = qubits # next vertex to add\n es1 = [] # normal edges to add\n es2 = [] # hadamard edges to add\n phases = {}\n\n # initial row of Z\n for i in range(qubits):\n es1.append((q[i], v))\n q[i] = v\n rs.append(r)\n qs.append(i)\n ty.append(1)\n v += 1\n r += 1\n\n # random gates\n for i in range(depth):\n c = random.randint(0, qubits-1)\n t = random.randint(0, qubits-2)\n if t >= c: t += 1\n if accept(p_two_qubit):\n if no_hadamard or accept(p_cnot): \n es1.append((v, v+1))\n ty += [1,2]\n else: 
\n es2.append((v,v+1))\n typ = random.randint(1,2)\n ty += [typ,typ]\n if accept(p_phase): phases[v] = random_phase(t_gates)\n if accept(p_phase): phases[v+1] = random_phase(t_gates)\n else:\n phases[v] = random_phase(t_gates)\n phases[v+1] = random_phase(t_gates)\n ty += [1,2]\n \n if not no_hadamard and accept(p_had): es2.append((q[c],v))\n else: es1.append((q[c],v))\n if not no_hadamard and accept(p_had): es2.append((q[t],v+1))\n else: es1.append((q[t],v+1))\n\n q[c] = v\n q[t] = v+1\n \n rs += [r,r]\n qs += [c,t]\n v += 2\n r += 1\n\n # final row of Z\n for i in range(qubits):\n es1.append((q[i], v))\n q[i] = v\n rs.append(r)\n qs.append(i)\n ty.append(1)\n v += 1\n r += 1\n\n # outputs\n qs += list(range(qubits))\n rs += [r] * qubits\n ty += [0] * qubits\n es1 += [(q[i], v+i) for i in range(qubits)]\n v += qubits\n\n g = Graph(backend)\n \n\n for i in range(v):\n g.add_vertex(ty[i],qs[i], rs[i])\n for w, phase in phases.items():\n g.set_phase(w,phase)\n\n g.add_edges(es1,1)\n g.add_edges(es2,2)\n\n for i in range(qubits):\n g.inputs.append(i)\n g.outputs.append(v-i-1)\n return g", "title": "" }, { "docid": "379dc1e8510b6fe6c88fc2f2d4617142", "score": "0.46735042", "text": "def fix_topology(dcd,pdb):\n t = md.load_dcd(dcd,pdb)\n cgtop = md.Topology()\n cgchain = cgtop.add_chain()\n for atom in t.top.atoms:\n cgres = cgtop.add_residue(atom.name, cgchain)\n cgtop.add_atom('CA', element=md.element.carbon, residue=cgres)\n traj = md.Trajectory(t.xyz, cgtop, t.time, t.unitcell_lengths, t.unitcell_angles)\n traj = traj.superpose(traj, frame=0)\n return traj", "title": "" }, { "docid": "3d241cf433ea198fba897e066c63070b", "score": "0.46721277", "text": "def make_histidine_lists(post_clu_pdb_filename, hip_residues_list, hid_residues_list):\n\n pdb_input_filename = post_clu_pdb_filename\n reduce_output_filename = post_clu_pdb_filename[:-4]+'_h.pdb'\n\n # run reduce on the protein\n #subprocess.call(\"reduce –build \" + pdb_name + \" > \" + pdb_name[:-4]+ \"_h.pdb\", shell=True)\n try:\n os.system(\"reduce -Build %s > %s\" % (pdb_input_filename, reduce_output_filename))\n except Exception as e:\n print(e)\n print(\"reduce failed on the command:\", (\"reduce -Build %s > %s\" % (pdb_input_filename, reduce_output_filename)))\n sys.exit()\n \n \n #read in the protein with biopandas\n ppdb = PandasPdb()\n ppdb.read_pdb(reduce_output_filename)\n atom_df = ppdb.df['ATOM']\n\n #construct variables for boolean selection\n HID = atom_df['atom_name']=='HD1'\n HIE = atom_df['atom_name']=='HE2'\n HIS = atom_df['residue_name']=='HIS'\n \n #make dataframes\n hid_df = atom_df.loc[HIS & HID]\n hie_df = atom_df.loc[HIS & HIE]\n \n #make booleans to check it has that type of his\n has_hid = hid_df.shape[0] > 0\n has_hie = hie_df.shape[0] > 0\n \n #construct the lists\n hid_list = []\n hie_list = []\n hip_list = []\n \n #first for his\n if has_hid:\n hid_nums = [str(x) for x in list(hid_df['residue_number'])]\n hid_chains = list(hid_df['chain_id'])\n hid_list = [hid_nums[i] + hid_chains[i] for i in range(len(hid_nums))]\n #now for hie\n if has_hie:\n hie_nums = [str(x) for x in list(hie_df['residue_number'])]\n hie_chains = list(hie_df['chain_id'])\n hie_list = [hie_nums[i] + hie_chains[i] for i in range(len(hie_nums))]\n \n #construct the hip list\n if has_hid and has_hie:\n for res in hie_list:\n if res in hid_list:\n hip_list.append(res)\n\n print('HIP list')\n print(hip_list)\n print('HIE list')\n print(hie_list)\n print('HID list')\n print(hid_list)\n\n #add to the global lists\n for res in hip_list:\n if (res 
not in hip_residues_list) and (res not in hid_residues_list):\n hip_residues_list.append(res)\n\n for res in hid_list:\n if (res not in hip_residues_list) and (res not in hid_residues_list):\n hid_residues_list.append(res)\n\n #check if propka disagrees with reduce\n run_propka(post_clu_pdb_filename, hip_residues_list)\n\n return hip_residues_list, hid_residues_list, reduce_output_filename", "title": "" }, { "docid": "cbd01c8e9e9364a49c1bd1bf47b95587", "score": "0.46704128", "text": "def calc_subhalo_props_descend(self,list_of_subhalo_properties,\n subhalo_descend_attrs, \n list_of_halo_properties):\n if self.ndesc > 0 :\n \n largest_desc_ID = self.desc_ids[0]\n\n\n descendent_subhalo = list_of_subhalo_properties[largest_desc_ID]\n \n for subhalo_property in subhalo_descend_attrs:\n \n to_descend = getattr(self, subhalo_property)\n \n setattr(descendent_subhalo, subhalo_property,\n getattr(descendent_subhalo, subhalo_property) \n + to_descend)\n \n # If no descendents stars go to host halo as intracluster light.\n elif self.ndesc ==0:\n \n list_of_halo_properties[self.host_halo_ID].intracluster_stellar_mass += \\\n self.stellar_mass", "title": "" }, { "docid": "709423379cdcd05f41f184c1f4ee5093", "score": "0.46690515", "text": "def cal_pathSim_all(G):\n\n # # 笛卡尔积\n # l = [1, 2, 3, 4, 5]\n # print(list(product(l, l)))\n # # 排列组合\n # print(list(combinations([1, 2, 3, 4, 5], 2)))\n\n mimnumber_set = G.keys() # all disease\n print(len(mimnumber_set)) # all disease mimnumbers\n\n d_combination_list = list(combinations(mimnumber_set, 2)) # 对全部疾病进行两两排列组合,组合结果存入list中\n\n # print(d_combination_list)\n\n d_pair_sim_dict = {} # 存储所有疾病对之间的pathsim值\n\n for pair in d_combination_list:\n mim1 = pair[0]\n mim2 = pair[1]\n cui_list_1 = G[mim1]\n cui_list_2 = G[mim2]\n intersect_cui_list = list(set(cui_list_1).intersection(set(cui_list_2)))\n\n path_count_mim1_mim2 = len(intersect_cui_list) # 分子部分,cui交集个数\n \n # 寻找 mim1 — mim2 — mim3 结点对, 记录 mim1 — mim3 相似度值\n # if path_count_mim1_mim2 != 0:\n # for mim3 in cui_list_2:\n\n\n path_count_self_mim1 = len(cui_list_1) # d1的cui个数\n path_count_self_mim2 = len(cui_list_2) # d2的cui个数\n\n # 计算pathsim值\n pathsim_score = (2 * path_count_mim1_mim2) / (path_count_self_mim1 + path_count_self_mim2)\n\n # 只把相似度大于0的记录保存, 1076370\n if (pathsim_score > 0):\n d_pair_sim_dict[(mim1, mim2)] = pathsim_score\n # print(\"(\", mim1, \",\", mim2, \"), pathsim:\", pathsim_score)\n\n print(\"pathsim file length:\", len(d_pair_sim_dict))\n\n # 输出为.pkl文件\n outfile = 'd_pair_pathsim'\n save_on_disk(d_pair_sim_dict, outfile)\n print(\"dump PathSim file successfully!\")\n # reference_relations_file = os.path.join('save/d_pair_pathsim.pkl')\n\n # if os.path.exists(reference_relations_file):\n # print(\"find dumped reference relations file, skip pathsim calculating.\")\n # else:\n # # 存储计算结果\n # with open(reference_relations_file, 'wb') as f:\n # pickle.dump(d_pair_sim_dict, f)\n # print(\"dump PathSim file successfully!\")\n\n return outfile", "title": "" }, { "docid": "3f4e937f9ca1c4aab0611736d9fa9bdd", "score": "0.46681377", "text": "def findHbAccepDon(atom_list):\n H_COV_BOND = 1.1 \n H_COV_BOND = H_COV_BOND ** 2 \n acceptor_types = ['OA', 'NA', 'SA']\n donor_types = ['N', 'O', 'OA', 'NA', 'SA']\n acceptors = []\n donors = []\n h = []\n dcandidate = []\n for l in atom_list:\n if l.startswith(\"ATOM\") or l.startswith(\"HETATM\"):\n l = l.strip()\n atype=l.split()[-1]\n if atype in acceptor_types:\n if not l in acceptors:\n acceptors.append(l)\n elif atype in donor_types:\n if not l in 
dcandidate:\n dcandidate.append(l)\n elif atype == 'HD':\n if not l in h:\n h.append(l)\n for a in dcandidate:\n for x in h:\n if dist(a, x) <= H_COV_BOND:\n donors.append(a)\n break\n return acceptors, donors", "title": "" }, { "docid": "b2ab9dfe647cd7661faea92029c33f2a", "score": "0.4665836", "text": "def generate_context_pairs_for_submission(unseen_hyponym: tuple,\n occurrences_of_hyponym: Dict[str, List[Tuple[str, Tuple[int, int]]]],\n synsets_with_sense_ids: Dict[str, Tuple[List[str], str]],\n source_senses: Dict[str, str],\n inflected_senses: Dict[str, Dict[str, Tuple[tuple, Tuple[int, int]]]],\n checked_synsets: Union[List[str], None] = None) -> \\\n List[Tuple[str, str, str]]:\n text_pairs = []\n all_synset_IDs = sorted(list(synsets_with_sense_ids.keys())) if checked_synsets is None else checked_synsets\n if (occurrences_of_hyponym is not None) and (len(occurrences_of_hyponym) > 0):\n for hypernym_synset_ID in all_synset_IDs:\n new_pairs = []\n for hypernym_sense_ID in synsets_with_sense_ids[hypernym_synset_ID][0]:\n if hypernym_sense_ID in inflected_senses:\n pairs_for_sense = []\n for morphotag in occurrences_of_hyponym:\n if morphotag in inflected_senses[hypernym_sense_ID]:\n hypernym = list(inflected_senses[hypernym_sense_ID][morphotag][0])\n for text_with_hyponym, hyponym_bounds in occurrences_of_hyponym[morphotag]:\n text_with_hypernym = text_with_hyponym.split()\n text_with_hypernym = text_with_hypernym[0:hyponym_bounds[0]] + hypernym + \\\n text_with_hypernym[hyponym_bounds[1]:]\n text_with_hypernym = ' '.join(text_with_hypernym)\n if ' '.join(text_with_hyponym.split()) != text_with_hypernym:\n pairs_for_sense.append((text_with_hyponym, text_with_hypernym, hypernym_synset_ID))\n if len(pairs_for_sense) > 0:\n new_pairs.append(random.choice(pairs_for_sense))\n del pairs_for_sense\n if len(new_pairs) > 0:\n text_pairs += new_pairs\n else:\n for hypernym_sense_ID in synsets_with_sense_ids[hypernym_synset_ID][0]:\n new_pairs.append((' '.join(unseen_hyponym), source_senses[hypernym_sense_ID], hypernym_synset_ID))\n if len(new_pairs) > 0:\n if len(new_pairs) > 3:\n text_pairs += random.sample(new_pairs[1:], 3)\n else:\n text_pairs += new_pairs\n del new_pairs\n del all_synset_IDs\n return text_pairs", "title": "" }, { "docid": "c7658a2769cc19bba5f7a33b18ebefc5", "score": "0.4659863", "text": "def test_change_dihedrals_and_force_field_it(self):\n ncc_xyz = \"\"\"N 0.92795000 -0.06591600 -0.03643200\nC 2.38932500 -0.06185100 -0.06491100\nC 2.91383400 1.35741700 -0.22361700\nH 2.74111100 -0.47429900 0.88565600\nH 2.81050800 -0.69503700 -0.86161200\nH 2.54377900 1.99297300 0.58410700\nH 4.00671000 1.37386200 -0.21263700\nH 2.58394500 1.79116300 -1.17337000\nH 0.55243400 0.27426600 -0.91441800\nH 0.56679600 -1.00155900 0.10247100\"\"\"\n ncc_spc = ARCSpecies(label='NCC', smiles='NCC', xyz=ncc_xyz)\n ncc_mol = ncc_spc.mol\n energies = conformers.get_force_field_energies(label='NCC', mol=ncc_mol, xyz=ncc_xyz, optimize=True)[1]\n self.assertAlmostEqual(energies[0], -6.15026868, 5)\n idx0 = 10\n for i, atom in enumerate(ncc_mol.atoms):\n if atom.isNitrogen():\n idx1 = i + 1\n elif atom.isCarbon():\n for atom2 in atom.edges.keys():\n if atom2.isNitrogen():\n idx2 = i + 1\n break\n else:\n idx3 = i + 1\n elif atom.isHydrogen():\n for atom2 in atom.edges.keys():\n if atom2.isNitrogen():\n if i + 1 < idx0:\n idx0 = i + 1\n torsion = (idx0, idx1, idx2, idx3)\n\n rd_conf, _, index_map = converter.rdkit_conf_from_mol(ncc_mol, converter.get_xyz_matrix(ncc_xyz)[0])\n rd_scan = [index_map[i - 1] 
for i in torsion] # convert the atom indices to RDKit indices\n angle = rdMT.GetDihedralDeg(rd_conf, rd_scan[0], rd_scan[1], rd_scan[2], rd_scan[3])\n\n self.assertAlmostEqual(angle, 62.9431377, 5)\n\n xyzs, energies = conformers.change_dihedrals_and_force_field_it(label='NCC', mol=ncc_mol, xyz=ncc_xyz,\n torsions=[torsion], new_dihedrals=[180])\n\n expected_xyz = \"\"\"N 0.93418641 -0.03285604 -0.03001207\nC 2.38993503 -0.07484791 -0.06911647\nC 2.94140390 -0.57952557 1.25275023\nH 2.71784999 -0.72977465 -0.88308842\nH 2.77585269 0.92949655 -0.27090234\nH 2.59029218 -1.59438178 1.46820445\nH 4.03547080 -0.60133377 1.22136235\nH 2.64077179 0.07038090 2.08153013\nH 0.57958892 0.25714689 -0.94038297\nH 0.56736059 -0.96947011 0.13439310\n\"\"\"\n\n self.assertAlmostEqual(energies[0], -6.1502687, 5)\n self.assertEqual(xyzs[0], expected_xyz)\n self.assertEqual(len(energies), 1)\n\n xyzs, energies = conformers.change_dihedrals_and_force_field_it(label='NCC', mol=ncc_mol, xyz=ncc_xyz,\n torsions=[torsion, torsion],\n new_dihedrals=[0, 180])\n self.assertEqual(len(energies), 2)\n\n xyzs, energies = conformers.change_dihedrals_and_force_field_it(label='NCC', mol=ncc_mol, xyz=ncc_xyz,\n torsions=[torsion, torsion],\n new_dihedrals=[[0, 180], [90, -120]])\n self.assertEqual(len(energies), 4)", "title": "" }, { "docid": "1a8eea0894a4adbe00d755873cab6986", "score": "0.46589118", "text": "def fix_gln_hydrogens(selection):\n # HZ21 is the cis, HZ22 is the trans\n for hindex in iterate_indices('({}) and (name HZ21+HZ22)'.format(selection)):\n if abs(cmd.get_dihedral('(idx {}) and ({})'.format(hindex, selection),\n '(name NZ2) and ({})'.format(selection),\n '(name CE) and ({})'.format(selection),\n '(name OZ1) and ({})'.format(selection))) < 90:\n cmd.alter('(idx {}) and ({})'.format(hindex, selection),\n 'name=\"HZ21\"')\n else:\n cmd.alter('(idx {}) and ({})'.format(hindex, selection),\n 'name=\"HZ22\"')", "title": "" } ]
80d8890b2cfb3aa416f6c087e9576b4f
Find an active survey for this patient and query type. Return the survey object. If multiple found, return one of them. If not found, return None.
[ { "docid": "3980c8603b659c86e9d1c71d6e573392", "score": "0.78635794", "text": "def find_active(patient, query_type):\n surveys = PatientSurvey.objects.filter(patient=patient,\n query_type=query_type,\n status=PatientSurvey.STATUS_STARTED)\n if len(surveys) == 0:\n return None\n return surveys[0]", "title": "" } ]
[ { "docid": "b19987f4089aeada05a187bfa945b38c", "score": "0.58423394", "text": "def survey(self):\n if self._survey is None:\n raise AttributeError(\"Simulation must have a survey set\")\n return self._survey", "title": "" }, { "docid": "0905845b3ce81b84d035abf5993036b6", "score": "0.5633258", "text": "def getContainingSurvey(questionID):\n sql = (\"SELECT surveyID FROM Question WHERE questionID = \" + str(questionID))\n csr.execute(sql)\n return(csr.fetchall()[0][0])", "title": "" }, { "docid": "a3f9174b2157d67c475989a114b85957", "score": "0.5625513", "text": "def getContainingSurvey(questionID):\r\n sql = (\"SELECT surveyID FROM Question WHERE questionID = \" + str(questionID))\r\n csr.execute(sql)\r\n return(csr.fetchall()[0][0])", "title": "" }, { "docid": "247b7989c90ae25f711e02a455ed7953", "score": "0.5520831", "text": "def find_one(self, where_dict):\n results = self.find(where_dict)\n return None if len(results) == 0 else results[0]", "title": "" }, { "docid": "6ca75c79d4d25bb58e81e9ba0a81dfbf", "score": "0.54957783", "text": "def one(self):\n answer = self.one_or_none()\n if answer is None:\n raise exceptions.NotFound(\"No rows matched the given query.\")\n return answer", "title": "" }, { "docid": "710e05b6615901b68f0ed2351b09fb66", "score": "0.5318442", "text": "def getOne(self, collection, query=None):\n results = self.query(collection, convert(query))\n return results and results[0] or {}", "title": "" }, { "docid": "705c4cc4579df5c069d7e7a507a9ed2e", "score": "0.52626497", "text": "def get_question(self,id):\n for q in self.questions:\n if q.id == id:\n return q\n \n return None", "title": "" }, { "docid": "72581c6ad568eea108f61974c3906ffd", "score": "0.52470416", "text": "def get_current_question(end_user_info, end_user_questionnaire):\n end_user = end_user_info[\"end_user_obj\"]\n\n questions = QuestionnaireQuestion.objects.filter(questionnaire_id=end_user_questionnaire.questionnaire.id)\n responses = Response.objects.filter(end_user_questionnaire=end_user_questionnaire)\n\n # catch possible errors\n if not questions or questions.count() == 0:\n raise LookupError(\"the specified questionnaire does not have any questions associated with it\")\n\n # if we have responses for all questions we have no questions to answer\n if responses.count() >= questions.count():\n return False\n\n # if we have no responses return the first question\n if responses.count() == 0:\n current_question = QuestionnaireQuestion.objects.filter(questionnaire_id=end_user_questionnaire.questionnaire.id, display_order=1).first()\n return current_question\n # if we have registered responses for this user questionnaire pick the first question without a response\n if responses.count() < questions.count():\n current_question = QuestionnaireQuestion.objects.filter(questionnaire_id=end_user_questionnaire.questionnaire.id, display_order=(responses.count() + 1)).first()\n return current_question", "title": "" }, { "docid": "93bc310aa383d26f0275c04209067559", "score": "0.5200904", "text": "def lookup(self, question):\n for record in self.cache:\n if record.name == question.name and record.rtype == question.rtype and record.rclass == question.rclass:\n return record\n return None", "title": "" }, { "docid": "90f94f72c8da1e8269cb401ee47c054c", "score": "0.5145972", "text": "def question(self):\n return Question.find(question_id=self.question_id)", "title": "" }, { "docid": "0121bdfcb3937c560ac45c578c7adcae", "score": "0.51334935", "text": "def query_one():\n return ResearchStudy.where(struct={'_count': '1'})", 
"title": "" }, { "docid": "6aed4d603bd499e78e86197c03a1a3e0", "score": "0.5104247", "text": "def query(self, expr: str) -> \"LimeSurvey\":\n\n # Make copy of LimeSurvey instance\n filtered_survey = self.__copy__()\n # Filter responses DataFrame\n filtered_survey.responses = self.responses.query(expr)\n\n return filtered_survey", "title": "" }, { "docid": "576047d7b699afe828f8ad2fb3de8043", "score": "0.51032853", "text": "def find_one(self, domain, search):\n try:\n return next(self.iter_find(domain, search))\n except StopIteration:\n return None", "title": "" }, { "docid": "bdfb592db9932bfd59bc94678ad26ccb", "score": "0.5014213", "text": "def get_survey():\n import os\n survey = os.getenv('OBZTAK_SURVEY',None)\n if survey is None:\n msg = \"$OBZTAK_SURVEY environment variable not set.\"\n raise EnvironmentError(msg)\n return survey.lower()", "title": "" }, { "docid": "d9f2557c21bed471e30d440d6df1a1f5", "score": "0.4982597", "text": "def first(self):\n self._fetch_results()\n if len(self._query_results_cache) > 0:\n return self._query_results_cache[0]\n\n return None", "title": "" }, { "docid": "7686ad2c4ddb682476e19f736b418dc1", "score": "0.49735385", "text": "def find_answer(self):\n if not self.parsed_query:\n raise ValueError(\"No query to answer\")\n\n fn = self._find_answer_function()\n if not fn:\n raise ValueError(\"No answer function found\")\n return fn()", "title": "" }, { "docid": "6054ba5518c75b0b746f3bf1acff8fc0", "score": "0.4964987", "text": "def get_current_question(self):\n return self.order_by('-asked_date').filter(asked_date__isnull=False,\n approved=1)[0]", "title": "" }, { "docid": "b92b2cff70820df4132365d55cfa6370", "score": "0.49544775", "text": "def get_ad(self, *args, **kwargs):\n qs = self._query_ad(*args, **kwargs)\n if qs.exists():\n return qs[0]\n else:\n return None", "title": "" }, { "docid": "e1fdfe1c95a21d51e984ec97a4d50db8", "score": "0.49425116", "text": "def fetch_one(self, query, collection):\n return getattr(self.opendc_db, collection).find_one(query)", "title": "" }, { "docid": "1c344f428584419aa05c71ac6754a495", "score": "0.49149877", "text": "def find_sample(self, key):\n sample_queryset = Sample.objects.filter(expedition_sample_code=key)\n if sample_queryset.exists():\n return sample_queryset[0]\n else:\n return None", "title": "" }, { "docid": "2fb32e32f86a2d3c865cdc4689c88030", "score": "0.49136627", "text": "def get_question(self, id: int) -> PredictItQuestion:\n for question in self.questions:\n if question.id == id:\n return question\n raise ValueError(\"Unable to find a question with that id.\")", "title": "" }, { "docid": "87dfa4d93829d06a0f814e08cb55dbf3", "score": "0.49014944", "text": "def staff_show_survey(id): \n question_packet = DataPacket(\"admin\", QUESTION_COL_IDS, \"_questions\")\n question_packet = DBMANAGER.retrieve_data(question_packet)\n\n survey_packet = DataPacket(\"admin\", SURVEY_COL_IDS, '_survey')\n survey_packet = DBMANAGER.retrieve_data(survey_packet)\n\n # Loop through the packet and find the right survey\n survey_info = [\"\", \"\", \"\"]\n for survey in survey_packet.retrieve_data():\n if survey[0] == id:\n survey_info = survey\n break\n \n question_ids = survey_info[2].split(',')\n display_info = []\n\n for question in question_packet.retrieve_data():\n for qid in question_ids:\n if qid == question[0]:\n display_info.append(question)\n\n if request.method == 'POST':\n if request.form['btn'] == 'approve':\n mod_survey_packet = DataPacket(\"admin\", SURVEY_COL_IDS, '_survey')\n mod_survey_packet = 
create_survey(mod_survey_packet,\n survey[0],\n survey[1],\n survey[2],\n \"Open\")\n\n DBMANAGER.modify_data(mod_survey_packet, True)\n\n return redirect(url_for('staff_homepage'))\n\n return render_template('staff/dash-staff-survey.html', survey_id=id, display=display_info, survey_state=survey[3])", "title": "" }, { "docid": "31dccc6d4188a8cd14bbdc41cbec2ac0", "score": "0.48324603", "text": "def get_first_question(self):\n if not self.question_list:\n return False\n\n first, _ = self.question_list.split(',', 1)\n question_id = int(first)\n return Question.objects.get(id=question_id)", "title": "" }, { "docid": "2a2899fd04e76176ae9eca738190fcc6", "score": "0.48159447", "text": "def ooc_intent(self):\n try:\n return self.questions.get(is_intent=True)\n except ActionOOCQuestion.DoesNotExist:\n return None", "title": "" }, { "docid": "d1bbd2bb58cd6fad321d24076c66b9de", "score": "0.4808674", "text": "def search(self, query):\n self.query = query\n answers = wanswers = []\n answers = self.get_boolean_answers(answers)\n answers = self.get_phrase_answers(answers)\n wanswers = self.get_wildcard_answers(wanswers)\n if wanswers: answers.append(set.intersection(*wanswers))\n if answers: return set.intersection(*answers)", "title": "" }, { "docid": "19209be5ac46fd125ebd1c4b830f9815", "score": "0.4799345", "text": "def pick_survey():\r\n \r\n title = request.args[\"pick_survey\"]\r\n picked_survey = surveys[title]\r\n session[\"survey_title\"] = title\r\n\r\n return render_template(\"start_survey.html\", \r\n title=picked_survey.title, \r\n instructions=picked_survey.instructions,\r\n )", "title": "" }, { "docid": "7f15a883e4b4f5827a8f06479e8c6b34", "score": "0.4763975", "text": "def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "7fb671f33776ce4b0ef9ba9617b9f2fa", "score": "0.4759795", "text": "def get_question_query_inspector_record(self, question_record_type):\n return # osid.assessment.records.QuestionQueryInspectorRecord", "title": "" }, { "docid": "e3bf7235456fe19239460f0d0e28f559", "score": "0.47539744", "text": "def find_one(self, attr, value):\n return self.collection.find_one({attr: value})", "title": "" }, { "docid": "577b9052016f288c5905f6790a40d89e", "score": "0.47486293", "text": "def get_lookup_record(\n self,\n ) -> Optional[WhoisQueryToolRecord]:\n\n return self.lookup_record", "title": "" }, { "docid": "f050f840c540a8103f5621e6940b233a", "score": "0.47311246", "text": "def query(self):\n if self._query_present:\n return self._query_value\n else:\n raise AttributeError(\"missing required field 'query'\")", "title": "" }, { "docid": "abb1659e4720624ce777204a325a0738", "score": "0.47283572", "text": "def once(self, query_str: str) -> Optional[Dict]:\n q = None\n try:\n q = PrologQuery(query_str, simple_query_srv=self._simple_query_srv,\n next_solution_srv=self._next_solution_srv, finish_srv=self._finish_query_srv)\n return next(Upper(q.solutions()))\n except StopIteration:\n return None\n finally:\n if q is not None:\n q.finish()", "title": "" }, { "docid": "c0a20826f5146cd3b222fbb705142b04", "score": "0.4713156", "text": "def get_answer_query_inspector_record(self, question_record_type):\n return # osid.assessment.records.AnswerQueryInspectorRecord", "title": "" }, { "docid": "a99b8681fa330a54acf33b0b32039748", "score": "0.47001654", "text": "def find(self, **kwargs):\n matches = self.findall(**kwargs)\n num_matches = len(matches)\n if not num_matches:\n msg = \"No %s matching: %s.\" % (self.resource_class.__name__, kwargs)\n raise 
exc.NotFound(404, msg)\n if num_matches > 1:\n msg = \"More than one %s matching: %s.\" % (\n self.resource_class.__name__, kwargs)\n raise exc.NoUniqueMatch(400, msg)\n else:\n return matches[0]", "title": "" }, { "docid": "7f8927e56f9717b816dce4efa9f04b21", "score": "0.4696955", "text": "def selected_track(self) -> Optional[Track]:\n try:\n return self.tracks[self._selected_track]\n except IndexError:\n return self.tracks[0]", "title": "" }, { "docid": "31bc2cf01379f6420ccda525d61c550b", "score": "0.46870393", "text": "def query(self, default=NO_VALUE):\n try:\n return self.get()\n except AttributeError:\n return default", "title": "" }, { "docid": "fa672af440d4266d9f0d74317022aa35", "score": "0.46830004", "text": "def survey_results(self, participant=None, datetime_object=False):\n \n if participant is None:\n participant = self.id\n \n participant_activities = lamp.activity.activity_all_by_participant(participant)['data']\n participant_activities_surveys = [activity for activity in participant_activities if activity['spec'] == 'lamp.survey']\n participant_activities_surveys_ids = [survey['id'] for survey in participant_activities_surveys] \n \n participant_surveys = {} #initialize dict mapping survey_type to occurence of scores\n participant_results = lamp.result_event.result_event_all_by_participant(participant)['data']\n \n for result in participant_results:\n\n #Check if it's a survey event\n if result['activity'] not in participant_activities_surveys_ids or len(result['temporal_events']) == 0: continue\n activity = lamp.activity.activity_view(result['activity'])['data'][0]\n \n #Check to see if all the event values are numerical\n try: [float(event['value']) for event in result['temporal_events']]\n except: continue\n \n #Get survey time\n survey_time = result['timestamp']\n if datetime_object: \n survey_time = datetime.datetime.utcfromtimestamp(survey_time/100).date()\n \n #Get survey score\n survey_score = sum([float(event['value']) for event in result['temporal_events']]) / len(result['temporal_events'])\n \n #Add result\n if activity['name'] not in participant_surveys:\n participant_surveys[activity['name']] = []\n participant_surveys[activity['name']].append((survey_score, survey_time))\n \n return participant_surveys", "title": "" }, { "docid": "01089e080f8c422497edafe653533bb1", "score": "0.46645543", "text": "def get_one(self, filter: dict) -> Company:\n try:\n company = Company()\n company.find(filter)\n if str(company._id) == \"\":\n return None\n return company\n except Exception as e:\n app.logger.error(e)\n return None", "title": "" }, { "docid": "361f46f304cf6e94943ee5d1c93b12f8", "score": "0.4653534", "text": "def get(self, query):\n try:\n cursor = self._connection.cursor()\n cursor.execute(self.SQL_GET_QUERY,\n [query, query])\n row = cursor.fetchone()\n if row is not None:\n return Record.from_cursor_row(row)\n except Exception as e:\n logger.exception(e)\n pass\n return None", "title": "" }, { "docid": "c0445099f64fd0e3aa33f4ff204bbb2e", "score": "0.4642259", "text": "def get_single_question(qstn_id):\n cursor.execute(\"SELECT * FROM questions WHERE qstn_id = '{}'\".format(qstn_id))\n row = cursor.fetchone()\n return row", "title": "" }, { "docid": "44be0811fc88c877cfc135dc9bf38ba8", "score": "0.46397325", "text": "def show_survey():\n return render_template(\"survey.html\", title_in_template=satisfaction_survey.title, instructions_in_template=satisfaction_survey.instructions)", "title": "" }, { "docid": "921b75a152c798ac1067ea9e05ec9673", "score": "0.4638929", 
"text": "def get(self):\n patient = self._proknow.patients.get(self._workspace_id, self._patient_id)\n entity = patient.find_entities(id=self._id)[0]\n return entity.get()", "title": "" }, { "docid": "1d54daa07c655795585dce9e15cb485e", "score": "0.4633456", "text": "def find_doc(col, query={'ctype':'pedestals'}) :\n docs = find_docs(col, query)\n #logger.debug('XXX Number of documents found:', docs.count())\n\n if (docs is None)\\\n or (docs.count()==0) :\n logger.warning('DB %s collection %s does not have document for query %s' % (col.database.name, col.name, str(query)))\n return None\n\n qkeys = query.keys()\n key_sort = 'time_sec' if 'time_sec' in qkeys else 'run'\n\n doc = docs.sort(key_sort, DESCENDING)[0]\n #msg = 'query: %s\\n %d docs found, selected doc[\"%s\"]=%s'%\\\n # (query, docs.count(), key_sort, doc[key_sort])\n #logger.info(msg)\n\n return doc", "title": "" }, { "docid": "b45c24290fca0c73bf2c0f2faf6c3c0e", "score": "0.4623174", "text": "def __getitem__(\n self, key: Union[pd.Series, pd.DataFrame, str, list, tuple]\n ) -> \"LimeSurvey\":\n filtered_survey = self.__copy__()\n\n # A bool-valued Series, e.g. survey[survey.responses[\"A3\"] == \"A5\"]\n # is interpreted as a row filter\n if isinstance(key, (pd.Series, pd.DataFrame)):\n filtered_survey.responses = filtered_survey.responses[key]\n # A question id as string, e.g. survey[\"A3\"]\n # is interpreted as a column filter\n elif isinstance(key, str):\n filtered_survey = filtered_survey[[key]]\n # A list of columns, e.g. survey[[\"C3_SQ001\", \"C3_Sq002\"]]\n # is interpreted as a column filter\n elif isinstance(key, list):\n columns = [\n column\n for question in key\n for column in filtered_survey.get_question(question).index.to_list()\n ]\n filtered_survey.responses = filtered_survey.responses[columns]\n # Two args, e.g. 
survey[survey.responses[\"A3\"] == \"A5\", \"B1\"]\n # or survey[1:10, [\"B1\", \"C1_SQ001\"]]\n # is interpreted as (row filter, column filter)\n elif isinstance(key, tuple) and len(key) == 2:\n rows, columns = key\n filtered_survey = filtered_survey[rows]\n filtered_survey = filtered_survey[columns]\n else:\n raise SyntaxError(\n \"\"\"\n Input must be of type pd.Series, pd.DataFrame, str, list, or tuple.\n Examples:\n pd.Series or pd.DataFrame: survey[survey.responses[\"A3\"] == \"A5\"]\n str: survey[\"A3\"]\n list of str: survey[[\"C3_SQ001\", \"C3_Sq002\"]]\n tuple: survey[survey.responses[\"A3\"] == \"A5\", \"B1\"]\n \"\"\"\n )\n\n return filtered_survey", "title": "" }, { "docid": "dca289c421211e3cf102099c4528d3fe", "score": "0.46132508", "text": "def one(self):\n length = len(self)\n if length > 1:\n raise MultipleResultsFound(\"%s results found.\" % length)\n\n return self.first()", "title": "" }, { "docid": "7080a8261311092ab54c038bf1216b26", "score": "0.46123973", "text": "async def get_first(cls, query, projection_dict=None):\n return await cls.collection().find_one(query, projection_dict)", "title": "" }, { "docid": "7080a8261311092ab54c038bf1216b26", "score": "0.46123973", "text": "async def get_first(cls, query, projection_dict=None):\n return await cls.collection().find_one(query, projection_dict)", "title": "" }, { "docid": "89febc13e8476b8d27e6570cc2cb7965", "score": "0.46109277", "text": "def single(self):\n \n for odorant in self.odorants:\n if self.odorants.count(odorant) == 1:\n return odorant\n return None", "title": "" }, { "docid": "ebd0c5c8a29bec2189d76823d54b0cd5", "score": "0.4598164", "text": "def show_survey(id):\n question_packet = DataPacket(\"admin\", QUESTION_COL_IDS, \"_questions\")\n question_packet = DBMANAGER.retrieve_data(question_packet)\n\n survey_packet = DataPacket(\"admin\", SURVEY_COL_IDS, '_survey')\n survey_packet = DBMANAGER.retrieve_data(survey_packet)\n\n # Loop through the packet and find the right survey\n survey_info = [\"\", \"\", \"\"]\n for survey in survey_packet.retrieve_data():\n if survey[0] == id:\n survey_info = survey\n break\n\n question_ids = survey_info[2].split(',')\n display_info = []\n\n for question in question_packet.retrieve_data():\n for qid in question_ids:\n if qid == question[0]:\n display_info.append(question)\n\n if request.method == 'POST':\n return redirect(url_for('student_homepage'))\n\n return render_template('student/dash-student-survey.html', survey_id=id, survey_course=survey_info[1], display=display_info)", "title": "" }, { "docid": "30e7b85092da7028279bc8f9ac386fbc", "score": "0.45933336", "text": "def find_by(cls, cond, find_one=False):\n kwargs = dict(cond)\n re_v = []\n try:\n if find_one:\n re_v = cls.query.filter_by(**kwargs).first()\n else:\n re_v = cls.query.filter_by(**kwargs)\n except exc.SQLAlchemyError as error:\n pass\n finally:\n return re_v", "title": "" }, { "docid": "cae8a8bd1761a05a9c803ba17b1bcf35", "score": "0.45850167", "text": "def get_active_driver_instance():\n # Get the first driver instance that exists (not None) in the list of possible driver instances.\n driver = next(\n (_driver for _driver in [BaseDriver.instance(), Remote.instance(), Generic.instance()] if _driver is not None),\n None,\n )\n if driver is None:\n raise SdkException(\"No active driver instance found for reporting\")\n return driver", "title": "" }, { "docid": "b12399842ace811c93bd6cb0a3ca241c", "score": "0.45849624", "text": "def get_record_first(self) -> Optional[OpenAPI]:\n return 
self.get_record_by_index(0)", "title": "" }, { "docid": "bf74b4ec4493a0fb95e267db25c68139", "score": "0.45849448", "text": "def query_one(cls, context, lockmode=False, **kwargs):\n query = db_query(cls, context, lockmode=lockmode, **kwargs)\n return query.first()", "title": "" }, { "docid": "1add86c850e4b6f3f43dd0c592f5c495", "score": "0.45705348", "text": "def queryset(self, request, queryset):\n if self.value():\n parent = Question.objects.get(slug=self.value())\n return Choice.objects.child_of(parent).order_by('-id')\n return queryset", "title": "" }, { "docid": "1eb9bc2e2ae7ecc8263fd010f70a72d8", "score": "0.4568315", "text": "def next_question(self):\n if self.start_date is None:\n self.start_date = datetime.now()\n\n question = self.cur_series.get_first_unanswered(self)\n if question is None: # When no more question are unanswered in the series\n self.cur_answer = None\n self.cur_series = None\n else:\n self.cur_answer = question.open(self)\n self.save()\n return self.cur_answer", "title": "" }, { "docid": "0fb9bd4a33bd5de530917882662d55cc", "score": "0.45588672", "text": "def _get_instance(self, ident):\n filters = self._get_ident_filters(ident)\n query = self.session.query(self.model)\n try:\n query = self.query_builder.apply_filters(\n query,\n model_class=self.model,\n filters=filters,\n nested_conditions=self.get_required_nested_filters,\n whitelist=self.whitelist,\n stack_size_limit=100,\n convert_key_names_func=self.convert_key_name,\n gettext=self.context.get(\"gettext\", None))\n query = self.apply_required_filters(query)\n except (TypeError, ValueError, InvalidMqlException, BadRequestError):\n # NOTE - BadRequestError only an issue on filters,\n # e.g. a bad ident provided.\n raise self.make_error(\"resource_not_found\", ident=ident)\n return query.first()", "title": "" }, { "docid": "306050262b473f57722279080c09c063", "score": "0.4553061", "text": "def get_assessment_query_inspector_record(self, assessment_record_type):\n return # osid.assessment.records.AssessmentQueryInspectorRecord", "title": "" }, { "docid": "0554a54c0f7bf6c4e1158e6282d78c67", "score": "0.45465904", "text": "def getQuestion(questionID):\n sql = (\"SELECT questionBody, questionType FROM Question WHERE questionID = \" + str(questionID))\n csr.execute(sql)\n questionData = csr.fetchall()[0]\n if questionData[1] == QUESTIONTYPEOPTIONS:\n return optionQuestionParse(questionData[0])[0]\n else:\n return(questionData[0])", "title": "" }, { "docid": "540956adb0d78501ff1fe7cdd3246806", "score": "0.45448503", "text": "def get_one(self, _id):\n return self.get_query().filter(pk=_id).first()", "title": "" }, { "docid": "2478419055683bfbd70340b43c75cb77", "score": "0.45383173", "text": "def get_current_encounter(self, session=db.session):\n active_encounters = session.query(Encounter)\\\n .filter(Encounter.patient == self)\\\n .filter(Encounter.parent == None)\\\n .filter(Encounter.end_time == None)\n\n if active_encounters.count() == 1:\n return active_encounters.one()\n elif active_encounters.count() > 1:\n raise dbexception.AutoMODatabaseError(\"Multiple active Encounters for patient found. 
This should not happen.\")\n else:\n return None", "title": "" }, { "docid": "9e3b11e75cc0ce773b15f8a183c479f9", "score": "0.45314738", "text": "def find_one(self, *criterion, **criterion_kargs):\n criterion = list(criterion) + [criterion_kargs]\n\n return self.search(*criterion).first()", "title": "" }, { "docid": "77a8b864a4f622840dc2ea2f6e6489f0", "score": "0.45282018", "text": "def _stage_find(self, team_id=False, domain=None, order='sequence'):\n\t\t# collect all team_ids by adding given one, and the ones related to the current leads\n\t\tteam_ids = set()\n\t\tif team_id:\n\t\t\tteam_ids.add(team_id)\n\t\tfor lead in self:\n\t\t\tif lead.team_id:\n\t\t\t\tteam_ids.add(lead.team_id.id)\n\t\t# generate the domain\n\t\tif team_ids:\n\t\t\tsearch_domain = ['|', ('team_id', '=', False), ('team_id', 'in', list(team_ids))]\n\t\telse:\n\t\t\tsearch_domain = [('team_id', '=', False)]\n\t\t# AND with the domain in parameter\n\t\tif domain:\n\t\t\tsearch_domain += list(domain)\n\t\t# perform search, return the first found\n\t\treturn self.env['crm.stage'].search(search_domain, order=order, limit=1)", "title": "" }, { "docid": "65c340a1d646260f0d991fd748807a1d", "score": "0.45097166", "text": "def single(self):\n records = list(self)\n size = len(records)\n if size == 0:\n return None\n if size != 1:\n warn(\"Expected a result with a single record, but this result contains %d\" % size)\n return records[0]", "title": "" }, { "docid": "2276d7978afccf9ce7466f87295e60c2", "score": "0.45090333", "text": "def find(self, query):\n cat = getattr(self, self.default_catalog, None)\n if not cat: return\n brains = cat({'serviceKeys': query})\n if not brains: return None\n for brain in brains:\n if brain.getPrimaryId.startswith(self.getPrimaryId()):\n try:\n return self.getObjByPath(brain.getPrimaryId)\n except KeyError:\n log.warn(\"bad path '%s' for index '%s'\",\n brain.getPrimaryId, self.default_catalog)", "title": "" }, { "docid": "84c8fa692bc6f2d6f7f0f35c5c3dc22f", "score": "0.4507439", "text": "def get_by(cls, **kwargs):\n return cls.query.filter_by(**kwargs).one()", "title": "" }, { "docid": "50b240f58223d2aebeb83933e6ed82a5", "score": "0.4505675", "text": "def get_query_answer(question):\r\n try:\r\n content = wikipedia.page(question)\r\n except PageError:\r\n raise SummarizerException('No content was found')\r\n\r\n return Summarizer.get_summary(content.summary)[0]", "title": "" }, { "docid": "d60e75c345fe26f57f74cfe7d4642bb9", "score": "0.4486059", "text": "def get_record(self, dns_type):\n try:\n record = [\n record for record in self.records if record['type'] == dns_type]\n except IndexError:\n raise RecordNotFound(\n 'Cannot find the specified dns record in domain {domain}'\n .format(domain=self.domain))\n return record[0]", "title": "" }, { "docid": "3173c2515ff6e85d9962d428a756fa5c", "score": "0.44844136", "text": "def get_first_record(queryset):\n records = sorted([record for record in queryset], key=lambda k: k.id)\n if records:\n return records[0]\n\n return None", "title": "" }, { "docid": "02be7471427052e5f027163281b1a883", "score": "0.44826606", "text": "def specific(self):\n real_type = self.real_type\n if isinstance(self, real_type.model_class()):\n return self\n else:\n return self.real_type.get_object_for_this_type(pk=self.pk)", "title": "" }, { "docid": "02be7471427052e5f027163281b1a883", "score": "0.44826606", "text": "def specific(self):\n real_type = self.real_type\n if isinstance(self, real_type.model_class()):\n return self\n else:\n return 
self.real_type.get_object_for_this_type(pk=self.pk)", "title": "" }, { "docid": "fd2cefe1614fa792a27a634f221d05b8", "score": "0.4477452", "text": "def get_query():\n return IfQuery", "title": "" }, { "docid": "4025fdf03306f18931271aa2507030f7", "score": "0.44671547", "text": "def study_type_find(self):\r\n print(\"mode 2\")\r\n self.change_study_type(self.answer_layout_light, self.answer_layout_dark)\r\n self.current_layout.change_study_type(\"Find\")", "title": "" }, { "docid": "dad7a7f7c3310f423a288103154aa55e", "score": "0.44601768", "text": "def query(sp_id=None, spname=None, chip_obj=None):\n try:\n sp = SoftwareProduct.objects.filter(\n models.Q(id=sp_id) |\n models.Q(sp_name=spname) |\n models.Q(chipset=chip_obj))\n assert(sp)\n except:\n sp = None\n\n return sp", "title": "" }, { "docid": "e0791739332e2315dba8f814a2181894", "score": "0.44593215", "text": "def get_queryset(self):\n return Choice.objects.filter(\n question__id=self.kwargs.get(self.lookup_field))", "title": "" }, { "docid": "195930fc68e6119919745ac6c3645a60", "score": "0.4457732", "text": "def set_survey(survey):\n import os\n os.environ['OBZTAK_SURVEY'] = survey\n return get_survey()", "title": "" }, { "docid": "a5462103c13684a76fa539cfc5d40957", "score": "0.4456993", "text": "def get_page(cls, page):\n return cls.query.filter_by(page=page).one_or_none()", "title": "" }, { "docid": "a5462103c13684a76fa539cfc5d40957", "score": "0.4456993", "text": "def get_page(cls, page):\n return cls.query.filter_by(page=page).one_or_none()", "title": "" }, { "docid": "4b2401681f3d34f9a22fdf7607acde8d", "score": "0.44569865", "text": "def get(self):\n return self._proknow.patients.get(self._workspace_id, self._id)", "title": "" }, { "docid": "42291dbe69e4426fa76916c4af42bc38", "score": "0.44565615", "text": "def get(self, id):\n\t\ttry:\n\t\t\tquestionResult = Service.db.query(\"SELECT * FROM questions WHERE id = %s\", id)[0]\n\t\t\treturn self._map(questionResult)\n\t\texcept IndexError, e:\n\t\t\traise NoResultFound(\"No question was found with id %i\" % id)", "title": "" }, { "docid": "5349fa5d79979b01bd4cfff99c9ffaa1", "score": "0.44554046", "text": "def get_queryset(self):\n survey = get_object_or_404(Survey, pk=self.kwargs['pk'])\n if not survey.results_published:\n return Feedback.objects.none()\n questions = survey.question_set.all()\n return Feedback.objects.filter(recipient=self.request.user,\n question__in=questions)", "title": "" }, { "docid": "ca3562bbb68da8c463685c3ece621906", "score": "0.445399", "text": "def find(self, dasquery):\n cond = {'qhash': dasquery.qhash, 'das.system':'das'}\n return self.col.find_one(cond)", "title": "" }, { "docid": "d28592dcd3743e141e2f423a1c2ca306", "score": "0.4450601", "text": "def getone(self, query, *parameters):\n rows = self.query(query, *parameters)\n if not rows:\n return None\n else:\n if len(rows) > 1:\n raise Exception(\"Multiple rows returned for Database.get() query\")\n else:\n return rows[0]", "title": "" }, { "docid": "79873a7819f78fe5192979dbfc4b651e", "score": "0.44472286", "text": "def get(self, query, OneOrAll='One'):\n with sqlite3.connect(self._dbpath) as __conn:\n __db = __conn.cursor()\n __db.execute(query)\n if OneOrAll == 'One':\n return __db.fetchone()\n else:\n return __db.fetchall()", "title": "" }, { "docid": "a883465a3cb3f563f3f8b4ec08042f3c", "score": "0.44463205", "text": "def __init__(self, survey_id):\n if _Utils.survey_exists(survey_id) is False:\n self.k = False\n return None\n self.k = True\n self.sdb = utils.Database().survey\n self._survey_dat = 
self.sdb.find_one({\"_id\": ObjectId(survey_id)})\n self._updates = set()", "title": "" }, { "docid": "70ad2bb35c31d4cd281c6f6313659c82", "score": "0.44460037", "text": "def query_as_single_item(self, query: str, super_uri: bool = False):\n\n result = self.query_as_list(query, super_uri=super_uri)\n\n return result[0][0]", "title": "" }, { "docid": "df7d0ea566f765a8f8aecd35ac9f88ef", "score": "0.4444951", "text": "def get_single_query(data, srch_id):\n return data.loc[data['srch_id'] == srch_id]", "title": "" }, { "docid": "fb83e347fc38a9b2b5f2269ac722972a", "score": "0.44434384", "text": "def querySpeaker(self, request):\n\n q = Speaker.query()\n q = q.filter(Speaker.speaker == request.speaker)\n\n # if an organization query is provided, query speaker by organization\n if request.organization:\n q = q.filter(Speaker.organization == request.organization)\n\n return SpeakerForms(items = [self._copySpeakerToForm(s) for s in q])", "title": "" }, { "docid": "fbdd10159672d87758c1b25a44d0d3b6", "score": "0.44341874", "text": "def query(self) -> Optional[\"QuerySpec\"]:\n return self.__query", "title": "" }, { "docid": "cbffd90fd5db9f9a65d86bd049963f7a", "score": "0.44236532", "text": "def db_for_read(self, model, **hints):\n #return settings.DATABASE_APPS_MAPPING.get(model._meta.sparqls, None)\n print(\"model._meta\",model._meta,model.__dict__)\n if model._meta in ['polls.query', 'polls.choice']:\n return settings.DATABASE_APPS_MAPPING.get('qc', None)\n return None", "title": "" }, { "docid": "285ab22a890ae08b2b7c854b24126de4", "score": "0.44234085", "text": "def FindSurveysStartingToday():\n course_db = geclass.course_db.CourseDB()\n pre, post = course_db.get_surveys_today()\n return pre, post", "title": "" }, { "docid": "e7ff3209d42a2261338f7b54bca30ddc", "score": "0.4423127", "text": "def get_query_by_id(self, query_id) -> Optional[Query]:\n return self.session.query(Query).get(query_id)", "title": "" }, { "docid": "fac3729d08ee5211f51e53c21b795e2e", "score": "0.44196558", "text": "async def find_record(\n self, type_filter: str, tag_query: Mapping = None, options: Mapping = None\n ) -> StorageRecord:\n scan = self.search_records(type_filter, tag_query, options)\n results = await scan.fetch(2)\n await scan.close()\n if not results:\n raise StorageNotFoundError(\"Record not found\")\n if len(results) > 1:\n raise StorageDuplicateError(\"Duplicate records found\")\n return results[0]", "title": "" }, { "docid": "c15fccaec4b5bc001ac5fed2efd8d302", "score": "0.44151294", "text": "def one_or_none(self):\n # Sanity check: Has consumption of this query already started?\n # If it has, then this is an exception.\n if self._metadata is not None:\n raise RuntimeError(\n \"Can not call `.one` or `.one_or_none` after \"\n \"stream consumption has already started.\"\n )\n\n # Consume the first result of the stream.\n # If there is no first result, then return None.\n iterator = iter(self)\n try:\n answer = next(iterator)\n except StopIteration:\n return None\n\n # Attempt to consume more. 
This should no-op; if we get additional\n # rows, then this is an error case.\n try:\n next(iterator)\n raise ValueError(\"Expected one result; got more.\")\n except StopIteration:\n return answer", "title": "" }, { "docid": "ca3b58de3b4dd204fe0088c290d4ba06", "score": "0.44149923", "text": "def find(cls, name):\n return cls.objects.filter(name=name).first()", "title": "" }, { "docid": "6036db207237e50224400a9e007689ab", "score": "0.44149256", "text": "def get_query(self) -> LFJsonQuery:\n if self.query_instance:\n return self.query_instance\n self.query_instance = LFJsonQuery(session_obj=self, debug=self.debug_on)\n return self.query_instance", "title": "" }, { "docid": "989caf911a669301c0b9b30aed936e55", "score": "0.4412653", "text": "def choose_solution(candidates, query_string, hypothesis):\n min_score = current_app.config['MIN_SCORE_FIRST_ROUND']\n filtered = [(score, solution) for score, solution in candidates if score >= min_score*len(score)]\n\n if len(filtered)==0:\n if candidates:\n current_app.logger.debug(\"No score above minimal score, inspecting doubtful solutions.\")\n return inspect_doubtful_solutions(candidates, query_string, hypothesis)\n raise NoSolution(\"Not even a doubtful solution\")\n\n elif len(filtered)==1:\n current_app.logger.debug(\"Accepting single unique solution\")\n evidence, solution = filtered[0]\n return evidence, solution\n\n elif len(filtered)>1:\n current_app.logger.debug(\"Trying to disentangle multiple equal-scored solutions\")\n # get all equal-scored matches with the highest scores\n best_score = max(item[0].get_score() for item in filtered)\n best_solution = [(ev, solution) for ev, solution in filtered if ev.get_score()==best_score]\n if len(best_solution)==1:\n evidence, solution = best_solution[0]\n return evidence, solution\n else:\n current_app.logger.debug(\"...impossible\")\n raise Undecidable(\"%s solutions with equal (good) score.\"%len(best_solution))", "title": "" }, { "docid": "90713392e1c8d10236da2429dc2d49a7", "score": "0.44067153", "text": "def get_selected_app(self):\n selection_index = self.get_active()\n if selection_index == 0: # When \"Choose An App)\" is selected\n return None\n else:\n return self._app_list[selection_index - 1]", "title": "" }, { "docid": "36cff022c37c81d16952123bd30813f7", "score": "0.4398955", "text": "def find_by_id(self, id: str) -> Optional[AssistantEndpoint]:\n for resource in self.list():\n if resource.id == id:\n return resource\n return None", "title": "" }, { "docid": "f58f2a7e3735b4216389677579bfe578", "score": "0.4398198", "text": "def get(cls, request_id):\n return cls.query.filter_by(id=request_id).one_or_none()", "title": "" }, { "docid": "bf0f431347f15793d4a53be7c399f41b", "score": "0.43940058", "text": "def get(self, query, *parameters):\n rows = self.query(query, *parameters)\n if not rows:\n return None\n else:\n return rows[0]", "title": "" } ]
8384be76a9abe6dd28cd5d8a5cc81821
Map a legacy Broker command to a ProtocolEngine command. A "before" message from the Broker is mapped to a ``RUNNING`` ProtocolEngine command. An "after" message from the Broker is mapped to a ``SUCCEEDED`` ProtocolEngine command. It has the same ID as the original ``RUNNING`` command, so when you send it to the ProtocolEngine, it will update the original command's status inplace.
[ { "docid": "c8bca8a292a19f41c2a3a6c0674068dc", "score": "0.6899797", "text": "def map_command( # noqa: C901\n self,\n command: legacy_command_types.CommandMessage,\n ) -> List[pe_actions.Action]:\n command_type = command[\"name\"]\n\n if command_type in _HIGHER_ORDER_COMMAND_TYPES:\n return []\n\n command_error = command[\"error\"]\n stage = command[\"$\"]\n # TODO(mc, 2021-12-08): use message ID as command ID directly once\n # https://github.com/Opentrons/opentrons/issues/8986 is resolved\n broker_id = command[\"id\"]\n now = ModelUtils.get_timestamp()\n\n results: List[pe_actions.Action] = []\n\n if stage == \"before\":\n count = self._command_count[command_type]\n command_id = f\"{command_type}-{count}\"\n engine_command = self._build_initial_command(command, command_id, now)\n\n self._command_count[command_type] = count + 1\n self._commands_by_broker_id[broker_id] = engine_command\n\n results.append(pe_actions.UpdateCommandAction(engine_command))\n\n elif stage == \"after\":\n running_command = self._commands_by_broker_id[broker_id]\n completed_command: pe_commands.Command\n if command_error is None:\n if isinstance(running_command, pe_commands.PickUpTip):\n completed_command = running_command.copy(\n update={\n \"result\": pe_commands.PickUpTipResult.construct(\n tipVolume=command[\"payload\"][\"location\"].max_volume, # type: ignore[typeddict-item]\n tipLength=command[\"payload\"][\"instrument\"].hw_pipette[\"tip_length\"], # type: ignore[typeddict-item]\n position=pe_types.DeckPoint(x=0, y=0, z=0),\n ),\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n elif isinstance(running_command, pe_commands.DropTip):\n completed_command = running_command.copy(\n update={\n \"result\": pe_commands.DropTipResult.construct(\n position=pe_types.DeckPoint(x=0, y=0, z=0)\n ),\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n elif isinstance(running_command, pe_commands.Aspirate):\n completed_command = running_command.copy(\n update={\n # Don't .construct() result, because we want to validate\n # volume.\n \"result\": pe_commands.AspirateResult(\n volume=running_command.params.volume,\n position=pe_types.DeckPoint(x=0, y=0, z=0),\n ),\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n elif isinstance(running_command, pe_commands.Dispense):\n completed_command = running_command.copy(\n update={\n # Don't .construct() result, because we want to validate\n # volume.\n \"result\": pe_commands.DispenseResult(\n volume=running_command.params.volume,\n position=pe_types.DeckPoint(x=0, y=0, z=0),\n ),\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n elif isinstance(running_command, pe_commands.BlowOut):\n completed_command = running_command.copy(\n update={\n \"result\": pe_commands.BlowOutResult.construct(\n position=pe_types.DeckPoint(x=0, y=0, z=0)\n ),\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n elif isinstance(running_command, pe_commands.Custom):\n completed_command = running_command.copy(\n update={\n \"result\": pe_commands.CustomResult.construct(),\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n else:\n completed_command = running_command.copy(\n update={\n \"status\": pe_commands.CommandStatus.SUCCEEDED,\n \"completedAt\": now,\n }\n )\n results.append(pe_actions.UpdateCommandAction(completed_command))\n\n if isinstance(completed_command, pe_commands.WaitForResume):\n 
results.append(\n pe_actions.PauseAction(source=pe_actions.PauseSource.PROTOCOL)\n )\n\n else:\n results.append(\n pe_actions.FailCommandAction(\n command_id=running_command.id,\n error_id=ModelUtils.generate_id(),\n failed_at=now,\n error=LegacyContextCommandError(command_error),\n )\n )\n\n return results", "title": "" } ]
[ { "docid": "c6a69c15ffe15ed753ebb81596aff1e5", "score": "0.5849269", "text": "def apply_command(cmd):\n engine = cmd.engine\n engine.receive([cmd])", "title": "" }, { "docid": "e6b0287b396bfd7da5b1fd186c729b89", "score": "0.51245576", "text": "def _map_module_load(\n self, module_load_info: LegacyModuleLoadInfo\n ) -> pe_commands.Command:\n now = ModelUtils.get_timestamp()\n\n count = self._command_count[\"LOAD_MODULE\"]\n command_id = f\"commands.LOAD_MODULE-{count}\"\n module_id = f\"module-{count}\"\n requested_model = _LEGACY_TO_PE_MODULE[module_load_info.requested_model]\n loaded_model = _LEGACY_TO_PE_MODULE[module_load_info.loaded_model]\n\n # This will fetch a V2 definition only. PAPI < v2.3 use V1 definitions.\n # When running a < v2.3 protocol, there will be a mismatch of definitions used\n # during analysis+LPC (V2) and protocol execution (V1).\n # But this shouldn't result in any problems since V2 and V1 definitions\n # have similar info, with V2 having additional info fields.\n loaded_definition = self._module_definition_by_model.get(\n loaded_model\n ) or self._module_data_provider.get_definition(loaded_model)\n\n load_module_command = pe_commands.LoadModule.construct(\n id=command_id,\n key=command_id,\n status=pe_commands.CommandStatus.SUCCEEDED,\n createdAt=now,\n startedAt=now,\n completedAt=now,\n params=pe_commands.LoadModuleParams.construct(\n model=requested_model,\n location=pe_types.DeckSlotLocation(\n slotName=module_load_info.deck_slot,\n ),\n moduleId=module_id,\n ),\n result=pe_commands.LoadModuleResult.construct(\n moduleId=module_id,\n serialNumber=module_load_info.module_serial,\n definition=loaded_definition,\n model=loaded_model,\n ),\n )\n self._command_count[\"LOAD_MODULE\"] = count + 1\n self._module_id_by_slot[module_load_info.deck_slot] = module_id\n self._module_definition_by_model[loaded_model] = loaded_definition\n return load_module_command", "title": "" }, { "docid": "629fafd27a586c6139a88cadf9982d43", "score": "0.4827061", "text": "def commands_from_messages(self, messages):", "title": "" }, { "docid": "e561bfd11af27a46ba79d376a734268a", "score": "0.47645253", "text": "def _unpack_command(self, given_buffer):\r\n assert self._is_client_side is not None, \"Ambiguous connection state\"\r\n\r\n if not given_buffer:\r\n cmd_type = None\r\n cmd_args = None\r\n cmd_len = 0\r\n elif given_buffer[0] == NULL_CHAR:\r\n # We'll be expecting a response if we know we're a client side command\r\n is_response = bool(self._is_client_side)\r\n cmd_type, cmd_args, cmd_len = parse_binary_command(given_buffer, is_response=is_response)\r\n else:\r\n cmd_type, cmd_args, cmd_len = parse_text_command(given_buffer)\r\n\r\n if _DEBUG_MODE_ and cmd_type is not None:\r\n gearman_logger.debug('%s - Recv - %s - %r', hex(id(self)), get_command_name(cmd_type), cmd_args)\r\n\r\n return cmd_type, cmd_args, cmd_len", "title": "" }, { "docid": "d850e6943a6ea8f8e75483a0e55c57eb", "score": "0.47510493", "text": "def process_command(self, message: Message) -> Response:\n # This worker must be in status processing or preparing.\n # All messages from workers not in this status will be discarted and\n # error will be returned.\n if self.worker.status not in [\n Worker.STATUS_PROCESSING,\n Worker.STATUS_PREPARING,\n ]:\n self._log_error(\n f\"Wrong worker status: {self.worker.status} for peer with id {self.data_id}.\"\n )\n return message.respond_error(f\"Wrong worker status: {self.worker.status}\")\n\n command_name = message.command_name\n handler_name = f\"handle_{command_name}\"\n 
logger.debug(__(\"Message for handler {} received.\", handler_name))\n handler = plugin_manager.get_handler(command_name)\n if not handler:\n error = f\"No command handler for '{command_name}'.\"\n self._log_error(error, save_to_data_object=False)\n return message.respond_error(error)\n\n # Read sequence number and refresh data object if it differs.\n if self.expected_sequence_number != message.sequence_number:\n try:\n self.data.refresh_from_db()\n self.worker.refresh_from_db()\n except:\n self._log_exception(\"Unable to refresh data object\")\n return message.respond_error(\"Unable to refresh the data object\")\n\n if self.worker.status != Worker.STATUS_PROCESSING:\n self.worker.status = Worker.STATUS_PROCESSING\n self.worker.save(update_fields=[\"status\"])\n\n if self.data.started is None:\n self.data.started = now()\n self.data.save(update_fields=[\"started\"])\n\n self.expected_sequence_number = message.sequence_number + 1\n try:\n with PrioritizedBatcher.global_instance():\n logger.debug(__(\"Invoking handler {}.\", handler_name))\n result = handler(message, self)\n # Set status of the response to ERROR when data object status\n # is Data.STATUS_ERROR. Such response will trigger terminate\n # procedure in the processing container and stop processing.\n if self.data.status == Data.STATUS_ERROR:\n result.type_data = ResponseStatus.ERROR.value\n return result\n except ValidationError as err:\n error = (\n f\"Validation error when saving Data object of process \"\n f\"'{self.data.process.slug}' ({handler_name}): \"\n f\"{err}\"\n )\n self._log_exception(error)\n return message.respond_error(\"Validation error\")\n except Exception as err:\n error = f\"Error in command handler '{handler_name}': {err}\"\n self._log_exception(error)\n return message.respond_error(f\"Error in command handler '{handler_name}'\")", "title": "" }, { "docid": "4eb78ffa4143648152571e82d8d104e4", "score": "0.47319993", "text": "async def on_protocol_event(self, cmd: typing.Any):\n pass", "title": "" }, { "docid": "3c0305ffa3e310c111714edc67eeaa27", "score": "0.4724909", "text": "def processStateChange(self, newState):\n\n if newState == 'leader':\n if self.isLeader == False or self.isLeader is None:\n self.isLeader = True\n self.addEntryEvent.set()\n self.executeCommand(NO_OP, [], None)\n self.stateNotifier(newState, self.serverID)\n\n elif newState == 'follower' or newState == 'candidate':\n if self.isLeader == True or self.isLeader is None:\n self.isLeader = False\n self.commandBuffer = {}\n self.stateNotifier(newState, self.serverID)", "title": "" }, { "docid": "11d4adb69990d5f2816783ab081c7fe8", "score": "0.4724318", "text": "def callback_service_server(self,command,result):\n pass\n #if result is None:\n #command.protocol.incoming(command.wrapper.request_command())", "title": "" }, { "docid": "ee70d4093ebc964319101feefb0620a6", "score": "0.46985444", "text": "async def test_execute_command_creates_command(\n engine: ProtocolEngine,\n mock_state_store: MagicMock\n) -> None:\n req = MoveToWellRequest(pipetteId=\"123\", labwareId=\"abc\", wellName=\"A1\")\n\n await engine.execute_command(req, command_id=\"unique-id\")\n mock_state_store.handle_command.assert_any_call(\n RunningCommand(\n created_at=cast(datetime, CloseToNow()),\n started_at=cast(datetime, CloseToNow()),\n request=req\n ),\n command_id=\"unique-id\"\n )", "title": "" }, { "docid": "1874b96d08e90c944287ba7107aede68", "score": "0.4696515", "text": "def load(self) -> None:\n for json_cmd in self._protocol.commands:\n translated_items = 
self._command_translator.translate(json_cmd)\n for cmd in translated_items:\n self._protocol_engine.add_command(cmd)", "title": "" }, { "docid": "c960bee07df668042d64dc6b6efba985", "score": "0.4687248", "text": "def _unpack_command(self, given_buffer):\n assert self._is_client_side is not None, \"Ambiguous connection state\"\n\n if not given_buffer:\n cmd_type = None\n cmd_args = None\n cmd_len = 0\n elif struct.pack(\"B\", given_buffer[0]) == NULL_CHAR:\n # We'll be expecting a response if we know we're a client side command\n is_response = bool(self._is_client_side)\n cmd_type, cmd_args, cmd_len = parse_binary_command(given_buffer, is_response=is_response)\n else:\n cmd_type, cmd_args, cmd_len = parse_text_command(given_buffer)\n\n if _DEBUG_MODE_ and cmd_type is not None:\n gearman_logger.debug('%s - Recv - %s - %r', hex(id(self)), get_command_name(cmd_type), cmd_args)\n\n return cmd_type, cmd_args, cmd_len", "title": "" }, { "docid": "f0111e9e6dfe8a735c565d7dbdb4fa45", "score": "0.46757856", "text": "def cmd(message):\n try:\n worker = hook.local_worker\n if not len(worker.current_objects()):\n recover_objects(hook)\n\n # Decode Message\n encoded_message = message[\"message\"]\n decoded_message = binascii.unhexlify(encoded_message[2:-1])\n\n # Process and encode response\n decoded_response = worker._recv_msg(decoded_message)\n encoded_response = str(binascii.hexlify(decoded_response))\n\n snapshot(worker)\n\n emit(\"/cmd-response\", encoded_response)\n except Exception as e:\n emit(\"/cmd-response\", str(e))", "title": "" }, { "docid": "e61c0966a7afbe7bce046df95ee719f5", "score": "0.46696317", "text": "def get_cmd_dict(self) -> dict[ProtocolMessages, Callable[[str], None]]:\n return {\n ProtocolMessages.GET_NEXT_BLOCKS: self.handle_get_next_blocks,\n ProtocolMessages.BLOCKS: self.handle_blocks,\n ProtocolMessages.BLOCKS_END: self.handle_blocks_end,\n ProtocolMessages.GET_BEST_BLOCK: self.handle_get_best_block,\n ProtocolMessages.BEST_BLOCK: self.handle_best_block,\n ProtocolMessages.GET_TRANSACTIONS_BFS: self.handle_get_transactions_bfs,\n ProtocolMessages.TRANSACTION: self.handle_transaction,\n ProtocolMessages.TRANSACTIONS_END: self.handle_transactions_end,\n ProtocolMessages.GET_PEER_BLOCK_HASHES: self.handle_get_peer_block_hashes,\n ProtocolMessages.PEER_BLOCK_HASHES: self.handle_peer_block_hashes,\n ProtocolMessages.STOP_BLOCK_STREAMING: self.handle_stop_block_streaming,\n ProtocolMessages.GET_TIPS: self.handle_get_tips,\n ProtocolMessages.TIPS: self.handle_tips,\n ProtocolMessages.TIPS_END: self.handle_tips_end,\n # XXX: overriding ReadyState.handle_error\n ProtocolMessages.ERROR: self.handle_error,\n ProtocolMessages.GET_DATA: self.handle_get_data,\n ProtocolMessages.DATA: self.handle_data,\n ProtocolMessages.RELAY: self.handle_relay,\n ProtocolMessages.NOT_FOUND: self.handle_not_found,\n }", "title": "" }, { "docid": "9871be1ef38ea4626536477bd5734da7", "score": "0.4652895", "text": "def _mapping(self):\n return [('message.received', self.on_new_message)]", "title": "" }, { "docid": "ece4599e4af435a5950d5fd7197d82b6", "score": "0.46455175", "text": "def messages_from_commands(self, commands):", "title": "" }, { "docid": "39e363b11da75a074e9e47cc417fbbbd", "score": "0.45871195", "text": "def new_command(self, msg):\n if not self.past_state.mouth == msg.mouth:\n self.coms.sendData([0] + self.bytize(msg.mouth))\n if not self.past_state.right_eye == msg.right_eye:\n self.coms.sendData([1] + self.bytize(msg.right_eye))\n if not self.past_state.left_eye == msg.left_eye:\n 
self.coms.sendData([2] + self.bytize(msg.left_eye))\n if not self.past_state.mouth_brightness == msg.mouth_brightness:\n self.coms.sendData([3, msg.mouth_brightness])\n if not self.past_state.right_eye_brightness == msg.right_eye_brightness:\n self.coms.sendData([4, msg.right_eye_brightness])\n if not self.past_state.left_eye_brightness == msg.left_eye_brightness:\n self.coms.sendData([5, msg.left_eye_brightness])\n\n self.past_state = msg", "title": "" }, { "docid": "a2641baf0f25b2620318a465c6428e2b", "score": "0.45748067", "text": "async def on_socket_response(self, msg):\n if msg[\"t\"] != \"INTERACTION_CREATE\":\n return\n\n to_use = msg[\"d\"]\n\n if to_use[\"data\"][\"name\"] in self.commands:\n\n ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)\n cmd_name = to_use[\"data\"][\"name\"]\n\n if cmd_name not in self.commands and cmd_name in self.subcommands:\n return await self.handle_subcommand(ctx, to_use)\n\n selected_cmd = self.commands[to_use[\"data\"][\"name\"]]\n\n if (\n selected_cmd.allowed_guild_ids\n and ctx.guild_id not in selected_cmd.allowed_guild_ids\n ):\n return\n\n if selected_cmd.has_subcommands and not selected_cmd.func:\n return await self.handle_subcommand(ctx, to_use)\n\n if \"options\" in to_use[\"data\"]:\n for x in to_use[\"data\"][\"options\"]:\n if \"value\" not in x:\n return await self.handle_subcommand(ctx, to_use)\n\n # This is to temporarily fix Issue #97, that on Android device\n # does not give option type from API.\n temporary_auto_convert = {}\n for x in selected_cmd.options:\n temporary_auto_convert[x[\"name\"].lower()] = x[\"type\"]\n\n args = (\n await self.process_options(\n ctx.guild,\n to_use[\"data\"][\"options\"],\n selected_cmd.connector,\n temporary_auto_convert,\n )\n if \"options\" in to_use[\"data\"]\n else {}\n )\n\n self._discord.dispatch(\"slash_command\", ctx)\n\n await self.invoke_command(selected_cmd, ctx, args)", "title": "" }, { "docid": "ed94e37c65a5cc519ff6cd8985494060", "score": "0.45665738", "text": "def do_send_command_to_device(self, lab_session_id, command):\n if self.debug:\n print \"[Aquarium*] do_send_command_to_device called: %s\" % command\n\n if command == 'get-status':\n return json.dumps(self._status.get_status())\n elif command.startswith('ball:'):\n try:\n _, ball, on = command.split(':')\n except:\n traceback.print_exc()\n return \"ERROR:Invalid ball command\"\n\n if not ball.lower() in COLORS:\n return \"ERROR:Invalid ball color\"\n\n if not on.lower() in ('true', 'false'):\n return \"ERROR:Invalid state\"\n\n on = on.lower() == 'true'\n ball = ball.lower()\n \n self._status.move(ball, on)\n\n return json.dumps(self._status.get_status())\n elif command == 'process':\n return json.dumps(self.proxy.process_image())\n\n return 'ERROR:Invalid command'", "title": "" }, { "docid": "891bfeaf07cd6ea6baaaa7fa588d8922", "score": "0.45612594", "text": "async def test_execute_command_adds_result_to_state(\n engine: ProtocolEngine,\n mock_handlers: AsyncMock,\n mock_state_store: MagicMock,\n now: datetime,\n) -> None:\n result = MoveToWellResult()\n mock_req = MagicMock(spec=MoveToWellRequest)\n mock_impl = AsyncMock(spec=MoveToWellImplementation)\n\n mock_req.get_implementation.return_value = mock_impl\n mock_impl.create_command.return_value = PendingCommand(\n request=mock_req,\n created_at=now\n )\n mock_impl.execute.return_value = result\n\n cmd = await engine.execute_command(mock_req, command_id=\"unique-id\")\n\n assert cmd == CompletedCommand(\n created_at=cast(datetime, CloseToNow()),\n 
started_at=cast(datetime, CloseToNow()),\n completed_at=cast(datetime, CloseToNow()),\n request=mock_req,\n result=result,\n )\n\n mock_state_store.handle_command.assert_called_with(\n cmd,\n command_id=\"unique-id\",\n )", "title": "" }, { "docid": "e0939f2556895f4eaefc3cbbf153b46e", "score": "0.4560034", "text": "async def check_edited_commands(bot, before, after):\n if before.content != after.content and before.id in bot.edit_dictionary:\n message_reference = bot.edit_dictionary.pop(before.id)\n await bot.on_message(after, replacement_message=message_reference)", "title": "" }, { "docid": "818e548f9df201eb35c3fe6260dff338", "score": "0.45238695", "text": "def query_on_command(self, command):\n answer_dict = {}\n # -------------------------------------------------------------------------------------------------------------------------\n # commands, like for adjusting a set temperature on the device\n # commands are received via zmq tcp, and executed here\n # examples:\n # if 'configTempLimit' in command:\n # self.configTempLimit(command['configTempLimit'])\n try:\n self.act_on_command(command)\n answer_dict.update(\n dict(\n Current_A=self.Keithley6221.source_current,\n OutputOn=self.getstatus(),\n )\n )\n answer_dict[\"OK\"] = True\n\n finally:\n return answer_dict\n # -------------------------------------------------------------------------------------------------------------------------", "title": "" }, { "docid": "94dcc6b4327f2fcb2655d67a856659ee", "score": "0.4521917", "text": "def _register_application_command(self, command):\n for sync_id in command._iter_sync_ids():\n if sync_id == SYNC_ID_NON_GLOBAL:\n is_non_global = True\n else:\n is_non_global = False\n \n try:\n command_state = self._command_states[sync_id]\n except KeyError:\n command_state = self._command_states[sync_id] = CommandState(is_non_global)\n \n command, change_identifier = command_state.add(command)\n if change_identifier == COMMAND_STATE_IDENTIFIER_ADDED:\n self._sync_done.discard(sync_id)\n self._sync_should.add(sync_id)\n continue\n \n if change_identifier == COMMAND_STATE_IDENTIFIER_ACTIVE:\n continue\n \n if change_identifier == COMMAND_STATE_IDENTIFIER_KEPT:\n for application_command_id in command._iter_application_command_ids():\n self.command_id_to_command[application_command_id] = command\n continue\n \n if change_identifier == COMMAND_STATE_IDENTIFIER_NON_GLOBAL:\n continue", "title": "" }, { "docid": "fa9bd909233db1be0437e037bc822c4e", "score": "0.45193762", "text": "def _message(self, m) :\n if m.message.startswith(self.config[\"command\"]) :\n parts = m.message.split(\" \", 1)\n cmd = parts[0][len(self.config[\"command\"]):]\n flags = parts[1] if (len(parts) > 1) else False\n m.commander(cmd, flags)\n\n self.bot.triggerCommand(self, m)", "title": "" }, { "docid": "c091aaa2a60746d090c925e082eb9985", "score": "0.45140526", "text": "def _convert(e):\n out = swarming_pb2.BotEvent()\n e.to_proto(out)\n # This is fine because bot_id cannot contain ':'. 
See\n # config.DIMENSION_KEY_RE.\n bq_key = e.id + ':' + e.ts.strftime(fmt)\n return (e.key.urlsafe(), bq_key, out)", "title": "" }, { "docid": "1d0564c54da90685b440c393dab7ffa8", "score": "0.44978523", "text": "def test_migration_job_converts_old_question(self):\n # Generate question with old(v27) state data.\n self.save_new_question_with_state_data_schema_v27(\n self.QUESTION_ID, self.albert_id, [self.skill_id])\n question = (\n question_services.get_question_by_id(self.QUESTION_ID))\n self.assertEqual(question.question_state_data_schema_version, 30)\n\n # Start migration job.\n job_id = (\n question_jobs_one_off.QuestionMigrationOneOffJob.create_new())\n question_jobs_one_off.QuestionMigrationOneOffJob.enqueue(job_id)\n self.process_and_flush_pending_tasks()\n\n # Verify the question migrates correctly.\n updated_question = (\n question_services.get_question_by_id(self.QUESTION_ID))\n self.assertEqual(\n updated_question.question_state_data_schema_version,\n feconf.CURRENT_STATE_SCHEMA_VERSION)\n\n output = question_jobs_one_off.QuestionMigrationOneOffJob.get_output(job_id) # pylint: disable=line-too-long\n expected = [[u'question_migrated',\n [u'1 questions successfully migrated.']]]\n self.assertEqual(expected, [ast.literal_eval(x) for x in output])", "title": "" }, { "docid": "7df3a5f1b44466a400c4836c84392fcb", "score": "0.44797987", "text": "def on_receive(self, message):\n command = message.get('command')\n result = None\n if command is None:\n logging.error('No command specified')\n elif command == k.CMD_STOP_ACTOR:\n self.stop()\n elif command == k.CMD_START_MONITOR:\n result = self.start_monitor()\n elif command == k.CMD_STOP_MONITOR:\n result = self.stop_monitor()\n elif command == k.CMD_GET_PROCESSES_NAMES:\n result = self.get_processes_names()\n elif command == k.CMD_GET_PROCESS_INFO:\n result = self.get_process_info(message.get('process_name'))\n elif command == k.CMD_GET_ALL_PROCESSES_INFO:\n result = self.get_all_processes_info()\n elif command == k.CMD_REFRESH_ALL_PROCESSES_INFO:\n result = self.refresh_all_processes_info()\n elif command == k.CMD_GET_INFO:\n result = self.get_info()\n elif command == k.CMD_TIMER:\n result = self.refresh_all_processes_info()\n elif command in [k.CMD_STOP_PROC, k.CMD_START_PROC]:\n result = self.start_stop_process(message)\n elif command in [k.CMD_RESTART_ALL_PROC, k.CMD_RESTART_GLOB]:\n result = self.restart_all_processes()\n elif command in [k.CMD_START_ALL_PROC, k.CMD_START_GLOB]:\n result = self.start_all_processes()\n elif command in [k.CMD_STOP_ALL_PROC, k.CMD_STOP_GLOB]:\n result = self.stop_all_processes()\n else:\n err_message = 'Invalid command received: {}'.format(command)\n result = {\n 'status': k.ERROR,\n 'message': err_message\n }\n logging.error('Actor [%s]: %s', self.server_name, err_message)\n return result", "title": "" }, { "docid": "8acbad10cba040b481c756b0c83d8caa", "score": "0.44729173", "text": "def send_command(self, cmd):\n\n commands = {\n \"play\": \"{} play\",\n \"pause\": \"{} pause\",\n \"next\": \"{} playlist index +1\",\n \"previous\": \"{} playlist index +1\"\n }\n\n if cmd in commands:\n lms_cmd = commands[cmd].format(self.playerid)\n else:\n logging. 
error(\"command %s not implemented\", cmd)\n return\n\n self.lms.send(lms_cmd)", "title": "" }, { "docid": "85e8aac0cb7972829398b5bd11f42293", "score": "0.44727427", "text": "def process(self, ident, _from, to, msg):\n cmd = self.get_cmd(msg)\n if cmd != \"\":\n if hasattr(self, \"cmd_\"+cmd):\n getattr(self, \"cmd_\"+cmd)(ident, _from, to, msg)\n return None", "title": "" }, { "docid": "3826cf85f09d54c6bdf297c25d55cc69", "score": "0.44576722", "text": "def command_map(self) -> dict:\n command_map = super().command_map\n command_map.update(\n {\n 'createconfig': self.process_create_config_command,\n 'deleteconfig': self.process_delete_config_command,\n }\n )\n return command_map", "title": "" }, { "docid": "468b17ec603dd48506320212e970d395", "score": "0.4440345", "text": "def map_bidi(self, src_port, dst_port, command_logger):\n raise NotImplementedError", "title": "" }, { "docid": "5abc35340a002899590b180854ba943c", "score": "0.4435138", "text": "def react_to_command(data):\n\n decoded_data = json.loads(data)\n decoded_data = dict(ast.literal_eval(decoded_data))\n srv_cmd = decoded_data['command']\n\n if (srv_cmd != 'enable') and (srv_cmd != 'shutdown'):\n raise Exception('Incorrect command type')\n\n voltage = 'LOW'\n if srv_cmd == 'enable':\n voltage = 'HIGH'\n print('EXCELLENT', voltage)\n # fan_command(voltage)", "title": "" }, { "docid": "36ca14dde902299e467ad4acb525a479", "score": "0.44210285", "text": "def read_eieio_command_message(data, offset):\n command_header = EIEIOCommandHeader.from_bytestring(data, offset)\n command_number = command_header.command\n\n if command_number == EIEIO_COMMAND_IDS.DATABASE.value:\n return NotificationProtocolDatabaseLocation.from_bytestring(\n command_header, data, offset + 2)\n # Fill in buffer area with padding\n elif command_number == EIEIO_COMMAND_IDS.EVENT_PADDING.value:\n return PaddingRequest()\n # End of all buffers, stop execution\n elif command_number == EIEIO_COMMAND_IDS.EVENT_STOP.value:\n return EventStopRequest()\n # Stop complaining that there is SDRAM free space for buffers\n elif command_number == EIEIO_COMMAND_IDS.STOP_SENDING_REQUESTS.value:\n return StopRequests()\n # Start complaining that there is SDRAM free space for buffers\n elif command_number == EIEIO_COMMAND_IDS.START_SENDING_REQUESTS.value:\n return StartRequests()\n # SpiNNaker requesting new buffers for spike source population\n elif command_number == EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_BUFFERS.value:\n return SpinnakerRequestBuffers.from_bytestring(\n command_header, data, offset + 2)\n # Buffers being sent from host to SpiNNaker\n elif command_number == EIEIO_COMMAND_IDS.HOST_SEND_SEQUENCED_DATA.value:\n return HostSendSequencedData.from_bytestring(\n command_header, data, offset + 2)\n # Buffers available to be read from a buffered out vertex\n elif command_number == EIEIO_COMMAND_IDS.SPINNAKER_REQUEST_READ_DATA.value:\n return SpinnakerRequestReadData.from_bytestring(\n command_header, data, offset + 2)\n # Host confirming data being read form SpiNNaker memory\n elif command_number == EIEIO_COMMAND_IDS.HOST_DATA_READ.value:\n return HostDataRead.from_bytestring(\n command_header, data, offset + 2)\n return EIEIOCommandMessage(command_header, data, offset + 2)", "title": "" }, { "docid": "6e8065fabdd088863e52ed03b6ae9fa4", "score": "0.44182006", "text": "def _get_command_map(cls) -> typing.Dict[HardwareEnum, str]:\n raise NotImplementedError()", "title": "" }, { "docid": "75e5f9fcebdde1a322fe5219b8149941", "score": "0.44180557", "text": "def 
_build_cmd_dict(self):\n self._cmd_dict = ProtocolCommandDict()", "title": "" }, { "docid": "bcea53470cbad6ec4aa92926f49a4877", "score": "0.44165888", "text": "def interpret_message(self, msg):\r\n obj = None\r\n (command, ) = struct.unpack('B', msg[0:struct.calcsize('B')])\r\n cur_msg_index = struct.calcsize('B')\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface : msg length:{}\".format(len(msg)))\r\n if command == self.ACK:\r\n obj = None\r\n elif command == self.CONTINUE_SIM:\r\n obj = None\r\n elif command == self.HEART_BEAT:\r\n (sim_status,) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n (sim_time_ms, ) = struct.unpack('I', msg[cur_msg_index:cur_msg_index + struct.calcsize('I')])\r\n obj = HeartBeat(simulation_status=sim_status, simulation_time_ms=sim_time_ms)\r\n elif command == self.SET_CONTROLLER_PARAMETER:\r\n obj = self.interpret_set_controller_parameter_command(msg)\r\n elif command == self.SET_DATA_LOG_PERIOD_MS:\r\n (obj, ) = struct.unpack('I', msg[cur_msg_index:cur_msg_index + struct.calcsize('I')])\r\n elif command in (self.SURROUNDINGS_DEF, self.SURROUNDINGS_ADD):\r\n (item_type, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n if item_type == self.S_ROAD:\r\n if self.debug_mode:\r\n print(\"Road\")\r\n obj = WebotsRoad()\r\n (obj.position[0],\r\n obj.position[1],\r\n obj.position[2],\r\n obj.rotation[0],\r\n obj.rotation[1],\r\n obj.rotation[2],\r\n obj.rotation[3],\r\n obj.length,\r\n obj.width,\r\n road_type,\r\n obj.number_of_lanes,\r\n obj.right_border_bounding_object,\r\n obj.left_border_bounding_object) = \\\r\n struct.unpack('dddddddddBB??', msg[cur_msg_index:cur_msg_index + struct.calcsize('dddddddddBB??')])\r\n cur_msg_index += struct.calcsize('dddddddddBB??')\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: rd length: {}\".format(obj.length))\r\n if road_type == self.STRAIGHT_ROAD:\r\n obj.road_type = \"StraightRoadSegment\"\r\n else:\r\n print(\"SimulationMessageInterface: Unknown SURROUNDINGS DEF {}\".format(item_type))\r\n elif command in (self.DUMMY_ACTORS_DEF, self.DUMMY_ACTORS_ADD, self.VUT_DEF, self.VUT_ADD):\r\n (item_type, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n if (((command in (self.DUMMY_ACTORS_DEF, self.DUMMY_ACTORS_ADD)) and item_type == self.D_VHC)\r\n or ((command in (self.VUT_DEF, self.VUT_ADD)) and item_type == self.VUT_VHC)):\r\n obj = WebotsVehicle()\r\n (obj.current_position[0],\r\n obj.current_position[1],\r\n obj.current_position[2],\r\n obj.current_rotation[0],\r\n obj.current_rotation[1],\r\n obj.current_rotation[2],\r\n obj.current_rotation[3],\r\n obj.id,\r\n vhc_model,\r\n obj.controller) = struct.unpack('dddddddB25s30s',\r\n msg[cur_msg_index:cur_msg_index + struct.calcsize('dddddddB25s30s')])\r\n cur_msg_index += struct.calcsize('dddddddB25s30s')\r\n vhc_model = vhc_model.rstrip(' \\t\\r\\n\\0') # Remove null characters at the end\r\n vhc_model = vhc_model.strip() # Remove space characters\r\n obj.set_vehicle_model(vhc_model)\r\n if command in (self.DUMMY_ACTORS_DEF, self.DUMMY_ACTORS_ADD):\r\n obj.def_name = \"DVHC_\" + str(obj.id)\r\n else:\r\n obj.def_name = \"VUT_\" + str(obj.id)\r\n\r\n obj.controller = obj.controller.rstrip('\\0')\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface : Adding vhc: Controller: {}, length: {}\".format(\r\n obj.controller, 
len(obj.controller)))\r\n\r\n # Read Vehicle Parameters to be used in proto settings\r\n (num_of_params, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Adding vhc: numOf vehicle Params {}\".format(num_of_params))\r\n for i in range(num_of_params):\r\n (length_of_setting, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n param_name_str = ''\r\n if length_of_setting > 0:\r\n param_name_str += msg[cur_msg_index:cur_msg_index + length_of_setting]\r\n cur_msg_index += length_of_setting\r\n (length_of_setting, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n param_str = ''\r\n if length_of_setting > 0:\r\n param_str += msg[cur_msg_index:cur_msg_index + length_of_setting]\r\n cur_msg_index += length_of_setting\r\n obj.vehicle_parameters.append((param_name_str, param_str))\r\n\r\n # Read Controller Arguments additional to vehicle type\r\n (num_of_contr_arguments, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Adding vhc: num_of_contr_arguments {}\".format(\r\n num_of_contr_arguments))\r\n for i in range(num_of_contr_arguments):\r\n (length_of_setting, ) = struct.unpack('I', msg[cur_msg_index:cur_msg_index + struct.calcsize('I')])\r\n cur_msg_index += struct.calcsize('I')\r\n if length_of_setting > 0:\r\n argument_str = msg[cur_msg_index:cur_msg_index + length_of_setting]\r\n cur_msg_index += length_of_setting\r\n obj.controller_arguments.append(argument_str)\r\n\r\n # Read signals\r\n (num_of_signals, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n obj.signal = []\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Adding vhc: num_of_signals {}\".format(num_of_signals))\r\n for i in range(0, num_of_signals):\r\n (signal_type, interpolation_type, signal_ref_index, signal_ref_field, signal_val_count) = \\\r\n struct.unpack('BBBBh', msg[cur_msg_index:cur_msg_index + struct.calcsize('BBBBh')])\r\n cur_msg_index += struct.calcsize('BBBBh')\r\n signal_values = []\r\n reference_values = []\r\n for j in range(0, signal_val_count):\r\n (sig_val, ) = struct.unpack('d', msg[cur_msg_index:cur_msg_index + struct.calcsize('d')])\r\n signal_values.append(sig_val)\r\n cur_msg_index += struct.calcsize('d')\r\n for j in range(0, signal_val_count):\r\n ref_val = struct.unpack('d', msg[cur_msg_index:cur_msg_index + struct.calcsize('d')])\r\n reference_values.append(ref_val)\r\n cur_msg_index += struct.calcsize('d')\r\n obj.signal.append(STaliroSignal(signal_type,\r\n interpolation_type,\r\n signal_ref_index,\r\n signal_ref_field,\r\n signal_values,\r\n reference_values))\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Added Signal\")\r\n\r\n # Read Sensors\r\n (num_of_sensors, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Adding vhc: num_of_sensors {}\".format(num_of_sensors))\r\n cur_msg_index += struct.calcsize('B')\r\n obj.sensor_array = [WebotsSensor() for i in range(num_of_sensors)]\r\n for i in range(0, num_of_sensors):\r\n (obj.sensor_array[i].sensor_location, ) = \\\r\n 
struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n (len_of_type, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n obj.sensor_array[i].sensor_type = msg[cur_msg_index:cur_msg_index + len_of_type]\r\n cur_msg_index += len_of_type\r\n (len_of_field_name, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n field_index = 0\r\n while len_of_field_name > 0:\r\n temp_field_name = msg[cur_msg_index:cur_msg_index + len_of_field_name]\r\n cur_msg_index += len_of_field_name\r\n (len_of_field_val, ) = \\\r\n struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n temp_field_val = msg[cur_msg_index:cur_msg_index + len_of_field_val]\r\n cur_msg_index += len_of_field_val\r\n obj.sensor_array[i].add_sensor_field(temp_field_name, temp_field_val)\r\n (len_of_field_name, ) = \\\r\n struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n field_index += 1\r\n\r\n # Read Controller Parameters (NOT arguments!)\r\n (num_of_control_params, ) = struct.unpack('B', msg[cur_msg_index:cur_msg_index + struct.calcsize('B')])\r\n cur_msg_index += struct.calcsize('B')\r\n obj.controller_parameters = []\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Adding vhc : num_of_control_params {}\".format(\r\n num_of_control_params))\r\n for i in range(0, num_of_control_params):\r\n (controller_param, param_msg_size) = \\\r\n self.controller_comm_interface.interpret_controller_parameter_message(msg[cur_msg_index:])\r\n cur_msg_index += param_msg_size\r\n controller_param.set_vehicle_id(obj.id)\r\n obj.controller_parameters.append(controller_param)\r\n if self.debug_mode:\r\n print(\"SimulationMessageInterface: Added Controller Parameter.\")\r\n else:\r\n print('SimulationMessageInterface: UNEXPECTED DUMMY ACTOR')\r\n elif command == self.SET_ROBUSTNESS_TYPE:\r\n (obj, ) = struct.unpack('I', msg[cur_msg_index:cur_msg_index + struct.calcsize('I')])\r\n elif command == self.ADD_DATA_LOG_DESCRIPTION:\r\n obj = ItemDescription()\r\n (obj.item_type, obj.log_item_index, obj.item_state_index) = \\\r\n struct.unpack('BBB', msg[cur_msg_index:cur_msg_index + struct.calcsize('BBB')])\r\n elif command == self.START_SIM:\r\n obj = SimData()\r\n (obj.simulation_duration_ms, obj.simulation_step_size_ms, obj.simulation_execution_mode) = \\\r\n struct.unpack('IIB', msg[cur_msg_index:cur_msg_index + struct.calcsize('IIB')])\r\n print(\"SimulationMessageInterface: Simulation Duration: {} step size: {} type: {}\".format(\r\n obj.simulation_duration_ms,\r\n obj.simulation_step_size_ms,\r\n obj.simulation_execution_mode))\r\n elif command == self.RELOAD_WORLD:\r\n obj = None\r\n print(\"SimulationMessageInterface: Revert world\")\r\n elif command == self.GET_ROBUSTNESS:\r\n obj = None\r\n elif command == self.GET_DATA_LOG_INFO:\r\n obj = None\r\n elif command == self.GET_DATA_LOG:\r\n (log_start_index, log_end_index) = \\\r\n struct.unpack('II', msg[cur_msg_index:cur_msg_index + struct.calcsize('II')])\r\n obj = (log_start_index, log_end_index)\r\n elif command == self.DATA_LOG_INFO:\r\n (num_log, size_of_each_log) = struct.unpack('II', msg[cur_msg_index:cur_msg_index + struct.calcsize('II')])\r\n obj = (num_log, size_of_each_log)\r\n elif command == self.DATA_LOG:\r\n (num_data, ) = 
struct.unpack('I', msg[cur_msg_index:cur_msg_index + struct.calcsize('I')])\r\n cur_msg_index += struct.calcsize('I')\r\n obj = np.fromstring(msg[cur_msg_index:], dtype='d%s' % num_data)\r\n elif command == self.SET_HEART_BEAT_CONFIG:\r\n obj = HeartBeatConfig()\r\n (obj.sync_type, obj.period_ms) = \\\r\n struct.unpack('II', msg[cur_msg_index:cur_msg_index + struct.calcsize('II')])\r\n print(\"Heart Beat Type: {} Period: {}\".format(obj.sync_type, obj.period_ms))\r\n elif command == self.SET_VIEW_FOLLOW_ITEM:\r\n obj = ItemDescription()\r\n (obj.item_type, obj.item_index) = \\\r\n struct.unpack('BB', msg[cur_msg_index:cur_msg_index + struct.calcsize('BB')])\r\n elif command == self.ROBUSTNESS:\r\n (obj, ) = struct.unpack('d', msg[cur_msg_index:cur_msg_index + struct.calcsize('d')])\r\n else:\r\n print(\"SimulationMessageInterface: Unknown COMMAND {}\".format(command))\r\n\r\n ret_cmd = SimulationCommand(command, obj)\r\n return ret_cmd", "title": "" }, { "docid": "08f4335bf47a7380762f2d653c9c140b", "score": "0.4411586", "text": "def newCmd(self, cmd):\n\n self.cmdLog.info('new cmd: %s' % (cmd))\n\n # Empty cmds are OK; send an empty response...\n if len(cmd.rawCmd) == 0:\n cmd.finish('')\n return None\n\n if self.runInReactorThread:\n self.runActorCmd(cmd)\n else:\n self.commandQueue.put(cmd)\n\n return self", "title": "" }, { "docid": "7bfb3143aad0040974ecb40f48e1c870", "score": "0.44069836", "text": "def _map_labware_load(\n self, labware_load_info: LegacyLabwareLoadInfo\n ) -> pe_commands.Command:\n now = ModelUtils.get_timestamp()\n count = self._command_count[\"LOAD_LABWARE\"]\n slot = labware_load_info.deck_slot\n location: pe_types.LabwareLocation\n if labware_load_info.on_module:\n location = pe_types.ModuleLocation.construct(\n moduleId=self._module_id_by_slot[slot]\n )\n else:\n location = pe_types.DeckSlotLocation.construct(slotName=slot)\n\n command_id = f\"commands.LOAD_LABWARE-{count}\"\n labware_id = f\"labware-{count}\"\n\n load_labware_command = pe_commands.LoadLabware.construct(\n id=command_id,\n key=command_id,\n status=pe_commands.CommandStatus.SUCCEEDED,\n createdAt=now,\n startedAt=now,\n completedAt=now,\n params=pe_commands.LoadLabwareParams.construct(\n location=location,\n loadName=labware_load_info.labware_load_name,\n namespace=labware_load_info.labware_namespace,\n version=labware_load_info.labware_version,\n displayName=labware_load_info.labware_display_name,\n ),\n result=pe_commands.LoadLabwareResult.construct(\n labwareId=labware_id,\n definition=LabwareDefinition.parse_obj(\n labware_load_info.labware_definition\n ),\n offsetId=labware_load_info.offset_id,\n ),\n )\n\n self._command_count[\"LOAD_LABWARE\"] = count + 1\n if isinstance(location, pe_types.DeckSlotLocation):\n self._labware_id_by_slot[location.slotName] = labware_id\n elif isinstance(location, pe_types.ModuleLocation):\n self._labware_id_by_module_id[location.moduleId] = labware_id\n return load_labware_command", "title": "" }, { "docid": "7faaa06ff59e117decaf9f8862cd0193", "score": "0.4401838", "text": "def handle_new_data(self, par: Participant, data: bytes):\n msg_name, msg_data = pickle.loads(data)\n if msg_name == Info.TOGGLE_AUDIO:\n par.client_info.is_audio_on = not par.client_info.is_audio_on\n elif msg_name == Info.TOGGLE_VIDEO:\n par.client_info.is_video_on = not par.client_info.is_video_on\n else:\n # if the message isn't an opposite message,\n # handle_opposite_msg won't do anything\n self.handle_opposite_msg(par, msg_name, msg_data)\n\n packet = create_packet(data)\n 
self.broadcast(par, packet)", "title": "" }, { "docid": "d09977e8b36b78f1ae589d45f93c6be3", "score": "0.43974233", "text": "def message_callback(self, **payload):\n cmd = BotCommand.FromText(search=self._self_mention, payload=payload)\n if cmd.cmd:\n cb = self._known_cmd.get(cmd.cmd, None)\n if cb is not None:\n cb[0](cmd=cmd)\n else:\n self._boilerplate_callback(cmd=cmd)", "title": "" }, { "docid": "b1ef2d0f90dcf7f93e968ada9510eb5a", "score": "0.43966293", "text": "def translate_command(command):\n command = replace_command(command)\n return [TRANSLATIONS.get(chunk, chunk) for chunk in command if chunk]", "title": "" }, { "docid": "a44240eed0030345895a1e9457dfcf38", "score": "0.43922848", "text": "async def test_execute_command_adds_error_to_state(\n engine: ProtocolEngine,\n mock_handlers: AsyncMock,\n mock_state_store: MagicMock,\n now: datetime,\n) -> None:\n error = errors.ProtocolEngineError(\"oh no!\")\n mock_req = MagicMock(spec=MoveToWellRequest)\n mock_impl = AsyncMock(spec=MoveToWellImplementation)\n\n mock_req.get_implementation.return_value = mock_impl\n mock_impl.create_command.return_value = PendingCommand(\n request=mock_req,\n created_at=now\n )\n mock_impl.execute.side_effect = error\n\n cmd = await engine.execute_command(mock_req, command_id=\"unique-id\")\n\n assert cmd == FailedCommand(\n created_at=cast(datetime, CloseToNow()),\n started_at=cast(datetime, CloseToNow()),\n failed_at=cast(datetime, CloseToNow()),\n request=mock_req,\n error=error,\n )\n\n mock_state_store.handle_command.assert_called_with(\n cmd,\n command_id=\"unique-id\",\n )", "title": "" }, { "docid": "580df12cae900b313fa1f6a332b51b87", "score": "0.43900654", "text": "def execCommand(self, proto, cmd):\n raise Exception(\"not executing commands\")", "title": "" }, { "docid": "46d6201f97132c3ee08eed73d2d563cc", "score": "0.43896404", "text": "def forward_com(self, com):\n\n flag = {'from': 'Portable_A.V.A.', 'status': True, 'for': 'remote_com', 'reason': '', 'options': com}\n self.mqtt_receimitter.publish('Augmented/A.V.A.', flag)\n # time.sleep(1) # some check for waiting the result code will be here.", "title": "" }, { "docid": "7c753be181ba92ad88ad9f99aae0099e", "score": "0.43889514", "text": "def action_to_gym(self, action):\n return action['command']", "title": "" }, { "docid": "2adb28f58a4ed015cb80a91781ce0ce4", "score": "0.4383849", "text": "async def on_message(self, data: Mapping[str, Any]) -> None:\n if data[\"type\"] in [\"error\", \"success\"]:\n # This is a command response or error\n future = self.pending_commands.get(data[\"id\"])\n if future is None:\n raise ValueError(f\"No pending command with id {data['id']}\")\n if data[\"type\"] == \"success\":\n assert isinstance(data[\"result\"], dict)\n future.set_result(data[\"result\"])\n else:\n assert isinstance(data[\"error\"], str)\n assert isinstance(data[\"message\"], str)\n exception = from_error_details(data[\"error\"],\n data[\"message\"],\n data.get(\"stacktrace\"))\n future.set_exception(exception)\n elif data[\"type\"] == \"event\":\n # This is an event\n assert isinstance(data[\"method\"], str)\n assert isinstance(data[\"params\"], dict)\n\n listeners = self.event_listeners.get(data[\"method\"], [])\n if not listeners:\n listeners = self.event_listeners.get(None, [])\n for listener in listeners:\n await listener(data[\"method\"], data[\"params\"])\n else:\n raise ValueError(f\"Unexpected message: {data!r}\")", "title": "" }, { "docid": "f0de78500d06469fd34dd8547dab8e30", "score": "0.43800783", "text": "def mapCommand(self, 
cmdName):\n if cmdName in self._cmdMap:\n return self._cmdMap[cmdName]\n return cmdName", "title": "" }, { "docid": "c36e99acb619997ac6332607bd06572a", "score": "0.43769538", "text": "def msg_to_status(msg: str) -> Status:\n return {\n \"send_hash\": Status.JOINING_QUEUE,\n \"queue_full\": Status.QUEUE_FULL,\n \"estimation\": Status.IN_QUEUE,\n \"send_data\": Status.SENDING_DATA,\n \"process_starts\": Status.PROCESSING,\n \"process_generating\": Status.ITERATING,\n \"process_completed\": Status.FINISHED,\n \"progress\": Status.PROGRESS,\n }[msg]", "title": "" }, { "docid": "1bd86b63a52c2f437f592b8dc99fc86e", "score": "0.43643567", "text": "def truth_processor(self):\n while True:\n msg = yield self.comm_in.get()\n if msg.origin_id in self.current_message.keys():\n self.prev_message[msg.origin_id] = self.current_message[msg.origin_id]\n else:\n self.prev_message[msg.origin_id] = None\n\n self.current_message[msg.origin_id] = msg\n self.position_update.succeed(value=msg.origin_id)\n self.position_update = self.env.event()", "title": "" }, { "docid": "cbcae193c0b0a17bd66682f0128d3067", "score": "0.43620965", "text": "def message_to_event(direction, message):\n server = message.server\n\n # change numerics into nice names\n if message.verb in numerics:\n message.verb = numerics[message.verb]\n verb = message.verb.lower()\n\n # modify public/private verbs\n if verb == 'privmsg':\n if server.is_channel(message.params[0]):\n verb = 'pubmsg'\n if verb == 'notice':\n verb = 'privnotice'\n if server.is_channel(message.params[0]):\n verb = 'pubnotice'\n elif verb == 'mode':\n verb = 'umode'\n if server.is_channel(message.params[0]):\n verb = 'cmode'\n\n # this is the same as ircreactor does\n info = message.__dict__\n info['direction'] = direction\n info['verb'] = verb\n\n if 'time' in info['tags']:\n info['server_time'] = time.strptime(info['tags']['time'],\n '%Y-%m-%dT%H:%M:%S.%fZ')\n\n infos = [[verb, info], ]\n\n # handle shitty ctcp\n if verb in ('privmsg', 'pubmsg', 'privnotice', 'pubnotice'):\n infos = ctcp_unpack_message(info)\n\n # work on each info object separately\n i = -1\n while i < (len(infos) - 1):\n i += 1\n name = infos[i][NAME_ATTR]\n\n # standard message attributes\n for attr, param_map in _verb_param_map.items():\n # escaping\n escaped = False\n if attr.startswith('escaped_'):\n attr = attr.lstrip('escaped_')\n escaped = True\n\n for param_number, verbs in param_map.items():\n if len(infos[i][INFO_ATTR]['params']) > param_number and name in verbs:\n value = infos[i][INFO_ATTR]['params'][param_number]\n if escaped:\n value = escape(value)\n infos[i][INFO_ATTR][attr] = value\n\n # custom processing\n if name == 'welcome':\n # for servers where a low nicklen makes them silently truncate our nick\n server.nick = server.istring(infos[i][INFO_ATTR]['nick'])\n\n # custom message attributes\n if name == 'ctcp':\n if infos[i][INFO_ATTR]['ctcp_verb'] == 'action':\n info = dict(infos[i][INFO_ATTR])\n info['message'] = info['ctcp_text']\n if server.is_channel(info['target']):\n name = 'pubaction'\n info['channel'] = info['target']\n else:\n name = 'privaction'\n infos.append([name, info])\n\n if name == 'umode' and len(infos[i][INFO_ATTR]['params']) > 1:\n modestring = infos[i][INFO_ATTR]['params'][1:]\n modes = parse_modes(modestring)\n\n infos[i][INFO_ATTR]['modestring'] = ' '.join(modestring).strip()\n infos[i][INFO_ATTR]['modes'] = modes\n\n if name == 'cmode' and len(infos[i][INFO_ATTR]['params']) > 1:\n modestring = infos[i][INFO_ATTR]['params'][1:]\n chanmodes = 
server.features.get('chanmodes')\n prefixes = list(server.features.get('prefix').keys())\n modes = parse_modes(modestring, chanmodes, prefixes)\n\n infos[i][INFO_ATTR]['modestring'] = ' '.join(modestring).strip()\n infos[i][INFO_ATTR]['modes'] = modes\n\n if name == 'cmodeis':\n if len(infos[i][INFO_ATTR]['params']) > 2:\n modestring = infos[i][INFO_ATTR]['params'][2:]\n chanmodes = server.features.get('chanmodes')\n modes = parse_modes(modestring, chanmodes)\n\n infos[i][INFO_ATTR]['modestring'] = ' '.join(modestring).strip()\n infos[i][INFO_ATTR]['modes'] = modes\n else:\n infos[i][INFO_ATTR]['modestring'] = ''\n infos[i][INFO_ATTR]['modes'] = []\n\n if name == 'namreply':\n channel_name = infos[i][INFO_ATTR]['params'][2]\n server.info.create_channel(channel_name)\n channel = server.info.channels.get(channel_name)\n\n nice_names = []\n channel_prefixes = {}\n if len(infos[i][INFO_ATTR]['params']) > 3:\n raw_names = infos[i][INFO_ATTR]['params'][3].split(' ')\n else:\n raw_names = []\n\n for name in raw_names:\n # InspIRCd sends us an empty last param because they are cool\n if not len(name):\n continue\n\n prefixes = ''\n while name[0] in server.features.available['prefix'].values():\n prefixes += name[0]\n name = name[1:]\n\n nick = NickMask(name).nick\n\n server.info.create_user(nick)\n nice_names.append(name)\n server.info.create_user(name)\n user = server.info.users.get(nick)\n channel_prefixes[user] = prefixes\n channel.add_user(nick, prefixes=prefixes)\n\n infos[i][INFO_ATTR]['users'] = ','.join(nice_names)\n infos[i][INFO_ATTR]['prefixes'] = channel_prefixes\n\n # source / target mapping\n for attr in ('source', 'target', 'channel'):\n if attr in infos[i][INFO_ATTR] and infos[i][INFO_ATTR][attr]:\n source = infos[i][INFO_ATTR][attr]\n if server.is_channel(source):\n server.info.create_channel(source)\n infos[i][INFO_ATTR][attr] = server.info.channels.get(source)\n elif '.' 
in source and server.is_server(source):\n server.info.create_server(source)\n infos[i][INFO_ATTR][attr] = server.info.servers.get(source)\n elif server.is_nick(source):\n server.info.create_user(source)\n infos[i][INFO_ATTR][attr] = server.info.users.get(NickMask(source).nick)\n else: # we assume this is a user with messed up characters\n server.info.create_user(source)\n infos[i][INFO_ATTR][attr] = server.info.users.get(NickMask(source).nick)\n\n if 'channels' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['channels']:\n channels = []\n for chan in infos[i][INFO_ATTR]['channels'].split(','):\n server.info.create_channel(chan)\n channels.append(server.info.channels.get(chan))\n infos[i][INFO_ATTR]['channels'] = channels\n\n if 'users' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['users']:\n users = []\n for user in infos[i][INFO_ATTR]['users'].split(','):\n server.info.create_user(user)\n users.append(server.info.users.get(NickMask(user).nick))\n infos[i][INFO_ATTR]['users'] = users\n\n # custom from_to attribute for ease in bots\n verb = infos[i][INFO_ATTR]['verb']\n dir = infos[i][INFO_ATTR]['direction']\n source = infos[i][INFO_ATTR].get('source')\n target = infos[i][INFO_ATTR].get('target')\n\n if verb in ['pubmsg', 'pubnotice', 'pubaction']:\n infos[i][INFO_ATTR]['from_to'] = target\n elif verb in ['privmsg', 'privnotice', 'privaction']:\n if dir == 'out':\n infos[i][INFO_ATTR]['from_to'] = target\n elif dir == 'in':\n if 'echo-message' in server.capabilities.enabled:\n infos[i][INFO_ATTR]['from_to'] = target\n else:\n infos[i][INFO_ATTR]['from_to'] = source\n if 'from_to' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['from_to'] is None:\n del infos[i][INFO_ATTR]['from_to']\n\n # convenience function so unnecessary messages can get ignored easily\n infos[i][INFO_ATTR]['will_be_echod'] = False\n if verb in ['pubmsg', 'pubnotice', 'privmsg', 'privnotice']:\n if dir == 'out' and 'echo-message' in server.capabilities.enabled:\n infos[i][INFO_ATTR]['will_be_echod'] = True\n\n if 'from_to' in infos[i][INFO_ATTR] and infos[i][INFO_ATTR]['from_to'].is_server:\n del infos[i][INFO_ATTR]['from_to']\n\n return infos", "title": "" }, { "docid": "4167812ca72a4648538728ba9ee525fb", "score": "0.43506196", "text": "def to_mido_note_on_note_off(\n note: Note, channel: int, use_note_on_as_note_off: bool = True\n) -> Tuple[Message, Message]:\n velocity = note.velocity if note.velocity is not None else DEFAULT_VELOCITY\n note_on_msg = Message(\n \"note_on\",\n time=note.time,\n note=note.pitch,\n velocity=velocity,\n channel=channel,\n )\n if use_note_on_as_note_off:\n note_off_msg = Message(\n \"note_on\",\n time=note.end,\n note=note.pitch,\n velocity=0,\n channel=channel,\n )\n else:\n note_off_msg = Message(\n \"note_off\",\n time=note.end,\n note=note.pitch,\n velocity=velocity,\n channel=channel,\n )\n\n return note_on_msg, note_off_msg", "title": "" }, { "docid": "349b554c5c239bc93486fa68dc37f4da", "score": "0.43498144", "text": "def ps_eo_cmd(broker):\n content = broker[LocalSpecs.ps_eo_args].content\n data = []\n data.append('PID COMMAND')\n start = False\n for l in content:\n if 'PID' in l and 'COMMAND' in l:\n start = True\n continue\n if not start:\n continue\n pid, args = l.strip().split(None, 1)\n if ' ' in args:\n cmd, _ = args.split(None, 1)\n else:\n cmd = args\n data.append('{0} {1}'.format(pid, cmd))\n\n if len(data) > 1:\n return DatasourceProvider('\\n'.join(data), relative_path='insights_commands/ps_eo_cmd')\n\n raise SkipComponent()", "title": "" }, { "docid": 
"d64de615b454c172459d625186369bf0", "score": "0.43485543", "text": "def get_command(self, body: dict) -> dict:\n return TwilioCheckVerifyCommand.parse_obj(body).to_dict()", "title": "" }, { "docid": "8c403aca5876a1ea728f1bcd4d085a19", "score": "0.43472105", "text": "def add_cmd(cls, session, command):\n cmd = cls(\n start_time=command[\"Start\"],\n end_time=command[\"End\"],\n success=command[\"Success\"],\n target_id=command[\"Target\"],\n plugin_key=command[\"PluginKey\"],\n modified_command=command[\"ModifiedCommand\"].strip(),\n original_command=command[\"OriginalCommand\"].strip(),\n )\n session.add(cmd)\n session.commit()", "title": "" }, { "docid": "22e88a089a7cdd8e00a59a3ec9b3ae04", "score": "0.4342002", "text": "def get_command(self, pos):\n content = self._get(Streams.COMMAND, pos)\n next_pos = content.get('metadata').get('pos')\n reply = content.get('cmd')\n return next_pos, reply", "title": "" }, { "docid": "6092b12e26caa4a2b2611c80d1088e40", "score": "0.43310893", "text": "def update_protocol(module, sdk, cloud, protocol):\n\n mapping_id = module.params.get('mapping_id')\n\n attributes = {}\n\n if (mapping_id is not None) and (mapping_id != protocol.mapping_id):\n attributes['mapping_id'] = mapping_id\n\n if not attributes:\n return False, protocol\n\n if module.check_mode:\n return True, None\n\n try:\n new_protocol = cloud.identity.update_federation_protocol(None, protocol, **attributes)\n except sdk.exceptions.OpenStackCloudException as ex:\n module.fail_json(msg='Failed to update protocol: {0}'.format(str(ex)))\n return (True, new_protocol)", "title": "" }, { "docid": "6345546bb82ede402e51aea1be7d0bc2", "score": "0.43250558", "text": "async def process(self, msg):\n logger.debug(\"msg:\", msg, caller=self)\n\n t = msg.get(\"type\")\n if t == \"l2_updates\":\n symbol = msg[\"symbol\"]\n datas = msg[\"changes\"]\n await self.process_orderbook_update(symbol, datas)", "title": "" }, { "docid": "ecaf3a9585edb061166e6291f8564417", "score": "0.431756", "text": "def handle_command(command, event):\n # Finds and executes the given command, filling in response\n for cmd, callback in command_mapping.items():\n if command.lower().startswith(cmd):\n # command cleanup\n command = command.replace(cmd, \"\").strip()\n if command.endswith(\"?\"):\n command = command.replace(\"?\", \"\")\n return callback(command, event)\n\n default_command(command, event)", "title": "" }, { "docid": "92beb2fa8a46342f0644f20b62323fa6", "score": "0.4317419", "text": "def new_mapper(agent):\n recp = recipient.Broadcast(MappingUpdatesPoster.protocol_id, 'lobby')\n return agent.initiate_protocol(MappingUpdatesPoster, recp)", "title": "" }, { "docid": "41699504c374c27b8cc82559245c50c5", "score": "0.4309621", "text": "def _interact(self, command, *params):\n\n # Send client command to IMAP4 server.\n command = command.upper()\n if command not in COMMANDS:\n raise InvalidCommandError('Command ' + command + ' dees not exists')\n if self.state not in COMMANDS[command][0]:\n raise InvalidCommandError('Command ' + command + ' is not available in ' + self.state + ' state')\n # Generate a different tag for each command. 
[2.2.1]\n # The tag is generated to be a random 6-bit hexadecimal value.\n tag = hex(random.randint(1048576, 16777215))[2:]\n params = ' ' + ' '.join(params) if len(params) > 0 else ''\n msg = tag + ' ' + command + params + CRLF\n self.sock.send(msg)\n printd('\\n' + msg)\n\n # Receive server response.\n tagged_response = ''\n untagged_response = []\n while 1:\n curr_tag, info = self._recv_line()\n # Decide action by type.\n if curr_tag == '*':\n # Add quoted string if literal.\n match = re.match(Literal, info)\n if match:\n size = match.group('size')\n # Read the literal and the tail.\n quoted = self.buffer.read(int(size)) + self.buffer.readline()\n printd(quoted)\n info += CRLF + quoted[:-2]\n untagged_response.append(info)\n elif curr_tag == '+':\n # [7.5]\n self._recv_line()\n elif curr_tag == tag:\n tagged_response = info\n break\n else:\n raise InvalidCommandError('Receive invalid tagged response')\n\n # Analysis and interact with server response.\n # Check response type.\n type, tagged_data = tagged_response.split(' ', 1)\n if type == 'BAD':\n raise InvalidCommandError(tagged_data)\n # Update current states.\n new_state = {\n 'OK': COMMANDS[command][1][0],\n 'NO': COMMANDS[command][1][1]\n }.get(type, None)\n if new_state != None:\n self.state = COMMANDS[command][1][0]\n printd('\\n[current state swith to ' + self.state + ']\\n')\n\n # Return response for further processing in higher level functions.\n return type, tagged_data, untagged_response", "title": "" }, { "docid": "ae7ff3bb09c550cb9ace8bd4930fdfa5", "score": "0.4305486", "text": "def get_pong(self, msg):\n (msgid, status) = struct.unpack(_pong_struc, msg[:_pong_struc_size])\n data = msg[_pong_struc_size:_pong_struc_size+3*MAP_SIZE]\n\n return (msgid, status, data)", "title": "" }, { "docid": "8567342d7c9eeceea670621f56759e67", "score": "0.43053705", "text": "def COM(cmd,data): #Status: WIP\n #Desc CMD Target Address\n if cmd == 1: #get update 0x01 \n rpc(addr,getLocals,addr, lTime, lSped, lLoca, lStat)\n elif cmd == 2: #stop 0x02 addr\n rpc(addr, stopRPC)\n elif cmd == 3: #slow 0x03 addr \n rpc(addr, slowRPC)\n elif cmd == 4: #speed 0x04 addr\n pass\n elif cmd == 5: #merge 0x05\n merge()\n elif cmd == 10: #help 0x10 multicasted\n emergency()\n elif cmd ==255: #local 0xFF\n pass", "title": "" }, { "docid": "8c5f2111200df645ccab97f5fa9cb5af", "score": "0.43003353", "text": "def accept_command(self, cmd):\n processed = [\n cmd.get('time', (datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\"))),\n self.data['node_id'],\n self.device_type,\n cmd['cmd_id'],\n cmd['args'],\n cmd.get('source', 'internal')\n ]\n if self.device_type == 'PBR' and processed[3] == 19:\n processed[4] = str([0, self.data['setup']['pump_id']])\n\n self.q.put(processed) # put it to queue\n self.q_new_item.set() # notify checker that a new object has been added to queue", "title": "" }, { "docid": "ada9b9b4f8e80d8eef91041bb3db68d3", "score": "0.42989993", "text": "async def test_execute_command_adds_unexpected_error_to_state(\n engine: ProtocolEngine,\n mock_handlers: AsyncMock,\n mock_state_store: MagicMock,\n now: datetime,\n) -> None:\n error = RuntimeError(\"oh no!\")\n mock_req = MagicMock(spec=MoveToWellRequest)\n mock_impl = AsyncMock(spec=MoveToWellImplementation)\n\n mock_req.get_implementation.return_value = mock_impl\n mock_impl.create_command.return_value = PendingCommand(\n request=mock_req,\n created_at=now\n )\n mock_impl.execute.side_effect = error\n\n cmd = await engine.execute_command(mock_req, command_id=\"unique-id\")\n\n assert 
type(cmd.error) == errors.UnexpectedProtocolError # type: ignore[union-attr]\n assert cmd.error.original_error == error # type: ignore[union-attr]\n\n mock_state_store.handle_command.assert_called_with(\n cmd,\n command_id=\"unique-id\",\n )", "title": "" }, { "docid": "baa1ba2fa6461182432c938378e7624c", "score": "0.42924383", "text": "def _create_command_interface(self, peer):\n\n cmd_itf = od.POINTER_T(od.struct_arsdk_cmd_itf)()\n\n res = od.arsdk_peer_create_cmd_itf(\n peer, self._cmd_itf_cbs, ctypes.pointer(cmd_itf)\n )\n\n if res != 0:\n self.logger.error(f\"Error while creating command interface: {res}\")\n cmd_itf = None\n else:\n self.logger.info(\"Command interface has been created\")\n\n return cmd_itf", "title": "" }, { "docid": "df952630e0722f9226326c41ef8f35df", "score": "0.42891017", "text": "def create_event_from_class(self, klass):\n target = getattr(klass, 'target', None)\n \n if hasattr(klass, 'custom_id'):\n if (target is None) or (target in COMMAND_TARGETS_COMPONENT_COMMAND):\n command = ComponentCommand.from_class(klass)\n \n elif (target in COMMAND_TARGETS_FORM_COMPONENT_COMMAND):\n command = FormSubmitCommand.from_class(klass)\n \n else:\n raise ValueError(\n f'Unknown command target: {target!r}; If `custom_id` parameter is given, `target` '\n f'can be any of: `{COMMAND_TARGETS_COMPONENT_COMMAND | COMMAND_TARGETS_FORM_COMPONENT_COMMAND}`.'\n )\n \n else:\n target = validate_application_target_type(target)\n if target in APPLICATION_COMMAND_CONTEXT_TARGET_TYPES:\n command = ContextCommand.from_class(klass)\n \n else:\n command = SlashCommand.from_class(klass)\n \n \n if isinstance(command, Router):\n command = command[0]\n \n if isinstance(command, SlashCommand):\n self._add_application_command(command)\n else:\n self._add_component_command(command)\n \n return command", "title": "" }, { "docid": "a8a10d3b25f458724236653f3961c6f9", "score": "0.42776462", "text": "async def on_channel_update(self, before, after):", "title": "" }, { "docid": "155abf10ba0375494d254473dfb6aa90", "score": "0.4277569", "text": "def map_code_to_message(status_code):\n # RFC https://datatracker.ietf.org/doc/html/rfc1945#section-6.1.1\n # specify 3 digits letters\n if len(str(status_code)) != 3:\n # Execute the error\n return _err()\n # First digit of integer code\n family = str(status_code)[0]\n # Get the function from switcher dictionary\n func = switcher.get(family)\n # Execute the function\n return func()", "title": "" }, { "docid": "3a2845e8d3fd0b7c02a24f9e22e46fce", "score": "0.42698956", "text": "async def set_device_state(self, new_state, old_state):\n\n unit_data = {\n \"unitid\": new_state.get(\"id\", 0),\n \"v\": 3,\n \"commands\": \"\",\n }\n\n commands = []\n if new_state[\"power\"] != old_state[\"power\"]:\n if new_state.get(\"power\", 0) == 1:\n commands.append(\"PW1\")\n else:\n commands.append(\"PW0\")\n\n if new_state[\"settemp\"] != old_state[\"settemp\"]:\n commands.append((\"TS%.1f\" % (float(new_state[\"settemp\"]))))\n\n if new_state[\"setmode\"] != old_state[\"setmode\"]:\n commands.append(\"MD%d\" % new_state[\"setmode\"])\n\n if new_state[\"setfan\"] != old_state[\"setfan\"]:\n commands.append(\"FS%d\" % new_state[\"setfan\"])\n\n if new_state[\"airdir\"] != old_state[\"airdir\"]:\n commands.append(\"AV%d\" % new_state[\"airdir\"])\n\n unit_data[\"commands\"] = \",\".join(commands)\n\n async with self._session.post(\n f\"{BASE_URL}/unitcommand.aspx\",\n headers=_headers(self._token),\n json=unit_data,\n raise_for_status=True,\n timeout=1,\n ) as resp:\n return await 
resp.json()", "title": "" }, { "docid": "d74e46a318965a36a05a77fdcb28bba7", "score": "0.42576307", "text": "def _wrap_command(self, rpc, request):\n self._msgseq += 1\n kwargs = {\n \"type\": rpc,\n \"id\": self._msgseq,\n \"client\": self.name,\n }\n\n if rpc == ACCOUNT:\n kwargs[\"account\"] = request\n elif rpc == TRANSFER:\n kwargs[\"transfer\"] = request\n\n return Command(**kwargs)", "title": "" }, { "docid": "4788cf4b9822a093591aa84983dd87d8", "score": "0.4257139", "text": "def toggle_cmd_callback(self, data):\n if data.arm and not self.armed:\n self.arm()\n if data.disarm and self.armed and not self.just_armed:\n self.disarm()\n if data.rtl:\n self.RTL()\n if data.land:\n self.land()\n if data.fiducial:\n self.fiducial()\n if data.planner:\n planner = Map_Planner(self)\n if data.takeoff:\n pass\n if data.mode:\n self.mode = data.mode\n self.srv_mode(0, modes[self.mode])\n print 'switched to %s' %data.mode", "title": "" }, { "docid": "74deec6352d404e9228697980dd2ff88", "score": "0.42559722", "text": "async def on_message_update(before: models.Message, after: models.Message):\n embed = models.Embed.create(\n title=\"Edited Message!\",\n description=f\"**Before**: {before.content}\\n**After**: {after.content}\",\n color=0xFFAAAA\n )\n await models.Message.create(client,\n embeds=[embed]\n ).send(LOGS_CHANNEL)", "title": "" }, { "docid": "6021dcea31bf8e771b02f6f7ffa40b81", "score": "0.42512175", "text": "def incoming(self, message, comm):\n pass", "title": "" }, { "docid": "7dac9139a57ab189bc4deac50d39f302", "score": "0.42483357", "text": "async def on_message_edit(old_msg, new_msg):\n if old_msg.author.bot and new_msg.author.bot or old_msg.content == new_msg.content:\n return\n else:\n # Run the command\n await bot.process_commands(new_msg)", "title": "" }, { "docid": "d5d1abd70c8b5499be9a2697720369b0", "score": "0.4239209", "text": "def exec_update(msg):\r\n if msg.args and len(msg.args) > 1:\r\n command_token = msg.args[0].lower()\r\n command_output = ' '.join(msg.args[1:])\r\n if command_token in value_set.custom_commands:\r\n value_set.custom_commands.pop(command_token)\r\n value_set.custom_commands[command_token] = command_output\r\n save_dict(value_set.custom_commands, 'custom_commands')\r\n return f\"{command_token} updated!\"\r\n return f\"{command_token} wasn't found in custom commands!\"", "title": "" }, { "docid": "03d5019b875f484a1e438074e7a488b8", "score": "0.42389062", "text": "def cmd_translate(self, ident, _from, to, msg):\n global TRANSLATOR_INIT\n cinfo = self.init_cmd(ident, _from, to, msg)\n\n if cinfo[2] < cinfo[0]:\n self.privmsg(self.risc.channel, COLOR[\"boldred\"]+_from+COLOR[\"rewind\"]+\\\n \": Access denied. 
Check \"+self.risc.cmd_prefix+\"help \"+self.get_cmd(msg)+'.')\n return None\n\n argv = self.clean_list(msg.split(' '))\n argc = len(argv)\n\n if argc < 4:\n self.privmsg(cinfo[1], \"Check \"+self.risc.cmd_prefix+\"help translate.\")\n return None\n\n if TRANSLATOR_INIT == 0:\n if not self._cmd_translate_init():\n TRANSLATOR_INIT = -1\n self.privmsg(cinfo[1], \"Wrong API credentials.\")\n return None\n TRANSLATOR_INIT = 1\n elif TRANSLATOR_INIT == -1:\n self.privmsg(cinfo[1], \"Wrong API credentials.\")\n return None\n\n self.privmsg(cinfo[1], translator.translate(' '.join(msg.split(' ')[3:]), argv[1], argv[2]).encode(\"utf-8\", errors=\"ignore\"))\n return None", "title": "" }, { "docid": "1b32a5afaea38e74d1699b44d4299f0d", "score": "0.42303264", "text": "def _fixup_cmd(cmd, slave_host_name):\n new_cmd = []\n for elem in cmd:\n if isinstance(elem, ResolvableCommandElement):\n resolved_elem = elem.resolve(slave_host_name)\n new_cmd.append(resolved_elem)\n else:\n new_cmd.append(elem)\n return new_cmd", "title": "" }, { "docid": "8c9bcf004d4bdec5b506f69b3a67de99", "score": "0.42279768", "text": "def On(self):\n handler = self.get_command_object(\"On\")\n return_code, unique_id = handler()\n return [[return_code], [str(unique_id)]]", "title": "" }, { "docid": "bd7c433d9c8950e40fcb9e7aedee288a", "score": "0.42277738", "text": "def step_SM(self, action, vnext):\n\n if (self.state == 'command_receive'):\n if action == 0: #\n self.cmd = 'switch_lane_left'\n self.change_needed = 1\n self.oldlane = self.laneindex\n self.state = 'command_execute'\n self.switch_lane_left()\n\n elif action == 1: #\n self.cmd = 'switch_lane_right'\n self.change_needed = 1\n self.oldlane = self.laneindex\n self.state = 'command_execute'\n self.switch_lane_right()\n\n else:\n self.in_lane(vnext)\n\n elif (self.state == 'command_execute'):\n if (self.cmd == 'switch_lane_right'):\n self.switch_lane_right()\n elif (self.cmd == 'switch_lane_left'):\n self.switch_lane_left()", "title": "" }, { "docid": "485f4f12824dc6d3151b9c057db121f3", "score": "0.42258647", "text": "def on_client_command(self, message):", "title": "" }, { "docid": "18e36a6fb46b04d08027e0e803b1888b", "score": "0.42225334", "text": "def transform_external_proxy_msg(message):\n if message['type'] == 'reference_price_change':\n message['type'] = 'external_reference_price'\n return message\n if not external_market_state:\n raise Exception('external_market_state is not set.')\n if message['type'] == 'bbo':\n message['e_best_bid'] = message['best_bid']\n message['e_best_offer'] = message['best_offer']\n external_market_state['e_best_bid'] = message['best_bid']\n external_market_state['e_best_offer'] = message['best_offer']\n message['e_signed_volume'] = external_market_state['e_signed_volume']\n if message['type'] == 'post_batch':\n message['e_best_bid'] = message['best_bid']\n message['e_best_offer'] = message['best_offer']\n external_market_state['e_best_bid'] = message['best_bid']\n external_market_state['e_best_offer'] = message['best_offer']\n message['e_signed_volume'] = external_market_state['e_signed_volume']\n if message['type'] == 'signed_volume':\n message['e_signed_volume'] = message['signed_volume']\n message['e_best_bid'] = external_market_state['e_best_bid']\n message['e_best_offer'] = external_market_state['e_best_offer']\n external_market_state['e_signed_volume'] = message['signed_volume']\n message['type'] = 'external_feed_change'\n return message", "title": "" }, { "docid": "2ac6d2c58b74c0491f6926c49204200a", "score": "0.42202327", "text": 
"def _state_replaced(self, w, have):\n commands = []\n name = w[\"name\"]\n obj_in_have = search_obj_in_list(name, have, \"name\")\n if obj_in_have:\n # If 'w' does not specify mode then intf may need to change to its\n # default mode, however default mode may depend on sysdef.\n if not w.get(\"mode\") and re.search(\"Ethernet|port-channel\", name):\n sysdefs = self.intf_defs[\"sysdefs\"]\n sysdef_mode = sysdefs[\"mode\"]\n if obj_in_have.get(\"mode\") != sysdef_mode:\n w[\"mode\"] = sysdef_mode\n diff = dict_diff(w, obj_in_have)\n else:\n diff = w\n\n merged_commands = self.set_commands(w, have)\n # merged_commands:\n # - These commands are changes specified by the playbook.\n # - merged_commands apply to both existing and new objects\n # replaced_commands:\n # - These are the unspecified commands, used to reset any params\n # that are not already set to default states\n # - replaced_commands should only be used on 'have' objects\n # (interfaces that already exist)\n if obj_in_have:\n if \"name\" not in diff:\n diff[\"name\"] = name\n wkeys = w.keys()\n dkeys = diff.keys()\n for k in wkeys:\n if k in self.exclude_params and k in dkeys:\n del diff[k]\n replaced_commands = self.del_attribs(diff)\n cmds = set(replaced_commands).intersection(set(merged_commands))\n for cmd in cmds:\n merged_commands.remove(cmd)\n commands.extend(replaced_commands)\n\n commands.extend(merged_commands)\n return commands", "title": "" }, { "docid": "dcf0938a7505582977c0e5f348f46142", "score": "0.42189157", "text": "def send_capi_command(toaddr, capi_elem):\n global iDNB, iINV\n capi_run = ','.join(capi_elem)\n capi_cmd = capi_run + ' \\r\\n'\n asock = conntable.get(toaddr)\n asock.send(capi_cmd)\n displayaddr = toaddr\n if toaddr in DisplayNameTable:\n displayaddr = DisplayNameTable[toaddr]\n logging.info(\"%s (%-15s) ---> %s\" % (displayaddr, toaddr, capi_cmd.rstrip('\\r\\n')))\n status = asock.recv(2048)\n logging.debug(\"%s (%s) <--- %s\" % (displayaddr, toaddr, status.rstrip('\\r\\n')))\n\n # Status,Running\n # Quick fix for case where AzWTG sends response RUNNING and COMPLETED in one read\n if len(status) > 25:\n status = status.split('\\n')\n status = status[1]\n else:\n if iDNB == 0:\n status = asock.recv(2048)\n else:\n iDNB = 0\n\n if displayaddr == cSLog.name:\n cSLog.log(\"%s ---> %s\" % (displayaddr, capi_cmd.rstrip('\\r\\n')))\n cSLog.log(\"%s <--- %s\\n\" % (displayaddr, status.rstrip('\\r\\n')))\n\n if re.search(\"FAIL\", status) and re.search(\"SNIFFER\", displayaddr) and iINV == 0:\n logging.info(\"%s <--- %s\\n\" % (displayaddr, status.rstrip('\\r\\n')))\n wfa_sys_exit(\"Command returned FAIL\")\n return status", "title": "" }, { "docid": "48bd3fc184b5d547c5d2e7f114af8466", "score": "0.42143497", "text": "def test_commands(self):\n self.assert_initialize_driver()\n\n ####\n # First test in command mode\n ####\n self.assert_driver_command(ProtocolEvent.CLOCK_SYNC)\n self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)\n self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)\n self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)\n self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')\n\n self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')\n\n ####\n # Test in streaming mode\n ####\n # Put us in streaming\n self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.AUTOSAMPLE, delay=1)\n\n 
self.assert_driver_command(ProtocolEvent.SCHEDULED_CLOCK_SYNC)\n self.assert_driver_command(ProtocolEvent.ACQUIRE_STATUS, regex=r'serial sync mode')\n\n self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=1)\n\n ####\n # Test a bad command\n ####\n self.assert_driver_command_exception('ima_bad_command', exception_class=InstrumentCommandException)", "title": "" }, { "docid": "e2a9ef96d321844ba6857a48d89e5e04", "score": "0.4214063", "text": "def map(cls, cmdName):\n if cls.instance:\n return cls.instance.mapCommand(cmdName)\n return cmdName", "title": "" }, { "docid": "7e5006f820cddf0d5250e6000540f85a", "score": "0.42132476", "text": "def generate_hooked_command(cmd_name, cmd_cls, hooks):\n\n def run(self, orig_run=cmd_cls.run):\n self.run_command_hooks('pre_hooks')\n orig_run(self)\n self.run_command_hooks('post_hooks')\n\n return type(cmd_name, (cmd_cls, object),\n {'run': run, 'run_command_hooks': run_command_hooks,\n 'pre_hooks': hooks.get('pre', []),\n 'post_hooks': hooks.get('post', [])})", "title": "" }, { "docid": "4be9341014a93b115e7a02dab158c293", "score": "0.41959998", "text": "def at_post_cmd(self):\n char = self.character\n account = self.account\n here = char.location if char else None\n who = account.key if account else (char if char else '-visitor-')\n cmd = self.cmdstring if self.cmdstring != '__nomatch_command' else ''\n if here:\n if char.db.settings and 'broadcast commands' in char.db.settings and \\\n char.db.settings['broadcast commands'] is True:\n for each in here.contents:\n if each.has_account:\n if each == self or each.db.settings and 'see commands' in each.db.settings and\\\n each.db.settings['see commands'] is True:\n each.msg('|r(|w%s|r)|n %s%s|n' % (char.key, cmd, self.raw.replace('|', '||')))\n command_time = time.time() - self.command_time\n if account:\n account.db._command_time_total = (0 if account.db._command_time_total is None\n else account.db._command_time_total) + command_time\n if char:\n if char.traits.ct is None:\n char.traits.add('ct', 'Core Time', 'counter')\n if char.traits.cc is None:\n char.traits.add('cc', 'Core Count', 'counter')\n char.traits.ct.current += command_time\n char.traits.cc.current += 1\n print(u'{}> {}{} ({:.4f})'.format(who, cmd, self.raw, command_time))", "title": "" }, { "docid": "c6e3bf63802cc1dc71188522a86e098a", "score": "0.41929978", "text": "def _engine_status_to_status_bar(status: Optional[EngineStatus]) -> StatusBarState:\n if status is None:\n return StatusBarState.IDLE\n\n return {\n EngineStatus.IDLE: StatusBarState.IDLE,\n EngineStatus.RUNNING: StatusBarState.RUNNING,\n EngineStatus.PAUSED: StatusBarState.PAUSED,\n EngineStatus.BLOCKED_BY_OPEN_DOOR: StatusBarState.PAUSED,\n EngineStatus.STOP_REQUESTED: StatusBarState.UPDATING,\n EngineStatus.STOPPED: StatusBarState.IDLE,\n EngineStatus.FINISHING: StatusBarState.UPDATING,\n EngineStatus.FAILED: StatusBarState.HARDWARE_ERROR,\n EngineStatus.SUCCEEDED: StatusBarState.RUN_COMPLETED,\n }[status]", "title": "" }, { "docid": "8e6ec644c0df76be9c474092ebe01bd8", "score": "0.41873163", "text": "def command_type(self):\n if '@' in self.next_command:\n return 'A_COMMAND'\n if self.next_command[0] == '(':\n return 'L_COMMAND'\n return 'C_COMMAND'", "title": "" }, { "docid": "cb518dae87cac1325df466cc027ff689", "score": "0.41848627", "text": "def onecmd(self, line):\n if not self.commands_defining:\n return cmd.Cmd.onecmd(self, line)\n else:\n return self.handle_command_def(line)", "title": "" }, { "docid": 
"a114187fb8311ead87a8141fbc1ce261", "score": "0.41815203", "text": "def exchange_bytes(self, cmd):\n assert len(cmd) == 8, \"Must send only 8 bytes\"\n #feature report out, id = 0\n self._dev.ctrl_transfer(0x21, 0x09, 0x0300, 0, cmd)\n #feature report in, id = 1\n return self._dev.ctrl_transfer(0xa1, 0x01, 0x0301, 0, 8)", "title": "" }, { "docid": "ca6e43e8eb36309c9aedf72c97a90afe", "score": "0.41773966", "text": "def send_command(self, code):\n logger.info(\n \"Sending NMT command 0x%X to node %d\", code, self.id)\n self.network.send_message(0, [code, self.id])\n if code in COMMAND_TO_STATE:\n self._state = COMMAND_TO_STATE[code]\n logger.info(\"Changing NMT state to %s\", self.state)", "title": "" }, { "docid": "7f03f48f00911006f55d7cbe57d2e2fc", "score": "0.41754052", "text": "def comm(self, message):\n\n #check the process is running\n if self.process.poll() != None:\n self.execute()\n \n self.socket.send_string(message)\n return self.socket.recv().decode('utf-8')", "title": "" }, { "docid": "c5516eebb8ae6247c03fbd4f346b4a60", "score": "0.41620025", "text": "async def process_command(\n self, peer_identity: PeerIdentity, received_message: Message\n ):\n # Make it run in the background so another message can be processed.\n asyncio.ensure_future(self._process_command(peer_identity, received_message))", "title": "" }, { "docid": "6a84d967114ae9c4522ebed95890f601", "score": "0.4161549", "text": "def process(cmd, args):\n response = None\n line = cmd + ' '.join(args)\n try:\n response = session.query(line)\n # FIXME: Find out what exceptions will be passed and handle them properly\n except Exception as e:\n pass\n return (modes.remote, response)", "title": "" }, { "docid": "5b8428c029d0889685d0105b01518a8e", "score": "0.4150302", "text": "def _manual_change_received(self, msg):\n self._send_status_request()", "title": "" }, { "docid": "ce16eddeb5809b171d4374010c9f5a23", "score": "0.4147176", "text": "def update_port_postcommit(self, context):\n port = context.current\n old_port = context.original\n old_device = old_port['device_id']\n instance_id = port['device_id'] if 'device_id' in port else \"\"\n\n # Check if there's a new device_id\n if instance_id and not old_device:\n self._invoke_nexus_on_port_event(context, instance_id)", "title": "" }, { "docid": "e8a97a889a6def69b990490b7d9869cd", "score": "0.4146847", "text": "def __send_switching_command(self, device, order, list_of_commands):\n if list_of_commands:\n if list_of_commands[0]:\n command = self.build_command(device, (order, list_of_commands))\n self.vcw.write(device, command) # Write new switching", "title": "" }, { "docid": "b2cafe5dc1460d117509e7cacd3efb1f", "score": "0.41466486", "text": "async def on_message_edit(self, before: Message, after: Message):", "title": "" }, { "docid": "dad055bcc7d9f5c7369d771416dfc643", "score": "0.4145081", "text": "def after_b(session: Session, operation: Operation, model: SQLClass):\n print(\"***************before_name:\", operation.command, model, session)\n print(model.comment)\n if operation.command != 'd':\n model.comment_after = f\"processed_after_{operation.command}: {model.id}\"\n\n if operation.command == 'u':\n session.merge(model)\n print(model.comment)", "title": "" }, { "docid": "7c136715566c3a20d05414ec31c5e723", "score": "0.41395816", "text": "def readcmd(self):\n\n b = self.read(4)\n\n app = b[0]\n verb = b[1]\n n = b[2] + (b[3] << 8)\n\n if n > 0:\n data = self.read(n)\n else:\n data = b''\n\n if len(data) != n:\n raise ValueError('Facedancer expected ' + str(n) \\\n + ' bytes 
but received only ' + str(len(data)))\n\n cmd = FacedancerCommand(app, verb, data)\n\n if self.verbose > 1:\n print(\"Facedancer Rx command:\", cmd)\n\n return cmd", "title": "" } ]
a126d70679d5ec5f9ed98706600eff52
Calls the admin/getdata endpoint.
[ { "docid": "39058c1a3e6b1ac687799b0c85a84b93", "score": "0.0", "text": "def get_data(server, experiment_id, participant_id, key, table=None, fmt=\"JSON\", version=cur_version):\n if table:\n server = \"%s/red-api/%s/admin/get-data/%s/%s/%s\" %(server, version, experiment_id, participant_id, table)\n else:\n server = \"%s/red-api/%s/admin/get-data/%s/%s\" %(server, version, experiment_id, participant_id)\n json_object = {\"key\": key, \"format\": fmt}\n\n response = requests.get(server, json=json_object)\n\n data = response.json()\n\n if \"error\" in data:\n raise REDServerError(data[\"error\"])\n\n if fmt.upper() == \"JSON\":\n return json.dumps(data, indent=\" \")", "title": "" } ]
[ { "docid": "4f15b4d47d7933e98acb489bd88a4210", "score": "0.6539754", "text": "def get_data(request):", "title": "" }, { "docid": "d837cc205318cee308e989ecee49c2df", "score": "0.64337784", "text": "def get_data(self, request=None):", "title": "" }, { "docid": "3399d8a3276a3f8d2e0a6b2cfc3f4906", "score": "0.6087905", "text": "def get(self):\n try:\n dashboard_id = http_util.GetIntegerParam(self.request, fields.ID)\n row = dashboard_model.Dashboard.GetDashboard(dashboard_id)\n data = row.GetDashboardData()\n filename = http_util.GetStringParam(\n self.request, 'filename', required=False)\n\n self.RenderJson(data, filename=filename)\n except Exception as err:\n self.RenderJson(\n data={error_fields.MESSAGE: err.message}, status=400)", "title": "" }, { "docid": "1f462eecf4c10e49ac610f7a38903b62", "score": "0.60039324", "text": "def test_get_data(self):\n output_file = os.path.join(self.data_file, \"output.txt\")\n self.result = self.admin.getData(2, output_file)\n self.assertIsInstance(self.result, dict)", "title": "" }, { "docid": "e087744bdb30060f689d11ba0a9f1c18", "score": "0.59763694", "text": "def get_admin(self):\n return self._admin_data", "title": "" }, { "docid": "06f9c126f8692d8f8b3ef7dc7a1c7007", "score": "0.59089816", "text": "def get_data(*args):\n return Resources().get_path(\"aster\", \"data\", *args)", "title": "" }, { "docid": "182dbec4f11a65be319cdb40737af870", "score": "0.5894338", "text": "def get_data(self):\n return self._client.get_data()", "title": "" }, { "docid": "464a66bc85914d3ec7c16c960a450ba8", "score": "0.5870663", "text": "def do_get(self, args):\n if len(args) == 0:\n logger_data.logger_data_get()\n else:\n print(\"Incorrect usage of command \\'get\\'! Execute \\'help get\\' to see available options.\")", "title": "" }, { "docid": "eca751a1064fe5d7a9e2a28cd4560ca8", "score": "0.58399695", "text": "def get(self):\n args = parser.parse(datasets_args, request)\n data_task = get_data.apply_async(kwargs=args)\n api = Api\n return \"{}\", 202, {\"Location\": api.url_for(api(current_app), Task, task_id=data_task.id)}", "title": "" }, { "docid": "07e6b8b17c8e7f7237fb9cc7995ad279", "score": "0.58224016", "text": "def get_data(self):\n pass", "title": "" }, { "docid": "2979ec37f0589af2fb6aee609b1ea92b", "score": "0.58211553", "text": "def get(self):\n\t\treturn self.communicate({\n\t\t\t\"action\": \"get\"\n\t\t})", "title": "" }, { "docid": "5591324e2f33c34973881aed9c3fed4a", "score": "0.5784859", "text": "def default_get():\n return Response('Datacollector API', 200)", "title": "" }, { "docid": "1507cebea4695625759c9d7a26d70a52", "score": "0.57715106", "text": "def api_data():\n response = api.data_get(request.get_json())\n return jsonify(response)", "title": "" }, { "docid": "9a55bee1790609c686625d55a29243c5", "score": "0.576187", "text": "def all_data(request, data):\n return data", "title": "" }, { "docid": "fa315672d498417bb9c43752602d45e5", "score": "0.569435", "text": "def GET(self, *args):\n return dumps({'results':self.get_all()})", "title": "" }, { "docid": "dc3bd6a1522c8eaca370d22ab5f203b3", "score": "0.5663418", "text": "def getData(self) -> ghidra.program.model.listing.Data:\n ...", "title": "" }, { "docid": "9699a5b524f1c7eeceab59db57a29ffc", "score": "0.56537133", "text": "def get(self, data, access, mutator):\n pass", "title": "" }, { "docid": "d59c82367857c63a606f6828fd1719ba", "score": "0.56352127", "text": "def get(self):\n\n try:\n mine = self.request.get(fields.MINE)\n owner = self.request.get(fields.OWNER)\n\n query = 
dashboard_model.Dashboard.query()\n\n filter_property = ndb.GenericProperty(fields.CREATED_BY)\n if owner:\n owner_user = user_validator.UserValidator.GetUserFromEmail(\n owner)\n\n if owner_user:\n query = query.filter(filter_property == owner_user)\n else:\n self.RenderJson({fields.DATA: []})\n return\n elif mine:\n query = query.filter(\n filter_property == users.get_current_user())\n\n query = query.order(dashboard_model.Dashboard.title)\n # TODO: Add paging to the dashboard UI and limit fetches.\n results = query.fetch(limit=2000)\n\n response = []\n dashboard_data = None\n\n for result in results:\n dashboard_data = json.loads(result.data)\n created_by = None\n modified_by = None\n\n if result.created_by:\n created_by = result.created_by.email()\n\n if result.modified_by:\n modified_by = result.modified_by.email()\n\n response.append({\n fields.ID: result.key.integer_id(),\n fields.OWNER: dashboard_data.get('owner'),\n fields.TITLE: (result.title or\n dashboard_model.DEFAULT_DASHBOARD_TITLE),\n fields.CREATED_BY: created_by,\n fields.CREATED_DATE: result.created_date,\n fields.MODIFIED_BY: modified_by,\n fields.MODIFIED_DATE: result.modified_date})\n\n self.RenderJson({fields.DATA: response})\n except (base.InitializeError, dashboard_model.InitializeError,\n http_util.ParameterError) as err:\n self.RenderJson(data={error_fields.MESSAGE: err}, status=400)", "title": "" }, { "docid": "0ae720971a07e9413924f1d362c8e741", "score": "0.5616826", "text": "def get(self):\n #- take query if any\n theQuery = self.request.get('q')\n logging.info(\"The sent query is:\")\n logging.info(theQuery)\n\n #- First, just an example:\n #- inst a form, pass and render to jinja\n #self.renderAndWrite({ \"form\": self.form , \"myQuery\": theQuery }, \"admin.html\")\n #- Second. 
try to list all my episodes\n self.renderAndWrite({ \"form\": self.form }, \"admin.html\")", "title": "" }, { "docid": "ebf2982a19e66dbe46c6dbc98592fa34", "score": "0.56148565", "text": "def get_data():\n\treturn(get_remote_data())\n\t#return(get_local_data())", "title": "" }, { "docid": "9a91470aa975607e47481880d12436b1", "score": "0.5569496", "text": "def get(self):\n res = self.plag_dao.get_docs(page=int(request.args.get(\"page\", 1)),\n per_page=int(request.args.get(\"per_page\", 10)), all='all' in request.args)\n docs_info = dict(data=[d.to_dict() for d in res['data']], count=res['count'])\n print(docs_info)\n return Response(data=docs_info)", "title": "" }, { "docid": "7915f424db10f1c5528d069741ff81dc", "score": "0.55643255", "text": "def get(self, request, *args, **kwargs):\r\n response = self.get_result_list()\r\n if response:\r\n return response\r\n\r\n context = self.get_context()\r\n context.update(kwargs or {})\r\n\r\n response = self.get_response(context, *args, **kwargs)\r\n\r\n return response or TemplateResponse(request, self.object_list_template or\r\n self.get_template_list('views/model_list.html'), context, current_app=self.admin_site.name)", "title": "" }, { "docid": "4733bc2676041e2cee1b34b3da639011", "score": "0.55551827", "text": "def get(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "fce03c344dda08fc03868599ea599aee", "score": "0.55466366", "text": "def get(self, request, *args, **kwargs):\n return self.list(request, *args, **kwargs)", "title": "" }, { "docid": "0b92a57b4d32d8d99b144e940f90e5fb", "score": "0.55332315", "text": "def get(self, request):\n user_id = request.GET.get(\"id\")\n if user_id:\n try:\n user = User.objects.get(id=user_id)\n except User.DoesNotExist:\n return self.error(\"User does not exist\")\n return self.success(UserAdminSerializer(user).data)\n\n user = User.objects.all().order_by(\"-create_time\")\n\n keyword = request.GET.get(\"keyword\", None)\n if keyword:\n user = user.filter(Q(username__icontains=keyword) |\n Q(userprofile__real_name__icontains=keyword) |\n Q(email__icontains=keyword))\n\n only_admin = request.GET.get(\"onlyadmin\", None)\n if only_admin:\n user = user.filter(Q(admin_type__icontains=\"Admin\"))\n\n return self.success(self.paginate_data(request, user, UserAdminSerializer))", "title": "" }, { "docid": "4fea016365129da8a7c911287f56aa7e", "score": "0.55213195", "text": "def configureGet(request):", "title": "" }, { "docid": "52b48f1ebb6573d5e5fab280ee7105c9", "score": "0.5510217", "text": "def GET(self, req):\n conn = HTTPConnection('%s:%s' % (self.mds_ip, self.mds_port))\n headers = req.params\n conn.request('GET', req.path, headers=headers)\n resp = conn.getresponse()\n return Response(request=req, body=resp.read(), content_type=resp.getheader('Content-Type'))", "title": "" }, { "docid": "5aeb6329f56d10e9f173bd4c16a1b392", "score": "0.55099034", "text": "def _get_data(**kwargs) -> None:\n # Make the HTTP GET request to get all devices\n response = requests.get(**kwargs)\n print(f\"HTTP GET: {response.url}\")\n\n # Raise an exception if the response is not OK\n if not response.ok:\n print(response.text)\n response.raise_for_status()\n\n # Extend the global list device_list with the response data\n device_list.extend(response.json())\n\n # Pagination\n # Meraki Dashboard APIs implement the RFC5988 - Web Linking standard\n # for pagination.\n # By default, the Meraki Dashboard API returns a maximum of 1000 items\n # prior to requiring pagination.\n # In the HTTP Response, an HTTP Header called Link contains 
information\n # on how to retrieve additional items from the Meraki Dashboard API.\n # First we need to check if the header contains the next key:\n if response.links.get('next'):\n next_url = response.links['next'].get('url')\n kwargs['url'] = next_url\n _get_data(**kwargs)", "title": "" }, { "docid": "0a32fa4d69748df62f017a5d192c9625", "score": "0.5509038", "text": "def get(self, *req_args, **req_kwargs):\n return self.request('GET', *req_args, **req_kwargs)", "title": "" }, { "docid": "f678e501538ce1260bf6054f1f842b64", "score": "0.55077046", "text": "def get(self, request):\n\t\tresponse_data = {\n\t\t\t'status': \"success\",\n\t\t\t'sCode': 200,\n\t\t\t'data': \"sample data\",\n\t\t\t'message': \"sample data successfully retrieved\"\n\t\t}\n\t\treturn Response(data=response_data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "4a78fa2e31d0346ff269f3a573a28678", "score": "0.54812986", "text": "def get(request):\n raise NotImplementedError", "title": "" }, { "docid": "29a4e58dc900bc2ce69c6fd99a50fef9", "score": "0.54767615", "text": "def get_data(self):\n print(\"Loading the JSON's file ...\")\n self.api_access.load_id_countries()\n print(\"Getting products from the API ...\")\n self.api_access.get_products()\n print(\"Deleting superfluous categories for the products ...\")\n self.api_access.keep_one_cat()\n print(\"Replace and sort categories from the request ...\")\n self.api_access.replace_sort_categories()\n print(\"Insert the categories into the database ...\")\n self.api_access.insert_categories(self.database)\n print(\"Uploading products to the database ...\")\n self.api_access.add_products(self.database)", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, 
**kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "b5486455d5a48e9166eeb4f7424c4330", "score": "0.54699993", "text": "def get(self, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "48eadd487fde2d3ecfd4f810e0982570", "score": "0.5458538", "text": "def get(self):\n print_url_for('GET', ep_1)\n return DAO.users, 200", "title": "" }, { "docid": "212d60f6793eca8ef7524fda9437f8b6", "score": "0.54555327", "text": "def test_get_all_as_superuser_returns_all_data(self):\n mock_user = create_mock_user(1, is_superuser=True)\n data_list = data_api.get_all(mock_user)\n self.assertTrue(len(data_list) == len(self.fixture.data_collection))", "title": "" }, { "docid": "c0e692ee012d330eb117d205c3dd00f5", "score": "0.5441976", "text": "def get_data(self):\n if self.kind == 'clients':\n return self._get_data_clients()\n else:\n return self._get_data_services()", "title": "" }, { "docid": "a6277a83575404be27593ee850b16803", "score": "0.5432421", "text": "def get(self, _id=None):\n\n # /api/dataset/\n if not _id:\n\n if self.args.private:\n if not self.current_user:\n raise HTTPError(401)\n if not self.args.user: # fallback\n self.args.user = self.current_user\n # cannot retrieve others' datasets\n if self.args.user != self.current_user:\n raise HTTPError(403)\n\n show_metadata = self.args.pop('meta')\n start = self.args.pop('start')\n size = self.args.pop('size')\n\n raise Finish({\n \"total\": datasets.total(**self.args),\n \"hits\": [\n repr_regdoc(dataset, show_metadata)\n for dataset in datasets.get_all(start, size, **self.args)\n ],\n })\n\n # /api/dataset/83dc3401f86819de\n # /api/dataset/83dc3401f86819de.js\n\n if not _id.endswith('.js'):\n dataset = datasets.get(_id)\n else: # remove .js to get _id\n dataset = datasets.get(_id[:-3])\n\n # add metadata field\n dataset = repr_regdoc(dataset, self.args.pop('meta'))\n # add copyright field\n dataset = add_copyright(dataset, self.request)\n\n # javascript\n if _id.endswith('.js'):\n self.set_header('Content-Type', 'application/javascript')\n dataset = wrap_javascript(dataset)\n\n self.finish(dataset)", "title": "" }, { "docid": "2416a1a8a3cdcfd139311a56a6a6cf34", "score": "0.54284835", "text": "def get(self, data: dict) -> Response:\n return self._get(\n params=self.get_default_data(data)\n )", "title": "" }, { "docid": "3f6476568d7885dcb180a7e425e21fcb", "score": "0.5418812", "text": "def index():\n print(request.headers, request.get_json())\n return 'Python Grafana data source, used for rendering HTML panels and timeseries data.'", "title": "" }, { "docid": "bb9be18595608e37085dbffd8d25daae", "score": "0.54179245", "text": "def get_data(self, kwargs):\n head = dict(timestamp=time.time())\n head['args'] = kwargs\n uinput = kwargs.get('input', '')\n inst = kwargs.get('instance', self.dbs_global)\n idx = getarg(kwargs, 'idx', 0)\n limit = getarg(kwargs, 'limit', 0) # do not impose limit\n coll = kwargs.get('collection', 'merge')\n status = kwargs.get('status')\n error = kwargs.get('error')\n reason = kwargs.get('reason')\n dasquery = kwargs.get('dasquery', None)\n time0 = time.time()\n if dasquery:\n dasquery = DASQuery(dasquery, instance=inst)\n if dasquery.error:\n return self.page(form + dasquery.error, ctime=time.time()-time0)\n else:\n check, content = \\\n self.generate_dasquery(uinput, inst, html_mode=False)\n if check:\n head.update({'status': 'fail', 'reason': content,\n 'ctime': time.time()-time0, 'input': 
uinput})\n data = []\n return head, data\n dasquery = content # returned content is valid DAS query\n try:\n nres = self.dasmgr.nresults(dasquery, coll)\n data = \\\n self.dasmgr.get_from_cache(dasquery, idx, limit)\n # check that we got what we expected\n data = [r for r in data]\n if nres and not len(data):\n for retry in range(1, 3, 5):\n msg = 'retry in %s sec' % retry\n dasprint(dastimestamp('DAS WARNING '), msg, dasquery)\n time.sleep(retry) # retry one more time\n data = \\\n self.dasmgr.get_from_cache(dasquery, idx, limit)\n data = [r for r in data]\n if len(data):\n break\n if nres and not len(data):\n msg = 'fail to get all data for %s, nres=%s, len(data)=%s' \\\n % (dasquery, nres, len(data))\n dasprint(dastimestamp('DAS WARNING '), msg)\n status = 'fail'\n reason = 'Fail to retrieve data from DAS cache, please retry'\n\n if dasquery.aggregators:\n # aggregators split DAS record into sub-system and then\n # apply aggregator functions, therefore we need to correctly\n # account for nresults. Resolve generator into list and take\n # its length as nresults value.\n data = [r for r in data]\n nres = len(data)\n if error: # DAS record contains an error\n status = 'error'\n head.update({'status':status, 'nresults':nres,\n 'ctime': time.time()-time0, 'dasquery': dasquery})\n except Exception as exc:\n status = 'fail'\n reason = str(exc)\n print_exc(exc)\n head.update({'status': status,\n 'ctime': time.time()-time0, 'dasquery': dasquery})\n data = []\n head.update({'incache':self.dasmgr.incache(dasquery, coll='cache'),\n 'apilist':self.dasmgr.apilist(dasquery)})\n if reason:\n head.update({'reason': reason})\n if status != 'ok':\n head.update(self.info())\n\n # check if query had dataset input and returned no results\n # then run hint functions to find dataset in other DBS instances\n mquery = dasquery.mongo_query\n empty = False\n for item in data:\n if 'dataset.name' in mquery['spec'] and 'dataset' in mquery['fields'] \\\n and 'result' not in item:\n if not item['dataset']:\n empty = True\n break\n if empty: # if no results found add dataset from other DBS instances\n hints = self.hint_datasets(kwargs)\n for item in data:\n item.update({'hints': hints})\n\n return head, data", "title": "" }, { "docid": "bd30b3d95e6c9ce4c97271d028ee1674", "score": "0.54068613", "text": "def get_data(self) -> str:", "title": "" }, { "docid": "c807fa386e6d5ebaba042af53f619925", "score": "0.54060346", "text": "def get(self):\n self.admin_required()\n\n cursor_key = self.request.GET.get('cursor')\n staff, cursor = models.User.get_staff(cursor_key)\n return self.render_json({\n 'type': 'users',\n 'users': [s.summary() for s in staff],\n 'cursor': cursor if cursor else ''\n })", "title": "" }, { "docid": "15530329f9962206d1d026c56e6ad244", "score": "0.54006207", "text": "def get(self, request):\n return self.json_message('API running.')", "title": "" }, { "docid": "499c412e218a82d7fcb2d611f7aa8698", "score": "0.5393497", "text": "def do_GET(self):\n\t\treload(methods)\n\t\tmethods.do_GET(self)", "title": "" }, { "docid": "951e5a49ca9ead06dae8878a85daca28", "score": "0.53816205", "text": "def get(self, *args):\n pass", "title": "" }, { "docid": "9e890ad81bd018edeea6661431ea2c33", "score": "0.5376045", "text": "def get(self, request, chart_url=None, *args, **kwargs):\n params = request.GET.copy()\n user = request.user\n data = DataProcessor().fetch_chart_data(chart_url, params, user, params.get('chartType'))\n return Response(data)", "title": "" }, { "docid": "f57c35a06f4a33fd8fc557b9fdc31e79", "score": 
"0.53694594", "text": "async def get_data(self, user_id):\n raise NotImplementedError", "title": "" }, { "docid": "0eece2b1945121c84657b641f9da1af2", "score": "0.5368405", "text": "def listview(self, kwargs):\n # force to load the page all the time\n cherrypy.response.headers['Cache-Control'] = 'no-cache'\n cherrypy.response.headers['Pragma'] = 'no-cache'\n\n time0 = time.time()\n ajaxreq = getarg(kwargs, 'ajax', 0)\n uinput = getarg(kwargs, 'input', '')\n limit = getarg(kwargs, 'limit', 10)\n show = getarg(kwargs, 'show', 'json')\n form = self.form(uinput=uinput)\n # self.status sends request to Cache Server\n # Cache Server uses das_core to retrieve status\n status = self.status(input=uinput, ajax=0)\n if status == 'no data':\n # no data in raw cache, send POST request\n self.send_request('POST', kwargs)\n ctime = (time.time()-time0)\n# page = self.templatepage('not_ready')\n page = self.status(input=uinput)\n page = self.page(form + page, ctime=ctime)\n return page\n elif status == 'fail':\n kwargs['reason'] = 'Unable to get status from data-service'\n return self.error(self.gen_error_msg(kwargs))\n\n total = self.nresults(kwargs)\n rows = self.result(kwargs)\n nrows = len(rows)\n page = \"\"\n ndict = {'nrows':total, 'limit':limit}\n page = self.templatepage('das_nrecords', **ndict)\n# for nrecord in range(0, len(rows)):\n# row = rows[nrecord]\n# style = \"white\"\n# if nrecord % 2:\n# style = \"white\"\n# else:\n# style = \"gray\" \n style = \"white\"\n for row in rows:\n id = row['_id']\n page += '<div class=\"%s\"><hr class=\"line\" />' % style\n gen = self.convert2ui(row)\n for uikey, value in [k for k, g in groupby(gen)]:\n page += \"<b>%s</b>: %s<br />\" % (uikey, value)\n pad = \"\"\n if show == 'json':\n jsoncode = {'jsoncode': json2html(row, pad)}\n jsonhtml = self.templatepage('das_json', **jsoncode)\n jsondict = dict(data=jsonhtml, id=id, rec_id=id)\n page += self.templatepage('das_row', **jsondict)\n elif show == 'code':\n code = pformat(row, indent=1, width=100)\n data = self.templatepage('das_code', code=code)\n datadict = {'data':data, 'id':id, rec_id:id}\n page += self.templatepage('das_row', **datadict)\n else:\n code = yaml.dump(row, width=100, indent=4, \n default_flow_style=False)\n data = self.templatepage('das_code', code=code)\n datadict = {'data':data, 'id':id, rec_id:id}\n page += self.templatepage('das_row', **datadict)\n page += '</div>'\n ctime = (time.time()-time0)\n return self.page(form + page, ctime=ctime)", "title": "" }, { "docid": "2a648f55e47a542aa1c7b58c3a0124a7", "score": "0.53582805", "text": "def getData(self):\n return self.data", "title": "" }, { "docid": "87c630008761874ca7a4a639261d7abf", "score": "0.5341491", "text": "def admin():\n rows = db.execute(\"SELECT * FROM residents WHERE flat < 101 ORDER BY flat ASC\")\n return render_template(\"admin.html\", rows=rows)", "title": "" }, { "docid": "979384d203391075e97177c7f5986e58", "score": "0.53328496", "text": "def get_data(self):\n msg = self._send_msg(self._CMD_GET_DATA, [0])\n return msg", "title": "" }, { "docid": "1672fc7a18ee413334633d348d5bc90a", "score": "0.5310197", "text": "def get(self, datastore):\n return self._get(\"/datastores/%s\" % base.getid(datastore),\n \"datastore\")", "title": "" }, { "docid": "8af29dfe302a95d359be0a28154a6853", "score": "0.53063333", "text": "def get(self):\n if not utils.IsInternalUser():\n self.RenderHtml('result.html', {\n 'errors': ['Only logged-in internal users can access stats.']\n })\n return\n\n key = self.request.get('key')\n if key:\n 
self._DisplayResults(key)\n else:\n self._DisplayForm()", "title": "" }, { "docid": "5c29f16b0b1f2fb97aed7ca833e72641", "score": "0.5304319", "text": "def test_read_admin(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.get(\n reverse(\n 'order-detail',\n kwargs={'pk': 1},\n ),\n )\n\n data = json.loads(response.content)\n\n content = {\n 'id': 1,\n 'transaction_date': data['transaction_date'],\n 'authorization_id': '1',\n 'settlement_id': '1',\n 'order_lines': [{\n 'content_type': 'package',\n 'id': 1,\n 'object_id': 1,\n 'order': 'http://testserver/orders/1',\n 'quantity': 1,\n 'url': 'http://testserver/order_lines/1'\n }],\n 'url': 'http://testserver/orders/1',\n 'user': 'http://testserver/users/1'\n }\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "38764ba1c9e195594aa5f5109b55a380", "score": "0.5302005", "text": "def test_admin_get():\r\n a, code, message = Admin.get(1)\r\n assert a is not None\r\n assert code == 200\r\n assert message == \"OK\"", "title": "" }, { "docid": "c7d94125f8800723890d00b5796ed3c8", "score": "0.52971566", "text": "def getData():\n popd=os.getcwd()\n dssvuePath = \"C:/Program Files (x86)/HEC/HEC-DSSVue/\"\n os.chdir(dssvuePath)\n # Path to scritp that extracts data from DSS file\n scriptPath = \"C:/Users/nschiff2/IdeaProjects/AutoHEC/src/Analysis/\"\n # Use HEC-DSSVue to run script (only way to use hec package that accesses DSS files)\n call([\"HEC-DSSVue.cmd\", \"-s\", scriptPath + \"getSOData.py\"], shell=True)\n os.chdir(popd)", "title": "" }, { "docid": "de2e5782a4296756e2fb5c6b7583fe33", "score": "0.52965313", "text": "def get(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "de2e5782a4296756e2fb5c6b7583fe33", "score": "0.52965313", "text": "def get(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "de2e5782a4296756e2fb5c6b7583fe33", "score": "0.52965313", "text": "def get(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "3087a702385383901c591a19a340ea08", "score": "0.5295194", "text": "def data_list(request):\n try:\n size = 15\n data = list()\n query = request.GET.get(\"query\", \"\")\n page = int(request.GET.get(\"page\", 0))\n page = page - 1 if page > 0 else 0\n collect = request.GET.get(\"collection\", \"\")\n try:\n de_str = base64.b64decode(query).decode(\"utf-8\") if query else None\n query = parser_expr(de_str) if de_str else {}\n except Exception as ex:\n log_error(ex)\n return json_response(status=STATUS.Ok, msg=\"success\",\n data={\"list\": [],\n \"count\": 0,\n \"current_size\": size,\n \"current_page\": page + 1,\n \"current_collection\": collect})\n pos = page * size\n if len(connection) < 1:\n return json_response(status=STATUS.Err,\n msg=\"database config not found\", data={})\n mgo = connection[\"client\"]\n total = mgo.get_collection(collect).find(query if len(query) > 0 else {}).count()\n rows = mgo.get_collection(collect).find(query if len(query) > 0 else {}).skip(pos).limit(size)\n for item in rows:\n data.append(item)\n connection[\"current_collection\"] = collect\n connection[\"collection_keys\"] = collection_keys(collect)\n data = json_data_filter_list(json_data=data)\n return json_response(status=STATUS.Ok, msg=\"success\",\n data={\"list\": data,\n \"count\": total,\n \"current_size\": size,\n \"current_page\": page + 1,\n \"current_collection\": collect,\n \"collection_keys\": connection[\"collection_keys\"]})\n except Exception 
as ex:\n log_error(ex)\n return json_response(status=STATUS.Err, msg=\"query data list err: %s\" % ex, data={})", "title": "" }, { "docid": "c8c31585058cd29f87b7c36a0c2e83de", "score": "0.52700764", "text": "def show_bus_data():\n return jsonify(get_bus_data(), 200)", "title": "" }, { "docid": "6747c5002b62331ce02c6b48b237a3a1", "score": "0.52655077", "text": "def get(self, *args):", "title": "" }, { "docid": "eb67d1d6cfcecb98623fcc915f3f76c4", "score": "0.52620846", "text": "def GET(self, *args):\n if not args:\n self.logger.error(\"No Arguments were given\")\n return dumps({\"results\":'Error: No arguments were given'})\n return dumps({'results':self.get_request(args[0])})", "title": "" }, { "docid": "35f2cd304e640c7c7c1d7b3557328a02", "score": "0.525862", "text": "def _get_data(self):", "title": "" }, { "docid": "938f8de4c9b28b691e4e34e81e0840d6", "score": "0.5256385", "text": "def get_list(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "938f8de4c9b28b691e4e34e81e0840d6", "score": "0.5256385", "text": "def get_list(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "938f8de4c9b28b691e4e34e81e0840d6", "score": "0.5256385", "text": "def get_list(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "938f8de4c9b28b691e4e34e81e0840d6", "score": "0.5256385", "text": "def get_list(self, request, *args, **kwargs):\r\n pass", "title": "" }, { "docid": "e2ceea746e1c80f366e049cfb47ede06", "score": "0.52557164", "text": "def get_queryset(self):\n return HttpResponse(\"Hello, world. You're at the durumiApp index.\")", "title": "" }, { "docid": "2d48c197bd25dd74e65677a9917ec5a9", "score": "0.5244427", "text": "def fetch_api_data(self):\n if self.session is None:\n self.apiLogin()\n if self.device_id is None or self.modules is None:\n self.getDevices()\n self.getParameters()\n self.getData()\n return self.data", "title": "" }, { "docid": "4e7b0b72cedc5c6e1345e95e382b2ae3", "score": "0.5242288", "text": "def get(self):\n current_user = get_jwt_identity()\n if current_user['admin'] == False:\n return {'error':'Sorry, this route is only accessible to admins'}, 403\n return Sales().get_all_sales()", "title": "" }, { "docid": "74529a2a1840e1cfac6a5439102b27da", "score": "0.5235968", "text": "def endpoint_call(self, data: BulkDataType = None, method: str = 'get', code: int = 200) -> ApiResultType:\n\n if data is not None:\n data = json.dumps(data)\n\n return self.get_result(\n method,\n f'/{self._settings(\"VST_API_URL\")}/endpoint/',\n data=data,\n code=code\n )", "title": "" }, { "docid": "bb0ffce29f8e2bbe138ce338f8d21c27", "score": "0.52356905", "text": "def administrador(self, data):\n self.controlador.modulo.administrador()", "title": "" }, { "docid": "2348ca01394fd768d2cbdda32d6fab05", "score": "0.52323854", "text": "def get(self):\n \n return self._handle_request()", "title": "" }, { "docid": "937cb81173d3b0972346f7dfda4ead9d", "score": "0.5230643", "text": "def get(self, request):\n pass", "title": "" }, { "docid": "caf6691a0484d5d2ef610a3ff6f53902", "score": "0.522302", "text": "def get(self, datasource=\"tranquility\",**kwargs):\n kwargs_dict ={\n\"datasource\" : datasource, \n }\n kwargs_dict.update(kwargs)\n return EsiRequestObject(self.base_url, self.get_responses) \\\n .get(**kwargs_dict)", "title": "" }, { "docid": "caf6691a0484d5d2ef610a3ff6f53902", "score": "0.522302", "text": "def get(self, datasource=\"tranquility\",**kwargs):\n kwargs_dict ={\n\"datasource\" : datasource, \n }\n kwargs_dict.update(kwargs)\n return 
EsiRequestObject(self.base_url, self.get_responses) \\\n .get(**kwargs_dict)", "title": "" }, { "docid": "37edd024743b9a233421dea49bed0ac1", "score": "0.5220882", "text": "def get(self):\n try:\n args = request.args.copy()\n model_ids = args.pop(MODEL_IDS).split(\",\") if args.get(MODEL_IDS) else None\n model_names = args.pop(MODEL_NAMES).split(\",\") if args.get(MODEL_NAMES) else None\n model_desc = args.pop(MODEL_DESC, None)\n show_all = args.pop(SHOW_ALL, None)\n crunch_data = args.pop(DO_NOT_CRUNCH_DATA_NOW, None)\n if len(args) > 0: return json_response(\"The following query parameters are invalid: \" + str(args.keys()),\n 400)\n show_all = True if show_all is not None and show_all.lower() == \"true\" else False\n crunch_data = False if crunch_data is not None and crunch_data.lower() == \"false\" else True\n return service.bulk_add_models_get(model_ids, model_names, model_desc, show_all, crunch_data)\n except:\n return str(traceback.format_exc()), 500", "title": "" }, { "docid": "abe8e12f53cad8a7cbe15c80ac1c3574", "score": "0.52054006", "text": "def user_data(self, access_token, *args, **kwargs):\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n return requests.get(\n urljoin(append_slash(self.setting(\"URL\")), \"oapi/v1/users/~\"),\n headers=headers,\n ).json()", "title": "" }, { "docid": "ee4fc307e220781d6bff271274b3e35b", "score": "0.52053785", "text": "def admin(admin_path):\n return \"Index function\"", "title": "" }, { "docid": "edccf0d057e59ebec6c5b32bd9e97b8e", "score": "0.52044046", "text": "def data(self):\n\n # Get user request\n tool, name = self._get_tool()\n\n # Get desired format\n data_format = self.request.GET.get('format', 'csv')\n\n # Get the results of the tool\n res = tool.run(save_results=True)\n\n # Render into desired format\n data_artifact = res['data']\n output_data = data_artifact.render_output(data_format)\n extension = data_artifact.available_formats()[data_format]['extension']\n\n # Send out the data in CSV format\n return Response(\n content_type=\"application/force-download\",\n content_disposition='attachment; filename=%s.%s' % (tool.name, extension),\n body=output_data\n )", "title": "" }, { "docid": "8e8e3cebb3daf44b680e1f8e2b771d98", "score": "0.5198956", "text": "def get(self, request, *args, **kwargs):\n\n # TODO: use custom_resource to get cr_type_all\n cr_type_all = custom_resource_api.get_current_custom_resource_type_all(\n request=request\n )\n\n custom_resources = list(\n custom_resource_api.get_all_of_current_template(\n request=request\n ).order_by(\"sort\")\n )\n # Get arguments\n tab = request.GET.get(\"ispublished\", \"all\")\n tab = (\n tab\n if tab in [\"all\", \"published\", \"unpublished\", \"draft\"]\n else \"all\"\n )\n page = request.GET.get(\"page\", 1)\n if tab == \"draft\":\n document = (\n dashboard_common_constants.FUNCTIONAL_OBJECT_ENUM.FORM.value\n )\n template = (\n dashboard_constants.DASHBOARD_FORMS_TEMPLATE_TABLE_PAGINATION\n )\n else:\n document = (\n dashboard_common_constants.FUNCTIONAL_OBJECT_ENUM.RECORD.value\n )\n template = self.data_template\n\n context = {\n \"page\": page,\n \"roles\": \",\".join(request.GET.getlist(\"role\", [cr_type_all.slug])),\n \"ispublished\": tab,\n }\n\n # Get resources\n if tab == \"draft\":\n filtered_data = self.load_drafts(request)\n else:\n filtered_data = self.load_records(request, tab, custom_resources)\n\n # Paginator\n results_paginator = ResultsPaginator.get_results(\n filtered_data, page, settings.RECORD_PER_PAGE_PAGINATION\n )\n\n # Data context\n if tab in 
[\"all\", \"published\", \"unpublished\"]:\n results_paginator.object_list = self._format_data_context_registry(\n results_paginator.object_list,\n )\n elif tab == \"draft\":\n results_paginator.object_list = (\n self._format_draft_context_registry(\n results_paginator.object_list,\n )\n )\n\n # Add user_form for change owner\n user_form = UserForm(request.user)\n context.update(\n {\n \"number_total\": len(filtered_data),\n \"user_data\": results_paginator,\n \"user_form\": user_form,\n \"document\": document,\n \"template\": template,\n \"action_form\": ActionForm(\n [(\"2\", \"Change owner of selected records\")]\n ),\n \"menu\": self.administration,\n \"administration\": self.administration,\n \"username_list\": get_id_username_dict(\n user_api.get_all_users()\n ),\n \"resources\": True,\n \"url_resources\": reverse(\"core-admin:core_dashboard_records\")\n if self.administration\n else reverse(\"core_dashboard_records\"),\n \"custom_resources\": custom_resources,\n \"display_not_resource\": True, # display all resource\n \"role_custom_resource_type_all\": cr_type_all.slug,\n \"list_role_custom_resource\": \",\".join(\n [\n cr.slug\n for cr in custom_resources\n if custom_resource_api._is_custom_resource_type_resource(\n cr\n )\n and cr.display_icon\n ]\n ),\n \"type_resource\": CUSTOM_RESOURCE_TYPE.RESOURCE.value,\n }\n )\n\n modals = [\n \"core_main_app/user/workspaces/list/modals/assign_workspace.html\",\n dashboard_common_constants.MODALS_COMMON_DELETE,\n dashboard_common_constants.MODALS_COMMON_CHANGE_OWNER,\n EditDataView.get_modal_html_path(),\n ]\n\n assets = self._get_assets()\n\n # Set page title\n context.update({\"page_title\": \"Dashboard\"})\n\n return self.common_render(\n request,\n self.template,\n context=context,\n assets=assets,\n modals=modals,\n )", "title": "" }, { "docid": "cfd0a0805b41900c5d58dca5c838915e", "score": "0.51973444", "text": "def get(self, slug=None):\n\t\t\t\t\n\t\t# print \n\t\t# app_log.info(\"••• APIrestHandler.get ...\\n\")\n\n\t\t# self.site_section = \"api\"\n\n\t\t# # get current page - optionnal\n\t\t# current_page = self.get_current_uri_without_error_slug()\n\t\t# app_log.info(\"••• current_page : %s\", current_page )\n\n\n\n\t\t# ### check user auth level \n\t\t# app_log.info(\"••• self.user_auth_level : %s \", self.user_auth_level )\n\t\t# # --> open data level : \"opendata\", \"commons\", \"collective\", \"admin\"\n\t\t# # check OPEN_LEVEL_DICT in settings_corefields.py for more infos\n\t\t# # open_level = \"opendata\"\n\t\t# # token = query_data[\"token\"]\n\t\t# # if token != None : \n\t\t# # \t# TO DO : check token to get corresponding opendata level \n\t\t# # \topen_level = \"commons\"\n\t\t# ### log query as warning if request allowed gets private or collective info\n\t\t# open_level = self.user_auth_level_dict[\"data\"] # generated by @check_request_token\n\t\t# app_log.info(\"••• open_level : %s \", open_level )\n\n\n\n\t\t# # get slug\n\t\t# slug_ = self.request.arguments\n\t\t# app_log.info(\"••• slug_ : \\n %s\", pformat(slug_) )\n\n\t\t# # filter slug\n\t\t# query_data = self.filter_slug( slug_, slug_class=\"data\", query_from=\"api\" )\n\t\t# app_log.info(\"••• query_data : \\n %s \", pformat(query_data) )\n\n\n\t\t# # TO DO : limit results \n\t\t# ### override query_data[\"results_per_page\"] if auth level doesn't allow it\n\t\t# if open_level == \"opendata\" and query_data[\"results_per_page\"] > QUERIES_MAX_RESULTS_IF_API :\n\t\t# \tquery_data[\"results_per_page\"] = QUERIES_MAX_RESULTS_IF_API\n\n\n\t\t# ### TO DO : 
FACTORIZE WITH DataScrapedHandler HANDLER\n\n\t\t# ### retrieve datamodel from DB top make correspondances field's _id --> field_name\n\t\t# # data_model_custom_cursor\t = self.application.coll_model.find({\"field_class\" : \"custom\", \"is_visible\" : True }) \n\t\t# # data_model_custom \t\t\t = list(data_model_custom_cursor)\n\t\t# # data_model_custom_dict \t\t = { str(field[\"_id\"]) : field for field in data_model_custom }\n\t\t# # data_model_custom_dict_names = { field[\"field_name\"] : field for field in data_model_custom }\n\n\t\t# # data_model_core_cursor \t\t = self.application.coll_model.find({\"field_class\" : \"core\" }) \n\t\t# # data_model_core \t\t\t = list(data_model_core_cursor)\n\t\t# # data_model_core_dict_names\t = { field[\"field_name\"] : field for field in data_model_core }\n\n\t\t# dm_set = self.get_datamodel_set()\n\t\t# data_model_custom_list\t\t \t= dm_set[\"data_model_custom_list\"]\n\t\t# data_model_custom_dict \t\t \t= dm_set[\"data_model_custom_dict\"]\n\t\t# data_model_custom_dict_names \t= dm_set[\"data_model_custom_dict_names\"]\n\t\t# data_model_core_list\t\t \t= dm_set[\"data_model_core_list\"]\n\t\t# data_model_core_dict_names\t\t= dm_set[\"data_model_core_dict_names\"]\n\n\n\t\t# ### filter results depending on field's opendata level\n\t\t# # get fields allowed\n\t\t# allowed_fields_list, allowed_custom_fields, allowed_core_fields = self.get_authorized_datamodel_fields(open_level, data_model_custom_list, data_model_core_list )\n\t\t# app_log.info(\"••• allowed_fields_list : \\n %s \", allowed_fields_list ) \n\n\t\t# # get data \n\t\t# data, is_data, page_n_max = self.get_data_from_query( \tquery_data, \n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\tcoll_name\t\t\t\t\t = \"data\", \n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\tquery_from\t\t\t\t\t = self.site_section, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\tallowed_fields_list\t\t\t = allowed_fields_list,\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\tignore_fields_list\t\t\t = [\"_id\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\tdata_model_custom_dict_names = data_model_custom_dict_names,\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t\tdata_model_core_dict_names\t = data_model_core_dict_names\n\t\t# \t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t# # data, is_data, page_n_max = raw[0], raw[1], raw[3]\n\t\t# app_log.info(\"••• is_data : %s \", is_data ) \n\n\t\t\n\t\t\n\t\t# ### operations if there is data\n\t\t# if is_data : \n\t\t\t\n\t\t# \tcount_results = len(data)\n\t\t# \tapp_log.info(\"••• data[0] : \\n %s \" , pformat(data[0]) )\n\t\t\t\n\t\t# \t### rewrite field names as understable ones --> replace field_oid by field_name \n\t\t# \t# cf : https://sedimental.org/remap.html\n\t\t# \tdata = remap( data, lambda p, k, v: ( data_model_custom_dict[k][u\"field_name\"], v) if k in data_model_custom_dict else (k, v))\n\n\t\t# else :\n\t\t# \tcount_results = 0\n\t\t# \tdata = \"no data for this query\"\n\n\n\t\t# ### add header to tell user which level auth he/she gets to get\n\t\t# full_json = { \n\t\t\t\n\t\t# \t\"status\" : \"ok\", \n\t\t# \t# header of the json with infos \n\t\t# \t\"query_log\" : {\n\t\t# \t\t\"auth_level\" \t\t\t: open_level ,\n\t\t# \t\t\"query\"\t\t\t\t\t: query_data ,\n\t\t# \t\t\"uri\"\t\t\t\t\t: self.request.uri ,\n\t\t# \t\t\"count_results\"\t\t\t: count_results,\n\t\t# \t\t\"fields_open_level\" \t: [ \n\t\t# \t\t\t\t\t\t\t\t\t{ \t\n\t\t# \t\t\t\t\t\t\t\t\t\t\"field_name\" : f[\"field_name\"], \n\t\t# \t\t\t\t\t\t\t\t\t\t\"field_open\" : f[\"field_open\"],\n\t\t# 
\t\t\t\t\t\t\t\t\t\t\"field_type\" : f[\"field_type\"]\n\t\t# \t\t\t\t\t\t\t\t\t} for f in data_model_custom_list if f[\"field_open\"] in OPEN_LEVEL_DICT[open_level]\n\t\t# \t\t\t\t\t\t\t\t ]\n\t\t# \t},\n\t\t# \t# data retrieved\n\t\t# \t\"data_list\" \t \t: data\n\t\t# } \n\n\t\tfull_json = self.retrieve_results()\n\t\t# full_json = { \"status\" : \"ok\", \"sent\" : self.retrieve_results() }\n\n\t\t### write data as json\n\t\t# cf : https://stackoverflow.com/questions/35083374/how-to-decode-a-unicode-string-python\n\t\tresults = json.dumps(full_json, ensure_ascii=False, default=json_util.default).encode('utf8')\n\n\t\tprint '.....\\n' \n\n\t\tself.write( results )\n\t\t# raise gen.Return(self.write( results ))\n\t\t\n\t\tself.finish()", "title": "" }, { "docid": "1fdf529c1c578a4c03f51e02e63fa7bf", "score": "0.5196009", "text": "def get(self, slug=None):\n\t\t\n\t\tself.site_section = \"api\"\n\t\t# self.set_status(204)\n\n\t\t# get slug\n\t\tslug_ = self.request.arguments\n\t\tapp_log.info(\"••• slug_ : \\n %s\", pformat(slug_) )\n\n\t\t# filter slug\n\t\tquery_data = self.filter_slug( slug_, slug_class=\"infos\", query_from=self.site_section )\n\t\tapp_log.info(\"••• query_data : \\n %s \", pformat(query_data) )\n\n\t\t### get datamodel set infos\n\t\tdm_set = self.get_datamodel_set( exclude_fields={\"added_by\" : 0, \"modified_by\" : 0 } )\n\t\t# app_log.info(\"••• data_model_custom_list : \\n %s \", pformat(dm_set) ) \n\n\t\t### get spiders_list\n\t\tif query_data[\"get_all_spiders\"] : \n\t\t\tspiders_dict = self.get_spiders_infos(as_dict=True, query={} )\n\t\telse :\n\t\t\tspiders_dict = self.get_spiders_infos(as_dict=True, query={ \"scraper_log.is_tested\" : True} )\n\t\t# app_log.info(\"••• spiders_dict : \\n %s \", pformat(spiders_dict) ) \n\n\t\t# count docs by spider_id\n\t\tcount_docs_by_spiders = self.count_docs_by_field(coll_name=\"data\", field_name=\"spider_id\")\n\t\t# app_log.info(\"count_docs_by_spiders : \\n %s\", pformat(count_docs_by_spiders) )\n\n\n\t\tfull_json_dft = {\t\n\t\t\t\n\t\t\t\"status\" : \"ok\", \n\n\t\t\t# header of the json with infos \n\t\t\t\"query_log\" : {\n\t\t\t\t\"_description\"\t\t\t: \"synthesis of the query made by the user\",\n\t\t\t\t\"query\"\t\t\t\t\t: query_data ,\n\t\t\t\t\"uri\"\t\t\t\t\t: self.request.uri ,\n\t\t\t},\n\t\t}\n\n\t\tif query_data[\"only_dm_list\"] : \n\n\t\t\tfull_json = {\n\n\t\t\t\t\"datamodel\" : {\n\t\t\t\t\t\"data_model_custom_dict\"\t: dm_set[\"data_model_custom_dict\"],\n\t\t\t\t\t\"data_model_core_dict\" \t\t: dm_set[\"data_model_core_dict\"],\n\t\t\t\t},\n\t\t\t}\n\n\t\telif query_data[\"only_spiders_list\"] : \n\n\t\t\tfull_json = {\n\n\t\t\t\t\"spiders\" \t: {\n\t\t\t\t\t\"spiders_dict\"\t: spiders_dict,\n\t\t\t\t\t\"spiders_list\"\t: [ \n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"id\" \t\t: k,\n\t\t\t\t\t\t\t\t\"name\" \t\t: v[\"name\"],\n\t\t\t\t\t\t\t\t\"fullname\" \t: v[\"name\"] \n\t\t\t\t\t\t\t} for k,v in spiders_dict.iteritems() if k in count_docs_by_spiders.keys() \n\t\t\t\t\t\t],\n\t\t\t\t},\n\t\t\t}\n\n\t\telse : \n\n\t\t\tfull_json = {\n\n\t\t\t\t\"datamodel\" : {\n\t\t\t\t\t\"data_model_custom_dict\"\t: dm_set[\"data_model_custom_dict\"],\n\t\t\t\t\t\"data_model_core_dict\" \t\t: dm_set[\"data_model_core_dict\"],\n\t\t\t\t},\n\n\t\t\t\t\"spiders\" \t: {\n\t\t\t\t\t\"spiders_dict\"\t: spiders_dict,\n\t\t\t\t\t\"spiders_list\"\t: [ \n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"id\" \t\t: k,\n\t\t\t\t\t\t\t\t\"name\" \t\t: v[\"name\"],\n\t\t\t\t\t\t\t\t\"fullname\" \t: v[\"name\"] \n\t\t\t\t\t\t\t} for k,v in 
spiders_dict.iteritems() if k in count_docs_by_spiders.keys() \n\t\t\t\t\t\t],\n\t\t\t\t},\n\n\t\t\t\t\"counts\" \t: {\n\n\t\t\t\t\t\"data_by_spiders\"\t: count_docs_by_spiders,\n\t\t\t\t\t\"datamodel_custom\"\t: self.count_documents(coll_name=\"datamodel\", \t query={ \"field_class\" : \"custom\" }), \n\t\t\t\t\t\"spiders_tested\"\t: self.count_documents(coll_name=\"contributors\", query={ \"scraper_log.is_tested\" : True}), \n\t\t\t\t\t\"data\"\t\t\t\t: self.count_documents(coll_name=\"data\"), \n\t\t\t\t\t\"users\"\t\t\t\t: self.count_documents(coll_name=\"users\"), \n\t\t\t\t} \n\n\t\t\t}\n\n\n\t\t### write data as json\n\n\t\tfull_json.update(full_json_dft)\n\n\t\t# cf : https://stackoverflow.com/questions/35083374/how-to-decode-a-unicode-string-python\n\t\tresults = json.dumps(full_json, ensure_ascii=False, default=json_util.default).encode('utf8')\n\n\t\tprint '.....\\n' \n\n\t\tself.write( results )\n\t\t\n\t\tself.finish()", "title": "" }, { "docid": "aa33d2aaed4df89102c6f427a1f06ad2", "score": "0.5195877", "text": "def get_data():\n return TEST_DATA", "title": "" }, { "docid": "a980207b0f27f65a95daa8f8c24ac3d9", "score": "0.51957905", "text": "def get(self, *args, **kwargs):\n self._callAction(*args, **kwargs)", "title": "" } ]
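The GET handlers quoted in the passages above all follow the same basic shape: read the query string, reject parameters they do not recognise, then return one page of results. A minimal self-contained sketch of that shape follows (Flask, with a hypothetical /items route and made-up variable names that are assumptions, not taken from any passage above):

from flask import Flask, jsonify, request

app = Flask(__name__)
ITEMS = [{"id": i, "name": f"item-{i}"} for i in range(250)]  # stand-in data

@app.route("/items", methods=["GET"])
def list_items():
    # Copy the query args so unknown keys can be detected after popping the known ones.
    args = request.args.to_dict()
    page = int(args.pop("page", 1))
    per_page = min(int(args.pop("per_page", 25)), 100)  # cap the page size
    if args:  # anything left over is an unsupported parameter
        return jsonify({"error": "invalid query parameters: " + ", ".join(sorted(args))}), 400
    start = (page - 1) * per_page
    return jsonify({"page": page, "total": len(ITEMS), "results": ITEMS[start:start + per_page]})

if __name__ == "__main__":
    app.run()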
9acd87d0c5a1d50924a4858d5678a70a
Progress to the next task.
[ { "docid": "33a243f132a60ac649dcb76eae7d60aa", "score": "0.5585365", "text": "def next_task(self, indices=None):\n \n if indices is None:\n self.r_ind, self.c_ind = next(self.coarse_ind)\n else:\n self.r_ind, self.c_ind = indices", "title": "" } ]
[ { "docid": "174e212eb780bb121a7557b0ced88659", "score": "0.7682763", "text": "def advance(self):\r\n self._progress.update(self._task, advance=1)", "title": "" }, { "docid": "7725f6395dd670053c4e38b89900d66c", "score": "0.70731795", "text": "def step(self):\n self.progress()", "title": "" }, { "docid": "b69ddce17132df7db099b9b1dcca273a", "score": "0.6582751", "text": "def progress(self):", "title": "" }, { "docid": "21d1dca6b6195a1d8c12fc506cec1994", "score": "0.6360893", "text": "def run_next(self):\n try:\n self._run_task(*self._batch.next())\n except StopIteration:\n self.batch_complete()", "title": "" }, { "docid": "3af373b09b5a827ca2ab7f5400eab93e", "score": "0.62618905", "text": "def run(self):\n for i in tqdm(range(self.n_steps)):\n self.step()", "title": "" }, { "docid": "efc726dccb94716b2c1296d9a07a7865", "score": "0.6249719", "text": "def progress(self, timestep_secs):\n pass", "title": "" }, { "docid": "a10c00f254022b70a5252a52c1415f62", "score": "0.6243955", "text": "def next_step(self, done):\n\n self.log(self.completed_episodes, self.step, self.total_step)\n\n if self._callback_on_step_end is not None:\n self._callback_on_step_end(\n agent=self,\n reward=self._trajectory[-1].reward,\n observation=self._trajectory[-1].observation,\n done=self._trajectory[-1].done,\n action=self._trajectory[-1].action)\n\n # -- roll into next time step --\n\n self._curr_step += 1\n self._curr_total_step += 1\n\n if done:\n self.reset()\n\n self.policy.next_step(self._curr_total_step)", "title": "" }, { "docid": "b27f75cd8fd337a86b24664bc0afd484", "score": "0.6201859", "text": "def next(self):\n if self.maximum is None or self.progress <= self.maximum:\n # If first print, print title, else clear last print.\n prefix = self.title if self.progress == 0 \\\n else ''.join(['\\b' for i in xrange(0, len(str(self.progress)))]) if self.maximum is None \\\n else ''.join(['\\b' for i in xrange(0, 2 * len(str(self.maximum)) + 1)])\n\n self.progress += 1\n sys.stdout.write(prefix + (self.format % self.progress))\n sys.stdout.flush()", "title": "" }, { "docid": "97bab66f99fd58d5c12aef83467b1fcc", "score": "0.6184512", "text": "def goto_next_step(self) -> None:\n self.current_step = self.current_step + 1", "title": "" }, { "docid": "7f4d6167a54a57e1b470ad6f74d4f717", "score": "0.61521083", "text": "def progress(self, progress):\n\n self._progress = progress", "title": "" }, { "docid": "34062ad24352c75291f34990ea7400f1", "score": "0.6101756", "text": "def nextTask(self):\n\n rospy.loginfo('mission_manager.nextTask: pending_command: %s'%\n str(self.pending_command))\n\n # do_override:\n if self.pending_command == 'do_override':\n if ( (self.current_task is not None) and\n (self.current_task['type'] == 'mission_plan') ):\n self.current_task['current_path'] = None\n self.saved_task = self.current_task\n self.pending_command = None\n return\n \n if self.override_task is not None:\n self.current_task = self.saved_task\n if self.current_task is None and len(self.tasks):\n self.current_task = self.tasks[0]\n self.override_task = None\n if self.pending_command == 'next_task':\n self.pending_command = None\n return\n\n if self.pending_command == 'restart_mission' and len(self.tasks):\n for t in self.tasks:\n if t['type'] == 'mission_plan':\n t['current_nav_objective_index'] = None\n t['current_path'] = None\n self.current_task = self.tasks[0]\n\n if self.pending_command in ('next_task','prev_task'):\n if len(self.tasks):\n if self.current_task is None:\n if self.pending_command == 'next_task':\n self.current_task = 
self.tasks[0]\n if self.pending_command == 'prev_task':\n self.current_task = self.tasks[-1]\n else:\n try:\n i = self.tasks.index(self.current_task)\n rospy.loginfo('nextTask: current task index: %d'%i)\n if self.pending_command == 'next_task':\n i += 1\n if i >= len(self.tasks):\n self.current_task = None\n else:\n self.current_task = self.tasks[i]\n if self.pending_command == 'prev_task':\n i -= 1\n if i < 0:\n self.current_task = None\n else:\n self.current_task = self.tasks[i]\n except ValueError:\n rospy.loginfo(\"nextTask: can't find current task index!\")\n self.current_task = None\n if self.current_task is None: #end of the list or error figuring out where in the list we were.\n if self.done_behavior == 'restart':\n self.current_task = self.tasks[0]\n elif self.done_behavior == 'hover':\n self.current_task = {'type':'hover'}\n position = self.robot_nav.positionLatLon()\n self.current_task['latitude'] = math.degrees(position[0])\n self.current_task['longitude'] = math.degrees(position[1])\n if self.current_task is not None and self.current_task['type'] == 'mission_plan':\n self.current_task['current_nav_objective_index'] = None\n self.current_task['current_path'] = None\n\n \n if (self.current_task is not None and\n self.current_task['type'] == 'mission_plan' and\n (self.pending_command is not None and\n ( self.pending_command.startswith('goto_line') or\n self.pending_command.startswith('start_line')))):\n parts = self.pending_command.strip().split(None,1)\n if len(parts) == 2:\n cmd = parts[0]\n line_no = int(parts[1])\n if line_no >= 0 and line_no < len(self.current_task['nav_objectives']):\n self.current_task['current_nav_objective_index'] = line_no\n self.current_task['current_path'] = None\n if cmd == 'goto_line':\n self.current_task['do_transit'] = False\n if cmd == 'start_line':\n self.current_task['do_transit'] = True\n \n \n self.pending_command = None", "title": "" }, { "docid": "d1b8f00d463ac255ee11491601019cd4", "score": "0.6094853", "text": "def step_with_counter(self):\n # load the latest value out of the cache\n self.load()\n\n value = self.data['progress']\n if value + self.increment_value() >= 100.0:\n value = 100.0\n else:\n value += self.increment_value()\n\n self.data['progress'] = value\n self.data['status'] = 'running'\n self.data['completed_records'] = round(value / 100.0 * self.data['total_records'])\n self.data['status_message'] = f'{self.data[\"completed_records\"]:,} / {self.data[\"total_records\"]:,}'\n\n self.save()\n\n return self.result()", "title": "" }, { "docid": "84aa0935b6707fb02a7e9ad2100ca10f", "score": "0.60759234", "text": "def step(self, incement=1):\n self.steps += incement\n self.run_pending()", "title": "" }, { "docid": "1b847a3ddc76ac919bada089e03d3471", "score": "0.60590196", "text": "def jumpToNext(self):\n where = self.__getNextTrackIdx()\n if where != -1:\n self.jumpTo(where)", "title": "" }, { "docid": "92245f5ea498c560b89af723e4c7770a", "score": "0.6054898", "text": "def ProgressDone(self):\r\n pass", "title": "" }, { "docid": "7e6d168a910dc3e6bfdfce84e1bd0cd8", "score": "0.60338706", "text": "def set_progress(self, progress):", "title": "" }, { "docid": "12a8caca10047d3abaa1ec31e21a0405", "score": "0.60295206", "text": "def progress(self, progress):\n self._send(\"PROGRESS {progress}\".format(progress=int(progress)))", "title": "" }, { "docid": "5a1da474f96a36f98260a0f1bb03207f", "score": "0.59963804", "text": "def _advance_progress_bar(self):\n curr_val = self._pair_idx\n max_val = self._progress_bar.maximum()\n 
self._progress_bar.setValue(curr_val + (max_val - curr_val) / 100)", "title": "" }, { "docid": "6970d89b8f470d49894df98b03d2014e", "score": "0.59792674", "text": "def find_next_task(self):\n raise NotImplementedError", "title": "" }, { "docid": "40fcfb815516e5f251ffe5ca5987a114", "score": "0.59696347", "text": "def _on_progress(self, num):\n\t\tself._num_progresses += num\n\t\tself._log.debug(\"progress incrementing by {}\".format(num))\n\t\tself._host_comms.send_msg(\"progress\", num)", "title": "" }, { "docid": "42c8504d906a2a9a77d8d823f1fcb9e6", "score": "0.5962047", "text": "def next_task(self, timeout=None):\n raise NotImplementedError()", "title": "" }, { "docid": "1965a5429999496268b4a5464cf09e2e", "score": "0.591434", "text": "def _update_next_n(self, step):\n self._next_n += step", "title": "" }, { "docid": "a807d26ec3d2e70cf428c1bbf49c20e3", "score": "0.5903467", "text": "def _update_next_batch(self):\n pass", "title": "" }, { "docid": "db5736c973f9fb5a9f5ecec5eb0e79e8", "score": "0.58929515", "text": "def next_starter(self):\n self.start += 1", "title": "" }, { "docid": "e87b4ad78a3fb1f1c9715c128b66855a", "score": "0.58881956", "text": "def on_progress(self,step,pct):\n pass", "title": "" }, { "docid": "b31bacb2ff07a6f81177e3093ddca10e", "score": "0.58642966", "text": "def step(self, item=None):\n\n if self.is_cancelled():\n raise CancelledError\n if not self.is_busy():\n self.set_value(self.count * 100 / self.total)\n self.set_message(self.message_factory(item))\n self.last_step_time = time.time()\n self.count += 1", "title": "" }, { "docid": "824638d962ac8d091cb5159ff51e896b", "score": "0.5857854", "text": "def step(self):\n self._step()", "title": "" }, { "docid": "33b2a94ba610b877dc645fde20aa5fe1", "score": "0.58394206", "text": "def step(self):\n if self.step_index == (len(self._data) - 1):\n self.step_index = 0 #cycle back\n else:\n self.step_index += 1", "title": "" }, { "docid": "e2e818119b1f52fadf6ddec09c32075c", "score": "0.58355355", "text": "def request_next_task(self):\n\n return self.api.request_next_task()", "title": "" }, { "docid": "c1f27b1200a9f684f74220b6385bb0ad", "score": "0.5812973", "text": "async def _consume(self):\n for _ in range(1, self.pagenums + 1):\n batch_e = await self.batch.get()\n await batch_e.task\n self.batch.task_done()", "title": "" }, { "docid": "d9f4c9612e90593f71103b62e6ffd6b2", "score": "0.5810154", "text": "def task_inc(self):\n self._item_counter.increment()", "title": "" }, { "docid": "38af3a93fc7064277339a0f141c57eb1", "score": "0.58059657", "text": "def progress(current, total):\n update('[%d/%d] ' % (current, total))", "title": "" }, { "docid": "a81a6921dc952b640a178c0a06e312c4", "score": "0.57933533", "text": "def join(self):\n self.progress.print_update()\n with self.all_tasks_done:\n while self.unfinished_tasks:\n self.progress.print_update()\n # Use a short wait timeout so updates are printed in a timely manner.\n # TODO(maruel): Find a way so Progress.queue and self.all_tasks_done\n # share the same underlying event so no polling is necessary.\n self.all_tasks_done.wait(0.1)\n self.progress.print_update()", "title": "" }, { "docid": "e835676b167c4ee769886bcfffc1b2b2", "score": "0.5782976", "text": "def progresslabel(self):\r\n global stop_thread\r\n stop_thread = \"1\"\r\n while stop_thread == \"1\":\r\n time.sleep(0.01)\r\n self.pb.task = self.scraper.task\r\n self.pb.value = self.scraper.value\r\n self.pb.max = self.scraper.max", "title": "" }, { "docid": "50e45d9749c86322b56f4958b470126e", "score": "0.5782428", "text": 
"def update_progress():\r\n if self.is_set():\r\n timer.cancel()\r\n return\r\n self.set_progress(self._progress_percent + one_progress)", "title": "" }, { "docid": "fc4ef73bc173ce211c933366ac276e82", "score": "0.57822496", "text": "def next(self, task, sender):\n next_recipient = self._get_next_recipient(task)\n\n self.log(\"%s -> %s\" % (self.index, next_recipient))\n return next_recipient", "title": "" }, { "docid": "d702b9a181dc71a21abc7bf83d54c9b6", "score": "0.57800746", "text": "def start(self):\n self.state = 'progress'", "title": "" }, { "docid": "defe63af1cc8c9b7bd7abc4f6baad0ab", "score": "0.5778478", "text": "def progress(self, steps_completed=-1):\n if steps_completed < 0:\n self.completed += 1\n steps_completed = self.completed\n if steps_completed % ceil(self.total_steps / self.resolution) == 0:\n self.progress_visualization(steps_completed)\n if steps_completed == self.total_steps:\n self.finish()", "title": "" }, { "docid": "57647fca72854bdcf8cc8cb45034aca1", "score": "0.57767886", "text": "def _progress(self):\n\n if self._verbosity is Verbosity.ALL:\n text = '[Progress: {0:3.2f}% {1}/{2}'.format(\n self._finished_tasks / self._total_tasks * 100.0,\n self._finished_tasks, self._total_tasks)\n # optionally add the estimated time to complete\n if self._timer:\n run_time = time.time() - self._start_time\n exec_rate = (self._successful_tasks + self._failed_tasks) / run_time\n if exec_rate == 0.0:\n text += ' INFINITY'\n else:\n est_time = (self._total_tasks - self._finished_tasks) / exec_rate\n text += ' {0}'.format(_time_string(est_time))\n text += ']'\n # log\n if self._log:\n print(text, file=self._log)\n # print\n if USE_TERM_COLOR:\n text = colored(text, 'magenta')\n print(text)", "title": "" }, { "docid": "4e3cfd193ca2c47cc42f7887289e442d", "score": "0.5774149", "text": "def progress(self):\n return tqdm(self, total=len(self))", "title": "" }, { "docid": "4e3cfd193ca2c47cc42f7887289e442d", "score": "0.5774149", "text": "def progress(self):\n return tqdm(self, total=len(self))", "title": "" }, { "docid": "95b7d6cfbcbe338b922e7175542cede3", "score": "0.5769303", "text": "def step(self):\n self.steps += 1\n self._update()\n self._observe()", "title": "" }, { "docid": "3918278e63fb07b8802df66ab2a514e6", "score": "0.576793", "text": "def set_current_task(self, index):\r\n self.task_manager.set_current_task(index)", "title": "" }, { "docid": "c837fcaf74f8c56553f76d762a4bfe99", "score": "0.5765392", "text": "def test_progress():\n from time import sleep\n\n for index, item in ShowingProgress(range(100), seconds=4):\n sleep(0.237)", "title": "" }, { "docid": "a7ef0631e714a1199427411c0b789730", "score": "0.5761473", "text": "def increment(self) -> None:\n self.step += 1", "title": "" }, { "docid": "ff835180bd4bf85803539f568fc8a8ad", "score": "0.57600147", "text": "def next(self):\n #increments current or sets to 0 if == to number of\n #songs in playlist\n self.current = (self.current + 1) % len(self.playlist)\n self.play(self.current)", "title": "" }, { "docid": "a436daa91d76faab5908098282d18ac3", "score": "0.5757235", "text": "def step(self):\n self.ep.kStep += 1\n self.exchange_data()", "title": "" }, { "docid": "a35ea35f32aa70c72079b53f697f13be", "score": "0.5752344", "text": "def __enter__(self) -> \"Progress\":\r\n print(\"\\n\")\r\n self._progress.start()\r\n return self", "title": "" }, { "docid": "8f8fa9b9cb76219ec32262177ede9afa", "score": "0.5749334", "text": "def progress(self, index, reward):\n self._progress.value = index\n self._label.value = (\n \"{name}: 
{size}/{index}, Best reward: {reward}\".format(\n name=self.name,\n size=self.size,\n index=index,\n reward=reward\n )\n )", "title": "" }, { "docid": "ccbd5e3ff3a3342a632ffcd8a56933a2", "score": "0.574552", "text": "def advance(self):\n\t\tself._set_next_marker()", "title": "" }, { "docid": "80e38732552335f55143be87cbb6bc02", "score": "0.57330096", "text": "def step(self):\n while not self.done:\n self.action = self.pick_action()\n self.conduct_action(self.action)\n if self.time_for_q_network_to_learn():\n for _ in range(self.hyperparameters[\"learning_iterations\"]):\n self.learn()\n self.save_experience()\n self.state = self.next_state #this is to set the state for the next iteration\n self.global_step_number += 1\n self.episode_number += 1", "title": "" }, { "docid": "6c3451febd6b786f0f00908fab42164f", "score": "0.5718386", "text": "def _advance(self):\n self._current += 1", "title": "" }, { "docid": "ea53537fb77757c8bcc2930268af4c17", "score": "0.5702396", "text": "def finish_current_task(self):\n # for instatiating the product\n if self.previous_task == None:\n self.next_task = self.production_system.tasks_for_product[self.product_type][0]\n #return self.next_task\n else:\n number_of_tasks = len(self.production_system.tasks_for_product[self.product_type])\n for i in range(0,number_of_tasks):\n # when previous_task is task at i-th step, set i+1-th task as next_task\n if self.previous_task == self.production_system.tasks_for_product[self.product_type][i]:\n #if previous_task was last task, set product as finished, next_task as None\n if i == number_of_tasks - 1:\n self.next_task = None\n self.finished = True\n #return self.next_task\n else:\n self.next_task = self.production_system.tasks_for_product[self.product_type][i+1]\n #return self.next_task", "title": "" }, { "docid": "d19d4faee240aba129912787c8dfd140", "score": "0.56953317", "text": "def end_progress(self):", "title": "" }, { "docid": "d19d4faee240aba129912787c8dfd140", "score": "0.56953317", "text": "def end_progress(self):", "title": "" }, { "docid": "d35a50200432521c5445cda2b8b840c7", "score": "0.56906754", "text": "def _advance(self):\r\n self._current += 1", "title": "" }, { "docid": "eb409e5e7aea74073fd56cb09b43c04a", "score": "0.56787276", "text": "def in_parallel_by(self, tasks, amount):\n for index, task in enumerate(tasks):\n if index % amount == 0:\n self._priority.next()\n task['priority'] = self._priority.current", "title": "" }, { "docid": "300d7c277b7900f436c05dc9d150f401", "score": "0.56774676", "text": "def _advance(self):\n\t\tself.current += 1", "title": "" }, { "docid": "56ebca6928c22dc0af6c0dc1c8df0e4c", "score": "0.5670732", "text": "def next(self):\n print('Next')\n skip_track(self.url)", "title": "" }, { "docid": "a27182cd870e9f544bae7be2fcd7d9d6", "score": "0.56697786", "text": "def _advance(self):\n\t\tself._current += 1", "title": "" }, { "docid": "4432e36f225442e28aa9e767af23c5a0", "score": "0.5669733", "text": "def increment_progress(self, increment_by):\n @synchronized('progress_'.join(self._inst_uuid), 'nova-prog-')\n def _inc_progress():\n global __INST_PROGRESS__\n if not self._inst_uuid in __INST_PROGRESS__:\n LOG.error(_(\"The progress updater has not started\"\n \" for virtual machine %s.\") % self._inst_uuid)\n raise pvcex.IBMPowerVMProgressUpdateError(uuid=self._inst_uuid)\n # make sure it's a positive numeric value\n if increment_by > 0 and isinstance(increment_by, numbers.Number):\n progress = round(increment_by)\n progress += __INST_PROGRESS__[self._inst_uuid]\n LOG.debug(\"current 
progress %s\" %\n __INST_PROGRESS__[self._inst_uuid])\n if progress > 100:\n progress = round(100)\n LOG.debug(\"deploy_progress_update %s\" % progress)\n conductor.API().instance_update(self._context,\n self._inst_uuid,\n progress=progress)\n __INST_PROGRESS__[self._inst_uuid] = progress\n\n _inc_progress()", "title": "" }, { "docid": "ec10e0c167df9da2b41681be308f5d12", "score": "0.5668581", "text": "def UpdateProgress(self, item):\r\n if item.state == STATE_GOT:\r\n count = self.result_db.StoreEntities(item.download_result.keys,\r\n item.download_result.entities)\r\n self.db.DeleteKey(item.progress_key)\r\n self.entities_transferred += count\r\n else:\r\n self.db.UpdateState(item.progress_key, item.state)", "title": "" }, { "docid": "b68aa3fe7e20309ae320959b5fab3954", "score": "0.56646913", "text": "def act(self):\n self.progress = ActionProgress(ActionState.AWAITING_PROMT, [])\n yield self._step_progress()\n self.await_promt()\n yield self._step_progress()\n self._send_command(self.action.command)\n self.await_promt()\n yield self._step_progress()\n # TODO Make failure handling to \n result = ActionResult(ActionResultState.SUCCESS, 0, self.progress.raw_output, [])\n yield (self.action, self.progress, result)", "title": "" }, { "docid": "08b029e79a727bbbf6202fa5961b126a", "score": "0.56637853", "text": "def advance(self):\n self.index+=1\n if self.index>=len(self.file_content): self.index = None\n else:\n self.time = self.file_content[self.index][0]", "title": "" }, { "docid": "3fb66427a7d061a644758eb91c8066bc", "score": "0.56625485", "text": "def NextItemStep(self, buffer=None):\r\n\r\n step = (len(buffer)-1) / 100 * config.main['client']['step']\r\n self.NextItem(step=step)", "title": "" }, { "docid": "1276ac93d5f05f1b6ad7f2b8a765af33", "score": "0.56496054", "text": "def next_step(self, index, title):\n self.display(index, colorama.Fore.CYAN)\n self.display(title, colorama.Fore.CYAN, True)", "title": "" }, { "docid": "1c92eb03d5fc46107c6d348d63275fb0", "score": "0.56494606", "text": "def to_trial(self):\n self.next_block()", "title": "" }, { "docid": "8ec72bfc1d4a360f3f5d3b931785a8d7", "score": "0.5643822", "text": "def start_next_step(self):\n self.current_leader_step = self.current_figure.leader_steps[self.step_index]\n self.leader.set_free_foot(self.current_leader_step.foot)\n self.current_follower_step = self.current_figure.follower_steps[self.step_index]\n self.follower.set_free_foot(self.current_follower_step.foot)\n\n self.time_at_next_step = self.current_time + self.current_leader_step.duration\n\n for foot in range(Foot.BOTH):\n self.leader.pivot(foot, self.current_leader_step.pre_step_pivot)\n leader_update_vector = self.current_leader_step.get_update_vector(foot, self.leader)\n self.leader.set_delta_pos(foot, leader_update_vector)\n for foot in range(Foot.BOTH):\n self.follower.pivot(foot, self.current_follower_step.pre_step_pivot)\n follower_update_vector = self.current_follower_step.get_update_vector(foot, self.follower)\n self.follower.set_delta_pos(foot, follower_update_vector)", "title": "" }, { "docid": "7741f202df9955951d34c147627c0922", "score": "0.56432223", "text": "def _progress(self, progress, message):\n self.progress = progress\n self.print_progress(message=message)", "title": "" }, { "docid": "47bd5454e9d3f03bacfffe9bf7b042a9", "score": "0.5641232", "text": "def next(self):\n if self._current < len(self._data) - 1:\n self._current += 1\n self.update()", "title": "" }, { "docid": "cfbfc1cb0fdce09d2d03b8abdbb741b0", "score": "0.5630476", "text": "def 
proceed_to_next_player_turn(self):\n while True:\n player = self.get_player()\n player_i = self.things.index(player)\n for thing in self.things[player_i+1:]:\n thing.proceed()\n self.turn += 1\n for thing in self.things[:player_i]:\n thing.proceed()\n player.proceed(is_AI=False)\n if player.task is None:\n break", "title": "" }, { "docid": "b045498fd3645d02cef1d73a0cf5eea6", "score": "0.56234753", "text": "def advance(self):\n self.command = self.flines[self.index].split()\n self.index = self.index + 1", "title": "" }, { "docid": "521134f7f3ce54bea65bf4e6930410c9", "score": "0.5604975", "text": "def on_next(self):\n self.notify(self.actions.next_value())", "title": "" }, { "docid": "54e5fb76af1976bbafb4b7ca09bf49f7", "score": "0.560117", "text": "def set_current_task(self, index):\r\n self.current_task_index = int(index)", "title": "" }, { "docid": "639911238e0fdbac7f60f9f6260bc490", "score": "0.5598425", "text": "def start_progress(self, header=None):", "title": "" }, { "docid": "639911238e0fdbac7f60f9f6260bc490", "score": "0.5598425", "text": "def start_progress(self, header=None):", "title": "" }, { "docid": "c8a8555c19959a95395f293ff3767123", "score": "0.5590748", "text": "def ProgressBegin(self):\r\n pass", "title": "" }, { "docid": "7c0ba54644150fe58adf2e8cc74a06c6", "score": "0.55907106", "text": "async def work(self, index):\n logging.warning(\"Worker[%s] start\", index)\n\n headers = {\"User-Agent\": make_random_useragent(), \"Accept-Encoding\": \"gzip\"}\n session = aiohttp.ClientSession(loop=self.loop, headers=headers)\n try:\n while True:\n # get a task\n priority, url, keys, deep, repeat = await self.get_a_task(task_name=TPEnum.URL_FETCH)\n\n # fetch the content of a url ================================================================\n fetch_result, content = await self.fetch(session, url, keys, repeat)\n if fetch_result > 0:\n self.update_number_dict(TPEnum.URL_FETCH, +1) # =======================\n\n # parse the content of a url ============================================================\n self.update_number_dict(TPEnum.HTM_NOT_PARSE, +1)\n parse_result, url_list, save_list = await self.parse(priority, url, keys, deep, content)\n self.update_number_dict(TPEnum.HTM_NOT_PARSE, -1)\n\n if parse_result > 0:\n self.update_number_dict(TPEnum.HTM_PARSE, +1) # =======================\n\n # add new task to self.queue\n for _url, _keys, _priority in url_list:\n self.add_a_task(TPEnum.URL_FETCH, (_priority, _url, _keys, deep+1, 0))\n\n # save the item of a url ============================================================\n for item in save_list:\n self.update_number_dict(TPEnum.ITEM_NOT_SAVE, +1)\n save_result = await self.save(url, keys, item)\n self.update_number_dict(TPEnum.ITEM_NOT_SAVE, -1)\n\n if save_result:\n self.update_number_dict(TPEnum.ITEM_SAVE, +1) # =======================\n elif fetch_result == 0:\n self.add_a_task(TPEnum.URL_FETCH, (priority+1, url, keys, deep, repeat+1))\n else:\n pass\n\n # finish a task\n self.finish_a_task(task_name=TPEnum.URL_FETCH)\n\n # print the information of this pool\n if self.number_dict[TPEnum.URL_FETCH] % 100 == 0:\n self.print_status()\n except asyncio.CancelledError:\n pass\n\n session.close()\n logging.warning(\"Worker[%s] end\", index)\n return", "title": "" }, { "docid": "abdfd9c15a2db9ad4f8145c67e02544f", "score": "0.5586679", "text": "def setProgress(self, progress):\n self.progress = progress", "title": "" }, { "docid": "79d17fcba3c543f7294ccf4a7428351e", "score": "0.5585646", "text": "def progress_game(self):\n for _ in 
range(self.rounds):\n self.progress_round()", "title": "" }, { "docid": "7c8802e933fcf4069a1a7a133b44fd7a", "score": "0.55756074", "text": "def progress(iter, prog, final):\n arg = np.floor(float(iter) / float(final) * 10.0)\n\n if arg > prog:\n\n print(\"Done \" + str(arg * 10) + \" %\")\n prog = arg\n\n return prog", "title": "" }, { "docid": "4cd4b393391d8bf9a19dfef9e7ab9fb6", "score": "0.55744714", "text": "def next_timestep(self):\r\n self.currentdate += 1\r\n if self.currentdate >= self.notradingdays-1: # index number vs. count\r\n self.done = True\r\n return self.done", "title": "" }, { "docid": "f8ee9db5483dc9945484c0f96dfc6245", "score": "0.5574094", "text": "def PerformWork(self):\r\n while not self.exit_flag:\r\n try:\r\n item = self.progress_queue.get(block=True, timeout=1.0)\r\n except Queue.Empty:\r\n continue\r\n if item == _THREAD_SHOULD_EXIT:\r\n break\r\n\r\n if item.state == STATE_READ and item.progress_key is None:\r\n item.progress_key = self.db.StoreKeys(item.kind,\r\n item.key_start,\r\n item.key_end)\r\n else:\r\n assert item.progress_key is not None\r\n self.UpdateProgress(item)\r\n\r\n item.progress_event.set()\r\n\r\n self.progress_queue.task_done()\r\n\r\n self.db.ThreadComplete()", "title": "" }, { "docid": "eb7c6fb2c38471e585c15032a4e5b77a", "score": "0.5566322", "text": "def run_to_completion(self):\n assert self._started\n for call in self._clock.getDelayedCalls():\n amount = max(0, call.getTime() - self._clock.seconds())\n self._clock.advance(amount)", "title": "" }, { "docid": "e556d976f6106e13200b6d5be7f3b19f", "score": "0.5558604", "text": "def next_iteration(self):", "title": "" }, { "docid": "a28f84e86f92b00df18e3f1825d634aa", "score": "0.5558007", "text": "def setProgress(self, value):\n pass", "title": "" }, { "docid": "d6db452f870f40e8c64ff7b87cfa03c0", "score": "0.5553805", "text": "def on_progress(self, progress):\n self.progressBarSet(progress, processEvents=None)", "title": "" }, { "docid": "f2bc9a4eee571fb942c644b4d3ba3ae6", "score": "0.55531883", "text": "def nextIteration(self):\n\n # get id for url\n self.newCallback(\n self.supervisor.urlRepository.generateUrlId(),\n deferredaction.DeferredUrlId())", "title": "" }, { "docid": "dfba81a660ad616d1bb9a94f5e41dbf0", "score": "0.5547851", "text": "def in_progress(self, in_progress):\n\n self._in_progress = in_progress", "title": "" }, { "docid": "a77d4dbadb39e8003a25602b4d9f1beb", "score": "0.5540619", "text": "def callback_sent(self):\n # progress bar update\n self._pbar_lock.acquire()\n var = self.variables['progressbar']\n var.set(var.get() + 1)\n self._pbar_lock.release()", "title": "" }, { "docid": "d1fb46c661078b6d538187d760148e7b", "score": "0.5534875", "text": "def UpdateProgress(self, item):\r\n if item.state == STATE_GOT:\r\n self.entities_transferred += item.count\r\n self.db.DeleteKey(item.progress_key)\r\n else:\r\n self.db.UpdateState(item.progress_key, item.state)", "title": "" }, { "docid": "bd3acb9d238b0dbfb50819685af20671", "score": "0.5534618", "text": "def nextTrack(self):\n self._trackChange(TRACK_NEXT)", "title": "" }, { "docid": "b752ededcdcf2503acd95e1716614c0e", "score": "0.5527086", "text": "def next(self):\n if self.current_task_idx > len(self.tasks) - 1:\n raise StopIteration\n else:\n self.current_task_idx += 1\n return self.tasks[self.current_task_idx - 1]", "title": "" }, { "docid": "a93ade37893234537b0ac40a30b3bbba", "score": "0.551957", "text": "def next(self):\n self._count += 1\n if self._count >= self._maxCount:\n self._count -= 1\n raise MutatorCompleted()", 
"title": "" }, { "docid": "a93ade37893234537b0ac40a30b3bbba", "score": "0.551957", "text": "def next(self):\n self._count += 1\n if self._count >= self._maxCount:\n self._count -= 1\n raise MutatorCompleted()", "title": "" }, { "docid": "a93ade37893234537b0ac40a30b3bbba", "score": "0.551957", "text": "def next(self):\n self._count += 1\n if self._count >= self._maxCount:\n self._count -= 1\n raise MutatorCompleted()", "title": "" }, { "docid": "a93ade37893234537b0ac40a30b3bbba", "score": "0.551957", "text": "def next(self):\n self._count += 1\n if self._count >= self._maxCount:\n self._count -= 1\n raise MutatorCompleted()", "title": "" }, { "docid": "a93ade37893234537b0ac40a30b3bbba", "score": "0.551957", "text": "def next(self):\n self._count += 1\n if self._count >= self._maxCount:\n self._count -= 1\n raise MutatorCompleted()", "title": "" }, { "docid": "882824c7b75fa36eea37986393ec1472", "score": "0.551668", "text": "def do_next_backup(self):\n self.logger.info(\"starting next backup action...\")\n try:\n rclone.backup(self.next_backup_args)\n status = backup_stats.ACTION_STATUS.DONE\n backup_history.log_backup_action(self.next_backup_args)\n except Exception as err:\n self.logger.error(\"backup action failed!\", err)\n status = backup_stats.ACTION_STATUS.FAIL\n finally:\n backup_stats.update_stats(\n self.next_backup_args,\n status\n )\n backup_plan.remove_action(self.next_backup_args)", "title": "" }, { "docid": "abb56ef3723ad9644858f66aea304163", "score": "0.5515874", "text": "def progress(self, percent, message=''):", "title": "" } ]