Dataset schema and column statistics:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: sequence (19 to 20 items per record)
metadata: dict
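A minimal sketch of reading records with this schema, assuming the split is exported as JSON Lines; the file name and loading code below are illustrative, not part of the dataset:

import json

# Hypothetical file name; only the field names follow the schema listed above.
with open("train.jsonl") as handle:
    for line in handle:
        record = json.loads(line)
        query = record["query"]          # natural-language description of the code
        document = record["document"]    # the matching (positive) code snippet
        negatives = record["negatives"]  # 19-20 non-matching code snippets
        objective = record["metadata"]["objective"]  # which (query, document, negatives) pairing is used for training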
Categorical/softmax crossentropy loss with masking
def masked_categorical_crossentropy(y_true, y_pred):
    mask = y_true[:, -1]
    # y_true = y_true[:, :-1]
    loss = K.categorical_crossentropy(target=y_true, output=y_pred, from_logits=True)
    mask = K.cast(mask, dtype=np.float32)
    loss *= mask
    return K.mean(loss, axis=-1)
[ "def masked_softmax_cross_entropy(preds, labels, mask):\r\n loss = -tf.reduce_sum(labels*tf.log(tf.nn.softmax(preds)+1e-7), axis=1)\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n mask /= tf.reduce_mean(mask)\r\n loss *= mask\r\n return tf.reduce_mean(loss)", "def _cross_entropy_loss(self, y_true_clf, y_pred_clf, training_mask):\n return torch.nn.functional.binary_cross_entropy(y_pred_clf*training_mask, (y_true_clf*training_mask))", "def loss(y_true, y_pred):\n return categorical_crossentropy(y_true=y_true, y_pred=y_pred)", "def categorical_cross_entropy(pred, expect):\n\n return -np.sum( np.log( (pred*expect)[ (pred*expect) > 0]) ) / pred.shape[0]", "def onehot_cross_entropy(self, input, target):\n logsoftmax = nn.LogSoftmax() # :math:`f_i(x) = log(exp(x_i) / sum_j exp(x_j) )`\n return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))", "def logit_binary_cross_entropy_loss(x,\n y):\n return optax.sigmoid_binary_cross_entropy(x, y).mean()", "def test_cross_entropy():\n net = CategoricalCrossEntropy()\n probs_b = Tensor([0.3, 0.1, 0.6], dtype=dtype.float32)\n probs_a = Tensor([0.7, 0.2, 0.1], dtype=dtype.float32)\n ans = net(probs_b, probs_a)\n assert isinstance(ans, Tensor)", "def test_cross_entropy_one_hot():\n y_pred = torch.tensor([[0.2, 0.8], [0.9, 0.1]])\n y_true = torch.tensor([[1, 0], [1, 0]])\n\n loss = MultiLabelCrossEntropy()\n assert abs(loss(y_pred, y_true).item() - 0.70429) < 1e-2", "def NLLLoss_mask(pred, target, mask):\n\n pred = torch.gather(pred, 1, target.view(-1, 1))\n cross_entropy = - pred.squeeze(1)\n loss = cross_entropy.masked_select(mask).sum()\n return loss", "def ctc_loss(inputs, padding_mask=-1, **kwargs):\n inputs[0] = activation_ops.softmax(inputs[0], axis=2)\n if context.executing_eagerly():\n raise NotImplementedError\n return OpLib.add('CTCLoss', inputs, padding_mask=padding_mask, **kwargs)", "def binary_cross_entropy_loss(x, y):\n return -jnp.mean(y * jnp.log(jnp.clip(x, a_min=utils.EPS)) +\n (1 - y) * jnp.log(jnp.clip(1 - x, a_min=utils.EPS)))", "def softmax_cross_entropy_loss(self):\n self._log_accuracy()\n loss_cls = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.proposals.data.get_field('gt_classes'),\n logits=self.pred_class_logits\n )\n )\n loss_cls = tf.cond(\n tf.shape(self.pred_class_logits)[0] > 0, lambda: loss_cls, lambda: 0.\n )\n return loss_cls", "def binary_classification_loss(concat_true, concat_pred):\n t_true = concat_true[:, 1]\n t_pred = concat_pred[:, 2]\n t_pred = (t_pred + 0.001) / 1.002\n losst = tf.reduce_sum(K.binary_crossentropy(t_true, t_pred))\n\n return losst", "def binary_cross_entropy(\n pred: Tensor, label: Tensor, with_logits: bool = True, reduction: str = \"mean\",\n) -> Tensor:\n if not with_logits:\n return -(label * log(pred) + (1 - label) * log(1 - pred))\n # logsigmoid(pred) and logsigmoid(-pred) has common sub-expression\n # hopefully the backend would optimize this\n return -(label * logsigmoid(pred) + (1 - label) * logsigmoid(-pred))", "def binary_cross_entropy(y, t_data):\r\n x = np.maximum(y, 10**-15)\r\n x = np.minimum(x, 1 - 10**-15)\r\n return -np.sum(t_data * np.log(x) + (1 - t_data) * np.log(1 - x))", "def mask_rcnn_loss(mask_outputs,\n mask_targets,\n class_targets):\n batch_size, num_masks, mask_height, mask_width = mask_outputs.shape\n weights = jnp.tile(\n (class_targets > 0).reshape([batch_size, num_masks, 1, 1]),\n [1, 1, mask_height, mask_width])\n\n mask_loss = sigmoid_cross_entropy(\n mask_outputs, mask_targets, weights=weights)\n\n return mask_loss", "def 
classification_loss(self, classifier, pos_box_ind, neg_box_ind):\n \n # Gather up the classifier values at the negative and \n # positive indexes:\n with tf.variable_scope(\"rpn_cls_loss\"):\n pos_class = tf.gather(classifier, pos_box_ind)\n\n pos_class = tf.cond(tf.rank(pos_class) > 2,\n true_fn = lambda: tf.squeeze(pos_class, axis=1),\n false_fn = lambda: pos_class)\n\n neg_class = tf.gather(classifier, neg_box_ind)\n\n neg_class = tf.cond(tf.rank(neg_class) > 2,\n true_fn = lambda: tf.squeeze(neg_class, axis=1),\n false_fn = lambda: neg_class)\n\n # Set up the \"true\" answers:\n pos_true = tf.zeros(tf.shape(pos_class)) + (0,1,)\n neg_true = tf.zeros(tf.shape(neg_class)) + (1,0,)\n\n # Now, collect pos and negative into one:\n true_labels = tf.concat((pos_true, neg_true), axis=0)\n class_labels = tf.concat((pos_class, neg_class), axis=0)\n\n # Finally, convert this into cross entropy loss:\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=true_labels,\n logits=class_labels))\n\n return cross_entropy", "def loss_func(y_true, y_pred):\n\n return tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1, reduction=\"none\")(y_true, y_pred)", "def binxent(output, target):\n return T.nnet.binary_crossentropy(output, target).sum(axis=1).mean()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
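A hedged usage sketch of the masked loss above. Note that the snippet keeps the mask column in y_true (the slicing line is commented out), so the illustrative logits below carry a matching dummy column purely to keep the shapes aligned; the numbers are made up:

import numpy as np
from tensorflow.keras import backend as K

# Labels: last column is the per-sample mask (1 = count this sample, 0 = ignore it).
y_true = K.constant(np.array([[0., 1., 0., 1.],
                              [1., 0., 0., 0.]], dtype=np.float32))
y_pred = K.constant(np.array([[0.2, 2.0, 0.1, 0.0],
                              [1.5, 0.3, 0.2, 0.0]], dtype=np.float32))

loss = masked_categorical_crossentropy(y_true, y_pred)
print(float(loss))  # only the first row contributes; the second is zeroed out by its mask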
Tests the packet `size`.
def test_size():
    assert Packet2.size == 6
[ "def test_size():\n assert Packet12.size == 1", "def check_size(msg):\n\n if len(msg) > TWEET_SIZE:\n return False\n return True", "def __check_size__(self, size):\n # size must be an integer, otherwise raise a TypeError exception\n if type(size) != int:\n raise TypeError(\"size must be an integer\")\n # if size is less than 0, raise a ValueError\n if size < 0:\n raise ValueError(\"size must be >= 0\")", "def verify_layer_size(layer_size, l_num):\r\n\r\n if (isinstance(layer_size, int) and layer_size > 0):\r\n return layer_size\r\n else:\r\n raise ValueError(\"'\" + str(layer_size) + \"' (layer \" + str(l_num + 1) + \")\")", "def assert_sample_queue_size(self, stream_name, size):\n if(not self.data_subscribers.samples_received.has_key(stream_name) and size == 0):\n return\n\n self.assertTrue(self.data_subscribers.samples_received.has_key(stream_name), msg=\"Sample queue does not exists\")\n self.assertEqual(len(self.data_subscribers.samples_received.get(stream_name)), size)", "def test_resolution(self, size: (float, float)) -> (bool, (float, float)):\n ret1 = self._stream.set(CAP_PROP_FRAME_WIDTH, size[0])\n ret2 = self._stream.set(CAP_PROP_FRAME_HEIGHT, size[1])\n ret, frame = self._stream.read()\n y, x = frame.shape[0], frame.shape[1]\n if ret1 and ret2 and size[0] == x and size[1] == y:\n return True, size\n else:\n return False, (self._stream.get(CAP_PROP_FRAME_WIDTH), self._stream.get(CAP_PROP_FRAME_HEIGHT))", "def check_resize(extract):\n if not ethosn_available():\n return False\n\n return _ethosn.resize(extract)", "def test_getTileSize(self):\n assert_equal(self.testTile.getTileSize(), 50)", "def test_isc_utils_size_spec_passing(self):\n test_data = [\n 'unlimited',\n 'default',\n '1',\n '0',\n '100',\n '1K',\n '2k',\n '3M',\n '4m',\n '5G',\n '6g',\n ]\n result = size_spec.runTests(test_data, failureTests=False)\n self.assertTrue(result[0])", "def check_file_size(size, file_name):\n infile = open(file_name, 'at')\n string = \"if(fileSize < \" + size + \"){fclose(infile);return 0;}\"\n infile.write(string)\n infile.close()", "def is_length(message):\n\n if len(message) <= 25:\n return True\n else:\n return False", "def check_validitiy(packet):\n\n magicNumber = packet[0] << 8 | packet[1]\n packetType = packet[2] << 8 | packet[3]\n requestType = packet[4] << 8 | packet[5]\n length = len(packet)\n \n validPack = True\n \n if length != 6:\n print(\"The request packet must be 6 bytes long\")\n validPack = False\n elif magicNumber != 18814:\n print(\"The MagicNo must be 0x497E\")\n validPack = False\n elif packetType != 1:\n print(\"The PacketType must be 0x0001\")\n validPack = False\n elif requestType < 0 or requestType > 2:\n print(\"The RequestType must be 0x0001 or 0x0002\")\n validPack = False\n \n return validPack", "def verify_packet(self, packet, context):\n pass", "def test_subset_check_size(\n self, prepare_data_file):\n\n data_frame = phout.parse_phout(prepare_data_file)\n subset_data_frame = phout.subset(data_frame, 0, 5)\n\n assert subset_data_frame.shape[0] == 5, \\\n \"unexpected dataframe size value\"\n\n assert subset_data_frame['latency'].iloc[\n 0] == 5785, \"unexpected the first element value\"\n assert subset_data_frame['latency'].iloc[\n -1] == 5740, \"unexpected the last element value\"\n\n data_frame = phout.parse_phout(prepare_data_file)\n subset_data_frame = phout.subset(data_frame, 5, 10)\n\n assert subset_data_frame.shape[0] == 5, \\\n \"unexpected dataframe size value\"\n\n assert subset_data_frame['latency'].iloc[\n 0] == 4555, \"unexpected the first 
element value\"\n assert subset_data_frame['latency'].iloc[\n -1] == 4750, \"unexpected the last element value\"", "def assert_fraction_is_big_enough(fraction: float, size: int, verbose: bool) -> bool:\n calculation_inaccuracy = 10e-5\n min_frac = 1 / size\n y = min(fraction, 1 - fraction)\n if y + calculation_inaccuracy < min_frac:\n if verbose:\n print(f\"Warn: Split-fraction {fraction} is to small, it should be >= {min_frac}.\")\n return False\n return True", "def _check(self):\n self.assertSizes(\"_char\")\n self.assertSizes(\"_uint\")\n self.assertSizes(\"_ulong\")\n self.assertSizes(\"_double\")\n self.assertSizes(\"_longdouble\")\n self.assertSizes(\"_float\")", "def _validate_size(self, object_size):\n def make_error(name, value):\n ver = (\"?versionId=\"+self._version_id) if self._version_id else \"\"\n return ValueError(\n f\"Source {self._bucket_name}/{self._object_name}{ver}: \"\n f\"{name} {value} is beyond object size {object_size}\"\n )\n\n if self._offset is not None and self._offset >= object_size:\n raise make_error(\"offset\", self._offset)\n if self._length is not None:\n if self._length > object_size:\n raise make_error(\"length\", self._length)\n offset = self._offset or 0\n if offset+self.length > object_size:\n raise make_error(\"compose size\", offset+self._length)", "def test_enum_size(self) -> None:\n assert_that(len(CardTransactionResult), is_(5))", "def valid(packet):\n if is_udp(packet) and is_without_data(packet):\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Constructs a `PythonLogLevel` field.
def __init__(self, description=None):
    # type: (Optional[six.text_type]) -> None
    super(PythonLogLevel, self).__init__(
        logging.getLevelName(logging.DEBUG),
        logging.getLevelName(logging.INFO),
        logging.getLevelName(logging.WARNING),
        logging.getLevelName(logging.ERROR),
        logging.getLevelName(logging.CRITICAL),
        description=description,
    )
[ "def as_python_level(self) -> int:\n to_python_level = {\n LogLevel.CRITICAL: logging.CRITICAL,\n LogLevel.ERROR: logging.ERROR,\n LogLevel.WARNING: logging.WARNING,\n LogLevel.INFO: logging.INFO,\n LogLevel.DEBUG: logging.DEBUG}\n return to_python_level[self]", "def getLogLevel(self) -> \"int\":\n return _coin.ScXMLStateMachine_getLogLevel(self)", "def to_logging_level(rg_log_level):\n if rg_log_level == LogLevel.Debug:\n return logging.DEBUG\n elif rg_log_level == LogLevel.Verbose:\n return logging.INFO\n elif rg_log_level == LogLevel.Notice:\n return logging.WARNING\n elif rg_log_level == LogLevel.Warning:\n return logging.ERROR # Maybe a little un-intuitive ;)\n else:\n return logging.INFO", "def setLogLevel(self, *vals):\n return self.config.loglevel(vals)", "def log_level(self):\n return self._log_level if self._log_level is not None else debug.LOG_LEVEL", "def __init__(self, org_apache_sling_commons_log_level: ConfigNodePropertyDropDown=None, org_apache_sling_commons_log_file: ConfigNodePropertyString=None, org_apache_sling_commons_log_pattern: ConfigNodePropertyString=None, org_apache_sling_commons_log_names: ConfigNodePropertyArray=None, org_apache_sling_commons_log_additiv: ConfigNodePropertyBoolean=None): # noqa: E501\n self.openapi_types = {\n 'org_apache_sling_commons_log_level': ConfigNodePropertyDropDown,\n 'org_apache_sling_commons_log_file': ConfigNodePropertyString,\n 'org_apache_sling_commons_log_pattern': ConfigNodePropertyString,\n 'org_apache_sling_commons_log_names': ConfigNodePropertyArray,\n 'org_apache_sling_commons_log_additiv': ConfigNodePropertyBoolean\n }\n\n self.attribute_map = {\n 'org_apache_sling_commons_log_level': 'org.apache.sling.commons.log.level',\n 'org_apache_sling_commons_log_file': 'org.apache.sling.commons.log.file',\n 'org_apache_sling_commons_log_pattern': 'org.apache.sling.commons.log.pattern',\n 'org_apache_sling_commons_log_names': 'org.apache.sling.commons.log.names',\n 'org_apache_sling_commons_log_additiv': 'org.apache.sling.commons.log.additiv'\n }\n\n self._org_apache_sling_commons_log_level = org_apache_sling_commons_log_level\n self._org_apache_sling_commons_log_file = org_apache_sling_commons_log_file\n self._org_apache_sling_commons_log_pattern = org_apache_sling_commons_log_pattern\n self._org_apache_sling_commons_log_names = org_apache_sling_commons_log_names\n self._org_apache_sling_commons_log_additiv = org_apache_sling_commons_log_additiv", "def __init__(self, logger_module, min_level=\"INFO\"):\n self.logger_module = logger_module\n self.min_level_num = _LEVELS[min_level.upper()]", "def _log_level_from_verbosity(verbosity):\n if verbosity == 0:\n return 40\n elif verbosity == 1:\n return 20\n elif verbosity >= 2:\n return 10", "def log_level_type(arg):\n\tif not arg.upper() in ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'):\n\t\traise argparse.ArgumentTypeError(\"{0} is not a valid log level\".format(repr(arg)))\n\treturn getattr(logging, arg.upper())", "def __init__(self, logger, extra=None, enable_extra_levels=False):\n if enable_extra_levels and not Rfc5424SysLogAdapter._extra_levels_enabled:\n logging.addLevelName(EMERGENCY, 'EMERG')\n logging.addLevelName(EMERGENCY, 'EMERGENCY')\n logging.addLevelName(ALERT, 'ALERT')\n logging.addLevelName(NOTICE, 'NOTICE')\n Rfc5424SysLogAdapter._extra_levels_enabled = True\n\n if extra is not None and not isinstance(extra, dict):\n raise TypeError(\"Parameter extra must be a dictionary\")\n\n super(Rfc5424SysLogAdapter, self).__init__(logger, extra or {})", "def 
get_log_level_from_string(logLevel):\n\n if logLevel == 'DEBUG':\n return logging.DEBUG\n\n if logLevel == 'WARN':\n return logging.WARN\n\n if logLevel == 'ERROR':\n return logging.ERROR\n\n if logLevel == 'CRITICAL':\n return logging.CRITICAL\n\n if logLevel == 'INFO':\n return logging.INFO\n\n return None", "def access_log_level(lvl):\n if lvl not in (logging.DEBUG, logging.WARN, logging.ERROR, logging.CRITICAL, logging.FATAL):\n raise ValueError('%s is not a valid logging level' % (lvl,))\n\n def deco_view(func):\n func.access_log_level = lvl\n return func\n return deco_view", "def get_log_level(config):\n return to_log_level(config.get_string(\"logging.level\"))", "def __init__(self, *args, **kw):\n logging.Logger.__init__(self, *args, **kw)\n self.parent = logging.getLogger()", "def apply_logging_args(args):\n global default_level\n default_level = logging.getLevelName(args.log_level.upper())", "def getLevelAttribute(self) -> \"char const *\":\n return _coin.ScXMLLogElt_getLevelAttribute(self)", "def get_logger(**kwargs):\n # Configure logging modules\n configure()\n # Return structlog\n return structlog.get_logger(**kwargs)", "def from_grpc(\n level: mmp.LogLevel\n ) -> 'LogLevel':\n log_level_map = {\n mmp.LOG_LEVEL_DEBUG: LogLevel.DEBUG,\n mmp.LOG_LEVEL_INFO: LogLevel.INFO,\n mmp.LOG_LEVEL_WARNING: LogLevel.WARNING,\n mmp.LOG_LEVEL_ERROR: LogLevel.ERROR,\n mmp.LOG_LEVEL_CRITICAL: LogLevel.CRITICAL\n } # type: Dict[mmp.LogLevel, LogLevel]\n return log_level_map[level]", "def get_log_level_name(log_level):\n return LOG_LEVEL_MAP_NAME[int(log_level)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
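A small illustration of the logging calls used in the constructor above: logging.getLevelName returns the string name for each numeric level constant, so the field is effectively built from the five standard level names.

import logging

for level in (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL):
    print(level, logging.getLevelName(level))
# 10 DEBUG
# 20 INFO
# 30 WARNING
# 40 ERROR
# 50 CRITICAL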
Follows the same initialization design as the PlotSessions class, but just returns the raw info for the API
def __init__(self, session_objs: SessionsFromInterval=None, session: SessionData=None):
    self.sessions = None
    self.session_objs = session_objs
    if session_objs:
        self.sessions = session_objs
    elif session:
        self.sessions = [session]
    self.info = self.get_sessions_info()
[ "def info(self):\n sess_info = {}\n\n sess_info['ID'] = self.get('ID')\n sess_info['label'] = self.get('label')\n sess_info['note'] = self.get('xnat:note')\n sess_info['session_type'] = self.get('session_type')\n sess_info['project_id'] = self.project\n sess_info['original'] = self.get('original')\n sess_info['modality'] = self.get('modality')\n sess_info['UID'] = self.get('UID')\n sess_info['subject_id'] = self.get('xnat:subject_ID')\n sess_info['subject_label'] = self.subject\n sess_info['project_label'] = sess_info['project_id']\n sess_info['project'] = sess_info['project_id']\n sess_info['subject_ID'] = self.get('xnat:subject_ID')\n sess_info['URI'] = '/data/experiments/%s' % sess_info['ID']\n sess_info['session_label'] = sess_info['label']\n sess_info['last_updated'] = sess_info['original']\n sess_info['type'] = sess_info['modality']\n\n return sess_info", "def __init__(self):\n self.urls = parse_config(\"export_urls\")\n # self.config = parse_config(\"config\")\n self.session = requests.Session()", "def get_session_info(self):\n\n return self.get_session_key(), self.get_session_location()", "def info(self):\n print \"---SESSION DETAILS---\"\n print \"URL\",self.session.get_full_url()\n print \"HEADERS\",self.session.header_items()\n print \"METHOD\",self.session.get_method()\n print \"DATA\",self.session.get_data()\n print \"TYPE\",self.session.get_type()\n print \"SELECTOR\",self.session.get_selector()\n print \"---------------------\"", "def session(self):\n\n return {\n \"api_key\": self._api_key,\n \"app_secret\": self._app_secret,\n \"session_key\": self._session_key,\n \"session_secret\": self._session_secret,\n }", "def __init__(self, session):\n\n super(IntegrationsAPI, self).__init__()\n\n self._session = session", "def _build_info(self):\n temp_forecast = np.array(self.temperatures[self.time_step+1:self.time_step+25])\n price_forecast = np.array(self.prices[self.time_step+1:self.time_step+25])\n return {\"temperature_forecast\": temp_forecast, \n \"price_forecast\": price_forecast,\n \"forecast_times\": np.arange(0,self.iterations)}", "def get_raw_session(self):\n\t\traise NotImplementedError(\"Abstract Base Class\")", "def initialize_session(self, session: Session) -> None:", "def new_session(api_url: Optional[str] = None) -> Session:\n sess = Session()\n sess.mount('http+api://', MetadataAPIAdapter(base_url=api_url))\n return sess", "def __init__(self):\n\n def json_serial(obj):\n \"\"\"JSON serializer for objects not serializable by default json code\n\n Parameters\n ----------\n obj : datetime\n The date and time.\n\n Returns\n -------\n The date as a string.\n \"\"\"\n\n if isinstance(obj, datetime):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n date = datetime.now()\n date = json_serial(date)\n\n package_versions = (f'pandas: {version(\"pandas\")}', f'pydov: {version(\"pydov\")}')\n\n self.dictionary = {\"date\": date, \"versions\": package_versions, \"nb_datapoints\": [{}]}\n self.combined_datasets = {}", "def getSessionData(create=True): # pragma: no cover", "def _apiInitialize(self):\n # Specify the API that is being called\n self.api = 'https://api.spotify.com/'\n # Specify the Action that needs to be performed\n self.action = 'v1/search'\n self.actiontype = '&type=track'", "def __init__(self, api_key, debug=False, print_function=None):\n self.session = Session()\n # requests accepts http basic auth as tuple (user, pass), however,\n # Flowdoc uses only api key as username without password\n self.session.auth = (api_key, 
None)\n self.debug = debug\n self.print = print_function if print_function else print", "def _get_session_data(self) -> {}:\n if self.index in session: # check for the self index in session\n return session.get(self.index) # get stored data if is there\n return {}", "def show_session():\n\n return dict(session)", "def __init__(self, info_version, info):\r\n InfoSampledData.__init__(self, info)\r\n McsHdf5Protocols.check_protocol_type_version(\"InfoChannel\", info_version)\r\n self.__version = info_version", "def __init__(self):\n\n super().__init__(actor_infos=[], environment_specs=None)\n raise NotImplementedError(\"`EnvironmentSessionWrapper` should not be initialized directly\")", "def __init__(self, **kwargs):\n quandl.ApiConfig.api_key = kwargs[\"api_key\"]\n self.stock_data_path = \"stockdata/stockdata.csv\"\n self.stock_data_info = self.get_stock_data_info()\n self._stock_ticker_list = self.get_stock_ticker_list()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test whether users exist on the changelist page.
def test_users_on_changelist(self):
    url = reverse('admin:core_user_changelist')
    response = self.client.get(url)
    self.assertContains(response, self.user.email)
    self.assertContains(response, self.admin_user.email)
[ "def test_users_listed(self) -> None:\n url = reverse(\"admin:core_user_changelist\")\n response_ = self.client.get(url)\n\n self.assertContains(response_, self.user.name)\n self.assertContains(response_, self.user.email)", "def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.email)", "def user_exists():\n return 'adminId' in session and Admin.query.filter_by(\n id=session['adminId']).first() is not None", "def test_user_list(self):\n self._create_release(user='userOne')\n self._create_release(user='userTwo')\n\n result = orlo.queries.user_list().all()\n self.assertEqual(len(result), 2)\n users = [r[0] for r in result]\n self.assertIn('userOne', users)\n self.assertIn('userTwo', users)", "def check_existing_users(locker_user_name):\n return User.user_exist(locker_user_name)", "def test_user_list_with_duplicates(self):\n self._create_release(user='userOne')\n self._create_release(user='userOne')\n self._create_release(user='userTwo')\n\n result = orlo.queries.user_list().all()\n self.assertEqual(len(result), 2)\n users = [r[0] for r in result]\n self.assertIn('userOne', users)\n self.assertIn('userTwo', users)", "def existUser(self, userName):\n return userName in self.users.keys()", "def getuserexists(self, upn):\n\n url_encoded = urllib.parse.quote_plus(f\"userPrincipalName eq '{upn}'\")\n request_string = f\"{self.base_url}/users?&$filter={url_encoded}\"\n response = requests.get(request_string, headers=self.header_params_GMC)\n data = response.json()\n datal = len(data['value'])\n if datal == 0:\n return False\n elif datal == 1:\n return True\n else:\n return \"Error\"", "def test_custom_user_list_view(init_feasible_db, client):\n client.login(username=\"temporary\", password=\"temporary\")\n response = client.get(reverse(\"customuser_list\"))\n assert response.status_code == 200\n assert \"Staff:\" in response.rendered_content\n assert \"customuser_list.html\" in [t.name for t in response.templates]", "def have_linked_user(user_id):\n nih_users = NIH_User.objects.filter(user_id=user_id, linked=True)\n return len(nih_users) == 1", "def test_get_user_list(self):\n self.assertEqual(self.number_of_users,self.sharezone.get_user_list().count())", "def test_get_run_as_users_list(self):\n pass", "def check():\n name = request.args.get('username')\n if len(name) < 1:\n return jsonify(False)\n # query database to see if there are any row with this username\n row = db.execute(\"select * from users where username = ?\", (name,))\n if len(row) == 0:\n avail = True\n else:\n avail = False\n return jsonify(avail)", "def user_exist(req_username: str) -> bool:\n file: dict = json.load(open('last_tweet.json', 'r'))\n for username in file:\n if username == req_username:\n return True\n return False", "def test_user_list_view(self):\n # A profile list should be shown.\n self.client.login(\n username=self.user_data[0], password=self.user_data[1]\n )\n with self.defaults(MANIFEST_DISABLE_PROFILE_LIST=False):\n response = self.client.get(reverse(\"user_list_api\"))\n self.assertEqual(response.status_code, 200)", "def test_user_list_starred(self):\n pass", "def testTeamMemberUser(self):\r\n check_page_status(self, 'team_member', URL_ROLES)", "def check_for_invalid_user_in_terraform_users(self):\n # Obtain a list of azure ad user account names.\n azure_ad_account_names = self.azure.get_ad_user_account_names()\n logger.info(f\"obtained {len(azure_ad_account_names)} azure ad 
account names\")\n # Obtain a list of terraform user account names.\n terraform_user_names = self.terraform.get_user_names()\n logger.info(f\"obtained {len(terraform_user_names)} terraform usernames\")\n # Update our list of users found in terraform that are not found in azure ad.\n self.terraform_users_not_in_azure_active_directory = [\n terraform_user_name for terraform_user_name in terraform_user_names\n if terraform_user_name not in azure_ad_account_names\n ]", "def check_users_botometer(list_users):\n return dict(BOM.check_accounts_in(list_users))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
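A hypothetical setUp for the changelist test above, assuming a custom user model with email as the username field; the actual fixture code is not part of this record:

from django.contrib.auth import get_user_model
from django.test import Client, TestCase


class AdminSiteTests(TestCase):
    def setUp(self):
        self.client = Client()
        self.admin_user = get_user_model().objects.create_superuser(
            email='admin@example.com', password='testpass123'
        )
        self.client.force_login(self.admin_user)
        self.user = get_user_model().objects.create_user(
            email='user@example.com', password='testpass123'
        )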
Return a set of wayback URLs
def getUrls(domain):
    wayback_urls = set()
    history = requests.get(API_URL + domain).text.splitlines()
    for line in history:
        record = parse_wayback_record(line)
        if record.mimetype == "text/html":
            url = domain + record.path
            wayback_url = BASE_URL + record.timestamp + "/" + url
            wayback_urls.add(wayback_url)
    return wayback_urls
[ "def _get_all_url(cls) -> str:", "def urls(self):\n return [\n url('', include(self.get_list_urls()), {'flow_class': self.flow_class}),\n self.flow_class.instance.urls\n ]", "def get_urls(self):\n return [\n url(\"^$\", self.browser_index, name=\"api_playground_index\"),\n url(\"^submit-feedback$\", self.submit_feedback, name=\"api_playground_submit_feedback\"),\n ]", "def return_urls(self):\n return self._return_urls", "def list_URLs(app):\n with app.application.app_context():\n links = []\n for rule in app.application.url_map.iter_rules():\n # Filter out rules we can't navigate to in a browser\n # and rules that require parameters\n if 'GET' in rule.methods and has_no_empty_params(rule):\n url = get_url_for(rule.endpoint)\n links.append(url)\n return links", "def getURLs(self):\n\t\treturn [x.strip() for x in self.urls.split(\" \")]", "def fetch_urls():\n return set(\n url\n for url in get_paths_from_sitemaps(site, protocol)\n if not should_exclude(url)\n )", "def get_urls(self):\n return (\n ('/search/', self.require_method(self.api_search, ['GET'])),\n ) + super(HookedResource, self).get_urls()", "def to_visit_urls(self) -> Any:\n return self._to_visit_urls", "def get_urls(self):\n urls = []\n\n if self.include_root_view:\n root_url = url(r'^$', self.get_api_root_view(), name=self.root_view_name)\n urls.append(root_url)\n\n default_urls = super(DefaultDynamicQueryRouter, self).get_urls()\n urls.extend(default_urls)\n\n if self.include_format_suffixes:\n urls = format_suffix_patterns(urls)\n\n # self.logger.info(\"router.urls: %s\", json.dumps([str(item) for item in urls], sort_keys=True, indent=4))\n # self.logger.info(\"attached methods: %s\", json.dumps(self._attached, sort_keys=True, indent=4))\n return urls", "def get_fallback_urls(self):\n if not self._fallback:\n for appcache in self.registry:\n self._fallback.update(appcache.get_fallback(self.request))\n self._fallback.update(self._external_appcaches['fallback'])\n self._fallback.update(get_setting('FALLBACK_URL'))\n return self._fallback", "def extract_routes(self):\n\n soup = BeautifulSoup(requests.get(self.base_url).content, \"html.parser\")\n tags = soup.findAll('td', style='width: 100%;')\n urls = ['https:' + tag.find_next()['href'] + 'routes/all/'\n for tag in tags]\n return urls", "def visited_urls(self):\r\n return list(set(self._visited_urls) | set(self.paths.keys()))", "def _create_urls(self):\n\n urls = []\n for item in self.search_results['statuses']:\n url = 'https://twitter.com/' + item['user']['screen_name'] + '/status/' + item['id_str'] \n urls.append(url)\n return urls", "def build_urls(self) -> List[str]:\n path = Path(PROJECT_ROOT).joinpath(\"zones.txt\")\n with open(str(path)) as zones:\n urls = [f\"{self.base_url}/{zone.lower().strip().replace(' ', '-')}#quests\"\n for zone in zones]\n return urls", "def urls():\n projects = ccmenu.preferences.read().get(\"Projects\",[])\n return list(sorted(map(lambda p:p[\"serverUrl\"],projects)))", "def retrieve_listing_page_urls(self) -> List[str]:\n return [\"https://fatabyyano.net/newsface/0/\"]", "def public_url_list(self):\n src_pattern_list = [(r'(?P<filename>(specs\\.4\\.8|prerelease_specs\\.4\\.8|latest_specs\\.4\\.8|Marshal\\.4\\.8|'\n r'versions\\.list|names\\.list)(\\.gz)?)', 'specs', 'specs'),\n (r'gems/(?P<filename>.+)', 'download', 'download'),\n (r'specs/(?P<filename>.+)\\.gemspec', 'gem_specs', 'gem_specs'),\n (r'quick/Marshal\\.4\\.8/(?P<filename>.+)\\.gemspec(?P<compression>(\\.rz|))',\n 'quick_gem_specs', 'quick_gem_specs'),\n (r'', 'index', 
'index'),\n ]\n pattern_list = []\n for pattern, view, name in src_pattern_list:\n pattern_list.append(\n url(r\"^(?P<rid>\\d+)/(?P<repo_slug>[\\w\\-\\._]*)/a/%s$\" % pattern, self.wrap_view(view), name=name)\n )\n pattern_list.append(\n url(r\"^(?P<rid>\\d+)/(?P<repo_slug>[\\w\\-\\._]*)/s/(?P<state_slug>[\\w\\-\\._]+)/%s$\" % pattern,\n self.wrap_view(view), name=name)\n )\n pattern_list += [\n url(r\"^(?P<rid>\\d+)/$\", self.wrap_view('index'), name=\"index\"),\n ]\n return pattern_list", "def make_url_patterns():\n\n from django.conf.urls import url\n\n # Index is handled differently.\n routes = dict(ROUTES)\n routes.pop('', None)\n\n result = []\n for route, view in routes.items():\n result.append(url(regex_from_route(route), view))\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
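Hypothetical stand-ins for the module-level names used by getUrls above (API_URL, BASE_URL, parse_wayback_record). The Wayback CDX API's default text output is space-separated (urlkey, timestamp, original, mimetype, statuscode, digest, length), but the exact helper in the original module is not shown, so this is only a sketch:

from collections import namedtuple

API_URL = "http://web.archive.org/cdx/search/cdx?url="
BASE_URL = "http://web.archive.org/web/"

WaybackRecord = namedtuple("WaybackRecord", ["timestamp", "path", "mimetype"])

def parse_wayback_record(line):
    fields = line.split()
    original = fields[2]
    # Drop the scheme and host so that `domain + record.path` rebuilds the snapshot URL.
    path = "/" + original.split("/", 3)[-1] if original.count("/") > 2 else "/"
    return WaybackRecord(timestamp=fields[1], path=path, mimetype=fields[3])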
Generate a new proposal from a trajectory state. The trajectory state records information about the position in the state space and corresponding potential energy. A proposal also carries a weight that is equal to the difference between the current energy and the previous one. It thus carries information about the previous state as well as the current state.
def update(
    previous_proposal: Proposal, state: IntegratorState
) -> Tuple[Proposal, bool]:
    energy = previous_proposal.energy
    new_energy = state.potential_energy + kinetic_energy(
        state.position, state.momentum
    )

    delta_energy = energy - new_energy
    delta_energy = jnp.where(jnp.isnan(delta_energy), -jnp.inf, delta_energy)
    is_transition_divergent = jnp.abs(delta_energy) > divergence_threshold

    # The weight of the new proposal is equal to H(z) - H(z_new)
    weight = delta_energy

    return (
        Proposal(
            state,
            new_energy,
            weight,
        ),
        is_transition_divergent,
    )
[ "def create_proposal(sequence) -> Proposal:\n return Proposal(sequence)", "def transition_from(self, state):\n a, b, c = state\n tomorrow_state = [(0, 0, 0)]\n if a == 0:\n proba_state = [1.0] # exogenous state is absorbing. Done.\n else:\n proba_state = [self.lam]\n if b < self.N - 1:\n i = 1\n trans = ((1.0 - self.gamma), self.gamma)\n else:\n i = 0\n trans = (1.0, 0)\n while True:\n for cprime in self.c_states:\n if self.c_transition[c][cprime]:\n tomorrow_state.append((a, b + i, cprime))\n proba_state.append((1 - self.lam) * trans[i] *\n self.c_transition[c][cprime])\n if i == 0:\n break\n i -= 1\n return tomorrow_state, proba_state", "def _reward(self, state):\n l1 = self._ped_params['lambda1']\n l2 = self._ped_params['lambda2']\n sd = self._ped_params['sigma_d']\n sw = self._ped_params['sigma_w']\n beta = self._ped_params['beta']\n alpha = self._ped_params['alpha']\n p2m = self._ped_params['pixel2meters']\n \n ped_velocity = state['ped_velocity']\n ped_position = state['ped_position']\n v = state['agent_velocity'] * p2m\n p = state['agent_position'] * p2m\n\n E_sum = 0\n for i in range(len(ped_velocity)):\n v2 = ped_velocity[i] * p2m\n p2 = ped_position[i] * p2m\n k = p - p2\n q = v - v2\n t = -np.dot(k, q) / np.linalg.norm(q) ** 2\n d = k + q * max(t, 0)\n dsquare = np.linalg.norm(d) ** 2\n E = np.exp(-dsquare / (2 * sd ** 2))\n wd = np.exp(-np.linalg.norm(k)**2 / (2 * sw**2))\n cos = -np.dot(k, v) / (np.linalg.norm(k) * np.linalg.norm(v))\n wphi = ((1 + cos) / 2)**beta\n E_sum += E * wd * wphi\n\n reward_params = {\n 'pixel2meters': p2m,\n 'lambda1': l1\n }\n E_sum += self._agent.reward(reward_params)\n # 0.5 is the time penalty\n return - E_sum - self._time_penalty_hyperparameter", "def __get_state__(self):\n\t\t## unroll all the parameters\n\t\tgates = self._gates\n\t\t\n\t\tThetas = [theta for gate in gates for theta in gate.__get_state__()['Thetas']] \n\t\tparams = [weight for gate in gates for weight in gate.__get_state__()['params']]\n\n\t\tprint \"Total number of parameters: %d \" % len(params) \n\n\t\treturn dict(Thetas=Thetas,params=params)", "def der_cost ( self, x_dict, state_config ):\n i = 0\n cost = 0.\n n = 0\n n = 0\n for param, typo in state_config.iteritems():\n if typo == CONSTANT:\n n += 1\n elif typo == VARIABLE:\n n_elems = len ( x_dict[param] )\n n += n_elems\n der_cost = np.zeros ( n )\n x_params = np.empty ( ( len( x_dict.keys()), self.nt ) )\n j = 0\n ii = 0\n the_derivatives = np.zeros ( ( len( x_dict.keys()), self.nt ) )\n for param, typo in state_config.iteritems():\n \n if typo == FIXED or typo == CONSTANT:\n x_params[ j, : ] = x_dict[param]\n \n elif typo == VARIABLE:\n x_params[ j, : ] = x_dict[param]\n\n j += 1\n \n\n for itime, tstep in enumerate ( self.state_grid ):\n if self.mask[itime, 0] == 0:\n # No obs here\n continue\n # We use the `get_emulator` method to select the required\n # emulator for this geometry, spectral setting etc\n obs_ops = self.get_emulator ( itime, self.mask, self.emulators )\n sigma_obs_vis, sigma_obs_vis = self.bu[ :, itime ]\n # forward model the proposal\n x = x_params[:, itime]\n model_albedo_vis, vis_var, vis_der = \\\n obs_ops[0] ( np.atleast_2d(x) )\n model_albedo_nir, nir_var, nir_der = \\\n obs_ops[1] ( np.atleast_2d(x) )\n # Calculate the actual cost\n this_cost = 0.5*( model_albedo_vis - albedo_vis )**2/sigma_obs_vis**2 + \\\n 0.5*( model_albedo_nir - albedo_nir )**2/sigma_obs_nir**2\n \n # The partial derivatives of the cost function are then\n this_der= (1./sigma_obs_vis**2)*( model_albedo_vis - \\\n 
albedo_vis )*vis_der + \\\n (1./sigma_obs_nir**2)*( model_albedo_nir - albedo_nir )*nir_der \n \n\n cost += this_cost\n the_derivatives[ :, itime] = this_der\n \n \n j = 0\n for i, (param, typo) in enumerate ( state_config.iteritems()) :\n if typo == CONSTANT:\n der_cost[j] = the_derivatives[i, 0]\n j += 1\n elif typo == VARIABLE:\n n_elems = len ( x_dict[param] )\n der_cost[j:(j+n_elems) ] = the_derivatives[i, :]\n j += n_elems\n \n return cost, der_cost", "def Estimate(self):\n\n #Different estimates:\n #Could do mode, median, mean, or other.\n #Bayesian MMSE estimator is just weighted arithmetic mean of particle states:\n \n #Get the WEIGHTED array of particle states to use in both calculations:\n weights_repeat = np.repeat(np.expand_dims(self.weights,axis=1),self.P.shape[0],axis=1)\n weighted_P = self.P*weights_repeat\n #(on the first time step, and on time steps where SIR was just done,\n #the weights are all 1/nParticles, so the mean = weighted mean,\n #and same for the covariance matrix calculation)\n\n #The estimated state vector is the weighted mean of all particles:\n self.mu = np.mean(weighted_P,axis=1)\n\n #The estimated state covariance matrix is the weighted covariance matrix of all particles:\n self.StateCovariance = np.cov(weighted_P) #np.cov default assumption is rows are variables, columns are observations", "def function(self, state, time_interval, **kwargs):\n\n # Total time that will have passed since initial state up until transition is complete\n delta_t = (state.timestamp + time_interval - self.init_state.timestamp).total_seconds()\n\n # New position and velocity calculated only from `delta_t`\n # Assumed that `state` lies on the constant jerk path connecting `initial_state` with\n # `final_state`\n\n new_position = list()\n new_velocity = list()\n for init_x, init_v, init_a, jerk in zip(self.init_X, self.init_V, self.init_A, self.jerk):\n new_position.append(self.calculate_pos(init_x, init_v, init_a, jerk, delta_t))\n new_velocity.append(self.calculate_vel(init_v, init_a, jerk, delta_t))\n\n # Non-kinematic components remain constant\n new_sv = np.copy(state.state_vector).astype(float) # May initiate with integers\n new_sv[self.position_mapping, 0] = new_position\n new_sv[self.velocity_mapping, 0] = new_velocity\n\n return StateVector(new_sv)", "def build_state(self):\n\n # Collect data about the environment\n waypoint = self.planner.next_waypoint() # The next waypoint \n inputs = self.env.sense(self) # Visual input - intersection light and traffic\n for key, value in iter(inputs.items()):\n if value is None:\n inputs.update({key:'None'})\n deadline = self.env.get_deadline(self) # Remaining deadline\n\n ########### \n ## TO DO ##\n ###########\n \n # NOTE : you are not allowed to engineer features outside of the inputs available.\n # Because the aim of this project is to teach Reinforcement Learning, we have placed \n # constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.\n # With the hand-engineered features, this learning process gets entirely negated.\n \n # Set 'state' as a tuple of relevant data for the agent \n return self.build_index(inputs,waypoint)", "def progressive_biased_sampling(rng_key, proposal, new_proposal):\n p_accept = jnp.exp(new_proposal.weight - proposal.weight)\n p_accept = jnp.clip(p_accept, a_max=1.0)\n do_accept = jax.random.bernoulli(rng_key, p_accept)\n\n updated_proposal = Proposal(\n new_proposal.state,\n new_proposal.energy,\n 
jnp.logaddexp(proposal.weight, new_proposal.weight),\n )\n\n return jax.lax.cond(\n do_accept, lambda _: updated_proposal, lambda _: proposal, operand=None\n )", "def _proposal(self, currval, params):\n\t\treturn self._sample_impl(params)", "def replan(self, start, goal, goal_pose, weights, T, timestep, seed=None):\n\t\tassert weights is not None, \"The weights vector is empty. Cannot plan without a cost preference.\"\n\t\tself.weights = weights\n\n\t\twaypts = self.trajOpt(start, goal, goal_pose, traj_seed=seed)\n\t\twaypts_time = np.linspace(0.0, T, self.num_waypts)\n\t\ttraj = Trajectory(waypts, waypts_time)\n\t\treturn traj.upsample(int(T/timestep) + 1)", "def compute_next_state(self, state, efforts, dt, wind_wrench=None):\n accel = self.compute_accel(state, efforts, wind_wrench)\n return motion.State(motion.Pose(state.pose.lin + state.pose.rotate_vector(dt*state.twist.lin + 0.5*(dt**2)*accel.lin),\n motion.quaternion_multiply(state.pose.ang, motion.quaternion_from_rotvec(dt*state.twist.ang + 0.5*(dt**2)*accel.ang))),\n motion.Twist(state.twist.lin + dt*accel.lin,\n state.twist.ang + dt*accel.ang),\n state.time + dt)", "def make_proposal(\n self, message_proto, role_id, user_id, reason=None, metadata=None\n ):\n return message_proto(\n proposal_id=uuid4().hex,\n role_id=role_id,\n user_id=user_id,\n reason=reason,\n metadata=metadata,\n )", "def sensor_model(self, observation, state):\n # Write your code here!\n last_state = self.forward(state, self.opposite[state[2]])\n last_state = self.try_bounce(last_state)\n\n [o_row], [o_col] = np.nonzero(observation)\n prob = np.zeros((20, 20))\n prob += (1 - self.alpha) / 400\n\n for d in range(9):\n nxt_state = self.forward(last_state, d)\n nxt_state = self.try_bounce(nxt_state)\n prob[nxt_state[0]][nxt_state[1]] += self.alpha / 18\n\n for d in range(9):\n nxt_state = self.forward(state, d)\n nxt_state = self.try_bounce(nxt_state)\n prob[nxt_state[0]][nxt_state[1]] += self.alpha / 18\n\n return prob[o_row][o_col]", "def __init__(self, proposal_set,\r\n sampler=\"opus_core.samplers.weighted_sampler\",\r\n weight_string = \"exp_roi = exp(urbansim_parcel.development_project_proposal.expected_rate_of_return_on_investment)\",\r\n filter_attribute=None,\r\n run_config=None, estimate_config=None,\r\n debuglevel=0, dataset_pool=None):\r\n self.dataset_pool = self.create_dataset_pool(dataset_pool, pool_packages=['urbansim_parcel', 'urbansim', 'opus_core'])\r\n self.dataset_pool.add_datasets_if_not_included({proposal_set.get_dataset_name(): proposal_set})\r\n self.proposal_set = proposal_set\r\n # Code added by Jesse Ayers, MAG, 7/27/2009\r\n # Checking the size of the proposal set\r\n # if there are no proposals, skip running the model and \r\n # print a message\r\n self.positive_proposals = True\r\n if self.proposal_set.n <= 0:\r\n logger.log_status(\"Proposal Set size <= 0, no proposals to consider, skipping DPPSM.\")\r\n self.positive_proposals = None\r\n return\r\n if not self.dataset_pool.has_dataset(\"development_project_proposal_component\"):\r\n self.proposal_component_set = create_from_proposals_and_template_components(proposal_set, \r\n self.dataset_pool.get_dataset('development_template_component'))\r\n self.dataset_pool.replace_dataset(self.proposal_component_set.get_dataset_name(), self.proposal_component_set)\r\n else:\r\n self.proposal_component_set = self.dataset_pool.get_dataset(\"development_project_proposal_component\")\r\n\r\n if weight_string is not None:\r\n if weight_string not in proposal_set.get_known_attribute_names():\r\n 
proposal_set.compute_variables(weight_string, dataset_pool=self.dataset_pool)\r\n self.weight = self.proposal_set.get_attribute(weight_string)\r\n else:\r\n self.weight = ones(self.proposal_set.size(), dtype=\"float64\") #equal weight\r\n\r\n ## TODO: handling of filter_attribute\r", "def trajOpt(self, state_initial, dircol=0, second_pass=False):\n\n # stopwatch for solver time\n tsolve_pre = time.time()\n\n (x_goal, V_goal, gamma_goal, q_goal) = (200.0, state_initial[2], 0.0, 0.0)\n\n # number of knot points - proportional to x-distance seems to work well\n if not dircol:\n N = int(np.floor(0.8 * np.abs(x_goal - state_initial[0])))\n else:\n N = 30\n\n # optimization problem: variables t_f, u[k], x[k]\n mp = MathematicalProgram()\n\n t_f = mp.NewContinuousVariables(1, \"t_f\")\n dt = t_f[0] / N\n\n k = 0\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n input_trajectory = u\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n state_trajectory = x\n\n for k in range(1, N):\n u = mp.NewContinuousVariables(2, \"u_%d\" % k)\n x = mp.NewContinuousVariables(6, \"x_%d\" % k)\n input_trajectory = np.vstack((input_trajectory, u))\n state_trajectory = np.vstack((state_trajectory, x))\n\n x = mp.NewContinuousVariables(6, \"x_%d\" % N)\n state_trajectory = np.vstack((state_trajectory, x))\n\n # for dircol we can use u_N and first-order hold\n if dircol:\n u = mp.NewContinuousVariables(2, \"u_%d\" % N)\n input_trajectory = np.vstack((input_trajectory, u))\n\n print \"Number of decision vars\", mp.num_vars()\n\n # cost function: penalize time and control effort\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n # TODO: use u of length n+1 for dircol\n def totalcost(X):\n dt = X[0] / N\n u0 = X[1:N + 1]\n u1 = X[N + 1:2 * N + 1]\n v = X[2 * N + 1:3 * N + 1] # cut last item if dirtrans\n return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1)) + 1.0 * X[0] * (u0.dot(v))\n # return dt * (1.0 * u0.dot(u0) + 1.0 * u1.dot(u1) + 10.0 * X[0] * (u0.dot(v)))\n\n mp.AddCost(totalcost, allvars)\n\n # initial state constraint\n for i in range(len(state_initial)):\n mp.AddLinearConstraint(state_trajectory[0, i] == state_initial[i])\n\n # final state constraint (x position)\n mp.AddLinearConstraint(state_trajectory[-1, 0] == x_goal)\n\n # final state constraint (z position) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 1] <= 1.5)\n mp.AddLinearConstraint(state_trajectory[-1, 1] >= 0.5)\n\n # final state constraint (velocity) NOTE: range is acceptable\n mp.AddLinearConstraint(state_trajectory[-1, 2] <= 1.5 * V_goal)\n mp.AddLinearConstraint(state_trajectory[-1, 2] >= V_goal)\n\n # final state constraint (flight path angle) NOTE: small range here\n mp.AddLinearConstraint(state_trajectory[-1, 3] <= gamma_goal + 1.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[-1, 3] >= gamma_goal - 1.0 * np.pi / 180.0)\n\n # final state constraint (pitch rate)\n mp.AddLinearConstraint(state_trajectory[-1, 5] == q_goal)\n\n # input constraints\n for i in range(len(input_trajectory[:, 0])):\n mp.AddLinearConstraint(input_trajectory[i, 0] >= 0.0)\n mp.AddLinearConstraint(input_trajectory[i, 0] <= 1.2 * self.m * self.g)\n mp.AddLinearConstraint(input_trajectory[i, 1] >= -30.0)\n mp.AddLinearConstraint(input_trajectory[i, 1] <= 30.0)\n\n # state constraints\n for i in range(len(state_trajectory[:, 0])):\n # x position\n mp.AddLinearConstraint(state_trajectory[i, 0] >= state_initial[0])\n 
mp.AddLinearConstraint(state_trajectory[i, 0] <= x_goal)\n # z position\n mp.AddLinearConstraint(state_trajectory[i, 1] >= 0.3)\n mp.AddLinearConstraint(state_trajectory[i, 1] <= 2.0)\n # velocity\n mp.AddLinearConstraint(state_trajectory[i, 2] >= 1.0)\n mp.AddLinearConstraint(state_trajectory[i, 2] <= 3.0 * state_initial[2])\n # flight path angle\n mp.AddLinearConstraint(state_trajectory[i, 3] >= -30.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 3] <= 30.0 * np.pi / 180.0)\n # pitch angle\n mp.AddLinearConstraint(state_trajectory[i, 4] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 4] <= 40.0 * np.pi / 180.0)\n # pitch rate\n mp.AddLinearConstraint(state_trajectory[i, 5] >= -20.0 * np.pi / 180.0)\n mp.AddLinearConstraint(state_trajectory[i, 5] <= 20.0 * np.pi / 180.0)\n\n # dynamic constraints\n if not dircol:\n # direct transcription\n for j in range(1, N + 1):\n dynamic_prop = dt * self.airplaneLongDynamics(state_trajectory[j - 1, :], input_trajectory[j - 1, :])\n for k in range(len(state_initial)):\n mp.AddConstraint(state_trajectory[j, k] == state_trajectory[j - 1, k] + dynamic_prop[k])\n else:\n # direct collocation\n for j in range(1, N + 1):\n x0 = state_trajectory[j - 1, :]\n x1 = state_trajectory[j, :]\n xdot0 = self.airplaneLongDynamics(x0, input_trajectory[j - 1, :])\n xdot1 = self.airplaneLongDynamics(x1, input_trajectory[j, :])\n\n xc = 0.5 * (x1 + x0) + dt * (xdot0 - xdot1) / 8.0\n xdotc = - 1.5 * (x0 - x1) / dt - 0.25 * (xdot0 + xdot1)\n uc = 0.5 * (input_trajectory[j - 1, :] + input_trajectory[j, :])\n f_xc = self.airplaneLongDynamics(xc, uc)\n for k in range(len(state_initial)):\n # TODO: why does \"==\" cause \"kUnknownError\"?\n # mp.AddConstraint(xdotc[k] - f_xc[k] == 0.0)\n mp.AddConstraint(xdotc[k] <= f_xc[k] + 0.001)\n mp.AddConstraint(xdotc[k] >= f_xc[k] - 0.001)\n\n # allow for warm start of dircol program with output of dirtrans program\n if (second_pass) and (self.mp_result == SolutionResult.kSolutionFound):\n # warm start using previous output\n print 'warm start to traj opt'\n t_guess = self.ttraj[-1]\n mp.SetInitialGuess(t_f[0], t_guess)\n\n for i in range(len(state_trajectory[:, 0])):\n for j in range(len(state_initial)):\n mp.SetInitialGuess(state_trajectory[i, j], self.xdtraj[i, j])\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.udtraj[i, 0])\n mp.SetInitialGuess(input_trajectory[i, 1], self.udtraj[i, 1])\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 1.25 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.8 * t_guess)\n\n else:\n # initial guesses\n t_guess = np.abs(x_goal - state_initial[0]) / (0.5 * (V_goal + state_initial[2]))\n mp.SetInitialGuess(t_f[0], t_guess)\n\n z_final_dummy = state_initial[1]\n theta_final_dummy = state_initial[4]\n state_final_dummy = np.array([x_goal, z_final_dummy, V_goal, gamma_goal, theta_final_dummy, q_goal])\n for i in range(len(state_trajectory[:, 0])):\n state_guess = ((N - i) / N) * state_initial + (i / N) * state_final_dummy\n for j in range(len(state_guess)):\n mp.SetInitialGuess(state_trajectory[i, j], state_guess[j])\n\n for i in range(N):\n mp.SetInitialGuess(input_trajectory[i, 0], self.m * self.g / 3.5)\n mp.SetInitialGuess(input_trajectory[i, 1], 0.01)\n\n # time constraints\n mp.AddLinearConstraint(t_f[0] <= 2.0 * t_guess)\n mp.AddLinearConstraint(t_f[0] >= 0.5 * t_guess)\n\n # set SNOPT iteration limit\n it_limit = int(max(20000, 40*mp.num_vars()))\n mp.SetSolverOption(SolverType.kSnopt, 'Iterations limit', it_limit)\n\n print(\"** 
solver begin with N = %d **\" % N)\n # solve nonlinear optimization problem (w/SNOPT)\n result = mp.Solve()\n print result\n\n # convert from symbolic to float\n input_trajectory = mp.GetSolution(input_trajectory)\n t_f = mp.GetSolution(t_f)\n state_trajectory_approx = mp.GetSolution(state_trajectory)\n time_array = t_f[0] * np.linspace(0.0, 1.0, (N + 1))\n\n tsolve_post = time.time()\n tsolve = tsolve_post - tsolve_pre\n\n solver_id = mp.GetSolverId()\n\n print (\"** %s solver finished in %.1f seconds **\\n\" % (solver_id.name(), tsolve))\n print (\"t_f computed: %.3f seconds\" % t_f[0])\n\n # get total cost of solution\n if result == SolutionResult.kSolutionFound:\n thrust = input_trajectory[:, 0]\n elev = input_trajectory[:, 1]\n vel = state_trajectory_approx[:, 2]\n allvars = np.hstack((t_f[0], thrust, elev, vel))\n print (\"cost computed: %.3f\" % totalcost(allvars))\n\n # save traj (this is a bit sloppy and redundant but scripts for visualization currently rely on this)\n self.udtraj = input_trajectory\n self.xdtraj = state_trajectory_approx\n self.ttraj = time_array\n self.mp_result = result\n\n # save polynomials of input, state trajectories\n if not dircol:\n self.udtraj_poly = PiecewisePolynomial.FirstOrderHold(time_array[0:-1], input_trajectory.T)\n else:\n self.udtraj_poly = PiecewisePolynomial.FirstOrderHold(time_array, input_trajectory.T)\n self.xdtraj_poly = PiecewisePolynomial.Cubic(time_array, state_trajectory_approx.T)\n\n return input_trajectory, state_trajectory_approx, time_array", "def T(self, state, action):\n if action == 'Commit':\n if state in self.transitionModel:\n keys = self.transitionModel[state].keys() #all the next states\n keys.append(None) # account for failure\n probs = [item[0] * self.pickupProb[state] for item in self.transitionModel[state].values()] #the prob values of the next states\n probs.append(1. - self.pickupProb[state]) #probability you don't pick up anyone\n return zip(keys,probs) #return the tuple\n else:\n return []\n #else choose to drive elsewhere\n else:\n newState = self.successorState(state,action)\n return [(newState,1)]", "def gradLikelihood(self, state):\n return", "def creation(i,state_in):\n coef = np.sqrt(state_in[i]+1)\n state_out=state_in.copy()\n state_out[i] = state_out[i]+1\n return state_out,coef" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
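A small numeric illustration of the weight defined in update above; the energies are made up, and the acceptance rule mirrors the biased sampler in the next record:

import jax.numpy as jnp

energy, new_energy = 12.3, 11.8                  # H(z) and H(z_new), invented for illustration
weight = energy - new_energy                     # roughly 0.5, matching weight = H(z) - H(z_new)
p_accept = jnp.clip(jnp.exp(weight), a_max=1.0)  # 1.0: a lower-energy proposal is always favored
print(float(weight), float(p_accept))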
Biased proposal sampling. Unlike uniform sampling, biased sampling favors new proposals. It thus biases the transition away from the trajectory's initial state.
def progressive_biased_sampling(rng_key, proposal, new_proposal):
    p_accept = jnp.exp(new_proposal.weight - proposal.weight)
    p_accept = jnp.clip(p_accept, a_max=1.0)
    do_accept = jax.random.bernoulli(rng_key, p_accept)

    updated_proposal = Proposal(
        new_proposal.state,
        new_proposal.energy,
        jnp.logaddexp(proposal.weight, new_proposal.weight),
    )

    return jax.lax.cond(
        do_accept, lambda _: updated_proposal, lambda _: proposal, operand=None
    )
[ "def sample(self, state):\n # get probabilities for next state over all states observed so far, plus oracle proba in final index:\n base_probas = self.base_probas(state)\n # sample one of the states (or oracle query):\n next_state = np.random.choice(range(len(base_probas)), p=base_probas)\n # update tables and return state if our choice is not oracle:\n if next_state < self.seen_states:\n self.base_counts[state,next_state] += 1\n return next_state\n # otherwise if we choose final state, sample from oracle (also updating count tables/num_states):\n else:\n oracle_probas = self.oracle_probas()\n next_oracle_state = np.random.choice(range(len(oracle_probas)), p=oracle_probas)\n # update both counts:\n self.base_counts[state,next_oracle_state] += 1\n self.oracle_counts[next_oracle_state,0] += 1\n # update num_states if new state seen:\n if next_oracle_state == (oracle_probas.shape[0]-1):\n self.seen_states += 1\n # return:\n return next_oracle_state", "def _sample_with_shrinkage(x_initial, target_log_prob, log_slice_heights,\n step_size, lower_bounds, upper_bounds, seed,\n name=None):\n with tf.name_scope(name or 'sample_with_shrinkage'):\n seed = samplers.sanitize_seed(seed)\n # Keeps track of whether an acceptable sample has been found for the chain.\n found = tf.zeros_like(x_initial, dtype=tf.bool)\n cond = lambda found, *ignored_args: ~tf.reduce_all(found)\n x_next = tf.identity(x_initial)\n x_initial_shape = ps.shape(x_initial)\n x_initial_dtype = dtype_util.base_dtype(x_initial.dtype)\n def _body(found, seed, left, right, x_next):\n \"\"\"Iterates until every chain has found a suitable next state.\"\"\"\n proportions_seed, next_seed = samplers.split_seed(seed)\n proportions = samplers.uniform(\n x_initial_shape, dtype=x_initial_dtype, seed=proportions_seed)\n x_proposed = tf.where(~found, left + proportions * (right - left), x_next)\n accept_res = _test_acceptance(x_initial, target_log_prob=target_log_prob,\n decided=found,\n log_slice_heights=log_slice_heights,\n x_proposed=x_proposed, step_size=step_size,\n lower_bounds=left, upper_bounds=right)\n boundary_test = log_slice_heights < target_log_prob(x_proposed)\n can_accept = boundary_test & accept_res\n next_found = found | can_accept\n # Note that it might seem that we are moving the left and right end points\n # even if the point has been accepted (which is contrary to the stated\n # algorithm in Neal). 
However, this does not matter because the endpoints\n # for points that have been already accepted are not used again so it\n # doesn't matter what we do with them.\n next_left = tf.where(x_proposed < x_initial, x_proposed, left)\n next_right = tf.where(x_proposed >= x_initial, x_proposed, right)\n return (next_found, next_seed, next_left, next_right, x_proposed)\n\n return tf.while_loop(\n cond=cond,\n body=_body,\n loop_vars=(found, seed, lower_bounds, upper_bounds, x_next))[-1]", "def _proposal(self, currval, params):\n\t\treturn self._sample_impl(params)", "def gibbs_sample_converge(self):\n def step(x, stop_condition):\n x_prev = x\n x = sample(x)\n #Propagate the visible values to sample the hidden values\n h_k = sample(tf.sigmoid(tf.matmul(x, self.W) + self.hb))\n #Propagate the hidden values to sample the visible values\n x = tf.sigmoid(tf.matmul(h_k, tf.transpose(self.W)) + self.vb)\n \n # Convergence of probability vectors\n stop_condition = (tf.reduce_mean(tf.square(x - x_prev)) > 0.2)\n return x, stop_condition\n\n [x_sample, _] = tf.while_loop(lambda x, stop_condition: stop_condition,\n step, [self.x, tf.constant(True)], \n parallel_iterations=1,\n back_prop = False)\n\n x_sample = sample(x_sample)\n # TF tutorials said we need this to stop RBM values from backpropogating\n x_sample = tf.stop_gradient(x_sample) \n return x_sample", "def gibbs_sample_parameters(self, state):\n return state[0]", "def active_sampling(self,prunning:List[int])->int:\n p_LB = self.p_LB[prunning]\n w = self.leaves_count[prunning]\n L = self.major_label[prunning]\n p_LB = p_LB[np.arange(len(p_LB)),L]\n prob = w*(1-p_LB)\n if np.sum(prob) == 0:\n return choice(prunning)\n prob = prob/prob.sum()\n return choice(prunning,p = prob)", "def gen_branch_history_sample(\n primary_state_in, blink_state_in, blen_in,\n dg,\n partition, rate_on, rate_off,\n ):\n\n # The state space of this process is somewhat complicated.\n # The primary state is an integer.\n # The blink state is a map from partition part to binary blink state.\n # An invariant of the compound state\n # is that the blink state of part k must be 1 if the partition part of\n # the primary state is k.\n # Otherwise, any combination of primary states and blink states is allowed.\n primary_state = primary_state_in\n blink_state = dict(blink_state_in)\n blen_accum = 0\n while True:\n\n # Get the list of the allowed moves out of the current state,\n # and the corresponding rates associated with these moves.\n compound_state = (primary_state, blink_state)\n moves = get_moves(compound_state, dg, partition, rate_on, rate_off)\n successors, rates = zip(*moves)\n\n # Compute the total rate out of the current compound state,\n # and draw a random wait time that depends on this total rate.\n # If this wait time puts us over the allotted time period,\n # then we are done sampling the histories.\n total_rate = sum(rates)\n scale = 1.0 / total_rate\n blen_delta = np.random.exponential(scale=scale)\n blen_accum += blen_delta\n if blen_accum >= blen_in:\n return\n\n # Next we randomly pick a successor state\n # according to the rate proportions.\n pre_distn = np.array(rates, dtype=float)\n distn = pre_distn / np.sum(pre_distn)\n next_compound_state = successors[cmedbutil.random_category(distn)]\n primary_state, blink_state = next_compound_state\n\n # Yield the cumulative time and the new primary and blink states\n yield blen_accum, primary_state, blink_state", "def sample(self, state):\n\n\n\t\talphabet = []\n\t\tfor model in 
self.models[0]:\n\t\t\talphabet.extend(model.alphabet)\n\n\t\talphabet = list(set(alphabet))\n\t\talphabet.sort()\n\n\t\tdistribution = []\n\t\t# We reconstruct the distribution according to the sorting of the alphabet\n\t\tfor elem in alphabet:\n\t\t\tdistribution.append(self.getLikelihood(state, elem))\n\n\t\t#print(state)\n\t\t#print(np.sum(distribution))\n\n\t\tret = int(np.random.choice(alphabet, p=distribution))\n\n\t\treturn ret", "def toss_biased(p=GLOBAL_PROB):\n coin = np.random.rand()\n return 1 if coin < p else 0", "def test_prioritized_sampling():\n np.random.seed(1337)\n buf = PrioritizedReplayBuffer(capacity=10, alpha=1.5, beta=1, epsilon=0.5)\n for i in range(10):\n sample = {'obs': 0, 'action': 0, 'reward': 0, 'new_obs': 0, 'steps': 1, 'idx': i}\n buf.add_sample(sample, init_weight=i)\n sampled_idxs = []\n for i in range(50000):\n for sample in buf.sample(3):\n sampled_idxs.append(sample['idx'])\n counts = Counter(sampled_idxs)\n probs = np.power(np.arange(10).astype('float64') + 0.5, 1.5)\n probs /= np.sum(probs)\n for i, prob in enumerate(probs):\n frac = counts[i] / len(sampled_idxs)\n assert frac > prob - 0.01\n assert frac < prob + 0.01", "def init_params_random(self) -> None:\n self.probs = Dirichlet(self.prior).sample()", "def propose(self):\n runenv.stepblockind=self.blockind\n if self.proposal_distribution == \"Normal\":\n self.stochastic.value = rnormal(self.stochastic.value, self.adaptive_scale_factor * self.proposal_sd, size=self.stochastic.value.shape)\n elif self.proposal_distribution == \"Prior\":\n self.stochastic.random()", "def sample_goal(self):\n #TODO: We don't need this\n raise NotImplementedError", "def sample_action(self, state):\n # YOUR CODE HERE\n action = np.random.choice([0, 1], p=self.get_probs([state, state], [0, 1]))\n return action", "def sample(probs):\n import tensorflow as tf\n return tf.floor(probs + tf.random.uniform(tf.shape(probs), 0, 1))", "def rejection_sampling(target_pdf_fn, proposal_pdf_fn, proposal_draw_fn, N=1):\n\n samples = []\n\n while len(samples) < N:\n # draw point along X-axis from proposal distribution\n x = proposal_draw_fn()\n\n # calculate proposal pdf at x\n y = proposal_pdf_fn(x)\n\n # calculate pdf at x\n fx = target_pdf_fn(x)\n\n # draw point randomly between 0 and y\n u = random.random()*y\n\n # the proposal should contain the target for all x \n assert fx <= y\n\n # if u is less than the target distribution pdf at x, then accept x\n if u < fx:\n samples.append(x)\n\n if N == 1:\n return samples[0]\n else:\n return samples", "def _sample_beta(self):\n def _neg_log_posterior(beta_flat):\n beta = beta_flat.reshape(self.J, self.M + 1)\n LL = self.log_likelihood(beta=beta)\n LP = ag_mvn.logpdf(beta, self.b0, self.B0).sum()\n return -(LL + LP)\n\n resp = minimize(_neg_log_posterior,\n x0=np.copy(self.beta),\n jac=jacobian(_neg_log_posterior),\n method='L-BFGS-B',\n options=dict(\n maxiter=self.max_iters\n ))\n beta_map = resp.x.reshape(self.J, self.M + 1)\n self.beta = beta_map", "def sample(self):\n if self.params is not None:\n self.value = np.random.choice(self.params)", "def mh_sample(x, log_pdf_lambda, jump_std, D, num_samples=1, burn=1, lag=1):\n num_collected = 0\n iters = 0\n samples = []\n\n t_samples = num_samples*lag+burn\n\n checkevery = max(20, int(t_samples/100.0))\n accepted = 0.0\n acceptance_rate = 0.0\n iters = 1.0\n aiters = 1.0\n\n if D[0] >= 0.0 and D[1] == float('Inf'):\n jumpfun = lambda x, jstd: fabs(x + normrnd(0.0, jstd))\n elif D[0] == 0 and D[1] == 1:\n def jumpfun(x, jstd):\n x = fabs(x 
+ normrnd(0.0, jstd))\n if x > 1.0:\n x = x%1\n\n assert x > 0 and x < 1\n\n return x\n else:\n jumpfun = lambda x, jstd: x + normrnd(0.0, jstd)\n\n logp = log_pdf_lambda(x)\n while num_collected < num_samples:\n\n # every now and then propose wild jumps incase there very distant modes\n x_prime = jumpfun(x, jump_std)\n assert( x_prime > D[0] and x_prime < D[1] )\n \n logp_prime = log_pdf_lambda(x_prime)\n\n # if log(random.random()) < logp_prime - logp:\n if log(random.random()) < logp_prime - logp:\n x = x_prime\n logp = logp_prime\n accepted += 1.0\n acceptance_rate = accepted/aiters\n\n if iters > burn and iters%lag == 0:\n num_collected += 1\n samples.append(x)\n\n # keep the acceptance rate around .3 +/- .1\n if iters % checkevery == 0:\n if acceptance_rate >= .4:\n jump_std *= 1.1\n elif acceptance_rate <= .2:\n jump_std *= .9019\n # print(\"j : %1.4f, AR: %1.4f\" % (jump_std, acceptance_rate))\n accepted = 0.0\n acceptance_rate = 0.0\n aiters = 0.0\n\n\n iters += 1.0\n aiters += 1.0\n\n if num_samples == 1:\n return samples[0]\n else:\n return samples" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Provides parse results over an iterable of input lines which are parsed according to the format of the implementation.
def parse_lines(self, lines):
    raise NotImplementedError(self.__class__)
[ "def build_from(lines:[str]) -> [object]:\n lines = iter(lines)\n current_line = None\n while True:\n try:\n line = next(lines).strip()\n except StopIteration:\n break\n if not line: break\n if REG_CHARACTER.match(line): # new line\n if current_line:\n yield current_line\n try:\n character, content, refs = parse_line(line)\n except TypeError: # parse_line returned None ?!\n print(f\"ERROR: parse_line didn't parse '{line}'\")\n current_line = Line(character.strip(), content.strip(), refs)\n else: # continuation of previous line\n # print('CURRENT LINE:', current_line)\n # print(' :', line)\n current_line.content += '\\n' + line\n if current_line:\n yield current_line", "def parse_lines(*lines):\n return list(parse_lines(lines, []))", "def parse(cls, input):", "def process_lines(self, lines, file):\n return lines", "def read(self):\n if self.verbose:\n print \"Reading\", self.file_name\n with open(self.input) as f:\n for line in f:\n line = line.rstrip().split()\n yield self.formatted_line(line)", "def _parse(input_text):\n result = []\n tmpl_json = {}\n tmpl_vars = {}\n markup2issuetype = _default_markup2issuetype()\n\n for line_number, line in enumerate(input_text.splitlines(), 1):\n line = _preprocess(tmpl_vars, line)\n\n if line.startswith(('h', '#', '(')):\n _maybe_task(result, line, line_number,\n tmpl_json, markup2issuetype)\n\n elif line.startswith('='):\n _update_task_description(result[-1], line[1:])\n\n elif line.startswith('.'):\n _maybe_precreated_task(result, line, line_number)\n\n elif line.startswith('{'):\n _maybe_json(tmpl_json, line)\n\n elif line.startswith('['):\n _maybe_variable(tmpl_vars, line)\n\n elif line.startswith('!define '):\n _maybe_redefine_issuetype(markup2issuetype, line)\n\n elif len(result) > 0:\n result.append({'text': line})\n\n return result", "def testLineParsingNormal(self):\n\n a = LedSwitcher(\"../test/testinputs/input_assign3.txt\")\n a.parseFile()\n self.assertTrue(a.parseEachLine(\"turn on 619,181 through 736,944\") == [True, 619, 181, 736, 944])", "def _iter_by_line(self, content):\r\n for line in content.split(self.linesep):\r\n yield line", "def parse_file(self, input_file):\n with open(input_file, \"r\") as f:\n lines = f.readlines()\n res = []\n for line in lines:\n if not line:\n continue\n res.append(self.parser_one_line(line))\n return res", "def process(self, lines):\n for line in lines:\n self._process_line(line)", "def _eagerly_parse_lines(self, lines, skeleton_regex, event_parsers, events, time=None):\n\n # Recompile all regex so that they work on bytes rather than strings.\n # This simplifies the rest of the code while allowing the raw output\n # from a process to be fed\n def encode(string):\n return string.encode('ascii')\n\n events = list(map(encode, events))\n event_parsers = {\n encode(event): parser\n for event, parser in event_parsers.items()\n }\n\n # Only add an extra iterator and tuple unpacking if that is strictly\n # necessary, as it comes with a performance cost\n time_is_provided = time is not None\n skel_search = skeleton_regex.search\n if time_is_provided:\n lines = zip(time, lines)\n drop_filter = lambda line: not skel_search(line[1])\n else:\n drop_filter = lambda line: not skel_search(line)\n\n # First, get rid of all the lines coming before the trace\n lines = itertools.dropwhile(drop_filter, lines)\n\n # Appending to lists is amortized O(1). 
Inside the list, we store\n # tuples since they are:\n # 1) the most compact Python representation of a product type\n # 2) output directly by regex.search()\n skeleton_data = []\n events_data = {\n **{event: (None, None) for event in events},\n **{\n event: (parser.bytes_regex.search, [])\n for event, parser in event_parsers.items()\n },\n }\n available_events = set()\n\n begin_time = None\n end_time = None\n time_type = getattr(np, self.HEADER_FIELDS['__timestamp'])\n\n # THE FOLLOWING LOOP IS A THE MOST PERFORMANCE-SENSITIVE PART OF THAT\n # CLASS, APPLY EXTREME CARE AND BENCHMARK WHEN MODIFYING\n # Best practices:\n # - resolve all dotted names ahead of time\n # - minimize the amount of local variables. Prefer anonymous\n # expressions\n # - Catch exceptions for exceptional cases rather than explicit check\n\n # Pre-lookup methods out of the loop to speed it up\n append = list.append\n group = self._RE_MATCH_CLS.group\n groups = self._RE_MATCH_CLS.groups\n nextafter = np.nextafter\n inf = math.inf\n line_time = 0\n parse_time = '__timestamp' in skeleton_regex.groupindex.keys()\n\n for line in lines:\n prev_time = line_time\n if time_is_provided:\n line_time, line = line\n\n match = skel_search(line)\n # Stop at the first non-matching line\n try:\n event = group(match, '__event')\n line_time = time_type(group(match, '__timestamp'))\n # Assume only \"time\" is not in the regex. Keep that out of the hot\n # path since it's only needed in rare cases (like nesting parsers)\n except IndexError:\n # If we are supposed to parse time, let's re-raise the\n # exception\n if parse_time:\n raise\n else:\n # Otherwise, make sure \"event\" is defined so that we only\n # go a match failure on \"time\"\n event # pylint: disable=pointless-statement\n # The line did not match the skeleton regex, so skip it\n except TypeError:\n if b'EVENTS DROPPED' in line:\n raise DroppedTraceEventError('The trace buffer got overridden by new data, increase the buffer size to ensure all events are recorded')\n # Unknown line, could be coming e.g. from stderr\n else:\n continue\n\n # Do a global deduplication of timestamps, across all\n # events regardless of the one we will parse. 
This ensures\n # stable results and joinable dataframes from multiple\n # parser instance.\n if line_time <= prev_time:\n line_time = nextafter(prev_time, inf)\n\n if begin_time is None:\n begin_time = line_time\n\n # If we can parse it right away, let's do it now\n try:\n search, data = events_data[event]\n append(\n data,\n # Add the fixedup time\n groups(search(line)) + (line_time,)\n )\n # If we don't have a parser for it yet (search == None),\n # just store the line so we can infer its parser later\n except TypeError:\n # Add the fixedup time and the full line for later\n # parsing as well\n append(\n skeleton_data,\n groups(match) + (line_time, line)\n )\n # We are not interested in that event, but we still remember the\n # pareseable events\n except KeyError:\n available_events.add(event)\n\n # This should have been set on the first line.\n # Note: we don't raise the exception if no events were asked for, to\n # allow creating dummy parsers without any line\n if begin_time is None and events:\n raise ValueError('No lines containing events have been found')\n\n end_time = line_time\n available_events.update(\n event\n for event, (search, data) in events_data.items()\n if data\n )\n\n events_df = {}\n for event, parser in event_parsers.items():\n try:\n # Remove the tuple data from the dict as we go, to free memory\n # before proceeding to the next event to smooth the peak memory\n # consumption\n _, data = events_data.pop(event)\n except KeyError:\n pass\n else:\n decoded_event = event.decode('ascii')\n df = self._make_df_from_data(parser.regex, data, ['__timestamp'])\n # Post-process immediately to shorten the memory consumption\n # peak\n df = self._postprocess_df(decoded_event, parser, df)\n events_df[decoded_event] = df\n\n # Compute the skeleton dataframe for the events that have not been\n # parsed already. 
It contains the event name, the time, and potentially\n # the fields if they are needed\n skeleton_df = self._make_df_from_data(skeleton_regex, skeleton_data, ['__timestamp', 'line'])\n # Drop unnecessary columns that might have been parsed by the regex\n to_keep = {'__event', '__fields', 'line'}\n skeleton_df = skeleton_df[sorted(to_keep & set(skeleton_df.columns))]\n # Make the event column more compact\n skeleton_df['__event'] = skeleton_df['__event'].astype('category', copy=False)\n # This is very fast on a category dtype\n available_events.update(skeleton_df['__event'].unique())\n\n available_events = {event.decode('ascii') for event in available_events}\n return (events_df, skeleton_df, (begin_time, end_time), available_events)", "def parse(self):\n reader_args = (self.filename,\n self.fs,\n self.header,\n self.max_lines,\n self.field_pre_filter,\n self.record_pre_filter)\n\n with Reader(*reader_args) as reader:\n for nr, record in enumerate(reader, 1): # line numbers start from 1\n record = self.record_func(nr, self._parse_fields(record))\n if self.record_post_filter(nr, record):\n yield record", "def parse(self, lines):\n self.reset()\n if type(lines) is str:\n lines = lines.split(\"\\n\")\n\n line_no = 0\n for line in lines:\n line_no += 1\n\n # Block begin?\n m, block_class = self.is_block_begin(line)\n if block_class:\n new_block = block_class(line_no, m.group(1))\n self.push_block(switch=self.add_element(new_block))\n continue\n # Block end?\n m = self.is_block_end(line)\n if m:\n self.pop_block(m.group(1))\n continue\n\n m = self.RE_EXEC.search(line)\n if m:\n element = exec_t(line_no, stmt=m.group(2), indent=m.end(1))\n else:\n element = line_t(line_no, line)\n\n # Regular line\n self.add_element(element)", "def updateLineParsing(self):\n self.titleLine = self.parseLine(self.getTitleLine())\n self.outputLines = [self.parseLine(line) for line in\n self.getOutputLines(False)]\n if self.origOutputLines:\n self.origOutputLines = [self.parseLine(line) for line in\n self.getOutputLines(True)]", "def input_parser(input_file):\r\n # list to collect lines for each experiment\r\n experiment = []\r\n # iterate over lines in file\r\n for line in input_file:\r\n line = line.strip()\r\n # as long as line is not empty, append to experiment\r\n if line != '':\r\n experiment.append(line.split(\",\"))\r\n # on empty lines, yield experiment list\r\n else:\r\n yield experiment\r\n experiment = []\r\n\r\n # make sure to yield final experiment (if no newline at last test)\r\n if experiment:\r\n yield experiment", "def parse(cls, lines):\n header = lines[0].strip()\n term, classifiers = header_regex.split(\n header, maxsplit=1) if (' :' in header) else (header, '')\n classifiers = [\n classifier.strip() for classifier in classifiers.split('or')]\n if classifiers == ['']:\n classifiers = []\n trimed_lines = trim_indent(lines[1:]) if (len(lines) > 1) else ['']\n definition = [line.rstrip() for line in trimed_lines]\n return Item(term.strip(), classifiers, definition)", "def rows(self):\n def parse_result_row(row):\n return row.split(\"\\t\")\n\n for row in self.results.data:\n yield parse_result_row(row)", "def _process_lines(lines: typing.List[str], offset: int, registration_processor: RegistrationProcessor):\n\n onnx_op = \"ONNX_OPERATOR_KERNEL_CLASS_NAME\"\n onnx_op_len = len(onnx_op)\n onnx_typed_op = \"ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME\"\n onnx_typed_op_len = len(onnx_typed_op)\n onnx_versioned_op = \"ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME\"\n onnx_versioned_op_len = 
len(onnx_versioned_op)\n onnx_versioned_typed_op = \"ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME\"\n onnx_versioned_typed_op_len = len(onnx_versioned_typed_op)\n onnx_two_typed_op = \"ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME\"\n onnx_two_typed_op_len = len(onnx_two_typed_op)\n onnx_versioned_two_typed_op = \"ONNX_OPERATOR_VERSIONED_TWO_TYPED_KERNEL_CLASS_NAME\"\n onnx_versioned_two_typed_op_len = len(onnx_versioned_two_typed_op)\n end_marks = tuple([\");\", \")>\", \")>,\", \")>,};\", \")>};\"])\n\n end_mark = \"\"\n lines_to_process = []\n\n # merge line if split over multiple.\n # original lines will be in lines_to_process. merged and stripped line will be in code_line\n while True:\n lines_to_process.append(lines[offset])\n stripped = lines[offset].strip()\n line_end = False\n\n for mark in end_marks:\n if stripped.endswith(mark):\n end_mark = mark\n line_end = True\n break\n\n if line_end:\n break\n\n offset += 1\n if offset > len(lines):\n log.error(\"Past end of input lines looking for line terminator.\")\n sys.exit(-1)\n\n code_line = \"\".join([line.strip() for line in lines_to_process])\n\n if onnx_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 7, Cos)>,\n trim_at = code_line.index(onnx_op) + onnx_op_len + 1\n *_, domain, start_version, op_type = (arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\"))\n\n registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, None)\n\n elif onnx_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 7, double, Sin)>,\n trim_at = code_line.index(onnx_typed_op) + onnx_typed_op_len + 1\n *_, domain, start_version, type, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(lines_to_process, domain, op_type, int(start_version), None, type)\n\n elif onnx_versioned_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 1, 10, Hardmax)>,\n trim_at = code_line.index(onnx_versioned_op) + onnx_versioned_op_len + 1\n *_, domain, start_version, end_version, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), None\n )\n\n elif onnx_versioned_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 1, 10, float, LogSoftmax)>,\n trim_at = code_line.index(onnx_versioned_typed_op) + onnx_versioned_typed_op_len + 1\n *_, domain, start_version, end_version, type, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), type\n )\n\n elif onnx_two_typed_op in code_line:\n # e.g. 
BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,\n trim_at = code_line.index(onnx_two_typed_op) + onnx_two_typed_op_len + 1\n *_, domain, start_version, type1, type2, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), None, type1 + \", \" + type2\n )\n\n elif onnx_versioned_two_typed_op in code_line:\n # e.g. BuildKernelCreateInfo<ONNX_OPERATOR_TWO_TYPED_KERNEL_CLASS_NAME(\n # kCpuExecutionProvider, kOnnxDomain, 19, float, uint8, QuantizeLinear)>,\n trim_at = code_line.index(onnx_versioned_two_typed_op) + onnx_versioned_two_typed_op_len + 1\n *_, domain, start_version, end_version, type1, type2, op_type = (\n arg.strip() for arg in code_line[trim_at : -len(end_mark)].split(\",\")\n )\n registration_processor.process_registration(\n lines_to_process, domain, op_type, int(start_version), int(end_version), type1 + \", \" + type2\n )\n\n else:\n log.warning(f\"Ignoring unhandled kernel registration variant: {code_line}\")\n for line in lines_to_process:\n registration_processor.process_other_line(line)\n\n return offset + 1", "def parseInput (lines_by_case, f):\n\t# Read test cases\n\tcases = []\n\tcase_nr = int(f.readline().strip())\n\tfor case_idx in range(case_nr):\n\t\tcase = TestCase(case_idx + 1)\n\t\tcase.input = []\n\t\tfor line_idx in range(lines_by_case):\n\t\t\tcase.input.append(f.readline().rstrip('\\n'))\n\t\tcases.append(case)\n\treturn cases" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for printing progress and time elapsed.
def progress(i, num_total, t):
    dt = time.time() - t
    print '\r', i, '/',num_total, 'Elapsed Time:', dt , 'Time Remaining:',
    print 1.0 * dt / (i+1) * (num_total-i-1),
[ "def show_elapsed_time(start, end):\n PRINT('Elapsed: %s' % (end - start))", "def module_progress(strInfo):\n\n sys.stdout.write(_colors('PROGRESS') + '\\n > ' + _colors('ENDC'))\n sys.stdout.write(strInfo + '...')\n sys.stdout.flush()\n\n # Start the timer\n tStart = time.time()\n\n return tStart", "def _printProgress(caller, event):\n global currentAlgorithm, currentProgress\n\n pm = vtkProcessModule.GetProcessModule()\n progress = caller.GetLastProgress()\n alg = caller.GetLastProgressText()\n if alg != currentAlgorithm and alg:\n if currentAlgorithm:\n while currentProgress <= 10:\n import sys\n sys.stdout.write(\".\")\n currentProgress += 1\n print (\"]\")\n currentProgress = 0\n print (alg, \": [ \", end=\"\")\n currentAlgorithm = alg\n while currentProgress <= progress:\n import sys\n sys.stdout.write(\".\")\n #sys.stdout.write(\"%d \" % pm.GetLastProgress())\n currentProgress += 1\n if progress == 10:\n print (\"]\")\n currentAlgorithm = None\n currentProgress = 0", "def print_progress(percent):\n _print(str(int(percent)) + '%')", "def boto_progress(self, complete, total):\n if sys.stdin.isatty():\n if complete == 0:\n self.progress_stime = time.monotonic()\n sys.stdout.write(\"|\" + \"-\" * 10 + \"|\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"|\")\n sys.stdout.write(\".\")\n if complete == total:\n self.progress_etime = time.monotonic()\n sys.stdout.write(\"|\")\n sys.stdout.write(\"\\n\")\n seconds = self.boto_progress_duration()\n sys.stdout.write(\"{} seconds\".format(seconds))\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()", "def print_status(numcodes, totalNum, msg): #progress indicator\n print('Record: {} / {} {:>20}\\r'.format(numcodes, totalNum, msg), end='\\r'),\n sys.stdout.flush()", "def print_stats():\r\n\tprint()\r\n\r\n\tall_fn_names = [k for k in _total_times.keys() if k not in _disabled_names]\r\n\r\n\tmax_name_width = max([len(k) for k in all_fn_names] + [4])\r\n\tif max_name_width % 2 == 1: max_name_width += 1\r\n\tformat_str = ' {:>%d} | {:>10.4f} ' % max_name_width\r\n\r\n\theader = (' {:^%d} | {:^10} ' % max_name_width).format('Name', 'Time (ms)')\r\n\tprint(header)\r\n\r\n\tsep_idx = header.find('|')\r\n\tsep_text = ('-' * sep_idx) + '+' + '-' * (len(header)-sep_idx-1)\r\n\tprint(sep_text)\r\n\r\n\tfor name in all_fn_names:\r\n\t\tprint(format_str.format(name, _total_times[name]*1000))\r\n\t\r\n\tprint(sep_text)\r\n\tprint(format_str.format('Total', total_time()*1000))\r\n\tprint()", "def displayProgress(self):\n\n nRays = len(self)\n if self.iteration % self.progressLog == 0:\n self.progressLog *= 3\n if self.progressLog > nRays:\n self.progressLog = nRays\n\n print(\"Progress {0}/{1} ({2:.0f}%) \".format(self.iteration, nRays, self.iteration / nRays * 100))", "def progress_report(count_so_far, total_count, start_time):\n current_time = datetime.datetime.now()\n time_passed = current_time - start_time\n time_per = time_passed/count_so_far\n time_to_go = (total_count - count_so_far)*time_per\n \n return 'Processed {0} of {1} window centers so far in {2}. 
Still to go: {3}'.format(\n count_so_far, \n total_count, \n str(time_passed), \n str(time_to_go))", "def PrintProgress(self):\n ratio = 100*self.progressBar['value'] / self.progressBar['maximum']\n s = '\\033[1K\\r['\n n = math.floor(ratio)\n s += '=' * n\n if n < 100:\n s += '>' + '.'*(100-n-1)\n s += '] {:6.2f} %'.format(ratio)\n print(s, end='')\n sys.stdout.flush()", "def print_stats(upload, download):\n return \"▼ {}/{} ▲\".format(human_bytes(download / SLEEP * 8), human_bytes(upload / SLEEP * 8))", "def print_result(self) -> None:\n total_time: float = 0\n for i in range(1, len(self._stage_time)):\n time_used = self._stage_time[i][0] - self._stage_time[i-1][0]\n total_time += time_used\n print(\"Stage {} is executed in {:.5f} seconds\".format(self._stage_time[i][1], time_used))\n\n print(\"Total execution time is {:.5f} seconds\".format(total_time))", "def _print_download_progress(count, block_size, total_size):\n \n # percentage completion.\n pct_complete = float(count*block_size)/total_size\n \n # Status message. \n msg = \"\\r- Download progress: {0:.1%}\".format(pct_complete) #'\\r':当一行打印结束后,再从该行开始位置打印\n \n # Print\n sys.stdout.write(msg) # 相当于print(但最后不会添加换行符)\n sys.stdout.flush() # 输出缓冲,以便实时显示进度", "def time_track_print():\n\tglobal _time_track_dict\n#\tif not _time_track_dict.values(): return\n\tmax_time = max(_time_track_dict.values())\n\ttupel_list = [(fn_name, \"%.2f%%\" % (100*exe_time/max_time), \"%fs\" % exe_time) for (fn_name, exe_time) in sorted(_time_track_dict.items(), key=operator.itemgetter(1), reverse=True)]\n\tmax_len_item_1 = max([len(x) for (x,_,_) in tupel_list])\n\tmax_len_item_2 = max([len(x) for (_,x,_) in tupel_list])\n\tmax_len_item_3 = max([len(x) for (_,_,x) in tupel_list])\n\tfor (x,y,z) in tupel_list:\n\t\tprint x.ljust(max_len_item_1 + 3), y.rjust(max_len_item_2), z.rjust(max_len_item_3 + 3)", "def print_runtime(seconds):\n print_status(C('{:.3f}s'.format(seconds), fore='cyan'), file=sys.stderr)", "def TimeReport(self) -> None:\n if self.dry_run:\n logging.info('Total: %d bytes', self.num_bytes)\n else:\n end_time = time.time()\n dt = end_time - self.start_time\n rate = self.num_bytes / 1024.0 / dt\n logging.info('Total: %d KB/s (%d bytes in %.3fs)', rate, self.num_bytes,\n dt)", "def __progress(to_download, downloaded, to_upload, uploaded):\n\n del to_upload\n del uploaded\n\n if to_download != 0 and downloaded != 0:\n\n percent_completed = float(downloaded) / to_download\n rate = round(percent_completed * 100, ndigits=2)\n completed = \"#\" * int(rate)\n spaces = \" \" * (100 - int(rate))\n\n sys.stdout.write('\\r[%s%s] %s%%' % (completed, spaces, rate))\n sys.stdout.flush()", "def print_time(seconds, task):\n\n print (\"Time spent on \" + task + \": \" + str(round((seconds / 60.0), 2)) + \" minutes.\")", "def module_progress_done(tStart):\n\n # Measure the time\n tTime = time.time() - tStart\n if (tTime < 1) and (tTime >= 1e-3): # Miliseconds range\n tTime = tTime * 1e3\n strTime = ('done in %.2f ms') % (tTime)\n elif (tTime < 1e-3) and (tTime >= 1e-6): # Microseconds range\n tTime = tTime * 1e6\n strTime = ('done in %.2f us') % (tTime)\n else:\n strTime = ('done in %.2f s') % (tTime)\n sys.stdout.write(_colors('OK') + strTime + _colors('ENDC') + '\\n\\n\\n')\n sys.stdout.flush()\n\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to sample an op for each edge.
def add_sampled_op_index(edge):
    op_index = np.random.randint(len(edge.data.op))
    edge.data.set('op_index', op_index, shared=True)
[ "def make_samples(self):\n for generated_pixel in self.generate_edge_pixels():\n yield self.pixel_to_sample(generated_pixel[0], generated_pixel[1])", "def sample_edge_uniform(_, __, n_triplets, sample_size):\n all_edges = np.arange(n_triplets)\n return np.random.choice(all_edges, sample_size, replace=False)", "def edge_obj_sample(cpdags, ws, num_samples, obj=None, is_tree=False):\n if obj in [objective_given_dags_interventions, None]:\n num_cpdags = len(cpdags)\n dag_list = []\n cpdag_list = []\n for i in range(num_samples):\n cpdag = cpdags[np.random.choice(num_cpdags, p=ws)]\n dag = mec_size.uniform_sample_dag_plural(cpdag, 1)[0]\n cpdag_list.append(cpdag)\n dag_list.append(dag)\n def new_obj(epsilon):\n out = 0\n for i in range(num_samples):\n out += objective_given_dags_interventions(cpdag_list[i], epsilon, cpdag_list[i].copy(), [dag_list[i]], is_tree=is_tree) / num_samples\n return out\n return new_obj\n return", "def testSampleNeighborLayerwise(self):\n\n op = ops.sample_neighbor_layerwise([[1, 2, 3], [1, 2, 3],\n [2, 3, 4], [2, 2, 4]],\n ['0', '1'],\n 10)\n with tf.Session() as sess:\n for i in range(10):\n ids, _ = sess.run(op)\n self.assertEqual(10, len(ids[0]))\n [self.assertTrue(n1 in [2, 3, 4, 5]) for n1 in ids[0]]\n [self.assertTrue(n2 in [3, 4, 5]) for n2 in ids[2]]\n [self.assertTrue(n3 in [3, 5]) for n3 in ids[3]]", "def generate_random_edge_weights(self, min=1, max=100):\n for n1, n2 in self.edges:\n self.edges[n1,n2]['weight'] = random.randint(min, max)", "def testSampleNeighbor(self):\n\n op = ops.sample_neighbor([1, 2], [\"0\", \"1\"], 10)\n with tf.Session() as sess:\n ids, weights, types = sess.run(op)\n self.assertEqual(10, len(ids[0]))\n self.assertEqual(10, len(weights[0]))\n self.assertEqual(10, len(types[0]))\n\n [self.assertTrue(n1 in [2, 3, 4]) for n1 in ids[0]]\n [self.assertTrue(int(w1) in [2, 3, 4]) for w1 in weights[0]]\n # [self.assertTrue(t1 in [0, 1] for t1 in types[0])]\n\n [self.assertTrue(n2 in [3, 5]) for n2 in ids[1]]\n [self.assertTrue(int(w2) in [3, 5]) for w2 in weights[1]]\n # [self.assertTrue(t2 in [0, 1] for t2 in types[1])]", "def testSampleNeighborLayerwiseFanout(self):\n\n op = ops.sample_fanout_layerwise_each_node(\n tf.constant([1, 2, 3, 4, 5], dtype=tf.int64),\n [['0', '1'], ['0', '1']],\n [10, 20])\n with tf.Session() as sess:\n ids, adj = sess.run(op)\n self.assertEqual(50, len(ids[1]))\n self.assertEqual(100, len(ids[2]))\n\n op = ops.sample_fanout_layerwise(\n tf.constant([1, 2, 3, 4, 5], dtype=tf.int64),\n [['0', '1'], ['0', '1']],\n [10, 20])\n with tf.Session() as sess:\n ids, adj = sess.run(op)\n self.assertEqual(10, len(ids[1]))\n self.assertEqual(20, len(ids[2]))\n print (ids[1], sess.run(tf.sparse_tensor_to_dense(adj[0])),\n ids[2], sess.run(tf.sparse_tensor_to_dense(adj[1])))", "def sample_action(self, state):\n # YOUR CODE HERE\n action = np.random.choice([0, 1], p=self.get_probs([state, state], [0, 1]))\n return action", "def randomGraphEdges(nodeCount=10, edgeCount=30):\n allEdges = list(combinations(range(nodeCount), 2))\n return sample(allEdges, min(len(allEdges), edgeCount))", "def testOpSeedSelectionNotSensitive(self):\n def f(include_print):\n shape = constant_op.constant([5])\n if include_print:\n shape = logging_ops.Print(shape, [shape])\n return random.get_global_generator().normal(shape)\n\n def compare(fst_includes_print, snd_includes_print):\n random.get_global_generator().reset(50)\n fst = f(fst_includes_print)\n random.get_global_generator().reset(50)\n snd = f(snd_includes_print)\n self.assertAllEqual(fst, 
snd)\n # Now do the above again using accelerated (defunned) 'f'.\n # Running 'f' with two different Boolean arguments should cause\n # two different graphs to be generated, hence demonstrating the\n # insensitivity to graph changes.\n f_acc = def_function.function(f)\n random.get_global_generator().reset(50)\n fst = f_acc(fst_includes_print)\n random.get_global_generator().reset(50)\n snd = f_acc(snd_includes_print)\n self.assertAllEqual(fst, snd)\n\n compare(False, False)\n compare(True, True)\n compare(True, False)", "def test_superop_adjoint_random(self):\n mats = [self.rand_matrix(4, 4) for _ in range(4)]\n chans = [SuperOp(Operator(mat)) for mat in mats]\n self._compare_adjoint_to_operator(chans, mats)", "def sample_state_action(self):\n\n # Not sure if state should be sampled first and then action rather than both simultaneously.\n # Doing this for now for simplicity.\n r = np.random.randint(len(self.model.keys()))\n (state, action) = list(self.model.keys())[r]\n return (state, action)", "def sample_tree_mcmc(edge_logits, edges, backend=\"python\"):\n if backend == \"python\":\n return _sample_tree_mcmc(edge_logits, edges)\n elif backend == \"cpp\":\n return _get_cpp_module().sample_tree_mcmc(edge_logits, edges)\n else:\n raise ValueError(\"unknown backend: {}\".format(repr(backend)))", "def weighted_dags_edge_obj_sample(cpdags, ws, dags, obj = None, total_x=1, is_tree=False):\n n = cpdags[0].shape[0]\n num_samples = len(cpdags)\n def new_obj(epsilon):\n out = 0\n for i in range(num_samples):\n out += objective_given_dags_interventions(cpdags[i], epsilon, cpdags[i].copy(), [dags[i]], is_tree=is_tree) * ws[i]\n return out\n def new_stochastic_grad(intervention_set, x):\n \"\"\"\n intervention set is existing interventions, x is the continuous numpy array\n \"\"\"\n grad_f = np.zeros(n)\n #sample the intervention given x\n indexes = np.random.randint((len(dags)), size=1)\n \n for i in indexes:\n dag = dags[i]\n cpdag = cpdags[i]\n computed_val = {}\n #do runs for multiple different samples of x\n for _ in range(total_x):\n x_rand = np.random.binomial(1, p = x)\n\n for v in range(0, n):\n x_rand_upper = x_rand.copy()\n x_rand_upper[v] = 1\n x_rand_lower = x_rand.copy()\n x_rand_lower[v] = 0\n\n #tobytes allows us to store the numpy array\n if x_rand_upper.tobytes() not in computed_val:\n cpdag_upper_score = new_obj(intervention_set+[np.flatnonzero(x_rand_upper).tolist()])\n computed_val[x_rand_upper.tobytes()] = cpdag_upper_score\n else:\n cpdag_upper_score = computed_val[x_rand_upper.tobytes()]\n\n if x_rand_lower.tobytes() not in computed_val:\n cpdag_lower_score = new_obj(intervention_set+[np.flatnonzero(x_rand_lower).tolist()])\n computed_val[x_rand_lower.tobytes()] = cpdag_lower_score\n else:\n cpdag_lower_score = computed_val[x_rand_lower.tobytes()]\n\n grad_f[v] += ws[i] * (cpdag_upper_score - cpdag_lower_score)/ (total_x* len(indexes))\n\n return grad_f\n \n def hess_fun(intervention_set, x, e):\n \"\"\"\n estimates the hessian for gred\n \"\"\"\n #print(cpdag)\n \n indexes = np.random.randint((len(dags)), size=1)\n #sample the intervention given x\n \n #print(\"stochastic grad inner\")\n hess = np.zeros((n, n))\n for ind in indexes:\n dag = dags[ind]\n cpdag = cpdags[ind]\n #time2 = time.time()\n #print(time2-time1)\n computed_val = {}\n #do runs for multiple different samples of x\n for _ in range(total_x):\n S = []\n for s in range(n):\n if e[s] < x[s]:\n S.append(s)\n for i in range(n):\n for j in range(i, n):\n if i == j:\n continue\n S_ij = list({i, j}.union(set(S)))\n 
S_i = list({i}.union(set(S)) - {j})\n S_j = list({j}.union(set(S)) - {i})\n S_minus = list(set(S) - {i,j}) #the set with both indices removed\n for S_mod in [S_ij, S_i, S_j, S_minus]:\n if np.array(S_mod).tobytes() not in computed_val:\n cpdag_new = orient_from_intervention(dag, cpdag.copy(), intervention_set+[S_mod], is_tree=is_tree)\n computed_val[np.array(S_mod).tobytes()] = cpdag_obj_val(cpdag_new)\n \n hess[i,j] += ws[ind] * (computed_val[np.array(S_ij).tobytes()]-computed_val[np.array(S_i).tobytes()]-\n computed_val[np.array(S_j).tobytes()]+computed_val[np.array(S_minus).tobytes()])/ (total_x* len(indexes)) \n #print(time.time()-time2)\n return hess\n return new_obj, new_stochastic_grad, hess_fun", "def incrementally_sample(annotations,\r\n hypothesis_load_fn,\r\n mask_shape,\r\n increment=5,\r\n shuffle=True,\r\n **hypothesis_params):\r\n if shuffle:\r\n random.shuffle(annotations)\r\n for i in range(0, len(annotations) + 1, increment):\r\n yield hypothesis_load_fn(mask_shape, annotations[:i], **hypothesis_params)", "def sample_action(self, state) -> (np.ndarray, float):\n raise NotImplementedError", "def graph_khop_sampler(\n row,\n colptr,\n input_nodes,\n sample_sizes,\n sorted_eids=None,\n return_eids=False,\n name=None,\n):\n\n if in_dynamic_mode():\n if return_eids:\n if sorted_eids is None:\n raise ValueError(\n \"`sorted_eid` should not be None \" \"if return_eids is True.\"\n )\n (\n edge_src,\n edge_dst,\n sample_index,\n reindex_nodes,\n edge_eids,\n ) = _legacy_C_ops.graph_khop_sampler(\n row,\n sorted_eids,\n colptr,\n input_nodes,\n \"sample_sizes\",\n sample_sizes,\n \"return_eids\",\n True,\n )\n return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids\n else:\n (\n edge_src,\n edge_dst,\n sample_index,\n reindex_nodes,\n _,\n ) = _legacy_C_ops.graph_khop_sampler(\n row,\n None,\n colptr,\n input_nodes,\n \"sample_sizes\",\n sample_sizes,\n \"return_eids\",\n False,\n )\n return edge_src, edge_dst, sample_index, reindex_nodes\n\n check_variable_and_dtype(\n row, \"Row\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n\n if return_eids:\n if sorted_eids is None:\n raise ValueError(\n \"`sorted_eid` should not be None \" \"if return_eids is True.\"\n )\n check_variable_and_dtype(\n sorted_eids, \"Eids\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n\n check_variable_and_dtype(\n colptr, \"Col_Ptr\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n check_variable_and_dtype(\n input_nodes, \"X\", (\"int32\", \"int64\"), \"graph_khop_sampler\"\n )\n\n helper = LayerHelper(\"graph_khop_sampler\", **locals())\n edge_src = helper.create_variable_for_type_inference(dtype=row.dtype)\n edge_dst = helper.create_variable_for_type_inference(dtype=row.dtype)\n sample_index = helper.create_variable_for_type_inference(dtype=row.dtype)\n reindex_nodes = helper.create_variable_for_type_inference(dtype=row.dtype)\n edge_eids = helper.create_variable_for_type_inference(dtype=row.dtype)\n helper.append_op(\n type=\"graph_khop_sampler\",\n inputs={\n \"Row\": row,\n \"Eids\": sorted_eids,\n \"Col_Ptr\": colptr,\n \"X\": input_nodes,\n },\n outputs={\n \"Out_Src\": edge_src,\n \"Out_Dst\": edge_dst,\n \"Sample_Index\": sample_index,\n \"Reindex_X\": reindex_nodes,\n \"Out_Eids\": edge_eids,\n },\n attrs={\"sample_sizes\": sample_sizes, \"return_eids\": return_eids},\n )\n if return_eids:\n return edge_src, edge_dst, sample_index, reindex_nodes, edge_eids\n else:\n return edge_src, edge_dst, sample_index, reindex_nodes", "def sample_edge_neighborhood(adj_list, 
degrees, n_triplets, sample_size):\n edges = np.zeros(sample_size, dtype=np.int32)\n\n #initialize\n sample_counts = np.array([d for d in degrees]) # sample count for each node\n picked = np.array([False for _ in range(n_triplets)]) # num_triple * 1\n seen = np.array([False for _ in degrees]) # num_node * 1\n for i in range(0, sample_size):\n weights = sample_counts * (~seen)\n if np.sum(weights) == 0:\n weights = np.ones_like(weights)\n weights[np.where(sample_counts == 0)] = 0\n probabilities = (weights) / np.sum(weights)\n chosen_vertex = np.random.choice(np.arange(degrees.shape[0]),\n p=probabilities)\n\n while len(adj_list[chosen_vertex]) == 0:\n chosen_vertex = np.random.choice(np.arange(degrees.shape[0]),\n p=probabilities)\n chosen_adj_list = adj_list[chosen_vertex]\n seen[chosen_vertex] = True\n\n chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))\n chosen_edge = chosen_adj_list[chosen_edge]\n edge_number = chosen_edge[0] # can be regarded as edge_id\n\n while picked[edge_number]: # if edge has been choosed before, then choose again\n chosen_edge = np.random.choice(np.arange(chosen_adj_list.shape[0]))\n chosen_edge = chosen_adj_list[chosen_edge]\n edge_number = chosen_edge[0]\n\n edges[i] = edge_number\n other_vertex = chosen_edge[1]\n picked[edge_number] = True\n sample_counts[chosen_vertex] -= 1\n sample_counts[other_vertex] -= 1\n seen[other_vertex] = True\n return edges", "def random_init(self, deg_sampler, args):\n self.g = gt.random_graph(N=self.N,\n deg_sampler=lambda: deg_sampler(**args),\n directed=False,\n parallel_edges=False,\n random=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to replace the primitive ops at the edges with the sampled one.
def update_ops(edge):
    if isinstance(edge.data.op, list):
        primitives = edge.data.op
    else:
        primitives = edge.data.primitives
    edge.data.set('op', primitives[edge.data.op_index])
    edge.data.set('primitives', primitives)  # store for later use
[ "def add_sampled_op_index(edge):\n op_index = np.random.randint(len(edge.data.op))\n edge.data.set('op_index', op_index, shared=True)", "def replace(self, subset):\n for e in subset:\n v = e[0]\n self.remove_edge(e)\n choices = list(set(self.vertices()) - set([v]))\n new = random.choice(choices)\n self.add_edge(Edge(v, new))", "def compute_sharp_op(self):\n # Tensor product of the basis with itself\n # B_mnpq_jk = X_j_mn*X_k_qp\n B = sparse.tensordot(self.operators, self.operators.conj(),\n 0).transpose([1, 2, 5, 4, 0, 3])\n B_inv = B.reshape([self.op_dim**2,\n self.op_dim**2]).transpose().reshape(2*[self.op_dim] +\n 4*[self.vec_dim])\n self.sharp_op = sparse.tensordot(B_inv, B, ([2,3,4,5], [0,3,2,1]))", "def testOpSeedSelectionNotSensitive(self):\n def f(include_print):\n shape = constant_op.constant([5])\n if include_print:\n shape = logging_ops.Print(shape, [shape])\n return random.get_global_generator().normal(shape)\n\n def compare(fst_includes_print, snd_includes_print):\n random.get_global_generator().reset(50)\n fst = f(fst_includes_print)\n random.get_global_generator().reset(50)\n snd = f(snd_includes_print)\n self.assertAllEqual(fst, snd)\n # Now do the above again using accelerated (defunned) 'f'.\n # Running 'f' with two different Boolean arguments should cause\n # two different graphs to be generated, hence demonstrating the\n # insensitivity to graph changes.\n f_acc = def_function.function(f)\n random.get_global_generator().reset(50)\n fst = f_acc(fst_includes_print)\n random.get_global_generator().reset(50)\n snd = f_acc(snd_includes_print)\n self.assertAllEqual(fst, snd)\n\n compare(False, False)\n compare(True, True)\n compare(True, False)", "def polyFlipEdge():\n pass", "def edge_mask(self):", "def _glenoidEdgeImplicitFunction(self, giaGlenoid, edgePoints):", "def testCPUSameAsOldRandomOps(self):\n seed1, seed2 = 79, 25\n # note how the two seeds for the old op correspond to the seed for the new\n # op\n with ops.device(\"/device:CPU:0\"):\n random.reset_global_generator([0, seed2, seed1])\n shape = constant_op.constant([4, 7])\n dtype = dtypes.float64\n\n # create a graph for the old op in order to call it many times\n @def_function.function\n def old():\n with ops.device(\"/device:CPU:0\"):\n return gen_random_ops.random_standard_normal(\n shape, dtype=dtype, seed=seed1, seed2=seed2)\n\n def new():\n with ops.device(\"/device:CPU:0\"):\n return random.get_global_generator().standard_normal(shape, dtype=dtype)\n\n for _ in range(100):\n self.assertAllEqual(old(), new())", "def _handle_ops_to_ignore(self):\n ops_to_remove = []\n for op in self.get_all_ops().values():\n if op.type in self.passthrough_graph_nodes or op.type in self.input_graph_nodes_to_ignore:\n assert len(op.output_products) == 1\n consumers = [consumer for consumer in op.output_products[0].consumers]\n\n if not op.inputs:\n # Op has no inputs. Simply delete the op, its output product, and the output product from the inputs\n # of consumer ops.\n for consumer in consumers:\n # Check if consumer is not a passthrough or ignore op type. 
If so, create a constant input into\n # the consumer.\n if consumer.type in self.passthrough_graph_nodes or consumer.type in \\\n self.input_graph_nodes_to_ignore:\n consumer.inputs.remove(op.output_products[0])\n else:\n product_index = consumer.inputs.index(op.output_products[0])\n constant_product = self._add_product(f'constant_{self._constant_count}',\n op.output_products[0].shape)\n constant_product._is_const = True\n self._constant_count += 1\n constant_product.add_consumer(consumer)\n consumer.inputs[product_index] = constant_product\n\n else:\n assert len(op.inputs) == 1\n for consumer in consumers:\n # Index of consumer's input list corresponding to this op's output product\n consumer_input_index = consumer.inputs.index(op.output_products[0])\n # Replace this op's output product in consumer's input with this op's input product\n consumer.inputs[consumer_input_index] = op.inputs[0]\n\n # Get index of op's input product consumer list corresponding to this op\n op_index = op.inputs[0].consumers.index(op)\n # Replace this op in the input product consumers list with all consumers of this op's output product\n op.inputs[0]._consumers[op_index] = consumers\n op.inputs[0]._consumers = _flatten_lists(op.inputs[0].consumers)\n\n ops_to_remove.append(op)\n del self._products[op.output_products[0].name]\n\n for op in ops_to_remove:\n del self._ops[op.name]", "def _scalarize(self, transformed_multi_objectives: tf.Tensor) -> tf.Tensor:", "def edge_features(self):", "def test_superop_adjoint_random(self):\n mats = [self.rand_matrix(4, 4) for _ in range(4)]\n chans = [SuperOp(Operator(mat)) for mat in mats]\n self._compare_adjoint_to_operator(chans, mats)", "def test_multiple_non_compute_intensive_ops():\n\n def get_graph():\n x = relay.var(\"x\", shape=(2, 2, 4), dtype=\"int8\")\n x = relay.reshape(x, newshape=(1, 2, 2, 4))\n x = relay.clip(x, 0.0, 1.0)\n x = relay.reshape(x, newshape=(2, 2, 4))\n return relay.clip(x, 0.0, 1.0)\n\n def before():\n func = get_graph()\n return tei.make_ethosn_partition(func)\n\n def expected():\n func = get_graph()\n mod = tvm.IRModule.from_expr(func)\n return relay.transform.InferType()(mod)\n\n mod = before()\n mod = InlineNonComputeIntensivePartitions()(mod)\n expected_mod = expected()\n _assert_structural_equal(mod, expected_mod)", "def test_identity_single_removal_on_binary_elementwise():\n\n def get_graph(get_expected=False):\n x = relay.var(\"x\", shape=(1, 4, 1, 4), dtype=\"int8\")\n y = relay.var(\"y\", shape=(1, 2, 2, 4), dtype=\"int8\")\n y = relay.reshape(y, newshape=(1, 4, 1, 4))\n if not get_expected:\n y = infra.make_ethosu_identity(y)\n z = infra.make_ethosu_binary_elementwise(x, y, 4, 4, \"ADD\", \"int8\")\n return relay.Function(relay.analysis.free_vars(z), z)\n\n actual = _optimize(get_graph())\n expected = _optimize(get_graph(get_expected=True), optimize=False)\n _assert_structural_equal(actual, expected)", "def _prune_non_control_edges_of_debug_ops(self):\n for node in self._node_inputs:\n inputs = self._node_inputs[node]\n\n for i, inp in enumerate(inputs):\n if is_copy_node(inp):\n # Find the input to the Copy node, which should be the original\n # input to the node.\n orig_inp = self._node_inputs[inp][0]\n inputs[i] = orig_inp", "def test_edge_mutation_for_graph():\n graph_without_edge = \\\n OptGraph(OptNode({'name': 'logit'}, [OptNode({'name': 'one_hot_encoding'}, [OptNode({'name': 'scaling'})])]))\n\n primary = OptNode({'name': 'scaling'})\n graph_with_edge = \\\n OptGraph(OptNode({'name': 'logit'}, [OptNode({'name': 'one_hot_encoding'}, 
[primary]), primary]))\n\n composer_requirements = GPComposerRequirements(primary=['scaling', 'one_hot_encoding'],\n secondary=['logit', 'scaling'], mutation_prob=1)\n\n graph_params = GraphGenerationParams(adapter=DirectAdapter(),\n rules_for_constraint=DEFAULT_DAG_RULES)\n successful_mutation_edge = False\n for _ in range(100):\n graph_after_mutation = mutation(types=[MutationTypesEnum.single_edge],\n params=graph_params,\n ind=Individual(graph_without_edge),\n requirements=composer_requirements,\n log=default_log(__name__), max_depth=graph_with_edge.depth).graph\n if not successful_mutation_edge:\n successful_mutation_edge = \\\n graph_after_mutation.root_node.descriptive_id == graph_with_edge.root_node.descriptive_id\n else:\n break\n assert successful_mutation_edge", "def test_identity_removal_on_binary_elementwise():\n\n def get_graph(get_expected=False):\n x = relay.var(\"x\", shape=(1, 2, 2, 4), dtype=\"int8\")\n y = relay.var(\"y\", shape=(1, 2, 2, 4), dtype=\"int8\")\n if not get_expected:\n x = infra.make_ethosu_identity(x)\n y = infra.make_ethosu_identity(y)\n z = infra.make_ethosu_binary_elementwise(x, y, 4, 4, \"ADD\", \"int8\")\n return relay.Function(relay.analysis.free_vars(z), z)\n\n actual = _optimize(get_graph())\n expected = _optimize(get_graph(get_expected=True), optimize=False)\n _assert_structural_equal(actual, expected)", "def _optimize_connected_graph(self):\n self._handle_ops_to_ignore()\n self._handle_tuple_and_list_construct_ops()\n self._handle_tuple_and_list_unpack_ops()", "def test_remove_extra_edges():\n g = graphs.RandomGNP(20, .5)\n r=g\n r, _ = FUN.remove_extra_edges(r)\n assert len(r.independent_set()) == len(g.independent_set())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the basename of a path, lowercased.
def get_lower_basename(path):
    return os.path.basename(os.path.normpath(os.path.abspath(path))).lower()
[ "def basename(input_file):\n\n input_file = value_checkup(input_file)\n return os.path.basename(os.path.splitext(input_file)[0])", "def sort_by_basename_icase(path):\n\treturn path.basename_lower", "def crds_basename(name):\n if name == \"N/A\":\n return \"N/A\"\n else:\n return os.path.basename(name)", "def basename(file_name):\n fileParts = file_name.split(\".\")\n base_name = \".\".join(fileParts[:-1])\n return base_name", "def test_get_basename(self):\n self.assertEquals(util.fileops.get_basename('/home/brandon/test/test.py'),\n 'test.py')\n self.assertEquals(util.fileops.get_basename('poopies.html'),\n 'poopies.html')\n self.assertEquals(util.fileops.get_basename('raw'), 'raw')\n self.assertEquals(util.fileops.get_basename('home/brandon/test'), 'test')", "def path_base(path: str) -> str:\n\n return os.path.normpath(path).split(os.sep)[0]", "def generate_name(self, path) -> str:\n name = path.strip(\"/\").split(\"/\")[-1]\n return name", "def get_file_name_from_path(file_path):\n file_name = file_path\n slash_position = file_name.rfind('/')\n dot_position = file_name.rfind('.')\n if slash_position >= 0:\n file_name = file_name[slash_position + 1:]\n if dot_position >= 0:\n file_name = file_name[:dot_position]\n return file_name", "def extensionOf(cls, path):\n parts = path.split('/')[-1].split('.')\n if len(parts) > 1:\n return parts[-1].lower()\n return None", "def getBasename(*args) -> \"SbString\":\n return _coin.SoInput_getBasename(*args)", "def basename_from_uri(uri):\n return os.path.basename(parse.urlparse(uri).path)", "def getBasename(self, filename):\n extInd = filename.rindex(\".\")\n return filename[0:extInd]", "def basename(n):\n # Type checkingn and conversion\n orig_type = type(n)\n if isinstance(n, OneDimBinning):\n n = n.name\n if not isinstance(n, str):\n raise ValueError('Unhandled type %s' %orig_type)\n # Remove all (pre/suf)fixes and any separator chars\n for regex in NAME_FIXES_REGEXES:\n n = regex.sub('', n)\n return n.strip()", "def strainName(self):\n basename = os.path.basename(self.bamfile)\n if basename.lower().endswith(\".bam\"):\n basename = basename[:-4]\n return basename", "def path_head(path: str) -> str:\n return os.path.split(path)[1]", "def get_path(self, path):\n return path[len(self.base)+2:]", "def get_basename_from_gt_file(gt_file):\n directory, file_name = os.path.split(gt_file)\n return file_name.replace(GT_FILE_EXT, \"\")", "def path_tail(path: str) -> str:\n return os.path.split(path)[0]", "def _normalise_path(self, path: str) -> str:\n return os.path.normpath(os.path.normcase(path))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create or replace path.
def create_path_or_replace(path_to_create):
    if os.path.exists(path_to_create):
        shutil.rmtree(path_to_create)

    create_path_tree(path_to_create)
[ "def createPath(self, path):\n if os.path.abspath('.') != os.path.abspath(path):\n try:\n os.makedirs(path)\n except OSError:\n print \"Error: Path already exists.\"\n self._handleCollision(path)", "def create_path(path: str) -> None:\n if not exists(path):\n mkdir(path)", "def path_set(path: Path) -> bool:\n tryagain = True\n access: int = 0o755\n if path.exists():\n tryagain = True\n else:\n md = su.get_new_path(path)\n if md.lower() == 'yes' or 'y':\n try:\n path.mkdir(mode=access)\n except OSError as error:\n logger.error(f'{error}')\n tryagain = False\n else:\n raise exceptions.CleanExit()\n return tryagain", "def createPath(path):\n\t\n\tif not os.path.isdir(path):\n\t\tos.mkdir(path)", "def setPath(self, path, update=True):\n self.path = path\n if update: self.updatePaths()\n elif self.__folderscreated: self.write()", "def _create_path(self, path):\n splpath = path.split(\"/\")\n lastpath = \"{}\".format(splpath[0])\n for d in splpath[1:-1]:\n curpath = \"{}/{}\".format(lastpath, d)\n if self.get_node_by_path(curpath) is None:\n parent = self.get_node_by_path(lastpath)\n DPNode(\n parent=parent,\n entry_path=curpath,\n entry_name=d,\n entry_type=None,\n entry_id=None,\n created_date=None,\n is_new=None,\n document_source=None,\n parent_folder_id=None,\n )\n lastpath = curpath", "def _create_path_ignore_existing(self, path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as e:\n # File exists (17) is okay\n if e.errno != 17:\n raise\n self._fix_permission(path)", "def newPath(self, new_path = None, new_fullpath = None, force = False, always_copy = False, always_move = False, leave_symlink = False, create_dirs = True, getPathPreview = False):\n\n if always_copy and always_move:\n raise ValueError(\"Both always_copy and always_move cannot be specified\")\n\n if (new_path is None and new_fullpath is None) or (new_path is not None and new_fullpath is not None):\n raise ValueError(\"Specify only new_dir or new_fullpath\")\n\n old_dir, old_filename = os.path.split(self.filename)\n if new_path is not None:\n # Join new filepath to old one (to handle realtive dirs)\n new_dir = os.path.abspath(os.path.join(old_dir, new_path))\n\n # Join new filename onto new filepath\n new_fullpath = os.path.join(new_dir, old_filename)\n\n else:\n # Join new filepath to old one (to handle realtive dirs)\n new_fullpath = os.path.abspath(os.path.join(old_dir, new_fullpath))\n\n new_dir = os.path.dirname(new_fullpath)\n\n\n if len(Config['move_files_fullpath_replacements']) > 0:\n p(\"Before custom full path replacements: %s\" % (new_fullpath))\n new_fullpath = applyCustomFullpathReplacements(new_fullpath)\n new_dir = os.path.dirname(new_fullpath)\n\n p(\"New path: %s\" % new_fullpath)\n\n if getPathPreview:\n return new_fullpath\n\n if create_dirs:\n p(\"Creating directory %s\" % new_dir)\n try:\n os.makedirs(new_dir)\n except OSError, e:\n if e.errno != 17:\n raise\n\n if os.path.isfile(new_fullpath):\n # If the destination exists, raise exception unless force is True\n if not force:\n raise OSError(\"File %s already exists, not forcefully moving %s\" % (\n new_fullpath, self.filename))\n\n if same_partition(self.filename, new_dir):\n if always_copy:\n # Same partition, but forced to copy\n copy_file(self.filename, new_fullpath)\n else:\n # Same partition, just rename the file to move it\n rename_file(self.filename, new_fullpath)\n\n # Leave a symlink behind if configured to do so\n if leave_symlink:\n symlink_file(new_fullpath, self.filename)\n else:\n # File is on different 
partition (different disc), copy it\n copy_file(self.filename, new_fullpath)\n if always_move:\n # Forced to move file, we just trash old file\n p(\"Deleting %s\" % (self.filename))\n delete_file(self.filename)\n\n # Leave a symlink behind if configured to do so\n if leave_symlink:\n symlink_file(new_fullpath, self.filename)\n\n self.filename = new_fullpath", "def edit_node_path(self, node, new_path):\n\n if self.check_node_existance(node):\n self.nodes[node]['path'] = new_path\n\n else:\n raise NodeNotFound('No node under name \"{0}\" found.'.format(node))", "def add_path(self, path):\n self.path_list.insert(0, path)\n self.build_path()", "def get_or_create_path(path, date, day=False):\n if day:\n path_components = (str(date.year), str(date.month), str(date.day))\n else:\n path_components = (str(date.year), str(date.month))\n for subpath in path_components:\n path = os.path.join(path, subpath)\n if not os.path.isdir(path):\n os.mkdir(path)\n return path", "def append_slash(path):\n if not path.endswith('/'):\n path += '/'\n\n return path", "def addpath(self, path, frename, forigin):\n self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]\n self._dirty = True", "def rebase_path(\n path: pathlib.Path, root: pathlib.Path, new_root: pathlib.Path\n) -> pathlib.Path:\n return new_root / path.relative_to(root)", "def generateSymbolicLink(path, output_dir, first_replacement=\"uploaded\",\n second_replacement=\"home/mediapanel\",\n lua_folder=\"themes\"):\n split_path = path.split(\"/\")\n replaced_index = split_path.index(first_replacement)\n replacement_dir = os.path.join(second_replacement, lua_folder, output_dir)\n split_path[replaced_index] = replacement_dir\n os.symlink(path, os.path.join(*split_path))", "def add_path(self, path):\n letters = list(string.ascii_letters)\n if len(self.paths) < len(letters):\n self.paths.append(path)\n self.paths = sorted(self.paths)\n i = 0\n while i < len(self.paths) and i < len(letters):\n self.paths[i].set_id(str(self.branch_id) + str(letters[i]))\n i += 1", "def test_patch_path(self):\n self.assertEqual(\n utils.patch_path(\n '/Users/sudeep.agarwal/src/squiddy/api/v0.1',\n '/Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml',\n ), '/Users/sudeep.agarwal/src/squiddy/api/v0.1/swagger.yaml')", "def testSetPath(self):\n original = mib.path()\n current = original + \":/some/other/directory\"\n try:\n mib.path(current)\n self.assertEqual(mib.path(), current)\n finally:\n mib.path(original)", "def set_pseudopotential_path(self, newpath):\n self.qe_input_data[\"pseudo_dir\"] = newpath" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a path tree if the folders do not exist.
def create_path_tree(path_to_create):
    current = ""
    for path in path_to_create.split("/"):
        current = os.path.join(current, path)
        # Root
        if current == "":
            current = "/"
            continue
        if not os.path.exists(current):
            os.makedirs(current)
[ "def create_dirs(path_to_dir):\n path_to_dir = Path(path_to_dir)\n if not path_to_dir.exists():\n path_to_dir.mkdir(parents=True)\n LOG.debug(f\"Created directory structure: '{path_to_dir}'\")", "def create_folder(path):\n Path(path).mkdir(parents=True, exist_ok=True)", "def create_folders_if_need_to(self):\n expected = [\n self.path('images'),\n self.path('meta'),\n self.path('thumbnails'),\n self.path('staging'),\n self.path('staging', 'images'),\n self.path('staging', 'meta'),\n self.path('staging', 'thumbnails'),\n self.path('staging', 'unconfirmed'),\n ]\n for each in expected:\n if not self.exists(each):\n self.mkdir(each)\n print(f'New folder created: {each}')", "def create_missing_dir(path):\n if not (os.path.exists(path) and os.path.isdir(path)):\n os.makedirs(path)", "def __check_folders(self):\n\n if not os.path.exists(self.parent_folder):\n os.mkdir(self.parent_folder)\n\n if not os.path.exists(self.keys_folder):\n os.mkdir(self.keys_folder)\n\n if not os.path.exists(self.folder):\n os.mkdir(self.folder)", "def createPath(path):\n\t\n\tif not os.path.isdir(path):\n\t\tos.mkdir(path)", "def create_folder_structure():\n if path.exists('./train'):\n shutil.rmtree('./train')\n pathlib.Path(\"./train/train\").mkdir(parents=True, exist_ok=True)\n pathlib.Path(\"./train/validate\").mkdir(parents=True, exist_ok=True)", "def init_folders(path: str) -> bool:\n\n if not os.path.exists(path):\n logger.info(f\"{path} doesn't exist. Initializing...\")\n os.mkdir(path)\n return True\n return False", "def createPath(self, path):\n if os.path.abspath('.') != os.path.abspath(path):\n try:\n os.makedirs(path)\n except OSError:\n print \"Error: Path already exists.\"\n self._handleCollision(path)", "def vfs_makedirs(path_to_create):\n\n\turi = gnomevfs.URI(path_to_create)\n\tpath = uri.path\n\n\t# start at root\n\turi = uri.resolve_relative('/')\n\n\tfor folder in path.split('/'):\n\t\tif not folder:\n\t\t\tcontinue\n\t\turi = uri.append_string(folder.replace('%2f', '/'))\n\t\ttry:\n\t\t\tgnomevfs.make_directory(uri, 0777)\n\t\texcept gnomevfs.FileExistsError:\n\t\t\tpass\n\t\texcept :\n\t\t\treturn False\n\treturn True", "def create_path_or_replace(path_to_create):\n if os.path.exists(path_to_create):\n shutil.rmtree(path_to_create)\n\n create_path_tree(path_to_create)", "def create(paths):\n with LogSection(\"Creating directories...\"):\n for path in paths:\n path = expand(path)\n if not os.path.exists(path):\n print(\"Creating:\", path)\n os.makedirs(path)", "def create_parent_folder(path: Path) -> None:\n if not path.parent.exists():\n path.parent.mkdir(parents=True)", "def create_folder_structure(root_dir: Path):\n (root_dir / \"test\").mkdir(parents=True, exist_ok=True)\n (root_dir / \"train\").mkdir(exist_ok=True)\n (root_dir / \"val\").mkdir(exist_ok=True)", "def scan_and_create_dir_tree(path, file=True):\n\n parts = path.parts\n path_to_check = Path(parts[0])\n\n for i in range(1, len(parts)):\n if not path_to_check.exists():\n path_to_check.mkdir()\n path_to_check = path_to_check / parts[i]\n\n if file:\n pass\n else:\n if not path_to_check.exists():\n path_to_check.mkdir()", "def create_folder(folder_path):\n success = True\n try:\n if (folder_path.exists()):\n if (not folder_path.is_dir()):\n folder_path.unlink()\n folder_path.mkdir()\n else:\n folder_path.mkdir()\n except Exception as ex:\n repr(ex)\n success = False\n return success", "def make_empty_folder(path):\r\n shutil.rmtree(path, ignore_errors=True)\r\n pathlib.Path(path).mkdir(exist_ok=True)", "def create_dir(dir_path: 
Path):\n\n if not dir_exists(dir_path):\n dir_path.mkdir()", "def create_path(path: str) -> None:\n if not exists(path):\n mkdir(path)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the first found reader
def get_reader():
    rarr = readers()
    if len(rarr) == 0:
        return None
    return rarr[0]
[ "def get_first_row(self):\n if not self.csv_reader:\n return None\n\n if not self.first_row:\n the_list = list(self.csv_reader)\n if the_list:\n self.first_row = the_list[0]\n self.reset_csv()\n else:\n return None\n\n return self.first_row", "def read(self):\n return self.reads.next()", "def first(self):\n try:\n row = self.cursor_strategy.fetchone()\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )\n\n try:\n if row is not None:\n return self.process_rows([row])[0]\n else:\n return None\n finally:\n self.close()", "def first( filename ):\r\n\r\n try:\r\n return next(iwhere(filename))\r\n except StopIteration:\r\n return None", "def get_reader(fh):\n sniff = sniff_file(fh)\n\n if sniff.startswith(\"FN \"):\n reader = PlainTextReader\n elif \"\\t\" in sniff:\n reader = TabDelimitedReader\n else:\n # XXX TODO Raised for empty file -- not very elegant\n raise ReadError(\"Could not determine appropriate reader for file \"\n \"{}\".format(fh))\n return reader", "def get_reader(self):\n raise NotImplementedError()", "def seek_to_beginning(self):\n if isinstance(self.reader, CSVParser):\n self.data = self.reader.csvreader\n elif isinstance(self.reader, ExcelParser):\n self.data = self.reader.excelreader\n else:\n raise Exception('Unknown type of parser in MCMParser')\n\n return self.reader.seek_to_beginning()", "def first(self):\n return next(self)", "def read(self) -> SmartSsdReadLookahead:\n return self._read", "def get_first_scan(self):\n if self.scans is None:\n return None\n return self.scans[0]", "def _get_dataset_reader(self) -> base.base_dataset.BaseDatasetReader:\n\n if self._input_type == DatasetInputFormat.ORIGINAL:\n return OriginalReader\n\n if self._input_type == DatasetInputFormat.LMDB:\n return LmdbReader\n\n # if self._input_type == DatasetInputFormat.H5PY:\n # return H5Reader\n\n self._logger.error('Dataset input type not recognized!')\n return -1", "def first(self, default=None):\n try:\n return iter(self).next()\n except StopIteration:\n return default", "def lookup_Reader(self, Reader_id):\n command = u\"\"\"self.cur.execute(\"SELECT * FROM Reader WHERE Reader_id = %s\")\"\"\" % Reader_id\n #print command\n exec(command)\n data = self.cur.fetchone()\n if data == None:\n return False\n data = list(data)\n data = self.change_str_from_mysql(data)\n return data", "def get_reader(file_path: str, direct_open: bool = False) -> Reader:\n\n extension = os.path.splitext(file_path)[1]\n if extension not in reader_registry:\n raise ValueError('unknown dataset file extension \"{}\"'.format(extension))\n\n reader = reader_registry[extension](file_path)\n if direct_open:\n reader.open()\n return reader", "def fetchone(self):\n try:\n row = self.cursor_strategy.fetchone()\n if row is not None:\n return self.process_rows([row])[0]\n else:\n self._soft_close()\n return None\n except BaseException as e:\n self.connection._handle_dbapi_exception(\n e, None, None, self.cursor, self.context\n )", "def getNext(self):\n line = self._file.readline()\n if line:\n return tuple(line.strip('\\n').split('\\t'))\n else: \n return None", "def _get_record(self, offset):\n self._maf_fp.seek(offset)\n return next(self._mafiter)", "def first(self, default=None):\r\n try:\r\n return next(iter(self))\r\n except StopIteration:\r\n return default", "def first(iterator):\n return next(iterator)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select an applet with appletID. appletID can be either a hex-encoded string or a byte sequence.
def select_applet(connection, appletID):
    data = maybe_fromhex(appletID)
    # Select:
    # CLA = 0x00
    # INS = 0xA4
    # P1 = 0x04
    # P2 = 0x00
    # Data = the instance AID
    cmd = [0x00,  # CLA
           0xA4,  # INS
           0x04,  # P1
           0x00,  # P2
           len(data),  # Lc (content length)
           ] + data + [0x00]
    data, *sw = connection.transmit(cmd)
    data = bytes(data)
    sw = bytes(sw)
    if sw == b"\x90\x00":
        return data
    else:
        raise RuntimeError("Card responded with code %s and data \"%s\"" % (sw.hex(), data.hex()))
[ "def ExtractText(self):\n return \"[applet]\"", "def applet_new(input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/applet/new', input_params, always_retry=always_retry, **kwargs)", "def applet_run(object_id, input_params={}, always_retry=False, **kwargs):\n return DXHTTPRequest('/%s/run' % object_id, input_params, always_retry=always_retry, **kwargs)", "def sac_picker(id,stations):\n \n # Check sac script exists, if not make one.\n fname='pick.mac'\n if not os.path.exists(fname):\n sacfile=open(fname,'x')\n sacfile.write('bg x\\nqdp off\\nsc sactosac -m data/$1$/SAC/*.sac\\n')\n sacfile.write('r data/$1$/SAC/OL.$2$*E.sac\\nr more data/$1$/SAC/OL.$2$*N.sac\\nr more data/$1$/SAC/OL.$2$*Z.sac\\n')\n sacfile.write('ppk\\nwh\\nq\\n')\n sacfile.close()\n \n # Plot each components together for each station.\n for station in stations:\n sac=\"printf \\\"m pick.mac %s %s \\\"| sac\"%(id,station)\n os.system(sac)", "def selectApp(self, app_name):\n time.sleep(2) #this is a hack. Otherwise it won't find the app consistently. Need to double-check implicit/explicit waits\n #locatorStr = ('//*[@title=\"' + app_name + '\"]')\n #self.click_object(By.XPATH, locatorStr)\n app_tile = WorkspacePageLocators(app_name)\n #self.highlight(*app_tile.app_tile)\n self.click_object_at_location(1, 1, *app_tile.app_tile)", "def GetIdaSig(sig, mask = None) -> str:\n\n # Only a code sig should be byte string\n if type(sig) is type(b''):\n return Code2Ida(sig, mask)\n\n if sig.find(' ') > -1:\n\n # an olly sig without wildcards would be same as an ida sig so this is safe\n if sig.find(' ?? ') > -1:\n return Olly2Ida(sig)\n\n # Olly sig with no wildcards or already an ida sig\n return sig\n\n # Only supported type left is code sigs as a string\n return Code2Ida(sig, mask)", "def _find_appliance(oneandone_conn, appliance):\n for _appliance in oneandone_conn.list_appliances(q='IMAGE'):\n if appliance in (_appliance['id'], _appliance['name']):\n return _appliance['id']", "def _get_ap_by_id(self, ap_id):\n if isinstance(ap_id, AP):\n return ap_id\n\n with self.aps_lock:\n # check in all lookup tables\n if ap_id in self.ap_by_name:\n return self.ap_by_name[ap_id]\n elif ap_id in self.ap_by_mac:\n return self.ap_by_mac[ap_id]\n elif ap_id in self.ap_by_ip:\n return self.ap_by_ip[ap_id]\n elif ap_id in self.ap_by_radio_mac:\n return self.ap_by_radio_mac[ap_id]\n else:\n # ap ips may change or be set by dhcp\n for ap in self.aps:\n if ap.ip == ap_id:\n self.ap_by_ip[ap.ip] = ap\n return ap\n raise ValueError('AP with id %s does not exist! 
%s' % (ap_id, str(self.ap_by_mac)))", "def _run_applescript(script):\n p = Popen(['osascript', '-'], stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate(bytes(str(script).encode(\"utf-8\")))\n return (p.returncode, stdout, stderr)", "def select_assessor(intf, assessor_label):\n labels = assessor_label.split('-x-')\n xpath = A_XPATH.format(project=labels[0],\n subject=labels[1],\n session=labels[2],\n assessor=assessor_label)\n return intf.select(xpath)", "def _getid(lib):\n val = commands.getoutput(\"otool -D %s\" % lib)\n m = re.match(r\"[^:]+:\\s*([^\\s]+)\", val)\n if m:\n return m.group(1)\n raise RuntimeError, \"Could not determine id for %s\" % lib", "def update_allowed_protocol_by_id(self,\n id,\n allow_chap=None,\n allow_eap_fast=None,\n allow_eap_md5=None,\n allow_eap_tls=None,\n allow_eap_ttls=None,\n allow_leap=None,\n allow_ms_chap_v1=None,\n allow_ms_chap_v2=None,\n allow_pap_ascii=None,\n allow_peap=None,\n allow_preferred_eap_protocol=None,\n allow_teap=None,\n allow_weak_ciphers_for_eap=None,\n description=None,\n eap_fast=None,\n eap_tls=None,\n eap_tls_l_bit=None,\n eap_ttls=None,\n name=None,\n peap=None,\n preferred_eap_protocol=None,\n process_host_lookup=None,\n require_message_auth=None,\n teap=None,\n headers=None,\n payload=None,\n active_validation=True,\n **query_parameters):\n check_type(headers, dict)\n\n if headers is not None:\n if 'Content-Type' in headers:\n check_type(headers.get('Content-Type'),\n basestring, may_be_none=False)\n if 'Accept' in headers:\n check_type(headers.get('Accept'),\n basestring, may_be_none=False)\n\n with_custom_headers = False\n _headers = self._session.headers or {}\n if headers:\n _headers.update(dict_of_str(headers))\n with_custom_headers = True\n is_xml_payload = 'application/xml' in _headers.get('Content-Type', [])\n if active_validation and is_xml_payload:\n check_type(payload, basestring)\n if active_validation and not is_xml_payload:\n check_type(payload, dict)\n check_type(id, basestring,\n may_be_none=False)\n\n _params = {\n }\n _params.update(query_parameters)\n _params = dict_from_items_with_values(_params)\n\n path_params = {\n 'id': id,\n }\n if is_xml_payload:\n _payload = payload\n else:\n _tmp_payload = {\n 'name':\n name,\n 'description':\n description,\n 'eapTls':\n eap_tls,\n 'peap':\n peap,\n 'eapFast':\n eap_fast,\n 'eapTtls':\n eap_ttls,\n 'teap':\n teap,\n 'processHostLookup':\n process_host_lookup,\n 'allowPapAscii':\n allow_pap_ascii,\n 'allowChap':\n allow_chap,\n 'allowMsChapV1':\n allow_ms_chap_v1,\n 'allowMsChapV2':\n allow_ms_chap_v2,\n 'allowEapMd5':\n allow_eap_md5,\n 'allowLeap':\n allow_leap,\n 'allowEapTls':\n allow_eap_tls,\n 'allowEapTtls':\n allow_eap_ttls,\n 'allowEapFast':\n allow_eap_fast,\n 'allowPeap':\n allow_peap,\n 'allowTeap':\n allow_teap,\n 'allowPreferredEapProtocol':\n allow_preferred_eap_protocol,\n 'preferredEapProtocol':\n preferred_eap_protocol,\n 'eapTlsLBit':\n eap_tls_l_bit,\n 'allowWeakCiphersForEap':\n allow_weak_ciphers_for_eap,\n 'requireMessageAuth':\n require_message_auth,\n }\n _payload = {\n 'AllowedProtocols': dict_from_items_with_values(_tmp_payload)\n }\n _payload.update(payload or {})\n _payload = dict_from_items_with_values(_payload)\n if active_validation and not is_xml_payload:\n self._request_validator('jsd_a0b312f70257b1bfa90d0260f0c971_v3_0_0')\\\n .validate(_payload)\n\n e_url = ('/ers/config/allowedprotocols/{id}')\n endpoint_full_url = apply_path_params(e_url, path_params)\n\n request_params = {'data': _payload} if is_xml_payload 
else {'json': _payload}\n if with_custom_headers:\n _api_response = self._session.put(endpoint_full_url, params=_params,\n headers=_headers,\n **request_params)\n\n else:\n _api_response = self._session.put(endpoint_full_url, params=_params,\n **request_params)\n\n return self._object_factory('bpm_a0b312f70257b1bfa90d0260f0c971_v3_0_0', _api_response)", "def get_apple_by_id(apple_id):\n for apple in game_state['apples']:\n if get_apple_id(apple) == apple_id:\n return apple\n return None", "def break_apl_singleton(apl: str) -> str:\n if apl == \"\":\n return \"\"\n (ipver, remainder) = apl.split(\":\", 1)\n (ipaddr, mask) = remainder.split(\"/\", 1)\n return ipaddr", "def get_iqn_from_chap_secrets_cache(ocid):\n _, chap_passwords = load_cache(oci_utils.__chap_password_file)\n if chap_passwords is None:\n return None, None\n for iqn, unpw in chap_passwords.items():\n if ocid == unpw[0]:\n return iqn, unpw[1]\n return None, None", "def callbackFunc(event): # this function used to get selected item from the combo box and load into oid i/p box\r\n choice = quality_combo.get()\r\n choice = int((choice.strip())[0])\r\n\r\n oid.delete(0,1)\r\n oid.insert(0, choice)", "def artSelectCtx(unselectall=bool, mappressure=\"string\", outline=bool, selectop=\"string\", reflectionaxis=\"string\", exportfilesave=\"string\", image3=\"string\", reflection=bool, dragSlider=\"string\", image1=\"string\", exportfiletype=\"string\", lowerradius=float, exportfilesizex=int, opacity=float, accopacity=bool, usepressure=bool, exists=bool, brushalignment=bool, name=\"string\", showactive=bool, afterStrokeCmd=\"string\", surfaceConformedBrushVertices=bool, exportfilemode=\"string\", addselection=bool, toggleall=bool, exportfilesizey=int, selectall=bool, outwhilepaint=bool, tablet=bool, importfileload=\"string\", paintmode=\"string\", profileShapeFile=\"string\", expandfilename=bool, brushfeedback=bool, stampProfile=\"string\", clear=bool, importfilemode=\"string\", projective=bool, importreassign=bool, importthreshold=float, history=bool, beforeStrokeCmd=\"string\", image2=\"string\", tangentOutline=bool, radius=float):\n pass", "def enter_custom_app_instance_id(self, option=\"22222222-0000-0000-0000-000000000001\"):\n self.driver.send_keys(\"app_instance_id_custom_txt_box\", content=option)", "def arxiv_id(self):\n return self._paper_dict[\"id\"]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reads in a filename and, for each gene (in the name column), collapses regions such that each gene has non-overlapping intervals. This might be functionally the same as merge(), but it's really slow.
def merge2(fn):
    df = pd.read_table(fn, names=['chrom','start','end','name','score','strand'])
    df.sort_values(['chrom','start','end'], inplace=True)
    df = df.groupby('name').apply(collapse)
    df.reset_index(inplace=True)
    return df[['chrom','start','end','name','score','strand']]
[ "def clean_overlap ( self ):\n regions = self.regions\n new_regions = {}\n chrs = regions.keys()\n chrs.sort()\n for chrom in chrs:\n new_regions[chrom]=[]\n n_append = new_regions[chrom].append\n prev_region = None\n regions_chr = regions[chrom]\n for i in xrange(len(regions_chr)):\n if not prev_region:\n prev_region = regions_chr[i]\n continue\n else:\n if regions_chr[i][0] <= prev_region[1]:\n s_new_region = prev_region[0]\n e_new_region = max(regions_chr[i][1],prev_region[1])\n l_new_region = e_new_region-s_new_region\n prev_region = (s_new_region,e_new_region)\n else:\n n_append(prev_region)\n prev_region = regions_chr[i]\n if prev_region:\n n_append(prev_region)\n del regions\n self.regions = new_regions\n return True", "def get_cell_line_overlaps(file_prefix, bedfile1, bedfile2, fraction_overlap=0.5):\n cmd = 'bedtools intersect -f {} -wa -a {} -b {} | uniq > {}_IDR.bed'.format(fraction_overlap, bedfile1, bedfile2,\n file_prefix)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n _ = process.communicate()\n out_filename = '{}_IDR.bed'.format(file_prefix)\n df = pd.read_csv(out_filename, sep='\\t', header=None)\n idr_starts = df.iloc[:, 1].values\n os.remove(out_filename)\n\n return idr_starts", "def load_unalignable(self, fname):\n self.logger.info(\"Loading unalignable regions\")\n if not os.path.exists(fname):\n self.logger.error(\"The unalignable sequence bed file %s could \"\n \"not be found\" % fname)\n return None\n # If no chrom data is present, this will define the current chrom list\n bed = []\n chroms = []\n chr2int = {}\n with open(fname) as f:\n for line in f:\n if line[0] == '#':\n continue\n temp = line.rstrip('\\n').split('\\t')\n # If no chrom data is already loaded, use the bed file's\n # chrom data\n if self.data is None:\n if temp[0] not in chroms:\n chr2int[temp[0]] == len(chroms)\n chroms.append(temp[0])\n chrint = chr2int[temp[0]]\n elif temp[0] not in self.chr2int:\n continue\n else:\n chrint = self.chr2int[temp[0]]\n bed.append((chrint, (int(temp[1]), int(temp[2]))))\n bed.sort()\n bed = numpy.array(bed, dtype=numpy.dtype([\n ('chr', numpy.int32), ('coords', numpy.int32, (2,))]))\n # If getting chrom data from bed, find correct chrom ordering\n if self.data is None:\n indices = numpy.r_[0, numpy.cumsum(numpy.bincount(bed['chr']))]\n chrints = []\n for chrom in chroms:\n try:\n chrints.append((\n str(int(chroms[i].lstrip('chr'))).rjust(2, '0'),\n chroms[i]))\n except ValueError:\n chrints.append((chrom, chrom))\n chrints.sort()\n chroms = []\n for i in range(len(chrints)):\n chroms.append(chrints[i][1])\n self.chroms = numpy.array(chroms)\n for i in range(len(self.chroms)):\n chrint = chr2int[self.chroms[i]]\n bed['chr'][indices[chrint]:indices[chrint + 1]] = i\n bed = bed[numpy.argsort(bed['chr'])]\n self.unalignable = bed\n # If RE data is already loaded, remove blacklisted regions\n if self.data is not None:\n self.remove_unalignable()", "def merge_overlap_regions(\n histone_overlap_files,\n out_master_bed,\n # end here\n\n args, histone, method=\"atac_midpoint\", filter_w_overlap=True):\n # set up naive overlap based master regions\n logging.info(\"HISTONE: {}: Generating master regions...\".format(histone))\n args.chipseq[\"histones\"][histone][\"overlap_master_regions\"] = \"{0}/ggr.{1}.overlap.master.bed.gz\".format(\n args.folders[\"data_dir\"], histone)\n if not os.path.isfile(args.chipseq[\"histones\"][histone][\"overlap_master_regions\"]):\n histone_overlap_files = sorted(\n glob.glob(\"{0}/{1}\".format(\n 
args.chipseq[\"data_dir\"], args.chipseq[\"histones\"][histone][\"overlap_glob\"])))\n logging.info(\"Master regions using: {}\".format(\" \".join(histone_overlap_files)))\n merge_regions(histone_overlap_files, args.chipseq[\"histones\"][histone][\"overlap_master_regions\"])\n \n if method == \"atac_midpoint\":\n # If centered on ATAC region midpoint, then extract the midpoint and then extend out with bedtools slop\n args.atac[\"master_slop_bed\"] = \"{}.slop_{}bp.bed.gz\".format(\n args.atac[\"master_bed\"].split(\".bed\")[0],\n args.params[\"histones\"][histone][\"overlap_extend_len\"])\n # this bit actually belongs in ATAC? integrative?\n if not os.path.isfile(args.atac[\"master_slop_bed\"]):\n slop_bed = (\n \"zcat {0} | \"\n \"awk -F '\\t' 'BEGIN{{OFS=\\\"\\t\\\"}} \"\n \"{{ midpoint=$2+int(($3-$2)/2); \"\n \"$2=midpoint; $3=midpoint+1; print }}' | \"\n \"bedtools slop -i stdin -g {1} -b {2} | \"\n \"gzip -c > {3}\").format(\n args.atac[\"master_bed\"],\n args.annot[\"chromsizes\"],\n args.params[\"histones\"][histone][\"overlap_extend_len\"],\n args.atac[\"master_slop_bed\"])\n print slop_bed\n run_shell_cmd(slop_bed)\n\n if filter_w_overlap:\n # now intersect ATAC with the naive overlap files and only keep region if has an overlap\n args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"] = \"{}.{}-marked.bed.gz\".format(\n args.atac[\"master_slop_bed\"].split(\".bed\")[0],\n histone)\n if not os.path.isfile(args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"]):\n keep_marked = (\n \"bedtools intersect -u -a {0} -b {1} | \"\n \"gzip -c > {2}\").format(\n args.atac[\"master_slop_bed\"],\n args.chipseq[\"histones\"][histone][\"overlap_master_regions\"],\n args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"])\n print keep_marked\n run_shell_cmd(keep_marked)\n master_regions = args.chipseq[\"histones\"][histone][\"master_slop_marked_bed\"]\n \n else:\n master_regions = args.atac[\"master_slop_bed\"]\n\n elif method == \"naive_overlap\":\n # If naive overlap, don't do anything extra - already generated the master file\n master_regions = args.chipseq[\"histones\"][histone][\"overlap_master_regions\"]\n\n else:\n raise Exception(\"non existent master regions method!\")\n \n return master_regions", "def merge_regions(self, max_gap_length):\n\n logging.debug('Group %s: merging %i candidate regions' %\n (self.family_name, len(self.regions)))\n\n if len(self.regions) > 1:\n\n potentially_mergable = self.regions\n not_mergable = []\n\n while len(potentially_mergable) > 1:\n\n merged = False\n current = potentially_mergable[0]\n compared_to = potentially_mergable[1:]\n\n for region in compared_to:\n if region.overlaps(current, max_gap_length):\n region.merge(current)\n region.clean_references(max_gap_length)\n #logging.debug('Group %s: merged a region. %i potentially mergable candidate regions remaining' % (self.family_name, len(potentially_mergable)))\n potentially_mergable = compared_to\n merged = True\n break\n\n if not merged:\n not_mergable.append(current)\n potentially_mergable = compared_to\n #logging.debug('Group %s: not merged a region. 
%i potentially mergable candidate regions remaining' % (self.family_name, len(potentially_mergable)))\n\n results = not_mergable + potentially_mergable\n\n logging.debug('Group %s: merged into %i regions' %\n (self.family_name, len(results)))\n\n self.regions = results\n\n else:\n logging.debug(\n 'Group %s: found only 1 region, no mergin necessary' % self.family_name)", "def removeRegions(dataFrame, er):\n midpoints = [] # to store midpoints\n\n # finding midpoints\n for index, row in dataFrame.iterrows():\n midpoints.append((row['start'] + row['end']) // 2)\n\n # adding another column called midpoints\n dataFrame['midpoint'] = midpoints\n\n # sort based on chromosome and then by midpoints\n sData = dataFrame.sort_values(\n by=['midpoint', 'p-value'], kind='mergesort')\n excludeList = [] # list of indexes that needs to be removed\n # print sData\n # print \"\\n\"\n\n # removing the motifs that are called on the different strand\n sortedData = removeRepeats(sData)\n\n # choosing motifs based on p-values per chromosome\n for i in sortedData['#chr'].unique():\n chrData = sortedData.loc[sortedData['#chr'] == i]\n # print chrData[0:3]\n mdp = chrData['midpoint'].tolist()\n # pprint.pprint(mdp)\n print(\"processing chromosome : {} \".format(i))\n\n # iterating through each motif to pick the least p-value\n for j in range(0, len(mdp)):\n # print mdp[j]\n\n # retrieve the index that will later be removed or retained\n index1 = chrData.index[chrData['midpoint'] == mdp[j]].tolist()[\n 0]\n # print index1\n\n excludeZone = range(0, mdp[j] + er) # create a boundary region\n pv1 = chrData.loc[index1]['p-value'] # retrieve the pvalue\n\n # if the index is not excluded from any previous comparisons\n if index1 not in excludeList:\n for k in range(j + 1, len(mdp)):\n # print \"\\t {}\".format(mdp[k])\n # retrieve the next line index\n index2 = chrData.index[chrData['midpoint'] == mdp[k]].tolist()[\n 0]\n # print \"\\t {}\".format(index2)\n\n # checking if the next line is in the exclusionZone\n if mdp[k] in excludeZone:\n # retrieve the p-value for comparison\n pv2 = chrData.loc[index2]['p-value']\n\n # excluding the greater p-value after comparison\n if pv1 <= pv2:\n # pprint.pprint(\" IF pv1: {}, pv2 :{}\".format(pv1,pv2))\n excludeList.append(index2)\n # pprint.pprint(list(chrData.loc[index2]))\n else:\n # pprint.pprint(\"ELSE pv1: {}, pv2 :{}\".format(pv1,pv2))\n excludeList.append(index1)\n # pprint.pprint(list(chrData.loc[index1]))\n # else:\n # print \"IN EXCLUDE LIST : {}\".format(index1)\n print(\"\\nRemoved : {} ,\\nindex : {}\\n\".format(\n len(excludeList), excludeList))\n sData = sortedData.drop(excludeList)\n sortedData = sData.sort_values(by=['#chr', 'rank'], kind='mergesort')\n # print sortedData\n sortedData = sortedData.drop(['midpoint'], axis=1)\n print(\"Final Shape of the data : {}\".format(sortedData.shape))\n sortedData.to_csv('dedupFimo.bed', sep='\\t', header=False, index=False)\n # return excludeList", "def merge_regions(\n out_path: str, sample1_id: int, regions1_file: File, sample2_id: int, regions2_file: File\n) -> File:\n\n def iter_points(regions):\n for start, end, depth in regions:\n yield (start, \"start\", depth)\n yield (end, \"end\", -depth)\n\n def iter_regions(points):\n first_point = next(points, None)\n if first_point is None:\n return\n start, _, depth = first_point\n\n for pos, kind, delta in points:\n if pos > start:\n yield (start, pos, depth)\n start = pos\n depth += delta\n\n regions1 = read_regions(regions1_file)\n regions2 = read_regions(regions2_file)\n 
points1 = iter_points(regions1)\n points2 = iter_points(regions2)\n points = iter_merge(points1, points2)\n regions = iter_regions(points)\n\n region_path = f\"{out_path}/regions/{sample1_id}_{sample2_id}.regions\"\n return write_regions(region_path, regions)", "def deleteOverlapping(tempList, fileName):\n if fileName == 0:\n specialPosition = 0\n elif fileName == len(file_list) - 1:\n specialPosition = -1\n else:\n specialPosition = 1\n fileName = int(file_list[fileName].split('/')[-1].split('.')[-2])\n minimum = fileName - 5.\n maximum = fileName + 5.\n\n if specialPosition == 1:\n tempTempList = [line for line in tempList if (line[0] < maximum) and (line[0] > minimum)]\n elif specialPosition == -1:\n tempTempList = [line for line in tempList if (line[0] < maximum)]\n elif specialPosition == 0:\n tempTempList = [line for line in tempList if (line[0] > minimum)]\n return tempTempList", "def merge(data_path):\n region_file_dict = {}\n \n # remove old files\n for region in regions:\n removed_file = os.path.join(data_path, region + \".csv\")\n print removed_file\n if os.path.exists(removed_file):\n os.remove(removed_file)\n \n # create region dictionary\n for _, _, filenames in os.walk(data_path):\n for f in filenames:\n region = f.split('_')[0]\n if not region in region_file_dict:\n region_file_dict[region] = []\n region_file_dict[region].append(os.path.join(data_path, f))\n \n for region, files in region_file_dict.items():\n new_fname = os.path.join(data_path, region + \".csv\")\n with codecs.open(new_fname, \"a+\", encoding=\"utf-8\") as new_f:\n# print new_fname\n for fname in files:\n# print fname\n try:\n for txt in codecs.open(fname, \"r\", encoding=\"utf-8\"):\n new_f.write(txt)\n except Exception as e:\n traceback.format_exc(e)", "def read_genes(args, db):\n db[\"genes\"] = {}\n with open(args.genes, \"r\") as fin:\n for line in fin:\n if line.startswith(\"#\"):\n continue\n line_l = line.strip().split()\n id_, chrom, start, end, conv = \"NA\", \"NA\", \"NA\", \"NA\", \"NA\"\n if len(line_l) < 1:\n continue\n name = line_l[0]\n if len(line_l) > 1:\n id_ = line_l[1]\n if len(line_l) > 2:\n chrom = line_l[2].strip(\"chr\")\n if len(line_l) > 4:\n start, end = int(line_l[3]), int(line_l[4])\n if len(line_l) > 5:\n conv = int(line_l[5])\n db[\"genes\"][name] = [id_, chrom, start, end, conv]", "def merge_longest_overlap(reads):\n\n longest = str()\n for i in range(len(reads)):\n for j in range(len(reads)):\n if i == j:\n continue\n\n if len(reads[i]) <= len(reads[j]):\n s1, s2 = reads[i], reads[j]\n else:\n s1, s2 = reads[j], reads[i]\n\n overlap = str()\n testlen = len(s1)\n while testlen > floor(len(s1)/2):\n if s1[:testlen] == s2[len(s2)-testlen:]:\n overlap = s2[:len(s2)-testlen] + s1\n break\n elif s2[:testlen] == s1[len(s1)-testlen:]:\n overlap = s1[:len(s1)-testlen] + s2\n break\n testlen -= 1\n\n if len(overlap) > len(longest):\n longest = overlap\n indices = (i, j)\n if longest:\n reads = [reads[i] for i in range(len(reads)) if i not in indices]\n reads += [longest]\n return reads", "def get_intervals(fname, on, off, gold, transcription):\n def overlap(a, b, interval):\n ov = (min(b, interval[1]) - max(a, interval[0]))\\\n /(interval[1] - interval[0])\n time = min(b, interval[1]) - max(a, interval[0])\n return ov, time\n\n # search interval tree\n cov_int = gold[fname].search(on, off)\n cov_trs = [] # retrieved transcription\n\n # check each interval to see if we keep it or not.\n # In particular, check if found interval contains\n # more than 30 ms or more than 50% of phone.\n 
for interval in cov_int:\n int_ov, time = overlap(on, off, interval)\n if round(int_ov, 4) >= 0.50 or round(time,4) >= 0.03:\n cov_trs.append((interval[0], interval[1], transcription[(fname, interval[0], interval[1])]))\n \n # finally, sort the transcription by onsets, because intervaltree\n # doesn't necessarily return the intervals in order...\n cov_trs.sort()\n trs = [t for b, e, t in cov_trs]\n\n return cov_int, trs", "def load_bedtools_coverage(filename, min_reads=1, min_length=100):\n columns = ['Chromosome', 'Start', 'End', 'GTF-INFO', 'Reads', 'Basesgt0',\n 'Length', 'Fractiongt0']\n coverage_df = pd.read_table(filename, names=columns)\n coverage_df = coverage_df[(coverage_df.Reads >= min_reads) &\n (coverage_df.Length >= min_length)]\n return coverage_df", "def read_regions_data(prefix):\n\n ret = []\n idx = {}\n columns = []\n for line in open(prefix+'_regions.txt'):\n line = line.strip()\n if line == '':\n continue\n\n if line.startswith('#'):\n line = line[1:]\n header = line.split()\n for i in range(len(header)):\n idx[header[i]] = i\n\n columns = ['RC', 'MEDCOV', 'MINCOV', 'MEDQCOV', 'MINQCOV', 'MAXFLMQ', 'MAXFLBQ', 'Pass_or_flag']\n if 'MEDCOV+' in header:\n columns += [\n 'RC+', 'MEDCOV+', 'MINCOV+', 'MEDQCOV+', 'MINQCOV+', 'MAXFLMQ+', 'MAXFLBQ+',\n 'RC-', 'MEDCOV-', 'MINCOV-', 'MEDQCOV-', 'MINQCOV-', 'MAXFLMQ-', 'MAXFLBQ-'\n ]\n continue\n\n cols = line.split('\\t')\n record = {'region': cols[0]}\n for c in columns:\n value = cols[idx[c]]\n if value == '.':\n value = '--'\n elif c.startswith('RC') or c.startswith('MIN'):\n value = int(value)\n elif c != 'Pass_or_flag':\n value = float(value)\n if c.startswith('MED') and value == int(value):\n value = int(value)\n record[c.lower()] = value\n ret.append(record)\n\n return ret", "def range_conflict(ranges, depth=1):\n overlap = set()\n active = set()\n endpoints = _make_endpoints(ranges)\n\n for seqid, ends in groupby(endpoints, lambda x: x[0]):\n active.clear()\n for seqid, pos, leftright, i, score in ends:\n if leftright == LEFT:\n active.add(i)\n else:\n active.remove(i)\n\n if len(active) > depth:\n overlap.add(tuple(sorted(active)))\n\n for ov in overlap:\n yield ov", "def loadTEranges(TE_file_loc):\n with open(TE_file_loc) as TE_file:\n for line in TE_file:\n line_col = str.split(line)\n TE_ranges.setdefault(line_col[CHROM],[]).append((line_col[START],line_col[STOP]))\n\n TE_file.close()\n return", "def start_shuffle(output_file,np):\n index=0\n starttime = time.time()\n individualIntervals = allIndividuals.items()\n try:\n print \"starting parallel shuffle...\"\n\tpool = Pool(np)\n results = pool.map(shuffle, individualIntervals)\n\tprint \"pool finished\\n\"\n\tprint str(results)\n\tpool.close()\n pool.join()\n except:\n os.nice(100)\n pass\n else:\n\tprint \"bbb\"\n print \"finished shuffling phase. 
Starting overlap analysis\"\n elapsedtime = time.time() - starttime\n reads = {}\n persons_reads = {}\n for result in results:\n for y in result.values()[0]:\n id = str(index)\n reads[id] = [str(y[0]), str(y[1]), str(y[2]), str(result.keys()[0])]\n if str(result.keys()[0]) not in persons_reads: persons_reads[str(result.keys()[0])] = []\n persons_reads[str(result.keys()[0])].append(id)\n index += 1\n \n \"\"\"Dictionary to keep track of occurrence of each number of overlaps: 0/1 (no/yes)\"\"\"\n local_overall_overlaps = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0,\n 13: 0, 14: 0, 15: 0, 16: 0, 17: 0, 18: 0, 19: 0, 20: 0}\n \n \"\"\"for each interval, of each individual, get the genes in this region from the tree representation of refseq\n genes(refseq_gene_tree) and build a dictionary of gene:[list of intervals covering this gene]\n finally COUNT the number of intervals covering the gene. This is in number of intervals overlaping a gene\n \"\"\"\n genes = {}\n for read in reads:\n l = reads[read]\n\t print l\n a = refseq_gene_tree.interval_tree(l[0], int(l[1]), int(l[2]))\n for result in a:\n b = result[3][:-1]\n if b not in genes:\n genes[b] = []\n genes[b].append(l[3])\n \n for result in genes:\n if len(genes[result]) > 1:\n if (len(genes[result])) not in local_overall_overlaps:\n local_overall_overlaps[len(genes[result])] = 0\n if local_overall_overlaps[len(genes[result])] == 0:\n local_overall_overlaps[len(genes[result])] = 1\n \n with open(output_file, 'a') as outfile:\n json.dump(local_overall_overlaps, outfile)\n outfile.write(\"\\n\")\n \n print \"Finished in {0:.1f}\".format(elapsedtime) + \" s\"\n \n return 0", "def self_merge_plus_minus_ranges_w_duplicates (self):\n self.total_unique = None\n self.total = 0\n for chrom in self.__ranges.keys():\n (plus_tags,minus_tags) = self.__ranges[chrom]\n new_plus_tags = array(BYTE4,[])\n #reset counts\n self.__counts[chrom][0] = array(BYTE2,[])\n self.__counts[chrom][1] = array(BYTE2,[])\n ip = 0\n im = 0\n lenp = len(plus_tags)\n lenm = len(minus_tags)\n while ip < lenp and im < lenm:\n if plus_tags[ip] < minus_tags[im]:\n new_plus_tags.append(plus_tags[ip])\n ip += 1\n else:\n new_plus_tags.append(minus_tags[im])\n im += 1\n if im < lenm:\n # add rest of minus tags\n new_plus_tags.extend(minus_tags[im:])\n if ip < lenp:\n # add rest of plus tags\n new_plus_tags.extend(plus_tags[ip:])\n\n self.__ranges[chrom] = [new_plus_tags,[]]\n self.total += len(new_plus_tags)", "def merge_folding(ranges, current_tree, root):\n folding_ranges = []\n for starting_line, ending_line, text in ranges:\n if ending_line > starting_line:\n starting_line += 1\n ending_line += 1\n folding_repr = FoldingRegion(text, (starting_line, ending_line))\n folding_ranges.append((starting_line, ending_line, folding_repr))\n\n tree = IntervalTree.from_tuples(folding_ranges)\n changes = tree - current_tree\n deleted = current_tree - tree\n adding_folding = len(changes) > len(deleted)\n\n deleted_iter = iter(sorted(deleted))\n changes_iter = iter(sorted(changes))\n deleted_entry = next(deleted_iter, None)\n changed_entry = next(changes_iter, None)\n non_merged = 0\n\n while deleted_entry is not None and changed_entry is not None:\n deleted_entry_i = deleted_entry.data\n changed_entry_i = changed_entry.data\n dist = textdistance.jaccard.normalized_similarity(\n deleted_entry_i.text, changed_entry_i.text)\n\n if dist >= 0.80:\n # Copy folding status\n changed_entry_i.clone_node(deleted_entry_i)\n deleted_entry = next(deleted_iter, None)\n 
changed_entry = next(changes_iter, None)\n else:\n if adding_folding:\n # New symbol added\n non_merged += 1\n changed_entry = next(changes_iter, None)\n else:\n # Symbol removed\n deleted_entry_i.delete()\n non_merged += 1\n deleted_entry = next(deleted_iter, None)\n\n if deleted_entry is not None:\n while deleted_entry is not None:\n # Symbol removed\n deleted_entry_i = deleted_entry.data\n deleted_entry_i.delete()\n non_merged += 1\n deleted_entry = next(deleted_iter, None)\n\n if changed_entry is not None:\n while changed_entry is not None:\n non_merged += 1\n changed_entry = next(changes_iter, None)\n\n if non_merged > 0:\n tree_copy = IntervalTree(tree)\n tree_copy.merge_overlaps(\n data_reducer=merge_interval,\n data_initializer=root)\n return tree, root" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a pandas dataframe, return a list of tab-delimited strings that represent a bedfile.
def make_linelist_from_dataframe(df):
    lst = []
    for values in df.head().values:
        lst.append('\t'.join([str(v) for v in values]))
    return lst
[ "def bed_to_df(bed_file):\n header_lines = 0\n #Handle likely header by checking colums 2 and 3 as numbers\n with open(bed_file, 'r') as f:\n next_line = f.readline().strip()\n line_split = next_line.split(None) #This split by any blank character\n start = line_split[1]\n end = line_split[2]\n while not start.isdigit() and not end.isdigit():\n header_lines = header_lines + 1\n next_line = f.readline().strip()\n line_split = next_line.split(None) #This split by any blank character\n start = line_split[1]\n end = line_split[2]\n\n if header_lines == 0:\n dataframe = pd.read_csv(bed_file, sep=\"\\t\", header=None) #delim_whitespace=True\n else:\n dataframe = pd.read_csv(bed_file, sep=\"\\t\", skiprows=header_lines, header=None) #delim_whitespace=True\n if dataframe.shape[1] == 3:\n dataframe['description'] = True\n dataframe.columns = [\"#CHROM\", \"start\", \"end\", \"description\"]\n else:\n dataframe.columns = [\"#CHROM\", \"start\", \"end\", \"description\"]\n \n return dataframe", "def create_output(df):\n output_list = df.to_string(header=False,\n index=False,\n index_names=False).split('\\n')\n output_string = ','.join([','.join(ele.split()) for ele in output_list])\n return output_string", "def get_seqs(df):\n seqs = df.sequence.values\n return [''.join([s for s in seq if s != '-']) for seq in seqs]", "def tobed (self):\n text = \"\"\n chrs = self.regions.keys()\n chrs.sort()\n for chrom in chrs:\n for region in self.regions[chrom]:\n text+= \"%s\\t%d\\t%d\\n\" % (chrom,region[0],region[1])\n return text", "def display_df(df):\r\n\r\n console = Console()\r\n table = Table(\r\n Column(\"source_text\", justify=\"center\"),\r\n Column(\"target_text\", justify=\"center\"),\r\n title=\"Sample Data\",\r\n pad_edge=False,\r\n box=box.ASCII,\r\n )\r\n\r\n for i, row in enumerate(df.values.tolist()):\r\n table.add_row(row[0], str(row[1]))\r\n\r\n console.print(table)", "def get_station_list(df: pd.DataFrame) -> list:\n return df[\"Station Name\"].unique().tolist()", "def parse_bed(path):\t\n\twith open(path, newline = '') as found:\n\t\tfound_reader = csv.reader(found, delimiter = '\\t')\n\t\tdata = []\n\t\t\n\t\t# get info from rows\n\t\tfor row in found_reader:\n\n\t\t\t# only need to keep fields 'Chr', 'IntStart', 'IntStop', 'Orientation', and 'ReadID'\n\t\t\tinfo = {'Chr' \t\t\t: row[0],\n\t\t\t\t\t'IntStart' \t\t: int(row[1]),\n\t\t\t\t\t'IntStop' \t\t: int(row[2]),\n\t\t\t\t\t'Orientation'\t: 'unknown',\n\t\t\t\t\t'ReadID' \t: '',\n\t\t\t\t\t'id'\t\t\t: ''\n\t\t\t\t\t}\n\n\t\t\tdata.append(info)\n\t\t\t\n\treturn data", "def process_file(fpath):\n base_name = os.path.splitext(os.path.basename(fpath))[0]\n df = pd.concat([t for t in parse_file(fpath)])\n df['break'] = df.apply(is_break, axis=1)\n table_name = base_name\n tables = {table_name: []}\n for i, row in df.iterrows():\n if row['break'] == True:\n table_name = row[0]\n tables.setdefault(table_name, [])\n continue\n if row[0] in set(r[0] for r in tables[table_name]):\n base_name += '?'\n table_name = base_name\n tables[table_name] = []\n tables[table_name].append(row)\n\n for name, rows in tables.items():\n t = pd.DataFrame(rows)\n if not t.empty:\n t.drop(columns=['break'], inplace=True)\n t.set_index(0, inplace=True)\n yield name, t", "def df2bytes(dataframe):\n return '\\n'.join(\n [','.join(dataframe), ] +\n [','.join(map(str, row)) for row in dataframe.values]\n ).encode()", "def get_fragfiles( self ) -> pd.DataFrame:\n fragpath = Path(core.get_option('loop_master', 'fragments'))\n self.log.debug(f'Listing available fragment 
files at: {fragpath.name}')\n if not fragpath.is_dir():\n raise NodeDataError(f'{fragpath.name} is not a folder.')\n return pd.DataFrame([(x.name[:4], x.name[5:6], x, y) for x, y in zip(sorted(fragpath.glob('*/*3mers.gz')),\n sorted(fragpath.glob('*/*9mers.gz')))],\n columns=['pdb', 'chain', '3mers', '9mers'])", "def get_fragfiles( self ) -> pd.DataFrame:\n fragpath = Path(core.get_option('loop_master', 'fragments'))\n self.log.debug(f'Listing available fragment files at: {fragpath.name}')\n if not fragpath.is_dir():\n raise NodeDataError(f'{fragpath.name} is not a folder.')\n return pd.DataFrame([(x.name[:4], x.name[5:6], x, y) for x, y in zip(sorted(fragpath.glob('*/*3mers.gz')),\n sorted(fragpath.glob('*/*9mers.gz')))],\n columns=['pdb', 'chain', '3mers', '9mers'])", "def row_name(df):\n rowName = []\n gen = df.iterrows()\n for i in range(len(df.index)):\n row = next(gen)\n temp = ''\n for j in row[1]: temp = temp + str(j).zfill(4)\n rowName.append(temp)\n return rowName", "def read_bcf_phased_genotypes(bcf_filename):\n phased_genotypes = []\n\n for r in pysam.VariantFile(bcf_filename, 'r'):\n for alt in r.alts:\n chromosome = r.chrom\n position = r.pos\n ref = r.ref\n\n assert len(r.samples) == 1\n gt_infos = r.samples[0].items()\n\n assert len(gt_infos) == 1\n assert gt_infos[0][0] == 'GT'\n allele1, allele2 = gt_infos[0][1]\n\n phased_genotypes.append([chromosome, position, ref, alt, allele1, allele2])\n\n phased_genotypes = pd.DataFrame(\n phased_genotypes,\n columns=['chromosome', 'position', 'ref', 'alt', 'allele1', 'allele2'])\n\n return phased_genotypes", "def pandasdf2pdb(df):\n s = \"\"\n chain = \"\"\n for _, row_atom in df.iterrows():\n atnum, atname, resname, resnum, x, y, z = row_atom\n atnum = int(atnum)\n resnum = int(resnum)\n # See for pdb format:\n # https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html.\n # \"alt\" means alternate location indicator\n # \"code\" means code for insertions of residues\n \t# \"seg\" means segment identifier\n # \"elt\" means element symbol\n if len(atname) == 4:\n s += (\"{record_type:6s}{atnum:5d} {atname:<4s}{alt:1s}{resname:>4s}\"\n \"{chain:1s}{resnum:>4d}{code:1s} {x:>8.3f}{y:>8.3f}{z:>8.3f}\"\n \"{occupancy:>6.2f}{temp_fact:>6.2f} {seg:<2s}{elt:>2s}\\n\"\n .format(record_type=\"ATOM\", atnum=atnum, atname=atname, alt=\"\",\n resname=resname, chain=chain, resnum=resnum, code=\"\",\n x=x, y=y, z=z, occupancy=1.0, temp_fact=0.0, seg=\"\",\n elt=atname[0]))\n else:\n s += (\"{record_type:6s}{atnum:5d} {atname:<3s}{alt:1s}{resname:>4s}\"\n \"{chain:1s}{resnum:>4d}{code:1s} {x:>8.3f}{y:>8.3f}{z:>8.3f}\"\n \"{occupancy:>6.2f}{temp_fact:>6.2f} {seg:<2s}{elt:>2s}\\n\"\n .format(record_type=\"ATOM\", atnum=atnum, atname=atname, alt=\"\",\n resname=resname, chain=chain, resnum=resnum, code=\"\",\n x=x, y=y, z=z, occupancy=1.0, temp_fact=0.0, seg=\"\",\n elt=atname[0]))\n return s", "def df2text(df, show_index, show_grid):\n df = remove_newlines(df)\n text = \"\"\n\n if show_grid:\n grid_sizes = get_grid_size(df)\n\n # The character \"index\" has length of 5\n index_cell_size = max(length(str(df.index[-1])) + 1, 6)\n\n # format header\n # ----------------\n # 1. create the top grid\n if show_index:\n text += \"+\" + \"-\" * index_cell_size\n\n for colname in df.columns:\n text += \"+\" + \"-\" * grid_sizes[colname]\n text += \"+\\n\"\n horizon_line = text\n\n # 2. 
create colnames row\n if show_index:\n text += \"|index\" + \" \" * (index_cell_size - 5)\n\n for colname in df.columns:\n text += \"|\" + colname + \" \" * (grid_sizes[colname] - length(colname))\n text += \"|\\n\"\n\n # 3. append a header grid\n text += horizon_line\n\n # format body\n # ------------------\n for index, row in df.iterrows():\n if show_index:\n text += \"|\" + str(index) + \" \" * (index_cell_size - length(str(index)))\n for colname in grid_sizes:\n text += \"|\" + str(row[colname]) + \" \" * (grid_sizes[colname] - length(str(row[colname])))\n text += \"|\\n\"\n \n text += horizon_line\n\n return text\n \n # Not showing grids\n header = df.columns.to_list()\n\n if show_index:\n text += \"index\\t\"\n\n text += \"\\t\".join(header) + \"\\n\"\n\n for index, row in df.iterrows():\n if show_index:\n text += str(index) + \"\\t\"\n text += \"\\t\".join(list(map(str, row.to_list()))) + \"\\n\"\n return text", "def get_data(fn):\n rows = []\n dbf = dbflib.open(fn)\n for i in range(dbf.record_count()):\n rows.append(dbf.read_record(i))\n\n return pd.DataFrame(rows)", "def get_recording_populations(self):\n results = []\n with self.transaction() as cursor:\n for row in cursor.execute(\n \"\"\"\n SELECT label\n FROM population\n \"\"\"):\n results.append(str(row[\"label\"], 'utf-8'))\n return results", "def parse_file(fname):\n for table in tabula.read_pdf(fname, pages=\"all\", multiple_tables=True, pandas_options={'dtype': str}):\n table = table.apply(parse_row, axis=1).apply(pd.Series)\n yield table", "def export_pairwise_gene_match_report(df: pd.DataFrame, outdir: Path) -> Path:\n outname = outdir / 'pairwise_gene_match_report.tsv'\n df.to_csv(outname, sep=\"\\t\", index=None)\n return outname" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Renames the BedTools.Interval.name() into something that can be used as an index (removes the tabs and replaces them with a friendlier delimiter).
def rename_index(interval_name):
    chrom, start, end, name, score, strand = str(
        interval_name
    ).strip().split('\t')
    return "{}:{}-{}:{}:{}".format(chrom, start, end, name, strand)
[ "def new_index_from_name(base_name):\n return base_name + \".\" + str(int(time.time()))", "def set_interval_name(sinsData):\n tempData = []\n df_depth = sinsData.df_depth\n for name,df_well in zip (sinsData.wellnames,sinsData.dfs_well):\n for index, row in df_depth[df_depth[\"Well\"] == name].iterrows():\n df_well.loc[((df_well['TVDss'] >= row.Top) & (df_well['TVDss'] <= row.Bottom)), \"Interval\"] = row.Interval\n tempData.append(df_well)\n # THIS REPLACES THE ORIGINAL INSTANCE\n sinsData.dfs_well = tuple(tempData) \n return sinsData", "def m21IntervalStr(interval):\n if interval in _intervalObj:\n return _intervalObj[interval]\n intervalObj = Interval(interval)\n _intervalObj[interval] = intervalObj\n return intervalObj", "def get_reg_name(self, name):\n return name.lower().replace('-', '').replace('_', '').replace(' ', '')", "def format_tensor_name(name):\n\n if name.startswith(\"^\"):\n name_old = name\n name = name.strip(\"^\")\n log.warning(\"Changing \\\"{}\\\" to \\\"{}\\\"\".format(name_old, name))\n return name.split(\":\")[0]\n # return name", "def get_next_lag_name(self):\n name = 'bond' + str(self.next_lag_index)\n self.next_lag_index += 1\n return name", "def getMarkerName(index):", "def gen_name(self):\n result = \"\"\n labels = self._name.split(\".\")\n for lbl in labels:\n result += int_to_hex(len(lbl), 2)\n result += str_to_hex(lbl)\n return result + int_to_hex(0, 2)", "def _create_name(cls, template: str, index: int):\n\n splitted = template.split(\".\")\n if len(splitted) == 1:\n return f\"{splitted[0]}.{index}\"\n elif len(splitted) == 2:\n return f\"{splitted[0]}.{index}.{splitted[1]}\"\n else:\n return f'{\".\".join(splitted[:-1])}.{index}.{splitted[-1]}'", "def get_index_name(args, lt_time, gte_time):\n log_lt = args.index + '-' + lt_time.strftime('%Y.%m.%d')\n log_gte = args.index + '-' + gte_time.strftime('%Y.%m.%d')\n\n if log_lt == log_gte:\n es_index = log_lt\n else:\n es_index = log_lt + ',' + log_gte\n return es_index", "def index_file_name(name: str) -> str:\n return name + '-idx.json'", "def _generate_series_name(self, row, current_col_index):\n name = \" \".join([row[col] for col in range(1, self.data_start_col)])\n\n if len(self.theader_list)-self.data_start_col >= 2:\n # if there is many data columns, append current data column name\n name = u\"%s-%s\" % (name, self.theader_list[current_col_index].decode(\"utf-8\"))\n\n return name", "def _update_name(self, name):\n if self._name:\n metric = self._name\n if name:\n metric = metric + \".\" + name\n else:\n metric = name\n return metric", "def __make_op_name(self, branch, tag):\n return '{}_{}'.format(branch, tag)", "def convert_name(value: str) -> str:\n return \"--\" + value.replace(\"_\", \"-\")", "def slicename_creation(x):\n slicename = x//100*100\n return slicename", "def rename_table(table):\n return table.replace(\".\",\"\").replace(\"-\",\"\").replace('/','').replace(\" \",\"\").upper()", "def _make_index_name(z_type, column_name):\n\n table_abbrev = \"mea_\" + z_type.replace(\"_\",\"\")[:3]\n column_abbrev = ''.join([x[0] for x in column_name.split('_')])\n md5 = hashlib.md5(\n '{}.{}'.format(z_type, column_name).encode('utf-8')). 
\\\n hexdigest()\n hashlen = NAME_LIMIT - (len(table_abbrev) + len(column_abbrev) +\n 3 * len('_') + len('ix'))\n return '_'.join([table_abbrev, column_abbrev, md5[:hashlen], 'ix'])", "def set_bowtie2_index_name(bowtie2_index_name):\n global BOWTIE2_INDEX_NAME\n BOWTIE2_INDEX_NAME = bowtie2_index_name", "def interconnect_to_name(interconnect):\n return \"_\".join(sorted(check_and_format_interconnect(interconnect)))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Splits list (lst) into n equal parts.
def split(lst, n):
    newlist = []
    division = len(lst) / float(n)
    for i in xrange(n):
        newlist.append(
            lst[int(round(division * i)):int(round(division * (i + 1)))])
    return newlist
[ "def do_chunkify(lst,n):\n return [lst[i::n] for i in range(n)]", "def divide_list_in_n_equal_chunks(_list, n):\n for i in range(0, len(_list), n):\n yield _list[i : i + n]", "def chunks(l, n):\n \n if n<1:\n n=1\n return [l[i:i+n] for i in range(0, len(l), n)]", "def partition_list(lst, n):\r\n return [lst[i:i + n] for i in range(0, len(lst), n)]", "def chunks(l, n):\n o = int(np.round(len(l)/n))\n out = []\n # For item i in a range that is a length of l,\n for i in range(0, n):\n # Create an index range for l of n items:\n if i == n-1:\n sub = l[i*o:]\n else:\n sub = l[i*o:i*o+o]\n \n if len(sub):\n out.append(sub)\n return out", "def _chunks(lst, n):\n for i in range(0, len(lst), n):\n yield lst[i:i + n]", "def chunks(lst, amount):\n return [lst[i:i + amount] for i in range(0, len(lst), amount)]", "def split_every(seq, n):\n return [seq[i:i+n] for i in range(0, len(seq), n)]", "def slice_list(list_: list, n: int):\n if n > len(list_):\n raise IndexError\n elif len(list_) % n != 0:\n result = []\n els_in_slice = int(len(list_) / n) +1 # Number of els in one\n prev_last = 0\n for i in range(n-1):\n result.append(list_[prev_last:prev_last + els_in_slice])\n prev_last += els_in_slice\n result.append(list_[prev_last:])\n else:\n result = []\n els_in_slice = int(len(list_) / n) # Number of els in one\n prev_last = 0\n for i in range(n):\n result.append(list_[prev_last:prev_last + els_in_slice])\n prev_last += els_in_slice\n return result", "def _split_list(list_in, num):\n if num >= len(list_in):\n return list_in\n step0 = int(len(list_in) / num)\n n_left = len(list_in) % num\n list_out = []\n for i in range(n_left):\n ibeg = i * (step0 + 1)\n iend = (i + 1) * (step0 + 1)\n sub_list = list_in[ibeg: iend]\n list_out.append(sub_list)\n for i in range(num - n_left):\n ibeg = n_left * (step0 + 1) + i * step0\n iend = n_left * (step0 + 1) + (i + 1) * step0\n sub_list = list_in[ibeg: iend]\n list_out.append(sub_list)\n return list_out", "def split_into_groups_of(groupsize, thelist):\n\tresult = []\n\n\tfor i in range(0, len(thelist), groupsize):\n\t\tresult.append(thelist[i:i+groupsize])\n\n\treturn result", "def splitEvenly(n, l):\n for i in range(0, len(l), n):\n yield l[i : i+n]", "def split_into(xs: Collection, n: int) -> Collection:\n\n bucket_size, remainder = divmod(len(xs), n)\n\n # We need one fewer than `n`, since these become split positions.\n relative_splits = np.full(n - 1, bucket_size)\n # e.g. 
10 by 3 -> 4, 3, 3\n relative_splits[:remainder] += 1\n\n return split(xs, np.cumsum(relative_splits))", "def split_into_n_parts(x, n):\n result = []\n if(x < n):\n return None\n elif (x % n == 0):\n for i in range(n):\n result += [x//n]\n else:\n zp = n - (x % n)\n pp = x//n\n for i in range(n):\n if(i>= zp):\n result += [pp + 1]\n else:\n result += [pp]\n return result", "def group(lst, n):\r\n for i in range(0, len(lst), n):\r\n values = lst[i:i + n]\r\n yield tuple(values)", "def _chunks(lst: List[T], chunk_size: int) -> Generator[List[T], None, None]:\n return (lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size))", "def n_elements(my_list, start, n):\r\n last = start + n\r\n return my_list[start:last]", "def chunks(lst, chunk_size):\n for i in range(0, len(lst), chunk_size):\n yield lst[i:i + chunk_size]", "def group(iterable, n):\n groups = []\n\n if isinstance(iterable, types.GeneratorType):\n iterable = list(iterable)\n\n num_groups = ceil(len(iterable) / n)\n iterable = iter(iterable)\n\n while len(groups) < num_groups:\n groups.append(list(islice(iterable, 0, n)))\n\n return groups", "def chunks_by_element(arr, n):\n return [arr[i:i+n] for i in range(0, len(arr), n)]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given two boundaries (upper and lower genomic boundaries), returns the area defined by anchor - lower_offset and anchor + upper_offset. If the region bleeds over the boundaries, this function will return the genomic left pad and genomic right pad.
def _get_absolute_coords_and_pad(
        anchor, upper_boundary, upper_offset, lower_boundary, lower_offset):
    left_pad = _too_far(anchor, lower_offset, lower_boundary, -1)
    right_pad = _too_far(anchor, upper_offset, upper_boundary, 1)
    absolute_start = anchor - lower_offset + left_pad
    absolute_end = anchor + upper_offset - right_pad
    return left_pad, absolute_start, absolute_end, right_pad
[ "def range_overlap(a, b, ratio=False):\n a_chr, a_min, a_max = a\n b_chr, b_min, b_max = b\n a_min, a_max = sorted((a_min, a_max))\n b_min, b_max = sorted((b_min, b_max))\n shorter = min((a_max - a_min), (b_max - b_min)) + 1\n # must be on the same chromosome\n if a_chr != b_chr:\n ov = 0\n else:\n ov = min(shorter, (a_max - b_min + 1), (b_max - a_min + 1))\n ov = max(ov, 0)\n if ratio:\n ov /= float(shorter)\n return ov", "def get_map_ranges(lon0, lon1, lat0, lat1, aspect, border=0.05):\n\n # data ranges\n dlon = lon1 - lon0\n dlat = lat1 - lat0\n\n # map view set by longitude span\n if dlon > aspect * dlat:\n x0 = lon0 - border * dlon\n x1 = lon1 + border * dlon\n dlon_view = dlon + 2 * border * dlon\n dlat_view = dlon_view / aspect\n assert dlat_view > dlat\n margin = (dlat_view - dlat) / 2\n y0 = lat0 - margin\n y1 = lat1 + margin\n\n # map view set by latitude span\n else:\n y0 = lat0 - border * dlat\n y1 = lat1 + border * dlat\n dlat_view = dlat + 2 * border * dlat\n dlon_view = dlat_view * aspect\n assert dlon_view >= dlon\n margin = (dlon_view - dlon) / 2\n x0 = lon0 - margin\n x1 = lon1 + margin\n\n return x0, x1, y0, y1", "def get_overlap_coords(raster_one, raster_two):\n \n xmargin = 1/2*(raster_one.xstep + raster_two.xstep)\n ymargin = 1/2*(np.abs(raster_one.ystep) + np.abs(raster_two.ystep))\n \n #Check top\n inbound = False\n x = y = 0\n while not inbound and (x<raster_one.width and y<raster_one.height):\n long, lat = rio.transform.xy(raster_one.transform, [x],[y])\n long, lat = long[0], lat[0]\n testx = (long>=raster_two.bounds[0]-xmargin and \n long<=raster_two.bounds[1]+xmargin)\n testy = (lat>=raster_two.bounds[2]-ymargin and \n lat<=raster_two.bounds[3]+ymargin)\n inbound = testx and testy\n if not testx: x+=1\n if not testy: y+=1\n \n topleft=[x,y]\n if not inbound:\n logging.error(\"Failure finding overlap\")\n topleft = 0\n #Check bottom\n x = raster_one.width - 1\n y = raster_one.height - 1\n while not inbound and (x>=0 and y>=0) :\n long, lat = rio.transform.xy(raster_one.transform, [x],[y])\n long, lat = long[0], lat[0]\n\n testx = (long>=raster_two.bounds[0]-xmargin and \n long<=raster_two.bounds[1]+xmargin)\n testy = (lat>=raster_two.bounds[2]-ymargin and \n lat<=raster_two.bounds[3]+ymargin)\n inbound = testx and testy\n if not testx: x-=1\n if not testy: y-=1\n if not inbound:\n logging.error(\"Failure finding overlap\") \n bottomright = [x,y]\n return topleft, bottomright", "def _areaUnder2d(a,b):\n return 0.5*(a[0]*b[1]-a[1]*b[0])", "def overlap(a1, a2, b1, b2):\n\tassert a1 <= a2\n\tassert b1 <= b2\n\tassert isinstance(a1, int) and isinstance(a2, int) and isinstance(b1, int) and isinstance(b2, int)\n\t\n\t# if a interval is completely to the left of the b interval\n\tif a2 < b1:\n\t\treturn False\n\t# if a interval is completely to the right of the b interval\n\telif a1 > b2:\n\t\treturn False\n\telse:\n\t\treturn True", "def getOverlap(a, b):\r\n return max(0, 1 + min(a[1], b[1]) - max(a[0], b[0]))", "def get_area_rectangle(w, h):\n return -1.0", "def _calculate_overlap_area(x1, y1, x2, y2, width, height):\n\n dx = min(x1 + width, x2 + width) - max(x1, x2)\n dy = min(y1 + height, y2 + height) - max(y1, y2)\n\n if dx > 0 and dy > 0:\n return dx * dy\n else:\n return 0", "def bb_union(bbox1, bbox2):\n # Lower bounds\n xs = np.min([bbox1[0], bbox2[0]])\n ys = np.min([bbox1[1], bbox2[1]])\n # Upper bounds\n xe = np.max([bbox1[2], bbox2[2]])\n ye = np.max([bbox1[3], bbox2[3]])\n coord = np.array([xs, ys, xe, ye])\n return coord", "def 
bed_get_flanking_regions(bedfile, left_range, right_range, genome_file=None):\n if isinstance(left_range, int):\n left_range = (left_range, 0)\n if isinstance(right_range, int):\n right_range = (0, right_range)\n\n assert isinstance(left_range, tuple), \"Parameter 'left_range' must either be an integer or a tuple!\"\n assert len(left_range) == 2, \"Parameter 'left_range' must be a tuple of length 2!\"\n assert left_range[0] > left_range[1] or left_range == (0, 0), (\"The left-side range modifier of left_range must be \"\n \"less than the right-side!\")\n assert isinstance(right_range, tuple), \"Parameter 'right_range' must either be an integer or a tuple!\"\n assert len(right_range) == 2, \"Parameter 'right_range' must be a tuple of length 2!\"\n assert right_range[0] < right_range[1] or right_range == (0, 0), (\"The right-side range modifier of left_range must\"\n \" be greater than the left-side!\")\n bedfile = Path(bedfile)\n assert bedfile.exists(), \"Given bedfile path does not exist!\"\n assert bedfile.is_file(), \"Given bedfile path was not a file! Did you provide a directory?\"\n leftbed = bedfile.with_name(bedfile.stem +\n \"_left_Offset{0}_Size{1}\".format(left_range[1],\n left_range[0] - left_range[1]) +\n bedfile.suffix)\n rightbed = bedfile.with_name(bedfile.stem +\n \"_right_Offset{0}_Size{1}\".format(right_range[1],\n right_range[0] - right_range[1]) +\n bedfile.suffix)\n granges = {chrm: int(size) for chrm, size\n in [line.strip().split(\"\\t\") for line in open(genome_file)]} if genome_file else None\n with bedfile.open() as bf, leftbed.open(\"w\") as lbf, rightbed.open(\"w\") as rbf:\n records = (line.strip().split('\\t')[0:4] for line in bf)\n for (chr, s, e, id) in records:\n if left_range != (0, 0):\n left = [chr,\n int(s) - left_range[0],\n int(s) - left_range[1],\n id + \"_left\"]\n ldiff = 0\n if left[2] > left[1] > 0:\n left[3] += \"_offset-{0}_size-{1}\".format(left_range[1],\n left[2] - left[1])\n else:\n if left[1] < 0:\n ldiff = -left[1] # note its '-' because left[1] is negative\n left[2] += ldiff\n left[2] = left[2] if left[2] <= int(s) else int(s)\n left[1] = 0\n if left[1] == left[2]:\n left[2] += 1\n ldiff -= 1\n left[3] += \"_offset-{0}_size-{1}\".format(left_range[1] - ldiff,\n left[2] - left[1])\n else:\n left[3] += \"_offset-{0}_size-{1}\".format(left_range[1],\n left[2] - left[1])\n left = (str(i) for i in left)\n lbf.write('\\t'.join(left) + \"\\n\")\n if right_range != (0, 0):\n right = [chr,\n int(e) + right_range[0],\n int(e) + right_range[1],\n id + \"_right\"]\n if granges:\n if granges[chr] <= right[2] or granges[chr] <= right[1]:\n rdiff = granges[chr] - right[2]\n right[2] = granges[chr]\n right[1] += rdiff\n right[1] = right[1] if right[1] >= int(e) else int(e)\n if right[2] == right[1]:\n right[1] -= 1\n rdiff -= 1\n right[3] += \"_offset-{0}_size-{1}\".format(right_range[0] + rdiff,\n right[2] - right[1])\n else:\n right[3] += \"_offset-{0}_size-{1}\".format(right_range[0],\n right[2] - right[1])\n else:\n right[3] += \"_offset-{0}_size-{1}\".format(right_range[0],\n right[2] - right[1])\n right = (str(i) for i in right)\n rbf.write('\\t'.join(right) + \"\\n\")\n return", "def get_ab_area(self):\n\t\treturn la.norm(cross(self.a, self.b))/2", "def bb_intersect(bbox1, bbox2):\n # Lower bounds\n xs = np.max([bbox1[0], bbox2[0]])\n ys = np.max([bbox1[1], bbox2[1]])\n # Upper bounds\n xe = np.min([bbox1[2], bbox2[2]])\n ye = np.min([bbox1[3], bbox2[3]])\n coord = np.array([xs, ys, xe, ye])\n width = xe - xs\n height = ye - ys\n if width > 0 and 
height > 0:\n return coord\n else:\n return np.array([])", "def determine_overlap(peak, region):\n assert(peak.strand == region.strand)\n # print('peak:', peak.start, peak.end)\n # print('region:', region.start, region.end)\n if peak.start >= region.end or region.start >= peak.end:\n # newPeak and region don't overlap\n return 'no_overlap', 0\n elif peak.start == region.start and peak.end == region.end:\n # newPeak and region sizes are equal (completely overlap)\n overlap = peak.end - peak.start\n return 'equal', overlap\n elif peak.start <= region.start and peak.end <= region.end:\n # newPeak overlaps the left side of the region only\n overlap = peak.end - region.start\n return 'left', overlap\n elif peak.start >= region.start and peak.end >= region.end:\n # newPeak overlaps the right side of the region only\n overlap = region.end - peak.start\n return 'right', overlap\n elif peak.start <= region.start and peak.end >= region.end:\n # region is completely contained within newPeak\n overlap = region.end - region.start\n return 'whole_region', overlap\n elif peak.start >= region.start and peak.end <= region.end:\n # newPeak is completely contained within region\n overlap = peak.end - peak.start\n return 'whole_peak', overlap\n else:\n print(\"warning: {}, {} overlaps in an unexpected way.\".format(\n peak, region\n ))\n return 'no_overlap', -1", "def calculate_overlap_durations(ranges_a, ranges_b):\n max_starts_matrix = np.maximum.outer(ranges_a[:, 0], ranges_b[:, 0])\n min_ends_matrix = np.minimum.outer(ranges_a[:, 1], ranges_b[:, 1])\n overlap_durations = np.maximum(0, min_ends_matrix - max_starts_matrix)\n return overlap_durations", "def get_bounding_box_area(self, pair):\n left, right = pair\n combined = np.vstack([left['rect'], right['rect']])\n bounding_rect = cv.minAreaRect(combined)\n bounding_rect = cv.boxPoints(bounding_rect)\n area = self.get_rect_area(bounding_rect)\n return area", "def get_ab_area(self):\n\t\treturn la.norm(cross(self.a, self.b))", "def get_bounds( reads, start_pos_index, end_pos_index ):\n max_low = sys.maxint\n max_high = -sys.maxint\n for read in reads:\n if read[ start_pos_index ] < max_low:\n max_low = read[ start_pos_index ]\n if read[ end_pos_index ] > max_high:\n max_high = read[ end_pos_index ]\n return max_low, max_high", "def overlap(start_1, end_1, start_2, end_2):\n return range(max(start_1, start_2),\n min(end_1, end_2) + 1)", "def touch_detect_region(self, upper_left, bottom_right): # Sub-section .1\n command = 'FF39{:04X}{:04X}{:04X}{:04X}'.format(\n *(upper_left + bottom_right)\n )\n return self._send_command(command)", "def rectify(left, right, cam_params):\n return left, right" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines what the genomic lower boundary is. For intron regions, if a neighboring (next) interval exists, set the boundary with respect to that. Otherwise, set the lower boundary to 0. For exon regions, if 'stop_at_midpoint' flag is on, set the boundary to the middle of the exon. Otherwise, set the lower boundary to the other end of the exon.
def _get_lower_boundary(current_interval, next_interval, strand_or_5p,
                        stop_at_midpoint=False):
    if strand_or_5p == '+':
        if stop_at_midpoint:
            return (current_interval.end + current_interval.start) / 2
        else:
            return current_interval.start
    else:
        return next_interval.end if next_interval is not None else 0
[ "def _lower_bound(self, query: str, offset_l: int, offset_h: int) -> int:\n logging.debug('lower bound 2 %s %s %s', query, offset_l, offset_h)\n if offset_l >= offset_h:\n return self._seek_back_to_line_start(offset_l)\n\n mid = (offset_l + offset_h) // 2\n\n line_start = self._seek_back_to_line_start(mid)\n #current_id = self._id_from_line(line_start)\n current_line = self._get_line(line_start)\n next_line_start = self._seek_to_next_line(mid)\n\n #if current_id >= query:\n if current_line >= query:\n return self._lower_bound(query=query, offset_l=offset_l, offset_h=line_start - 1)\n return self._lower_bound(query=query, offset_l=next_line_start, offset_h=offset_h)", "def getOptBoundLower(self):\n return _core.CGPopt_getOptBoundLower(self)", "def lower_bound(self):\n return self.__lower_bound", "def set_lower_bound(constraint, lower_bound_on_index):\n for row in range(Bridge.HEIGHT):\n for column in range(Bridge.WIDTH):\n x = lower_bound_on_index(row, column)\n if x is not None:\n constraint[row][column][0] = x", "def bounds_low(self):\n if self._bounds_low is None:\n bounds = []\n for parameter in self.parameters_ordered:\n bounds.append(parameter.bound_low)\n\n self._bounds_low = np.array(bounds)\n\n return self._bounds_low", "def _index_of_interval_touching_strictly_from_left(self, x: int, ibl_x=None) -> int:\n # With the default lexicographic ordering on tuples, the x - 1 ensures we'll get the index\n # of an existing range starting at x if there is one, rather the index after it.\n if ibl_x is None:\n ibl_x = self._bisect_left(x)\n if ibl_x > 0:\n assert self._get_left_endpoint(ibl_x - 1) < x\n if self._get_right_endpoint(ibl_x - 1) >= x: # its right, open endpoint touches x\n return ibl_x - 1\n return -1", "def left_boundary(self):\n\n return self._left_boundary", "def setOptBoundLower(self, optBoundLower):\n return _core.CGPopt_setOptBoundLower(self, optBoundLower)", "def min_boundary(left, right, tokens):\n offset = 0\n while True:\n offset += 1\n left -= 1\n right += 1\n\n if (left < 0 or right == len(tokens) or\n _rB.search(tokens[left]) is not None or\n _rB.search(tokens[right]) is not None):\n return offset", "def check_and_adjust_boundary(self, pos):\n valu = self.limit_upper.copy()\n vall = self.limit_lower.copy()\n idu = pos > valu\n idl = pos < vall\n if self.boundary_policy == self.BoundaryPolicy.ToNaN:\n pos = _np.where(idu, _np.nan, pos)\n pos = _np.where(idl, _np.nan, pos)\n else:\n pos = _np.where(idu, self.limit_upper, pos)\n pos = _np.where(idl, self.limit_lower, pos)\n return pos", "def add_boundaries(self):\n # create the boundary points\n tmp_diff = 0\n while tmp_diff < 2.0*self.h:\n tmp_diff += self.dx\n tmp_min = self.min_x - tmp_diff\n tmp_max = self.max_x + tmp_diff\n\n # upper and lower rows\n for x in np.arange(tmp_min[0], tmp_max[0] + self.lil_bit, self.dx):\n self.place_point(x, tmp_min[1], bound=1)\n self.place_point(x, tmp_max[1], bound=1)\n\n # left and right (removing corners)\n tmp = np.arange(tmp_min[1], tmp_max[1] + self.lil_bit, self.dx)\n for i, y in enumerate(tmp):\n if i != 0 and i != len(tmp)-1:\n self.place_point(tmp_min[0], y, bound=1)\n self.place_point(tmp_max[0], y, bound=1)\n\n # account for the boundary particle changing limits\n self.min_x -= tmp_diff\n self.max_x += tmp_diff", "def is_before(self, value):\n if isinstance(value, Interval):\n result = ((self.num > 0) and (value.num > 0) and\n (value.get_end() < self.start))\n else:\n result = ((self.num > 0) and (value < self.start))\n if _Interval_debug:\n print \"is_before\", 
\"self:\", self, \"value:\", value, \\\n \"returns:\", result\n return result", "def boundary():\r\n return 250", "def set_boundaries(self):\n\n\t\t# States boundaries\n\t\t# X [-]\n\t\tself.low_bnd.states[0] = -2\n\t\tself.upp_bnd.states[0] = 2\n\n\t\t# Y [-]\n\t\tself.low_bnd.states[1] = -2\n\t\tself.upp_bnd.states[1] = 2\n\n\t\t# Z [-]\n\t\tself.low_bnd.states[2] = -2\n\t\tself.upp_bnd.states[2] = 2\n\n\t\t# Vx [-]\n\t\tself.low_bnd.states[3] = -20\n\t\tself.upp_bnd.states[3] = 20\n\n\t\t# Vy [-]\n\t\tself.low_bnd.states[4] = -20\n\t\tself.upp_bnd.states[4] = 20\n\n\t\t# Vz [-]\n\t\tself.low_bnd.states[5] = -20\n\t\tself.upp_bnd.states[5] = 20\n\n\t\t# m [kg]\n\t\tself.low_bnd.states[6] = 1e-6\n\t\tself.upp_bnd.states[6] = self.mass0\n\n\n\t\t# T [-]\n\t\tself.low_bnd.controls[0] = 1e-6\n\t\tself.upp_bnd.controls[0] = self.Tmax\n\n \t\t# Tx [-]\n\t\tself.low_bnd.controls[1] = - 1\n\t\tself.upp_bnd.controls[1] = 1\n\n\t\t# Ty [-]\n\t\tself.low_bnd.controls[2] = - 1\n\t\tself.upp_bnd.controls[2] = 1\n\n\t\t# Tz [-]\n\t\tself.low_bnd.controls[3] = - 1\n\t\tself.upp_bnd.controls[3] = 1\n\n\n\t\t# Initial and final times boundaries\n\t\tself.low_bnd.ti = self.upp_bnd.ti = self.fwd_time[0]\n\t\tself.low_bnd.tf = 0.2 * self.bwd_time[-1]\n\t\tself.upp_bnd.tf = 2.0 * self.bwd_time[-1]", "def lowerBound(self, openBounded = False):\n l = self.list()\n if not l:\n return None\n increment = 0\n if openBounded:\n increment = -1\n if self.incr() > 0:\n tmp = l[0]\n else:\n tmp = l[-1]\n return tmp + increment", "def lower_bound(x):\n\n return x[0]", "def get_lower_bound(self):\n raise NotImplementedError(\"Deprecated. Are you using the old robot class?\")", "def lower_left(self) -> Point:\n return self._lower_left_corner", "def boundary_at_start_of_dim(boundary: int, dim: str) -> Union[bool, None]:\n return BOUNDARY_AT_START_OF_DIM_MAPPING[boundary].get(dim, None)", "def getLowGuess(self, midpoint):\n guess = midpoint - 1\n while guess > self.getMinValue() and guess in self.getGuesses():\n guess -= 1\n return guess" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Determines what the genomic upper boundary is. For intron regions, if a neighboring (next) interval exists, set the boundary with respect to that. Otherwise, set the upper boundary to MAX. For exon regions, if 'stop_at_midpoint' flag is on, set the boundary to the middle of the exon. Otherwise, set the upper boundary to the other end of the exon.
def _get_upper_boundary(current_interval, next_interval, strand_or_5p,
                        stop_at_midpoint=False):
    if strand_or_5p == '+':
        return next_interval.start if next_interval is not None else MAX
    else:
        if stop_at_midpoint:
            return (current_interval.end + current_interval.start) / 2
        else:
            return current_interval.end
[ "def boundary():\r\n return 250", "def getOptBoundUpper(self):\n return _core.CGPopt_getOptBoundUpper(self)", "def upper_limit(self, val):\n self.gf_condition(upperLimit=val)", "def setOptBoundUpper(self, optBoundUpper):\n return _core.CGPopt_setOptBoundUpper(self, optBoundUpper)", "def bounded(self):\n return self.lower > -np.inf or self.upper < np.inf", "def get_upperbound(self) -> int:", "def get_upper_bound(self) -> _ARRAY:\n return self._upper_bound", "def upper_bound(x):\n return x[-1]", "def boundary(value, arg):\r\n value = int(value)\r\n boundary = int(arg)\r\n if value > boundary:\r\n return boundary\r\n else:\r\n return value", "def set_boundaries(self):\n\n\t\t# States boundaries\n\t\t# X [-]\n\t\tself.low_bnd.states[0] = -2\n\t\tself.upp_bnd.states[0] = 2\n\n\t\t# Y [-]\n\t\tself.low_bnd.states[1] = -2\n\t\tself.upp_bnd.states[1] = 2\n\n\t\t# Z [-]\n\t\tself.low_bnd.states[2] = -2\n\t\tself.upp_bnd.states[2] = 2\n\n\t\t# Vx [-]\n\t\tself.low_bnd.states[3] = -20\n\t\tself.upp_bnd.states[3] = 20\n\n\t\t# Vy [-]\n\t\tself.low_bnd.states[4] = -20\n\t\tself.upp_bnd.states[4] = 20\n\n\t\t# Vz [-]\n\t\tself.low_bnd.states[5] = -20\n\t\tself.upp_bnd.states[5] = 20\n\n\t\t# m [kg]\n\t\tself.low_bnd.states[6] = 1e-6\n\t\tself.upp_bnd.states[6] = self.mass0\n\n\n\t\t# T [-]\n\t\tself.low_bnd.controls[0] = 1e-6\n\t\tself.upp_bnd.controls[0] = self.Tmax\n\n \t\t# Tx [-]\n\t\tself.low_bnd.controls[1] = - 1\n\t\tself.upp_bnd.controls[1] = 1\n\n\t\t# Ty [-]\n\t\tself.low_bnd.controls[2] = - 1\n\t\tself.upp_bnd.controls[2] = 1\n\n\t\t# Tz [-]\n\t\tself.low_bnd.controls[3] = - 1\n\t\tself.upp_bnd.controls[3] = 1\n\n\n\t\t# Initial and final times boundaries\n\t\tself.low_bnd.ti = self.upp_bnd.ti = self.fwd_time[0]\n\t\tself.low_bnd.tf = 0.2 * self.bwd_time[-1]\n\t\tself.upp_bnd.tf = 2.0 * self.bwd_time[-1]", "def set_upper_bound(constraint, upper_bound_on_index):\n for row in range(Bridge.HEIGHT):\n for column in range(Bridge.WIDTH):\n x = upper_bound_on_index(row, column)\n if x is not None:\n constraint[row][column][1] = x", "def _upper_bound(self, query: str, offset_l: int, offset_h: int) -> int:\n logging.debug('upper bound 2 %s %s %s', query, offset_l, offset_h)\n if offset_l >= offset_h:\n return self._seek_back_to_line_start(offset_l)\n\n mid = (offset_l + offset_h) // 2\n\n line_start = self._seek_back_to_line_start(mid)\n #current_id = self._id_from_line(line_start)\n current_line = self._get_line(line_start)\n next_line_start = self._seek_to_next_line(mid)\n\n #if current_id >= query:\n if current_line <= query:\n return self._upper_bound(query=query, offset_l=next_line_start, offset_h=offset_h)\n return self._upper_bound(query=query, offset_l=offset_l, offset_h=line_start - 1)", "def determine_upper_bound(first_good,last_good):\n\t# Set some rules for the upper spectrum limit\n\t# Indo-US Library of Stellar Templates has a upper limit of 9464\n\tif ((last_good>=7000.) & (last_good<=9464.)) and (last_good-first_good>=500.): # cap at 7000 A\n\t\tauto_upp = last_good #7000.\n\telif ((last_good>=6750.) & (last_good<=7000.)) and (last_good-first_good>=500.): # include Ha/[NII]/[SII] region\n\t\tauto_upp = last_good\n\telif ((last_good>=6400.) & (last_good<=6750.)) and (last_good-first_good>=500.): # omit H-alpha/[NII] region if we can't fit all lines in region\n\t\tauto_upp = 6400.\n\telif ((last_good>=5050.) & (last_good<=6400.)) and (last_good-first_good>=500.): # Full MgIb/FeII region\n\t\tauto_upp = last_good\n\telif ((last_good>=4750.) 
& (last_good<=5025.)) and (last_good-first_good>=500.): # omit H-beta/[OIII] region if we can't fit all lines in region\n\t\tauto_upp = 4750.\n\telif ((last_good>=4400.) & (last_good<=4750.)) and (last_good-first_good>=500.):\n\t\tauto_upp = last_good\n\telif ((last_good>=4300.) & (last_good<=4400.)) and (last_good-first_good>=500.): # omit H-gamma region if we can't fit all lines in region\n\t\tauto_upp = 4300.\n\telif ((last_good>=3500.) & (last_good<=4300.)) and (last_good-first_good>=500.): # omit H-gamma region if we can't fit all lines in region\n\t\tauto_upp = last_good\n\telif (last_good-first_good>=500.):\n\t\tprint('\\n Not enough spectrum to fit! ')\n\t\tauto_upp = None \n\telse:\n\t\tauto_upp = last_good\n\treturn auto_upp", "def storage_interval_end_upper_bound_rule(_m, g):\r\n\r\n # Existing units\r\n if g in m.G_E_STORAGE:\r\n return m.q[g, m.T.last()] <= m.EXISTING_STORAGE_ENERGY_CAPACITY[g]\r\n\r\n # Candidate units\r\n elif g in m.G_C_STORAGE:\r\n return m.q[g, m.T.last()] <= m.b[g]", "def update_boundaries(self):\n\t\t#pass\n\t\tfor i in range(len(self.data)):\n\t\t\ttt = self.data[i]\n\t\t\tfor j in range(len(tt)):\n\t\t\t\tif self.flag_res[j] == 0:\n\t\t\t\t\t#print \"upper\", self.del_ub[j]\n\t\t\t\t\tif tt[j] > 0.9*self.del_ub[j]:\n\t\t\t\t\t\tself.del_ub[j]= 1.2*self.del_ub[j]\n\t\t\t\t\t\t#print \"Upper raised\", self.del_ub[j]\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.del_ub[j]= 0.9*self.del_ub[j]\n\t\t\t\t\t\t#print \"upper lowered\", self.del_lb[j]\n\n\t\t\t\t\tif tt[j] < 1.1*self.t_lb[j]:\n\t\t\t\t\t\tself.del_lb[j] = 1.1*self.del_lb[j]\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.del_lb[j] = 0.9*self.del_lb[j]\n\t\t#\tprint \"del_ub\", self.del_ub", "def bounds(self, reverse):\n return self.lower, self.upper", "def get_upper_bound(low, high, predicate):\n while low <= high:\n mid = (low + high) / 2\n if predicate(mid):\n low = mid + 1\n else:\n high = mid - 1\n return high", "def upper_bound(x):\n \n return x[1]", "def get_final_bound(self) -> int:\n return self.finalBound", "def upperbound_calc():\n\n upperbound = 0\n for huis in amstel.wijk_lijst:\n oude_huisprijs = float(huis.prijs)\n waardevermeerdering = float(huis.prijsverbetering)\n min_vrijstand = float(huis.min_vrijstand)\n\n # Voor elk huis wordt berekend hoeveel vrijstand hij zou hebben als\n # dat huis midden in de wijk staat.\n max_vrije_afstand = ((plattegrond.hoogte / 2) - (huis.hoogte /2)) - min_vrijstand\n nieuwe_huiswaarde = oude_huisprijs + (oude_huisprijs * (waardevermeerdering * max_vrije_afstand))\n\n upperbound += nieuwe_huiswaarde\n\n return int(upperbound)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Removes nans from a list (replaces with 0), and appends padding to ensure that the list will always be of length len(wiggle) + left_pad + right_pad.
def _clean_and_add_padding(wiggle, left_pad=0, right_pad=0, fill_pads_with=-1):
    wiggle = pd.Series(wiggle)
    wiggle = abs(wiggle)
    wiggle = np.pad(
        wiggle, (left_pad, right_pad), 'constant',
        constant_values=fill_pads_with
    )
    wiggle = np.nan_to_num(wiggle)
    return wiggle
[ "def padding(x, L, padding_list=None):\n len_x = len(x)\n assert len_x <= L, \"Length of vector x is larger than the padding length\"\n zero_n = L - len_x\n if padding_list is None:\n x.extend([0] * zero_n)\n elif len(padding_list) < zero_n:\n x.extend(padding_list + [0] * (zero_n - len(padding_list)))\n else:\n x.extend(padding_list[0:zero_n])\n return None", "def pad_list(lst, padding):\n result = [None, padding] * len(lst)\n result[0::2] = lst\n return result", "def pad(num, padding, side='left'):\n return [*num,\n *([0] * padding)] if side is 'right' else [*([0] * padding), *num]", "def pad_list_of_lists(_list):\n for i in range(len(_list)):\n element = _list[i]\n targetPadNum = 0\n\n # Gets the max padding length\n for j in range(len(element)):\n temp = element[j]\n if(len(temp) > targetPadNum):\n targetPadNum = len(temp)\n\n # Pads each list within the main list\n for j in range(len(element)):\n temp = element[j]\n while len(temp) < targetPadNum:\n temp.append(0.0)\n return _list", "def pad(l, n, pad=config.PAD_ID):\n pad_with = (0, max(0, n - len(l)))\n return np.pad(l, pad_with, mode='constant', constant_values=pad)", "def zero_pad(X,pad):\n X_pad = np.pad(X, ((0,0), (pad,pad), (pad,pad), (0,0)), 'constant', constant_values = 0)\n return X_pad", "def zpad(signal):\n if np.log2(len(signal)) - int(np.log2(len(signal))) != 0.0:\n signal = np.hstack((signal, np.zeros(2**(int(np.log2(len(signal))) + 1) - len(signal))))\n return signal", "def zero_pad(X, pad): \n X_pad = np.pad(X, ((0,0), (pad, pad), (pad, pad), (0,0)), mode='constant', constant_values=(0, 0))\n\n return X_pad", "def _pad_lists(lists: list, pad_val=0):\n max_length = 0\n sizes = []\n for li in lists:\n sizes.append(len(li))\n max_length = max_length if len(li) < max_length else len(li)\n for li in lists:\n li += [pad_val] * (max_length - len(li))\n return sizes", "def addpaddings(tokens, toZero=False):\n max_length = len(max(tokens, key=len))\n for i in range(len(tokens)):\n if toZero:\n tokens[i] += [0 for i in range(max_length - len(tokens[i]))]\n else:\n tokens[i] += [PAD_TOKEN for i in range(max_length - len(tokens[i]))]\n return tokens", "def padlist(list_to_pad, padlen, pad_token=0):\n padded_list = list_to_pad[:padlen]\n padded_list = padded_list + [pad_token] * (padlen - len(list_to_pad))\n return padded_list", "def zip_pad(*iterables, **kw):\n\tif kw:\n\t\tassert len(kw) == 1\n\t\tpad = kw[\"pad\"]\n\telse:\n\t\tpad = None\n\tdone = [len(iterables)-1]\n\tdef pad_iter():\n\t\tif not done[0]:\n\t\t\treturn\n\t\tdone[0] -= 1\n\t\twhile 1:\n\t\t\tyield pad\n\titerables = [chain(seq, pad_iter()) for seq in iterables]\n\treturn izip(*iterables)", "def __pad(self, tensor_list, length):\n return torch.stack([torch.cat([tensor.data, tensor.new(length-tensor.size(0)).zero_()])\n for tensor in tensor_list]).to(self.device)", "def pad_sequence(sequence, n, pad_left: bool = ..., pad_right: bool = ..., left_pad_symbol: Optional[Any] = ..., right_pad_symbol: Optional[Any] = ...):\n ...", "def add_pad(l, n, item):\n if n >= len(l):\n l += [None] * (n + 1 - len(l))\n if l[n] is None:\n l[n] = item", "def extend_padding(ls_of_ls, padding=''):\n maxlen = max(map(len, ls_of_ls))\n newls = []\n for ls in ls_of_ls:\n if len(ls) != maxlen:\n ls.extend([padding]*(maxlen - len(ls)))\n newls.append(ls)\n return newls", "def pad(sequences, pad_value=-1):\n\n # Find the maximum-length sequence.\n max_length = -float('inf')\n for sequence in sequences:\n if len(sequence) > max_length:\n max_length = len(sequence)\n\n # Initialize the 2D 
array with -1 in every entry.\n padded_sequences = np.full(shape=[len(sequences), max_length],\n fill_value=-1,\n dtype=np.int)\n\n # Fill the values that are known.\n for i in range(len(sequences)):\n for j in range(len(sequences[i])):\n padded_sequences[i][j] = sequences[i][j]\n\n return padded_sequences", "def zero_pad(signal):\n power = log(len(signal), 2)\n power = ceil(power)\n\n if len(signal) == 2**power:\n return signal.copy()\n else:\n return np.concatenate(signal, np.zeros((2**power - len(signal), 1)))", "def apply_padding_by_last(list_of_lists):\n padded_lists = pad_into_lists(\n [enumerate(vals) for vals in list_of_lists],\n lambda x: x[0]\n )\n return [\n # remove the index\n [e[1] if e is not None else e for e in carry_previous_over_none(padded)]\n for padded in padded_lists\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the score of a region with peak overlaps as a series.
def get_overlap(peak, region, score_type='simple'):
    series = pd.Series(data=0, index=range(len(region)))
    overlap_type, overlap = determine_overlap(peak, region)
    if overlap_type == 'no_overlap':
        return series
    elif overlap_type == 'equal':
        series[:] = [score(score_type, peak, region) for i in range(overlap)]
    elif overlap_type == 'left':
        assert peak.end - overlap == region.start
        series[:overlap] = [score(score_type, peak, region) for i in range(overlap)]
    elif overlap_type == 'right':
        assert peak.start - region.start + overlap == len(series)
        series[-overlap:] = [score(score_type, peak, region) for i in range(overlap)]
    elif overlap_type == 'whole_region':
        assert overlap == len(series)
        series[:] = [score(score_type, peak, region) for i in range(overlap)]
    elif overlap_type == 'whole_peak':
        left_offset = peak.start - region.start
        right_offset = region.end - peak.end
        assert left_offset + overlap + right_offset == len(series)
        series[left_offset:-right_offset] = [score(score_type, peak, region) for i in range(overlap)]
    else:
        return -1
    assert peak.strand == region.strand
    if peak.strand == '-':
        series = pd.Series([s for s in reversed(series)])
        return series
    else:
        return series
[ "def max_min_score (self):\n peaks = self.peaks\n chrs = peaks.keys()\n chrs.sort()\n x = 0\n y = 100000\n for chrom in chrs:\n if peaks[chrom]:\n m = max([i[4] for i in peaks[chrom]])\n if m>x:\n x=m\n m = min([i[4] for i in peaks[chrom]])\n if m<y:\n y=m\n return (x,y)", "def cal_peak(data1, data2):\n update()\n p1 = np.amax(np.absolute(data1))\n p2 = np.amax(np.absolute(data2))\n score = S(p1, p2)\n return p1, p2, score", "def myfind_peak(xdata,ydata, min_height, min_dis):\n indexes=peakutils.indexes(ydata,thres=min_height, min_dist=min_dis)\n return indexes,xdata[indexes]", "def determine_overlap(peak, region):\n assert(peak.strand == region.strand)\n # print('peak:', peak.start, peak.end)\n # print('region:', region.start, region.end)\n if peak.start >= region.end or region.start >= peak.end:\n # newPeak and region don't overlap\n return 'no_overlap', 0\n elif peak.start == region.start and peak.end == region.end:\n # newPeak and region sizes are equal (completely overlap)\n overlap = peak.end - peak.start\n return 'equal', overlap\n elif peak.start <= region.start and peak.end <= region.end:\n # newPeak overlaps the left side of the region only\n overlap = peak.end - region.start\n return 'left', overlap\n elif peak.start >= region.start and peak.end >= region.end:\n # newPeak overlaps the right side of the region only\n overlap = region.end - peak.start\n return 'right', overlap\n elif peak.start <= region.start and peak.end >= region.end:\n # region is completely contained within newPeak\n overlap = region.end - region.start\n return 'whole_region', overlap\n elif peak.start >= region.start and peak.end <= region.end:\n # newPeak is completely contained within region\n overlap = peak.end - peak.start\n return 'whole_peak', overlap\n else:\n print(\"warning: {}, {} overlaps in an unexpected way.\".format(\n peak, region\n ))\n return 'no_overlap', -1", "def _compute_region_score(self,\n saliency_map: torch.Tensor,\n region: Tuple[int, int, int, int],\n global_mean: float = 0) -> float:\n raw_score = self._select_region(saliency_map, region).sum().item()\n return raw_score - global_mean * self._region_size(region)", "def overlap_score(q1, q2):\n score = 0\n return score", "def find_max_overlaps(\n rps: np.ndarray,\n rp_boxes: np.ndarray\n ) -> np.ndarray:\n a = np.maximum(rps[:, None, 0], rp_boxes[:, 0])\n c = np.minimum(rps[:, None, 2], rp_boxes[:, 2])\n max_par_index = np.argmax(c - a, axis=1)\n\n return max_par_index", "def peak_count_score(trace,x_vals,height=0.0178, prominence=0.107,offset=-2e-10,maxval=5e-10):\n\n example_trace_norm=trace-offset\n example_trace_norm[example_trace_norm<0]=0\n example_trace_norm = (example_trace_norm)/((maxval-offset))\n \n peaks, data = find_peaks_1d(example_trace_norm,prominence,height,prenorm=True)\n \n return len(peaks)", "def get_peak_value(img, x, y, peak_coord, peak_pixel_size=1):\n px, py = peak_coord\n\n # frequency coordinates\n dx = x[1] - x[0]\n dy = y[1] - y[0]\n xx, yy = np.meshgrid(x, y)\n\n # find closest pixel\n ix = np.argmin(np.abs(px - x))\n iy = np.argmin(np.abs(py - y))\n\n # get ROI around pixel for weighted averaging\n roi = get_centered_roi([iy, ix], [3 * peak_pixel_size, 3 * peak_pixel_size])\n img_roi = img[roi[0]:roi[1], roi[2]:roi[3]]\n xx_roi = xx[roi[0]:roi[1], roi[2]:roi[3]]\n yy_roi = yy[roi[0]:roi[1], roi[2]:roi[3]]\n\n # estimate value from weighted average of pixels in ROI, based on overlap with pixel area centered at [px, py]\n weights = np.zeros(xx_roi.shape)\n for ii in range(xx_roi.shape[0]):\n for jj in 
range(xx_roi.shape[1]):\n weights[ii, jj] = pixel_overlap([py, px], [yy_roi[ii, jj], xx_roi[ii, jj]],\n [peak_pixel_size * dy, peak_pixel_size * dx], [dy, dx]) / (dx * dy)\n\n peak_value = np.average(img_roi, weights=weights)\n\n return peak_value", "def find_peak(values):\n\n # values should not contain NaN values\n x = values\n # argrelextrema returns indices of local maxima/minima\n # local maxima\n local_max = argrelextrema(x, np.greater)\n local_max = local_max[0]\n # local minima\n local_min = argrelextrema(x, np.less)\n local_min = local_min[0]\n\n lower, upper = 0, 0\n peaks = []\n lower_minima_idx, upper_minima_idx = -1, -1\n # FIXME height threshold of peak (global max - global min)/10\n height_threshold = (x.max() - x.min())/10\n\n # for each local maxima, if height of the local maxima to its surrounding local minima is\n # greater than (global max - global min)/10, consider it as a peak\n for maximum in local_max:\n try:\n while local_min[lower] < maximum:\n lower += 1\n upper = lower\n lower -= 1\n while (upper < local_min.shape[0]) and (local_min[upper] < maximum):\n upper += 1 \n lower_minima_idx = local_min[lower]\n upper_minima_idx = local_min[upper]\n\n except IndexError:\n lower -= 1\n lower_minima_idx = local_min[lower]\n upper_minima_idx = x.shape[0] - 1\n \n if lower_minima_idx < 0 or upper_minima_idx < 0:\n print \"Error: indices should be > 0\"\n return\n # FIXME height of peak > (global max - global min)/10 \n if x[maximum] - min(x[lower_minima_idx], x[upper_minima_idx]) > height_threshold: \n peaks.append(np.array([lower_minima_idx, maximum, upper_minima_idx]))\n\n ### END - for maximum\n return np.array(peaks)", "def get_max_peak_loc_val(detection_row,example_starts_ends,\n time_bound=np.inf):\n peaks =[]\n for example_id,start_end_tuple in enumerate(example_starts_ends):\n start_time, end_time = start_end_tuple\n # get the lower bound\n l_time = int(max(0,start_time-time_bound))\n if example_id >0:\n l_time=int(max(l_time,\n (start_time + example_starts_ends[example_id-1][0])/2))\n # get the time upperbound\n u_time = int(min(len(detection_row),start_time+time_bound))\n if example_id < len(example_starts_ends) -1:\n u_time=int(min(u_time,\n (start_time+example_starts_ends[example_id+1][0])/2))\n loc = np.argmax(detection_row[l_time:u_time]) + l_time\n val = detection_row[loc]\n peakiness = get_peakiness(detection_row,loc)\n peaks.append((start_time-loc,val,peakiness))\n\n return peaks", "def get_peak_to_peak(data: list):\r\n max = argrelextrema(data, np.greater)[0]\r\n min = argrelextrema(data, np.less)[0]\r\n\r\n pp = abs(data[max[1]]) + abs(data[min[1]])\r\n\r\n return pp", "def match_peak_to_region(region_dic, peak_list, return_list=True):\n # initialzie a dict to record peak existence\n _region_records = {_k:0 for _k in region_dic.keys()}\n for _peak in peak_list:\n for _rid, _region in region_dic.items():\n if _peak['midpoint'] >= _region['start'] and _peak['midpoint'] <= _region['end'] and _peak['chr'] == _region['chr']:\n _region_records[_rid] += _peak['fold']\n break\n if not return_list:\n return _region_records\n else:\n _rids = list(_region_records.keys())\n _rx = np.arange(int(min(_rids)), int(max(_rids))+1)\n _ry = np.zeros(len(_rx))\n for _rid, _signal in _region_records.items():\n _ry[np.where(_rid == _rx)[0]] = _signal\n \n return _rx, _ry", "def findsomepeaks(y,n=10):\n split = int(len(y)/n)\n start = 0\n end = start+split\n splity = []\n for i in range(n):\n splity.append(sci.asarray(y[start:end]))\n start += split\n end += split\n out = []\n 
for ind,section in enumerate(splity):\n maxy = max(section)\n if maxy == max(section[1:-1]): # if max is not at the edge of the spectrum\n out.append(sci.where(section==maxy)[0][0]+split*ind)\n return out", "def _calcPeakRange(sr,n, loFreq=200., hiFreq=2000.):\n\tfftBase = sr / float(n)\n\tpkRange = n/2 + 1\n\tminPos = int(round(loFreq/fftBase))\n\tmaxPos = int(round(hiFreq/fftBase))\n\treturn pkRange, minPos, maxPos", "def find_best_point(self, start_i, end_i, ranges):\n # best_idx = np.argmax(ranges[start_i:end_i])\n best_idx = int(np.mean([start_i,end_i]))\n return best_idx", "def slice_score(self, proc_ranges, slice):\r\n gap_start = slice.start\r\n gap_end = slice.stop\r\n gap_size = gap_end - gap_start\r\n width_score = (gap_size / 810)\r\n distance_score = proc_ranges[gap_start:gap_end].mean() / proc_ranges.mean()\r\n slice_score = (width_score*0.7) + (distance_score*0.3)\r\n return slice_score", "def find_peak_in_range(qmap: np.ndarray, centre: int, window_size: int, adaptive_range: bool=False) -> Tuple[np.ndarray, np.ndarray]:\n half_size = window_size // 2\n search_field = qmap\n #search_field[:, centre-half_size:centre+half_size] = qmap[:, centre-half_size:centre+half_size]\n search_field[np.isnan(search_field)] = 0\n\n ppos = np.array([], dtype='int')\n perr = np.array([])\n for i in range(0,len(search_field[:,0])):\n search_slice = np.zeros(len(qmap[0]))\n low = centre-half_size\n upp = centre+half_size\n search_slice[low:upp] = search_field[i, low:upp]\n tmp = np.argwhere(search_slice==np.max(search_slice))\n perr = np.append(perr, 1/(tmp[0][0]-np.mean(search_slice)))\n ppos = np.append(ppos, tmp[0][0])\n\n if adaptive_range == True:\n centre = int(tmp[0][0])\n\n #plt.plot(qmap[i])\n #plt.plot(ppos[i], qmap[i, ppos[i]], marker='1', markersize=5)\n #wtf = True\n\n #plt.show()\n return ppos, perr", "def get_max_score(self):\n return sum(self.maxpoints.values())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes two intervals (peak, region) and determines whether or not the peak overlaps the left, right, entire region, or not at all.
def determine_overlap(peak, region):
    assert(peak.strand == region.strand)
    # print('peak:', peak.start, peak.end)
    # print('region:', region.start, region.end)
    if peak.start >= region.end or region.start >= peak.end:
        # newPeak and region don't overlap
        return 'no_overlap', 0
    elif peak.start == region.start and peak.end == region.end:
        # newPeak and region sizes are equal (completely overlap)
        overlap = peak.end - peak.start
        return 'equal', overlap
    elif peak.start <= region.start and peak.end <= region.end:
        # newPeak overlaps the left side of the region only
        overlap = peak.end - region.start
        return 'left', overlap
    elif peak.start >= region.start and peak.end >= region.end:
        # newPeak overlaps the right side of the region only
        overlap = region.end - peak.start
        return 'right', overlap
    elif peak.start <= region.start and peak.end >= region.end:
        # region is completely contained within newPeak
        overlap = region.end - region.start
        return 'whole_region', overlap
    elif peak.start >= region.start and peak.end <= region.end:
        # newPeak is completely contained within region
        overlap = peak.end - peak.start
        return 'whole_peak', overlap
    else:
        print("warning: {}, {} overlaps in an unexpected way.".format(
            peak, region
        ))
        return 'no_overlap', -1
[ "def regions_overlap(r1,r2):\n x = False\n for loc in r1.locs:\n if loc in r2.locs:\n return True\n return False", "def _do_intervals_overlap(intervals_a, intervals_b):\n\n def contained(points, intervals):\n return np.logical_and(\n np.less_equal(intervals[:, 0], points),\n np.less_equal(points, intervals[:, 1]))\n\n return np.logical_or(\n np.logical_or(\n contained(intervals_a[:, 0], intervals_b),\n contained(intervals_a[:, 1], intervals_b)),\n np.logical_or(\n contained(intervals_b[:, 0], intervals_a),\n contained(intervals_b[:, 1], intervals_a)))", "def overlap(a1, a2, b1, b2):\n\tassert a1 <= a2\n\tassert b1 <= b2\n\tassert isinstance(a1, int) and isinstance(a2, int) and isinstance(b1, int) and isinstance(b2, int)\n\t\n\t# if a interval is completely to the left of the b interval\n\tif a2 < b1:\n\t\treturn False\n\t# if a interval is completely to the right of the b interval\n\telif a1 > b2:\n\t\treturn False\n\telse:\n\t\treturn True", "def no_overlap_check(self, p1, p2, bboxes):\n x_range_new = set(range(p1[0], p2[0]+1))\n y_range_new = set(range(p1[1], p2[1]+1))\n for xmin, ymin, xmax, ymax in bboxes:\n x_range_bb = set(range(xmax, ymax+1))\n y_range_bb = set(range(xmin, ymin+1))\n if (x_range_new & x_range_bb) and (y_range_new & y_range_bb): # if intersection for x and y values of both\n return False # Overlap\n return True # No overlap", "def pin_overlap(self, pin1, pin2, pitch):\n\n # FIXME: If the pins are not in a row, this may break.\n # However, a top pin shouldn't overlap another top pin,\n # for example, so the extra comparison *shouldn't* matter.\n\n # Pin 1 must be in the \"BOTTOM\" set\n x_overlap = pin1.by() < pin2.by() and abs(pin1.center().x - pin2.center().x) < pitch\n\n # Pin 1 must be in the \"LEFT\" set\n y_overlap = pin1.lx() < pin2.lx() and abs(pin1.center().y - pin2.center().y) < pitch\n overlaps = (not self.vertical and x_overlap) or (self.vertical and y_overlap)\n return overlaps", "def isoverlap(r1, r2):\n y1 = r1[1]\n x1 = r1[0]\n h1 = r1[3]\n w1 = r1[2]\n \n y2 = r2[1]\n x2 = r2[0]\n h2 = r2[3]\n w2 = r2[2]\n \n if ((x1+w1)<x2 or (x2+w2)<x1 or (y1+h1)<y2 or (y2+h2)<y1):\n return False\n else:\n return True", "def check_overlap(l1_x, l1_y, r1_x, r1_y, l2_x, l2_y, r2_x, r2_y):\r\n# If one rectangle is on total left side of other\r\n if bool(l1_x > r2_x) ^ bool(l2_x > r1_x):\r\n return False\r\n# If one rectangle is above other\r\n if bool(l1_y < r2_y) ^ bool(l2_y < r1_y):\r\n return False\r\n return True", "def overlap(a, b):\n # if any start / end is None then it doesn't overlap\n if a[0] is None or a[1] is None or b[0] is None or b[1] is None:\n return False\n # If the casing start/end intersects\n records_intersect = (a[0] > b[0] and a[0] < b[1]) or (a[1] > b[0] and a[1] < b[1])\n # If the series start or end in the same place\n records_overlap = (a[0] == b[0]) or (a[1] == b[1])\n return records_intersect or records_overlap", "def overlaps(self, other: \"Interval\") -> bool:\n return not (self.end <= other.start or self.start >= other.end)", "def detectOverlap(projection1, projection2):\n\t\tmin1, max1 = projection1\n\t\tmin2, max2 = projection2\n\t\treturn max1 > min2 and min1 < max2", "def match_peak_to_region(region_dic, peak_list, return_list=True):\n # initialzie a dict to record peak existence\n _region_records = {_k:0 for _k in region_dic.keys()}\n for _peak in peak_list:\n for _rid, _region in region_dic.items():\n if _peak['midpoint'] >= _region['start'] and _peak['midpoint'] <= _region['end'] and _peak['chr'] == _region['chr']:\n 
_region_records[_rid] += _peak['fold']\n break\n if not return_list:\n return _region_records\n else:\n _rids = list(_region_records.keys())\n _rx = np.arange(int(min(_rids)), int(max(_rids))+1)\n _ry = np.zeros(len(_rx))\n for _rid, _signal in _region_records.items():\n _ry[np.where(_rid == _rx)[0]] = _signal\n \n return _rx, _ry", "def is_index_in_peak_range(idx, peak_range):\n for p_left, p_right in peak_range:\n if p_left <= idx <= p_right:\n return True\n return False", "def _overlaps_vertically(self, r0: Rectangle, r1: Rectangle) -> bool:\n return (\n int(r0.get_y()) <= int(r1.get_y()) <= int(r0.get_y() + r0.get_height())\n ) or (int(r1.get_y()) <= int(r0.get_y()) <= int(r1.get_y() + r1.get_height()))", "def mark_overlaps_low_res(low_list,high_list):\n marker=np.zeros(len(high_list))\n for index, row in low_list.iterrows():\n c1=row[0],row[1],row[2],row[4],row[5]\n for index2, row2 in high_list.iterrows():\n c2=row2[0],row2[1],row2[2],row2[4\n ],row2[5]\n if c2[0]==c1[0] and abs(c2[1]>=c1[1]) and c2[2]<=c1[2] and abs(c2[3]>=c1[3]) and c2[4]<=c1[4]:\n marker[index2]=marker[index2]+1\n else:\n marker[index2]=marker[index2]+0\n high_list[17]=marker\n low_list[17]=0\n return(high_list,low_list)", "def pins_overlap(self, other, pitch):\n\n for pin1 in self.pins:\n for pin2 in other.pins:\n if self.pin_overlap(pin1, pin2, pitch):\n return True\n\n return False", "def _interval_sv_overlap(xs, x1, x2):\n xmin = min(x1, x2)\n xmax = max(x1, x2)\n if xmin <= xs[0]:\n ll = 0\n elif xmin >= xs[-1]:\n ll = len(xs) - 1\n else:\n ll = 0\n for i, x in enumerate(xs):\n if x > xmin:\n ll = i - 1\n break\n if xmax >= xs[-1]:\n ul = len(xs) - 1\n elif xmax <= xs[0]:\n ul = 0\n else:\n ul = len(xs) - 1\n for i, x in enumerate(xs):\n if not x < xmax:\n ul = i\n break\n if ll != ul:\n return ll, ul\n else:\n if ll != len(xs) - 1:\n return ll, ul + 1\n else:\n return ll - 1, ul", "def do_overlap(rect1, rect2):\n l1, r1 = rect1.topleft, rect1.bottomright\n l2, r2 = rect2.topleft, rect2.bottomright\n # conditions for non overlapping\n if r1[0] < l2[0] or r2[0] < l1[0]:\n return False\n if r2[1] < l1[1] or r1[1] < l2[1]:\n return False\n\n return True", "def overlaps(self, ext2):\r\n\t\ts1, e1 = self.data['start'], self.data['end']\r\n\t\ts2, e2 = ext2.data['start'], ext2.data['end']\r\n\t\treturn (self.thash == ext2.thash and all([e2 > s1, e1 > s2]))", "def do_these_genes_overlap(gene1_coords, gene2_coords):\n overlap = max(0,\n min(gene1_coords[1], gene2_coords[1]) - max(gene1_coords[0],\n gene2_coords[0]))\n\n return True if overlap > 0 else False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
masks the intervals in df based on where peaks are. If a peak does not directly overlap the region at a given position, mask that position with nan. If a peak does overlap, preserve the score.
def mask(df, peak, stream):
    progress = trange(len(df.index))
    for i in df.index:
        region = bedtool_from_renamed_twobed_index(i, stream)
        masked_interval = peak.values(region.chrom, region.start, region.end, region.strand)
        if sum(masked_interval) != 0:
            for pos in masked_interval.index:
                df.loc[i, pos] = df.loc[i, pos] if masked_interval.loc[pos] > 0 else np.nan
        progress.update(1)
    return df
[ "def _remove_overlaps(segmentation_mask, fronts):\n fidxs, sidxs = np.where((segmentation_mask != fronts) & (segmentation_mask != 0) & (fronts != 0))\n fronts[fidxs, sidxs] = 0", "def detect_peaks_1d(timeseries, delta_peak, threshold, peak_width=5):\n\n # Sort time series by magnitude.\n max_idx = np.squeeze(timeseries.argsort())[::-1]\n\n # Remove peaks within delta_peak to the array boundary\n max_idx = max_idx[max_idx > delta_peak]\n max_idx = max_idx[max_idx < np.size(timeseries) - delta_peak]\n\n max_values = np.zeros_like(timeseries[max_idx])\n max_values[:] = np.squeeze(timeseries[max_idx])\n\n # Number of peaks exceeding threshold\n num_big_ones = np.sum(timeseries > threshold)\n try:\n max_values = max_values[:num_big_ones]\n max_idx = max_idx[:num_big_ones]\n except:\n print(\"detect_peaks_1d: No peaks in the unmasked part of the array.\")\n return np.array([])\n\n # Mark the indices we need to skip here\n max_idx_copy = np.zeros_like(max_idx)\n max_idx_copy[:] = max_idx\n\n # Eliminate values exceeding the threshold within delta_peak of another\n # for idx, mv in enumerate(max_values):\n # print 'iterating over %d peaks' % ( np.size(max_idx))\n for i, idx in enumerate(max_idx):\n current_idx = max_idx_copy[i]\n if (max_idx_copy[i] == -1):\n # print 'idx %d is zeroed out' % (idx)\n continue\n\n # Check if this value is larger than the valueghbouring values of the\n # timeseries. If it is not, continue with next iteration of for loop\n if (timeseries[current_idx] < timeseries[\n current_idx - peak_width: current_idx + peak_width]).any():\n max_idx_copy[i] = -1\n continue\n\n # Zero out all peaks closer than delta_peak\n close_idx = np.abs(max_idx_copy - idx)\n close_ones = np.squeeze(np.where(close_idx < delta_peak)[0])\n max_idx_copy[close_ones] = -1\n # Copy back current value\n max_idx_copy[i] = max_idx[i]\n\n # Remove all entries equal to -1\n max_idx_copy = max_idx_copy[max_idx_copy != -1]\n max_idx_copy = max_idx_copy[max_idx_copy < np.size(timeseries)]\n\n # Return an ndarray with all peaks of large amplitude indices\n return max_idx_copy", "def calculate_non_mask_overlaps(x_mask, y_mask):\n x_is_not_nan = 1 * ~x_mask\n y_is_not_nan = 1 * ~y_mask\n\n r = np.dot(x_is_not_nan.T, y_is_not_nan)\n return r", "def peakscleaning(df):\n df.dropna(inplace=True)\n\n #drop relative intensities\n df.drop(columns=[i for i in df.columns.values if 'val' in i],inplace=True)\n return df", "def _correct_outlier_physiological(\n rpeaks: pd.DataFrame, bool_mask: np.array, hr_thres: Tuple[float, float], **kwargs # noqa: ARG001\n) -> np.array:\n # physiological outlier: minimum/maximum heart rate threshold\n bool_mask = np.logical_or(\n bool_mask,\n (rpeaks[\"RR_Interval\"] > (60 / hr_thres[0])) | (rpeaks[\"RR_Interval\"] < (60 / hr_thres[1])),\n )\n return bool_mask", "def _detect_peaks(\n x,\n mph=None,\n mpd=1,\n threshold=0,\n edge=\"rising\",\n kpsh=False,\n valley=False,\n show=False,\n ax=None,\n):\n\n x = np.atleast_1d(x).astype(\"float64\")\n if x.size < 3:\n return np.array([], dtype=int)\n if valley:\n x = -x\n # find indices of all peaks\n dx = x[1:] - x[:-1]\n # handle NaN's\n indnan = np.where(np.isnan(x))[0]\n if indnan.size:\n x[indnan] = np.inf\n dx[np.where(np.isnan(dx))[0]] = np.inf\n ine, ire, ife = np.array([[], [], []], dtype=int)\n if not edge:\n ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]\n else:\n if edge.lower() in [\"rising\", \"both\"]:\n ire = np.where(\n (np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0)\n )[0]\n if edge.lower() in 
[\"falling\", \"both\"]:\n ife = np.where(\n (np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0)\n )[0]\n ind = np.unique(np.hstack((ine, ire, ife)))\n # handle NaN's\n if ind.size and indnan.size:\n # NaN's and values close to NaN's cannot be peaks\n ind = ind[\n np.in1d(\n ind,\n np.unique(np.hstack((indnan, indnan - 1, indnan + 1))),\n invert=True,\n )\n ]\n # first and last values of x cannot be peaks\n if ind.size and ind[0] == 0:\n ind = ind[1:]\n if ind.size and ind[-1] == x.size - 1:\n ind = ind[:-1]\n # remove peaks < minimum peak height\n if ind.size and mph is not None:\n ind = ind[x[ind] >= mph]\n # remove peaks - neighbors < threshold\n if ind.size and threshold > 0:\n dx = np.min(\n np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0\n )\n ind = np.delete(ind, np.where(dx < threshold)[0])\n # detect small peaks closer than minimum peak distance\n if ind.size and mpd > 1:\n ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height\n idel = np.zeros(ind.size, dtype=bool)\n for i in range(ind.size):\n if not idel[i]:\n # keep peaks with the same height if kpsh is True\n idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) & (\n x[ind[i]] > x[ind] if kpsh else True\n )\n idel[i] = 0 # Keep current peak\n # remove the small peaks and sort back the indices by their occurrence\n ind = np.sort(ind[~idel])\n\n return ind", "def find_peak_score(self):\n cur_SPEG_DF = spe_DF_clean.loc[(spe_DF_clean['DM'] >= self.min_DM) &\n (spe_DF_clean['DM'] <= self.max_DM) &\n (spe_DF_clean['time'] >= self.min_time) &\n (spe_DF_clean['time'] <= self.max_time), ]\n # group by DM\n cur_SPEG_DF = cur_SPEG_DF.groupby('DM', group_keys=False).apply(lambda x: x.loc[x.SNR.idxmax()])\n\n # # SNR - 5\n # print cur_SPEG_DF.head()\n peak_DM = self.peak_DM\n\n # check clipping or not\n if self.merged:\n # clipped = not any(abs(cur_SPEG_DF['DM'] - DM_centered) <= DMbound)\n lower_DMs = cur_SPEG_DF['DM'][cur_SPEG_DF['DM'] < peak_DM]\n higher_DMs = cur_SPEG_DF['DM'][cur_SPEG_DF['DM'] > peak_DM]\n left_neighbor = (lower_DMs - peak_DM > -6 * self.peak_DM_spacing).any()\n right_neighbor = (higher_DMs - peak_DM < 6 * self.peak_DM_spacing).any()\n\n if not left_neighbor or not right_neighbor:\n self.clipped = True\n\n # not clipped SPEG\n if not self.clipped:\n if self.peak_SNR > 7:\n self.centered_DM = self.peak_DM\n cur_peak_left = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] <= self.peak_DM, ]\n cur_peak_right = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] >= self.peak_DM, ]\n\n sum_SNR_left = cur_peak_left['SNR'].sum()\n sum_SNR_right = cur_peak_right['SNR'].sum()\n\n cur_sym_SNR = 100\n if sum_SNR_right > 0:\n cur_sym_SNR = sum_SNR_left / sum_SNR_right\n\n DM_range_left = self.peak_DM - float(cur_SPEG_DF['DM'].head(1))\n DM_range_right = float(cur_SPEG_DF['DM'].tail(1)) - self.peak_DM\n\n cur_sym_DM = 100\n if DM_range_right > 0:\n cur_sym_DM = DM_range_left / DM_range_right\n\n self.SNR_sym_index = cur_sym_SNR\n self.DM_sym_index = cur_sym_DM\n\n else:\n # weak pulses with few points, # decide the centerDM, find the one with maximum symmetry\n # within 0.98 of the max SNR\n cur_SNR_thresh = self.peak_SNR * 0.98\n peak_candidates = cur_SPEG_DF.loc[(cur_SPEG_DF['SNR']) > cur_SNR_thresh].copy()\n # print peakCandiates\n\n # select the top 2\n if peak_candidates.shape[0] > 1:\n # ['a', 'b'], ascending=[True, False]\n peak_candidates.sort_values(by='SNR', ascending=False, inplace=True)\n peak_candidates = peak_candidates.iloc[:2]\n\n sym_KVs = [] # store the curCenterDM and symmetry index pair\n for DMi in 
peak_candidates['DM']: # find the one with the largest symmetry\n cur_peak_left = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] <= DMi, ]\n cur_peak_right = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] >= DMi, ]\n\n # changed this to left vs. right instead of min vs max\n sum_SNR_left = cur_peak_left['SNR'].sum()\n sum_SNR_right = cur_peak_right['SNR'].sum()\n\n cur_SNR_sym = 100\n if sum_SNR_right > 0:\n cur_SNR_sym = sum_SNR_left / sum_SNR_right\n # closer to one, means more symmetric\n cur_SNR_sym_to_one = abs(1 - cur_SNR_sym)\n\n DM_range_left = DMi - float(cur_SPEG_DF['DM'].head(1))\n DM_range_right = float(cur_SPEG_DF['DM'].tail(1)) - DMi\n\n cur_DM_sym = 100\n if DM_range_right > 0:\n cur_DM_sym = DM_range_left / DM_range_right\n\n # sum_SNR_min = min(cur_peak_left['SNR'].sum(), cur_peak_right['SNR'].sum())\n # sum_SNR_max = max(cur_peak_left['SNR'].sum(), cur_peak_right['SNR'].sum())\n\n # cur_SNR_sym = sum_SNR_min / sum_SNR_max\n\n # DM_range1 = DMi - float(cur_SPEG_DF['DM'].head(1))\n # DM_range2 = float(cur_SPEG_DF['DM'].tail(1)) - DMi\n # cur_DM_sym = min(DM_range1, DM_range2) / max(DM_range1, DM_range2)\n\n sym_KVs.append([DMi, cur_SNR_sym, cur_DM_sym, cur_SNR_sym_to_one])\n\n # convert to data frame\n sym_KVs_DF = DataFrame(sym_KVs)\n sym_KVs_DF.columns = ['DM', 'SNR_sym', 'DM_sym', 'SNR_sym_to_one']\n\n # sort by SNR_sym close to 1\n cur_peak_row = sym_KVs_DF.loc[sym_KVs_DF.SNR_sym_to_one.idxmin()]\n\n cur_center_DM = cur_peak_row[0]\n cur_SNR_sym = cur_peak_row[1]\n cur_DM_sym = cur_peak_row[2]\n\n # print curCenterDM, curSymIndex\n self.centered_DM = cur_center_DM\n self.SNR_sym_index = cur_SNR_sym\n self.DM_sym_index = cur_DM_sym\n\n else: # clipped SPEG\n cur_peak_left = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] < self.peak_DM, ]\n cur_peak_right = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] > self.peak_DM, ]\n\n sum_SNR_left = cur_peak_left['SNR'].sum()\n sum_SNR_right = cur_peak_right['SNR'].sum()\n\n cur_SNR_sym = 100\n if sum_SNR_right > 0:\n cur_SNR_sym = sum_SNR_left / sum_SNR_right\n\n self.SNR_sym_index = cur_SNR_sym\n\n df1 = None\n df2 = None\n df3 = None\n df4 = None\n df5 = None\n df6 = None\n\n if not self.clipped: # not clipped SPEG\n # if self.SNR_sym_index > 0.25 and self.DM_sym_index > 0.2: # tunable\n if 4.0 > self.SNR_sym_index > 0.25 and 5.0 > self.DM_sym_index > 0.2: # tunable\n cur_peak_left = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] <= self.centered_DM, ]\n cur_peak_right = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] >= self.centered_DM, ]\n\n # find central part and divide into left and right\n cur_central_DMs_left = cur_peak_left.loc[cur_SPEG_DF['SNR'] >= self.peak_SNR * 0.9, 'DM']\n\n if cur_central_DMs_left.size > 0:\n # print cur_central_DMs_left\n cur_DM_left_temp = cur_central_DMs_left.tolist()[0]\n else: # no points between 90% and 100%\n cur_side_DMs_left = cur_peak_left.loc[cur_SPEG_DF['SNR'] < self.peak_SNR * 0.9, 'DM']\n # print cur_side_DMs_left.shape\n # print cur_side_DMs_left\n cur_DM_left_temp = cur_side_DMs_left.tolist()[-1]\n\n # when DM spacing is small\n if self.peak_DM_spacing <= 0.5:\n # really wide peaks\n if self.centered_DM - cur_DM_left_temp >= 6:\n self.center_startDM = self.centered_DM - 6\n # moderately wide peaks\n elif self.centered_DM - cur_DM_left_temp >= 1:\n self.center_startDM = cur_DM_left_temp\n # relatively narrow peaks\n elif self.centered_DM - self.min_DM >= 1:\n self.center_startDM = self.centered_DM - 1\n # narrow peaks\n else:\n self.center_startDM = self.min_DM\n # when DM spacing is moderately large\n elif self.peak_DM_spacing < 3:\n # really wide peaks\n if 
self.centered_DM - cur_DM_left_temp >= 6:\n self.center_startDM = self.centered_DM - 6\n # moderately wide peaks\n elif self.centered_DM - cur_DM_left_temp >= 2 * self.peak_DM_spacing:\n self.center_startDM = cur_DM_left_temp\n # relatively narrow peaks\n elif self.centered_DM - self.min_DM > 2 * self.peak_DM_spacing:\n self.center_startDM = self.centered_DM - 2 * self.peak_DM_spacing\n # narrow peaks\n else:\n self.center_startDM = self.min_DM\n # when DM spacing is large\n else:\n # really wide peaks\n if self.centered_DM - cur_DM_left_temp >= 2 * self.peak_DM_spacing:\n self.center_startDM = self.centered_DM - 2 * self.peak_DM_spacing\n # relatively narrow peaks\n elif self.centered_DM - self.min_DM >= 2 * self.peak_DM_spacing:\n self.center_startDM = self.centered_DM - 2 * self.peak_DM_spacing\n # narrow peaks\n else:\n self.center_startDM = self.min_DM\n\n # the right part\n cur_central_DMs_right = cur_peak_right.loc[cur_SPEG_DF['SNR'] >= self.peak_SNR * 0.9, 'DM']\n\n if cur_central_DMs_right.size > 0:\n # print cur_central_DMs_right\n cur_DM_right_temp = cur_central_DMs_right.tolist()[-1]\n # print cur_DM_right_temp\n else: # no points between 100% and 90%\n cur_side_DMs_right = cur_peak_right.loc[cur_SPEG_DF['SNR'] < self.peak_SNR * 0.9, 'DM']\n cur_DM_right_temp = cur_side_DMs_right.tolist()[0]\n\n # when DM spacing is small\n if self.peak_DM_spacing <= 0.5:\n # really wide peaks\n if cur_DM_right_temp - self.centered_DM >= 6:\n self.center_stopDM = self.centered_DM + 6\n # moderately wide peaks\n elif cur_DM_right_temp - self.centered_DM >= 1:\n self.center_stopDM = cur_DM_right_temp\n # relatively narrow peaks\n elif self.max_DM - self.centered_DM >= 1:\n self.center_stopDM = self.centered_DM + 1\n # narrow peaks\n else:\n self.center_stopDM = self.max_DM\n # when DM spacing is moderately large\n elif self.peak_DM_spacing < 3:\n # really wide peaks\n if cur_DM_right_temp - self.centered_DM >= 6:\n self.center_stopDM = self.centered_DM + 6\n # moderately wide peaks\n elif cur_DM_right_temp - self.centered_DM >= 2 * self.peak_DM_spacing:\n self.center_stopDM = cur_DM_left_temp\n # relatively narrow peaks\n elif self.max_DM - self.centered_DM >= 2 * self.peak_DM_spacing:\n self.center_stopDM = self.centered_DM + 2 * self.peak_DM_spacing\n # narrow peaks\n else:\n self.center_stopDM = self.max_DM\n # when DM spacing is large\n else:\n # really wide peaks\n if cur_DM_right_temp - self.centered_DM >= 2 * self.peak_DM_spacing:\n self.center_stopDM = self.centered_DM + 2 * self.peak_DM_spacing\n # relatively narrow peaks\n elif self.max_DM - self.centered_DM >= 2 * self.peak_DM_spacing:\n self.center_stopDM = self.centered_DM + 2 * self.peak_DM_spacing\n # narrow peaks\n else:\n self.center_stopDM = self.max_DM\n\n # check if both sides have more than 3 points, at least 7 points in total\n if cur_peak_left.shape[0] > 3 and cur_peak_right.shape[0] > 3:\n # at least 4 points on each side, 7 points in total\n left_DM_range = self.centered_DM - float(cur_peak_left['DM'].head(1)) # head is the minimum\n right_DM_range = float(cur_peak_right['DM'].tail(1)) - self.centered_DM # tail is the maximum\n left_DM_step = left_DM_range / 3\n right_DM_step = right_DM_range / 3\n if right_DM_step > left_DM_step:\n right_DM_step = min(right_DM_step, 2 * left_DM_step)\n else:\n left_DM_step = min(left_DM_step, 2 * right_DM_step)\n\n DM1 = self.centered_DM - left_DM_step\n DM2 = self.centered_DM - 2 * left_DM_step\n DM3 = self.centered_DM + right_DM_step\n DM4 = self.centered_DM + 2 * right_DM_step\n\n df1 
= cur_peak_left.loc[cur_peak_left['DM'] >= DM1, ]\n # check df1: if there are at least 1 point between DM1 and center DM\n if df1.shape[0] > 1:\n # check df2\n df2 = cur_peak_left.loc[(cur_peak_left['DM'] >= DM2) & (cur_peak_left['DM'] <= DM1), ]\n if df2.shape[0] > 1:\n # if there are at least 2 points between DM2 and DM1\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM2), ]\n # if there are less than 2 points below DM2\n if df3.shape[0] < 2:\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM1), ]\n # df3 is a super set of df2, which has at least 2 points\n else:\n # if there are less than 2 points between DM2 and DM1\n df2 = cur_peak_left.loc[(cur_peak_left['DM'] >= DM2), ]\n # df2 is a super set of df1, which has at least 2 points\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM2), ]\n # if there are less than 2 points below DM2\n if df3.shape[0] < 2:\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM1), ]\n # if there are less than 2 points below DM1\n if df3.shape[0] < 2:\n df3 = cur_peak_left\n # df3 is a super set of df1, which has at least 2 points\n else: # if there is not any point between DM1 and center DM\n df1 = cur_peak_left.loc[(cur_peak_left['DM'] >= DM2), ]\n # if there is at least 1 point between DM2 and DM1\n if df1.shape[0] > 1:\n df2 = cur_peak_left.loc[(cur_peak_left['DM'] >= DM2) & (cur_peak_left['DM'] <= DM1), ]\n # if there are at least 2 points between DM2 and DM1\n if df2.shape[0] > 1:\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM2), ]\n # if there are less than 2 points below DM2\n if df3.shape[0] < 2:\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM1), ]\n # df3 is a super set of df2, which has at least 2 points\n else: # there is 1 and only 1 point between DM2 and DM1\n df2 = df1\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM2), ]\n # if there are less than 2 points below DM2\n if df3.shape[0] < 2:\n # if there is 1 point below DM2\n df3 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM1), ]\n # right side\n df4 = cur_peak_right.loc[cur_peak_right['DM'] <= DM3,]\n # check df4: if there are at least 2 points between center and DM3\n if df4.shape[0] > 1:\n # check df5: if there are at least 2 points between DM3 and DM4\n df5 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3) & (cur_peak_right['DM'] <= DM4), ]\n # if there are at least 2 points between DM3 and DM4\n if df5.shape[0] > 1:\n # if there are at least 2 points above DM4\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM4), ]\n # if there are less than 2 points above DM4\n if df6.shape[0] < 2:\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3), ]\n # df6 is a super set of df5, which has more than 2 points\n # if there are at less than 2 points between DM3 and DM4\n else:\n df5 = cur_peak_right.loc[(cur_peak_right['DM'] <= DM4), ]\n # df5 is a super set of df4, which has at least 2 points\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM4), ]\n # if there are less than 2 points above DM4\n if df6.shape[0] < 2:\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3), ]\n # if there are at less than 2 points above DM3\n if df6.shape[0] < 2:\n df6 = cur_peak_right\n else: # if there is not any point between center and DM3\n df4 = cur_peak_right.loc[(cur_peak_right['DM'] <= DM4), ]\n if df4.shape[0] > 1:\n # at least 1 point between DM3 and DM4\n df5 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3) & (cur_peak_right['DM'] <= DM4), ]\n if df5.shape[0] > 1:\n # at least 2 points between DM3 and DM4\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM4), ]\n if 
df6.shape[0] < 2:\n # if there is only 1 point above DM4\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3), ]\n # df6 is a super set of df5, which has at least 2 points\n else: # exactly 1 point between DM3 and DM4\n df5 = df4\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM4), ]\n # only one point above DM4\n if df6.shape[0] < 2:\n df6 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3), ]\n elif cur_peak_left.shape[0] > 2 and cur_peak_right.shape[0] > 2:\n # on one side there's at most 3 points, in total 5 points or more\n if 2.041 > self.DM_sym_index > 0.49:\n if cur_peak_left.shape[0] > 3: # divide into 2 parts\n nrow_left = cur_peak_left.shape[0] # number of rows in left peak\n nrowDF = cur_peak_left.shape[0] // 2 # number of rows in a df\n\n df1 = cur_peak_left.iloc[(nrow_left - nrowDF):]\n df2 = cur_peak_left.iloc[: (nrow_left - nrowDF)]\n else: # equals to 3\n df1 = cur_peak_left.iloc[1:]\n df2 = cur_peak_left.iloc[0:2]\n # df3 = cur_peak_left\n\n if cur_peak_right.shape[0] > 3: # divide into 2 parts\n # nrow_right = cur_peak_right.shape[0]\n nrowDF = cur_peak_right.shape[0] // 2 # number of rows in a df\n df4 = cur_peak_right.iloc[: nrowDF]\n df5 = cur_peak_right.iloc[nrowDF:]\n # df6 = cur_peak_right.iloc[(2 * nrowDF):]\n else:\n df4 = cur_peak_right.iloc[0:2]\n df5 = cur_peak_right.iloc[1:]\n # df6 = cur_peak_right\n\n else: # clipped SPEG\n # min side greater than 3\n if 10.0 > self.SNR_sym_index > 0.1:\n # print self.peak_DM\n cur_peak_left = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] < self.peak_DM,]\n cur_peak_right = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] > self.peak_DM,]\n if cur_peak_left.shape[0] > 3 and cur_peak_right.shape[0] > 3:\n # at least 4 points on each side, 9 points in total\n left_DM_range = float(cur_peak_left['DM'].tail(1)) - float(cur_peak_left['DM'].head(1))\n # head is the minimum\n right_DM_range = float(cur_peak_right['DM'].tail(1)) - float(cur_peak_right['DM'].head(1))\n # tail is the maximum\n\n left_DM_step = left_DM_range // 2\n right_DM_step = right_DM_range // 2\n\n if right_DM_step > left_DM_step:\n right_DM_step = min(right_DM_step, 2 * left_DM_step)\n else:\n left_DM_step = min(left_DM_step, 2 * right_DM_step)\n\n DM1 = float(cur_peak_left['DM'].tail(1)) - left_DM_step\n DM3 = float(cur_peak_right['DM'].head(1)) + right_DM_step\n\n df1 = cur_peak_left.loc[(cur_peak_left['DM'] >= DM1), ]\n # check df1: if there are at least 2 point between DM1 and center DM\n if df1.shape[0] > 1:\n # check df2\n df2 = cur_peak_left.loc[(cur_peak_left['DM'] <= DM1), ]\n # if there are at least 2 points between DM2 and DM1\n if df2.shape[0] < 2:\n df2 = None\n\n # right side\n df4 = cur_peak_right.loc[cur_peak_right['DM'] <= DM3, ]\n # check df4: if there are at least 2 points between center and DM3\n if df4.shape[0] > 1:\n # check df5: if there are at least 2 points between DM3 and DM4\n df5 = cur_peak_right.loc[(cur_peak_right['DM'] >= DM3), ]\n # if there are at least 2 points between DM3 and DM4\n if df5.shape[0] < 2:\n df5 = None\n\n # select the central part\n df_left = cur_peak_left.tail(cur_peak_left.shape[0] // 2)\n df_right = cur_peak_right.head(cur_peak_right.shape[0] // 2)\n\n # use 4.999 instead of 5 to avoid 0 weights\n model_left = LinearRegression().fit(X=df_left['DM'].values.reshape(-1, 1), y=df_left['SNR'],\n sample_weight=(df_left['SNR'] - 4.999))\n\n model_right = LinearRegression().fit(X=df_right['DM'].values.reshape(-1, 1), y=df_right['SNR'],\n sample_weight=(df_right['SNR'] - 4.999))\n\n cur_DM_min = float(df_left['DM'].tail(1))\n cur_DM_max 
= float(df_right['DM'].head(1))\n cur_DM_min_idx = DM_dict.get(cur_DM_min)\n cur_DM_max_idx = DM_dict.get(cur_DM_max)\n cur_DMs = DMs[cur_DM_min_idx:cur_DM_max_idx]\n\n intercept_left = model_left.intercept_\n slope_left = model_left.coef_[0]\n\n intercept_right = model_right.intercept_\n slope_right = model_right.coef_[0]\n\n fitted_left = cur_DMs * slope_left + intercept_left\n fitted_right = cur_DMs * slope_right + intercept_right\n\n fitted_diff = abs(fitted_left - fitted_right)\n min_diff = min(fitted_diff)\n # print self.centered_DM\n self.centered_DM = cur_DMs[fitted_diff == min_diff][0]\n\n # re-divide the cluster\n cur_peak_left2 = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] < self.centered_DM, ]\n cur_peak_right2 = cur_SPEG_DF.loc[cur_SPEG_DF['DM'] > self.centered_DM, ]\n\n left_DM_range2 = self.centered_DM - float(cur_peak_left2['DM'].head(1)) # head is the minimum\n right_DM_range2 = float(cur_peak_right2['DM'].tail(1)) - self.centered_DM # tail is the maximum\n\n cur_DM_sym = 100\n if right_DM_range2 > 0:\n cur_DM_sym = left_DM_range2 / right_DM_range2\n\n self.DM_sym_index = cur_DM_sym\n\n # find central part, devide into left and right\n cur_central_DMs_left = cur_peak_left.loc[cur_SPEG_DF['SNR'] >= self.peak_SNR * 0.9, 'DM']\n\n if cur_central_DMs_left.size > 0:\n cur_DM_left_temp = cur_central_DMs_left.tolist()[0]\n else: # no points between 90% and 100%\n cur_side_DMs_left = cur_peak_left.loc[cur_SPEG_DF['SNR'] < self.peak_SNR * 0.9, 'DM']\n cur_DM_left_temp = cur_side_DMs_left.tolist()[-1]\n\n # when DM spacing is small\n if self.peak_DM_spacing <= 0.5:\n # really wide peaks\n if self.centered_DM - cur_DM_left_temp >= 6 * 2:\n self.center_startDM = self.centered_DM - 6 * 2\n # moderately wide peaks\n elif self.centered_DM - cur_DM_left_temp >= 2 * 2:\n self.center_startDM = cur_DM_left_temp\n # relatively narrow peaks\n elif self.centered_DM - self.min_DM >= 2 * 2:\n self.center_startDM = self.centered_DM - 2 * 2\n # narrow peaks\n else:\n self.center_startDM = self.min_DM\n # when DM spacing is moderately large\n elif self.peak_DM_spacing < 3:\n # really wide peaks\n if self.centered_DM - cur_DM_left_temp >= 6 * 2:\n self.center_startDM = self.centered_DM - 6 * 2\n # moderately wide peaks\n elif self.centered_DM - cur_DM_left_temp >= 2 * self.peak_DM_spacing * 2:\n self.center_startDM = cur_DM_left_temp\n # relatively narrow peaks\n elif self.centered_DM - self.min_DM > 2 * self.peak_DM_spacing * 2:\n self.center_startDM = self.centered_DM - 2 * self.peak_DM_spacing * 2\n # narrow peaks\n else:\n self.center_startDM = self.min_DM\n # when DM spacing is large\n else:\n # really wide peaks\n if self.centered_DM - cur_DM_left_temp >= 2 * self.peak_DM_spacing * 2:\n self.center_startDM = self.centered_DM - 2 * self.peak_DM_spacing * 2\n # relatively narrow peaks\n elif self.centered_DM - self.min_DM >= 2 * self.peak_DM_spacing * 2:\n self.center_startDM = self.centered_DM - 2 * self.peak_DM_spacing * 2\n # narrow peaks\n else:\n self.center_startDM = self.min_DM\n\n # the right part\n cur_central_DMs_right = cur_peak_right.loc[cur_SPEG_DF['SNR'] >= self.peak_SNR * 0.9, 'DM']\n\n if cur_central_DMs_right.size > 0:\n # print cur_central_DMs_right\n cur_DM_right_temp = cur_central_DMs_right.tolist()[-1]\n # print cur_DM_right_temp\n else: # no points between 100% and 90%\n cur_side_DMs_right = cur_peak_right.loc[cur_SPEG_DF['SNR'] < self.peak_SNR * 0.9, 'DM']\n cur_DM_right_temp = cur_side_DMs_right.tolist()[0]\n\n # when DM spacing is small\n if self.peak_DM_spacing <= 0.5:\n # really 
wide peaks\n if cur_DM_right_temp - self.centered_DM >= 6 * 2:\n self.center_stopDM = self.centered_DM + 6 * 2\n # moderately wide peaks\n elif cur_DM_right_temp - self.centered_DM >= 2 * 2:\n self.center_stopDM = cur_DM_right_temp\n # relatively narrow peaks\n elif self.max_DM - self.centered_DM >= 2 * 2:\n self.center_stopDM = self.centered_DM + 2 * 2\n # narrow peaks\n else:\n self.center_stopDM = self.max_DM\n # when DM spacing is moderately large\n elif self.peak_DM_spacing < 3:\n # really wide peaks\n if cur_DM_right_temp - self.centered_DM >= 6 * 2:\n self.center_stopDM = self.centered_DM + 6 * 2\n # moderately wide peaks\n elif cur_DM_right_temp - self.centered_DM >= 2 * self.peak_DM_spacing * 2:\n self.center_stopDM = cur_DM_left_temp\n # relatively narrow peaks\n elif self.max_DM - self.centered_DM >= 2 * self.peak_DM_spacing * 2:\n self.center_stopDM = self.centered_DM + 2 * self.peak_DM_spacing * 2\n # narrow peaks\n else:\n self.center_stopDM = self.max_DM\n # when DM spacing is large\n else:\n # really wide peaks\n if cur_DM_right_temp - self.centered_DM >= 2 * self.peak_DM_spacing * 2:\n self.center_stopDM = self.centered_DM + 2 * self.peak_DM_spacing * 2\n # relatively narrow peaks\n elif self.max_DM - self.centered_DM >= 2 * self.peak_DM_spacing * 2:\n self.center_stopDM = self.centered_DM + 2 * self.peak_DM_spacing * 2\n # narrow peaks\n else:\n self.center_stopDM = self.max_DM\n\n if (df1 is not None) and (df2 is not None) and (df3 is not None) and (df4 is not None) and \\\n (df5 is not None) and (df6 is not None):\n model1 = LinearRegression().fit(X=df1['DM'].values.reshape(-1, 1), y=df1['SNR'],\n sample_weight=df1['SNR'] - 4.999)\n\n model2 = LinearRegression().fit(X=df2['DM'].values.reshape(-1, 1), y=df2['SNR'],\n sample_weight=df2['SNR'] - 4.999)\n\n model3 = LinearRegression().fit(X=df3['DM'].values.reshape(-1, 1), y=df3['SNR'],\n sample_weight=df3['SNR'] - 4.999)\n\n model4 = LinearRegression().fit(X=df4['DM'].values.reshape(-1, 1), y=df4['SNR'],\n sample_weight=df4['SNR'] - 4.999)\n\n model5 = LinearRegression().fit(X=df5['DM'].values.reshape(-1, 1), y=df5['SNR'],\n sample_weight=df5['SNR'] - 4.999)\n\n model6 = LinearRegression().fit(X=df6['DM'].values.reshape(-1, 1), y=df6['SNR'],\n sample_weight=df6['SNR'] - 4.999)\n\n slope1 = model1.coef_[0]\n slope2 = model2.coef_[0]\n slope3 = model3.coef_[0]\n slope4 = model4.coef_[0]\n slope5 = model5.coef_[0]\n slope6 = model6.coef_[0]\n\n slope_set = [slope3, slope2, slope1, slope4, slope5, slope6]\n\n slope_bound = max(0.01 / self.peak_DM_spacing, 0.01)\n\n def slope_code(x):\n if x > slope_bound:\n return 1\n elif x < -slope_bound:\n return -1\n else:\n return 0\n\n slope_set_coded = list(map(slope_code, slope_set))\n # make sure it's not all flat\n self.peak_score = sum(slope_set_coded[0:3]) - sum(slope_set_coded[3:6])\n elif (df1 is not None) and (df2 is not None) and (df4 is not None) and (df5 is not None):\n\n model1 = LinearRegression().fit(X=df1['DM'].values.reshape(-1, 1), y=df1['SNR'],\n sample_weight=df1['SNR'] - 4.999)\n\n model2 = LinearRegression().fit(X=df2['DM'].values.reshape(-1, 1), y=df2['SNR'],\n sample_weight=df2['SNR'] - 4.999)\n\n model4 = LinearRegression().fit(X=df4['DM'].values.reshape(-1, 1), y=df4['SNR'],\n sample_weight=df4['SNR'] - 4.999)\n\n model5 = LinearRegression().fit(X=df5['DM'].values.reshape(-1, 1), y=df5['SNR'],\n sample_weight=df5['SNR'] - 4.999)\n\n slope1 = model1.coef_[0]\n slope2 = model2.coef_[0]\n\n slope4 = model4.coef_[0]\n slope5 = model5.coef_[0]\n\n slope_set = 
[slope2, slope1, slope4, slope5]\n slope_bound = max(0.01 / self.peak_DM_spacing, 0.01)\n\n def slope_code(x):\n if x > slope_bound:\n return 1\n elif x < -slope_bound:\n return -1\n else:\n return 0\n\n slope_set_coded = list(map(slope_code, slope_set))\n # make sure it's not all flat\n self.peak_score = sum(slope_set_coded[0:2]) - sum(slope_set_coded[2:4])", "def detect_missing_peaks_valleys(self):\n\n i = 1\n found = False\n\n while i < len(self.valleys):\n # Calculate distance between two consecutive valleys.\n up, down = self.valleys[i - 1], self.valleys[i]\n dis = down - up\n\n i += 1\n\n # If the distance is about twice the average distance between\n # two consecutive peaks, then it is most probable that we are missing\n # a line in between these two valleys.\n if dis < 1.5 * self.avg_peaks_dist:\n continue\n\n u = up + self.avg_peaks_dist\n d = min(down, u + self.avg_peaks_dist)\n\n while (d - u) * 2 > self.avg_peaks_dist:\n if self.is_probable_valley(u) and self.is_probable_valley(d):\n peak = self.get_peak_in_range(u, d)\n if self.hor_hist[peak] > self.threshold_low:\n self.peaks.append(self.get_peak_in_range(u, d))\n found = True\n\n u = u + self.avg_peaks_dist\n d = min(down, u + self.avg_peaks_dist)\n\n # Re-distribute peaks and valleys if new ones are found.\n if found:\n self.peaks.sort()\n self.detect_valleys()", "def identify_peaks(self, interpolation_method=\"nearest\"):\n # TODO: do this, perhaps using \n I, kx, ky = self.grid_interpolate((-80, 80), (-80,80), N=101)\n return(0)", "def dfsortpeakvals(mydf, cd):\n\n filter_col_loc=[col for col in mydf if str(col).startswith(cd + '_center')]\n filter_col_height = [col for col in mydf if str(col).startswith(cd + '_height')]\n filter_col_area = [col for col in mydf if str(col).startswith(cd + '_area')]\n filter_col_sigma = [col for col in mydf if str(col).startswith(cd + '_sigma')]\n filter_col_ampl= [col for col in mydf if str(col).startswith(cd + '_amp')]\n filter_col_fwhm = [col for col in mydf if str(col).startswith(cd + '_fwhm')]\n filter_col_fract = [col for col in mydf if str(col).startswith(cd + '_fract')]\n filter_col_actheight = [col for col in mydf if str(col).startswith(cd+'_rawheight')]\n newdf = pd.DataFrame(None)\n for col in filter_col_loc:\n newdf = pd.concat([newdf, mydf[col]])\n if len(newdf)>0:\n newdf.columns = ['allpeaks']\n sortdf = newdf.sort_values(by = 'allpeaks')\n sortdf = sortdf.reset_index(inplace = False)\n newgroupindex = np.where(np.diff(sortdf['allpeaks'])>0.01)\n\n #the above threshold used to be 0.03 - was changed on 9.12.18 for the graphite stuff \n # this threshold should be changed to reflect the separation between peaks \n listnew=newgroupindex[0].tolist()\n listnew.insert(0, 0)\n listnew.append(len(sortdf))\n #to make sure we get the last group \n groupdict = {}\n for i in range(1, len(listnew)):\n if i ==1: \n newgroup = sortdf[listnew[i-1]:listnew[i]+1]\n else: \n newgroup = sortdf[listnew[i-1]+1:listnew[i]+1] \n newkey = newgroup.allpeaks.mean()\n groupdict.update({newkey: newgroup})\n #print(groupdict)\n\n count = 0\n for key in groupdict:\n count = count + 1\n mydf['sortedloc-'+cd+'-'+str(count)] = None\n mydf['sortedheight-'+cd+'-'+str(count)] = None\n mydf['sortedarea-'+cd+'-'+str(count)] = None\n mydf['sortedSIGMA-'+cd+'-'+str(count)] = None \n mydf['sortedamplitude-'+cd+'-'+str(count)] = None \n mydf['sortedfwhm-'+cd+'-'+str(count)] = None \n mydf['sortedfraction-'+cd+'-'+str(count)] = None\n mydf['sortedactheight-'+cd+'-'+str(count)] = None \n for j in 
range(len(filter_col_loc)):\n #iterate over the names of columns in mydf - ex[peakloc1, peakloc2, peakloc3..]\n # this is where we sort the values in the df based on if they appear in the group\n for i in range(len(mydf)):\n #iterate over rows in the dataframe\n if mydf.loc[i,(filter_col_loc[j])] >= min(list(groupdict[key].allpeaks)) and mydf.loc[i,(filter_col_loc[j])] <= max(list(groupdict[key].allpeaks)):\n mydf.loc[i, ('sortedloc-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_loc[j])]\n mydf.loc[i, ('sortedheight-'+cd+'-' + str(count))] = mydf.loc[i, (filter_col_height[j])]\n mydf.loc[i, ('sortedarea-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_area[j])]\n mydf.loc[i, ('sortedSIGMA-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_sigma[j])]\n mydf.loc[i, ('sortedamplitude-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_ampl[j])]\n mydf.loc[i, ('sortedfwhm-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_fwhm[j])] \n mydf.loc[i, ('sortedfraction-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_fract[j])] \n mydf.loc[i, ('sortedactheight-'+cd+'-'+str(count))] = mydf.loc[i, (filter_col_actheight[j])]\n else:\n None\n else: \n None \n # this will just return the original df - nothing sorted \n return mydf", "def detect_peaks(\n xx: np.ndarray,\n min_rel_prominence=0.5,\n peak_min_height=0.1,\n peak_min_distance=5,\n peak_min_prominence=0.01,\n) -> np.ndarray:\n peaks, properties = find_peaks(\n x=xx,\n height=peak_min_height,\n distance=peak_min_distance,\n prominence=peak_min_prominence,\n width=(1, 50),\n wlen=50,\n )\n\n if len(peaks) == 0:\n # return simple maximum\n return np.array([np.argmax(xx)])\n\n # filter peak_locations by relative prominence\n mask = np.ones_like(peaks, dtype=bool)\n for i in range(len(peaks)):\n y_peak = properties[\"peak_heights\"][i]\n y_left_base = xx[properties[\"left_bases\"][i]]\n y_right_base = xx[properties[\"right_bases\"][i]]\n\n prom_left = (y_peak - y_left_base) / y_peak\n prom_right = (y_peak - y_right_base) / y_peak\n prom_max = max(prom_left, prom_right)\n # print(peak_locations[i], prom_left, prom_right, properties['widths'][i])\n mask[i] = min_rel_prominence and prom_max >= min_rel_prominence\n\n if np.any(mask):\n peaks = peaks[mask]\n\n # sort by intensity\n peaks = np.array(sorted(peaks, key=lambda k: xx[k], reverse=True))\n\n return peaks", "def peakFinder(self, no_bg_image):\n # Mask the image so that peaks are only found in the AOI.\n masked_image = no_bg_image * self.peak_mask\n \n # Identify local maxima in the masked image.\n [new_peaks, self.taken] = utilC.findLocalMaxima(masked_image,\n self.taken,\n self.cur_threshold,\n self.find_max_radius,\n self.margin)\n\n # Fill in initial values for peak height, background and sigma.\n new_peaks = utilC.initializePeaks(new_peaks, # The new peaks.\n self.image, # The original image.\n self.background, # The current estimate of the background.\n self.sigma, # The starting sigma value.\n self.z_value) # The starting z value.\n \n return new_peaks", "def find_edges(spectrum, fill_value, peak_locations):\r\n left_edges = []\r\n right_edges = []\r\n\r\n for p_ind in range(len(peak_locations)):\r\n # start with the left edge\r\n p_l = peak_locations[p_ind]\r\n\r\n # set first estimate of left edge to last bin before the peak\r\n closest_below_noise_left = np.where(spectrum[0:p_l] <= fill_value)\r\n if len(closest_below_noise_left[0]) == 0:\r\n closest_below_noise_left = 0\r\n else:\r\n # add 1 to get the first bin of the peak which is not fill_value\r\n closest_below_noise_left = 
max(closest_below_noise_left[0]) + 1\r\n\r\n if p_ind == 0:\r\n # if this is the first peak, the left edge is the closest_below_noise_left\r\n left_edge = closest_below_noise_left\r\n elif peak_locations[p_ind - 1] > closest_below_noise_left:\r\n # merged peaks\r\n left_edge = np.argmin(spectrum[peak_locations[p_ind - 1]: p_l])\r\n left_edge = left_edge + peak_locations[p_ind - 1]\r\n else:\r\n left_edge = closest_below_noise_left\r\n\r\n # Repeat for right edge\r\n closest_below_noise_right = np.where(spectrum[p_l:-1] <= fill_value)\r\n if len(closest_below_noise_right[0]) == 0:\r\n # if spectrum does not go below noise (fill value), set it to the last bin\r\n closest_below_noise_right = len(spectrum) - 1\r\n else:\r\n # subtract one to obtain the last index of the peak\r\n closest_below_noise_right = min(closest_below_noise_right[0]) + p_l - 1\r\n\r\n # if this is the last (rightmost) peak, this first guess is the right edge\r\n if p_ind == (len(peak_locations) - 1):\r\n right_edge = closest_below_noise_right\r\n\r\n elif peak_locations[p_ind + 1] < closest_below_noise_right:\r\n right_edge = np.argmin(spectrum[p_l:peak_locations[p_ind + 1]]) + p_l\r\n else:\r\n right_edge = closest_below_noise_right\r\n\r\n left_edges.append(np.int(left_edge))\r\n right_edges.append(np.int(right_edge))\r\n\r\n return left_edges, right_edges", "def find_peak_in_range(qmap: np.ndarray, centre: int, window_size: int, adaptive_range: bool=False) -> Tuple[np.ndarray, np.ndarray]:\n half_size = window_size // 2\n search_field = qmap\n #search_field[:, centre-half_size:centre+half_size] = qmap[:, centre-half_size:centre+half_size]\n search_field[np.isnan(search_field)] = 0\n\n ppos = np.array([], dtype='int')\n perr = np.array([])\n for i in range(0,len(search_field[:,0])):\n search_slice = np.zeros(len(qmap[0]))\n low = centre-half_size\n upp = centre+half_size\n search_slice[low:upp] = search_field[i, low:upp]\n tmp = np.argwhere(search_slice==np.max(search_slice))\n perr = np.append(perr, 1/(tmp[0][0]-np.mean(search_slice)))\n ppos = np.append(ppos, tmp[0][0])\n\n if adaptive_range == True:\n centre = int(tmp[0][0])\n\n #plt.plot(qmap[i])\n #plt.plot(ppos[i], qmap[i, ppos[i]], marker='1', markersize=5)\n #wtf = True\n\n #plt.show()\n return ppos, perr", "def peakdetect(x,minpeakh = 18,\n minpeakw = 6):\n signdx = sign(diff(x))\n posidx = where(diff(signdx)>0)[0]+1\n negidx = where(diff(signdx)<0)[0]+1\n np = len(posidx)\n # preallocation\n peakinfo = []\n \n if np==0 or len(negidx)==0: return peakinfo\n \n # peak searching\n for i in xrange(np-1):\n nnidx = negidx[(negidx>posidx[i])&(negidx<posidx[i+1])]\n # check whether a plain occurs at the peak top, which will be presented as\n # multiple negative indices between two adjacent positive indices. 
If existed,\n # middile sites is retained\n centeridx = int(mean(nnidx))+1 if len(nnidx)>=2 else nnidx[0]\n \n # criteria: minimum peak intensity and minimum distance between peak\n # top and starting point\n if centeridx-posidx[i] >= minpeakw and x[centeridx] >= minpeakh:\n # coelute peak check using 2nd derivative\n d2x = diff(x[posidx[i]:posidx[i+1]+1],n=2)\n nx = len(d2x)\n localmin = []\n localmax = []\n for j in xrange(1,nx-1):\n \n if d2x[j] <= d2x[j-1] and d2x[j] <= d2x[j+1]:\n idx = arange(maximum(0,j-2),minimum(j+3,nx))\n idx = idx[where(d2x[idx]==d2x[j])[0]]\n # to avoid plain minima\n localmin.append(int(mean(idx))+1 if len(idx)>1 else idx[0])\n \n if d2x[j] >= d2x[j-1] and d2x[j] >= d2x[j+1]:\n idx = arange(maximum(0,j-2),minimum(j+3,nx))\n idx = idx[where(d2x[idx]==d2x[j])[0]]\n # to avoid plain maxima\n localmax.append(int(mean(idx))+1 if len(idx)>1 else idx[0])\n\n if len(localmin)>0:\n localmin = array(localmin)\n localmax = array(localmax)\n localmin += posidx[i]-1\n localmin = localmin[x[localmin]>=minpeakh]\n localmax += posidx[i]-1\n \n if len(localmin)>1:\n peakinfo.append([posidx[i],localmin[0],localmax[localmax>localmin[0]][0]])\n for j in xrange(1,len(localmin)):\n idx = where(localmax>localmin[j])[0]\n if len(idx)>0 and j!=len(localmin):\n peakinfo.append([localmax[idx[0]-1],localmin[j],localmax[idx[0]]])\n else:\n peakinfo.append([localmax[-1],localmin[j],posidx[i+1]])\n else:\n peakinfo.append([posidx[i],centeridx,posidx[i+1]])\n \n # Check the points before the first peak and after the last peak to identify\n # whether there is part peak due to the arbitrary data selection\n ## Check peak AFTER peak\n p = posidx[-1]\n if len(x)-p>=minpeakw:\n if any(negidx>p): # there exist peak maximum\n if x[negidx[-1]]>=minpeakh and negidx[-1]-p>=minpeakw:\n peakinfo.append([p,negidx[-1],len(x)])\n elif x[-1]>=minpeakh:\n peakinfo.append([p,len(x)-(len(x)-p)/2,len(x)])\n \n ## Check peak BEFORE peak\n p = posidx[0]\n if p>=minpeakw:\n if any(negidx<p): # there exist peak maximum\n if x[negidx[0]]>=minpeakh and p-negidx[0]>=minpeakw:\n peakinfo.insert(0,[0,negidx[0],p])\n elif x[0]>=minpeakh:\n peakinfo.insert(0,[0,int(p/2),p])\n \n return peakinfo", "def _threshold_mean(mask, data):\n daily_mean = _apply_daily_mask(mask, data, 'mean')\n daily_std = _apply_daily_mask(mask, data, 'std')\n daily_clipped_max = daily_mean + 2 * daily_std\n daily_clipped_min = daily_mean - 2 * daily_std\n # In cases where the standard deviation is 0 (i.e. all the data is\n # identical) it is possible for the mean to be above the daily maximum\n # by a very small amount due to floating point rounding errors. 
To ensure\n # that rounding errors do not affect the final outcome we lower the daily\n # clipping minimum if it is greater than the maximum for that day and\n # raise the daily clipping maximum if it is less than the minimum for\n # that day.\n daily_min, daily_max = _threshold_minmax(mask, data)\n min_above_max = daily_clipped_min > daily_max\n max_below_min = daily_clipped_max < daily_min\n daily_clipped_min[min_above_max] = daily_max[min_above_max]\n daily_clipped_max[max_below_min] = daily_min[max_below_min]\n return daily_clipped_min, daily_clipped_max", "def replace_middle_NaNs(filtered_logratios):\n \n for chrom in filtered_logratios:\n for lst1 in NaN_ranges(filtered_logratios[chrom].values):\n if lst1[0]!=0 and lst1[-1] != len(filtered_logratios[chrom].values)-1 and (lst1[-1] - lst1[0]) < 1000:\n avg = (filtered_logratios[chrom].values[lst1[0]-1] + filtered_logratios[chrom].values[lst1[-1]+1]) / 2\n filtered_logratios[chrom].values[lst1[0]:lst1[-1]+1] = avg\n \n return filtered_logratios", "def handle_SExtractor_mask(stars, thresh):\r\n mask = np.ones(stars.shape)\r\n mask[stars < thresh] = 0\r\n stars[stars < thresh] = 0\r\n return mask", "def check_and_adjust_boundary(self, pos):\n valu = self.limit_upper.copy()\n vall = self.limit_lower.copy()\n idu = pos > valu\n idl = pos < vall\n if self.boundary_policy == self.BoundaryPolicy.ToNaN:\n pos = _np.where(idu, _np.nan, pos)\n pos = _np.where(idl, _np.nan, pos)\n else:\n pos = _np.where(idu, self.limit_upper, pos)\n pos = _np.where(idl, self.limit_lower, pos)\n return pos" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the target sum of two numbers in a collection. Naive method: iterate through the collection.
def find_sum_naive(collection, target):\n    # Naive O(n^2) scan over every pair of distinct positions.\n    for index, first in enumerate(collection):\n        for second in collection[index + 1:]:\n            if first + second == target:\n                return True\n    return False
[ "def sum_integers(string):", "def twoSum(self, nums, target):\n for i in nums:\n for j in nums:\n my_target = i + j\n if my_target == target:\n return i, j", "def sum(str1: str, str2: str) -> str:\n\n print(\"str1=\" + str(str1) + \", str2=\" + str(str2))\n\n if not str1.isdigit() or not str2.isdigit() or str1 is None or str2 is None:\n return None\n\n list1 = list(str1)\n list2 = list(str2)\n print(\"list1=\" + str(list1))\n print(\"list2=\" + str(list2))\n result_str = \"\"\n\n if len(str1) > len(str2):\n smaller_number = str2\n larger_number = str1\n else:\n smaller_number = str1\n larger_number = str2\n\n larger_index = len(larger_number) - 1\n\n digit_in_memory = 0\n\n for index in range(len(smaller_number), 0, -1):\n digit1 = int(smaller_number[index - 1])\n digit2 = int(larger_number[larger_index])\n\n cur_sum = digit1 + digit2 + digit_in_memory\n\n if cur_sum > 9:\n result_str += str(cur_sum)[-1]\n digit_in_memory = int(str(cur_sum)[0])\n else:\n result_str += str(cur_sum)\n digit_in_memory = 0\n\n if index == 1 and digit_in_memory != 0:\n if len(str1) == len(str2):\n result_str += str(digit_in_memory)\n else:\n cur_sum = int(larger_number[larger_index - 1]) + digit_in_memory\n result_str += str(cur_sum)\n\n larger_index -= 1\n\n reverse_result = result_str[::-1]\n first_part = \"\"\n\n if len(str1) != len(str2):\n first_part = larger_number[0:len(larger_number) - len(reverse_result):1]\n result_number = first_part + reverse_result\n\n return result_number", "def pairs_with_sum(nums, target):\n pairs = []\n used = []\n\n for i in range(len(nums)):\n for j in range(i+1, len(nums)):\n x = nums[i]\n y = nums[j]\n\n if x+y == target and not(i in used or j in used):\n pairs.append((x, y))\n used.append(i)\n used.append(j)\n break\n\n return len(pairs)", "def subsequence_sum_to_target(num_array, target):\t\n\tfor i in range(len(num_array)):\n\t\ttotal = 0\n\t\tfor j in range(i, len(num_array)):\n\t\t\ttotal += num_array[j]\n\t\t\tif total == target:\n\t\t\t\treturn num_array[i:j+1]\n\t\t\tif total > target:\n\t\t\t\tbreak\n\n\treturn None", "def equational_sum(total):\n\n summerize = \"\" # summerization variable to be returned to the variable calling the function.\n\n if isinstance(total, int):\n if int(total) > 9: # If the variable sentence_total_deduced is less than 9 ( meaning that this isn't needed because it is already deduced):\n iterable = 0 # Create an iterable variable\n for digit in str(total): # For each individual number inside of the integer:\n if iterable == 0: # If this is the first individual number coming through:\n summerize = digit # Set the variable sentence_total_deduced_sum to that individual number:\n else: # if it is any other individual number:\n summerize = \"{}+{}\".format(summerize, digit) # Add the equational sum to the variable sentence_total_deduced_sum\n iterable = 1 # Set the iterable to 1, effectively telling the code to only use the else part of the conditional above when parsing digits after this.\n\n if isinstance(total, list):\n iterable = 0 # Creates an iterable variable.\n for item in total: # For each number value in the total:\n if iterable == 0: # If this is the first item:\n summerize = item # Make it the first item in the string.\n else: # If this isn't the first item:\n summerize = \"{}+{}\".format(summerize, item)\n iterable = 1 # Set the iterable to 1, effectively telling the code to only use the else part of the conditional above when parsing digits after this.\n\n return summerize", "def get_sum(a, b):\n return sum(range(min(a, b), 
max(a, b) + 1))", "def sum(a, b):\n return a + b", "def calculateCost(given_text, actual_text):\n total_correct = 0\n\n for bit in range(len(given_text)):\n if given_text[bit] == actual_text[bit]:\n total_correct += 1\n\n return int((float(total_correct)/len(given_text))*100)", "def lucky_sum(a, b, c):\n\tval = 0\n\tfor num in [a, b, c]:\n\t\tif num == 13:\n\t\t\tbreak\n\t\tval += num\n\treturn val", "def sum(num1, num2):\n\treturn num1 + num2", "def _get_total_cost(self, text: str) -> int:\n\n return sum([self.splitter.word_cost.get(word, self.default_cost) for word in self.splitter.split(text)])", "def sum_all(everything):\n price = 0\n for item in everything:\n price += item[1]\n return price", "def pairs_with_sum(nums, target):\n frequency = {}\n counter = 0\n\n for i in nums:\n if i not in frequency:\n frequency[i] = 1\n else:\n frequency[i] += 1\n\n for i in frequency:\n find = target - i\n\n if (i == find and frequency[i] <= 1):\n continue\n\n if find in frequency and frequency[find] != 0 and frequency[i] != 0:\n appearances = min(frequency[i], frequency[find])\n\n if find == i:\n appearances //= 2\n\n counter += appearances\n frequency[find] -= appearances\n frequency[i] -= appearances\n\n return counter", "def solution(L, target_sum):\n # Sanity check\n if not L or len(L) < 2:\n return None\n\n # Sort the list such that we can find the solution in linear time.\n L.sort()\n \n start = 0\n end = 1\n\n for i in range(0, len(L)):\n if start == end:\n return None\n\n total = L[start] + L[-end]\n\n if total == target_sum:\n return (L[start], L[-end])\n elif total < target_sum:\n start += 1\n elif total > target_sum:\n end += 1\n \n return None", "def sum(sequence):\n return __builtin__.sum(sequence)", "def calctarget(symbolexpr, symboltable):\n terms = findall(r'[^+-]+|[+-][0-9]+', symbolexpr)\n if terms[0] in symboltable:\n return symboltable[terms[0]] + sum([int(t) for t in terms[1:]])\n else:\n return None", "def sum_multiples(mult: _Iterable[int], xs: _Iterable[int]) -> int:\n return sum(filter_multiples(mult, xs))", "def example2(S):\n n = len(S)\n total = 0\n for j in range(0, n, 2): # note the increment of 2\n total += S[j]\n return total" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Move to the cookie's location, and begin clicking.
def click_cookie(self):\n    # Locate the cookie, and move the mouse over it\n    cookie_location = pyautogui.locateOnScreen(self.cookie_image)\n    pyautogui.moveTo(cookie_location)\n    # main logic\n    if cookie_location is not None:\n        while True:\n            for i in range(0, 105):\n                pyautogui.click()\n            grandma_location = self.buy_grandma_location()\n            self.move_click(grandma_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 500):\n                pyautogui.click()\n            self.move_click(grandma_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 1005):\n                pyautogui.click()\n            farm_location = self.buy_farm_location()\n            self.move_click(farm_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 1005):\n                pyautogui.click()\n            self.move_click(farm_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 2005):\n                pyautogui.click()\n            self.move_click(farm_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 10005):\n                pyautogui.click()\n            mine_location = self.buy_mine_location()\n            self.move_click(mine_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 10005):\n                pyautogui.click()\n            factory_location = self.buy_factory_location()\n            self.move_click(factory_location)\n            pyautogui.moveTo(cookie_location)\n            for i in range(0, 10005):\n                pyautogui.click()\n            self.move_click(factory_location)
[ "def set_clicked_cookie(headers, code):\n cookieUtil = Cookie.SimpleCookie()\n cookieUtil[code] = True\n cookieUtil.name = code\n cookieUtil[code]['expires'] = 31556928\n \n headers.add_header('Set-Cookie', cookieUtil.output())", "def _dismiss_cookies(self):\n print(\"W dismis_cookies z BaseTest\")\n try:\n btn = self.driver.find_element(*HomePageLocators.COOKIE_BTN)\n btn.click()\n except NoSuchElementException:\n pass", "def click(self, locator):\r\n self.find_element(locator).click()", "def NETRBookmarkGo(self):\n self.init_bookmark_ui()\n self._bookmarkUI.go()", "def click(self):\n self.logger.info('clicking on page object {}'.format(self._log_id_short))\n self.logger.debug('clicking on page object; {}'.format(self._log_id_long))\n self.webelement.click()\n self.logger.info('successfully clicked on page object {}'.format(self._log_id_short))\n self.logger.debug('successfully clicked on page object; {}'.format(self._log_id_long))\n return self", "def click_home(self):\n self.find_element_by_xpath(self.home_xpath).click()", "def step_impl(context):\n context.browser.find_element_by_id('Caisse').click()", "def click_sign_in_button(self):\n self.click_object(*LoginPageLocators.signin_button)", "def scroll_to_click(element):\n scroll_to(element)\n click(element)", "def switch_to_frame(self, frame):\r\n self.driver.switch_to.frame(frame)", "def click_menu(self):\n pass", "def click(widget, view_index=None):\n pos = center(widget, view_index)\n robouser.click(pos)", "def focus(self):\n hover = ActionChains(self.driver).move_to_element(self._find_element())\n hover.click()\n hover.perform()", "def go_to(self, point):\n self.hideturtle()\n self.penup()\n self.setposition(point.x, point.y)\n self.pendown()\n self.showturtle()", "def click_your_library(self):\n (self.find_element_by_xpath(\"/html/body/div/div/div/div/div[1]/div/div/div[1]/div/button\")).click()", "def click_account(self):\n self.find_element_by_xpath(self.profile_menu_xpath).click()\n element = WebDriverWait(self.driver, 30).until(EC.element_to_be_clickable((By.XPATH, self.account_btn_xpath)))\n did_scroll = element.location_once_scrolled_into_view\n element.click()", "def _click_highlighted_term(self):\n if self.currently_highlighted_term:\n self.gui_instance.exit()\n print(\"Clicking: \", self.currently_highlighted_term)\n # current_x, current_y = pyautogui.displayMousePosition()\n x,y = self.currently_highlighted_term[1][0:2]\n x, y = int(x), int(y)\n\n win32api.SetCursorPos((x, y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)", "def focus(self, locator):\n self._selenium.focus(locator)", "def double_click(self):\n self.node.double_click()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a JSON schema based on the class structure in SQL.py
def generate_schema():\n    _result = {\n        "$schema": "http://json-schema.org/draft-04/schema#",\n        "description": "The JSON Schema for QAL transformations",\n        "title": "QAL Transformation",\n        "type": "object",\n        "version": __version__,\n        "properties": {},\n        "namespace": "qal",\n        "definitions": {}\n    }\n\n    def _property_to_type(_property_name):\n        if _property_name == "uuid":\n            return [{\n                "type": "string",\n                "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"\n            }]\n        elif _property_name == "mappings":\n            return [{\n                "type": "array",\n                "items": {"$ref": "#/definitions/Mapping"}\n            }]\n        elif _property_name == "substitution":\n            return [{"$ref": "#/definitions/Substitution"}]\n        elif _property_name == "resources":\n            return [{\n                "type": "array",\n                "items": {"$ref": "qal://resources.json#/definitions/Resource"}\n            }]\n        elif _property_name == "resources":\n            return [{\n                "type": "array",\n                "items": {"$ref": "qal://resources.json#/definitions/Resource"}\n            }]\n        elif _property_name in ["builtin_substitutions", "key_fields", "destination_log_level", "key_fields", "source", "destination"]:\n            # Disregard these fields\n            return None\n        elif _property_name in ["delete", "insert", "update", "is_key"]:\n            # Disregard these fields\n            return [{"type": "boolean"}]\n        else:\n            return [{"type": "string"}]\n\n    # First, Add parameter types\n    for _curr_class in list_prefixed_classes(globals(), "", _exclude=[]):\n        _result["definitions"].update({_curr_class: {\n            "type": "object",\n            "properties": json_add_child_properties(globals(), _curr_class, _property_to_type)\n        }})\n    return _result
[ "def _schema_to_json(schema):\n features = []\n sparse_features = []\n for name, column_schema in sorted(six.iteritems(schema.column_schemas)):\n if isinstance(column_schema.representation,\n sch.SparseColumnRepresentation):\n sparse_features.append(_sparse_column_schema_to_json(name, column_schema))\n else:\n features.append(_dense_column_schema_to_json(name, column_schema))\n schema_dict = {\n 'feature': features,\n 'sparseFeature': sparse_features\n }\n return json.dumps(schema_dict, indent=2, separators=(',', ': '),\n sort_keys=True)", "def for_jsonschema(self):\n\n schema = {}\n get_name = lambda x: x.startswith('_jsonschema')\n for func_name in filter(get_name, dir(self)):\n attr_name = func_name.split('_')[-1]\n attr_value = getattr(self, func_name)()\n if attr_value is not None:\n schema[attr_name] = attr_value\n return schema", "def get_schema(self):\r\n schema = {}\r\n schema[\"type\"] = self.type\r\n if self.type == \"string\":\r\n schema[\"blank\"] = True # allow blank strings\r\n if self.optional:\r\n schema[\"required\"] = False\r\n\r\n return schema", "def unload_jsonschema_from_marshmallow_class(mclass) -> TDict:\n assert_is_a_marshmallow_class(mclass)\n schema = js().dump(mclass.Schema())[\"definitions\"][mclass.__name__]\n schema[\"additionalProperties\"] = True\n return schema", "def build_schema(cls, field_obj: mongoengine.fields.BaseField) -> dict:\n schema_skel = cls.schema_skel()\n schema = {f: getattr(field_obj, f, val) for f, val in schema_skel.items()}\n\n if 'default' in schema:\n schema['default'] = cls._normalize_default(schema['default'])\n\n if 'choices' in schema:\n schema['choices'] = cls._normalize_choices(schema['choices'])\n\n field_class = field_obj.__class__\n if field_class.__name__ in type_key_registry:\n schema['type_key'] = field_class.__name__\n else:\n registry_field_cls = get_closest_parent(\n field_class,\n (x.field_cls for x in type_key_registry.values())\n )\n if registry_field_cls is None:\n raise ActionError(f'Could not find {field_class!r} or one of its base classes '\n f'in type_key registry')\n\n schema['type_key'] = registry_field_cls.__name__\n\n return schema", "def schemata(schema):\r\n return dict((n, Schema.from_attribute(s)) for n, s in schema.items())", "def create_entry_json_schema(fields: dict[str, dict[str, Any]]) -> dict[str, Any]: # noqa: D407\n json_schema = {\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"type\": \"object\",\n \"properties\": {},\n }\n\n def recursive_field(\n parent_schema: dict[str, Any],\n parent_field_name: str,\n parent_field_def: dict[str, Any],\n ) -> None:\n\n if parent_field_def[\"type\"] != \"object\":\n # TODO this will not work when we have user defined types, s.a. 
saldoid\n schema_type = json_schema_type(parent_field_def[\"type\"])\n result: dict[str, Any] = {\"type\": schema_type}\n else:\n result = {\"type\": \"object\", \"properties\": {}}\n\n for child_field_name, child_field_def in parent_field_def[\"fields\"].items():\n recursive_field(result, child_field_name, child_field_def)\n\n if parent_field_def.get(\"required\", False):\n if \"required\" not in parent_schema:\n parent_schema[\"required\"] = []\n parent_schema[\"required\"].append(parent_field_name)\n\n if parent_field_def.get(\"collection\", False):\n result = {\"type\": \"array\", \"items\": result}\n\n parent_schema[\"properties\"][parent_field_name] = result\n\n for field_name, field_def in fields.items():\n recursive_field(json_schema, field_name, field_def)\n\n return json_schema", "def construct_schema(collection):\n columns_dict = {}\n columns = []\n for row in collection.find():\n for field in row.keys():\n field_type = get_type(field, row[field])\n if field not in columns_dict.keys():\n columns_dict[field] = field_type\n else:\n union_type = unify_types(columns_dict[field], field_type)\n columns_dict[field] = union_type\n for field in sorted(columns_dict.keys()):\n # We sort the keys to make the constructed schema look nice\n # Possible failure modes up until this point:\n # Field is entirely empty arrays, type is undefined\n # Field is entirely empty objects\n # Field is invalid\n columns_dict[field] = remove_invalid_fields(columns_dict[field])\n if (columns_dict[field].get('type', 'INVALID') != 'INVALID' and\n not (columns_dict[field]['type'] == 'RECORD' and columns_dict[field]['fields'] == [])):\n columns.append(columns_dict[field])\n return columns", "def _create_schema(schema_nodes):\n schema = col.SchemaNode(col.Mapping())\n for name, node_def in schema_nodes.items():\n data_type = node_def[0]\n arg_funcs = node_def[1:]\n\n kw = {'name': name}\n for func in arg_funcs:\n kw = func(kw)\n\n node = col.SchemaNode(data_type(), **kw)\n schema.add(node)\n return schema", "def create_graph_schema(self):\n\n ##Classes\n # Person Class\n g.add((RDFnamespace.FOAF.Person, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n # Organization Class\n g.add((RDFnamespace.FOAF.Organization, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n\n # Student Class\n g.add((focu.Student, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.Student, RDFnamespace.RDFS.subClassOf, RDFnamespace.FOAF.Person))\n g.add((focu.Student, RDFnamespace.RDFS.label, Literal(\"StudentClass\")))\n g.add((focu.Student, RDFnamespace.RDFS.comment, Literal(\"This is a Student Class\")))\n\n # Course Class\n g.add((focu.Course, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.Course, RDFnamespace.RDFS.label, Literal(\"University Courses\")))\n g.add((focu.Course, RDFnamespace.RDFS.comment, Literal(\"This is a Course Class\")))\n\n # Topic Class\n g.add((focu.Topic, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.Topic, RDFnamespace.RDFS.label, Literal(\"Course Topic\")))\n g.add((focu.Topic, RDFnamespace.RDFS.comment, Literal(\"Topic extracted for a given course\")))\n\n # University Class\n g.add((focu.University, RDFnamespace.RDF.type, RDFnamespace.RDFS.Class))\n g.add((focu.University, RDFnamespace.RDFS.subClassOf, RDFnamespace.FOAF.Organization))\n g.add((focu.University, RDFnamespace.RDFS.label, Literal(\"Univeristy\")))\n\n ##Properties\n\n # Course Name\n g.add((focu.course_name, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_name, 
RDFnamespace.RDFS.label, Literal(\"Course Name\")))\n g.add((focu.course_name, RDFnamespace.RDFS.comment, Literal(\"Course Name\")))\n g.add((focu.course_name, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_name, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Course Subject\n g.add((focu.course_subject, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_subject, RDFnamespace.RDFS.label, Literal(\"Course Subject\")))\n g.add((focu.course_subject, RDFnamespace.RDFS.comment, Literal(\"Course Subject\")))\n g.add((focu.course_subject, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_subject, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Course Number\n g.add((focu.course_number, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_number, RDFnamespace.RDFS.label, Literal(\"Course Number\")))\n g.add((focu.course_number, RDFnamespace.RDFS.comment, Literal(\"Course Number\")))\n g.add((focu.course_number, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_number, RDFnamespace.RDFS.range, RDFnamespace.XSD.integer))\n\n # Course Description\n g.add((focu.course_description, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_description, RDFnamespace.RDFS.label, Literal(\"Course Description\")))\n g.add((focu.course_description, RDFnamespace.RDFS.comment, Literal(\"Course Description\")))\n g.add((focu.course_description, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_description, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Student ID\n g.add((focu.student_id, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.student_id, RDFnamespace.RDFS.label, Literal(\"Student ID\")))\n g.add((focu.student_id, RDFnamespace.RDFS.comment, Literal(\"Student ID\")))\n g.add((focu.student_id, RDFnamespace.RDFS.domain, focu.Student))\n g.add((focu.student_id, RDFnamespace.RDFS.range, RDFnamespace.XSD.integer))\n\n # Graded Courses\n g.add((focu.graded_courses, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.graded_courses, RDFnamespace.RDFS.label, Literal(\"grade for course\", lang=\"en\")))\n g.add((focu.graded_courses, RDFnamespace.RDFS.comment, Literal(\"Course graded for a student\")))\n g.add((focu.graded_courses, RDFnamespace.RDFS.domain, focu.Student))\n g.add((focu.graded_courses, RDFnamespace.RDFS.range, focu.Course))\n\n # Subject Contains Topics\n g.add((focu.contains, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.contains, RDFnamespace.RDFS.label, Literal(\"extracted topics\", lang=\"en\")))\n g.add((focu.contains, RDFnamespace.RDFS.comment, Literal(\"Topics extracted from course description\")))\n g.add((focu.contains, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.contains, RDFnamespace.RDFS.range, focu.Topic))\n\n # Topics from Courses\n g.add((focu.containInverse, RDFnamespace.OWL.inverseOf, focu.contains))\n g.add((focu.containInverse, RDFnamespace.RDFS.label, Literal(\"extracted courses\", lang=\"en\")))\n g.add((focu.containInverse, RDFnamespace.RDFS.comment, Literal(\"Courses extracted from Topics\")))\n g.add((focu.containInverse, RDFnamespace.RDFS.domain, focu.Topic))\n g.add((focu.containInverse, RDFnamespace.RDFS.range, focu.Course))\n\n # Course Term\n g.add((focu.course_term, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_term, RDFnamespace.RDFS.label, Literal(\"course term\", lang=\"en\")))\n g.add((focu.course_term, RDFnamespace.RDFS.comment, Literal(\"The term in 
which the given course was taken\")))\n g.add((focu.course_term, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_term, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n # Course Grade\n g.add((focu.course_grade, RDFnamespace.RDF.type, RDFnamespace.RDF.Property))\n g.add((focu.course_grade, RDFnamespace.RDFS.label, Literal(\"course grade\", lang=\"en\")))\n g.add((focu.course_grade, RDFnamespace.RDFS.comment, Literal(\"The grade received for a course\")))\n g.add((focu.course_grade, RDFnamespace.RDFS.domain, focu.Course))\n g.add((focu.course_grade, RDFnamespace.RDFS.range, RDFnamespace.XSD.string))\n\n g.serialize(format='turtle', destination='knowledge_base/schema.ttl')", "def build_docstring(klass, template=\"{fits_hdu} {title}\"):\n from . import model_base\n\n def get_field_info(subschema, path, combiner, info, recurse):\n # Return all schema fields representing fits hdus\n if 'fits_hdu' in subschema and 'fits_keyword' not in subschema:\n attr = '.'.join(path)\n info[attr] = subschema\n return 'fits_hdu' in subschema or 'fits_keyword' in subschema\n\n # Silly rabbit, only datamodels have schemas\n if not (klass == model_base.DataModel or\n issubclass(klass, model_base.DataModel)):\n raise ValueError(\"Class must be a subclass of DataModel: %s\",\n klass.__name__)\n\n # Create a new model just to get its shape\n null_object = klass(init=None)\n shape = null_object.shape\n if shape is None:\n shaped_object = null_object\n else:\n # Instantiate an object with correctly dimensioned shape\n null_object.close()\n shape = tuple([1 for i in range(len(shape))])\n shaped_object = klass(init=shape)\n\n # Get schema fields which have an associated hdu\n info = {}\n walk_schema(shaped_object._schema, get_field_info, ctx=info)\n\n # Extract field names from template to set defaults\n # so format won't crash while using them when they aren't there\n default_schema = {}\n fields = re.findall(r'\\{([^\\\\:}]*)[\\:\\}]', template)\n for field in fields:\n default_schema[field] = ''\n\n buffer = []\n for attr, subschema in info.items():\n schema = {}\n schema.update(default_schema)\n schema.update(subschema)\n schema['path'] = attr\n\n # Determine if attribute has a default value\n instance = shaped_object.instance\n for field in attr.split('.'):\n try:\n instance = instance.get(field)\n except AttributeError:\n instance = None\n if instance is None:\n break\n schema['default'] = instance is not None\n\n # Extract table field names from datatype\n if type(schema['datatype']) == str:\n schema['array'] = True\n else:\n schema['records'] = True\n fields = []\n for field_info in schema['datatype']:\n fields.append(field_info['name'])\n schema['fields'] = ', '.join(fields)\n schema['datatype'] = 'table'\n\n # Convert boolean fields to their field names\n for field, value in schema.items():\n if type(value) == bool:\n schema[field] = field\n\n # Apply format to schema fields\n # Delete blank lines\n lines = template.format(**schema)\n for line in lines.split(\"\\n\"):\n if line and not line.isspace():\n buffer.append(line)\n\n field_info = \"\\n\".join(buffer) + \"\\n\"\n return field_info", "def api_get_all_classes():\n conn = sqlite3.connect(app.config['DATABASE'], detect_types=sqlite3.PARSE_DECLTYPES)\n conn.row_factory = dict_factory\n cur = conn.cursor()\n all_students = cur.execute('SELECT * FROM CLASS;').fetchall()\n return jsonify(all_students)", "def _store_schema(self):\n\t\tself._check_transaction()\n\n\t\tdata = dict()\n\t\tdata[\"level\"], data[\"t0\"], data[\"dt\"] = 
self.pix.level, self.pix.t0, self.pix.dt\n\t\tdata[\"nrows\"] = self._nrows\n\t\tdata[\"cgroups\"] = [ (name, schema) for (name, schema) in self._cgroups.iteritems() if name[0] != '_' ]\n\t\tdata[\"name\"] = self.name\n\t\tdata[\"fgroups\"] = self._fgroups\n\t\tdata[\"filters\"] = self._filters\n\t\tdata[\"aliases\"] = self._aliases\n\t\tdata[\"commit_hooks\"] = self._commit_hooks\n\n\t\tfn = self._snapshot_path(self.snapid, create=True) + '/schema.cfg'\n\t\tf = open(fn, 'w')\n\t\tf.write(json.dumps(data, indent=4, sort_keys=True))\n\t\tf.close()", "def schemata(schema_dicts):\r\n return dict((n, Schema.from_legacy(s)) for n, s in schema_dicts.items())", "def context_schema(cls) -> 'Schema':\n return cls.Schema()", "def _construct_schema(uuid):\n catalog_url = '{0}/api/catalog/v1?ids={1}'.format(URI, uuid)\n response = urllib.request.urlopen(catalog_url, context=context)\n catalog_data = json.load(response)[\"results\"][0][\"resource\"]\n\n schema = []\n for i in range(0, len(catalog_data[\"columns_field_name\"])):\n name = catalog_data[\"columns_field_name\"][i]\n field_type = _encode_datatype(catalog_data[\"columns_datatype\"][i])\n description = catalog_data[\"columns_description\"][i]\n schema.append(bigquery.SchemaField(name, field_type, mode='NULLABLE', description=description))\n\n return schema", "def _defs_sql_to_json(rows):\n # type: (List[Tuple[int, str, str]]) -> List[Dict[str, Union[int, str]]]\n return [{'id': row[0], 'name': row[1], 'path': row[2]} for row in rows]", "def generate_schema_wrapper(schema_file):\n with open(schema_file) as f:\n rootschema = json.load(f)\n contents = [\"# The contents of this file are automatically generated\",\n \"# at time {0}\\n\".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')),\n \"from altair.utils.schemapi import SchemaBase, Undefined\",\n LOAD_SCHEMA]\n contents.append(schema_class('Root', schema=rootschema,\n schemarepr=CodeSnippet('load_schema()')))\n for name in rootschema['definitions']:\n defschema = {'$ref': '#/definitions/' + name}\n defschema_repr = {'$ref': '#/definitions/' + name}\n\n contents.append(schema_class(get_valid_identifier(name),\n schema=defschema, schemarepr=defschema_repr,\n rootschema=rootschema,\n rootschemarepr=CodeSnippet(\"Root._schema\")))\n contents.append('') # end with newline\n return '\\n'.join(contents)", "def __get_schema__(cls):\n s = getattr(cls, \"__schema\", None)\n if s is None:\n sclass = getattr(cls, \"Schema\", None)\n if sclass is None:\n raise ValueError(\"Class must have Schema inner class\")\n else:\n s = cls.__schema = sclass() # instantiate\n s.__objclass__ = cls # assign this class to schema.__objclass__\n return s" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log any invalid run states found.
def _LogInvalidRunLevels(states, valid):\n    invalid = set()\n    for state in states:\n        if state not in valid:\n            invalid.add(state)\n    if invalid:\n        logging.warning("Invalid init runlevel(s) encountered: %s", ", ".join(invalid))
[ "def test_invalid_run(self):\n probe_run = 123321\n self.assertTrue(probe_run not in RUNS)\n self.assertFalse(utils.valid_run(probe_run))", "def error(self):\n \n print('---error state report ---')\n print(' state', self.state)\n print(' scenario', self.scenario)\n print(' parameters', self.parameters)\n print(' msg', self.msg)\n print('state history', self.state_history)\n print(' tree dump:')\n print(self.tree[0].dump())\n print('--end error state report---')\n raise RuntimeError('error state')", "def record_failure(self):\n self.state.record_failure()\n logger.debug(\"Failure recorded\")", "def any_build_failures(self):", "def test_validate_invalid_final_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.final_states = {'q4'}\n self.ntm1.validate()", "def report_nodes_not_run(notrun):\n if notrun:\n logger.info(\"***********************************\")\n for info in notrun:\n logger.error(\"could not run node: %s\" %\n '.'.join((info['node']._hierarchy,\n info['node']._id)))\n logger.info(\"crashfile: %s\" % info['crashfile'])\n logger.debug(\"The following dependent nodes were not run\")\n for subnode in info['dependents']:\n logger.debug(subnode._id)\n logger.info(\"***********************************\")\n raise RuntimeError(('Workflow did not execute cleanly. '\n 'Check log for details'))", "def test_validate_invalid_transition_result_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.transitions['q0']['.'] = {('q4', '.', 'R')}\n self.ntm1.validate()", "def error(self, run_id: str, messages: List[str] = None) -> WorkflowState:\n state = self.runs[run_id].error(messages=messages)\n self.runs[run_id] = state\n return state", "def test_validate_invalid_final_state_non_str(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.final_states = {4}\n self.ntm1.validate()", "def test_validate_invalid_transition_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.ntm1.transitions['q4'] = self.ntm1.transitions['q0']\n self.ntm1.validate()", "def showErrors(self):\n self.log.error('There were {0} errors encountered while executing all operations:'.format(len(self.error_list)))\n for i, error in enumerate(self.error_list):\n self.log.error('[{0}] {1}'.format(i, error))", "def log_state(self):\n pass", "def test_batch_state_stopping_after_error(self):\n\n minions_list = []\n retcode = None\n\n # Executing salt with batch: 1 and with failhard. It should stop after the first error.\n cmd = self.run_salt(\n '\"*minion\" state.single test.fail_without_changes name=test_me -b 1'\n \" --out=yaml --failhard\",\n timeout=self.run_timeout,\n )\n\n # Parsing the output. 
Idea is to fetch number on minions and retcode of the execution.\n # retcode var could be overwritten in case of broken failhard but number of minions check should still fail.\n for line in cmd:\n if line.startswith(\"Executing run on\"):\n minions_list.append(line)\n if line.startswith(\"retcode\"):\n retcode = line[-1]\n # We expect to have only one minion to be run\n self.assertEqual(1, len(minions_list))\n # We expect to find a retcode in the output\n self.assertIsNot(None, retcode)\n # We expect retcode to be non-zero\n self.assertNotEqual(0, retcode)", "def test_unknown_mode(self):\r\n from natcap.invest.ui import usage_logger\r\n\r\n logging_server = usage_logger.LoggingServer()\r\n\r\n sample_data = dict(\r\n (key_field, key_field) for key_field in\r\n usage_logger.LoggingServer._LOG_FIELD_NAMES)\r\n\r\n with self.assertRaises(ValueError):\r\n logging_server.log_invest_run(sample_data, 'bad_mode')", "def transition_error(self):\n next_states = self.states.next_states(self.previous_state)\n warnings.warn(\n (\"Transition from {self.previous_state!r} to {self.current_state!r} is\"\n \" not valid. Expected one of {next_states!r}\").format(self=self, next_states=next_states),\n StateTransitionWarning)", "def check_exceptions(self):\n if self.exc_counter:\n lines = self._lines\n self._lines = []\n exc_counter = self.exc_counter\n self.exc_counter = 0\n last_exc = self.last_exc\n self.last_exc = 0\n\n self._logger.critical(\"The following unhandled exceptions where raised during this test's execution:\")\n for line in lines:\n self._logger.critical(line)\n\n raise Exception(\"Test raised %d unhandled exceptions, last one was: %s\" % (exc_counter, last_exc))", "def run_failed_tests(self):\n self.reset_rollback_importer()\n test_suite = unittest.TestSuite()\n for node in self.model.node_lookup.values():\n if isinstance(node.test, unittest.TestCase) and node.get_status() in {\n TestStatus.fail,\n TestStatus.error,\n }:\n mayaunittest.get_tests(test=node.path(), test_suite=test_suite)\n self.output_console.clear()\n self.model.run_tests(self.stream, test_suite)", "def give_error_module(self, module):\n for fs in self.flow_segments:\n if (fs.start_pos[0] != module.position[0]\n or fs.start_pos[1] != module.position[1]) and (\n fs.end_pos[0] != module.position[0]\n or fs.end_pos[1] != module.position[1]):\n continue\n\n fs.state = State.ERROR", "def crash(self):\n self.terminated = True\n print (\"The virtual machine entered an erroneous state and is terminating.\")\n print (\"Register values at termination:\")\n for ri, r in enumerate(vm.r):\n print (\" r%u = %x\" % (ri, r.v))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accepts a string and returns a list of strings of numeric LSB runlevels.
def GetRunlevelsLSB(states):
    if not states:
        return set()
    valid = set(["0", "1", "2", "3", "4", "5", "6"])
    _LogInvalidRunLevels(states, valid)
    return valid.intersection(set(states.split()))
[ "def GetRunlevelsNonLSB(states):\n if not states:\n return set()\n convert_table = {\n \"0\": \"0\",\n \"1\": \"1\",\n \"2\": \"2\",\n \"3\": \"3\",\n \"4\": \"4\",\n \"5\": \"5\",\n \"6\": \"6\",\n # SysV, Gentoo, Solaris, HP-UX all allow an alpha variant\n # for single user. https://en.wikipedia.org/wiki/Runlevel\n \"S\": \"1\",\n \"s\": \"1\"\n }\n _LogInvalidRunLevels(states, convert_table)\n return set([convert_table[s] for s in states.split() if s in convert_table])", "def __get_bin_list(string):\n return [1 if str(c).isupper() else 0 for c in string]", "def getGRIBlevels(grib,shortname='v'):\n levels = []\n for message in grib:\n if message.shortName == shortname:\n if message.typeOfLevel == \"isobaricInhPa\":\n levels.append(message.level)\n levels = np.unique(levels)\n return levels", "def translate_version_str2list(version_str, depth=2):\n if version_str is None:\n ver = depth * [0, ]\n else:\n ver = []\n for i in version_str.split(\".\")[:depth]:\n try:\n i = int(i)\n except:\n i = 0\n ver.append(i)\n return ver", "def get_runlevel():\n out = subprocess.check_output(['runlevel']).decode('UTF-8')\n lines = out.rstrip().split('\\n')\n assert len(lines) == 1\n fields = lines[0].split()\n assert len(fields) == 2\n return int(fields[1])", "def rlist_to_int(string):\r\n\r\n rlist = 0\r\n string = re.sub(r\"r|R\", \"\", string)\r\n for m in re.finditer(r\"[^,]+\", string):\r\n args = m.group().split(\"-\")\r\n if len(args) == 1: lo,hi = args*2\r\n else: lo,hi = args\r\n rlist |= 2**(int(hi) + 1) - 2**int(lo)\r\n return rlist", "def lsb_from_list_bin (l, n):\n result = []\n for i in l:\n i = set8bit(i)\n value = get_lsb(i, n)\n result.append(str(value))\n return result", "def LoadLevels():\n tree = ElementTree.parse(STANDARD_LEVELS_FILENAME)\n root = tree.getroot()\n levels = []\n \n for levelElement in root.findall('level'):\n levels.append(LoadLevel(levelElement))\n return levels", "def lsb_from_image (filename, n):\n list_lsb = []\n image_target = image.open(filename).convert('L')\n image_array = np.array(image_target)\n\n for i in range(len(image_array)):\n for j in range(len(image_array[0])):\n binn = set8bit(dec2bin(image_array[i,j]))\n value = get_lsb(str(binn), n)\n list_lsb.append(str(value))\n return list_lsb", "def GetLevelIDs(element):\r\n ids = []\r\n for levelIDElement in element.findall('level'):\r\n ids.append(int(levelIDElement.text))\r\n return ids", "def convert_digit_str_to_list(digit_str):\n return [int(d) for d in digit_str]", "def get_logging_levels() -> List[LoggingLevel]:\n return list(LEVEL_MAP.keys()) # type: ignore[arg-type]", "def lsb_from_list_int (l, n):\n result = []\n for i in l:\n i = set8bit(dec2bin(i))\n value = get_lsb(i, n)\n result.append(str(value))\n return result", "def levels():\r\n print()\r\n print('There are two level features:')\r\n print(' 1. High Level')\r\n print(' 2. 
Low Level')\r\n print()\r\n while True:\r\n type_level = input('Which level feature are you interested in seeing?'\r\n '(Type 1 or 2): ')\r\n if type_level == '1' or type_level == '2':\r\n return int(type_level)\r\n else:\r\n print('INVALID ENTRY')\r\n print()", "def parse_runlevel(timer_state: texus_relay.TimerState) -> runlevels.Runlevel:\n level = 0\n if timer_state[TimerEffect.BIT_O]:\n level += 1\n if timer_state[TimerEffect.BIT_1]:\n level += 2\n if timer_state[TimerEffect.BIT_2]:\n level += 4\n return runlevels.Runlevel(level)", "def get_enum_list(filename, search_string):\n # Get the text of the file\n with open(filename, 'r') as f:\n text = f.read()\n\n return get_enum_names(text, search_string)", "def string_to_bitlist(data: ByteString) -> List[int]:\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n\n return result", "def level_nums(bt: BinaryTree) -> list:\n if bt is None:\n return [0]\n\n level = 0\n per_level = []\n num_items = items_at_level(bt, level)\n per_level.append(num_items) if num_items != 0 else None\n while num_items > 0:\n level += 1\n num_items = items_at_level(bt, level)\n per_level.append(num_items) if num_items != 0 else None\n\n return per_level\n\n # level = 0\n # num_items = []\n # items = items_at_level(bt, level)\n # if items != 0:\n # num_items.append(items)\n # while items != 0:\n # level += 1\n # items = items_at_level(bt, level)\n # if items != 0:\n # num_items.append(items)\n #\n # return num_items", "def LoadIDs(element):\n ids = []\n for levelIDElement in element.findall('level'):\n ids.append(int(levelIDElement.text))\n return ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Accepts a string and returns a list of strings of numeric LSB runlevels.
def GetRunlevelsNonLSB(states):
    if not states:
        return set()
    convert_table = {
        "0": "0",
        "1": "1",
        "2": "2",
        "3": "3",
        "4": "4",
        "5": "5",
        "6": "6",
        # SysV, Gentoo, Solaris, HP-UX all allow an alpha variant
        # for single user. https://en.wikipedia.org/wiki/Runlevel
        "S": "1",
        "s": "1"
    }
    _LogInvalidRunLevels(states, convert_table)
    return set([convert_table[s] for s in states.split() if s in convert_table])
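A quick illustration of how the two runlevel helpers above behave (illustrative only; it assumes the functions, their _LogInvalidRunLevels helper, and the logging module are importable — "runlevels" is a made-up module name for this sketch):

from runlevels import GetRunlevelsLSB, GetRunlevelsNonLSB  # hypothetical module name

GetRunlevelsLSB("2 3 4 5")     # -> {'2', '3', '4', '5'}
GetRunlevelsLSB(None)          # -> set()
GetRunlevelsNonLSB("S 2 3")    # -> {'1', '2', '3'}  ("S"/"s" are treated as single user)
GetRunlevelsNonLSB("9")        # -> set(), and a warning is logged for the invalid level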
[ "def GetRunlevelsLSB(states):\n if not states:\n return set()\n valid = set([\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\"])\n _LogInvalidRunLevels(states, valid)\n return valid.intersection(set(states.split()))", "def __get_bin_list(string):\n return [1 if str(c).isupper() else 0 for c in string]", "def getGRIBlevels(grib,shortname='v'):\n levels = []\n for message in grib:\n if message.shortName == shortname:\n if message.typeOfLevel == \"isobaricInhPa\":\n levels.append(message.level)\n levels = np.unique(levels)\n return levels", "def translate_version_str2list(version_str, depth=2):\n if version_str is None:\n ver = depth * [0, ]\n else:\n ver = []\n for i in version_str.split(\".\")[:depth]:\n try:\n i = int(i)\n except:\n i = 0\n ver.append(i)\n return ver", "def get_runlevel():\n out = subprocess.check_output(['runlevel']).decode('UTF-8')\n lines = out.rstrip().split('\\n')\n assert len(lines) == 1\n fields = lines[0].split()\n assert len(fields) == 2\n return int(fields[1])", "def rlist_to_int(string):\r\n\r\n rlist = 0\r\n string = re.sub(r\"r|R\", \"\", string)\r\n for m in re.finditer(r\"[^,]+\", string):\r\n args = m.group().split(\"-\")\r\n if len(args) == 1: lo,hi = args*2\r\n else: lo,hi = args\r\n rlist |= 2**(int(hi) + 1) - 2**int(lo)\r\n return rlist", "def lsb_from_list_bin (l, n):\n result = []\n for i in l:\n i = set8bit(i)\n value = get_lsb(i, n)\n result.append(str(value))\n return result", "def LoadLevels():\n tree = ElementTree.parse(STANDARD_LEVELS_FILENAME)\n root = tree.getroot()\n levels = []\n \n for levelElement in root.findall('level'):\n levels.append(LoadLevel(levelElement))\n return levels", "def lsb_from_image (filename, n):\n list_lsb = []\n image_target = image.open(filename).convert('L')\n image_array = np.array(image_target)\n\n for i in range(len(image_array)):\n for j in range(len(image_array[0])):\n binn = set8bit(dec2bin(image_array[i,j]))\n value = get_lsb(str(binn), n)\n list_lsb.append(str(value))\n return list_lsb", "def GetLevelIDs(element):\r\n ids = []\r\n for levelIDElement in element.findall('level'):\r\n ids.append(int(levelIDElement.text))\r\n return ids", "def convert_digit_str_to_list(digit_str):\n return [int(d) for d in digit_str]", "def get_logging_levels() -> List[LoggingLevel]:\n return list(LEVEL_MAP.keys()) # type: ignore[arg-type]", "def lsb_from_list_int (l, n):\n result = []\n for i in l:\n i = set8bit(dec2bin(i))\n value = get_lsb(i, n)\n result.append(str(value))\n return result", "def levels():\r\n print()\r\n print('There are two level features:')\r\n print(' 1. High Level')\r\n print(' 2. 
Low Level')\r\n print()\r\n while True:\r\n type_level = input('Which level feature are you interested in seeing?'\r\n '(Type 1 or 2): ')\r\n if type_level == '1' or type_level == '2':\r\n return int(type_level)\r\n else:\r\n print('INVALID ENTRY')\r\n print()", "def parse_runlevel(timer_state: texus_relay.TimerState) -> runlevels.Runlevel:\n level = 0\n if timer_state[TimerEffect.BIT_O]:\n level += 1\n if timer_state[TimerEffect.BIT_1]:\n level += 2\n if timer_state[TimerEffect.BIT_2]:\n level += 4\n return runlevels.Runlevel(level)", "def get_enum_list(filename, search_string):\n # Get the text of the file\n with open(filename, 'r') as f:\n text = f.read()\n\n return get_enum_names(text, search_string)", "def string_to_bitlist(data: ByteString) -> List[int]:\n l = len(data) * 8\n result = [0] * l\n pos = 0\n for ch in data:\n i = 7\n while i >= 0:\n if ch & (1 << i) != 0:\n result[pos] = 1\n else:\n result[pos] = 0\n pos += 1\n i -= 1\n\n return result", "def level_nums(bt: BinaryTree) -> list:\n if bt is None:\n return [0]\n\n level = 0\n per_level = []\n num_items = items_at_level(bt, level)\n per_level.append(num_items) if num_items != 0 else None\n while num_items > 0:\n level += 1\n num_items = items_at_level(bt, level)\n per_level.append(num_items) if num_items != 0 else None\n\n return per_level\n\n # level = 0\n # num_items = []\n # items = items_at_level(bt, level)\n # if items != 0:\n # num_items.append(items)\n # while items != 0:\n # level += 1\n # items = items_at_level(bt, level)\n # if items != 0:\n # num_items.append(items)\n #\n # return num_items", "def LoadIDs(element):\n ids = []\n for levelIDElement in element.findall('level'):\n ids.append(int(levelIDElement.text))\n return ids" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dump the classifier 'cls' to a pickle named 'fn'.
def _dump_cls(self, cls, fn):
    w = gzip.open(fn, 'wb')
    cPickle.dump(cls, w, 1)
    w.close()
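A matching loader would simply reverse the gzip + cPickle steps; the sketch below assumes the snippet above runs on Python 2 (where cPickle exists), and _load_cls is not part of the original record:

import gzip
import cPickle  # Python 2 module, matching the dump code above

def _load_cls(fn):
    # Read back a classifier written by _dump_cls (a gzip-compressed pickle).
    r = gzip.open(fn, 'rb')
    try:
        return cPickle.load(r)
    finally:
        r.close()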
[ "def saveClassifier(filename, clf):\n with open(filename, 'wb') as fid:\n cPickle.dump(clf, fid)", "def save_classifier(self,filename=\"classifier.pickle\"):\n with open(filename,\"w\") as f:\n pickle.dump(self.classifier,f)", "def classifier_save(self, classifier, path=\"../../../datasets/multinominal_nb_classifier\"):\n \n with open(path, 'wb') as f:\n \n pickle.dump(classifier, f)", "def classifier_save(self, path=\"../../../datasets/logistic_regression_classifier\"):\n \n with open(path, 'wb') as f:\n pickle.dump(self.classifier, f)", "def serialize_fn(fn):\n try:\n return pickle.dumps(fn), SerializationType(\"pickle\")\n except (pickle.PickleError, AttributeError):\n if DILL_AVAILABLE:\n return dill.dumps(fn, recurse=True), SerializationType(\"dill\")\n return pickle.dumps(fn), SerializationType(\"pickle\")", "def pickleClassifier(file_name):\n feature_vectors = pd.read_csv(file_name,index_col = 0, parse_dates = True)\n feature_vectors = feature_vectors.rename(columns = {'target':'gesture'})\n msk = np.random.rand(len(feature_vectors)) < TRAIN_PROPORTION\n train = feature_vectors[msk]\n test = feature_vectors[~msk]\n # build classifier\n classifier = RandomForestClassifier(n_estimators=100, max_features=3)\n classifier = classifier.fit(train[FEATURE_VECTOR_COLUMNS],train['gesture'])\n # score classifier\n mean_accuracy = classifier.score(test[FEATURE_VECTOR_COLUMNS],test['gesture'])\n print('Mean Accuracy:' + str(mean_accuracy) + '\\n')\n # save pickled classifier\n pickle_file = open( file_name.replace('.csv', '-classifier.p'), \"wb\")\n pickle.dump(classifier, pickle_file)\n pickle_file.close()", "def pickle_main(f_name, pickle_source, do_pickle, instance = None):\n \n if do_pickle and instance is not None:\n \n \"if given an instance. save it as a class dictionary pickle\"\n print(f\"Pickling file to {f_name}\") \n pickler(instance.__dict__, pickle_source, f_name)\n return\n \n else:\n file = depickler(pickle_source, f_name)\n print(f\"Loading pickle {f_name}\")\n \"try loading the specified file as a class dict. 
else an instance.\"\n if type(file) == dict:\n \"removes old ukf function in memory\"\n \n \n instance = class_dict_to_instance(file)\n else: \n instance = file\n \n return instance", "def dump(self):\n import pickle as pkl\n \n filename = self.create_output_name(step=self.time_step_count)\n filename = filename.replace('.bin','.pkl')\n file = open(filename, \"wb\")\n pkl.dump(self, file)\n file.close()", "def pickle_processor(processor, outfile, **kwargs):\n # pylint: disable=unused-argument\n processor.dump(outfile)", "def guardar(self):\n pickle_out = open(\"X.pickle\", \"wb\")\n pickle.dump(self.features, pickle_out)\n pickle_out.close()\n\n pickle_out = open(\"Y.pickle\", \"wb\")\n pickle.dump(self.labels, pickle_out)\n pickle_out.close()", "def exporter(obj, filename):\n start_time = time.time()\n\n # If the file directory does not exist create it.\n file_directory = os.path.dirname(os.path.abspath(filename))\n if not os.path.isdir(file_directory):\n logging.debug(\"Creating pickle export directory \\\"%s\\\".\" % file_directory)\n os.makedirs(file_directory)\n\n logging.info(\"Beginning pickle EXPORT of file: \\\"\" + filename + \"\\\"\")\n # Dump pickle to the file.\n f = open(filename, 'w')\n pickle.dump(obj, f)\n f.close()\n\n logging.info(\"Completed pickle EXPORT to file: \\\"\" + filename + \"\\\"\")\n print_elapsed_time(start_time, \"pickle EXPORT of file: \\\"\" + filename + \"\\\"\")", "def save_pickled(self, obj, filename):\n path = os.path.join(pickle_dir, filename)\n with open(path, 'wb+') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(dataset: 'FuseDatasetBase', mode: SaveMode, filename: str) -> None:\n # get instance version to save\n dataset_to_save = dataset.get_instance_to_save(mode)\n\n # save this instance\n with open(filename, 'wb') as pickle_file:\n pickle.dump(dataset_to_save, pickle_file)", "def to_pickle(self): # pragma: no cover\n raise NotImplementedError(\n \"Pickling is not implemented for FunctionNode. \"\n \"Consider subclassing flowpipe.node.INode to pickle nodes.\"\n )", "def loadClassifier(filename):\n with open(filename, 'rb') as fid:\n return cPickle.load(fid)", "def serialize(obj, file):\n\tpickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)", "def pickle_obj(obj: Any, filename: str) -> None:\n with open(filename, 'wb') as f:\n pickle.dump(obj, f)", "def pickle_dump(what, file):\n with open(file, 'wb') as f:\n pickle.dump(what, f)", "def save_field(self, fname):\n\n import cPickle\n\n with open(fname, \"wb\") as f:\n cPickle.dump(self.__dict__, f, protocol = cPickle.HIGHEST_PROTOCOL)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if pan is in the current limits
def _is_in_pan_bounds(self, pan): return self.neck_pan_bounds[0] <= pan and self.neck_pan_bounds[1] >= pan
[ "def inside_bounds(self, point):\n return all(mn <= p <= mx for p, (mn, mx) in zip(point, self.bounds))", "def CanEast(self):\n return (not self.HWLimit) or (self.LimOverride and self.WestLim)", "def within_limits(self):\n within_limit = True\n\n for lux_sensor, limit in self.lightlevel.items():\n current_lightlevel = float(self.get_state(lux_sensor))\n if current_lightlevel > limit:\n within_limit = False\n self.log('Light level beyond limit')\n break\n\n return within_limit", "def _is_in_tilt_bounds(self, tilt):\n return self.neck_tilt_bounds[0] <= tilt and self.neck_tilt_bounds[1] >= tilt", "def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)", "def in_bounds(p):\n x, y = p\n return x >= 0 and x < SCREEN_WIDTH and y >= 0 and y < SCREEN_HEIGHT", "def _is_in_bounds(self, joints):\n return self._is_in_pan_bounds(joints[0]) and self._is_in_tilt_bounds(joints[1])", "def isBound(self):\n return self.__bound > 0", "def is_in_bounds(self, pos):\n\n x, y = pos\n map_width, map_height = self.dimensions\n\n in_bounds = x >= 0 and x < map_width\n in_bounds = in_bounds and y >= 0 and y < map_height\n\n return in_bounds", "def has_rect_limits(self) -> bool:\n return all(space.has_rect_limits for space in self.spaces)", "def _bounds_violated(self,loc):\n if self.x_bounds[0]: \n if (self.x_bounds[0] >= loc[0]): return True\n if self.x_bounds[1]: \n if (loc[0] >= self.x_bounds[1]): return True\n if self.y_bounds[0]: \n if (self.y_bounds[0] >= loc[1]): return True\n if self.y_bounds[1]: \n if (loc[1] >= self.y_bounds[1]): return True\n if self.z_bounds[0]: \n if (self.z_bounds[0] >= loc[2]): return True\n if self.z_bounds[1]: \n if (loc[2] >= self.z_bounds[1]): return True\n else:\n return False", "def in_bounds(bounds_array):\n\tglobal click_points\n\tif click_points[0] > bounds_array[0] and click_points[0] < bounds_array[1] and click_points[1] > bounds_array[2] and click_points[1] < bounds_array[3]:\n\t\treturn True\n\telse:\n\t\treturn False", "def CanWest(self):\n return (not self.HWLimit) or (self.LimOverride and self.EastLim)", "def in_view(self):\n \n bbox = self.bbox()\n area = self.parent.canvas.get_visible_area()\n\n y1, y2 = bbox[1], bbox[3]\n v1, v2 = area[1], area[3]\n\n return (y1 > v1 and y2 < v2)", "def is_in_bounds(self, loc):\n in_bounds = (0 <= loc[0] < self.world.width()) and (0 <= loc[1] < self.world.height())\n return in_bounds", "def _is_inside(self, obj_name):\n self.sim.forward()\n self.sim.step()\n min_pos, max_pos = self._get_bounding_box(obj_name)\n b = self._config.cursor_boundary\n if (min_pos < np.array([-b, -b, -0.05])).any() or (\n max_pos > np.array([b, b, b])\n ).any():\n return False\n return True", "def isin(self,x,y):\n if x>=self.xmin and x<self.xmax and y>=self.ymin and y<self.ymax:\n return True\n else:\n return False", "def is_within_bounds(bounds, point):\n point = np.array(point)\n if point.shape != (bounds.shape[0],):\n return False\n above_lb = np.all((point - bounds[:, 0] >= 0))\n below_ub = np.all((bounds[:, 1] - point >= 0))\n return above_lb * below_ub", "def in_bounds(self, coord):\n coord_x = coord[0]\n coord_y = coord[1]\n return (0 <= coord_x < self.dim\n and 0 <= coord_y < self.dim)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if tilt is in the current limits
def _is_in_tilt_bounds(self, tilt): return self.neck_tilt_bounds[0] <= tilt and self.neck_tilt_bounds[1] >= tilt
[ "def within_limits(self):\n within_limit = True\n\n for lux_sensor, limit in self.lightlevel.items():\n current_lightlevel = float(self.get_state(lux_sensor))\n if current_lightlevel > limit:\n within_limit = False\n self.log('Light level beyond limit')\n break\n\n return within_limit", "def larger_than_min_t(self):\n total_t = len(self.__detections)\n return total_t >= self.__min_t, total_t", "def checkTiling(tiling, bounds=defaultBounds):\n\tb = True\n\tfor tile in tiling:\n\t\tc = tile.getCenter()\n\t\tx, y = c.get(X_AXIS), c.get(Y_AXIS)\n\t\tif (x < bounds[0]) or (x > bounds[2]) or (y < bounds[1]) or (y > bounds[3]):\n\t\t\tb = False\n\t\t\tprint \"Tile out-of-bounds: %s\" % tile\n\tif not b:\n\t\tprint \"This tiling goes out of the bounds = %s\" % bounds\n\treturn b", "def has_tearing(self, cut1=0.05, cut2=-0.01, nsig=1, ntear_min=10):\n ntear = 0\n for amp in self:\n rstats1, rstats2 = self[amp].rstats\n if (rstats1.diff - cut1 > nsig*rstats1.error and\n rstats2.diff - cut2 > nsig*rstats2.error):\n ntear += 1\n if (rstats1.diff - cut2 > nsig*rstats1.error and\n rstats2.diff - cut1 > nsig*rstats2.error):\n ntear += 1\n return ntear > ntear_min", "def has_rect_limits(self) -> bool:\n return all(space.has_rect_limits for space in self.spaces)", "def CanEast(self):\n return (not self.HWLimit) or (self.LimOverride and self.WestLim)", "def in_bounds(self, t):\n return And(self.x(t) >= 0, self.x(t) < self.grid.width,\n self.y(t) >= 0, self.y(t) < self.grid.height)", "def tile_coords_valid(self, tc):\n return (tc.q >= self._min_coords.q and tc.q <= self._max_coords.q and\n tc.r >= self._min_coords.r and tc.r <= self._max_coords.r)", "def loses(self):\n return not (-2.5 < self.x < 2.5 and -0.262 < self.phi < 0.262)", "def isWithinFeatureLimits(self):\n\n if not self.config.limit_to_features:\n return True # No limits. 
Run it.\n\n # Check the requirements as-is (#1)\n if self.getMissingRequiredFeatures():\n return False\n\n # Check the requirements after removing the limiting features (#2)\n featuresMinusLimits = [f for f in self.config.available_features\n if not f in self.config.limit_to_features]\n if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):\n return False\n\n return True", "def _is_in_pan_bounds(self, pan):\n return self.neck_pan_bounds[0] <= pan and self.neck_pan_bounds[1] >= pan", "def _bounds_violated(self,loc):\n if self.x_bounds[0]: \n if (self.x_bounds[0] >= loc[0]): return True\n if self.x_bounds[1]: \n if (loc[0] >= self.x_bounds[1]): return True\n if self.y_bounds[0]: \n if (self.y_bounds[0] >= loc[1]): return True\n if self.y_bounds[1]: \n if (loc[1] >= self.y_bounds[1]): return True\n if self.z_bounds[0]: \n if (self.z_bounds[0] >= loc[2]): return True\n if self.z_bounds[1]: \n if (loc[2] >= self.z_bounds[1]): return True\n else:\n return False", "def isinview(self):\n term = getsession().terminal\n return (self.xloc > 0 and self.xloc +self.width -1 <= term.width\n and self.yloc > 0 and self.yloc +self.height -1 <= term.height)", "def CanWest(self):\n return (not self.HWLimit) or (self.LimOverride and self.EastLim)", "def on_target_area(self, loc):\n return self.target_area[0] <= loc[0] < self.target_area[0] + self.target_area[2] \\\n and self.target_area[1] <= loc[1] < self.target_area[1] + self.target_area[3]", "def is_met_a_bound(self, maze, distance_from_wall):\n # generate a number between -1 to 1\n if uniform(0, 1) > 0.65:\n for lidar in self.lidars:\n if len(lidar.detected_list) < lidar.radius // 2:\n return True\n else:\n if len(self.lidars[0].detected_list) < self.lidars[0].radius // 2:\n return True\n return False", "def is_fallen(self):\n orientation = self.minitaur.GetBaseOrientation()\n rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)\n local_up = rot_mat[6:]\n _, _, height = self.minitaur.GetBasePosition()\n local_global_up_dot_product = np.dot(np.asarray([0, 0, 1]), np.asarray(local_up))\n return local_global_up_dot_product < 0.85 or height < 0.15", "def verify(self):\n for i in self.coords:\n if np.abs(6*i-int(6*i))>0.1: return False\n if np.abs(self.coords[2]+self.coords[0]+self.coords[1]) > 0.1: return False\n return True", "def _is_in_bounds(self, joints):\n return self._is_in_pan_bounds(joints[0]) and self._is_in_tilt_bounds(joints[1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if the joints are in the current limits
def _is_in_bounds(self, joints): return self._is_in_pan_bounds(joints[0]) and self._is_in_tilt_bounds(joints[1])
[ "def check_joint_limit(self, curve, info):\n low_mask = (curve < self.joint_lower_limit - 5e-3).any()\n high_mask = curve > self.joint_upper_limit + 5e-3\n over_joint_limit = (low_mask * high_mask).any() #\n info[\"violate_limit\"] = over_joint_limit\n info[\"terminate\"] = info[\"terminate\"] and (not over_joint_limit)", "def within_limits(self):\n within_limit = True\n\n for lux_sensor, limit in self.lightlevel.items():\n current_lightlevel = float(self.get_state(lux_sensor))\n if current_lightlevel > limit:\n within_limit = False\n self.log('Light level beyond limit')\n break\n\n return within_limit", "def inside_bounds(self, point):\n return all(mn <= p <= mx for p, (mn, mx) in zip(point, self.bounds))", "def has_rect_limits(self) -> bool:\n return all(space.has_rect_limits for space in self.spaces)", "def _is_inside(self, obj_name):\n self.sim.forward()\n self.sim.step()\n min_pos, max_pos = self._get_bounding_box(obj_name)\n b = self._config.cursor_boundary\n if (min_pos < np.array([-b, -b, -0.05])).any() or (\n max_pos > np.array([b, b, b])\n ).any():\n return False\n return True", "def is_bound(self):\n # TODO: make this a function\n\n return len(self.edges()) > 0", "def isjoint(self) -> bool:\n return True", "def isBound(self):\n return self.__bound > 0", "def _bounds_violated(self,loc):\n if self.x_bounds[0]: \n if (self.x_bounds[0] >= loc[0]): return True\n if self.x_bounds[1]: \n if (loc[0] >= self.x_bounds[1]): return True\n if self.y_bounds[0]: \n if (self.y_bounds[0] >= loc[1]): return True\n if self.y_bounds[1]: \n if (loc[1] >= self.y_bounds[1]): return True\n if self.z_bounds[0]: \n if (self.z_bounds[0] >= loc[2]): return True\n if self.z_bounds[1]: \n if (loc[2] >= self.z_bounds[1]): return True\n else:\n return False", "def _admissible(self, x: np.ndarray) -> bool:\n return np.all(x <= self.ub) and np.all(x >= self.lb)", "def on_target_area(self, loc):\n return self.target_area[0] <= loc[0] < self.target_area[0] + self.target_area[2] \\\n and self.target_area[1] <= loc[1] < self.target_area[1] + self.target_area[3]", "def in_bounds(pos: Position, size: Position) -> bool:\n (i, j) = pos\n (max_i, max_j) = size\n return 0 <= i < max_i and 0 <= j < max_j", "def is_met_a_bound(self, maze, distance_from_wall):\n # generate a number between -1 to 1\n if uniform(0, 1) > 0.65:\n for lidar in self.lidars:\n if len(lidar.detected_list) < lidar.radius // 2:\n return True\n else:\n if len(self.lidars[0].detected_list) < self.lidars[0].radius // 2:\n return True\n return False", "def is_fallen(self):\n orientation = self.minitaur.GetBaseOrientation()\n rot_mat = self._pybullet_client.getMatrixFromQuaternion(orientation)\n local_up = rot_mat[6:]\n _, _, height = self.minitaur.GetBasePosition()\n local_global_up_dot_product = np.dot(np.asarray([0, 0, 1]), np.asarray(local_up))\n return local_global_up_dot_product < 0.85 or height < 0.15", "def in_bounds(self, coord):\n coord_x = coord[0]\n coord_y = coord[1]\n return (0 <= coord_x < self.dim\n and 0 <= coord_y < self.dim)", "def isWithinFeatureLimits(self):\n\n if not self.config.limit_to_features:\n return True # No limits. 
Run it.\n\n # Check the requirements as-is (#1)\n if self.getMissingRequiredFeatures():\n return False\n\n # Check the requirements after removing the limiting features (#2)\n featuresMinusLimits = [f for f in self.config.available_features\n if not f in self.config.limit_to_features]\n if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):\n return False\n\n return True", "def intersectsAny (self):\r\n intersects = False\r\n \r\n # we do this manually so we don't have to go through all of them\r\n \r\n for widget in self.parent.widgets:\r\n if (widget != self) and (self.intersects(widget)):\r\n intersects = True\r\n break\r\n \r\n #Enforce positive coordinates\r\n if not 'Twine.hide' in self.passage.tags:\r\n if ((self.pos[0] < 0) or (self.pos[1] < 0)):\r\n intersects = True\r\n break\r\n\r\n return intersects", "def _check_if_within(self, obstacle):\n # type: (obstacleMsg) -> bool\n uav_pos = np.array(\n [self.uav_pose.pose.position.x, self.uav_pose.pose.position.y, self.uav_pose.pose.position.z])\n obs_pos = np.array(obstacle.pose[:3])\n return np.linalg.norm((uav_pos, obs_pos)) <= self.radius", "def on_corner(self):\n for c in self.corners:\n if abs(c[0]-self.position[0]) <= EPSILON and abs(c[1]-self.position[1]) <= EPSILON:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change direction of the pan movement
def change_pan_direction(self): self.neck_pan_delta *= -1
[ "def change_tilt_direction(self):\n self.neck_tilt_delta *= -1", "def set_direction(self, direction):", "def pan(self, angle):\n self.__send_servo_command(self.__CMD_PAN, angle)", "def move(self, direction):\n if direction == Direction.north:\n self.y -= 1\n elif direction == Direction.west:\n self.x -= 1\n elif direction == Direction.south:\n self.y += 1\n elif direction == Direction.east:\n self.x += 1", "def set_pan_tilt(self, pan: int, tilt: int) -> None:\n # Send EasyRemote update_element event for this pan tilt control\n # with the given pan and tilt values.\n self.er.s.sendto((f\"action=update_element&id={self.id}\"\n f\"&page={self.page}&value={pan},{tilt}\"\n \"&type=pt&event=up\").encode(), self.er.addr)", "def move(self, direction: str):\n if direction == \"left\":\n if self.x > 0:\n self.x -= 1\n if direction == \"right\":\n if self.x < self.xlim:\n self.x += 1\n if direction == \"down\":\n if self.y < self.ylim:\n self.y += 1\n if direction == \"up\":\n if self.y > 0:\n self.y -= 1", "def move_mirror(self, direction, axis, speed=1):\n self.electronics.move_piezo(speed, direction, axis)", "def __change_direction(self):\n\n self.current_direction = self.next_direction", "def pan_gesture(self, dx, dy):\n return False", "def move(self):\n if self.direction == \"n\":\n self.position = (self.position[0]-1, self.position[1])\n\n elif self.direction == \"s\":\n self.position = (self.position[0]+1, self.position[1])\n\n elif self.direction == \"e\":\n self.position = (self.position[0], self.position[1]+1)\n\n elif self.direction == \"w\":\n self.position = (self.position[0], self.position[1]-1)", "def update_direction(self):\n self.direction += normal(0,.05,2)\n self.direction = (self.direction / np.linalg.norm(self.direction)) * self.speed", "def move(self):\n self.steps += 1\n direction = uniform(0, 1)\n if direction < 0.5:\n self.position -= 1\n else:\n self.position += 1", "def tilt_pan_head(self, pan = 0, tilt = 0, duration = 2): #input is in degrees\n pan = pan * np.pi / 180\n tilt = tilt * np.pi / 180\n \n point = JointTrajectoryPoint()\n point.positions = [pan, tilt]\n point.time_from_start = rospy.Duration(duration)\n goal = FollowJointTrajectoryGoal()\n\n goal.trajectory.joint_names = [self.PAN_JOINT, self.TILT_JOINT]\n goal.trajectory.points.append(point)\n self.client.send_goal(goal)\n self.client.wait_for_result()", "def change_image_direction(self, direction):\n if direction != self.current_direction:\n old_direction = self.current_direction.angle_modifier\n new_direction = direction.angle_modifier\n \n #And rotate the image:\n self.image = pygame.transform.rotate(self.image , 90 * -(new_direction - old_direction))\n self.current_direction = direction\n \n self._create_line(old_direction)", "def rotate(self, dir='CW'):\n if dir == 'CW':\n self.right_motor.setVelocity(-0.5*self.MAX_SPEED)\n self.left_motor.setVelocity(0.5*self.MAX_SPEED)\n else:\n self.right_motor.setVelocity(0.5*self.MAX_SPEED)\n self.left_motor.setVelocity(-0.5*self.MAX_SPEED)", "def resolve_direction(self):\n while self.direction < 0:\n self.direction += 360\n self.direction %= 360", "def direction_but_pressed(self):\n if self.power_on:\n if self.direction == 'forward':\n self.config_direction('reverse')\n else:\n self.config_direction('forward')\n\n self.throttle_frame.set_direction(self.direction )", "def _change_direction(self):\n if self._current_direction == Enemy.LEFT:\n self._current_direction = Enemy.RIGHT\n else:\n self._current_direction = Enemy.LEFT\n if self._current_speed < 
self._max_speed:\n self._current_speed += 0.1\n for enemy in self._entities:\n enemy.set_movement(self._current_direction, self._current_speed)\n enemy.advance(self._advance_speed)", "def do_pan_view(self, dx, dy):\n auto = self.autoReplot()\n self.setAutoReplot(False)\n axes_to_update = self.get_axes_to_update(dx, dy)\n axis_ids_vertical = (self.get_axis_id(\"left\"), self.get_axis_id(\"right\"))\n\n for (x1, x0, _start, _width), axis_id in axes_to_update:\n lbound, hbound = self.get_axis_limits(axis_id)\n i_lbound = self.transform(axis_id, lbound)\n i_hbound = self.transform(axis_id, hbound)\n delta = x1 - x0\n vmin = self.invTransform(axis_id, i_lbound - delta)\n vmax = self.invTransform(axis_id, i_hbound - delta)\n # patch for not zooming into \"negative space\" ;) :\n if axis_id in axis_ids_vertical:\n vmin = 0\n if vmax < 0:\n vmax = -vmax\n self.set_axis_limits(axis_id, vmin, vmax)\n\n self.setAutoReplot(auto)\n # the signal MUST be emitted after replot, otherwise\n # we receiver won't see the new bounds (don't know why?)\n self.replot()\n self.emit(SIG_PLOT_AXIS_CHANGED, self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Change direction of the tilt movement
def change_tilt_direction(self): self.neck_tilt_delta *= -1
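Taken together, the bounds checks and direction flips in the last few records suggest a simple back-and-forth sweep; the sketch below is hypothetical (current_pan and the step method are assumptions, not part of any record):

def step_pan(self):
    # Advance the pan by one delta, reversing direction when the next
    # position would leave the configured limits.
    next_pan = self.current_pan + self.neck_pan_delta
    if not self._is_in_pan_bounds(next_pan):
        self.change_pan_direction()
        next_pan = self.current_pan + self.neck_pan_delta
    self.current_pan = next_pan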
[ "def levelTilt(self):\n self.tiltAngle = 0", "def tilt(self, angle):\n self.__send_servo_command(self.__CMD_TILT, angle)", "def set_direction(self, direction):", "def tilt(self, angle):\n self.tiltAngle += angle\n tiltBound = 90 # Tilt restriction (degrees)\n if self.tiltAngle >= tiltBound: self.tiltAngle = tiltBound # Tilt angle upward limit is 90 degrees\n elif self.tiltAngle <= -tiltBound: self.tiltAngle = -tiltBound # Tilt angle downward limit is -90 degrees", "def update_direction(self):\n self.direction += normal(0,.05,2)\n self.direction = (self.direction / np.linalg.norm(self.direction)) * self.speed", "def __change_direction(self):\n\n self.current_direction = self.next_direction", "def change_direction(self):\n randomNum = randint(1, 340)\n \n if randomNum % 5 == 0:\n # Switch the duck's direction\n if self.direction == self.RIGHT:\n self.direction = self.LEFT\n self.dx = -.5\n \n else:\n self.direction = self.RIGHT\n self.dx = .5\n \n # Decide if it will fly straight or not\n randomNum = randint(1, 340)\n \n if randomNum % 5 == 0:\n # Change duck to straight or up\n self.straight = not self.straight", "def _change_direction(self):\n if self._current_direction == Enemy.LEFT:\n self._current_direction = Enemy.RIGHT\n else:\n self._current_direction = Enemy.LEFT\n if self._current_speed < self._max_speed:\n self._current_speed += 0.1\n for enemy in self._entities:\n enemy.set_movement(self._current_direction, self._current_speed)\n enemy.advance(self._advance_speed)", "def change_image_direction(self, direction):\n if direction != self.current_direction:\n old_direction = self.current_direction.angle_modifier\n new_direction = direction.angle_modifier\n \n #And rotate the image:\n self.image = pygame.transform.rotate(self.image , 90 * -(new_direction - old_direction))\n self.current_direction = direction\n \n self._create_line(old_direction)", "def spin_clockwise(self):\n self.twist.linear.x = 0.0\n self.twist.angular.z = self.angular_speed", "def change_pan_direction(self):\n self.neck_pan_delta *= -1", "def resolve_direction(self):\n while self.direction < 0:\n self.direction += 360\n self.direction %= 360", "def rotate(self, dir='CW'):\n if dir == 'CW':\n self.right_motor.setVelocity(-0.5*self.MAX_SPEED)\n self.left_motor.setVelocity(0.5*self.MAX_SPEED)\n else:\n self.right_motor.setVelocity(0.5*self.MAX_SPEED)\n self.left_motor.setVelocity(-0.5*self.MAX_SPEED)", "def move(self):\n self.steps += 1\n direction = uniform(0, 1)\n if direction < 0.5:\n self.position -= 1\n else:\n self.position += 1", "def direction_but_pressed(self):\n if self.power_on:\n if self.direction == 'forward':\n self.config_direction('reverse')\n else:\n self.config_direction('forward')\n\n self.throttle_frame.set_direction(self.direction )", "def spin_counter_clockwise(self):\n self.twist.linear.x = 0.0\n self.twist.angular.z = -self.angular_speed", "def move_clockwise(self):\n self.jerry_turtle.forward(50)\n self.jerry_turtle.right(20)", "def change_fleet_direction(ai_settings, lynels):\n for lynel in lynels.sprites():\n lynel.rect.y += ai_settings.horde_drop_speed\n ai_settings.horde_direction *= -1", "def rotate(dir, speed):\n\tsensorMotor.run_direct(duty_cycle_sp=dir*speed)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for upserting samples using API; data: data of response
def upsert(self, data):
    url = '/samples/upsert'
    return post(url, data)
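The post helper used by upsert (and by upsert_bulk in the next record) is not shown in either record; a minimal sketch of what it might look like, assuming a requests-based client with a configured base URL (every name below is an assumption for illustration):

import requests

BASE_URL = "https://api.example.com"  # placeholder, not taken from the record

def post(url, data):
    # POST the payload as JSON to the API and return the parsed response body.
    response = requests.post(BASE_URL + url, json=data)
    response.raise_for_status()
    return response.json()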
[ "def upsert_bulk(self, data):\n url = '/samples/upsert/bulk'\n return post(url, data)", "def bulk_upsert(self, docs):\n\n for doc in docs:\n index = doc[\"ns\"] \n doc[\"_time\"] = doc[\"_id\"].generation_time\n \n service = self.getConnection()\n\n source = index.split(\".\")\n index_name = index.replace(\"_\",\"-\").replace(\".\",\"_\").lower()\n # Check index presence\n if index_name not in service.indexes:\n service.indexes.create(index_name)\n # Index the source document \n index = service.indexes[index_name]\n with index.attached_socket(sourcetype='json', source=source[0], host=\"abacus\") as sock:\n sock.send(dumps(doc, sort_keys=True)) \n \n if not doc:\n raise errors.EmptyDocsError(\n \"Cannot upsert an empty sequence of \"\n \"documents into Splunk\") \n return", "def update_sample_record(data):\n session = controller.connect_to_database()\n record = session.query(Sample).filter_by(id=data[\"id\"]).one()\n\n record = Sample()\n record.sample = data[\"sample\"]\n record.panel = data[\"panel\"]\n record.sample_taken = data[\"sample_taken\"]\n record.genotyping = data[\"genotyping\"]\n record.variant_calling = data[\"variant_calling\"]\n record.qc_status = data[\"qc_status\"]\n record.qc_report = data[\"qc_report\"]\n record.coverage = data[\"coverage\"]\n\n session.commit()\n session.close()", "def _create_samples(session, match):\n all_samples = defaultdict(data_models.Sample)\n project_id = match.get('project_id')\n project_status = match.get('project_status', 'open')\n sample_id = match.get('sample_id')\n sample_time_since = match.get('createddate')\n process_limit_date = match.get('process_limit_date')\n detailed = request.args.get('detailed') in ['true', 'True', True]\n if detailed:\n list_process_complete = None\n list_process_queued = None\n else:\n list_process_complete = list(status_cfg.step_completed_to_status) \\\n + list(status_cfg.additional_step_completed) \\\n + list(status_cfg.library_type_step_completed) \\\n + status_cfg.started_steps\n list_process_queued = status_cfg.step_queued_to_status\n udfs_to_fields = {\n 'Prep Workflow': 'planned_library',\n 'Species': 'species',\n 'Required Yield (Gb)': 'required_yield',\n 'Coverage (X)': 'coverage'\n }\n for result in queries.get_sample_info(session, project_id, sample_id, project_status=project_status,\n time_since=sample_time_since, udfs=list(udfs_to_fields)):\n (pjct_name, sample_name, container, wellx, welly, udf_name, udf_value) = result\n s = all_samples[sanitize_user_id(sample_name)]\n s.sample_name = sanitize_user_id(sample_name)\n s.project_name = pjct_name\n s.plate_name = container\n s.original_name = sample_name\n if udf_name in udfs_to_fields:\n setattr(all_samples[sanitize_user_id(sample_name)], udfs_to_fields[udf_name], udf_value)\n\n for result in queries.get_samples_and_processes(session, project_id, sample_id, project_status=project_status,\n workstatus='COMPLETE', list_process=list_process_complete,\n time_since=sample_time_since, process_limit_date=process_limit_date):\n (pjct_name, sample_name, process_name, process_status, date_run, process_id) = result\n all_samples[sanitize_user_id(sample_name)].add_completed_process(process_name, date_run, process_id)\n\n for result in queries.get_sample_in_queues_or_progress(\n session, project_id, sample_id, list_process=list_process_queued,\n time_since=sample_time_since, project_status=project_status, process_limit_date=process_limit_date):\n pjct_name, sample_name, process_name, queued_date, queue_id, process_id, process_date = result\n if not 
process_id:\n all_samples[sanitize_user_id(sample_name)].add_queue_location(process_name, queued_date, queue_id)\n else:\n all_samples[sanitize_user_id(sample_name)].add_inprogress(process_name, process_date, process_id)\n\n return all_samples.values()", "def update(self, doc, update_spec):\n \n doc = dict(doc.items() + update_spec.items())\n index = doc[\"ns\"]\n doc[\"_time\"] = doc[\"_id\"].generation_time\n\n service = self.getConnection()\n\n source = index.split(\".\")\n index_name = index.replace(\"_\",\"-\").replace(\".\",\"_\").lower()\n # Check index presence\n if index_name not in service.indexes:\n service.indexes.create(index_name) \n # Index the source document\n index = service.indexes[index_name]\n with index.attached_socket(sourcetype='json', source=source[0], host=\"abacus\") as sock:\n sock.send(dumps(doc, sort_keys=True)) \n print \"Updation successful\"\n if not doc:\n raise errors.EmptyDocsError(\n \"Cannot upsert an empty sequence of \"\n \"documents into Splunk\") \n return", "def updateSampleValue(self, key, sampleIds, value):\n results = []\n for sampleId in sampleIds: \n # get current value\n result = _runSql(\"select {} from samples where sample_id=%s and dataset_id=%s\".format(key), (sampleId, self.datasetId))\n results.append(_runSql(\"update samples set {}=%s where dataset_id=%s and sample_id=%s;\".format(key), (value, self.datasetId, sampleId,), type=\"update\")) \n value_from = result[0][0] if len(result)>0 else None\n\n print(\"New Value\", value)\n print(\"Original\", value_from)\n print(\"Updated: \", results)\n \n return {\"Updated\": value, \"Original\": value_from}", "def upsert(self, kind: VersionedDataKind, item: dict):", "def update_test(self, test_id, values):", "async def set_data_in_db(self):\n try:\n result = await self._data_table.bulk_write(self._data[0], ordered=False)\n print('Insertion result %s' % repr(result.bulk_api_result))\n except pymongo.errors.BulkWriteError as bwe:\n result = bwe.details", "def test_species_name_collected_api_update(self):\n # create record\n project = self.project_1\n client = self.custodian_1_client\n schema = self.schema_with_name_id_and_species_name()\n dataset = self._create_dataset_with_schema(\n project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION\n )\n record_data = {\n 'Name Id': 25454, # \"Canis lupus\"\n 'Species Name': 'Chubby Bat',\n 'When': '12/12/2017',\n 'Latitude': -32.0,\n 'Longitude': 115.756\n }\n payload = {\n 'dataset': dataset.pk,\n 'data': record_data\n }\n url = reverse('api:record-list')\n resp = client.post(url, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n record = Record.objects.filter(id=resp.json().get('id')).first()\n self.assertIsNotNone(record)\n self.assertEqual(record.name_id, 25454)\n self.assertEqual(record.species_name, \"Canis lupus\")\n # TODO: the species name in the data is not updated. 
Should we?\n self.assertEqual(record.data.get('Species Name'), 'Chubby Bat')\n\n # patch Name Id\n new_name_id = 24204\n record_data['Name Id'] = new_name_id\n expected_species_name = 'Vespadelus douglasorum'\n url = reverse('api:record-detail', kwargs={'pk': record.pk})\n payload = {\n 'data': record_data\n }\n resp = client.patch(url, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n record.refresh_from_db()\n self.assertEqual(record.name_id, new_name_id)\n self.assertEqual(record.species_name, expected_species_name)", "def update_daily_measurements(data):\n for i in data:\n key = {'_id': i['_id']}\n db.device_daily_measurements.replace_one(key, i, upsert=True)", "def test_species_name_collected_api_update(self):\n # create record\n project = self.project_1\n client = self.custodian_1_client\n schema = self.schema_with_name_id()\n dataset = self._create_dataset_with_schema(\n project, self.data_engineer_1_client, schema, dataset_type=Dataset.TYPE_SPECIES_OBSERVATION\n )\n record_data = {\n 'Name Id': 25454, # \"Canis lupus\"\n 'When': '12/12/2017',\n 'Latitude': -32.0,\n 'Longitude': 115.756\n }\n payload = {\n 'dataset': dataset.pk,\n 'data': record_data\n }\n url = reverse('api:record-list')\n resp = client.post(url, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n record = Record.objects.filter(id=resp.json().get('id')).first()\n self.assertIsNotNone(record)\n self.assertEqual(record.name_id, 25454)\n self.assertEqual(record.species_name, \"Canis lupus\")\n\n # patch Name Id\n new_name_id = 24204\n record_data['Name Id'] = new_name_id\n expected_species_name = 'Vespadelus douglasorum'\n url = reverse('api:record-detail', kwargs={'pk': record.pk})\n payload = {\n 'data': record_data\n }\n resp = client.patch(url, payload, format='json')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n record.refresh_from_db()\n self.assertEqual(record.name_id, new_name_id)\n self.assertEqual(record.species_name, expected_species_name)", "def _insert_helper(list_of_docs: List[Dict], api: MongoAPI) -> None:\n api.batch_insert(list_of_docs)\n list_of_docs.clear()", "def insert_data(api_call, timestamp):\n location = api_call[\"name\"]\n temp = api_call[\"main\"][\"temp\"]\n utcdt = api_call[\"dt\"]\n condition = [item[\"main\"] for item in api_call[\"weather\"]]\n\n with contextlib.closing(sqlite3.connect(\"weather_test1.db\")) as cursor:\n # cursor.execute(\"INSERT INTO weather VALUES (\"\n cursor.execute(\n \"INSERT OR IGNORE INTO weather VALUES (\"\n \":location, :temp, :conditions, :utc_epoch, :local)\",\n {\n \"location\": location,\n \"temp\": temp,\n \"conditions\": condition[0],\n \"utc_epoch\": utcdt,\n \"local\": timestamp,\n },\n )\n cursor.commit()", "def test_upsert_without_additional_request_headers(self):\n responses.add(\n responses.PATCH,\n re.compile(r'^https://.*/Case/some-case-id$'),\n body='{}',\n status=http.OK\n )\n\n sf_type = _create_sf_type()\n result = sf_type.upsert(\n record_id='some-case-id',\n data={'some': 'data'}\n )\n\n self.assertEqual(result, http.OK)", "def generate_and_insert_data( inputs ):\n global gpudb_ingestor\n\n batch_size, num_batches = inputs\n\n my_id = int(random.random() * 100)\n\n null_percentage = 0.1\n alphanum = (string.ascii_letters + string.digits)\n\n # Nested loop\n # Outer loop controls how many batches of records are added to the ingestor\n for i in range(0, num_batches):\n print (\"thread {_id:>5} outer loop: {i:>5}\".format( _id = my_id, i = i ))\n records = []\n 
# Inner loop generated records for this batch\n for j in range(0, batch_size):\n _i_plus_j = (i + j)\n record = collections.OrderedDict()\n record[ \"i1\" ] = i * j\n record[ \"i2\" ] = random.randint( -_i_plus_j, _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"i8\" ] = random.randint( -128, 127 ) if (random.random() >= null_percentage) else None\n record[ \"i16\" ] = random.randint( -32768, 32767 ) if (random.random() >= null_percentage) else None\n record[ \"d1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"f1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"l1\" ] = (random.randint( 0,_i_plus_j ) * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"timestamp\" ] = random.randint( -30610239758979, 29379542399999 ) if (random.random() >= null_percentage) else None\n record[ \"s1\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 2, 200 ) )] )\n record[ \"date\" ] = None if (random.random() < null_percentage) \\\n else strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" )\n record[ \"datetime\" ] = None if (random.random() < null_percentage) \\\n else ( strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" ) \\\n + \" \"\n + ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) )\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"decimal\" ] = None if (random.random() < null_percentage) \\\n else ( str( random.randint( -922337203685477, 922337203685477 ) )\n + \".\" + str( random.randint( 0, 9999 ) ) )\n record[ \"ipv4\" ] = None if (random.random() < null_percentage) \\\n else '.'.join( [ str( random.randint( 0, 255 ) ) for n in range(0, 4)] )\n record[ \"time\" ] = None if (random.random() < null_percentage) \\\n else ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) \\\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"c1\" ] = None if (random.random() < null_percentage) \\\n else random.choice( alphanum )\n record[ \"c2\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 2 ) )] )\n record[ \"c4\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 4 ) )] )\n record[ \"c8\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 8 ) )] )\n record[ \"c16\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 16 ) )] )\n record[ \"c32\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 32 ) )] )\n record[ \"c64\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 64 ) )] )\n record[ \"c128\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( 
alphanum ) for n in range( 0, random.randint( 0, 128 ) )] )\n record[ \"c256\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 256 ) )] )\n\n # Add the record to the list of records\n records.append( record )\n # end for loop\n\n # Add the records to the ingestor\n gpudb_ingestor.insert_records( records )\n # end generating data\n\n\n # Need to flush here since the gpudb_ingestor of the parent\n # thread won't get this child thread's state\n gpudb_ingestor.flush()", "def upsert(self, config):\n\n scoring = ScoringFactory.create({**config, **{\"tokenizer\": {\"alphanum\": True, \"stopwords\": True}}})\n scoring.upsert(self.data)\n\n # Test count\n self.assertEqual(scoring.count(), len(self.data))\n\n # Test stop word is removed\n self.assertFalse(\"and\" in scoring.idf)", "def populate_call_table(connection, patient_ids):\n samples_cursor = connection.cursor()\n samples_query = (\"SELECT sample_id FROM samples\")\n samples_cursor.execute(samples_query)\n sample_ids = [sid[0] for sid in samples_cursor]\n samples_cursor.close()\n\n sample_map = {sid:pid for sid, pid in zip(sample_ids, patient_ids)}\n\n add_call = (\"INSERT INTO calls \"\n \"(variant_id, sample_id, patient_id) \"\n \"VALUES (%s, %s, %s)\")\n\n variants_cursor = connection.cursor(buffered=True)\n variants_query = (\"SELECT variant_id, vcf_id, gt_types FROM variants\")\n variants_cursor.execute(variants_query)\n rows = variants_cursor.fetchall() # this isn't scalable\n variants_cursor.close()\n\n valid_set = set([HET_ALT, HOM_ALT])\n\n entries = []\n execute_batch_size = 100\n commit_batch_size = 1000\n\n tot = 0\n n_to_commit = 0\n calls_cursor = connection.cursor()\n\n for variant_id, vcf_id, gts, in rows:\n full_gts = snappy_unpack_blob(gts)\n nsamps = len([gt for gt in full_gts if gt in valid_set])\n\n for (sample_id, gt) in zip(sample_ids, full_gts):\n if gt in valid_set and sample_id in sample_map:\n entries.append((variant_id, sample_id, sample_map[sample_id]))\n if len(entries) >= execute_batch_size:\n calls_cursor.executemany(add_call, entries)\n n_to_commit += len(entries)\n tot += len(entries)\n entries = []\n\n if n_to_commit >= commit_batch_size:\n connection.commit()\n n_to_commit = 0\n print(tot)\n \n if entries:\n calls_cursor.executemany(add_call, entries)\n connection.commit()\n\n calls_cursor.close()", "def put(self, projectid, sampleid):\n sample = self.get_sample(projectid, sampleid)\n try:\n data = json.loads(self.request.body)\n except Exception as msg:\n self.send_error(400, reason=str(msg))\n else:\n try:\n with self.saver(doc=sample, rqh=self) as saver:\n saver.store(data=data)\n except ValueError as msg:\n self.send_error(400, reason=str(msg))\n except IOError as msg:\n self.send_error(409, reason=str(msg))\n else:\n self.set_status(204)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function for bulk upserting samples using API; data: data of response
def upsert_bulk(self, data):
    url = '/samples/upsert/bulk'
    return post(url, data)
[ "def bulk_upsert(self, docs):\n\n for doc in docs:\n index = doc[\"ns\"] \n doc[\"_time\"] = doc[\"_id\"].generation_time\n \n service = self.getConnection()\n\n source = index.split(\".\")\n index_name = index.replace(\"_\",\"-\").replace(\".\",\"_\").lower()\n # Check index presence\n if index_name not in service.indexes:\n service.indexes.create(index_name)\n # Index the source document \n index = service.indexes[index_name]\n with index.attached_socket(sourcetype='json', source=source[0], host=\"abacus\") as sock:\n sock.send(dumps(doc, sort_keys=True)) \n \n if not doc:\n raise errors.EmptyDocsError(\n \"Cannot upsert an empty sequence of \"\n \"documents into Splunk\") \n return", "def upsert(self, data):\n\t\turl = '/samples/upsert'\n\t\treturn post(url, data)", "def _insert_helper(list_of_docs: List[Dict], api: MongoAPI) -> None:\n api.batch_insert(list_of_docs)\n list_of_docs.clear()", "def update(self, docs):\n documents = []\n for doc in docs:\n if isinstance(doc, dict):\n documents.append(doc)\n elif hasattr(doc, \"items\"):\n documents.append(dict(doc.items()))\n else:\n raise TypeError(\"expected dict, got %s\" % type(doc))\n\n response = self.server._POST(\n self.name,\n \"_bulk_docs\",\n data=json.dumps({\"docs\": documents}),\n headers={\"Content-Type\": JSON_MIME},\n )\n\n data = response.json()\n results = []\n for result in data:\n if \"error\" in result:\n results.append((False, result[\"id\"], result[\"error\"], result[\"reason\"]))\n else:\n results.append((True, result[\"id\"], result[\"rev\"]))\n return results", "async def set_data_in_db(self):\n try:\n result = await self._data_table.bulk_write(self._data[0], ordered=False)\n print('Insertion result %s' % repr(result.bulk_api_result))\n except pymongo.errors.BulkWriteError as bwe:\n result = bwe.details", "def populate_call_table(connection, patient_ids):\n samples_cursor = connection.cursor()\n samples_query = (\"SELECT sample_id FROM samples\")\n samples_cursor.execute(samples_query)\n sample_ids = [sid[0] for sid in samples_cursor]\n samples_cursor.close()\n\n sample_map = {sid:pid for sid, pid in zip(sample_ids, patient_ids)}\n\n add_call = (\"INSERT INTO calls \"\n \"(variant_id, sample_id, patient_id) \"\n \"VALUES (%s, %s, %s)\")\n\n variants_cursor = connection.cursor(buffered=True)\n variants_query = (\"SELECT variant_id, vcf_id, gt_types FROM variants\")\n variants_cursor.execute(variants_query)\n rows = variants_cursor.fetchall() # this isn't scalable\n variants_cursor.close()\n\n valid_set = set([HET_ALT, HOM_ALT])\n\n entries = []\n execute_batch_size = 100\n commit_batch_size = 1000\n\n tot = 0\n n_to_commit = 0\n calls_cursor = connection.cursor()\n\n for variant_id, vcf_id, gts, in rows:\n full_gts = snappy_unpack_blob(gts)\n nsamps = len([gt for gt in full_gts if gt in valid_set])\n\n for (sample_id, gt) in zip(sample_ids, full_gts):\n if gt in valid_set and sample_id in sample_map:\n entries.append((variant_id, sample_id, sample_map[sample_id]))\n if len(entries) >= execute_batch_size:\n calls_cursor.executemany(add_call, entries)\n n_to_commit += len(entries)\n tot += len(entries)\n entries = []\n\n if n_to_commit >= commit_batch_size:\n connection.commit()\n n_to_commit = 0\n print(tot)\n \n if entries:\n calls_cursor.executemany(add_call, entries)\n connection.commit()\n\n calls_cursor.close()", "def generate_and_insert_data( inputs ):\n global gpudb_ingestor\n\n batch_size, num_batches = inputs\n\n my_id = int(random.random() * 100)\n\n null_percentage = 0.1\n alphanum = 
(string.ascii_letters + string.digits)\n\n # Nested loop\n # Outer loop controls how many batches of records are added to the ingestor\n for i in range(0, num_batches):\n print (\"thread {_id:>5} outer loop: {i:>5}\".format( _id = my_id, i = i ))\n records = []\n # Inner loop generated records for this batch\n for j in range(0, batch_size):\n _i_plus_j = (i + j)\n record = collections.OrderedDict()\n record[ \"i1\" ] = i * j\n record[ \"i2\" ] = random.randint( -_i_plus_j, _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"i8\" ] = random.randint( -128, 127 ) if (random.random() >= null_percentage) else None\n record[ \"i16\" ] = random.randint( -32768, 32767 ) if (random.random() >= null_percentage) else None\n record[ \"d1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"f1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"l1\" ] = (random.randint( 0,_i_plus_j ) * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"timestamp\" ] = random.randint( -30610239758979, 29379542399999 ) if (random.random() >= null_percentage) else None\n record[ \"s1\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 2, 200 ) )] )\n record[ \"date\" ] = None if (random.random() < null_percentage) \\\n else strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" )\n record[ \"datetime\" ] = None if (random.random() < null_percentage) \\\n else ( strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" ) \\\n + \" \"\n + ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) )\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"decimal\" ] = None if (random.random() < null_percentage) \\\n else ( str( random.randint( -922337203685477, 922337203685477 ) )\n + \".\" + str( random.randint( 0, 9999 ) ) )\n record[ \"ipv4\" ] = None if (random.random() < null_percentage) \\\n else '.'.join( [ str( random.randint( 0, 255 ) ) for n in range(0, 4)] )\n record[ \"time\" ] = None if (random.random() < null_percentage) \\\n else ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) \\\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"c1\" ] = None if (random.random() < null_percentage) \\\n else random.choice( alphanum )\n record[ \"c2\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 2 ) )] )\n record[ \"c4\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 4 ) )] )\n record[ \"c8\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 8 ) )] )\n record[ \"c16\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 16 ) )] )\n record[ \"c32\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 32 ) )] 
)\n record[ \"c64\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 64 ) )] )\n record[ \"c128\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 128 ) )] )\n record[ \"c256\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 256 ) )] )\n\n # Add the record to the list of records\n records.append( record )\n # end for loop\n\n # Add the records to the ingestor\n gpudb_ingestor.insert_records( records )\n # end generating data\n\n\n # Need to flush here since the gpudb_ingestor of the parent\n # thread won't get this child thread's state\n gpudb_ingestor.flush()", "def _create_samples(session, match):\n all_samples = defaultdict(data_models.Sample)\n project_id = match.get('project_id')\n project_status = match.get('project_status', 'open')\n sample_id = match.get('sample_id')\n sample_time_since = match.get('createddate')\n process_limit_date = match.get('process_limit_date')\n detailed = request.args.get('detailed') in ['true', 'True', True]\n if detailed:\n list_process_complete = None\n list_process_queued = None\n else:\n list_process_complete = list(status_cfg.step_completed_to_status) \\\n + list(status_cfg.additional_step_completed) \\\n + list(status_cfg.library_type_step_completed) \\\n + status_cfg.started_steps\n list_process_queued = status_cfg.step_queued_to_status\n udfs_to_fields = {\n 'Prep Workflow': 'planned_library',\n 'Species': 'species',\n 'Required Yield (Gb)': 'required_yield',\n 'Coverage (X)': 'coverage'\n }\n for result in queries.get_sample_info(session, project_id, sample_id, project_status=project_status,\n time_since=sample_time_since, udfs=list(udfs_to_fields)):\n (pjct_name, sample_name, container, wellx, welly, udf_name, udf_value) = result\n s = all_samples[sanitize_user_id(sample_name)]\n s.sample_name = sanitize_user_id(sample_name)\n s.project_name = pjct_name\n s.plate_name = container\n s.original_name = sample_name\n if udf_name in udfs_to_fields:\n setattr(all_samples[sanitize_user_id(sample_name)], udfs_to_fields[udf_name], udf_value)\n\n for result in queries.get_samples_and_processes(session, project_id, sample_id, project_status=project_status,\n workstatus='COMPLETE', list_process=list_process_complete,\n time_since=sample_time_since, process_limit_date=process_limit_date):\n (pjct_name, sample_name, process_name, process_status, date_run, process_id) = result\n all_samples[sanitize_user_id(sample_name)].add_completed_process(process_name, date_run, process_id)\n\n for result in queries.get_sample_in_queues_or_progress(\n session, project_id, sample_id, list_process=list_process_queued,\n time_since=sample_time_since, project_status=project_status, process_limit_date=process_limit_date):\n pjct_name, sample_name, process_name, queued_date, queue_id, process_id, process_date = result\n if not process_id:\n all_samples[sanitize_user_id(sample_name)].add_queue_location(process_name, queued_date, queue_id)\n else:\n all_samples[sanitize_user_id(sample_name)].add_inprogress(process_name, process_date, process_id)\n\n return all_samples.values()", "def post_bulk(posts):\n\n # Bulk insert\n ###############################\n result=col.insert_many(posts) #\n ###############################", "def test_api_v3_stories_bulk_put(self):\n pass", "def bulkUpdateData(self, docs, existingDocs):\n if isinstance(docs, 
dict):\n docs = [docs]\n for chunk in grouper(docs, 100):\n for doc in chunk:\n if doc['_id'] in existingDocs:\n revList = existingDocs[doc['_id']].split('-')\n # update the revision number and keep the history of the revision\n doc['_revisions'] = {\"start\": int(revList[0]) + 1, \"ids\": [str(int(revList[1]) + 1), revList[1]]}\n else:\n # then create a random 10 digits uid for the first revision number, required by new_edits=False\n firstId = \"%10d\" % random.randrange(9999999999)\n doc['_revisions'] = {\"start\": 1, \"ids\": [firstId]}\n self.couchDB.queue(doc)\n\n logging.info(\"Committing bulk of %i docs ...\", len(chunk))\n self.couchDB.commit(new_edits=False)\n return", "def insert_bulk(self, items):\n docs = []\n for item in items:\n doc = search.Document(\n fields=[\n search.TextField(name='name', value=item['name']),\n search.TextField(name='sku', value=item['sku']),\n ]\n )\n docs.append(doc)\n self.client.put(docs)", "def bulk_create_sample(self, *sample_posts):\n if len(sample_posts) > InventoryClient.MAX_BULK:\n raise ValueError(\n f\"Max permitted samples is {InventoryClient.MAX_BULK} but was {len(sample_posts)}\"\n )\n toPost = [s.data for s in sample_posts]\n bulk_post = {\"operationType\": \"CREATE\", \"records\": toPost}\n return self._do_bulk(bulk_post)", "def execute_bulk_updates(self, bulk_updates):\n for field, values in bulk_updates.items():\n for value, ids in values.items():\n self.model_context.model.objects.filter(**{\"%s__in\" % self.model_context.pk: ids}).update(**{field: value})\n self.records_processed += 1", "def elastic_save_data(records):\r\n bulk = \"\"\r\n for record in records:\r\n meta = {\"update\": {\"_index\": ES_INDX, \"_id\": record[\"id\"], \"_source\": True}}\r\n bulk += json.dumps(meta) + \"\\n\"\r\n bulk += '{ \"doc\": ' + json.dumps(record) + ', \"doc_as_upsert\": true }\\n'\r\n\r\n log.debug(\"saving files: {}\".format(len(files)))\r\n response = elastic.post(ES_BASE + \"/_bulk\", data=bulk)\r\n log.debug(\"response text: {} {}\".format(response.status_code, response.reason))\r\n\r\n return len(files)", "def update(self, doc, update_spec):\n \n doc = dict(doc.items() + update_spec.items())\n index = doc[\"ns\"]\n doc[\"_time\"] = doc[\"_id\"].generation_time\n\n service = self.getConnection()\n\n source = index.split(\".\")\n index_name = index.replace(\"_\",\"-\").replace(\".\",\"_\").lower()\n # Check index presence\n if index_name not in service.indexes:\n service.indexes.create(index_name) \n # Index the source document\n index = service.indexes[index_name]\n with index.attached_socket(sourcetype='json', source=source[0], host=\"abacus\") as sock:\n sock.send(dumps(doc, sort_keys=True)) \n print \"Updation successful\"\n if not doc:\n raise errors.EmptyDocsError(\n \"Cannot upsert an empty sequence of \"\n \"documents into Splunk\") \n return", "def update_daily_measurements(data):\n for i in data:\n key = {'_id': i['_id']}\n db.device_daily_measurements.replace_one(key, i, upsert=True)", "def batch_upload(uploader, samples, group_uuid=None, upload_group_name=None):\n if group_uuid is None:\n current_time = datetime.now().isoformat()\n if upload_group_name is None:\n upload_group_name = f'upload_group_{current_time}'\n try:\n result = uploader.knex.get(f'/api/v1/sample_groups/getid/{upload_group_name}')\n group_uuid = result['data']['sample_group_uuid']\n except HTTPError:\n group_uuid = uploader.create_sample_group(upload_group_name)\n click.echo(f'group created: <name: \\'{upload_group_name}\\' UUID: \\'{group_uuid}\\'>')\n\n try:\n 
results = uploader.upload_all_results(group_uuid, samples)\n except HTTPError as error:\n click.echo('Could not create Sample', err=True)\n click.echo(error, err=True)\n\n if results:\n click.echo('Upload results:')\n for result in results:\n sample_uuid = result['sample_uuid']\n sample_name = result['sample_name']\n result_type = result['result_type']\n\n if result['type'] == 'error':\n exception = result['exception']\n click.secho(f' - {sample_name} ({sample_uuid}): {result_type}',\n fg='red', err=True)\n click.secho(f' {exception}', fg='red', err=True)\n else:\n click.secho(f' - {sample_name} ({sample_uuid}): {result_type}', fg='green')\n click.echo(f'group info: <name: \\'{upload_group_name}\\' UUID: \\'{group_uuid}\\'>')", "def updateSampleValue(self, key, sampleIds, value):\n results = []\n for sampleId in sampleIds: \n # get current value\n result = _runSql(\"select {} from samples where sample_id=%s and dataset_id=%s\".format(key), (sampleId, self.datasetId))\n results.append(_runSql(\"update samples set {}=%s where dataset_id=%s and sample_id=%s;\".format(key), (value, self.datasetId, sampleId,), type=\"update\")) \n value_from = result[0][0] if len(result)>0 else None\n\n print(\"New Value\", value)\n print(\"Original\", value_from)\n print(\"Updated: \", results)\n \n return {\"Updated\": value, \"Original\": value_from}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Displays notifications. Optionally pass in your own attributes to override the defaults in the class.
def display(
    self,
    notifications: List[Notification],
    attributes: Optional[List[List[str]]] = None,
):
    if len(notifications) < 1:
        self._write_stdout(self._no_notifications_msg)
        return
    if attributes is None:
        attributes = self._default_attributes
    attributes = self._remove_attributes_for_terminal_size(attributes)
    headers, fields = self._unpack_attributes(attributes)
    n_table = [[n.get(field) for field in fields] for n in notifications]
    self._display_table(headers, n_table)
[ "def render(self, notification):\n raise NotImplementedError()", "def notify(self, title, message, icon_data=None):\n \n print \"[\" + title + \"]\"\n print message\n print", "def notify(self, **kwargs):\n self.notifiers.notify(**kwargs)", "def showEvent(self, event):\r\n super(Notification, self).showEvent(event)\r\n width, pgeo = self._parent.width(), self._parent.geometry()\r\n conditional_vertical = settings.NOTIFICATION_POSITION in (0, 1)\r\n conditional_horizontal = settings.NOTIFICATION_POSITION in (0, 2)\r\n x = pgeo.left() if conditional_horizontal else pgeo.right()\r\n y = (pgeo.bottom() - self.height()\r\n if conditional_vertical else pgeo.top())\r\n self.setFixedWidth(width)\r\n self.setGeometry(x, y, self.width(), self.height())\r\n background_color = str(settings.NOTIFICATION_COLOR)\r\n foreground_color = str(settings.NOTIFICATION_COLOR).lower().translate(\r\n maketrans('0123456789abcdef', 'fedcba9876543210'))\r\n self._root.setColor(background_color, foreground_color)\r\n self._root.start(self._duration)", "def __init__(self, **kwargs):\n #from cbutils.worker import config # AllanC - HACK!!! could be worker config or pylons config - import here is bad - we need a better way of doing this\n if config and config.get('feature.notifications', True): # Only generate messages if notifications enabled - required to bypass requireing the internationalisation module to be activated\n linked = {}\n for key in kwargs:\n if hasattr(kwargs[key], \"__link__\"):\n linked[key] = HTML.a(unicode(kwargs[key]), href=kwargs[key].__link__())\n else:\n linked[key] = HTML.span(unicode(kwargs[key])) # the span here is for security, it does HTML escaping\n if 'you' not in linked:\n linked['you'] = 'you'\n if 'your' not in linked:\n if linked['you'] == 'you':\n linked['your'] = 'your'\n else:\n linked['your'] = linked['you']+\"'s\"\n \n try:\n self.subject = unicode(self.subject) % linked\n self.content = unicode(self.content) % linked\n except ValueError as e:\n log.error(\"Error formatting message: %s: %s [%s]\" % (self.subject, self.content, linked))\n raise\n else:\n self.subject = u'notification generation diabled'\n self.content = u'notification generation diabled'", "def _build_notification(title, message, details, level):\n notification = ui.Notification(title, message, details, level)\n ui.NOTIFICATION_PANEL.notify(notification)", "def get_notification_html(*, notification_type: str, options: Dict, sender: str) -> str:\n validate_options(options=options)\n\n url_base = app.config['FRONTEND_BASE']\n resource_url = '{url_base}{resource_path}?source=notification'.format(resource_path=options.get('resource_path'),\n url_base=url_base)\n joined_chars = resource_url[len(url_base) - 1:len(url_base) + 1]\n if joined_chars.count('/') != 1:\n raise Exception('Configured \"FRONTEND_BASE\" and \"resource_path\" do not form a valid url')\n\n notification_strings = NOTIFICATION_STRINGS.get(notification_type)\n if notification_strings is None:\n raise Exception('Unsupported notification_type')\n\n greeting = 'Hello,<br/>'\n notification = notification_strings.get('notification', '').format(resource_url=resource_url,\n resource_name=options.get('resource_name'),\n sender=sender)\n comment = notification_strings.get('comment', '')\n end_note = notification_strings.get('end_note', '')\n salutation = '<br/>Thanks,<br/>Amundsen Team'\n\n if notification_type == NotificationType.METADATA_REQUESTED:\n options_comment = options.get('comment')\n need_resource_description = options.get('description_requested')\n 
need_fields_descriptions = options.get('fields_requested')\n\n if need_resource_description and need_fields_descriptions:\n notification = notification + 'and requests improved table and column descriptions.<br/>'\n elif need_resource_description:\n notification = notification + 'and requests an improved table description.<br/>'\n elif need_fields_descriptions:\n notification = notification + 'and requests improved column descriptions.<br/>'\n else:\n notification = notification + 'and requests more information about that resource.<br/>'\n\n if options_comment:\n comment = ('<br/>{sender} has included the following information with their request:'\n '<br/>{comment}<br/>').format(sender=sender, comment=options_comment)\n\n if notification_type == NotificationType.DATA_ISSUE_REPORTED:\n greeting = 'Hello data owner,<br>'\n data_issue_url = options.get('data_issue_url')\n comment = comment.format(data_issue_url=data_issue_url)\n\n return '{greeting}{notification}{comment}{end_note}{salutation}'.format(greeting=greeting,\n notification=notification,\n comment=comment,\n end_note=end_note,\n salutation=salutation)", "def notifications(request):\n my_notifications = models.Notification.objects.order_by('-id').filter(\n person_notifying=request.user)\n return render(request,\n 'notifications.html',\n {'my_notifications': my_notifications})", "def notify():\n\n notifications_db, cursor = connect_db()\n\n cursor.execute('select * from notifications')\n notifications = cursor.fetchall()\n\n if not sys.stdout.isatty():\n sleep(5 * 60)\n xterm = 'xterm -e'\n bash = 'bash -c'\n cmd = 'python /home/veronika/git/notify/notify.py; bash'\n os.system('{} \\'{} \"{}\"\\''.format(xterm, bash, cmd))\n\n is_connection = is_network_connection()\n if not is_connection:\n print 'You have no network connection, showing only notifications'\\\n ' where it may not be\\nnecessary:\\n'\n\n for notification in notifications:\n if not is_connection and notification[1] in [TYPE_TO_INDEX['mail'],\n TYPE_TO_INDEX['search']]:\n continue\n print notification[0], ' ', INDEX_TO_TYPE[notification[1]],\\\n notification[2]\n\n notifications_db.close()", "def _init_notification(self):\n self.notification = notify.Notification(\n 'remind_bedtime bug!', icon='appointment')\n self.notification.set_timeout(notify.EXPIRES_NEVER)\n self.notification.set_urgency(notify.URGENCY_CRITICAL)\n self.notification.set_hint('transient', False)", "def notificationsWithUID(uid): # @NoSelf", "def info(title, message, details=None):\n _build_notification(title, message, details, ui.Notification.Information)", "def notification_trigger(self):\n self.today = self.entry_date.strftime(\"%Y-%m-%d\")\n #finding notify items\n self.df_notify = self.df_user.loc[self.df_user[\"notify (days)\"] <= self.today] \n self.name_notify = list(self.df_notify[\"title\"])\n #EXPIRED THINGS\n self.df_exp_dead = self.df_user.loc[self.df_user[\"expiration (days)\"] < self.today]\n self.names_expired = list(self.df_exp_dead[\"title\"])\n #NOTIFY ITEMS\n self.list_notify_notexpired = [x for x in self.name_notify if x not in self.names_expired]\n\n self.result.config(text=\"EXPIRES SOON:\")\n self.result3.config(text=\", \".join(self.list_notify_notexpired))\n self.result4.config(text=\"EXPIRED ITEMS: \"+\", \".join(self.names_expired))", "def notify(self, *args, **kwargs):\n\t\tself.server.notify(self, *args, **kwargs)", "def __repr__(self):\n bd = \"None\" if self.body is None else repr(self.body)\n ty = \"None\" if self.type is None else repr(self.type)\n\n msg = 
\"Notification Name: \" + self.name\n msg += \"\\nBody:\"+bd\n msg += \"\\nType:\"+ty\n\n return msg", "def comment_notifier(sender, **kwargs):\n body = kwargs['instance'].name + \", comento\" + \": \" + kwargs['instance'].comment\n send_mail(\"Nuevo Comentario en Sitio ADDAC\", body, 'no-reply@addac.org.ni', ['byroncorrales@gmail.com','antorcha@addac.org.ni'])", "def notifications(request, pUsername): \n if request.user.is_superuser == False:\n raise Http404\n \n try:\n lUser = User.objects.filter(username=pUsername)[0]\n except IndexError:\n raise Http404\n lProfile = lUser.profile\n \n if pUsername != lUser.username:\n raise Http404\n \n lOutstandingFeedbackCount, lSentFeedbackCount, lContestsCount, lContestHistoryCount, lMessageCount, lUserBadges = _get_tab_counts(request, lUser)\n \n return render_auth(request, 'users/profile/notifications.html', {\n 'User' : lUser,\n 'Profile' : lProfile,\n \"ContestCount\" : lContestsCount,\n 'FeedbackCount' : lOutstandingFeedbackCount,\n 'SentFeedbackCount' : lSentFeedbackCount,\n 'PerformanceCount' : lContestHistoryCount, \n \"MessageCount\" : lMessageCount,\n \"UserBadges\" : lUserBadges,\n })", "def notifications(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BudgetNotificationArgs']]]]:\n return pulumi.get(self, \"notifications\")", "def notifications(self, callback=None):\n super(SwitchModule, self).notifications(\n switch_data(callback) if callback is not None else None)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a data structure for storing batched calls to Datastore Lookup. The batch data structure is stored in the current context. If there is not already a batch started, a new structure is created and an idle callback is added to the current event loop which will eventually perform the batch look up.
def get_batch(batch_cls, options=None):
    # prevent circular import in Python 2.7
    from google.cloud.ndb import context as context_module

    context = context_module.get_context()
    batches = context.batches.get(batch_cls)
    if batches is None:
        context.batches[batch_cls] = batches = {}

    if options is not None:
        options_key = tuple(
            sorted(
                ((key, value) for key, value in options.items() if value is not None)
            )
        )
    else:
        options_key = ()

    batch = batches.get(options_key)
    if batch is not None and not batch.full():
        return batch

    def idler(batch):
        def idle():
            if batches.get(options_key) is batch:
                del batches[options_key]

            batch.idle_callback()

        return idle

    batches[options_key] = batch = batch_cls(options)
    context.eventloop.add_idle(idler(batch))
    return batch
[ "def _get_lookup_batch():\n state = _runstate.current()\n batch = state.batches.get(_BATCH_LOOKUP)\n if batch is not None:\n return batch\n\n state.batches[_BATCH_LOOKUP] = batch = {}\n _eventloop.add_idle(_perform_batch_lookup)\n return batch", "def _perform_batch_lookup():\n state = _runstate.current()\n batch = state.batches.pop(_BATCH_LOOKUP, None)\n if batch is None:\n return\n\n rpc = _datastore_lookup(batch.keys())\n _eventloop.queue_rpc(rpc, BatchLookupCallback(batch))", "def __enter__(self):\n db = self.BatchDB(self._db, self.batch)\n return db", "def getBatch(self):\n batch = self.queue.get()\n self.queue.task_done()\n return batch", "def batch_data(self):\n return self.container['batch_data']", "def get_batch(self, idx):\n batch_ = self.dataset.get_batch(idx, variables=self.input_variables)\n batch = {}\n batch['x'] = batch_['x']\n batch['y_gt'] = batch_['y_gt']\n if 'add_d_out' in self.model_opt:\n if self.model_opt['add_d_out']:\n batch['d_in'] = batch_['d_out']\n if 'add_y_out' in self.model_opt:\n if self.model_opt['add_y_out']:\n batch['y_in'] = batch_['y_out']\n batch['s_gt'] = batch_['s_gt']\n return batch", "def store_batches(idf, key, max_batches=None, context=None):\n context = get_context(context)\n sdfi = StoredDataframeIterator(key)\n for sdfi, df in _store_batches(idf, key, max_batches=max_batches, context=context):\n pass\n return sdfi", "def collect_batch(self, *args, **kwargs):\n self.samples_np = self.double_buffer[self.sync.db_idx.value]\n return super().collect_batch(*args, **kwargs)", "def next_batch_packed(self):\n if self.batch >= self.num_batches:\n self.new_epoch()\n \n pack_ids = self.batches_pack[self.batch]\n\n if len(pack_ids) > 1:\n where = \"id in {}\".format(tuple(pack_ids))\n else:\n where = \"id={}\".format(pack_ids[0])\n\n sql = \"\"\"\n select data\n from {}\n where\n {}\n\n \"\"\".format(self.packed_table, where)\n\n s_sql = datetime.datetime.now()\n dat = pd.read_sql_query(sql, self.conn)\n\n\n e_sql = datetime.datetime.now()\n\n buff_list = dat.data.values\n\n x, y, n_obs = pickle.load(StringIO.StringIO(buff_list[0]))\n for i in range(1,len(buff_list)):\n x_,y_,n_obs_ = pickle.load(StringIO.StringIO(buff_list[i]))\n x = np.append(x,x_, axis=0)\n y = np.append(y, y_, axis=0)\n n_obs = np.append(n_obs, n_obs_, axis=0)\n\n e_pkl = datetime.datetime.now()\n if self.debug:\n dt_sql = e_sql - s_sql\n dt_pkl = e_pkl - e_sql\n dt_total = e_pkl - s_sql\n print(\"next_batch time summary:\")\n msg = \"total time elapsed: %d ms (sql: %d ms, unpickle %d ms)\" % (\n dt_total.total_seconds() * 1000, dt_sql.total_seconds() * 1000,\n dt_pkl.total_seconds() * 1000)\n print(msg)\n\n self.batch += 1\n return x, y, n_obs", "def next_batch(self):\n # If the batch queue is empty, print a warning\n if self.batch_queue.qsize() == 0:\n tf.logging.warning(\"Bucket queue size: %i, Input queue size: %i\", \\\n self.batch_queue.qsize(), self.example_queue.qsize())\n if self.single_pass and self.finished_reading:\n tf.logging.info(\"Finished reading dataset in single_pass mode.\")\n return None\n # get the next Batch\n batch = self.batch_queue.get()\n return batch", "def active_batch(self):\n active_batch_id = self.execution_engine.active_batch_data_id\n active_batch = self.batches.get(active_batch_id) if active_batch_id else None\n return active_batch", "def produce_query_batches(self):\n self.__generate_queries()\n return self.__bobs", "def generator(self, *args, **kwargs) -> BatchGenerator:\n return BatchGenerator(self._obj, *args, **kwargs)", "def 
_store_batches(idf, key, max_batches=None, context=None):\n context = get_context(context)\n context.info(f\"Store iterator\")\n batch_number = 0\n if max_batches in (\"0\", \"\", None):\n max_batches = 0\n else:\n max_batches = int(max_batches)\n store = context.store()\n if store.contains(key):\n if store.is_dir(key):\n context.info(f\"Cleaning {key}\")\n for x in store.listdir_keys(key):\n context.info(f\"Remove {x}\")\n store.remove(x)\n else:\n raise Exception(\n f\"Can't store the iterator in '{key}'. The key exists and it is not a directory.\"\n )\n sdfi_key = store.join_key(key, \"dataframe_iterator.json\")\n\n sdfi = StoredDataframeIterator(key)\n for df in context.progress_iter(idf):\n if not len(df):\n continue\n batch_number += 1\n if max_batches:\n context.info(f\"Storing batch {batch_number}/{max_batches}\")\n else:\n context.info(f\"Storing batch {batch_number}\")\n sdfi.append(df)\n context.store_data(sdfi_key, sdfi)\n yield sdfi, df\n # sdfi_bytes, mimetype = STORED_DATAFRAME_ITERATOR_STATE_TYPE.as_bytes(sdfi)\n # dc = data_characteristics(sdfi)\n # sdfi_metadata = context.metadata()\n # sdfi_metadata.update(\n # dict(type_identifier=dc[\"type_identifier\"], data_characteristics=dc)\n # )\n # store.store(sdfi_key, sdfi_bytes, sdfi_metadata)\n if max_batches and batch_number > max_batches:\n context.info(f\"Maximum number of batches reached\")\n break", "def start_batch(self):\n\n def batch():\n while self.alive:\n gevent.sleep(60.0)\n if len(self.docs) > 0:\n self.couch_db.save_docs(self.docs)\n self.docs = []\n\n return gevent.spawn(batch)", "def batch(self) -> t.Optional[jank.graphics.Batch]:\n return self.get_batch()", "def get_batch_job(self) -> SlurmBatchJob:\n ...", "def get_batch(self, data_asset_name, expectation_suite_name, batch_kwargs=None, **kwargs):\n normalized_data_asset_name = self.normalize_data_asset_name(data_asset_name)\n\n datasource = self.get_datasource(normalized_data_asset_name.datasource)\n if not datasource:\n raise ge_exceptions.DataContextError(\n \"Can't find datasource {} in the config - please check your {}\".format(\n normalized_data_asset_name,\n self.GE_YML\n )\n )\n\n if batch_kwargs is None:\n batch_kwargs = self.build_batch_kwargs(data_asset_name, **kwargs)\n\n data_asset = datasource.get_batch(normalized_data_asset_name,\n expectation_suite_name,\n batch_kwargs,\n **kwargs)\n return data_asset", "def find_raw_batches(self, *args, **kwargs):\n cursor = self.delegate.find_raw_batches(\n *unwrap_args_session(args), **unwrap_kwargs_session(kwargs)\n )\n cursor_class = create_class_with_framework(\n AgnosticRawBatchCursor, self._framework, self.__module__\n )\n\n return cursor_class(cursor, self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Execute rules and handler
async def execute_handler(self, *args):  # args - (event, data)
    if self.rules:
        _execute = False
        for rule in self.rules:
            if not asyncio.iscoroutinefunction(rule) and not isinstance(
                rule, BaseRule
            ):
                result = rule(*args)
            else:
                result = await rule(*args)
            if not result:
                _execute = False
                break
            if isinstance(result, dict):
                args[1].update(result)
                data_.set(args[1])
            _execute = True
        if _execute:
            await self.handler(*args)
            return True
    else:
        await self.handler(*args)
        return True
[ "def process(self, x):\n match_not_found = True\n if self.before_processing:\n self.before_processing(x)\n for rule in self._rules:\n if rule.matches(x):\n rule.action(x)\n match_not_found = False\n if rule.last:\n break\n if match_not_found:\n for action in self._defaults:\n action(x)\n if self.after_processing:\n self.after_processing(x)", "def do_rules(self, _):\r\n print(self.game.display_rules())", "def run(self, instances_list):\n rule_results = []\n\n def get_instance(list_of_instances, content_type):\n for _instance in list_of_instances:\n if isinstance(_instance, content_type.model_class()):\n return _instance\n\n instance_identifier = None\n rules = Rule.objects.filter(ruleset=self, active=True).order_by(\n 'sequence')\n instance = None\n\n for rule in rules:\n instance = get_instance(instances_list, rule.process_model)\n result = rule.run_evaluate(instance)\n if not instance_identifier:\n instance_identifier = rule.instance_identifier\n if result:\n rule_results.append({\n \"identity\": getattr(instance, instance_identifier),\n \"result\": result\n })\n if rule.exit_on_match:\n return rule_results\n else:\n if rule.exit_on_fail:\n return rule_results\n\n if not rule_results:\n rule_results = [{'identity': getattr(\n instance, instance_identifier), 'result': None}]\n return rule_results", "def handle():\n arguments = pluginsupport.getArguments()\n def __getArgument(arg):\n if not arguments.has_key(arg):\n return None\n val = arguments[arg]\n del arguments[arg]\n return val\n\n if __isAction(arguments):\n action = __getArgument('action')\n log.debug(\"invoking action '%s'\" % action)\n actionHandlers[action].call(arguments)\n else:\n mode = __getArgument('mode') or \"ROOT\"\n handler = modeHandlers[mode]\n log.debug(\"invoking mode %r handler with arguments %r\" % (mode, arguments))\n result = handler.call(arguments)\n log.debug(\"results from mode %r handler: %r\" % (mode, result))\n if handler.playable:\n log.debug(\"playing results for mode %r\" % mode)\n pluginsupport.play(result.items)\n else:\n log.debug(\"listing results for mode %r\" % mode)\n pluginsupport.list(result, handler.getContentType(arguments))\n pluginsupport.done()", "def _run_rule(parser, visitor, start_rule, global_state):\n start_func = getattr(parser, start_rule)\n tree = start_func()\n if global_state:\n visitor.visit(tree)\n return visitor\n else:\n return visitor.visit(tree)", "def run_evaluate(self, instance):\n # return None if not true, or rule_status id if True (for reporting)?\n if not self.active:\n return\n\n hits = 0\n condition = None\n\n for condition in self.condition_set.all():\n result = condition.run_evaluate(instance)\n if result:\n hits += 1\n elif condition.join_condition == 'AND':\n return\n\n if hits == 0:\n return\n\n # what is the instances identity?\n identity = getattr(instance, self.instance_identifier)\n\n # check to see if action can be performed more than once\n if self.perform_action_once and self.rulestatus_set.filter(\n identity=identity,\n content_type=self.process_model).count() > 0:\n return\n\n # last condition is used to generate rule status\n rule_status, create = RuleStatus.objects.get_or_create(\n rule=self,\n condition=condition,\n content_type=self.process_model,\n object_id=instance.id,\n identity=identity\n )\n\n # Perform action - rule must have passed, and there must be an action\n # class if no action class, then obviously don't do anything, carry on\n # the ruleset sequence.\n if self.action_class:\n madule = 
self.action_class[:self.action_class.rindex(\".\")]\n klass = self.action_class[self.action_class.rindex(\".\") + 1:]\n action = str_to_class(madule, klass)\n\n # prepare kwargs\n parameters = self.parameter_names.split(\",\")\n values = self.parameter_values.split(\",\")\n kwargs = dict(zip(parameters, values))\n\n # add instance specific id\n kwargs[self.instance_identifier] = identity\n\n # hand over rule id, so that any rule field(s) can be used in\n # payment description\n kwargs['rule_id'] = self.id\n\n # perform action as rule passed, and no limit to\n performed_result = action.perform(**kwargs)\n ActionStatus.objects.get_or_create(\n action=self.action_class,\n rule_status=rule_status,\n performed=performed_result[:255] # avoid overflow\n )\n\n return rule_status.id", "def handle_packet(self, policy, port_thread, buf):\n for rule in self.rules:\n if rule.apply_rule(policy, port_thread, buf):\n return\n self.default_action(policy, None, port_thread, buf)", "def start_rule(self, context):\n pass", "async def rules(ctx):\r\n\trules = getServerStat(ctx.message.server, globals.serverList, \"Rules\")\r\n\tmsg = \"{} Rules:\\n{}\".format(ctx.message.server.name, rules)\r\n\tawait bot.send_message(ctx.message.channel, msg)", "async def rules(self, ctx: Context) -> None:\n rules_channel = ctx.guild.get_channel(Channels.rules)\n await ctx.send(f\"Please read the server rules at: {rules_channel.mention}\")", "def main():\n options = parser.parse_args()\n h = SonarAPIHandler(host=options.host, port=options.port,\n user=options.user, password=options.password,\n token=options.authtoken, base_path=options.basepath)\n\n # Determine output csv and html file names\n csv_fn = os.path.expanduser(os.path.join(options.output, 'rules.csv'))\n html_fn = os.path.expanduser(os.path.join(options.output, 'rules.html'))\n\n # Open csv and html files\n with open(csv_fn, 'w') as csv_f, open(html_fn, 'w') as html_f:\n # Init csv writer and write header\n csv_w = csv.writer(csv_f)\n csv_w.writerow(['language', 'key', 'name', 'debt', 'severity'])\n\n # Start html file\n html_f.write(u'<html><body>')\n\n # Get the rules generator\n rules = h.get_rules(options.active,\n options.profile,\n options.languages)\n\n # Counters (total, exported and failed)\n s, f = 0, 0\n\n # Now import and keep count\n try:\n for rule in rules:\n try:\n # Write CSV row\n csv_w.writerow([\n rule['langName'],\n rule['key'],\n rule['name'],\n # Note: debt can be in diff. 
fields depending on type\n rule.get('debtRemFnOffset',\n rule.get('debtRemFnCoeff', u'-')),\n rule['severity']\n ])\n\n # Render parameters sublist\n params_htmls = []\n if rule['params']:\n for param in rule['params']:\n params_htmls.append(u'<li>{}: {}</li>'.format(\n param.get('key', u'-'),\n param.get('defaultValue', u'-')\n ))\n else:\n params_htmls.append(u'-')\n\n # Build values to write in html\n values = (\n rule['key'], rule['name'], rule['langName'],\n rule['key'], rule['severity'],\n rule.get('debtRemFnOffset', rule.get('debtRemFnCoeff', u'-')),\n u''.join(params_htmls), rule.get('htmlDesc', u'-')\n )\n\n # Render html and write to file\n html = utf_encode(HTML_RULE_TEMPLATE.format(*values))\n html_f.write(html)\n s += 1\n\n except KeyError as exc:\n # Key error, should continue execution afterwards\n sys.stderr.write(\"Error: missing values for {}\\n\".format(','.join(exc.args)))\n f += 1\n\n # Done with rules, close html body and document\n html_f.write(u'</body></html>')\n\n except Exception as exc:\n # Other errors, stop execution immediately\n sys.stderr.write(\"Error: {}\\n\".format(exc))\n status = 'Incomplete'\n\n else:\n # No errors, complete\n status = 'Complete'\n\n # Finally, write results\n sys.stdout.write(\"{} rules export: {} exported and \"\n \"{} failed.\\n\".format(status, s, f))", "def process(self):\n\n self.process_classes(Steps.FLATTEN)\n self.filter_classes()\n self.process_classes(Steps.SANITIZE)\n self.process_classes(Steps.RESOLVE)\n self.process_classes(Steps.FINALIZE)\n self.designate_classes()", "def rules(self, create, extracted, **kwargs):\n if not create:\n return\n if extracted:\n for rule in extracted:\n self.rules.add(rule)", "def applyRules(rules, packet):\n acceptFlag = 'unspecified'\n\n for rule in rules:\n rule = rule.replace('-> ', '').split(' ')\n access, protocol, srcIP, dstIP, srcPort, dstPort = getHeaderValues(rule)\n\n # Handle any wildcards in the rule ports\n if srcPort == '*':\n srcPort = str(packet.srcPort)\n if dstPort == '*':\n dstPort = str(packet.dstPort)\n\n # If packet matches rule's protocol, IPs, and ports\n if (\n protocol == packet.protocol and\n validateIP(srcIP, packet.srcIP) and # Handle any wildcards in the rule's source IP\n validateIP(dstIP, packet.dstIP) and # Handle any wildcards in the rule's destination IP\n srcPort == str(packet.srcPort) and\n dstPort == str(packet.dstPort)\n ):\n if access == 'allow': # If packet matches and rule allows it, continue\n acceptFlag = True\n continue\n else: # If packet matches and rule denies it, break\n acceptFlag = False\n break\n\n return acceptFlag", "def do_analysis(self):\n for op in self._config_dict['operations']:\n self.logger.info('Calling %s for %s', op, self._config_key)\n func = getattr(analysis_ops, op)\n func(self, self._config_dict)", "def run(self, args, payload):\n if not args:\n raise ConfigurationException(\n 'Incomplete \"User\" module configuration'\n )\n\n name = self._get_name(payload)\n if name is None:\n return\n\n rules = args if isinstance(args, list) else [args]\n rules = self.replace_user(rules, payload)\n\n for rule in rules:\n if re.match(rule, name):\n return\n\n raise UnauthorizedException(\n 'User name verification failed: user name breaks the rules'\n )", "def process(self):\n\n self.search_matches()\n self.save_matches()\n self.make_wide()", "def rules(self, prob_function, args, labels = {}):\n\t\t# Create nodes for all positions between words\n\t\tnodes = [Node(i) for i in xrange(0, self.lengthS + 1)]\n\t\tspans = []\n\t\t\n\t\t# Construct 
the graph by creating the edges\n\t#\tprint 'finding spans'\n\t\tfor (i,j) in self.spans():\n\t\t\tnodes[i].link_to(nodes[j])\n\t\t\tspans.append((i,j))\n\t#\tprint 'finding rules'\n\t\tfor (i,j) in spans:\n\t\t\tfor path in nodes[i].paths_to(nodes[j]):\n\t\t\t\tif not path or len(path) == 2:\n\t\t\t\t\t# No rules possible, or path points to itself\n\t\t\t\t\tcontinue\n\t\t\t\t# set probability\n\t\t\t\trule = Rule((i, j), path, labels)\n\t\t\t\tprob = prob_function(rule,args)\n\t\t\t\tyield self.prune_production(rule, self.lex_dict)", "def invoke(self, *args, **kwargs):\n for handler in self.handlers:\n if handler is not None:\n handler(*args, **kwargs)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns FocusGame Player One object
def get_player_one(self): return self._player_one
[ "def player():\n\n name_id = 1\n return card_game.Player(name_id)", "def _get_ac_player(self):\n\t\treturn self.players[self.active_player]", "def get_player(self, name):\r\n return User(name)", "def getPlayer(self, p):\n log(\"MState getPlayer\",5)\n if type(p) == Player:\n return p\n elif type(p) == str:\n players = [player for player in self.players if player.id == p]\n if len(players) >= 1:\n return players[0]\n else:\n raise Exception(\"Couldn't find player from id {}\".format(p))\n else:\n raise Exception(\"Couldn't find player from {}\".format(p))", "def current_player(self):\n return self.player", "def player_1(self):\n return self.player_1_init or self.previous_match_1.winner()", "def get_active_player(self, player_name):\n player_one = self.get_player_one()\n player_two = self.get_player_two()\n\n if player_one.get_player_name() == player_name:\n return player_one\n if player_two.get_player_name() == player_name:\n return player_two\n else:\n return 'player not found'", "def get_player(self, name):\n\n try:\n name = name.name\n except AttributeError: pass\n\n for i in self.players:\n if i.name == name:\n return i", "def player(self):\n return axl.Cycler(self.get_sequence_str())", "def get_player(self, name):\n\t\t\n\t\tname = \"\".join(ch.lower() for ch in name if ch not in set(string.punctuation)).capitalize()\n\t\titem = self.db.get(name)\n\t\t\n\t\tif item.value:\n\t\t\titem.value = data.Object(item.value)\n\t\telse:\n\t\t\tplayer = data.Object()\n\t\t\t\n\t\t\tplayer.name = name\n\t\t\tplayer.title = \"\"\n\t\t\tplayer.full_name = name\n\t\t\tplayer.karma = 0\n\t\t\tplayer.alignment = ALIGNMENT_NEUTRAL\n\t\t\tplayer.unaligned_name = random.choice(UNALIGNED_NAMES)\n\t\t\tplayer.damage = random.choice(DAMAGE_TYPES)\n\t\t\tplayer.next_karma = 0\n\t\t\tplayer.next_fight = 0\n\t\t\tplayer.wins = 0\n\t\t\tplayer.losses = 0\n\t\t\tplayer.ties = 0\n\t\t\t\n\t\t\titem.value = player\n\t\t\titem.commit()\n\t\t\n\t\treturn item", "def __get_opponent_player(self):\n return {'X': helper.PLAYERO, 'O': helper.PLAYERX}[self.player]", "def getplayer(title, logs=[]):\n match = consts.player_re.search(title)\n if not match:\n logs.append(\"Player: No regex match\")\n return None\n name = strip_annots(match.group(1))\n\n players = safe_call(consts.osu_api.get_user, name)\n if players:\n return players[0]\n logs.append(\"Player: '%s' not found\" % name)\n return None", "def get_next_player(self) -> None:\n if not any([player.total_score >= 10000 for player in self.base.list_o_players]):\n temp = self.base.list_o_players.popleft()\n self.base.current_player = temp\n if self.base.current_player.first_turn and not self.base.current_player.comp_player:\n self.open_first_popup(self.base.current_player)\n self.base.list_o_players.append(temp)\n if self.base.current_player.comp_player:\n Clock.schedule_once(self.base.buttons.roll.on_release, 1.)\n\n else:\n self.base.list_o_winners.append(self.base.current_player)\n self.base.current_player = self.base.list_o_players.popleft()\n if self.base.list_o_players and self.base.current_player.comp_player:\n Clock.schedule_once(self.base.buttons.roll.on_release, 1.)\n if self.base.list_o_players and not self.base.current_player.comp_player:\n self.open_last_popup()\n\n if not self.base.list_o_players:\n self.find_winner()", "def get_blue_player(self):\n return self._blue_player", "def get_current_player(player_one_turn: bool) -> str:\r\n\r\n # Complete this function.\r\n if player_one_turn == True:\r\n return P1\r\n else:\r\n return P2", "def getPlayer(self, 
number):\n try:\n return self.players[number]\n except KeyError:\n return None", "def get_player(self, login):\n for player in self['players']:\n if player['login'] == login:\n return player\n raise ValueError('player with login %s not found' % login)", "def getPlayerForId(self, id):\n for player in self.players:\n if player.id == id:\n return player\n return None", "def get_player_two(self):\n return self._player_two" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns FocusGame Player Two object
def get_player_two(self): return self._player_two
[ "def player_2(self):\n return self.player_2_init or self.previous_match_2.winner()", "def initialize_second_player(self, option):\n if option is BOT:\n self.players[\"1\"] = BotPlayer(self.player_marks[\"1\"])\n self.players[\"1\"].set_player_info(\"Bot Player\")\n else:\n self.players[\"1\"] = HumanPlayer(self.player_marks[\"1\"])\n self.players[\"1\"].set_player_info(\"Second Player\")", "def other_player(self, player: Player) -> Union[Player, None]:\n if player.player_id == self.player1.player_id:\n return self.player2\n if player.player_id == self.player2.player_id:\n return self.player1", "def get_other_player_name(self) :\n return self.players[1]", "def get_blue_player(self):\n return self._blue_player", "def get_player_one(self):\n return self._player_one", "def __get_opponent_player(self):\n return {'X': helper.PLAYERO, 'O': helper.PLAYERX}[self.player]", "def getOpponent(self):\n if self.Num == 1:\n self.opponent = self.game.player2\n return self.game.player2\n elif self.Num == 2:\n self.opponent = self.game.player1\n return self.game.player1", "def __init__(self, player1, player2):\n self.board = GlobalBoard()\n self.player1 = player1\n self.player2 = player2\n self.active_player = player1 # the player who moves next is the active player. Player 1 always goes first\n self.bot_game = isinstance(player1, Bot) and isinstance(player2, Bot)\n self.moves = []\n if isinstance(player1, Bot):\n self.player1.setup_bot(self)\n\n if isinstance(player2, Bot):\n self.player2.setup_bot(self)", "def start_game(self, player1, player2):\r\n\t\tgame = NimGame(player1, player2)\r\n\t\tself.games[game.id] = player1.game = player2.game = game\r\n\t\treturn game", "def get_active_player(self, player_name):\n player_one = self.get_player_one()\n player_two = self.get_player_two()\n\n if player_one.get_player_name() == player_name:\n return player_one\n if player_two.get_player_name() == player_name:\n return player_two\n else:\n return 'player not found'", "def get_player(self, name):\r\n return User(name)", "def other_player(self, player):\n assert self.nbr_players == 2\n\n return 0 if player == 1 else 1", "def player():\n\n name_id = 1\n return card_game.Player(name_id)", "def get_next_match(self):\n if not self._not_played_matches:\n return\n\n p1, p2 = self._not_played_matches.pop(0)\n p1 = self._players[p1]\n p2 = self._players[p2]\n p1.set_classification(Classification.WHITE)\n p2.set_classification(Classification.BLACK)\n self._current_match = (p1, p2)\n return self._current_match", "def _get_ac_player(self):\n\t\treturn self.players[self.active_player]", "def getPlayers(self):\n n = 0\n while n < 2:\n playerType = input(\"Player\"+str(n+1)+\": Play as Human by inserting 'H', insert ' ' to play as Computer:\")\n if \"H\" in playerType.upper():\n player = Human(self, n+1)\n self.setPlayer(player, n+1)\n else:\n player = Computer(self, n+1)\n self.setPlayer(player, n+1)\n n += 1\n if self.player1.isComputer():\n self.player1.getOpponent()", "def get_winner(self) -> Union[Player, None]:\n\n if self._board.get_token(0, self.size // 2) == self.player2.player_id:\n return self.player2\n \n if self._board.get_token(self.size - 1, self.size // 2) == self.player1.player_id:\n return self.player1\n\n if self.player2.player_id not in str(self._board):\n return self.player1\n\n if self.player1.player_id not in str(self._board):\n return self.player2\n \n return Pieces.EMPTY", "def get_other_active_player_id(self):\n for proc in self.state.stack.items:\n if isinstance(proc, Block):\n if proc.defender is not 
None:\n return proc.defender.player_id\n if isinstance(proc, PassAction):\n if proc.catcher is not None:\n return proc.catcher.player_id\n if isinstance(proc, Handoff):\n if proc.catcher is not None:\n return proc.catcher.player_id\n if isinstance(proc, Push):\n if proc.catcher is not None:\n return proc.player.player_id\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a parameter of a player name, sets the FocusGame turn to that player
def set_turn(self, player_name): self._turn = player_name
[ "def set_player_name(name):\n\n player[\"player_name\"] = name", "def change_player():\n\n global current_player\n if current_player == 'X':\n current_player = 'O'\n elif current_player == 'O':\n current_player = 'X'\n return", "def change_name(self, name):\n self._player_name = name", "def set_player_name(self, player):\r\n self.__name = player", "def test_set_player_name(self):\n self.game.set_player(1, \"Wille\")\n p_1 = self.game.player1\n exp = \"Lucas\"\n self.game.set_player_name(\"Lucas\", p_1)\n self.assertEqual(exp, p_1.name)", "def set_player_turn(self):\n if self.get_player_turn() == 'BLACK':\n self._player_turn = 'WHITE'\n else:\n self._player_turn = 'BLACK'", "def __switch_player(self):\n if self.current_player == self.players[0]:\n self.current_player = self.players[1]\n else:\n self.current_player = self.players[0]", "def change_turn(self, player):\r\n if player == self._first_player:\r\n self._turn = self._second_player\r\n else:\r\n self._turn = self._first_player", "def setPlayer(self, player, n):\n if n == 1:\n self.player1 = player\n elif n == 2:\n self.player2 = player", "def switch_player(self):\n self.current_player_idx = (self.current_player_idx + 1) % self.nbr_players", "def start_game(self) -> None:\n username = self.username_value.get()\n number_of_opponents = self._number_players.get()\n player_character = self._player_character.get()\n self.start_game = Game(username, number_of_opponents, player_character)", "def __playHumanTurn__(self, choice):\n self.__inputChoice__(choice)", "def changePlayer(self, whichPlayer):\n return 2 if whichPlayer == 1 else 1", "def switchPlay(playTurn):\n\n if(playTurn == 'o'):\n playTurn = 'x'\n elif(playTurn == 'x'):\n playTurn = 'o'\n else:\n print('ERROR')\n return playTurn", "def set_key_value_by_player_name(match, key):\r\n entity = match.group(1)\r\n value = match.group(2)\r\n players[entity][key] = value", "def change_turn(self):\n if self._current_player == self._red_player:\n self._current_player = self._blk_player\n else:\n self._current_player = self._red_player\n\n if self._opp_player == self._red_player:\n self._opp_player = self._blk_player\n else:\n self._opp_player = self._red_player", "def next_turn(self):\n temp = self.current_player\n self.current_player = self.opponent\n self.opponent = temp", "async def setgame(ctx, *, game):\n if ctx.message.author.id == (ownerid):\n message = ctx.message\n await client.whisper(\"Game was set to **{}**!\".format(game))\n await client.change_presence(game=discord.Game(name=game))", "def set_next_players_turn(self):\n self.current_player_id = next(self.turn_cycle)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a parameter of a player_name and sets the winner data field of FocusGame to the inputted player
def set_winner(self, player_name): self._winner = player_name
[ "def set_player_name(name):\n\n player[\"player_name\"] = name", "def test_set_player_name(self):\n self.game.set_player(1, \"Wille\")\n p_1 = self.game.player1\n exp = \"Lucas\"\n self.game.set_player_name(\"Lucas\", p_1)\n self.assertEqual(exp, p_1.name)", "def set_player_name(self, player):\r\n self.__name = player", "def update_game_new_player():\n name = request.json.get(\"name\")\n game_finder = get_game_finder()\n game_id = game_finder.join_game(name)\n if game_id is None:\n status_code = status.HTTP_404_NOT_FOUND\n else:\n status_code = status.HTTP_200_OK\n return {\"game_id\": game_id}, status_code, RESPONSE_HEADERS", "def setPlayer(self, player, n):\n if n == 1:\n self.player1 = player\n elif n == 2:\n self.player2 = player", "def set_key_value_by_player_name(match, key):\r\n entity = match.group(1)\r\n value = match.group(2)\r\n players[entity][key] = value", "def change_name(self, name):\n self._player_name = name", "def change_player():\n\n global current_player\n if current_player == 'X':\n current_player = 'O'\n elif current_player == 'O':\n current_player = 'X'\n return", "def store_name_match(self, match_id, name, account):\n self.c.execute('SELECT * FROM player WHERE (name = ?) AND (matchid = ?)', (name, match_id))\n results = self.c.fetchone()\n if results is None:\n self.c.execute('INSERT INTO player (name, matchid, account) VALUES (?,?,?)', (name, match_id, account))\n self.conn.commit()\n logging.log(logging.INFO, 'Sotring a name and macth id in player table: %s and %s', name, match_id)", "def operator_player_name(self, operator_player_name):\n\n self._operator_player_name = operator_player_name", "def clarify_player(matches):\n question = [\n {\n 'type': 'list',\n 'name': 'exact_player',\n 'message': 'There are {} players with that name. 
Please chose one:'.format(len(matches)),\n 'choices': []\n }\n ]\n for match in matches:\n # Populate choices with specific player characteristics when a name is associated with multiple players\n player = commonplayerinfo.CommonPlayerInfo(player_id=match['id']).get_data_frames()[0]\n birthdate = datetime.strptime(player.BIRTHDATE.values[0], '%Y-%m-%dT%H:%M:%S').strftime(\"%B %-d, %Y\")\n question[0]['choices'].append({\n 'name': \"{} {} - {} - Born {}\".format(player.FIRST_NAME.values[0], player.LAST_NAME.values[0],\n player.POSITION.values[0], birthdate),\n 'value': match['id']\n })\n\n answer = prompt(question, style=style)\n return answer", "def ask_for_player():\n question = [\n {\n 'type': 'input',\n 'name': 'player_name',\n 'message': 'Enter the player\\'s name',\n 'validate': PlayerValidator,\n }\n ]\n answer = prompt(question, style=style)\n return answer", "def game(self, choice):\n\n global PC_wins\n global player_wins\n global player_name\n\n game_choice = random.randint(1, 3)\n if game_choice == 1:\n game_choice = \"Rock\"\n elif game_choice == 2:\n game_choice = \"Paper\"\n else:\n game_choice = \"Scissors\"\n\n if game_choice == choice:\n winner = \"Draw\"\n elif game_choice == \"Rock\" and choice == \"Paper\" or game_choice == \"Paper\" and choice == \"Scissors\" or \\\n game_choice == \"Scissors\" and choice == \"Rock\":\n winner = player_name.get()\n player_wins += 1\n else:\n winner = \"PC\"\n PC_wins += 1\n\n self.user_choice.delete(1.0, \"end\")\n self.user_choice.insert(1.0, choice)\n self.pc_choice.delete(1.0, \"end\")\n self.pc_choice.insert(1.0, game_choice)\n\n if winner == \"Draw\":\n self.winner_label.configure(text=winner)\n else:\n self.winner_label.configure(text=f\"{winner} win!\")\n total_text.set(f\"PC - {PC_wins}:{player_wins} - {player_name.get()}\")", "def player_selection(self):\n valid_cols = [\"A\", \"B\", \"C\"]\n valid_rows = [\"0\", \"1\", \"2\"]\n\n print(f\"Player {self.players_turn}, it is your turn to pick\")\n valid = False\n while not valid:\n selection = input(\"Selection: \").upper()\n selection = selection.replace(\" \", \"\")\n if (\n len(selection) == 2\n and selection[0] in valid_cols\n and selection[1] in valid_rows\n ):\n valid = True\n else:\n print(\"Invalid Selection, try again\")\n\n col = valid_cols.index(selection[0])\n row = int(selection[1])\n\n self.board[row][col] = self.players_turn\n if self.players_turn == \"X\":\n self.players_turn = \"O\"\n else:\n self.players_turn = \"X\"", "def test_update_player(self):\n the_game = game.Game()\n the_game.create_player('Test')\n the_game.update_player('Test', 'new name')\n the_player = the_game.players['new name']\n self.assertIsInstance(the_player, player.Player)", "def __init_player(self, name, client_id):\n self.current_input_number += 1\n\n # Construct init player message\n message = messagepb.ClientGameMessage()\n\n init_player = messagepb.InitPlayer()\n init_player.name = name\n init_player.client_id = client_id\n message.input_sequence_number = self.current_input_number\n message.init_player_payload.CopyFrom(init_player)\n\n self.ws.send(message.SerializeToString())", "def changePlayer(self, whichPlayer):\n return 2 if whichPlayer == 1 else 1", "def next_play(board, selection, active_player):", "def set_turn(self, player_name):\n self._turn = player_name", "def set_player_names(num_players):\n markers = [\"X\", \"O\"]\n players_list = []\n for n in range(num_players):\n new_player = Player(input(f\"Player {markers[n]}, what is your name? 
\"), markers[n])\n players_list.append(new_player)\n if num_players == 1:\n computer_player = Player(\"Computer\", markers[1])\n players_list.append(computer_player)\n return players_list" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method for the movement of pieces. Takes a player_name, start location, destination location, and the number of pieces desired to move as parameters. move_piece will check the validity of the move and then make the appropriate move depending on whether it is a single or a multiple move. Will update the board, and captured and reserved pieces, and finally check win conditions. This will all lead into updating the next turn for the FocusGame and returning the appropriate message.
def move_piece(self, player_name, start, destination, num_of_pieces):
    if self.get_winner() is not None:
        return self.get_winner() + ' has already won!'
    turn = self.get_turn()
    player = self.get_active_player(player_name)
    move_size = self.move_size(start, destination)
    valid_move = self.check_valid_move(player, start, destination)
    if turn != player.get_player_name() and turn is not None:
        return 'not your turn'
    if valid_move is False:
        return 'invalid location'
    if num_of_pieces != move_size:
        return 'invalid number of pieces'
    movement_range = self.get_range(start)
    if num_of_pieces > movement_range:
        return 'invalid number of pieces'
    pieces_to_move = self.get_pieces_to_move(start, num_of_pieces)
    board = self.update_board_location(start, destination, pieces_to_move)
    board = self.process_stack(board, destination, player)
    if self.get_winner() is not None:
        return 'wins'
    else:
        self.set_board(board)
        self.get_next_player(player)
        return 'successfully moved'
[ "def move_piece(self, player_name, from_position, to_position, pieces_moved):\n if self._whose_turn is None:\n self._whose_turn = player_name\n\n # general validation\n validation_result = self.general_move_validation(player_name, to_position)\n if validation_result is not True: # validation_result is string if any test failed\n return validation_result\n\n # enforce valid from_position; from_position is within bounds\n if not self.is_in_board(from_position):\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid from_position; from_position is not empty\n if len(self.show_pieces(from_position)) < 1:\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid from_position; player controls top of stack at from_position\n if self.show_pieces(from_position)[-1] != self._players[player_name]['color']:\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid to_position; to_position is within legal range\n if not self.position_is_in_stack_range(from_position, to_position):\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid number of pieces moved\n if pieces_moved > len(self.show_pieces(from_position)):\n return self._ERROR_MESSAGES['invalid_number_of_pieces']\n\n # move is valid--process the move by removing pieces from from_position and placing atop to_position\n removed_pieces = self.remove_pieces_from_stack(from_position, 'top', pieces_moved)\n self.place_atop_safely(to_position, removed_pieces)\n\n return self.process_post_move()", "def move_piece(self, player, select, move, number_pieces):\r\n if self.check(player, select, move, number_pieces):\r\n player_profile = self.which_player(player)\r\n piece_select = self._board[select[0]][select[1]]\r\n bottom_place = len(piece_select) - number_pieces\r\n for num in range(number_pieces):\r\n bottom_piece = piece_select[bottom_place]\r\n self._board[select[0]][select[1]].pop(bottom_place)\r\n self._board[move[0]][move[1]].append(bottom_piece)\r\n if len(self._board[move[0]][move[1]]) > 5:\r\n self.overflow(player, move)\r\n if player_profile.get_capture() == 6:\r\n return player_profile.get_name() + \" Wins\"\r\n return \"successfully moved\"\r\n\r\n else:\r\n return False", "def play_piece(self, piece, piece_moves):\n start_file, start_rank = piece.file_pos, piece.rank_pos\n coord_str = \"\"\n select_move_dict = {}\n key_num = 1\n for move_vector in piece_moves:\n move_notation_str = self.board.move_notation(piece, move_vector)\n coord_str += (str(key_num) + \". \" + move_notation_str + \" | \") \n select_move_dict.update({key_num: move_vector})\n key_num += 1\n while True:\n try:\n print(\"0. Go back. | \" + coord_str)\n input_num = int(input(\"Enter the move you want to make: \"))\n if input_num == 0:\n raise ReturnException(\"go back\")\n if input_num >= key_num or input_num < 0:\n raise ValueError\n break\n except ValueError:\n print(f\"Invalid input. Please enter a number from 1 through {key_num-1}.\")\n while True:\n try:\n break_num = int(input(\"Enter 1 to confirm your move. 
0 to go back: \"))\n if break_num == 1:\n break\n elif break_num == 0:\n raise ReturnException\n else:\n print(\"Invalid input.\")\n except ValueError:\n print(\"Please enter a number.\")\n\n move_vector = select_move_dict.get(input_num)\n direction, step = move_vector[0], move_vector[1]\n self.board.move_piece(start_file, start_rank, direction, step)", "def move_piece(x, y, new_x, new_y, x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION):\n\n # check whether the destination is the same for both\n\n if (new_x == new_x2 and new_y == new_y2):\n print(\"Both pieces going to the same location\")\n piece_type1 = get_piece(board, y, x)\n piece_type2 = get_piece(board, y2, x2)\n if (piece_type1 == \"p\" and piece_type2 == \"P\"):\n # both pawns, delete both\n print(\"Both are pawns, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"k\" and piece_type2 == \"K\"):\n print(\"Both are knights, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"p\" and piece_type2 == \"K\"):\n\n board = delete_piece(x, y, board, board_turtles)\n # execute move for AI\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n elif (piece_type1 == \"k\" and piece_type2 == \"P\"):\n board = delete_piece(x2, y2, board, board_turtles)\n # execute move for AI\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n print(\"Executing moves normally\")\n if (x != -1):\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n if (x2 != -1):\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n\n return board", "def reproduce_game(starting_position, turn, move_list, move_counter, players, tournament, result, solving=False):\n global pieces\n \n first_turn = turn\n pieces = deepcopy(starting_position)\n other = \"b\" if turn == \"w\" else \"w\"\n chess_notation = [[\"White\"], [\"Black\"]]\n \n for i, move in enumerate(move_list):\n color = 0 if turn == \"w\" else 1\n if i < move_counter:\n if move[:5] == \"0-0-0\":\n if turn == \"w\":\n seek_piece(\"e1\", pieces).set_position(\"c1\")\n seek_piece(\"a1\", pieces).set_position(\"d1\")\n elif turn == \"b\":\n seek_piece(\"e8\", pieces).set_position(\"c8\")\n seek_piece(\"a8\", pieces).set_position(\"d8\")\n chess_notation[color].append(move)\n \n elif move[:3] == \"0-0\":\n if turn == \"w\":\n seek_piece(\"e1\", pieces).set_position(\"g1\")\n seek_piece(\"h1\", pieces).set_position(\"f1\")\n elif turn == \"b\":\n seek_piece(\"e8\", pieces).set_position(\"g8\")\n seek_piece(\"h8\", pieces).set_position(\"f8\")\n chess_notation[color].append(move)\n \n else:\n m_piece = move[0].upper()\n m_init = move[1:3].lower()\n m_ends = move[4:6].lower()\n\n piece = seek_piece(m_init, pieces)\n if seek_piece(m_ends, pieces):\n pieces.remove(seek_piece(m_ends, pieces))\n piece.set_position(m_ends)\n\n if m_piece == \"P\" and m_ends[-1] in \"1/8\":\n create(move[7], piece.color, piece.position, pieces)\n pieces.remove(piece)\n \n chess_notation[color].append(move)\n turn, other = other, turn\n else:\n break\n \n result = result if (result and move_counter == len(move_list)) else None\n\n if first_turn == \"b\":\n 
chess_notation[0].insert(1, \"...\")\n \n screen_reset()\n print_solve_problem_header() if solving else print_analyze_game_header()\n if solving:\n print_solve_problem_playing(pieces, chess_notation, players, tournament, result)\n else: \n print_analyze_game_playing(pieces, chess_notation, players, tournament, result)", "def movePiece(self,pos, dest):\r\n\r\n #set passant every turn to check if en passant is possible\r\n if(isinstance(self.board[pos[0]][pos[1]],piece.Pawn)):\r\n if(self.board[pos[0]][pos[1]].enPassant == True):\r\n self.passant = True\r\n else:\r\n self.passant = False\r\n\r\n #check if castle move\r\n if(not self.checkPossible(pos,dest)):\r\n if(pos == self.kingPos[0]):\r\n if(dest == (1,0)):\r\n self.board[2][0] = self.board[0][0]\r\n self.board[2][0].position = (2,0)\r\n self.board[0][0] = 0\r\n elif(dest == (6,0)):\r\n self.board[5][0] = self.board[7][0]\r\n self.board[5][0].position = (5,0)\r\n self.board[7][0] = 0\r\n elif(pos == self.kingPos[1]):\r\n if(dest == (1,7)):\r\n self.board[2][7] = self.board[0][7]\r\n self.board[2][7].position = (2,7)\r\n self.board[0][7] = 0\r\n elif(dest ==(6,7)):\r\n print(\"Final Step\")\r\n self.board[5][7] = self.board[7][7]\r\n self.board[5][7].position = (5,7)\r\n self.board[7][7] = 0\r\n\r\n # move piece(normally)\r\n self.board[dest[0]][dest[1]] = self.board[pos[0]][pos[1]]\r\n self.board[pos[0]][pos[1]] = 0\r\n self.board[dest[0]][dest[1]].position = (dest[0], dest[1])", "def execute_move(self, move: Tuple[int, int, Piece], player: int):\n\n (x, y, p) = move\n\n # Placing in empty square\n assert self[x][y] == 0\n # Piece placed is not already used\n assert p not in self.used_pieces\n # Not placing in middle cross\n assert x != self.mid\n assert y != self.mid\n\n # print(f\"Placing {(self.selected_piece & 0b1111):04b} at {x},{y}\")\n self[x][y] = int(self.selected_piece) # +(1<<self.n)\n\n self.selected_piece = p\n # print(f\"Selecting {(self.selected_piece & 0b1111):04b} for opponent\\n\")", "def doMove(self, board, dice):\n rc = RuleChecker(board, dice, self.color)\n moves = []\n sorted_pawns = self.order_pawns(rc.b_final)\n while not Player.stop_move_generation(self, rc):\n sorted_pawns = self.order_pawns(rc.b_final)\n new_move = None\n for pawn in sorted_pawns:\n #print(\"MoveFirstPawn::doMove: pawn loop - pawn id: \"+str(pawn.id))\n pawn_space = rc.b_final.spacemap[pawn.location]\n if isinstance(pawn_space, FinalSpace):\n continue\n elif isinstance(pawn_space, HomeSpace):\n #print(\"MoveFirstPawn::doMove: mkaing a HomeMove in doMove\")\n for die in rc.tvals.get_all_dice():\n new_move = MoveHome(pawn, pawn.location, die)\n valid, rc = Player.check_next_move(self, rc, new_move)\n if valid:\n moves.append(new_move)\n break\n else:\n new_move = None\n elif isinstance(pawn_space, StartSpace):\n #print(\"MoveFirstPawn::doMove: making an EnterPiece in doMove\")\n new_move = EnterPiece(pawn)\n valid, rc = Player.check_next_move(self, rc, new_move)\n if valid:\n #print(\"MoveFirstPawn::doMove: EnterPiece was valid using pawn \"+str(pawn.id))\n moves.append(new_move)\n #print(\"MoveFirstPawn::doMove: length of moves: \"+str(len(moves)))\n else:\n new_move = None\n else: #Regular Space\n #print(\"MoveFirstPawn::doMove: making a MoveMain in doMove\")\n for die in rc.tvals.get_all_dice():\n new_move = MoveMain(pawn, pawn.location, die)\n valid, rc = Player.check_next_move(self, rc, new_move)\n #print(\"MoveFirstPawn::doMove: \"+str(rc.tvals.bonus))\n if valid:\n moves.append(new_move)\n break\n else:\n new_move = None\n if new_move != 
None:\n print(\"MoveFirstPawn::doMove: breaking from pawn loop\")\n break\n return moves", "def makeMove(move):\r\n try:\r\n # Place the player in a new position\r\n GameBoard.theBoard[PlayerClass.char.position + move] = PlayerClass.char.name\r\n # Reset the current position to empty\r\n GameBoard.theBoard[PlayerClass.char.position] = \" \"\r\n # Update player position to the new position\r\n PlayerClass.char.position = PlayerClass.char.position + move\r\n except IndexError:\r\n print(\"Out of bounds.\")\r\n\r\n checkEncounters()\r\n GameBoard.draw_board(GameBoard.theBoard)\r\n gameAction()", "def can_move_piece(self, position, new_position, ignore_turn = False):\n\n can_move_from_result = self.can_move_piece_from(position, ignore_turn)\n if (can_move_from_result != Game.CanMoveResults.Ok):\n return can_move_from_result\n if (new_position < 0 or new_position > Board.position_count):\n return Game.CanMoveResults.OutsideBoard\n if (position == new_position):\n return Game.CanMoveResults.SamePosition\n if (self.board[new_position] != Piece.Empty):\n return Game.CanMoveResults.NewPositionOccupied\n if (self.check_if_mill_is_ok(self.board[position], new_position) == False):\n return Game.CanMoveResults.OldMillAtPosition\n\n \n\n\n moved_piece = self.board[position]\n total_on_board = self.board.pieces_of_type_on_board(moved_piece)\n # If you have three pieces left you're allowed to fly so the adjacent rule doesn't apply\n if (total_on_board > 3):\n if (self.board.positions_are_adjacent(position, new_position) == False):\n return Game.CanMoveResults.NotAdjacent\n\n return Game.CanMoveResults.Ok", "def move_piece(self, from_pos, to_pos):\n moving_piece_id = self.get_occupation(from_pos)\n captured_piece_id = self.get_occupation(to_pos) # None if no capture\n\n # if moving piece is a general update its position in the\n # general_position dictionary\n if moving_piece_id[1:3] == 'ge':\n if moving_piece_id[0] == 'r':\n color = 'red'\n else:\n color = 'blue'\n self.set_general_position(color, to_pos)\n\n # update the board\n self.clear_position(from_pos)\n self.set_occupation(moving_piece_id, to_pos)\n\n return captured_piece_id", "def check_move(self):\n\n if self.DEBUG_PRINT_FUNCTIONS:\n pass;\n print \"check_move\"\n\n if len(self.square) != 1 or self.piece == None:\n if self.DEBUG:\n print \"missing piece or square!\"\n return 5\n sqr_cords = self.c.coords(self.square) # square coords\n sqr_cntr = apply(self.find_center, sqr_cords) # square center\n pce_cntr = apply(self.find_center, self.c.coords(self.piece)) # piece center\n vtr = (sqr_cntr[0] - pce_cntr[0], sqr_cntr[1] - pce_cntr[1]) # piece vector(distence and direction)\n if self.DEBUG:\n pass; # print sqr_cords, sqr_cntr, pce_cntr, vtr\n\n if self.jumps[0]: # jump checker\n # if move has not been found by check_for_jumps then fail\n # else, ingore all the other checks, and succeed\n if self.jumps[0].count((self.piece, vtr)) != 1:\n self.show_message(\"You have a jump!\", .8)\n return 5\n else:\n self.jump_made = self.jumps[0].index((self.piece, vtr))\n if self.DEBUG:\n print \"jump_made: \", self.jump_made\n return 0\n\n # movement direction checker\n if self.c.itemcget(self.piece, \"outline\") != \"gold2\":\n if self.moving == \"black\":\n if vtr[1] > 0:\n if self.DEBUG:\n print \"wrong way, black!\"\n return 3\n else:\n if vtr[1] < 0:\n if self.DEBUG:\n print \"wrong way, red!\"\n return 3\n\n # distence checker\n if abs(vtr[0]) != self.SQUARESIZE or abs(vtr[1]) != self.SQUARESIZE:\n if self.DEBUG:\n print \"Too far!\"\n return 4\n\n # 
square emptiness checker\n if self.c.type(self.c.find_overlapping(sqr_cords[0] + (self.SQUARESIZE / 2), \\\n sqr_cords[1] + (self.SQUARESIZE / 2), \\\n sqr_cords[2] - (self.SQUARESIZE / 2), \\\n sqr_cords[3] - (self.SQUARESIZE / 2))) != \"rectangle\":\n if self.DEBUG:\n print \"not empty: \", self.c.find_overlapping(sqr_cords[0] + (self.SQUARESIZE / 2), \\\n sqr_cords[1] + (self.SQUARESIZE / 2), \\\n sqr_cords[2] - (self.SQUARESIZE / 2), \\\n sqr_cords[3] - (self.SQUARESIZE / 2))\n return 2\n\n return 0", "def move_piece(board, x, y, new_x, new_y):\n if not isfree(board, x, y):\n player = get_piece(board, x, y)\n remove_piece(board, x, y)\n place_piece(board, new_x, new_y, player)\n return True\n return False", "def move(self, action):\n tile_type, from_pile, to_stack, nbr_to_move = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n #elif pile < 0 or pile >= len(self.piles):\n # raise Exception(\"Invalid pile\")\n #elif count < 1 or count > self.piles[pile]:\n # raise Exception(\"Invalid number of objects\")\n\n # get the tiles from the factory\n nbr_tiles, penalty = self.factory.remove_tiles_from_pile(from_pile, tile_type)\n\n if to_stack == penalty_stack_row_idx:\n # these tiles are going straight to penalty\n self.players[self.current_player_idx].add_tiles_to_penalty(nbr_tiles, tile_type)\n else:\n # put the tiles on the floor\n self.players[self.current_player_idx].move_tiles_to_row(nbr_tiles, tile_type, to_stack)\n\n if penalty == 1:\n self.players[self.current_player_idx].add_penalty_tile_to_penalty_stack()\n\n # check if the round is over\n if self.factory.get_tile_count_in_piles() == 0:\n # score this round and setup the next round \n # if the game is over, determine the winner\n if self.process_end_round():\n self.set_winner()\n # the end of round method also sets the next player\n else:\n # check if the player just did something which will end the game soon\n if not self.is_last_round:\n self.is_last_round = self.players[self.current_player_idx].has_a_completed_row()\n # pass the baton to the next player\n self.switch_player()\n\n \n\n # Update pile\n #self.piles[pile] -= count\n #self.switch_player()\n\n # Check for a winner\n #if all(pile == 0 for pile in self.piles):\n # self.winner = self.player", "def move(self, start, end):\n piece = self.get_piece_at(*start)\n opposing_piece = self.get_piece_at(*end)\n \n if opposing_piece != None:\n opposing_piece.is_alive = False\n opposing_piece.x = None\n opposing_piece.y = None\n \n if str(piece) == 'Pawn':\n self.promote(piece, end[1])\n piece = self.get_piece_at(*start)\n \n piece.x = end[0]\n piece.y = end[1]\n self.board[start[1]-1][start[0]-1] = None\n self.board[end[1]-1][end[0]-1] = piece", "def possible_moves(self, actual_position:list, board):\n a_r,a_c = actual_position #piece actual row and column indexes\n board_pos_mov = [] #array containing possible moves of piece within board\n pos_mov = [] #array containing possible moves of piece within board and analyzing allies and enemies\n\n # Getting possible moves from moves_* methods.\n if self.type in ['king','knight']:\n board_pos_mov = self.move_king_knight(actual_position)\n elif self.type == 'pawn':\n board_pos_mov = self.move_pawn(actual_position)\n # elif self.type == 'knight':\n # board_pos_mov = self.move_knight(actual_position)\n elif self.type in ['queen','rook','bishop']:\n board_pos_mov = self.move_queen_rook_bishop(actual_position)\n\n # Analyzing possible moves with friends and foes for pawn\n if self.type == 
'pawn':\n for row,column in board_pos_mov:\n\n # Checking for moves in horizontal direction\n if row == a_r:\n if board[row][column] == None: #Checking for empty square\n pos_mov.append([row,column])\n else:\n pass # if there is a piece in the square the pawn cannot be moved\n\n # Checking for moves in diagonal (attacks)\n else:\n if board[row][column] != None: #Checking if there is a piece in the square\n if board[row][column].team != self.team: # Checking wether the piece is ally or enemy\n pos_mov.append([row,column]) # Append if it is enemy\n else:\n pass # If the piece is an ally the piece cannot be moved\n\n # Analyzing possible moves with friends and foes for king and knight\n if self.type in ['king','knight']:\n for row,column in board_pos_mov:\n if board[row][column] == None: #Checking for empty square\n pos_mov.append([row,column])\n elif board[row][column].team != self.team: #Checking for ally or enemy\n pos_mov.append([row,column])\n else:\n pass\n # Analyzing possible moves with friends and foes for queen, rook and bishop\n elif self.type in ['queen','rook','bishop']:\n for direction in board_pos_mov: # testing for every movement direction\n for row,column in direction:\n if type(board[row][column]) == type(self): # checking if there is a piece in the square\n if board[row][column].team != self.team: # checking piece's team\n pos_mov.append([row,column])\n break # Movement stops because there is a enemy\n else:\n break # if the piece in the square is an ally the analyzed piece cannot move further in that direction\n\n else:\n pos_mov.append([row,column]) # If the square is empty (None) the piece can move in that direction\n\n return pos_mov", "def update_pieces(self):\n pieces = self.get_pieces().values() # pieces is now a list of objects\n allowed_destinations = set()\n allowed_palace_destinations = set()\n pieces_checking = []\n\n color = self.get_color()\n board = self.get_board()\n if color == 'red':\n opposing_general_position = board.get_general_position('blue')\n else:\n opposing_general_position = board.get_general_position('red')\n\n for piece in pieces:\n piece.update_hyp_moves()\n piece.update_allowed_moves()\n piece.update_allowed_palace_destinations()\n\n # add the piece's destinations to the player's sets\n allowed_destinations |= set(piece.get_allowed_moves())\n allowed_palace_destinations |= piece.get_allowed_palace_destinations()\n\n # if the piece has the opposing general in check add it to the\n # Player's pieces_checking\n if piece.is_checking(opposing_general_position):\n pieces_checking.append(piece)\n\n self.set_allowed_destinations(allowed_destinations)\n self.set_allowed_palace_destinations(allowed_palace_destinations)\n self.set_pieces_checking(pieces_checking)", "def place_piece(self, piece, position):\n if (self.can_place_piece(piece, position) != self.CanPlaceResults.Ok):\n return self.PlaceResults.Failed\n\n self.board[position] = piece\n player = self.get_player_from_piece(self.turn)\n player.pieces_amount -= 1\n player.increase_position_move_count()\n\n if (self.players[0].pieces_amount == 0 and self.players[1].pieces_amount == 0):\n self.state = self.GameStage.Moving\n\n if (self.board.has_three_at_position(piece, position)):\n player.latest_created_mill = self.board.get_mill_at_position(piece, position)\n self.eliminating = True\n return self.PlaceResults.GotThree\n self.turn = self.board.get_other_piece(self.turn)\n self.total_turns = self.total_turns + 1\n return self.PlaceResults.Placed", "def can_move_piece_from(self, position, ignore_turn 
= False):\n if (position < 0 or position >= Board.position_count):\n return Game.CanMoveResults.OutsideBoard\n if (ignore_turn == False and self.turn != self.board[position]):\n return Game.CanMoveResults.WrongPiece\n if (self.state != Game.GameStage.Moving):\n return Game.CanMoveResults.WrongState\n\n return Game.CanMoveResults.Ok" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the stack after a player move and processes it. Captures enemy pieces and adds the player's own pieces to the reserve if the size of the stack is more than five pieces. Updates the current board and returns it to the calling function.
def process_stack(self, board, destination, player): stack = board[destination[0]][destination[1]] while len(stack) > 5: piece = stack[0] if piece == player.get_player_color(): player.add_reserve_piece() else: player.capture_piece() stack = stack[1:] board[destination[0]][destination[1]] = stack self.check_win_conditions(player) return board
[ "def overflow(self, player, move):\r\n player_profile = self.which_player(player)\r\n piece_move = self._board[move[0]][move[1]]\r\n while len(self._board[move[0]][move[1]]) > 5:\r\n bottom_piece = piece_move[0]\r\n self._board[move[0]][move[1]].pop(0)\r\n if bottom_piece == player_profile.get_color():\r\n player_profile.add_reserve()\r\n if bottom_piece != player_profile.get_color():\r\n player_profile.add_capture()", "def move(self, action):\n tile_type, from_pile, to_stack, nbr_to_move = action\n\n # Check for errors\n if self.winner is not None:\n raise Exception(\"Game already won\")\n #elif pile < 0 or pile >= len(self.piles):\n # raise Exception(\"Invalid pile\")\n #elif count < 1 or count > self.piles[pile]:\n # raise Exception(\"Invalid number of objects\")\n\n # get the tiles from the factory\n nbr_tiles, penalty = self.factory.remove_tiles_from_pile(from_pile, tile_type)\n\n if to_stack == penalty_stack_row_idx:\n # these tiles are going straight to penalty\n self.players[self.current_player_idx].add_tiles_to_penalty(nbr_tiles, tile_type)\n else:\n # put the tiles on the floor\n self.players[self.current_player_idx].move_tiles_to_row(nbr_tiles, tile_type, to_stack)\n\n if penalty == 1:\n self.players[self.current_player_idx].add_penalty_tile_to_penalty_stack()\n\n # check if the round is over\n if self.factory.get_tile_count_in_piles() == 0:\n # score this round and setup the next round \n # if the game is over, determine the winner\n if self.process_end_round():\n self.set_winner()\n # the end of round method also sets the next player\n else:\n # check if the player just did something which will end the game soon\n if not self.is_last_round:\n self.is_last_round = self.players[self.current_player_idx].has_a_completed_row()\n # pass the baton to the next player\n self.switch_player()\n\n \n\n # Update pile\n #self.piles[pile] -= count\n #self.switch_player()\n\n # Check for a winner\n #if all(pile == 0 for pile in self.piles):\n # self.winner = self.player", "def getImageStack(self, board):\n index = [None,0,None,1,3,None,2] # indices of main plains\n # PLAYER = 4 # index of PLAYER's plain\n LONG_CAPTURE = 4 # index of LONG_CAPTURE's plain\n active_player = 1 # white always move\n # create image stack that will be an input to NNet \n n = self.n\n main_planes = np.zeros(shape=(5, n, n), dtype=np.float32)\n # main images\n for y in range(n):\n for x in range(n):\n piece = board.pieces[x][y]\n if piece != 0:\n main_planes[index[piece]][x][y] = 1\n # main_planes[PLAYER][x][y] = active_player\n \n # player/piece(s) making the move\n if board.last_long_capture:\n last = board.last_long_capture\n main_planes[LONG_CAPTURE][last.x1][last.y1] = 1\n \n # auxiliary images\n normNoProgressCount = board.noProgressCount / 128.0 if board.count_pieces() <= 7 else 0\n normKingMoveCount = board.kingMoveCount / 32.0\n normRepetitionCount = board.get_repetition_count() / 4.0\n \n no_progress = np.full((8, 8), normNoProgressCount, dtype=np.float32)\n king_move_count = np.full((8, 8), normKingMoveCount, dtype=np.float32)\n repetition_count = np.full((8, 8), normRepetitionCount, dtype=np.float32)\n auxiliary_planes = [no_progress, king_move_count, repetition_count]\n\n image_stack = np.asarray(auxiliary_planes, dtype=np.float32)\n image_stack = np.vstack((main_planes, auxiliary_planes))\n assert image_stack.shape == (self.getImageStackSize(), n, n) \n \n # debug image stack\n #if board.last_long_capture or board.kingMoveCount>0:\n # self.print_image_stack(image_stack)\n \n return image_stack", "def 
move_piece(self, player, select, move, number_pieces):\r\n if self.check(player, select, move, number_pieces):\r\n player_profile = self.which_player(player)\r\n piece_select = self._board[select[0]][select[1]]\r\n bottom_place = len(piece_select) - number_pieces\r\n for num in range(number_pieces):\r\n bottom_piece = piece_select[bottom_place]\r\n self._board[select[0]][select[1]].pop(bottom_place)\r\n self._board[move[0]][move[1]].append(bottom_piece)\r\n if len(self._board[move[0]][move[1]]) > 5:\r\n self.overflow(player, move)\r\n if player_profile.get_capture() == 6:\r\n return player_profile.get_name() + \" Wins\"\r\n return \"successfully moved\"\r\n\r\n else:\r\n return False", "def place_atop_safely(self, position, stack):\n x, y = cartesian_to_list(position)\n self._board[x][y].extend(stack) # place stack atop the stack already at position\n\n # a piece has been placed! process the consequence based on game rules\n stack = self.show_pieces(position)\n active_player_color = self._players[self._whose_turn]['color']\n excess_stack_height = len(stack) - self._MAX_STACK_HEIGHT # has negative \"excess\" for small stacks\n\n if excess_stack_height > 0: # game rules define consequences based on excess stack height\n # remove the bottom pieces from the excessive stack so it's not excessive anymore\n removed_pieces = self.remove_pieces_from_stack(position, 'bottom', number_to_remove=excess_stack_height)\n\n # process each excess piece's capture or reserve placement\n for removed_piece in removed_pieces:\n\n # if bottom piece belongs to player making move, send to reserve. Else, make capture of opponent piece\n consequence = 'captured'\n if removed_piece == active_player_color:\n consequence = 'reserved'\n\n # place the excess pieces into this player's reserve or capture pile, as appropriate\n self._players[self._whose_turn][consequence] += 1", "def move_battle_heap(MyMoves, battle_heap):\n while battle_heap:\n section_distance, enemy_distance, ship_id, target_coord, over_thrust, strong_enough, enemy_val = heapq.heappop(battle_heap)\n\n if ship_id not in MyMoves.myMap.ships_moved_already:\n\n ship_coords = MyMoves.myMap.data_ships[MyMoves.myMap.my_id][ship_id]['coords']\n ship_point = MyMoves.myMap.data_ships[MyMoves.myMap.my_id][ship_id]['point']\n ship_health = MyMoves.myMap.data_ships[MyMoves.myMap.my_id][ship_id]['health']\n ship_dying = ship_health <= MyCommon.Constants.DYING_HP\n\n if target_coord: ## HAS TARGET\n if over_thrust is None:\n ## MOVE THIS SHIP, IN THE SAME SECTION\n\n ## IF NOT STRONG ENOUGH, GO BACK 7 UNITS, BUT ALSO KEEP TRACK OF BACKUP MATRIX\n if strong_enough or ship_dying:\n ## STRONG ENOUGH, CAN JUST ATTACK TOWARDS ENEMY\n ## IF DYING, ATTACK TOWARDS ENEMY\n logging.debug(\"ship_id: {} from handled_ships in same section (strong enough). 
ship_dying: {}\".format(ship_id, ship_dying))\n thrust, angle = astar.get_thrust_angle_from_Astar(MyMoves, ship_id, target_coord, target_distance=enemy_distance, target_planet_id=None)\n logging.debug(\"thrust: {} angle: {} enemy_distance: {}\".format(thrust, angle, enemy_distance))\n\n ## IF TARGET IS REACHABLE, MOVE BACK BY 2 TO PREVENT COLLIDING WITH ENEMY\n ## COMMENTING THIS OUT GIVES A HIGHER RANKING\n if int(round(enemy_distance)) - 1 <= thrust:\n logging.debug(\"docked enemy_val: {} \".format(enemy_val))\n if enemy_val == Matrix_val.ENEMY_SHIP_DOCKED.value and not(ship_dying): ## ONLY MOVE BACK IF ENEMY IS DOCKED\n thrust = max(0, thrust - 2)\n logging.debug(\"updated thrust for docked enemy: {} angle: {}\".format(thrust, angle))\n\n ship_task = MyCommon.ShipTasks.ATTACKING_FRONTLINE\n set_commands_status(MyMoves, ship_id, thrust, angle, target_coord, ship_task)\n\n ## SET COMAND STATUS LATER (MOVE OTHERS FIRST)\n # ship_task2 = MyCommon.ShipTasks.SUPPORTING\n # move_ships_towards_this_coord(MyMoves, ship_id, ship_task, ship_task2, target_coord)\n\n else:\n ## NOT STRONG ENOUGH (FLIP ANGLE)\n logging.debug(\"ship_id: {} from handled_ships in same section (not strong enough)\".format(ship_id))\n angle = MyCommon.get_angle(ship_coords, target_coord)\n flip_angle = MyCommon.get_reversed_angle(angle)\n over_thrust = 10\n new_target_coord = MyCommon.get_destination_coord(ship_coords,flip_angle,over_thrust,rounding=False)\n thrust, angle = astar.get_thrust_angle_from_Astar(MyMoves, ship_id, new_target_coord, target_distance=over_thrust, target_planet_id=None)\n logging.debug(\"thrust: {} angle: {}\".format(thrust, angle))\n ship_task = MyCommon.ShipTasks.EVADING\n\n ## COMMENTING THIS OUT BECAUSE WILL MOVE LATER\n #set_commands_status(MyMoves, ship_id, thrust, angle, new_target_coord, ship_task)\n\n ## ADD TO BACKUP MATRIX\n #MyMoves.myMatrix.backup_matrix[ship_point[0], ship_point[1]] = 1 ## WAS ON BOT25\n ## +2 TO MOVE BACK FURTHER FOR BACKUP TO GO THERE\n\n try:\n backup_coord = MyCommon.get_destination_coord(ship_coords, angle, thrust + 2, rounding=True)\n MyMoves.myMatrix.backup_matrix[backup_coord.y, backup_coord.x] = 1\n except:\n ## GOING OVER THE MAP\n backup_coord = MyCommon.get_destination_coord(ship_coords, angle, thrust, rounding=True)\n MyMoves.myMatrix.backup_matrix[backup_coord.y, backup_coord.x] = 1\n\n ship_task2 = MyCommon.ShipTasks.SUPPORTING\n move_ships_towards_this_coord(MyMoves, ship_id, ship_task, ship_task2, backup_coord)\n\n else:\n ## MOVE THIS SHIP NOW, FROM DIFFERENT SECTION\n\n ## LOOK FOR BACKUP FIRST, IF NONE FOUND MOVE TOWARDS TARGET LIKE NORMAL\n # pad_values = 0\n # area_matrix = MyCommon.get_circle_in_square(MyMoves.myMatrix.backup_matrix,\n # ship_coords,\n # MyCommon.Constants.BACKUP_CIRCLE_RADIUS,\n # MyCommon.Constants.BACKUP_SQUARE_RADIUS,\n # pad_values)\n # seek_val = 1\n # backup_point, backup_distance, backup_val = MyCommon.get_coord_closest_seek_value(seek_val,\n # area_matrix,\n # MyMoves.EXP.distance_matrix_backup)\n #\n # if backup_point:\n # ## MOVE TOWARDS BACKUP\n # logging.debug(\"ship_id: {} from handled_ships in different section. 
Going to back up\".format(ship_id))\n # slope = (backup_point[0] - MyCommon.Constants.BACKUP_SQUARE_RADIUS, backup_point[1] - MyCommon.Constants.BACKUP_SQUARE_RADIUS)\n # new_target_coord = MyCommon.Coordinates(ship_point[0] + slope[0], ship_point[1] + slope[1])\n # logging.debug(\"backup found at coord: {}\".format(new_target_coord))\n # thrust, angle = expanding2.get_thrust_angle_from_Astar(MyMoves, ship_id, new_target_coord,\n # target_distance=backup_distance,\n # target_planet_id=None)\n # logging.debug(\"thrust: {} angle: {}\".format(thrust, angle))\n # ship_task = MyCommon.ShipTasks.SUPPORTING\n # set_commands_status(MyMoves, ship_id, thrust, angle, new_target_coord, ship_task)\n #\n # else:\n # ## NO BACKUP CLOSE BY, JUST MOVE TOWARDS ENEMY\n # logging.debug(\"ship_id: {} from handled_ships in different section\".format(ship_id))\n # logging.debug(\"section_distance: {} enemy_distance {} target_coord {}\".format(section_distance, enemy_distance, target_coord, over_thrust))\n # thrust, angle = expanding2.get_thrust_angle_from_Astar(MyMoves, ship_id, target_coord, target_distance=over_thrust, target_planet_id=None)\n # logging.debug(\"thrust: {} angle: {}\".format(thrust, angle))\n # ship_task = MyCommon.ShipTasks.ATTACKING\n # set_commands_status(MyMoves, ship_id, thrust, angle, target_coord, ship_task)\n\n\n ## BACKUP IS MOVED ALREADY AT THIS POINT (USING GET SHIPS IN ARRAY)\n logging.debug(\"ship_id: {} from handled_ships in different section\".format(ship_id))\n logging.debug(\"section_distance: {} enemy_distance {} target_coord {}\".format(section_distance, enemy_distance, target_coord, over_thrust))\n thrust, angle = astar.get_thrust_angle_from_Astar(MyMoves, ship_id, target_coord, target_distance=over_thrust, target_planet_id=None)\n logging.debug(\"thrust: {} angle: {}\".format(thrust, angle))\n ship_task = MyCommon.ShipTasks.ATTACKING\n\n set_commands_status(MyMoves, ship_id, thrust, angle, target_coord, ship_task)\n\n ## DOING THIS GENERATED A LOWER RANK (BOT 52)\n # if enemy_distance < 14:\n # ## PREVENTS COLLIDING TO ENEMY\n # thrust = int(max(1, enemy_distance - 8))\n # logging.debug(\"updated thrust to prevent collision: {} angle: {}\".format(thrust, angle))\n # set_commands_status(MyMoves, ship_id, thrust, angle, target_coord, ship_task)\n # else:\n # set_commands_status(MyMoves, ship_id, thrust, angle, target_coord, ship_task)\n\n\n else:\n logging.debug(\"ship_id: {} from handled_ships no enemy found around it\".format(ship_id))\n ## NO ENEMY FOUND AROUND ANY OF OUR SHIPS\n #closest_section_with_enemy(MyMoves, ship_id, move_now=True)\n closest_section_with_enemy(MyMoves, ship_id, move_now=True, docked_only=True)", "def applyMove(board,gameState, move, player = \"player\"):\n pass", "def possible_moves(self, actual_position:list, board):\n a_r,a_c = actual_position #piece actual row and column indexes\n board_pos_mov = [] #array containing possible moves of piece within board\n pos_mov = [] #array containing possible moves of piece within board and analyzing allies and enemies\n\n # Getting possible moves from moves_* methods.\n if self.type in ['king','knight']:\n board_pos_mov = self.move_king_knight(actual_position)\n elif self.type == 'pawn':\n board_pos_mov = self.move_pawn(actual_position)\n # elif self.type == 'knight':\n # board_pos_mov = self.move_knight(actual_position)\n elif self.type in ['queen','rook','bishop']:\n board_pos_mov = self.move_queen_rook_bishop(actual_position)\n\n # Analyzing possible moves with friends and foes for pawn\n if self.type == 
'pawn':\n for row,column in board_pos_mov:\n\n # Checking for moves in horizontal direction\n if row == a_r:\n if board[row][column] == None: #Checking for empty square\n pos_mov.append([row,column])\n else:\n pass # if there is a piece in the square the pawn cannot be moved\n\n # Checking for moves in diagonal (attacks)\n else:\n if board[row][column] != None: #Checking if there is a piece in the square\n if board[row][column].team != self.team: # Checking wether the piece is ally or enemy\n pos_mov.append([row,column]) # Append if it is enemy\n else:\n pass # If the piece is an ally the piece cannot be moved\n\n # Analyzing possible moves with friends and foes for king and knight\n if self.type in ['king','knight']:\n for row,column in board_pos_mov:\n if board[row][column] == None: #Checking for empty square\n pos_mov.append([row,column])\n elif board[row][column].team != self.team: #Checking for ally or enemy\n pos_mov.append([row,column])\n else:\n pass\n # Analyzing possible moves with friends and foes for queen, rook and bishop\n elif self.type in ['queen','rook','bishop']:\n for direction in board_pos_mov: # testing for every movement direction\n for row,column in direction:\n if type(board[row][column]) == type(self): # checking if there is a piece in the square\n if board[row][column].team != self.team: # checking piece's team\n pos_mov.append([row,column])\n break # Movement stops because there is a enemy\n else:\n break # if the piece in the square is an ally the analyzed piece cannot move further in that direction\n\n else:\n pos_mov.append([row,column]) # If the square is empty (None) the piece can move in that direction\n\n return pos_mov", "def update_game(move):\n\n global board\n global x\n global y\n global myturn\n global receivedMove\n global bigscope\n global magic\n global opponent_jid\n global moves\n global turn\n\n move = int(move)\n print(\"Received:\", move)\n sleep(2)\n\n print(\"Hello, this is update_game function here.\")\n opponent_turn = not myturn\n\n print(\"Current Turn: \", turn)\n\n if opponent_turn:\n insertVal = \"X\"\n opponentAcc = x\n\n else:\n insertVal = \"O\"\n opponentAcc = y\n\n board[bigscope][move] = insertVal\n opponentAcc.ac[bigscope].append(magic[move])\n opponentAcc.moves[bigscope] += 1\n moves += 1\n if check(opponentAcc.ac[bigscope]):\n #Opponent has won a block\n opponentAcc.wins[bigscope] = True\n print(f\"\\nYour opponent {opponent_jid} won the block {bigscope}. 
Time to show your metal!\")\n\n bigscope = move #set up for local player's move\n turn = not turn\n receivedMove = True", "def doMove(self, board, dice):\n rc = RuleChecker(board, dice, self.color)\n moves = []\n sorted_pawns = self.order_pawns(rc.b_final)\n while not Player.stop_move_generation(self, rc):\n sorted_pawns = self.order_pawns(rc.b_final)\n new_move = None\n for pawn in sorted_pawns:\n #print(\"MoveFirstPawn::doMove: pawn loop - pawn id: \"+str(pawn.id))\n pawn_space = rc.b_final.spacemap[pawn.location]\n if isinstance(pawn_space, FinalSpace):\n continue\n elif isinstance(pawn_space, HomeSpace):\n #print(\"MoveFirstPawn::doMove: mkaing a HomeMove in doMove\")\n for die in rc.tvals.get_all_dice():\n new_move = MoveHome(pawn, pawn.location, die)\n valid, rc = Player.check_next_move(self, rc, new_move)\n if valid:\n moves.append(new_move)\n break\n else:\n new_move = None\n elif isinstance(pawn_space, StartSpace):\n #print(\"MoveFirstPawn::doMove: making an EnterPiece in doMove\")\n new_move = EnterPiece(pawn)\n valid, rc = Player.check_next_move(self, rc, new_move)\n if valid:\n #print(\"MoveFirstPawn::doMove: EnterPiece was valid using pawn \"+str(pawn.id))\n moves.append(new_move)\n #print(\"MoveFirstPawn::doMove: length of moves: \"+str(len(moves)))\n else:\n new_move = None\n else: #Regular Space\n #print(\"MoveFirstPawn::doMove: making a MoveMain in doMove\")\n for die in rc.tvals.get_all_dice():\n new_move = MoveMain(pawn, pawn.location, die)\n valid, rc = Player.check_next_move(self, rc, new_move)\n #print(\"MoveFirstPawn::doMove: \"+str(rc.tvals.bonus))\n if valid:\n moves.append(new_move)\n break\n else:\n new_move = None\n if new_move != None:\n print(\"MoveFirstPawn::doMove: breaking from pawn loop\")\n break\n return moves", "def on_after_move_damage(self, battle, pokemon, damage, move, foe):", "def get_pieces_to_move(self, coord, num_of_pieces):\n stack = self.get_stack(coord)\n pieces_to_move = []\n counter = 0\n while counter != num_of_pieces:\n pieces_to_move.insert(0, stack.pop())\n counter += 1\n return stack, pieces_to_move", "def process_a_shot(shot_row, shot_col, players_copy_of_opp_board, opponent_board, opponent_ships):\n state_of_shotted_field = opponent_board[shot_row][shot_col]\n \n if state_of_shotted_field == \"0\":\n \n if players_copy_of_opp_board[shot_row][shot_col] == \"M\":\n shot_result = \"M_repeat\"\n return shot_result\n \n shot_result = \"M\"\n mark_shot_on_board(shot_row, shot_col, players_copy_of_opp_board, shot_result)\n \n elif state_of_shotted_field == \"X\":\n shot_result = \"H\"\n mark_shot_on_board(shot_row, shot_col, players_copy_of_opp_board, shot_result)\n update_ships_state(shot_row, shot_col, opponent_ships, players_copy_of_opp_board, opponent_board)\n\n if is_ship_sunk(shot_row, shot_col, opponent_ships):\n shot_result = \"S\"\n \n\n return shot_result", "def possible_moves(self, stack):\n wpx, wpy = stack.pos\n h = stack.height\n \n # tests whether a generated position e is a valid move from s\n valid = lambda s, e: board.is_valid_position(e) and (s != e) and all(e != b.pos for b in self.black)\n\n moves = []\n for n in range(1, h + 1):\n for x in range(wpx - h, wpx + h + 1):\n if valid(stack.pos, (x, wpy)):\n moves.append(((x, wpy), n))\n for y in range(wpy - h, wpy + h + 1):\n if valid(stack.pos, (wpx, y)):\n moves.append(((wpx, y), n))\n return moves", "def ai_dark_move(gs):\n\n ### TODO: edit to your unique algorithm (mini-max w/ pruning, etc) ###\n ###\t\t Static evaluation of the board can be done with 'light_pieces' and 
'dark_pieces' dictionaries ###\n dark_move = None\n move = minimax(gs, 2)[0] # Move as decided by minimax\n\n if move:\n # create move copy (only copy (start_row, start_col) & (end_row, end_col) of move object)\n dark_move = Move((move.start_row, move.start_col),\n (move.end_row, move.end_col), gs.board)\n\n gs.make_move(dark_move) # make move\n\n # handles pawn promotion\n if (dark_move.end_row == 7) and (dark_move.piece_moved[0] == \"p\"):\n gs.board[dark_move.end_row][dark_move.end_col] = random.choice(\n (\"qd\", \"rd\", \"bd\", \"nd\")) # randomly select promotion piece\n\n # update dark pieces position dictionary\n # dark_pieces.pop(\"{},{}\".format(dark_move.start_row, dark_move.start_col))\n # dark_pieces[\"{},{}\".format(dark_move.end_row, dark_move.end_col)] = dark_move.piece_moved\n\n # # remove pieces captured from light_piece dictionary for faster static board evaluation in your mini-max algorithm rewrite\n # if dark_move.piece_captured != \" \" and not dark_move.en_passant_captured:\n # \tlight_pieces.pop(\"{},{}\".format(dark_move.end_row, dark_move.end_col))\n # elif dark_move.en_passant_captured:\n # \tlight_pieces.pop(\"{},{}\".format(dark_move.end_row+1, dark_move.end_col if gs.light_to_move else dark_move.end_row-1, dark_move.end_col))\n else:\n if gs.is_in_check():\n gs.check_mate = True\n else:\n gs.stale_mate = True\n\n return dark_move, gs", "def movestack(self, i):\n if i == _RIGHT and self.mode == _CHOOSEPILE and self.stackpointer < len(self.board) - 1:\n self.stackpointer += 1\n self.cardpointer = 0\n elif i == _LEFT and self.mode == _CHOOSEPILE and self.stackpointer > 0:\n self.stackpointer += -1\n self.cardpointer = 0\n elif i == _RIGHT and self.mode == _PICKMOVE and self.stackpicker < len(self.board) -1:\n self.stackpicker += 1\n elif i == _LEFT and self.mode == _PICKMOVE and self.stackpicker > 0:\n self.stackpicker += -1", "def apply(self, move):\n # the pile the user wants to take from\n pile = move.get_pile()\n # the amount of stones the user wants to take from the pile\n stones = move.get_stones()\n self._piles[pile] = max(0, self._piles[pile] - stones)", "def move_piece(x, y, new_x, new_y, x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION):\n\n # check whether the destination is the same for both\n\n if (new_x == new_x2 and new_y == new_y2):\n print(\"Both pieces going to the same location\")\n piece_type1 = get_piece(board, y, x)\n piece_type2 = get_piece(board, y2, x2)\n if (piece_type1 == \"p\" and piece_type2 == \"P\"):\n # both pawns, delete both\n print(\"Both are pawns, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"k\" and piece_type2 == \"K\"):\n print(\"Both are knights, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"p\" and piece_type2 == \"K\"):\n\n board = delete_piece(x, y, board, board_turtles)\n # execute move for AI\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n elif (piece_type1 == \"k\" and piece_type2 == \"P\"):\n board = delete_piece(x2, y2, board, board_turtles)\n # execute move for AI\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n print(\"Executing moves normally\")\n if (x != -1):\n board = execute_move(x, y, 
new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n if (x2 != -1):\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n\n return board", "def worker(board_id, board):\n members = get_members(board_id)\n cards = get_board_cards(board_id)\n lists_in_boards = get_board_lists(board_id)\n for card_id, card in cards.items():\n status = \"open\"\n if card['closed']:\n status = \"closed\"\n #if a card is closed the corresponding action is closed as well\n data = {'Status': status}\n actions = get_card_actions(card_id, {'filter': 'createCard,updateCard:idList,updateCard:name,\\\n ,updateCard:desc,commentCard'})\n sanitize_action(actions, card, lists_in_boards, board, members, data)\n sanitize_card(card, lists_in_boards, board, members)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the starting location tuple, destination tuple, and pieces to move tuple. Updates the initial location with the first item in the pieces to move tuple (the part of the stack not desired to move) and appends the second part of the pieces to move tuple to the destination list. Returns the updated instance of the board.
def update_board_location(self, start, destination, pieces_to_move): board = self.get_board() board[start[0]][start[1]] = pieces_to_move[0] list_of_pieces_to_add = pieces_to_move[1] for piece in list_of_pieces_to_add: board[destination[0]][destination[1]].append(piece) return board
[ "def move(self, start, end):\n piece = self.get_piece_at(*start)\n opposing_piece = self.get_piece_at(*end)\n \n if opposing_piece != None:\n opposing_piece.is_alive = False\n opposing_piece.x = None\n opposing_piece.y = None\n \n if str(piece) == 'Pawn':\n self.promote(piece, end[1])\n piece = self.get_piece_at(*start)\n \n piece.x = end[0]\n piece.y = end[1]\n self.board[start[1]-1][start[0]-1] = None\n self.board[end[1]-1][end[0]-1] = piece", "def combine_moves(board_state_val, x, y, new_x, new_y, x2, y2, new_x2, new_y2):\n # Create deep copy of the board to configure\n board_state = copy.deepcopy(board_state_val)\n\n # store the values of each moving board piece\n player_val = board_state[x][y]\n ai_val = board_state[x2][y2]\n\n if new_x == new_x2 and new_y == new_y2:\n\n piece_type1 = board_state[x][y]\n piece_type2 = board_state[x2][y2]\n if piece_type1 == \"p\" and piece_type2 == \"P\":\n # both pawns, delete both\n board_state[x][y] = \"W\"\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"k\" and piece_type2 == \"K\":\n board_state[y][x] = \"W\"\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"p\" and piece_type2 == \"K\":\n\n board_state[x][y] = \"W\"\n # execute move for AI\n board_state[new_x2][new_y2] = board_state[y2][x2]\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"k\" and piece_type2 == \"P\":\n board_state[x2][y2] = \"W\"\n # execute move for player\n board_state[new_x][new_y] = board_state[y][x]\n board_state[x][y] = \"W\"\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n\n board_state[new_x][new_y] = player_val\n board_state[x][y] = \"W\"\n\n board_state[new_x2][new_y2] = ai_val\n board_state[x2][y2] = \"W\"\n\n # check whether an AI pawn reached the last rank\n if ai_val == \"P\" and new_x2 == 4:\n # reached last rank, process it\n board_state[new_x2][new_y2] = \"K\"\n\n # check whether a player pawn reached the last rank\n if player_val == \"p\" and new_x == 0:\n # reached last rank, process it\n board_state[new_x][new_y] = \"k\"\n\n return board_state", "def place_pieces(cur_state):\n initial_board = cur_state\n\n initial_board[0][3] = \" R\"\n initial_board[1][2] = \" B\"\n initial_board[1][4] = \" B\"\n initial_board[3][4] = \" B\"\n initial_board[3][6] = \" B\"\n initial_board[5][2] = \" B\"\n\n cur_state = initial_board\n\n \"\"\"\n initial_board[0][1] = \"R\"\n initial_board[0][3] = \"R\"\n initial_board[0][5] = \"R\"\n initial_board[0][7] = \"R\"\n initial_board[1][0] = \"R\"\n initial_board[1][2] = \"R\"\n initial_board[1][4] = \"R\"\n initial_board[1][6] = \"R\"\n initial_board[2][1] = \"R\"\n initial_board[3][2] = \"B\" #######\n initial_board[2][5] = \"R\"\n initial_board[2][7] = \"R\"\n initial_board[5][0] = \"B\"\n initial_board[5][2] = \"B\"\n initial_board[5][4] = \"B\"\n initial_board[5][6] = \"B\"\n initial_board[6][1] = \"B\"\n initial_board[6][3] = \"B\"\n initial_board[6][5] = \"B\"\n initial_board[6][7] = \"B\"\n initial_board[7][0] = \"B\"\n initial_board[7][2] = \"B\"\n initial_board[7][4] = \"B\"\n initial_board[7][6] = \"B\"\n \"\"\"", "def move_piece(x, y, new_x, new_y, x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION):\n\n # check whether the destination is the same for both\n\n if (new_x == new_x2 and new_y == new_y2):\n print(\"Both pieces going to the same location\")\n piece_type1 = get_piece(board, y, x)\n piece_type2 = get_piece(board, y2, x2)\n if (piece_type1 == \"p\" and piece_type2 == \"P\"):\n # both pawns, delete both\n print(\"Both are 
pawns, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"k\" and piece_type2 == \"K\"):\n print(\"Both are knights, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"p\" and piece_type2 == \"K\"):\n\n board = delete_piece(x, y, board, board_turtles)\n # execute move for AI\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n elif (piece_type1 == \"k\" and piece_type2 == \"P\"):\n board = delete_piece(x2, y2, board, board_turtles)\n # execute move for AI\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n print(\"Executing moves normally\")\n if (x != -1):\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n if (x2 != -1):\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n\n return board", "def get_pieces_to_move(self, coord, num_of_pieces):\n stack = self.get_stack(coord)\n pieces_to_move = []\n counter = 0\n while counter != num_of_pieces:\n pieces_to_move.insert(0, stack.pop())\n counter += 1\n return stack, pieces_to_move", "def _move(self, at, to):\n copy = self.copy()\n i, j = at\n r, c = to\n copy.board[i][j], copy.board[r][c] = copy.board[r][c], copy.board[i][j]\n return copy", "def move_piece(self, player_name, start, destination, num_of_pieces):\n if self.get_winner() is not None:\n return self.get_winner() + ' has already won!'\n turn = self.get_turn()\n player = self.get_active_player(player_name)\n move_size = self.move_size(start, destination)\n valid_move = self.check_valid_move(player, start, destination)\n\n if turn != player.get_player_name() and turn is not None:\n return 'not your turn'\n\n if valid_move is False:\n return 'invalid location'\n\n if num_of_pieces != move_size:\n return 'invalid number of pieces'\n\n movement_range = self.get_range(start)\n\n if num_of_pieces > movement_range:\n return 'invalid number of pieces'\n\n pieces_to_move = self.get_pieces_to_move(start, num_of_pieces)\n board = self.update_board_location(start, destination, pieces_to_move)\n board = self.process_stack(board, destination, player)\n\n if self.get_winner() is not None:\n return 'wins'\n\n else:\n self.set_board(board)\n self.get_next_player(player)\n return 'successfully moved'", "def _move_piece(self, pos_from, pos_to):\n self[pos_to] = self[pos_from]\n self[pos_to].position = pos_to\n self[pos_to].mvs_number += 1\n self[pos_from] = EMPTY\n return", "def move_piece(self, start_x_y, end_x_y):\n\t\t(start_x, start_y) = start_x_y\n\t\t(end_x, end_y) = end_x_y\n\t\tself.matrix[end_x][end_y].occupant = self.matrix[start_x][start_y].occupant\n\t\tself.remove_piece((start_x, start_y))\n\t\tself.king((end_x, end_y))", "def movePiece(self,pos, dest):\r\n\r\n #set passant every turn to check if en passant is possible\r\n if(isinstance(self.board[pos[0]][pos[1]],piece.Pawn)):\r\n if(self.board[pos[0]][pos[1]].enPassant == True):\r\n self.passant = True\r\n else:\r\n self.passant = False\r\n\r\n #check if castle move\r\n if(not self.checkPossible(pos,dest)):\r\n if(pos == self.kingPos[0]):\r\n if(dest == (1,0)):\r\n self.board[2][0] = self.board[0][0]\r\n self.board[2][0].position = (2,0)\r\n self.board[0][0] = 0\r\n 
elif(dest == (6,0)):\r\n self.board[5][0] = self.board[7][0]\r\n self.board[5][0].position = (5,0)\r\n self.board[7][0] = 0\r\n elif(pos == self.kingPos[1]):\r\n if(dest == (1,7)):\r\n self.board[2][7] = self.board[0][7]\r\n self.board[2][7].position = (2,7)\r\n self.board[0][7] = 0\r\n elif(dest ==(6,7)):\r\n print(\"Final Step\")\r\n self.board[5][7] = self.board[7][7]\r\n self.board[5][7].position = (5,7)\r\n self.board[7][7] = 0\r\n\r\n # move piece(normally)\r\n self.board[dest[0]][dest[1]] = self.board[pos[0]][pos[1]]\r\n self.board[pos[0]][pos[1]] = 0\r\n self.board[dest[0]][dest[1]].position = (dest[0], dest[1])", "def make_move(self, move):\n zero_index = self.state.index(0)\n state = deepcopy(self.state)\n action = None\n new_state = None\n if move is Board.UP:\n new_state = self.up(zero_index, state)\n self.move_series.append(self.tie_breaker['UP']) # todo test these\n elif move is Board.UP_RIGHT:\n new_state = self.up_right(zero_index, state)\n self.move_series.append(self.tie_breaker['UP_RIGHT'])\n elif move is Board.RIGHT:\n new_state = self.right(zero_index, state)\n self.move_series.append(self.tie_breaker['RIGHT'])\n elif move is Board.DOWN_RIGHT:\n new_state = self.down_right(zero_index, state)\n self.move_series.append(self.tie_breaker['DOWN_RIGHT'])\n elif move is Board.DOWN:\n new_state = self.down(zero_index, state)\n self.move_series.append(self.tie_breaker['DOWN'])\n elif move is Board.DOWN_LEFT:\n new_state = self.down_left(zero_index, state)\n self.move_series.append(self.tie_breaker['DOWN_LEFT'])\n elif move is Board.LEFT:\n new_state = self.left(zero_index, state)\n self.move_series.append(self.tie_breaker['LEFT'])\n elif move is Board.UP_LEFT:\n new_state = self.up_left(zero_index, state)\n self.move_series.append(self.tie_breaker['UP_LEFT'])\n else:\n print(\"\\n\\n\\n\\n\\nERROR: not a valid board move\\n\\n\\n\\n\\n\")\n\n if not new_state:\n return False, False\n\n new_zero_index = new_state.index(0)\n action = deepcopy(Board.letters[new_zero_index])\n return new_state, action", "def combine_single_move(board_state_val, x, y, new_x, new_y):\n board_state = copy.deepcopy(board_state_val)\n\n player_val = copy.copy(board_state[x][y])\n\n board_state[new_x][new_y] = player_val\n board_state[x][y] = \"W\"\n # check whether we need to upgrade pawns to knights\n if new_x == 4 and player_val == \"P\":\n #print(\"Upgraded Single knight\")\n board_state[new_x][new_y] = \"K\"\n #print(board_state)\n elif new_x == 0 and player_val == \"p\":\n board_state[new_x][new_y] = \"k\"\n\n return board_state", "def make_move(self, move: Tuple[Position]):\n from_position, to_position = move\n\n if not Board._is_position_valid(self._shape, self._size, from_position):\n raise ValueError(f\"Invalid board position: {from_position}\")\n\n if not Board._is_position_valid(self._shape, self._size, to_position):\n raise ValueError(f\"Invalid board position: {to_position}\")\n\n new_hole_positions = (self._get_holes() - set([to_position])) | set(\n [\n from_position.get_middle_position(to_position),\n from_position,\n ]\n )\n\n return Board(\n size=self._size,\n shape=self._shape,\n hole_positions=new_hole_positions,\n )", "def execute_move(self, move: Tuple[int, int, Piece], player: int):\n\n (x, y, p) = move\n\n # Placing in empty square\n assert self[x][y] == 0\n # Piece placed is not already used\n assert p not in self.used_pieces\n # Not placing in middle cross\n assert x != self.mid\n assert y != self.mid\n\n # print(f\"Placing {(self.selected_piece & 0b1111):04b} at {x},{y}\")\n 
self[x][y] = int(self.selected_piece) # +(1<<self.n)\n\n self.selected_piece = p\n # print(f\"Selecting {(self.selected_piece & 0b1111):04b} for opponent\\n\")", "def p2_oneMoveGenerator(current_pawn_position, boardSize):\n row, col = boardSize\n pos_row, pos_col = current_pawn_position\n possible_new_position = []\n if pos_row -1 >= 0:\n new_pos_row = pos_row-1\n possible_new_position.append((new_pos_row,pos_col))\n if pos_col-1 >=0:\n possible_new_position.append((new_pos_row,pos_col-1))\n if pos_col+1 <=col-1:\n possible_new_position.append((new_pos_row,pos_col+1))\n return possible_new_position #return a list of 2 to 3 tuples with straight move first", "def add_moving(self, xy):\n self.board.set_tuple_item(BetterBoard.Moving, xy)", "def possible_moves(self):\n\n pos = self.get_pos()\n coords = self.translate_to_list_coords(pos)\n row = coords[0]\n col = coords[1]\n possible_moves = []\n possible_moves_alg = []\n possible_moves_final = []\n\n # Horses are not limited to forward only motion like soldiers,\n # so we do not need to check the piece color.\n\n # \"U\" (up) in the notes below means closer to the red side of the board\n possible_moves.append([[row - 1, col], [row - 2, col + 1]]) # 1U 1R\n possible_moves.append([[row - 1, col], [row - 2, col - 1]]) # 1U 1L\n possible_moves.append([[row, col + 1], [row - 1, col + 2]]) # 1R 1U\n possible_moves.append([[row, col + 1], [row + 1, col + 2]]) # 1R 1D\n possible_moves.append([[row, col - 1], [row - 1, col - 2]]) # 1L 1U\n possible_moves.append([[row, col - 1], [row + 1, col - 2]]) # 1L 1D\n possible_moves.append([[row + 1, col], [row + 2, col + 1]]) # 1D 1R\n possible_moves.append([[row + 1, col], [row + 2, col - 1]]) # 1D 1L\n\n for move in range(len(possible_moves)):\n\n temp = []\n\n for square in range(len(possible_moves[move])):\n temp.append(self.translate_to_alg_coords(possible_moves[move][square]))\n\n possible_moves_alg.append(temp)\n\n # Check if any part of each possible move is outside the board\n for move in range(len(possible_moves_alg)):\n\n temp = []\n\n if self.within_board(possible_moves_alg[move][0]) and self.within_board(possible_moves_alg[move][1]):\n temp.append(possible_moves_alg[move][0])\n temp.append(possible_moves_alg[move][1])\n\n # Do not add empty lists to the results\n if temp:\n possible_moves_final.append(temp)\n\n # Final list is in format [ [ step 1, step 2 ] , [ step 1, step 2 ] ],\n # where step 1 is an intermediate stop along the way to step 2\n return possible_moves_final", "def pawn_moves(x, y, color, board):\n possible_moves = []\n if color == '1':\n if y + 1 <= 7:\n if board[y + 1][x] == 'e0':\n possible_moves.append([x, y + 1])\n if y == 1:\n if board[y + 2][x] == 'e0':\n possible_moves.append([x, y + 2])\n if x - 1 >= 0:\n if board[y + 1][x - 1][1] == '2':\n possible_moves.append([x - 1, y + 1])\n if x + 1 <= 7:\n if board[y + 1][x + 1][1] == '2':\n possible_moves.append([x + 1, y + 1])\n\n if color == '2':\n if y + 1 <= 7:\n if board[y - 1][x] == 'e0':\n possible_moves.append([x, y - 1])\n if y == 6:\n if board[y - 2][x] == 'e0':\n possible_moves.append([x, y - 2])\n if x - 1 >= 0:\n if board[y - 1][x - 1][1] == '1':\n possible_moves.append([x - 1, y - 1])\n if x + 1 <= 7:\n if board[y - 1][x + 1][1] == '1':\n possible_moves.append([x + 1, y - 1])\n return possible_moves", "def move_piece(self, rnum_from, rnum_to):\n self._chk_rnum(rnum_from)\n self._chk_rnum(rnum_to)\n if self.is_square_empty(rnum_from):\n raise CheckersError(self._s_pos(rnum_from), \"no piece found\")\n if 
self.is_square_occupied(rnum_to):\n raise CheckersError(self._s_pos(rnum_to),\n f\"{self._pieces[rnum_to]} occupies square\")\n self._pieces[rnum_to] = self._pieces.pop(rnum_from)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes the coordinates of the stack and the number of pieces desired to move from that stack as parameters. Splits the stack and returns the remaining stack and the pieces to move as a tuple.
def get_pieces_to_move(self, coord, num_of_pieces):
    stack = self.get_stack(coord)
    pieces_to_move = []
    counter = 0
    while counter != num_of_pieces:
        pieces_to_move.insert(0, stack.pop())
        counter += 1
    return stack, pieces_to_move
[ "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n def do_move(source, end):\n print_move(source, end)\n\n def play_game(disks, source, end, spare):\n if (disks == 1):\n do_move(source, end)\n else:\n play_game(disks - 1, source, spare, end)\n do_move(source, end)\n play_game(disks -1, spare, end, source)\n\n return play_game(n, start, end, 6-(start+end))", "def process_stack(self, board, destination, player):\n stack = board[destination[0]][destination[1]]\n while len(stack) > 5:\n piece = stack[0]\n if piece == player.get_player_color():\n player.add_reserve_piece()\n else:\n player.capture_piece()\n stack = stack[1:]\n\n board[destination[0]][destination[1]] = stack\n\n self.check_win_conditions(player)\n\n return board", "def remove_pieces_from_stack(self, position, top_or_bottom, number_to_remove):\n x, y = cartesian_to_list(position)\n\n if top_or_bottom == 'top':\n # note the removed pieces before removal (last n pieces)\n removed_pieces = self._board[x][y][-number_to_remove:]\n\n # remove the pieces\n del(self._board[x][y][-number_to_remove:])\n else:\n # note the removed pieces before removal (first n pieces)\n removed_pieces = self._board[x][y][:number_to_remove]\n\n # remove the pieces\n del(self._board[x][y][:number_to_remove])\n\n return removed_pieces", "def m_move_one(state, b1, b2):\n if state.pos[b1] == 'table':\n return [('stack', b1, b2)]\n elif b2 == 'table':\n return [('unstack', b1, state.pos[b1])]\n else:\n return [('restack', b1, state.pos[b1], b2)]", "def stackDDUP(self):\n args = 2\n a, b = self.stack.pop(), self.stack.pop()\n self.stack += [b, a, b, a]\n # for i in [b,a,b,a]:\n # \tself.stack.append(i)", "def place_pieces(cur_state):\n initial_board = cur_state\n\n initial_board[0][3] = \" R\"\n initial_board[1][2] = \" B\"\n initial_board[1][4] = \" B\"\n initial_board[3][4] = \" B\"\n initial_board[3][6] = \" B\"\n initial_board[5][2] = \" B\"\n\n cur_state = initial_board\n\n \"\"\"\n initial_board[0][1] = \"R\"\n initial_board[0][3] = \"R\"\n initial_board[0][5] = \"R\"\n initial_board[0][7] = \"R\"\n initial_board[1][0] = \"R\"\n initial_board[1][2] = \"R\"\n initial_board[1][4] = \"R\"\n initial_board[1][6] = \"R\"\n initial_board[2][1] = \"R\"\n initial_board[3][2] = \"B\" #######\n initial_board[2][5] = \"R\"\n initial_board[2][7] = \"R\"\n initial_board[5][0] = \"B\"\n initial_board[5][2] = \"B\"\n initial_board[5][4] = \"B\"\n initial_board[5][6] = \"B\"\n initial_board[6][1] = \"B\"\n initial_board[6][3] = \"B\"\n initial_board[6][5] = \"B\"\n initial_board[6][7] = \"B\"\n initial_board[7][0] = \"B\"\n initial_board[7][2] = \"B\"\n initial_board[7][4] = \"B\"\n initial_board[7][6] = \"B\"\n \"\"\"", "def move_stack(n, start, end):\n assert 1 <= start <= 3 and 1 <= end <= 3 and start != end, \"Bad start/end\"\n if n == 0:\n print_move(start, end)\n else:\n remainder = 2 * 3 - start - end\n print_move(start, remainder)\n move_stack(n - 1, start, remainder)\n print_move(remainder, end)", "def imagine_move(state: tuple[int, ...], move: Move) -> tuple[int, ...]:\n cur_state = list(state)\n layer_i, low_idx, high_idx = move\n\n # Make the layer 0-indexed\n layer_i -= 1\n\n # Imagine performing move\n active_layer = cur_state.pop(layer_i)\n left_result = low_idx - 1\n right_result = active_layer - high_idx\n if left_result > 0:\n cur_state.append(left_result)\n if right_result > 0:\n cur_state.append(right_result)\n\n return tuple(cur_state)", "def possible_moves(self, stack):\n wpx, wpy = 
stack.pos\n h = stack.height\n \n # tests whether a generated position e is a valid move from s\n valid = lambda s, e: board.is_valid_position(e) and (s != e) and all(e != b.pos for b in self.black)\n\n moves = []\n for n in range(1, h + 1):\n for x in range(wpx - h, wpx + h + 1):\n if valid(stack.pos, (x, wpy)):\n moves.append(((x, wpy), n))\n for y in range(wpy - h, wpy + h + 1):\n if valid(stack.pos, (wpx, y)):\n moves.append(((wpx, y), n))\n return moves", "def movestack(self, i):\n if i == _RIGHT and self.mode == _CHOOSEPILE and self.stackpointer < len(self.board) - 1:\n self.stackpointer += 1\n self.cardpointer = 0\n elif i == _LEFT and self.mode == _CHOOSEPILE and self.stackpointer > 0:\n self.stackpointer += -1\n self.cardpointer = 0\n elif i == _RIGHT and self.mode == _PICKMOVE and self.stackpicker < len(self.board) -1:\n self.stackpicker += 1\n elif i == _LEFT and self.mode == _PICKMOVE and self.stackpicker > 0:\n self.stackpicker += -1", "def place_atop_safely(self, position, stack):\n x, y = cartesian_to_list(position)\n self._board[x][y].extend(stack) # place stack atop the stack already at position\n\n # a piece has been placed! process the consequence based on game rules\n stack = self.show_pieces(position)\n active_player_color = self._players[self._whose_turn]['color']\n excess_stack_height = len(stack) - self._MAX_STACK_HEIGHT # has negative \"excess\" for small stacks\n\n if excess_stack_height > 0: # game rules define consequences based on excess stack height\n # remove the bottom pieces from the excessive stack so it's not excessive anymore\n removed_pieces = self.remove_pieces_from_stack(position, 'bottom', number_to_remove=excess_stack_height)\n\n # process each excess piece's capture or reserve placement\n for removed_piece in removed_pieces:\n\n # if bottom piece belongs to player making move, send to reserve. 
Else, make capture of opponent piece\n consequence = 'captured'\n if removed_piece == active_player_color:\n consequence = 'reserved'\n\n # place the excess pieces into this player's reserve or capture pile, as appropriate\n self._players[self._whose_turn][consequence] += 1", "def move_piece(self, player, select, move, number_pieces):\r\n if self.check(player, select, move, number_pieces):\r\n player_profile = self.which_player(player)\r\n piece_select = self._board[select[0]][select[1]]\r\n bottom_place = len(piece_select) - number_pieces\r\n for num in range(number_pieces):\r\n bottom_piece = piece_select[bottom_place]\r\n self._board[select[0]][select[1]].pop(bottom_place)\r\n self._board[move[0]][move[1]].append(bottom_piece)\r\n if len(self._board[move[0]][move[1]]) > 5:\r\n self.overflow(player, move)\r\n if player_profile.get_capture() == 6:\r\n return player_profile.get_name() + \" Wins\"\r\n return \"successfully moved\"\r\n\r\n else:\r\n return False", "def test_exec_manipulate_stack_can_duplicate_nth_value_from_top_of_stack(num):\n from esolang_whitespace import SpaceInterpreter\n i = SpaceInterpreter('\\t ' + num_to_space(num))\n stack = [0, 1, 2, 3, 4]\n i.exec_manipulate_stack('\\t ' + num_to_space(num), stack, [0])\n assert stack == [0, 1, 2, 3, 4, 4 - num]", "def getPartialStackPawn(self, top_pawn, num_pawns):\n pawn = top_pawn\n for x in range(1, num_pawns):\n pawn = pawn.get_bottom()\n return pawn", "def getImageStack(self, board):\n index = [None,0,None,1,3,None,2] # indices of main plains\n # PLAYER = 4 # index of PLAYER's plain\n LONG_CAPTURE = 4 # index of LONG_CAPTURE's plain\n active_player = 1 # white always move\n # create image stack that will be an input to NNet \n n = self.n\n main_planes = np.zeros(shape=(5, n, n), dtype=np.float32)\n # main images\n for y in range(n):\n for x in range(n):\n piece = board.pieces[x][y]\n if piece != 0:\n main_planes[index[piece]][x][y] = 1\n # main_planes[PLAYER][x][y] = active_player\n \n # player/piece(s) making the move\n if board.last_long_capture:\n last = board.last_long_capture\n main_planes[LONG_CAPTURE][last.x1][last.y1] = 1\n \n # auxiliary images\n normNoProgressCount = board.noProgressCount / 128.0 if board.count_pieces() <= 7 else 0\n normKingMoveCount = board.kingMoveCount / 32.0\n normRepetitionCount = board.get_repetition_count() / 4.0\n \n no_progress = np.full((8, 8), normNoProgressCount, dtype=np.float32)\n king_move_count = np.full((8, 8), normKingMoveCount, dtype=np.float32)\n repetition_count = np.full((8, 8), normRepetitionCount, dtype=np.float32)\n auxiliary_planes = [no_progress, king_move_count, repetition_count]\n\n image_stack = np.asarray(auxiliary_planes, dtype=np.float32)\n image_stack = np.vstack((main_planes, auxiliary_planes))\n assert image_stack.shape == (self.getImageStackSize(), n, n) \n \n # debug image stack\n #if board.last_long_capture or board.kingMoveCount>0:\n # self.print_image_stack(image_stack)\n \n return image_stack", "def change_state_move(self, stack, new_pos, num_tokens):\n\n new_white = self.white.difference([stack])\n\n new_height = num_tokens\n for w in self.white:\n if w.pos == new_pos:\n new_white = new_white.difference([w])\n new_height += w.height\n\n new_white |= {Stack(new_pos, new_height)}\n\n if stack.height > num_tokens:\n new_white |= {Stack(stack.pos, stack.height - num_tokens)}\n \n return State(new_white, self.black, self.goals)", "def _next_pos_multiplied(self, component):\r\n posx = component.pos[0]\r\n posy = component.pos[1]\r\n\r\n mq = 
component.move_queue.copy()\r\n\r\n while len(mq) > 0:\r\n x, y = mq.pop()\r\n\r\n posx += x * 4\r\n posy += y * 4\r\n\r\n return posx, posy", "def ApplyAndQueryPositions(layer, pos):\n n_heads = len(pos)\n return tl.Serial(\n tl.Dup(), # (x, x)\n CutAtPosition(), # (x_content, x_position, x)\n tl.Parallel([], tl.Swap()), # (x_content, x, x_position)\n [tl.Parallel([], Dup2()) for _ in range(n_heads - 1)],\n # Now the stack is x_content, (x, x_position) * n_heads.\n tl.Parallel(*([layer] + pos)),\n tl.Concatenate(n_items=n_heads + 1)\n )", "def four_stool_move(model: 'TOAHModel',\n n: int,\n src_stool: int,\n dest_stool: int,\n temp_stools: 'tuple of (int, int)'):\n \n # Base case: if no cheeses need to be moved, do nothing\n if (n <= 0):\n return\n \n # Base case: if there is only one cheese to move,\n # move it directly to the dest_stool\n elif (n == 1):\n model.move(src_stool, dest_stool)\n return\n \n # Recursive decomposition: Use Anne Hoy's TOAH algorithm\n else:\n # Get the optimal i value from an optimal_i_generator starting at n\n op_i = optimal_i_generator(n)\n i = next(op_i)\n \n # Anne Hoy's step 1: Move n - i cheese rounds to an intermediate \n # stool (temp_stools[0]) using all four stools.\n four_stool_move(model,\n (n - i), # new n\n src_stool, # new src\n temp_stools[0], # new dest\n (dest_stool, temp_stools[1])) # new temps\n \n # Anne Hoy's step 2: Move i cheese rounds from the origin stool to \n # the destination stool, using the 3 remaining stools\n # (temp_stools[0] is used by step 1, so cannot use it in this step)\n three_stool_move(model,\n i, # new n\n src_stool, # new src\n dest_stool, # new dest\n temp_stools[1]) # new temp\n \n # Anne Hoy's step 3: Move the n-i smallest cheese rounds from the \n # intermediate stool to the destination stool, using all four stools\n four_stool_move(model,\n (n - i), # new n\n temp_stools[0], # new src\n dest_stool, # new dest\n (src_stool, temp_stools[1])) # new temps\n \n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a player_name as a parameter and returns the active player based on the player name
def get_active_player(self, player_name):
    player_one = self.get_player_one()
    player_two = self.get_player_two()
    if player_one.get_player_name() == player_name:
        return player_one
    if player_two.get_player_name() == player_name:
        return player_two
    else:
        return 'player not found'
[ "def findPlayerByName(self, name): \r\n\t\treturn self.__players_by_name.get(name.lower(), None)", "def get_player(self, name):\n\n try:\n name = name.name\n except AttributeError: pass\n\n for i in self.players:\n if i.name == name:\n return i", "def player_by_name(self, name):\n if self.config[\"uncased\"]:\n name = name.lower()\n if self.players.get(name, None) is None:\n self.players[name] = Player(name, self.config)\n return self.players[name]", "def _get_ac_player(self):\n\t\treturn self.players[self.active_player]", "def player():\n\n name_id = 1\n return card_game.Player(name_id)", "def get_player_name():\n\n return player.get(\"player_name\")", "async def get_character(self, character_name):\n for player in self._characters:\n if player.get_name() == character_name:\n return player\n await asyncio.sleep(.1)", "def query_specific_player(player_name):\n\n if player_name:\n return db.players.find_one({'Player': player_name})\n else:\n raise ValueError('Specify a player name, please!')", "def getPlayer(self, p):\n log(\"MState getPlayer\",5)\n if type(p) == Player:\n return p\n elif type(p) == str:\n players = [player for player in self.players if player.id == p]\n if len(players) >= 1:\n return players[0]\n else:\n raise Exception(\"Couldn't find player from id {}\".format(p))\n else:\n raise Exception(\"Couldn't find player from {}\".format(p))", "def create_player(name):\n if name.lower() == \"ai\":\n return Player(name.upper(), 'computer')\n else:\n return Player(name.title(), 'human')", "def get_current_player(player_one_turn: bool) -> str:\r\n\r\n # Complete this function.\r\n if player_one_turn == True:\r\n return P1\r\n else:\r\n return P2", "def name(player):\n return player['name']", "def get_player(self, name):\r\n return User(name)", "def check_if_player_exists(self, player_name) -> Union[Player, bool]:\n for player in self._players:\n if player.name == player_name and player._is_alive:\n return player\n return False", "def get_player(self, player_id):\n for player in self.players:\n if player.get_id() == player_id:\n return player", "def get_player(self, name):\n\t\t\n\t\tname = \"\".join(ch.lower() for ch in name if ch not in set(string.punctuation)).capitalize()\n\t\titem = self.db.get(name)\n\t\t\n\t\tif item.value:\n\t\t\titem.value = data.Object(item.value)\n\t\telse:\n\t\t\tplayer = data.Object()\n\t\t\t\n\t\t\tplayer.name = name\n\t\t\tplayer.title = \"\"\n\t\t\tplayer.full_name = name\n\t\t\tplayer.karma = 0\n\t\t\tplayer.alignment = ALIGNMENT_NEUTRAL\n\t\t\tplayer.unaligned_name = random.choice(UNALIGNED_NAMES)\n\t\t\tplayer.damage = random.choice(DAMAGE_TYPES)\n\t\t\tplayer.next_karma = 0\n\t\t\tplayer.next_fight = 0\n\t\t\tplayer.wins = 0\n\t\t\tplayer.losses = 0\n\t\t\tplayer.ties = 0\n\t\t\t\n\t\t\titem.value = player\n\t\t\titem.commit()\n\t\t\n\t\treturn item", "def get_actor_by_name(self, partialname):\n for actor in self.players:\n if partialname.lower() in actor.name.lower():\n return actor\n return None", "def getplayer(title, logs=[]):\n match = consts.player_re.search(title)\n if not match:\n logs.append(\"Player: No regex match\")\n return None\n name = strip_annots(match.group(1))\n\n players = safe_call(consts.osu_api.get_user, name)\n if players:\n return players[0]\n logs.append(\"Player: '%s' not found\" % name)\n return None", "def next_player(current_player: str, occurrence_number: int) -> str:\n if occurrence_number > 0:\n return current_player\n else:\n if current_player == PLAYER_ONE:\n return PLAYER_TWO\n else:\n return PLAYER_ONE" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a player name as a parameter and returns how many pieces they have in reserve
def show_reserve(self, player_name):
    player = self.get_active_player(player_name)
    return player.get_reserve_pieces()
[ "def check_occupancy(shelter_name):\n\tresult = session.query(Shelter.name,func.count(Puppy.id).label('puppy_count'),Shelter.max_occ)\\\n\t.outerjoin(Shelter.name).group_by(Shelter.name).filter(Shelter.name==shelter_name).one()\n\tnum_left = result.max_occ-result.puppy_count\n\tprint \"Cur Occupancy - {sname}: {pcount}, {remaining} openings remaining\".format(sname = shelter_name, pcount = result.puppy_count, remaining = num_left)", "def _numWorthless(self):\r\n self.inventoryManager.refreshInventory()\r\n inv = self.inventoryManager.inventory()\r\n n = inv.get(43, 0) + inv.get(44, 0) + inv.get(45, 0)\r\n return n", "def nb_ships_in_fleets(self, player):\n nb = 0\n for f in self.fleets:\n if f.owner is player:\n nb += f.nb_ships\n return nb", "def count_pieces(self, current_player=None) :\n if current_player not in [True, False, None]:\n raise TypeError(\"Expected boolean value for current_player, got \"\n + str(current_player))\n piece_type = self.__piece_type__(self.get_current_player_name() if current_player else self.get_other_player_name())\n player_test = (lambda x: x) if current_player is None else (lambda piece: piece == piece_type)\n return len(filter(player_test, sum(self.board_array,[])))", "def nb_ships(self, player):\n return self.nb_ships_in_fleets(player) + self.nb_ships_on_planets(player)", "def num_total_players(self):\r\n return self.count()", "def countPlayers():\n\n # establish db connection\n DB, cursor = connect()\n\n # fetch number of players registered\n cursor.execute(\"SELECT count(*) from player_registry\")\n player_count = cursor.fetchone()[0]\n DB.close()\n\n return player_count", "def CheckOpponentBigs(lineup):\n bigs = 0\n for name in opp_bigs:\n if name in lineup:\n bigs += 1\n return bigs", "def reserved_move(self, player_name, coord):\n player = self.get_active_player(player_name)\n board = self.get_board()\n if player.get_reserve_pieces() < 1:\n return 'no pieces in reserve'\n board[coord[0]][coord[1]].append(player.get_player_color())\n player.remove_reserve_piece()\n board = self.process_stack(board, coord, player)\n self.set_board(board)\n self.get_next_player(player)\n return 'successfully moved'", "def num_pieces(self):\n pos = self.position\n counts = defaultdict(int)\n for piece in pos.values():\n counts[piece] += 1\n return counts", "def nb_ships_on_planets(self, player):\n nb = 0\n for p in self.planets:\n if p.owner is player:\n nb += p.nb_ships\n return nb", "def add_reserve_piece(self):\n self._reserve_pieces += 1", "def get_calc_unique_pieces(self):\n # TODO: Make this work with rebrickable inventories\n count = db.run_sql(\"SELECT COUNT(bl_inventories.quantity) FROM bl_inventories JOIN parts\"\n \" ON bl_inventories.piece_id = parts.id\"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return count", "def numof_pieces(self):\n return len(self._pieces)", "def get_remaining_pegs(self):\n return len(list(filter(lambda x: x.has_piece(), itertools.chain(*self.board.content))))", "def get_player_count(self):\n if self.players:\n return len(self.players)\n else:\n return 0", "def test_cli_change_number_of_computer_players(engine):\n assert engine.ui.seats == 7 + 2\n assert len(engine.playerlist) == 7 + 1", "def get_calc_piece_count(self):\n\n count = db.run_sql(\"SELECT SUM(bl_inventories.quantity) FROM bl_inventories \"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return count", "def winnings_total(players):\n return sum(map(lambda p: p.winnings, players))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Takes a player's name and the destination coordinates, determines whether they can make a reserve piece move, and executes it if so. Updates the turn order. Returns an error message if the move fails.
def reserved_move(self, player_name, coord):
    player = self.get_active_player(player_name)
    board = self.get_board()
    if player.get_reserve_pieces() < 1:
        return 'no pieces in reserve'
    board[coord[0]][coord[1]].append(player.get_player_color())
    player.remove_reserve_piece()
    board = self.process_stack(board, coord, player)
    self.set_board(board)
    self.get_next_player(player)
    return 'successfully moved'
[ "def move_piece(self, player_name, start, destination, num_of_pieces):\n if self.get_winner() is not None:\n return self.get_winner() + ' has already won!'\n turn = self.get_turn()\n player = self.get_active_player(player_name)\n move_size = self.move_size(start, destination)\n valid_move = self.check_valid_move(player, start, destination)\n\n if turn != player.get_player_name() and turn is not None:\n return 'not your turn'\n\n if valid_move is False:\n return 'invalid location'\n\n if num_of_pieces != move_size:\n return 'invalid number of pieces'\n\n movement_range = self.get_range(start)\n\n if num_of_pieces > movement_range:\n return 'invalid number of pieces'\n\n pieces_to_move = self.get_pieces_to_move(start, num_of_pieces)\n board = self.update_board_location(start, destination, pieces_to_move)\n board = self.process_stack(board, destination, player)\n\n if self.get_winner() is not None:\n return 'wins'\n\n else:\n self.set_board(board)\n self.get_next_player(player)\n return 'successfully moved'", "def move_piece(self, player_name, from_position, to_position, pieces_moved):\n if self._whose_turn is None:\n self._whose_turn = player_name\n\n # general validation\n validation_result = self.general_move_validation(player_name, to_position)\n if validation_result is not True: # validation_result is string if any test failed\n return validation_result\n\n # enforce valid from_position; from_position is within bounds\n if not self.is_in_board(from_position):\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid from_position; from_position is not empty\n if len(self.show_pieces(from_position)) < 1:\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid from_position; player controls top of stack at from_position\n if self.show_pieces(from_position)[-1] != self._players[player_name]['color']:\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid to_position; to_position is within legal range\n if not self.position_is_in_stack_range(from_position, to_position):\n return self._ERROR_MESSAGES['invalid_location']\n\n # enforce valid number of pieces moved\n if pieces_moved > len(self.show_pieces(from_position)):\n return self._ERROR_MESSAGES['invalid_number_of_pieces']\n\n # move is valid--process the move by removing pieces from from_position and placing atop to_position\n removed_pieces = self.remove_pieces_from_stack(from_position, 'top', pieces_moved)\n self.place_atop_safely(to_position, removed_pieces)\n\n return self.process_post_move()", "def move_piece(x, y, new_x, new_y, x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION):\n\n # check whether the destination is the same for both\n\n if (new_x == new_x2 and new_y == new_y2):\n print(\"Both pieces going to the same location\")\n piece_type1 = get_piece(board, y, x)\n piece_type2 = get_piece(board, y2, x2)\n if (piece_type1 == \"p\" and piece_type2 == \"P\"):\n # both pawns, delete both\n print(\"Both are pawns, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"k\" and piece_type2 == \"K\"):\n print(\"Both are knights, detroying both\")\n board = delete_piece(x, y, board, board_turtles)\n board = delete_piece(x2, y2, board, board_turtles)\n elif (piece_type1 == \"p\" and piece_type2 == \"K\"):\n\n board = delete_piece(x, y, board, board_turtles)\n # execute move for AI\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n 
elif (piece_type1 == \"k\" and piece_type2 == \"P\"):\n board = delete_piece(x2, y2, board, board_turtles)\n # execute move for AI\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n print(\"Executing moves normally\")\n if (x != -1):\n board = execute_move(x, y, new_x, new_y, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n if (x2 != -1):\n board = execute_move(x2, y2, new_x2, new_y2, board, board_turtles, SYMBOL_DICT, BOARD_DIMENSION)\n\n return board", "def place_piece(board, x, y, player):\n can_place = isfree(board, x, y)\n if can_place:\n board[(x,y)] = player\n return can_place", "def _take_turns(self):\n # Beginning with the first player,\n # alternate turns between players until the game ends\n self.current_player_id = 1 # the id of the current player\n user_command = '' # the command entered by the user\n while(self.board.is_game_over() is False):\n if self.current_player_id == self.computer_player_id: \n self.board.take_best_move(self.computer_player_id)\n # End turn and allow the user to take a turn\n self.current_player_id = self.user_player_id\n else:\n # Display the board\n self.board.display()\n # Remind the user whether they are X's or O's\n if self.user_player_id == 1:\n print \"You are X's\"\n else:\n print \"You are O's\"\n # Ask user to input the coordinates of her mark, or to press q to quit\n user_command = raw_input('<enter \"{rowNum}, {columnNum}\" or \"q\" to quit>: ')\n print \"\"\n # Process the user command\n if user_command.lower().strip() == 'q':\n # End the game\n break\n else:\n # Mark the board for the user\n self._mark_board_for_user(user_command)\n # Display final board \n self.board.display()\n # Determine winner\n self.winner_id = self.board.get_winner()", "def place(self):\n print('Its ' + self.identify_piece(self.game.turn) + ' player\\'s turn to play')\n while True:\n position = self.input_number('Choose a spot to place: ') - 1\n\n result = self.game.can_place_piece(self.game.turn, position)\n if result == Game.CanPlaceResults.Ok:\n self.game.place_piece(self.game.turn, position)\n player = self.game.get_player_from_piece(self.game.turn)\n player.previous_move[1] = position\n break\n elif result == Game.CanPlaceResults.Occupied:\n print(\"There is already something at this position.\")\n elif result == Game.CanPlaceResults.WrongPiece:\n print(\"Wrong turn (this shouldn't be possible to happen).\")\n elif result == Game.CanPlaceResults.WrongState:\n print(\"Placing is not allowed at this time (this shouldn't be possible to happen).\")\n return # Safety return here. 
Wrong state means no placement can happen\n elif result == Game.CanPlaceResults.OutsideBoard:\n print(\"Position is outside the board.\")\n else:\n print(\"Something went wrong.\")", "def move_piece(self, player, select, move, number_pieces):\r\n if self.check(player, select, move, number_pieces):\r\n player_profile = self.which_player(player)\r\n piece_select = self._board[select[0]][select[1]]\r\n bottom_place = len(piece_select) - number_pieces\r\n for num in range(number_pieces):\r\n bottom_piece = piece_select[bottom_place]\r\n self._board[select[0]][select[1]].pop(bottom_place)\r\n self._board[move[0]][move[1]].append(bottom_piece)\r\n if len(self._board[move[0]][move[1]]) > 5:\r\n self.overflow(player, move)\r\n if player_profile.get_capture() == 6:\r\n return player_profile.get_name() + \" Wins\"\r\n return \"successfully moved\"\r\n\r\n else:\r\n return False", "def execute_move(self, move: Tuple[int, int, Piece], player: int):\n\n (x, y, p) = move\n\n # Placing in empty square\n assert self[x][y] == 0\n # Piece placed is not already used\n assert p not in self.used_pieces\n # Not placing in middle cross\n assert x != self.mid\n assert y != self.mid\n\n # print(f\"Placing {(self.selected_piece & 0b1111):04b} at {x},{y}\")\n self[x][y] = int(self.selected_piece) # +(1<<self.n)\n\n self.selected_piece = p\n # print(f\"Selecting {(self.selected_piece & 0b1111):04b} for opponent\\n\")", "def placeAPiece(self):\n # Check if you can eliminate any opponent piece by placing your piece\n for y in range(0, 8):\n for x in range(0, 8):\n if self.board.board[y][x] == self.piece:\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n try:\n if (x + dx + dx) < 0 or (y + dy + dy) < 0:\n continue\n\n if (self.board.board[y + dy][x + dx] == self.opponentPiece\n and self.board.board[y + dy +dy][x + dx + dx] == \"-\"\n and (x + dx + dx, y + dy + dy) not in self.board.placeBanList):\n if x + dx + dx > 0 and y + dy + dy > 0:\n self.board.placePiece((x + dx + dx, y + dy + dy), self.myColour)\n return (x + dx + dx, y + dy + dy)\n else:\n continue\n except IndexError:\n continue\n\n # Tries to place a piece on the middle positions of the board first\n counter = 0\n while True:\n lowerBound = 3\n upperBound = 4\n # The range for placing slowly grows outwards\n # if it cannot find a place at first within a few tries\n if counter > 5 and counter < 15:\n lowerBound = 2\n upperBound = 5\n elif counter > 15 and counter < 50:\n lowerBound = 1\n upperBound = 6\n elif counter > 50:\n lowerBound = 0\n upperBound = 7\n\n x = randint(lowerBound, upperBound)\n y = randint(lowerBound, upperBound)\n\n counter += 1\n # Checks if the piece will get eliminated next turn if we\n # place a piece in the generated position\n dangerPlace = False\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n # In order to get rid of negative indexing since its annoying\n if (x + dx) < 0 or (y + dy) < 0:\n continue\n\n try:\n if ((self.board.board[y+dy][x+dx] == self.opponentPiece or\n self.board.board[y+dy][x+dx] == \"X\") and\n self.board.board[y-dy][x-dx] == \"-\"):\n dangerPlace = True\n break\n except IndexError:\n continue\n if dangerPlace:\n continue\n # Place the piece if the game rules allow it and then return\n if (x, y) not in self.board.placeBanList:\n self.board.placePiece((x, y), self.myColour)\n return ((x, y))", "def movePiece(self,pos, dest):\r\n\r\n #set passant every turn to check if en passant is possible\r\n if(isinstance(self.board[pos[0]][pos[1]],piece.Pawn)):\r\n if(self.board[pos[0]][pos[1]].enPassant == True):\r\n 
self.passant = True\r\n else:\r\n self.passant = False\r\n\r\n #check if castle move\r\n if(not self.checkPossible(pos,dest)):\r\n if(pos == self.kingPos[0]):\r\n if(dest == (1,0)):\r\n self.board[2][0] = self.board[0][0]\r\n self.board[2][0].position = (2,0)\r\n self.board[0][0] = 0\r\n elif(dest == (6,0)):\r\n self.board[5][0] = self.board[7][0]\r\n self.board[5][0].position = (5,0)\r\n self.board[7][0] = 0\r\n elif(pos == self.kingPos[1]):\r\n if(dest == (1,7)):\r\n self.board[2][7] = self.board[0][7]\r\n self.board[2][7].position = (2,7)\r\n self.board[0][7] = 0\r\n elif(dest ==(6,7)):\r\n print(\"Final Step\")\r\n self.board[5][7] = self.board[7][7]\r\n self.board[5][7].position = (5,7)\r\n self.board[7][7] = 0\r\n\r\n # move piece(normally)\r\n self.board[dest[0]][dest[1]] = self.board[pos[0]][pos[1]]\r\n self.board[pos[0]][pos[1]] = 0\r\n self.board[dest[0]][dest[1]].position = (dest[0], dest[1])", "def move_piece(board, x, y, new_x, new_y):\n if not isfree(board, x, y):\n player = get_piece(board, x, y)\n remove_piece(board, x, y)\n place_piece(board, new_x, new_y, player)\n return True\n return False", "def find_available_moves(game, pieces, playerID=-1, num=-1):\r\n if playerID < 0:\r\n playerID = game.current_playerID\r\n\r\n corners = game.find_available_corners(playerID=playerID)\r\n\r\n # here are all of the available mating corners for each playerID\r\n #(i could get away with only the corners for the current player, but\r\n # getting all of them allows me to just how many blokus's are performed on a move\r\n # which could be part of a goodness metric for a strategy).\r\n\r\n # remove all of the other players pieces from the list\r\n pieces = [[i, pieces[i].piece]\r\n for i in xrange(len(pieces)) if pieces[i].playerID == playerID]\r\n moves = []\r\n\r\n for item in pieces:\r\n for rotation in xrange(0, 4):\r\n for parity in [-1, 1]:\r\n piece = item[1]\r\n ind = item[0]\r\n test_piece = Piece(\r\n piece.pieceID,\r\n piece.playerID,\r\n rotation=rotation,\r\n parity=parity)\r\n geo = test_piece.geometry\r\n Size = geo.shape\r\n for i in xrange(Size[0]):\r\n for j in xrange(Size[1]):\r\n for corner in corners:\r\n test_position = (corner[0] - i, corner[1] - j)\r\n new_board, problem = game.check_if_is_allowed(\r\n test_piece, test_position)\r\n if problem == '':\r\n moves = moves + [{'playerID': piece.playerID,\r\n 'index': ind,\r\n 'pieceID': piece.pieceID,\r\n 'position': test_position,\r\n 'rotation': rotation,\r\n 'parity': parity}]\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n\r\n return moves", "def can_move_piece_from(self, position, ignore_turn = False):\n if (position < 0 or position >= Board.position_count):\n return Game.CanMoveResults.OutsideBoard\n if (ignore_turn == False and self.turn != self.board[position]):\n return Game.CanMoveResults.WrongPiece\n if (self.state != Game.GameStage.Moving):\n return Game.CanMoveResults.WrongState\n\n return Game.CanMoveResults.Ok", "def reserve_board(self, board_name, attempts = 3):\n # ----------------------------------------------------------------------- #\n # This method seems to match the desired reservation behavior. It tries #\n # to reserve for a given number of tries and, if the operation failes, #\n # an exception is raised. 
#\n # It is advisable to work on extending this (already implemented) method #\n # and make it perform additional tasks, if needed (for example, to also #\n # try logging on to the board in order to confirm its' operational state) #\n # ----------------------------------------------------------------------- #\n # Create a \"shell\" object. We use pexpect and not pxssh, because the command is passed\n #_shell = pexpect.spawn(\"bash\")\n #_shell.logfile = logfile\n\n self.logger.info(self.constants.INFO[\"reservationattempt\"] % board_name)\n reserved = False\n reservation_id = None\n\n reserve_cmd = self.constants.COMMANDS[\"reservetarget\"] % (board_name, \"5M\")\n self.logger.debug(reserve_cmd)\n reservation_id = self.run_command(reserve_cmd)\n\n #for i in range(attempts):\n # reserve_cmd = self.constants.COMMANDS[\"reservetarget\"] % (\"5M\", board_name)\n # self.logger.debug(reserve_cmd)\n #_shell.sendline(reserve_cmd)\n #found = _shell.expect([\"Reservation confirmed\",\"Reservation Exception\",pexpect.TIMEOUT])\n # reservation_id = self.run_command(reserve_cmd)\n # This is present in the original version, since commands were executed on remote machines,\n # via SSH. Omitting this in this context, since we run a local shell\n #self._shell.prompt(timeout=10)\n #if found==0:\n # self.logger.info(self.constants.INFO[\"reservationconfirmed\"] % board_name)\n # reserved=True\n\n # reservation_id = _shell.before[5:-2]\n # break\n #elif found==1:\n # self.logger.info(self.constants.INFO[\"targetalreadyreserved\"] % board_name)\n\n # Here we will need to invoke our ExceptionHandler, and error triage will be\n # done there. So, whenever an exception should be raised, it will be passed\n # 'blindly' to the ExceptionHandler \n # TODO: Use a custom exception instead of plain log errors\n # self.logger.error(self.constants.ERRORS[\"targetreserved\"])\n #else:\n # self.logger.info(self.constants.INFO[\"targetalreadyreserved\"] % (board_name, str(i)))\n #if not reserved:\n # self.logger.info(self.constants.ERRORS[\"unabletoreserve\"] % (board_name, attempts))\n\n return reservation_id", "def can_move_piece(self, position, new_position, ignore_turn = False):\n\n can_move_from_result = self.can_move_piece_from(position, ignore_turn)\n if (can_move_from_result != Game.CanMoveResults.Ok):\n return can_move_from_result\n if (new_position < 0 or new_position > Board.position_count):\n return Game.CanMoveResults.OutsideBoard\n if (position == new_position):\n return Game.CanMoveResults.SamePosition\n if (self.board[new_position] != Piece.Empty):\n return Game.CanMoveResults.NewPositionOccupied\n if (self.check_if_mill_is_ok(self.board[position], new_position) == False):\n return Game.CanMoveResults.OldMillAtPosition\n\n \n\n\n moved_piece = self.board[position]\n total_on_board = self.board.pieces_of_type_on_board(moved_piece)\n # If you have three pieces left you're allowed to fly so the adjacent rule doesn't apply\n if (total_on_board > 3):\n if (self.board.positions_are_adjacent(position, new_position) == False):\n return Game.CanMoveResults.NotAdjacent\n\n return Game.CanMoveResults.Ok", "def makeMove(move):\r\n try:\r\n # Place the player in a new position\r\n GameBoard.theBoard[PlayerClass.char.position + move] = PlayerClass.char.name\r\n # Reset the current position to empty\r\n GameBoard.theBoard[PlayerClass.char.position] = \" \"\r\n # Update player position to the new position\r\n PlayerClass.char.position = PlayerClass.char.position + move\r\n except IndexError:\r\n print(\"Out of 
bounds.\")\r\n\r\n checkEncounters()\r\n GameBoard.draw_board(GameBoard.theBoard)\r\n gameAction()", "def applyMove(board,gameState, move, player = \"player\"):\n pass", "def is_setting_position_correct(turn):\n global pieces\n white_king, black_king = False, False\n king_counter = 0\n print()\n for piece in pieces:\n if piece.name == \"P\":\n if piece.position[-1] in \"1/8\":\n print(\"Invalid position. Remember, pawns cannot be in first or last row.\", end=\"\\n\\n\")\n return False\n if piece.name == \"K\":\n if piece.color == \"w\":\n white_king = True\n king_counter = increase(king_counter)\n elif piece.color == \"b\":\n black_king = True\n king_counter = increase(king_counter)\n if white_king and black_king:\n if king_counter != 2:\n print(\"Invalid position. There cannot be more than two kings on the board.\", end=\"\\n\\n\")\n return False\n elif is_check(\"w\", pieces) and is_check(\"b\", pieces):\n print(\"Invalid position. Both kings cannot be in check.\", end=\"\\n\\n\")\n return False\n elif is_check(\"w\", pieces) and turn != \"w\" or is_check(\"b\", pieces) and turn != \"b\":\n print(\"Invalid position. King cannot be in check while opponent's turn.\", end=\"\\n\\n\")\n return False\n else:\n return True\n else:\n print(\"Invalid position. Must be both kings on the board.\", end=\"\\n\\n\")\n return False", "def place_ship(self,row,column):\n if self.board[row][column] == \"S\":\n return \"ship already present\"\n else:\n self.board[row][column] = \"S\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Initializes a player with their name, their assigned color, and zero reserve and captured pieces.
def __init__(self, player_name, player_color):
    self._player_name = player_name
    self._player_color = player_color
    self._reserve_pieces = 0
    self._captured_pieces = 0
[ "def __init__(self, name, color):\n self._name = name\n self._color = color\n self._is_turn = None\n self._red_captured = 0\n self._marbles_left = 8", "def __init__(self):\n \n # White always starts the game\n self.current_turn = 'white'\n self.num_white_turns = 0\n self.num_black_turns = 0\n\n self.valid_moves = []\n\n self.pieces_dict = {}\n # Initialize the set of pieces using the Piece class (see below)\n self.init_pieces_dict()", "def __init__(self, color, x, y):\n\t\t# Store the instance variable\n\t\tself.x = x\n\t\tself.y = y\n\t\t# Record whether the piece is living and drawn\n\t\tself.drawn = False\n\t\tself.living = True\n\t\t# Create the image name, which is blank and to be filled\n\t\tself.imageName = \"\"\n\t\t# Store the color\n\t\tself.color = color\n\t\t# Store the type\n\t\tself.type = \"\"\n\t\tself.hasMoved = False", "def _init_pieces(self, board):\n color = self.get_color()\n\n if color == 'blue':\n pieces = {'bso1': Soldier('bso1', 'a7', board), 'bso2': Soldier('bso2', 'c7', board),\n 'bso3': Soldier('bso3', 'e7', board), 'bso4': Soldier('bso4', 'g7', board),\n 'bso5': Soldier('bso5', 'i7', board), 'bca1': Cannon('bca1', 'b8', board),\n 'bca2': Cannon('bca2', 'h8', board), 'bge1': General('bge1', 'e9', board),\n 'bch1': Chariot('bch1', 'a10', board), 'bel1': Elephant('bel1', 'b10', board),\n 'bho1': Horse('bho1', 'c10', board), 'bgu1': Guard('bgu1', 'd10', board),\n 'bgu2': Guard('bgu2', 'f10', board), 'bel2': Elephant('bel2', 'g10', board),\n 'bho2': Horse('bho2', 'h10', board), 'bch2': Chariot('bch2', 'i10', board)}\n else:\n pieces = {'rso1': Soldier('rso1', 'a4', board), 'rso2': Soldier('rso2', 'c4', board),\n 'rso3': Soldier('rso3', 'e4', board), 'rso4': Soldier('rso4', 'g4', board),\n 'rso5': Soldier('rso5', 'i4', board), 'rca1': Cannon('rca1', 'b3', board),\n 'rca2': Cannon('rca2', 'h3', board), 'rge1': General('rge1', 'e2', board),\n 'rch1': Chariot('rch1', 'a1', board), 'rel1': Elephant('rel1', 'b1', board),\n 'rho1': Horse('rho1', 'c1', board), 'rgu1': Guard('rgu1', 'd1', board),\n 'rgu2': Guard('rgu2', 'f1', board), 'rel2': Elephant('rel2', 'g1', board),\n 'rho2': Horse('rho2', 'h1', board), 'rch2': Chariot('rch2', 'i1', board)}\n\n self._pieces = pieces\n allowed_destinations = set()\n\n for piece in pieces.values():\n piece.update_hyp_moves()\n piece.update_allowed_moves()\n allowed_destinations |= set(piece.get_allowed_moves())\n\n self.set_allowed_destinations(allowed_destinations)", "def init_pieces_dict(self):\n self.pieces_dict['white_rook_1'] = Piece(True, 'whiterook', 7, 0, 'white', 'rook')\n self.pieces_dict['white_knight_1'] = Piece(True, 'whiteknight', 7, 1, 'white', 'knight')\n self.pieces_dict['white_bishop_1'] = Piece(True, 'whitebishop', 7, 2, 'white', 'bishop')\n self.pieces_dict['white_king'] = Piece(True, 'whiteking', 7, 3, 'white', 'king')\n self.pieces_dict['white_queen'] = Piece(True, 'whitequeen', 7, 4, 'white', 'queen')\n self.pieces_dict['white_bishop_2'] = Piece(True, 'whitebishop', 7, 5, 'white', 'bishop')\n self.pieces_dict['white_knight_2'] = Piece(True, 'whiteknight', 7, 6, 'white', 'knight')\n self.pieces_dict['white_rook_2'] = Piece(True, 'whiterook', 7, 7, 'white', 'rook')\n\n self.pieces_dict['white_pawn_1'] = Piece(True, 'whitepawn', 6, 0, 'white', 'pawn', True)\n self.pieces_dict['white_pawn_2'] = Piece(True, 'whitepawn', 6, 1, 'white', 'pawn', True)\n self.pieces_dict['white_pawn_3'] = Piece(True, 'whitepawn', 6, 2, 'white', 'pawn', True)\n self.pieces_dict['white_pawn_4'] = Piece(True, 'whitepawn', 6, 3, 'white', 
'pawn', True)\n self.pieces_dict['white_pawn_5'] = Piece(True, 'whitepawn', 6, 4, 'white', 'pawn', True)\n self.pieces_dict['white_pawn_6'] = Piece(True, 'whitepawn', 6, 5, 'white', 'pawn', True)\n self.pieces_dict['white_pawn_7'] = Piece(True, 'whitepawn', 6, 6, 'white', 'pawn', True)\n self.pieces_dict['white_pawn_8'] = Piece(True, 'whitepawn', 6, 7, 'white', 'pawn', True)\n\n self.pieces_dict['black_rook_1'] = Piece(True, 'blackrook', 0, 0, 'black', 'rook')\n self.pieces_dict['black_knight_1'] = Piece(True, 'blackknight', 0, 1, 'black', 'knight')\n self.pieces_dict['black_bishop_1'] = Piece(True, 'blackbishop', 0, 2, 'black', 'bishop')\n self.pieces_dict['black_king'] = Piece(True, 'blackking', 0, 3, 'black', 'king')\n self.pieces_dict['black_queen'] = Piece(True, 'blackqueen', 0, 4, 'black', 'queen')\n self.pieces_dict['black_bishop_2'] = Piece(True, 'blackbishop', 0, 5, 'black', 'bishop')\n self.pieces_dict['black_knight_2'] = Piece(True, 'blackknight', 0, 6, 'black', 'knight')\n self.pieces_dict['black_rook_2'] = Piece(True, 'blackrook', 0, 7, 'black', 'rook')\n\n self.pieces_dict['black_pawn_1'] = Piece(True, 'blackpawn', 1, 0, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_2'] = Piece(True, 'blackpawn', 1, 1, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_3'] = Piece(True, 'blackpawn', 1, 2, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_4'] = Piece(True, 'blackpawn', 1, 3, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_5'] = Piece(True, 'blackpawn', 1, 4, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_6'] = Piece(True, 'blackpawn', 1, 5, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_7'] = Piece(True, 'blackpawn', 1, 6, 'black', 'pawn', True)\n self.pieces_dict['black_pawn_8'] = Piece(True, 'blackpawn', 1, 7, 'black', 'pawn', True)", "def __init__(self):\r\n self._turn = \"R\"\r\n self._game_state = \"UNFINISHED\"\r\n self._game_board = board.Board() # calls the Board class\r\n self._piece = piece.Piece(self._game_board.get_board) # calls the Piece class, passing to it the current game board\r\n self._check_status = False\r\n self._checkmate_status = False\r\n self._color = None", "def __init__(self, duration=21):\n self.duration = duration\n self.remaining_duration = duration\n self.__colour_index = ZebraVirus.__colour_index", "def __init__(self, mode: str):\n num_of_blocks, num_of_bombs = self.create_board(mode)\n # Board size in playable spaces\n self.width, self.height = num_of_blocks, num_of_blocks\n self.mine_count = num_of_bombs\n # Construct board of empty squares\n self.board = [[\n MyneSquare(0, False, \"Icons/temp_empty.png\",\n pygame.Rect(x * ICON_SIZE, y * ICON_SIZE,\n ICON_SIZE, ICON_SIZE))\n for y in range(self.height)] for x in range(self.width)]\n self.mine_lst = []\n self.place_mine()\n for x in range(self.width):\n for y in range(self.height):\n self.place_numbers(x, y)", "def __init__(self, *args):\r\n self.__stencils = {}\r\n self.__shapes = {}\r\n #self.add_stencil(\"Basic Shapes.vss\")\r\n for stencil in args:\r\n self.add_stencil(stencil)", "def pieces(self, pieces):\n\n self._pieces = pieces", "def __init__(self, box_pos, color):\r\n\r\n self._box_pos = box_pos\r\n self._color = color\r\n self._played = False", "def _init_colour(self, name: str) -> None:\n if len(self._subtrees) > 0:\n self._colour = (100, 100, 100)\n else:\n file_type = \"Other File\"\n for category in FILE_EXTENSIONS:\n for extension in FILE_EXTENSIONS[category]:\n if name.lower().endswith(extension):\n file_type = category\n self._colour = 
FILE_COLORS[file_type]", "def __init__(self):\n this = _coin.new_SoPackedColor()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self):\n this = _coin.new_SoColorPacker()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, settings):\n ColorDefParser.__init__(self, settings)\n self._define_parser_colordef_attr()", "def __init__(self):\n self.segments = []\n self._create_init_snakes()", "def __init__(self):\n # Main character id\n self.name = None\n self.p1 = None\n self.p1_is = None\n self.p2 = None\n self.p3 = None", "def load_pieces(self):\n for piece in os.listdir(os.path.join(DATA_PATH, 'pieces')):\n piece_image = pygame.image.load(os.path.join(DATA_PATH + 'pieces/', piece))\n setattr(self, piece[0], piece_image)", "def __init__(self, color, pos):\n\n self._color = color\n self._pos = pos\n self._role = \"ge\" # Note \"ge\" = Generic, not General (\"GG\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a piece to the player's reserve pile
def add_reserve_piece(self):
    self._reserve_pieces += 1
[ "def add_to_reserve(self, pawn):\n self._reserve.append(pawn)", "def place_piece(board, x, y, player):\n can_place = isfree(board, x, y)\n if can_place:\n board[(x,y)] = player\n return can_place", "def add_piece(self, piece):\n self.piece = piece\n self.set_piece_rect(self.square_rect)", "def reserve_bottom(self, player, location):\r\n self._board[location[0]][location[1]] = self._board[location[0]][location[1]][1:]\r\n player.add_reserve()", "def add_piece(self, col_number, player=None) :\n\n if self.is_column_full(col_number) :\n raise IndexError(\"Can't add piece to full column \"+str(col_number)+\".\")\n\n player = player or self.whose_turn\n piece_type = self.__piece_type__(player)\n new_board = self.copy()\n height = 1 + new_board.get_column_height(col_number)\n new_board.board_array[-height][col_number] = piece_type\n new_board.prev_move_string = (\"Put \" + str(player)\n + \"'s piece in col \" + str(col_number))\n # adding a piece causes the current player to swap\n new_board.set_current_player_name(new_board.players[1])\n return new_board", "def add_piece(self, piece, location):\n # if the center of the piece is being added to a valid spot on the board...\n if location[0] in range(2, 20) and location[1] in range(2, 20):\n self._board[location[0]][location[1]] = piece.get_piece_center()\n\n # then add the center, plus any other part of the piece that is not off the board\n if location[0] - 1 in range(2, 20) and location[1] in range(2, 20):\n self._board[location[0] - 1][location[1]] = piece.get_piece_N()\n\n if location[0] - 1 in range(2, 20) and location[1] - 1 in range(2, 20):\n self._board[location[0] - 1][location[1] - 1] = piece.get_piece_NW()\n\n if location[0] - 1 in range(2, 20) and location[1] + 1 in range(2, 20):\n self._board[location[0] - 1][location[1] + 1] = piece.get_piece_NE()\n\n if location[0] + 1 in range(2, 20) and location[1] in range(2, 20):\n self._board[location[0] + 1][location[1]] = piece.get_piece_S()\n\n if location[0] + 1 in range(2, 20) and location[1] - 1 in range(2, 20):\n self._board[location[0] + 1][location[1] - 1] = piece.get_piece_SW()\n\n if location[0] + 1 in range(2, 20) and location[1] + 1 in range(2, 20):\n self._board[location[0] + 1][location[1] + 1] = piece.get_piece_SE()\n\n if location[0] in range(2, 20) and location[1] + 1 in range(2, 20):\n self._board[location[0]][location[1] + 1] = piece.get_piece_E()\n\n if location[0] in range(2, 20) and location[1] - 1 in range(2, 20):\n self._board[location[0]][location[1] - 1] = piece.get_piece_W()", "def move_piece(board, x, y, new_x, new_y):\n if not isfree(board, x, y):\n player = get_piece(board, x, y)\n remove_piece(board, x, y)\n place_piece(board, new_x, new_y, player)\n return True\n return False", "def place_piece(self, piece, row, col):\n if self.board.verify_pos(row, col):\n rewards = self.create_rewards(piece, row, col)\n #TODO: delete the pos from valued_moves\n self.check_game_end(piece, rewards)\n super().place_piece(piece, row, col)\n self.update_moves(row, col)", "def _place_piece(self, position, piece):\n self._positions[str(position)].piece = piece", "def pickup(self, slot_x):\n player_inventory[self.name] += 1 #add item to inventory\n self.tile_y = 15 #last row reserved for inventory\n self.tile_x = slot_x #chosen inventory slot\n self.y = self.tile_y * TILESIZE #set new y position on screen\n self.x = self.tile_x * TILESIZE #set new x position on screen", "def place_piece(self, piece, position):\n if (self.can_place_piece(piece, position) != self.CanPlaceResults.Ok):\n 
return self.PlaceResults.Failed\n\n self.board[position] = piece\n player = self.get_player_from_piece(self.turn)\n player.pieces_amount -= 1\n player.increase_position_move_count()\n\n if (self.players[0].pieces_amount == 0 and self.players[1].pieces_amount == 0):\n self.state = self.GameStage.Moving\n\n if (self.board.has_three_at_position(piece, position)):\n player.latest_created_mill = self.board.get_mill_at_position(piece, position)\n self.eliminating = True\n return self.PlaceResults.GotThree\n self.turn = self.board.get_other_piece(self.turn)\n self.total_turns = self.total_turns + 1\n return self.PlaceResults.Placed", "def pour_in(self, amount: int) -> None:\n space = self.capacity - self.holding\n if amount > space:\n raise Spillage(\"Too much! Overflowing!\")\n self.holding += amount", "def reserved_move(self, player_name, coord):\n player = self.get_active_player(player_name)\n board = self.get_board()\n if player.get_reserve_pieces() < 1:\n return 'no pieces in reserve'\n board[coord[0]][coord[1]].append(player.get_player_color())\n player.remove_reserve_piece()\n board = self.process_stack(board, coord, player)\n self.set_board(board)\n self.get_next_player(player)\n return 'successfully moved'", "def _give_player_tile(self, player, tile):\r\n player.tiles.add(tile)\r\n self.tile_deck.remove(tile)", "def put(self, piece, position):\n piece.position = Square(position.x, position.y)\n self.state.pitch.board[position.y][position.x] = piece", "def addPile(self, matchPile):\r\n self.piles.append(matchPile)", "def remove_reserve_piece(self):\n self._reserve_pieces -= 1", "def placeAPiece(self):\n # Check if you can eliminate any opponent piece by placing your piece\n for y in range(0, 8):\n for x in range(0, 8):\n if self.board.board[y][x] == self.piece:\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n try:\n if (x + dx + dx) < 0 or (y + dy + dy) < 0:\n continue\n\n if (self.board.board[y + dy][x + dx] == self.opponentPiece\n and self.board.board[y + dy +dy][x + dx + dx] == \"-\"\n and (x + dx + dx, y + dy + dy) not in self.board.placeBanList):\n if x + dx + dx > 0 and y + dy + dy > 0:\n self.board.placePiece((x + dx + dx, y + dy + dy), self.myColour)\n return (x + dx + dx, y + dy + dy)\n else:\n continue\n except IndexError:\n continue\n\n # Tries to place a piece on the middle positions of the board first\n counter = 0\n while True:\n lowerBound = 3\n upperBound = 4\n # The range for placing slowly grows outwards\n # if it cannot find a place at first within a few tries\n if counter > 5 and counter < 15:\n lowerBound = 2\n upperBound = 5\n elif counter > 15 and counter < 50:\n lowerBound = 1\n upperBound = 6\n elif counter > 50:\n lowerBound = 0\n upperBound = 7\n\n x = randint(lowerBound, upperBound)\n y = randint(lowerBound, upperBound)\n\n counter += 1\n # Checks if the piece will get eliminated next turn if we\n # place a piece in the generated position\n dangerPlace = False\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n # In order to get rid of negative indexing since its annoying\n if (x + dx) < 0 or (y + dy) < 0:\n continue\n\n try:\n if ((self.board.board[y+dy][x+dx] == self.opponentPiece or\n self.board.board[y+dy][x+dx] == \"X\") and\n self.board.board[y-dy][x-dx] == \"-\"):\n dangerPlace = True\n break\n except IndexError:\n continue\n if dangerPlace:\n continue\n # Place the piece if the game rules allow it and then return\n if (x, y) not in self.board.placeBanList:\n self.board.placePiece((x, y), self.myColour)\n return ((x, y))", "def overflow(self, 
player, move):\r\n player_profile = self.which_player(player)\r\n piece_move = self._board[move[0]][move[1]]\r\n while len(self._board[move[0]][move[1]]) > 5:\r\n bottom_piece = piece_move[0]\r\n self._board[move[0]][move[1]].pop(0)\r\n if bottom_piece == player_profile.get_color():\r\n player_profile.add_reserve()\r\n if bottom_piece != player_profile.get_color():\r\n player_profile.add_capture()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether there are enough reserve pieces for a move; removes one if so, otherwise returns an error message.
def remove_reserve_piece(self):
    self._reserve_pieces -= 1
[ "def reserved_move(self, player_name, coord):\n player = self.get_active_player(player_name)\n board = self.get_board()\n if player.get_reserve_pieces() < 1:\n return 'no pieces in reserve'\n board[coord[0]][coord[1]].append(player.get_player_color())\n player.remove_reserve_piece()\n board = self.process_stack(board, coord, player)\n self.set_board(board)\n self.get_next_player(player)\n return 'successfully moved'", "def eliminate_piece(self, position):\n if (self.can_eliminate_piece(position) != self.CanElimateResults.Ok):\n return False\n self.board[position] = Piece.Empty\n self.eliminating = False\n self.ai_eliminated = True\n self.total_turns = self.total_turns + 1\n self.turn = self.board.get_other_piece(self.turn)\n\n return True", "def add_reserve_piece(self):\n self._reserve_pieces += 1", "def remove_piece(board, x, y):\n piece_exists = not isfree(board, x, y)\n if piece_exists:\n del board[(x,y)]\n return piece_exists", "def move_piece(self, player, select, move, number_pieces):\r\n if self.check(player, select, move, number_pieces):\r\n player_profile = self.which_player(player)\r\n piece_select = self._board[select[0]][select[1]]\r\n bottom_place = len(piece_select) - number_pieces\r\n for num in range(number_pieces):\r\n bottom_piece = piece_select[bottom_place]\r\n self._board[select[0]][select[1]].pop(bottom_place)\r\n self._board[move[0]][move[1]].append(bottom_piece)\r\n if len(self._board[move[0]][move[1]]) > 5:\r\n self.overflow(player, move)\r\n if player_profile.get_capture() == 6:\r\n return player_profile.get_name() + \" Wins\"\r\n return \"successfully moved\"\r\n\r\n else:\r\n return False", "def remove_piece(s, (x,y)):\n\t\ts.matrix[x][y].occupant = None", "def find_available_moves(game, pieces, playerID=-1, num=-1):\r\n if playerID < 0:\r\n playerID = game.current_playerID\r\n\r\n corners = game.find_available_corners(playerID=playerID)\r\n\r\n # here are all of the available mating corners for each playerID\r\n #(i could get away with only the corners for the current player, but\r\n # getting all of them allows me to just how many blokus's are performed on a move\r\n # which could be part of a goodness metric for a strategy).\r\n\r\n # remove all of the other players pieces from the list\r\n pieces = [[i, pieces[i].piece]\r\n for i in xrange(len(pieces)) if pieces[i].playerID == playerID]\r\n moves = []\r\n\r\n for item in pieces:\r\n for rotation in xrange(0, 4):\r\n for parity in [-1, 1]:\r\n piece = item[1]\r\n ind = item[0]\r\n test_piece = Piece(\r\n piece.pieceID,\r\n piece.playerID,\r\n rotation=rotation,\r\n parity=parity)\r\n geo = test_piece.geometry\r\n Size = geo.shape\r\n for i in xrange(Size[0]):\r\n for j in xrange(Size[1]):\r\n for corner in corners:\r\n test_position = (corner[0] - i, corner[1] - j)\r\n new_board, problem = game.check_if_is_allowed(\r\n test_piece, test_position)\r\n if problem == '':\r\n moves = moves + [{'playerID': piece.playerID,\r\n 'index': ind,\r\n 'pieceID': piece.pieceID,\r\n 'position': test_position,\r\n 'rotation': rotation,\r\n 'parity': parity}]\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n if (num > 0 and len(moves) >= num):\r\n break\r\n\r\n return moves", "def is_space_free(self, move):\r\n return self.board[move] == ' ' # Return True of empty, else return False.\r", "def 
test_move_partition_rg_imbalanced(self):\n assert not self.move_partition_valid(0, 1, 3)", "def update_allowed_moves(self):\n piece_id = self.get_piece_id()\n color = piece_id[0]\n board = self.get_board()\n hyp_moves = self.get_hyp_moves()\n allowed_moves = hyp_moves.copy()\n\n # eliminate moves with the destination occupied by a piece of the same\n # color\n for destination in hyp_moves:\n piece_id_at_destination = board.get_occupation(destination)\n if piece_id_at_destination is not None:\n if piece_id_at_destination[0] == color:\n del allowed_moves[destination]\n\n # prevent iterating through already eliminated moves\n hyp_moves = allowed_moves.copy()\n\n # eliminate moves with an occupied intermediate position\n for destination, intermediates in hyp_moves.items():\n for intermediate in intermediates:\n if board.get_occupation(intermediate) is not None:\n if destination in allowed_moves: # not yet deleted\n del allowed_moves[destination]\n\n self.set_allowed_moves(allowed_moves)", "def test_move_partition_movement_size_too_large(self):\n assert not self.move_partition_valid(5, 1, 3, max_movement_size=1)", "def eliminate(self):\n self.print_board()\n print(self.identify_piece(self.game.turn) + ' player has three in a row!')\n\n while True:\n position = self.input_number('Choose a piece to eliminate: ') - 1\n result = self.game.can_eliminate_piece(position)\n if result == Game.CanElimateResults.Ok:\n self.game.eliminate_piece(position)\n player = self.game.get_player_from_piece(self.game.turn)\n player.previous_move[2] = position\n break\n elif result == Game.CanElimateResults.NoPiece:\n print(\"No piece at that position.\")\n elif result == Game.CanElimateResults.TargetAreThrees:\n print(\"Target are threes and can not be removed.\")\n elif result == Game.CanElimateResults.WrongPiece:\n print(\"You can't eliminate your own piece\")\n elif result == Game.CanElimateResults.OutsideBoard:\n print(\"Position is outside the board\")\n else:\n print(\"Something went wrong\")", "def remove_reserve(self):\r\n self._reserves -= 1", "def move_piece(self, rnum_from, rnum_to):\n self._chk_rnum(rnum_from)\n self._chk_rnum(rnum_to)\n if self.is_square_empty(rnum_from):\n raise CheckersError(self._s_pos(rnum_from), \"no piece found\")\n if self.is_square_occupied(rnum_to):\n raise CheckersError(self._s_pos(rnum_to),\n f\"{self._pieces[rnum_to]} occupies square\")\n self._pieces[rnum_to] = self._pieces.pop(rnum_from)", "def placeAPiece(self):\n # Check if you can eliminate any opponent piece by placing your piece\n for y in range(0, 8):\n for x in range(0, 8):\n if self.board.board[y][x] == self.piece:\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n try:\n if (x + dx + dx) < 0 or (y + dy + dy) < 0:\n continue\n\n if (self.board.board[y + dy][x + dx] == self.opponentPiece\n and self.board.board[y + dy +dy][x + dx + dx] == \"-\"\n and (x + dx + dx, y + dy + dy) not in self.board.placeBanList):\n if x + dx + dx > 0 and y + dy + dy > 0:\n self.board.placePiece((x + dx + dx, y + dy + dy), self.myColour)\n return (x + dx + dx, y + dy + dy)\n else:\n continue\n except IndexError:\n continue\n\n # Tries to place a piece on the middle positions of the board first\n counter = 0\n while True:\n lowerBound = 3\n upperBound = 4\n # The range for placing slowly grows outwards\n # if it cannot find a place at first within a few tries\n if counter > 5 and counter < 15:\n lowerBound = 2\n upperBound = 5\n elif counter > 15 and counter < 50:\n lowerBound = 1\n upperBound = 6\n elif counter > 50:\n lowerBound = 0\n 
upperBound = 7\n\n x = randint(lowerBound, upperBound)\n y = randint(lowerBound, upperBound)\n\n counter += 1\n # Checks if the piece will get eliminated next turn if we\n # place a piece in the generated position\n dangerPlace = False\n for dx, dy in [(1, 0), (0, 1), (0, -1), (-1, 0)]:\n # In order to get rid of negative indexing since its annoying\n if (x + dx) < 0 or (y + dy) < 0:\n continue\n\n try:\n if ((self.board.board[y+dy][x+dx] == self.opponentPiece or\n self.board.board[y+dy][x+dx] == \"X\") and\n self.board.board[y-dy][x-dx] == \"-\"):\n dangerPlace = True\n break\n except IndexError:\n continue\n if dangerPlace:\n continue\n # Place the piece if the game rules allow it and then return\n if (x, y) not in self.board.placeBanList:\n self.board.placePiece((x, y), self.myColour)\n return ((x, y))", "def overflow(self, player, move):\r\n player_profile = self.which_player(player)\r\n piece_move = self._board[move[0]][move[1]]\r\n while len(self._board[move[0]][move[1]]) > 5:\r\n bottom_piece = piece_move[0]\r\n self._board[move[0]][move[1]].pop(0)\r\n if bottom_piece == player_profile.get_color():\r\n player_profile.add_reserve()\r\n if bottom_piece != player_profile.get_color():\r\n player_profile.add_capture()", "def _is_empty(self, pieces, move):\n\n moved_piece = None\n # Find the head piece\n for piece in pieces:\n moved_piece = StateSpaceGenerator.apply_movement(piece, move)\n # Head piece found if the new position isn't already taken up\n if tuple(moved_piece) not in pieces:\n break\n # *NOTE* moved_piece = head piece - sorry not very clear\n\n # Check if new position is taken up by any other pieces on the board\n if self._tile_taken(moved_piece, 1):\n return False\n # Check if new position is out of bounds\n elif not self._check_piece_bounds(moved_piece):\n return False\n # Is an empty space if both of those are not True\n else:\n return True", "def remove_lock(self, pokemon, move):\n pass", "def _check_valid_sidestep(self, pieces, move):\n # Create list for new position of pieces\n moved_pieces = []\n # Move 2 or 3 pieces to new position and add to list\n if len(pieces) == 2:\n moved_pieces.append(StateSpaceGenerator.apply_movement(pieces[0], move))\n moved_pieces.append(StateSpaceGenerator.apply_movement(pieces[1], move))\n else:\n moved_pieces.append(StateSpaceGenerator.apply_movement(pieces[0], move))\n moved_pieces.append(StateSpaceGenerator.apply_movement(pieces[1], move))\n moved_pieces.append(StateSpaceGenerator.apply_movement(pieces[2], move))\n\n # Returns False if new position has already been taken by other pieces\n if self._tile_taken(moved_pieces, len(moved_pieces)):\n return False\n\n for piece in moved_pieces:\n # Return False if any of the pieces are out of bounds\n if not self._check_piece_bounds(piece):\n return False\n # Sidestep is valid otherwise\n else:\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Captures a piece for the player
def capture_piece(self): self._captured_pieces += 1
[ "def take_effect(self, player):\n\t\tpass", "def add_piece(self, piece):\n self.piece = piece\n self.set_piece_rect(self.square_rect)", "def captured_piece(self):\n self._num_captured += 1", "def play(self, player, game):\n pass #normal card dont play cause only a special card action", "def capture_bottom(self, player, location):\r\n self._board[location[0]][location[1]] = self._board[location[0]][location[1]][1:]\r\n player.add_captured()", "def draw_player(self, location):\r\n self.screen.blit(self.player_blit, location)", "def pickUpPiece(self, position):\n self.board.bind('<Motion>', \n lambda event, piece=self.pieces[position]: \n self.trackMouse(event, piece))", "def general_can_capture(self, color):\n\n # Convert the color\n short_color = self.shorten_color(color)\n\n # Determine which pieces of the opposite color could capture the General\n threats = self.threat_list(color)\n\n # If there is more than 1 threatening piece, can't capture more than 1 in next turn\n if len(threats) > 1:\n return False\n\n # Otherwise, see if any of that team's pieces can capture the enemy piece on their turn\n else:\n\n enemy_piece = threats[0]\n enemy_pos = enemy_piece.get_pos() # Returns alg. notation\n ways_to_capture = []\n\n # Cycle through the entire board\n for row in range(0, len(self.get_board())):\n\n for col in range(0, len(self.get_board()[row])):\n\n # See if that piece is the team's color we need\n if self.get_board()[row][col].get_color() == short_color:\n\n team_piece = self.get_board()[row][col]\n\n # Call check move on the team piece's position to see if\n # it can traverse to the enemy piece position (capture)\n result = self.check_move(team_piece.get_pos(), enemy_pos)\n\n # Check move result will be a list if the move can be made\n if type(result) == list:\n\n # We need to add the first space back in so we know\n # which piece to try the move from in the next step\n result.insert(0, team_piece.get_pos())\n ways_to_capture.append(result)\n\n # If there's no way to capture, General will still be in check.\n if len(ways_to_capture) == 0:\n return False\n\n # For each checked move, see if the General would still be in check with the\n # proposed piece making their move.\n\n else:\n safe_moves = 0\n\n for path in range(0, len(ways_to_capture)):\n\n # Save the starting piece information\n start_coords_alg = ways_to_capture[path][0]\n start_coords = self.translate_to_list_coords(start_coords_alg)\n start_row = start_coords[0]\n start_col = start_coords[1]\n start_piece = self.get_board()[start_row][start_col]\n\n #Save the ending/capture piece information\n end_coords_alg = ways_to_capture[path][-1]\n end_coords = self.translate_to_list_coords(end_coords_alg)\n end_row = end_coords[0]\n end_col = end_coords[1]\n end_piece = self.get_board()[end_row][end_col]\n\n # Put the start piece in the new/end pos\n self.get_board()[end_row][end_col] = start_piece\n start_piece.set_pos(end_coords_alg)\n\n # Put NoPiece at piece's former position\n self.get_board()[start_row][start_col] = NoPiece(start_coords_alg)\n\n # Increment safe_moves counter if General no longer in check after move\n if not self.is_in_check(color):\n safe_moves += 1\n\n # Put the end piece back at its original pos\n self.get_board()[end_row][end_col] = end_piece\n end_piece.set_pos(end_coords_alg)\n\n # Put the moving piece back at its original pos\n self.get_board()[start_row][start_col] = start_piece\n start_piece.set_pos(start_coords_alg)\n\n if safe_moves > 0:\n return True\n\n else:\n return False", "def 
get_player_from_piece(self, piece):\n if (piece == Piece.Black):\n return self.players[0]\n if (piece == Piece.White):\n return self.players[1]\n return None", "def play_card(self, pile):\n card = self.hand.remove_first()\n pile.add_card(card)", "def grab(self, player):\n player.health.gain_health(self.heal_amount)\n effect = pygame.mixer.Sound('./assets/health-sound.wav')\n effect.play()\n self.kill()", "def insurance(player, taken=False):\n pass", "def put(self, piece, position):\n piece.position = Square(position.x, position.y)\n self.state.pitch.board[position.y][position.x] = piece", "def test_play(self):\n self.plr.play_card(self.card)\n self.assertEqual(self.plr.coins.get(), 2)\n self.plr.actions.set(1)\n self.plr.test_input = [\"Gain\"]\n self.plr.play_card(self.village)\n self.assertIn(\"Village\", self.plr.piles[Piles.DISCARD])\n self.assertIn(\"Village\", self.plr.piles[Piles.PLAYED])", "def _play_move(self,point, color):\n \n if point == None: #play a pass move\n msg = \"Playing a pass move with %s color is permitted\"%(color)\n return True, msg\n \n if self.board[point] != EMPTY:\n c=self._point_to_coord(point)\n msg = \"Row and Column: %d %d is already filled with a %s stone\"%(c[0],c[1],GoBoardUtil.int_to_color(color))\n return False,msg\n if point == self.ko_constraint:\n msg =\"KO move is not permitted!\"\n return False , msg\n in_enemy_eye = self._is_eyeish(point) == GoBoardUtil.opponent(color)\n self.board[point] = color\n self._is_empty = False\n self.caps = []\n single_captures = []\n cap_inds = None\n neighbors = self._neighbors(point)\n for n in neighbors:\n if self.board[n]==BORDER:\n continue\n if self.board[n]!=color:\n if self.board[n]!=EMPTY:\n fboard = self._flood_fill(n)\n if not self._liberty_flood(fboard):\n cap_inds = fboard==FLOODFILL\n #self.caps = np.where(fboard==FLOODFILL)\n self.caps += list(*np.where(fboard==FLOODFILL))\n num_captures = np.sum(cap_inds)\n if num_captures == self.size*self.size:\n self._is_empty = True\n if num_captures == 1:\n single_captures.append(n)\n if color==WHITE:\n self.white_captures += num_captures\n else :\n self.black_captures += num_captures\n self.board[cap_inds]=EMPTY\n fboard = self._flood_fill(point)\n self.ko_constraint = single_captures[0] if in_enemy_eye and len(single_captures) == 1 else None\n if self._liberty_flood(fboard) and self.suicide:\n #non suicidal move\n c=self._point_to_coord(point)\n msg = \"Playing a move with %s color in the row and column %d %d is permitted\"%(color,c[0],c[1])\n return True, msg\n else:\n # undoing the move because of being suicidal\n self.board[point] = EMPTY\n if cap_inds!= None:\n self.board[cap_inds]=GoBoardUtil.opponent(color)\n c=self._point_to_coord(point)\n msg = \"Suicide move with color %s in the row and column: %d %d \"%(color, c[0],c[1])\n return False, msg", "def drawPiece(self, chessGUI):\n\t\t# Store coordinates to avoid passing self\n\t\tsquareX,squareY = self.getCoordinates()[0],self.getCoordinates()[1]\n\t\tif(self.drawn == False):\n\t\t\t# Draw the image if it hasn't already been\n\t\t\tsquareX,squareY = self.getCoordinates()[0],self.getCoordinates()[1]\n\t\t\tself.image = Image(chessGUI.getSquare([squareX,squareY]).getCenter(), self.imageName)\n\t\t\tchessGUI.draw(self.image)\n\t\t\tself.drawn = True\n\n\t\t# Otherwise, just change the parameters.\n\t\telse:\n\t\t\t# Move the piece to the current location\n\t\t\tdesiredCenter = chessGUI.getSquare([squareX,squareY]).getCenter()\n\t\t\tdx = 1 * desiredCenter.getX() - self.image.getAnchor().getX()\n\t\t\tdy = 1 * 
desiredCenter.getY() - self.image.getAnchor().getY()\n\t\t\tself.image.move(dx,dy)", "def __whose_piece__(self) :\n return dict([(self.__piece_type__(x), x) for x in self.players])", "def create_piece_screenshots(self, x: float, y: float, w: float, h: float, sct: mss.mss) -> (int, dict):\n mon = self.mon\n board_monitor = {\n \"top\": mon[\"top\"] + int(y),\n \"left\": mon[\"left\"] + int(x),\n \"width\": int(w),\n \"height\": int(h),\n \"mon\": self.monitor_number,\n }\n\n # Screenshot of the chess board\n img = np.array(sct.grab(board_monitor))\n\n # Convert the image to grayscale for image processing\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n # Resizes the board screenshot to make it perfectly divisible by 8 so that it can be split up into an 8x8 grid\n # of squares\n board_width = (w // 8) * 8\n gray = cv.resize(gray, (board_width, board_width), interpolation=cv.INTER_LINEAR)\n\n # Gets the width of each chess square\n square_width = board_width // 8\n\n # Takes a screenshot of every piece on a light and dark square except for pawns\n for k in range(8):\n black = gray[0:square_width, k * square_width:k * square_width + square_width]\n cv.imwrite(f\"chess_pieces/black_{PIECES[k]}_{SQUARE[k % 2]}.png\", black)\n white = gray[board_width - square_width:board_width, k * square_width:k * square_width + square_width]\n cv.imwrite(f\"chess_pieces/white_{PIECES[k]}_{SQUARE[(k + 1) % 2]}.png\", white)\n\n white_pixel = gray[10, 10]\n green_pixel = gray[board_width - 10, 10]\n\n\n # Duplicates the screenshots for the King and Queen for both players because there is only one of each\n black_king_dark = gray[0:square_width, 4 * square_width:5 * square_width]\n\n for row in range(len(black_king_dark)):\n for col in range(len(black_king_dark[row])):\n if black_king_dark[row][col] == white_pixel:\n black_king_dark[row][col] = green_pixel\n cv.imwrite(\"chess_pieces/black_king_dark.png\", black_king_dark)\n white_king_light = gray[7 * square_width:board_width, 4 * square_width:5 * square_width]\n for row in range(len(white_king_light)):\n for col in range(len(white_king_light[row])):\n if white_king_light[row][col] == green_pixel:\n white_king_light[row][col] = white_pixel\n cv.imwrite(\"chess_pieces/white_king_light.png\", white_king_light)\n black_queen_light = gray[0:square_width, 3 * square_width:4 * square_width]\n for row in range(len(black_queen_light)):\n for col in range(len(black_queen_light[row])):\n if black_queen_light[row][col] == green_pixel:\n black_queen_light[row][col] = white_pixel\n cv.imwrite(\"chess_pieces/black_queen_light.png\", black_queen_light)\n white_queen_dark = gray[7 * square_width:board_width, 3 * square_width:4 * square_width]\n for row in range(len(white_queen_dark)):\n for col in range(len(white_queen_dark[row])):\n if white_queen_dark[row][col] == white_pixel:\n white_queen_dark[row][col] = green_pixel\n cv.imwrite(\"chess_pieces/white_queen_dark.png\", white_queen_dark)\n\n # Takes a screenshot of the pawns on a light and dark square\n black_pawn_dark = gray[square_width:2 * square_width, 0:square_width]\n cv.imwrite(\"chess_pieces/black_pawn_dark.png\", black_pawn_dark)\n black_pawn_light = gray[square_width:2 * square_width, square_width:2 * square_width]\n cv.imwrite(\"chess_pieces/black_pawn_light.png\", black_pawn_light)\n white_pawn_light = gray[6 * square_width:7 * square_width, 0:square_width]\n cv.imwrite(\"chess_pieces/white_pawn_light.png\", white_pawn_light)\n white_pawn_dark = gray[6 * square_width:7 * square_width, square_width:2 * 
square_width]\n cv.imwrite(\"chess_pieces/white_pawn_dark.png\", white_pawn_dark)\n\n # Obtains a pixel value from the white_queen which is used to determining the color of the pieces the user is\n # playing as\n white_pixel_value = white_queen_dark[square_width // 2, square_width // 2]\n\n cv.imshow(\"board screenshot\", img)\n cv.waitKey(0)\n\n return white_pixel_value, board_monitor", "def play_piece(self, piece, piece_moves):\n start_file, start_rank = piece.file_pos, piece.rank_pos\n coord_str = \"\"\n select_move_dict = {}\n key_num = 1\n for move_vector in piece_moves:\n move_notation_str = self.board.move_notation(piece, move_vector)\n coord_str += (str(key_num) + \". \" + move_notation_str + \" | \") \n select_move_dict.update({key_num: move_vector})\n key_num += 1\n while True:\n try:\n print(\"0. Go back. | \" + coord_str)\n input_num = int(input(\"Enter the move you want to make: \"))\n if input_num == 0:\n raise ReturnException(\"go back\")\n if input_num >= key_num or input_num < 0:\n raise ValueError\n break\n except ValueError:\n print(f\"Invalid input. Please enter a number from 1 through {key_num-1}.\")\n while True:\n try:\n break_num = int(input(\"Enter 1 to confirm your move. 0 to go back: \"))\n if break_num == 1:\n break\n elif break_num == 0:\n raise ReturnException\n else:\n print(\"Invalid input.\")\n except ValueError:\n print(\"Please enter a number.\")\n\n move_vector = select_move_dict.get(input_num)\n direction, step = move_vector[0], move_vector[1]\n self.board.move_piece(start_file, start_rank, direction, step)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Reflect point a across vector b
def reflect(a, b): return 2 * proj(a,b) - a
[ "def get_vector(a, b):\n return Vector(b.x - a.x, b.y - a.y, b.z - a.z)", "def tangent(self, pos):", "def proj(a,b):\n return np.dot(a,b) * b / (np.linalg.norm(b)**2)", "def get_relative_transformation(a_to_base, b_to_base):\n\n base_to_a = np.linalg.inv(a_to_base)\n return base_to_a @ b_to_base", "def to_tangent(self, vector, base_point):\n sq_norm = gs.sum(base_point**2, axis=-1)\n inner_prod = self.embedding_space.metric.inner_product(base_point, vector)\n coef = inner_prod / sq_norm\n return vector - gs.einsum(\"...,...j->...j\", coef, base_point)", "def vector_subtraction(point1, point2): # point1 - point2\n return Point(coords=point1.as_array() - point2.as_array())", "def reflect(v, n):\n return v - 2 * np.dot(v,n) * n", "def extend(point_a, point_b, L):\n\n xa, ya = point_a\n xb, yb = point_b\n u_vec = [xb - xa, yb - ya]\n u_vec /= np.linalg.norm(u_vec)\n\n xc = xa + L * u_vec[0]\n yc = ya + L * u_vec[1]\n return xc, yc", "def get_xy_velocity(posa,posb,v):\n rest = posa-posb\n m = magnitude(rest)\n vx = (v * rest[0])/m\n vy = (v * rest[1])/m\n if m < scout_near:\n return vx * scout_velocity_decay*m/scout_near,vy * scout_velocity_decay*m/scout_near\n return vx,vy", "def project_point_onto_line(o, v, p):\n return o + dv.vector_projection(p - o, v)", "def dot(u, v):\n return u.x*v.x + u.y*v.y", "def extrapolate( b, centerpt ):\n edgept = get_edgept(b)\n b[0][0], b[0][1] = 2*edgept[0]-b[1][0], 2*edgept[1]-b[1][1]\n b[-1][0], b[-1][1] = 2*centerpt[0]-b[-2][0], 2*centerpt[1]-b[-2][1]", "def vectorCross(v1, v2):\r\n return (v1[0] * v2[1] - v1[1] * v2[0])", "def vrrotvec(a, b):\r\n a = normalize(a)\r\n b = normalize(b)\r\n ax = normalize(np.cross(a, b))\r\n\r\n angle = np.arccos(np.minimum(np.dot(a, b), [1]))\r\n if not np.any(ax):\r\n absa = np.abs(a)\r\n mind = np.argmin(absa)\r\n c = np.zeros((1, 3))\r\n c[mind] = 0\r\n ax = normalize(np.cross(a, c))\r\n r = np.concatenate((ax, angle))\r\n return r", "def reflect(self, normal):\n return (2 * normal) * self.dot(normal) - self", "def _reflect_points(points, p1 = (0,0), p2 = (1,0)):\n # From http://math.stackexchange.com/questions/11515/point-reflection-across-a-line\n points = np.array(points); p1 = np.array(p1); p2 = np.array(p2);\n if np.asarray(points).ndim == 1:\n return 2*(p1 + (p2-p1)*np.dot((p2-p1),(points-p1))/norm(p2-p1)**2) - points\n if np.asarray(points).ndim == 2:\n return np.array([2*(p1 + (p2-p1)*np.dot((p2-p1),(p-p1))/norm(p2-p1)**2) - p for p in points])", "def vec_angle(a, b):\n cosang = np.dot(a, b)\n sinang = fast_norm(np.cross(a, b))\n return np.arctan2(sinang, cosang)", "def hyperbolic_tangent(a, b, prime_offset=0.0, threshold=float('inf')):\n thr_fun = lambda X: (X < -threshold) * -a + (X > threshold) * a + ((X < -threshold) + (X > threshold) == 0) * X\n fun = lambda X: thr_fun(a * scipy.tanh(X * b))\n # der = lambda X: scipy.ones(X.shape) - scipy.tanh(X)**2\n ab = a * b\n der = lambda X: ab * (scipy.ones(X.shape) - scipy.tanh(X * b)**2) + scipy.ones(X.shape) * prime_offset\n inv = lambda X: scipy.arctanh(X / a) / b\n descr = \"hyperbolic_tangent(%f, %f, %f, %f)\" % (a, b, prime_offset, threshold)\n return ActivationFunction(fun, inv, der, descr)", "def do_reflect(self, x_time, x_sign, y_time, y_sign):\n\n ball_xs, ball_ys = self._model.get_ball_speed()\n ball_x, ball_y = self._model.get_ball_position()\n\n ball_x += x_time * ball_xs\n ball_xs *= x_sign\n\n ball_x += (1 - x_time) * ball_xs\n ball_y += y_time * ball_ys\n\n ball_ys *= y_sign\n ball_y += (1 - y_time) * ball_ys\n\n self._model.set_ball_speed(ball_xs, 
ball_ys)\n self._model.set_ball_position(ball_x, ball_y)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Projects vector a onto vector b returns the projected vector
def proj(a,b): return np.dot(a,b) * b / (np.linalg.norm(b)**2)
[ "def projection(b, a, norm=False):\n if norm:\n proj = np.dot(np.dot(a, a.T), b)\n else:\n c = np.dot(a.T, b) / np.dot(a.T, a)\n proj = c * a\n\n return proj", "def proj(A,B):\n return A - (A*B).sum()*B/(B**2).sum()", "def project(self, vector):\n return vector.multiply(self.dot(vector) / (vector.magnitude**2))", "def reflect(a, b):\n return 2 * proj(a,b) - a", "def compute_vec2vec_projection(self, u, v):\n return (np.dot(u, v) / np.linalg.norm(v)) * v", "def get_vector(a, b):\n return Vector(b.x - a.x, b.y - a.y, b.z - a.z)", "def proj(self, u, vec):\n return (vec + adj(vec)) / 2", "def proj(v,u):\r\n prefc = v.dot(u)/u.dot(u)\r\n return prefc*u", "def project(v: np.ndarray, w: np.ndarray) -> np.ndarray:\n return np.dot(v, w) * (w / np.linalg.norm(w))", "def main():\n\n # find projected b to vector v\n v=Vector([3.039, 1.879])\n b=Vector([0.825,2.036])\n projected_b = v.find_v_parallel_to(b)\n\n # print \"should print [1.083, 2.672]\"\n print projected_b, \"\\n\"\n\n # find orthogonal vector to vector v2\n v2=Vector([-9.88, -3.264,-8.159])\n b2=Vector([-2.155,-9.353,-9.473])\n v2_orthogonal = v2.find_v_orthongonal_to(b2)\n\n # should print [-8.350, 3.376, -1.434]\n print v2_orthogonal, \"\\n\"\n\n print \"decomposing a vector:\"\n #find vector coordinates of 2 vectors that sum to v5\n v5=Vector([3.009, -6.172, 3.692, -2.51])\n v6=Vector([6.404, -9.144, 2.759, 8.718])\n\n v5_parallel = v5.find_v_parallel_to(v6)\n v5_orthogonal = v5.find_v_orthongonal_to(v6)\n\n # should print #v5_a = [1.969, -2.811,0.848,2.680]\n print v5_parallel, \"\\n\"\n\n # should print #v5_b = [1.040,-3.361,2.844,-5.190]\n print v5_orthogonal", "def projection(vector, mu):\n return np.divide(vector, np.maximum(abs(vector) / mu, 1))", "def proj_dist(A,B):\n return (A*B).sum()/(B**2).sum()**0.5", "def testVectorProject(self):\n decimal_places = 9\n\n normal = Vector(1, 2, 3)\n base = Vector(5, 7, 9)\n x_dir = Vector(1, 0, 0)\n\n # test passing Plane object\n point = Vector(10, 11, 12).projectToPlane(Plane(base, x_dir, normal))\n self.assertTupleAlmostEquals(\n point.toTuple(), (59 / 7, 55 / 7, 51 / 7), decimal_places\n )\n\n # test line projection\n vec = Vector(10, 10, 10)\n line = Vector(3, 4, 5)\n angle = vec.getAngle(line)\n\n vecLineProjection = vec.projectToLine(line)\n\n self.assertTupleAlmostEquals(\n vecLineProjection.normalized().toTuple(),\n line.normalized().toTuple(),\n decimal_places,\n )\n self.assertAlmostEqual(\n vec.Length * math.cos(angle), vecLineProjection.Length, decimal_places\n )", "def biProjection(unitVec1, unitVec2):\n if np.array_equal(unitVec1, unitVec2):\n return unitVec1\n \n along = np.dot(unitVec1, unitVec2)\n if along == -1.0:\n return None\n \n unitVec1Perp = unit(unitVec2 - along * unitVec1)\n \n return unitVec1 + unitVec1Perp * (1.0 - along) / np.dot(unitVec2, unitVec1Perp)", "def proj(v,U):\n U = np.matrix(U)\n n,m = U.shape\n v = np.matrix(v).reshape(n,1)\n u = np.array((U*((U.T*U).I)*U.T)*v).flatten()\n\n return u", "def project_vector(self,x) :\n\n n_dofs = 4*self.param.n_cells\n projection = np.zeros(4*self.param.n_mom*self.param.n_cells)\n coarse_n_mom = x.shape[0]/(4*self.param.n_cells)\n if self.param.galerkin==True :\n skip = (-1+np.sqrt(1+2*self.param.n_mom))/2-1\n tmp_end = coarse_n_mom-(skip-1)\n residual = coarse_n_mom-tmp_end\n projection[0:n_dofs*tmp_end] = x[0:n_dofs*tmp_end]\n projection[n_dofs*(tmp_end+skip):n_dofs*(tmp_end+skip+residual)] =\\\n x[n_dofs*tmp_end:n_dofs*(tmp_end+residual)]\n else :\n projection[0:4*self.n_mom*self.param.n_cells] = 
x[0:4*coarse_n_mom*\\\n self.param.n_cells]\n \n return projection", "def colPlaneProj(vect):\n xProj = proj(vect,vector(1,0,0))\n zProj = proj(vect,vector(0,0,1))\n return xProj + zProj + (0,BALLSIZE,0)", "def plane_project(x,n):\n\treturn x-np.dot(x,n)/np.linalg.norm(n)*vecnorm(n)", "def project(self, point: 'SbVec2f') -> \"SbVec3f\":\n return _coin.SbPlaneProjector_project(self, point)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compute and prints errors between two images.
def print_errors(ref, img): print('RMSE : %.5f' % rmse(ref, img)) print('rRMSE: %.5f' % rrmse(ref, img))
[ "def error(x1, x2):\n return x2/x1 - 1", "def error_images(self):\n return flatten_out(\n [f.error_images for f in self.algorithm_results])", "def PostRegistration(image1, image2, model = False, **kwargs):\n silent = False\n if kwargs.get(silent) == True: silent = kwargs.get(silent)\n\n if np.any(model) == False:\n image2_adj = image2\n RMS_norm, RMS_raw, image_contrast = ImageDelta(image1, image2_adj)\n else:\n image2_adj = RegisterImage(image2, model)\n RMS_norm, RMS_raw, image_contrast = ImageDelta(image1, image2_adj.data, image2_adj.mask)\n\n blank_sample = np.zeros_like(image1)\n RGB_pic = RGBSlice(image1, image2_adj, blank_sample, B_scale = 1)\n\n if silent == False:\n fig, axes = plt.subplots(nrows = 1, ncols = 2, figsize = (20,10))\n axes[0].imshow(RGB_pic)\n axes[1].imshow(image_contrast, cmap = 'gray', interpolation = 'none')\n axes[0].title.set_text('Overlay')\n axes[1].title.set_text('Normalized intensity difference')\n plt.show()\n plt.close()\n\n print ('RMS difference of normalized images:',RMS_norm)\n print ('RMS difference of raw images:',RMS_raw)\n\n return RMS_norm, RMS_raw", "def errorMap(label1,label2):\n \n return 1.*(np.asarray(label1) == np.asarray(label2))", "def verify(test_img,key):\n\n ref_coeffs = proj_c[key]\n norm_test = test_img.flat - data_mean\n test_coeffs = N.dot(utt,norm_test)\n l2_err = N.linalg.norm(test_coeffs-ref_coeffs,2)\n imshow2(imtrain[key],test_img,\n labels = ('Reference Image','Test Image')) \n P.title('L2 coefficient error: %.2e' % l2_err)", "def mse(img1, img2): \n # TODO: implement this function.\n err=np.square(np.subtract(img1,img2)).mean()\n return err", "def diff(self):\n # read in files\n filesRead = False\n for testFilename, goldFilename in zip(self.__out_files, self.__gold_files):\n if not os.path.exists(testFilename):\n self.__same = False\n self.__message += 'Test file does not exist: '+testFilename\n elif not os.path.exists(goldFilename):\n self.__same = False\n self.__message += 'Gold file does not exist: '+goldFilename\n else:\n filesRead = True\n #read in files\n if filesRead:\n if not correctImport:\n self.__message += 'ImageDiff cannot run without imageio'+\\\n ' that is an optional RAVEN library. 
Pleaase install it.'\n self.__same = False\n return(self.__same, self.__message)\n try:\n # RAK - The original line...\n # testImage = imread(open(testFilename,'r'))\n # ...didn't work on Windows Python because it couldn't sense the file type\n testImage = imread(testFilename)\n except IOError:\n self.__message += 'Unrecognized file type for test image in imageio.imread: '+testFilename\n filesRead = False\n return (False, self.__message)\n try:\n # RAK - The original line...\n # goldImage = imread(open(goldFilename,'r'))\n # ...didn't work on Windows Python because it couldn't sense the file type\n goldImage = imread(goldFilename)\n except IOError:\n filesRead = False\n self.__message += 'Unrecognized file type for test image in imageio.imread: '+goldFilename\n return (False, self.__message)\n #first check dimensionality\n if goldImage.shape != testImage.shape:\n self.__message += 'Gold and test image are not the same shape: '+\\\n str(goldImage.shape)+', '+str(testImage.shape)\n self.__same = False\n return (self.__same, self.__message)\n #pixelwise comparison\n #TODO in the future we can add greyscale, normalized coloring, etc.\n # For now just do raw comparison of right/wrong pixels\n diff = goldImage - testImage\n onlyDiffs = diff[abs(diff) > self.__zero_threshold]\n pctNumDiff = onlyDiffs.size/float(diff.size)\n if pctNumDiff > self.__rel_err:\n self.__message += 'Difference between images is too large:'+\\\n ' %2.2f pct (allowable: %2.2f) for %s and %s' %(100*pctNumDiff,\\\n 100*self.__rel_err,\n goldFilename,\n testFilename)\n self.__same = False\n return (self.__same, self.__message)", "def _photometricErrors(self, catalog=None, n_per_bin=100):\n\n if catalog is None:\n # Simple proxy for photometric errors\n release = self.config['data']['release']\n band_1 = self.config['catalog'].get('mag_1_band')\n if not band_1: band_1 = self.config['isochrone']['mag_1_field']\n band_2 = self.config['catalog'].get('mag_2_band')\n if not band_2: band_2 = self.config['isochrone']['mag_2_field']\n \n DELMIN = 0.0\n pars_1 = MAGERR_PARAMS[release][band_1]\n \n def photo_err_1(delta):\n p = pars_1\n return np.clip(np.exp(p[0]*delta+p[1])+p[2], 0, np.exp(p[0]*(DELMIN)+p[1])+p[2])\n\n pars_2 = MAGERR_PARAMS[release][band_2]\n def photo_err_2(delta):\n p = pars_2\n return np.clip(np.exp(p[0]*delta+p[1])+p[2], 0, np.exp(p[0]*(DELMIN)+p[1])+p[2])\n\n else:\n catalog.spatialBin(self.roi)\n\n if len(catalog.mag_1) < n_per_bin:\n logger.warning(\"Catalog contains fewer objects than requested to calculate errors.\")\n #n_per_bin = int(len(catalog.mag_1) / 3)\n return self._photometricErrors(catalog=None)\n \n # Band 1\n mag_1_thresh = self.mask_1.mask_roi_sparse[catalog.pixel_roi_index] - catalog.mag_1\n sorting_indices = np.argsort(mag_1_thresh)\n mag_1_thresh_sort = mag_1_thresh[sorting_indices]\n mag_err_1_sort = catalog.mag_err_1[sorting_indices]\n \n # ADW: Can't this be done with np.median(axis=?)\n mag_1_thresh_medians = []\n mag_err_1_medians = []\n for i in range(0, int(len(mag_1_thresh) / float(n_per_bin))):\n mag_1_thresh_medians.append(np.median(mag_1_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))\n mag_err_1_medians.append(np.median(mag_err_1_sort[n_per_bin * i: n_per_bin * (i + 1)]))\n \n if mag_1_thresh_medians[0] > 0.:\n mag_1_thresh_medians = np.insert(mag_1_thresh_medians, 0, -99.)\n mag_err_1_medians = np.insert(mag_err_1_medians, 0, mag_err_1_medians[0])\n \n photo_err_1 = scipy.interpolate.interp1d(mag_1_thresh_medians, mag_err_1_medians,\n bounds_error=False, 
fill_value=mag_err_1_medians[-1])\n \n # Band 2\n mag_2_thresh = self.mask_2.mask_roi_sparse[catalog.pixel_roi_index] - catalog.mag_2\n sorting_indices = np.argsort(mag_2_thresh)\n mag_2_thresh_sort = mag_2_thresh[sorting_indices]\n mag_err_2_sort = catalog.mag_err_2[sorting_indices]\n \n mag_2_thresh_medians = []\n mag_err_2_medians = []\n for i in range(0, int(len(mag_2_thresh) / float(n_per_bin))):\n mag_2_thresh_medians.append(np.median(mag_2_thresh_sort[n_per_bin * i: n_per_bin * (i + 1)]))\n mag_err_2_medians.append(np.median(mag_err_2_sort[n_per_bin * i: n_per_bin * (i + 1)]))\n \n if mag_2_thresh_medians[0] > 0.:\n mag_2_thresh_medians = np.insert(mag_2_thresh_medians, 0, -99.)\n mag_err_2_medians = np.insert(mag_err_2_medians, 0, mag_err_2_medians[0])\n \n photo_err_2 = scipy.interpolate.interp1d(mag_2_thresh_medians, mag_err_2_medians,\n bounds_error=False, fill_value=mag_err_2_medians[-1])\n \n self.photo_err_1=photo_err_1\n self.photo_err_2=photo_err_2\n\n return self.photo_err_1, self.photo_err_2", "def error_ratios_cross_val(output_folder):\n\n from parsers import CVOutputParser\n from utils import avg\n\n if not output_folder[-1] == '/':\n output_folder += '/'\n\n\n singleton_thresholds = [0, 10, 20, 30, 40, 50, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 2000, 3000, 4000]\n pair_thresholds = [0, 1, 2, 3, 4, 5, 7, 10, 15, 20, 25, 30, 40, 50, 60, 70, 80, 90, 100]\n triple_thresholds = [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 150, 200, 250]\n # Results are inserted at an offset\n # acc_error, count, maxent_best, ext_best\n c = [[[(0,0,0,0, (0,0,0)) for z in range(len(triple_thresholds))] for y in range(len(pair_thresholds))] for x in range(len(singleton_thresholds))]\n\n\n merged_file = output_folder + 'merged_estimates.tsv'\n\n iteration = 0\n for (n1, n2, n3), (est, ext, obs, ratio, triangle) in CVOutputParser.read_merged_file_disc_version(merged_file):\n\n s1, s2, s3, s12, s13, s23, s123 = triangle\n\n # Calculate errors and add the to the result matrix\n # Ratio error between estiamtes\n error = 0\n # check if both estimaters are spot on:\n if abs(ext-obs) == 0 and abs(est-obs) == 0:\n error = 1.\n # check that we are not dividing be a very small floating point\n # from extrapolation. 
If below one we just treat the error as\n # if it was 1\n if abs(ext-obs) < 1:\n error = float(abs(est-obs))\n # Get error ratio, avoid division by zero\n elif abs(ext-obs) != 0:\n error = abs(est-obs) / float(abs(ext-obs))\n # ratio_errors.append(error)\n for singleton_index, singleton_threshold in enumerate(singleton_thresholds):\n if not min(s1, s2, s3) > singleton_threshold:\n break\n for pair_index, pair_threshold in enumerate(pair_thresholds):\n if not min(s12, s13, s23) > pair_threshold:\n break\n for triple_index, triple_threshold in enumerate(triple_thresholds):\n if not s123 < triple_threshold:\n continue\n acc_error, count, maxent_best, ext_best, (s, p, t) = c[singleton_index][pair_index][triple_index]\n acc_error += error\n count += 1\n if error < 1:\n maxent_best += 1\n elif error > 1:\n ext_best += 1\n c[singleton_index][pair_index][triple_index] = (acc_error, count, maxent_best, ext_best, (singleton_threshold, pair_threshold, triple_threshold))\n if iteration % 1000000 == 0:\n print 'iteration: ', iteration\n iteration += 1\n\n # maxent_errors.append(est / float(obs))\n # ext_errors.append(ext / float(obs))\n\n # Compute average errors\n for singleton_index, singleton_threshold in enumerate(singleton_thresholds):\n for pair_index, pair_threshold in enumerate(pair_thresholds):\n for triple_index, triple_threshold in enumerate(triple_thresholds):\n (acc_error, count, maxent_best, ext_best, (s,p,t)) = c[singleton_index][pair_index][triple_index]\n if count > 0:\n c[singleton_index][pair_index][triple_index] = (acc_error / float(count), count, maxent_best, ext_best, (s,p,t))\n\n # ratio_error = sum(ratio_errors) / float(len(ratio_errors))\n # ext_ratio = avg(ext_errors)\n # maxent_ratio = avg(maxent_errors)\n\n # print 'Singletons done for threshold: ', singleton_threshold\n\n # fd.close()\n\n # fd = open(output_folder + 'parameter_cv.tsv', 'wr')\n # fd.write('singleton\\tpair\\ttriple\\tmax_ent\\text\\tratio_error\\n')\n # fd.write(singleton + '\\t' + pair + '\\t' + triple + '\\t' + maxent_ratio + '\\t' + ext_ratio + '\\t' + ratio_error + '\\n')\n # max_val = 1000\n # offset = 30\n # hist([x for x in range(max_val)[offset:]], ratio_errors[offset:max_val], color='green')\n\n return c", "def _image_difference(image_1_path, image_2_path):\n\n image_1 = Image.open(image_1_path)\n image_2 = Image.open(image_2_path)\n\n if image_1.mode != image_2.mode:\n # Different kinds of images.\n return 100\n\n if image_1.size != image_2.size:\n # Different sizes\n return 100\n\n pairs = zip(image_1.getdata(), image_2.getdata())\n if len(image_1.getbands()) == 1:\n # for gray-scale JPEGS\n dif = sum(abs(p1 - p2) for p1, p2 in pairs)\n else:\n dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))\n\n n_components = image_1.size[0] * image_1.size[1] * 3\n return (dif / 255.0 * 100) / n_components", "def evaluate_error(photo_id, reflectance_image, thresh=0.10, is_sRGB=True):\n\n if isinstance(reflectance_image, basestring):\n reflectance_image = imread(reflectance_image).astype(float) / 255.0\n elif not isinstance(reflectance_image, np.ndarray):\n reflectance_image = np.asarray(reflectance_image).astype(float) / 255.0\n\n #if reflectance_image.shape[1] != 300:\n #z = 300.0 / reflectance_image.shape[1]\n #reflectance_image = interpolation.zoom(\n #reflectance_image, zoom=(z, z, 1))\n\n rows, cols, _ = reflectance_image.shape\n if is_sRGB:\n reflectance_image_linear = srgb_to_rgb(reflectance_image)\n else:\n reflectance_image_linear = reflectance_image\n\n # get the luminance of the 
reflectance channel\n\n # fetch comparisons\n comparisons = list(\n IntrinsicPointComparison.objects.filter(\n photo_id=photo_id,\n point1__opaque=True,\n point2__opaque=True,\n darker__isnull=False,\n darker__in=(\"1\", \"2\", \"E\"),\n darker_score__isnull=False,\n darker_score__gt=0\n ).select_related('point1')\n )\n\n # fetch points\n points = IntrinsicPoint.objects.filter(photo_id=photo_id)\n point_id_to_l = {\n p.id: np.mean(reflectance_image_linear[int(p.y * rows), int(p.x * cols), :])\n for p in points\n }\n\n # ratio thresholds\n eq_thresh = 1.0 + thresh\n\n # error from a set of comparisons\n def comparison_error(comps):\n error_num = 0.0\n error_den = 0.0\n\n for c in comps:\n if c.darker not in ('1', '2', 'E'):\n raise ValueError(\"Unknown value of darker: %s\" % c.darker)\n\n l1 = max(point_id_to_l[c.point1_id], 1e-10)\n l2 = max(point_id_to_l[c.point2_id], 1e-10)\n\n if l2 / l1 > eq_thresh:\n r_darker = '1'\n elif l1 / l2 > eq_thresh:\n r_darker = '2'\n else:\n r_darker = 'E'\n\n if c.darker != r_darker:\n error_num += c.darker_score\n error_den += c.darker_score\n\n if error_den:\n return error_num / error_den\n else:\n return None\n\n # return value\n update_kwargs = {\n 'error_comparison_thresh': thresh,\n }\n\n # all errors\n update_kwargs['num'] = len(comparisons)\n if comparisons:\n update_kwargs['mean_error'] = comparison_error(comparisons)\n else:\n update_kwargs['mean_error'] = None\n\n # all dense errors\n comparisons_dense = [c for c in comparisons if c.point1.min_separation < 0.05]\n update_kwargs['num_dense'] = len(comparisons_dense)\n if comparisons_dense:\n update_kwargs['mean_dense_error'] = comparison_error(comparisons_dense)\n else:\n update_kwargs['mean_dense_error'] = None\n\n # all dense errors\n comparisons_sparse = [c for c in comparisons if c.point1.min_separation > 0.05]\n update_kwargs['num_sparse'] = len(comparisons_sparse)\n if comparisons_sparse:\n update_kwargs['mean_sparse_error'] = comparison_error(comparisons_sparse)\n else:\n update_kwargs['mean_sparse_error'] = None\n\n # equality errors\n comparisons_eq = [c for c in comparisons if c.darker == \"E\"]\n update_kwargs['num_eq'] = len(comparisons_eq)\n if comparisons_eq:\n update_kwargs['mean_eq_error'] = comparison_error(comparisons_eq)\n else:\n update_kwargs['mean_eq_error'] = None\n\n # inequality errors\n comparisons_neq = [c for c in comparisons if c.darker in (\"1\", \"2\")]\n update_kwargs['num_neq'] = len(comparisons_neq)\n if comparisons_neq:\n update_kwargs['mean_neq_error'] = comparison_error(comparisons_neq)\n else:\n update_kwargs['mean_neq_error'] = None\n\n # sum of two split errors\n if (update_kwargs['mean_eq_error'] is not None\n or update_kwargs['mean_neq_error'] is not None):\n f = lambda x: x if x else 0\n update_kwargs['mean_sum_error'] = (\n f(update_kwargs['mean_eq_error']) +\n f(update_kwargs['mean_neq_error']))\n else:\n update_kwargs['mean_sum_error'] = None\n\n return update_kwargs", "def visualize_prediction_error(self, predicted_state):", "def printErrors(self):\n for filename in sorted(self.texterrors):\n fileerrors = self.texterrors[filename]\n print(\n \"\\n\",\n 70 * \"=\",\n \"\\n%s, %i possible errors found.\" % (filename, len(fileerrors)),\n \"Suppressing %i error codes: %s\"\n % (len(self.ignorecodes), \",\".join(self.ignorecodes)),\n \"\\n\",\n 70 * \"=\",\n )\n # print(fileerrors)\n for e in fileerrors:\n if e.name not in self.ignorecodes:\n print(\" \", e.name, e)\n for filename in self.imgerrors:\n fileerrors = self.imgerrors[filename]\n for e in 
fileerrors:\n print(filename)\n print(\" \", e)", "def compute_errors(data_sources):\n for source in data_sources:\n source.add_column('errors', source.get_column('actuals')\n - source.get_column('forecasts'))", "def imcalc(image1, image2, out_im, op='-'):\n min_ext = 2\n\n pf_1 = pyfits.open(image1)\n pf_2 = pyfits.open(image2)\n\n next_1 = len(pf_1)\n next_2 = len(pf_2)\n\n # Inputs must have at least 1 primary header and 1 data ext\n if next_1 < min_ext:\n pf_1.close()\n pf_2.close()\n raise ValueError('image1 has {} ext but expect >={}.'.format(\n next_1, min_ext))\n\n # Inputs must have same number of extensions\n if next_1 != next_2:\n pf_1.close()\n pf_2.close()\n raise ValueError('image1 has {} ext but image2 has {}.'.format(\n next_1, next_2))\n\n out_phdr = pyfits.PrimaryHDU()\n out_phdr.header.add_history('IMAGE1 {}'.format(os.path.basename(image1)))\n out_phdr.header.add_history('IMAGE2 {}'.format(os.path.basename(image2)))\n out_phdr.header.add_history('IMAGE1 {} IMAGE2'.format(op))\n\n out_hdu = pyfits.HDUList([out_phdr])\n\n for i in xrange(1, next_1):\n data_1 = pf_1[i].data\n data_2 = pf_2[i].data\n\n if data_1 is None or data_2 is None:\n module_logger.warn('input(s) has NoneType data.')\n hdu = pyfits.ImageHDU()\n\n else:\n if data_1.dtype != data_2.dtype:\n module_logger.warn(\n 'In ext {}, image1 is {} but image2 is {}'.format(\n i, data_1.dtype, data_2.dtype))\n\n if op == '/':\n out_data = data_1 / data_2\n else:\n out_data = data_1 - data_2\n\n hdu = pyfits.ImageHDU(out_data)\n\n # Inherit EXTNAME and EXTVER from image1\n hdu.update_ext_name(pf_1[i].name)\n hdu.update_ext_version(pf_1[i]._extver)\n\n out_hdu.append(hdu)\n\n out_hdu.writeto(out_im, clobber=True)\n\n pf_1.close()\n pf_2.close()", "def test_plot_error_map_over_100_qubit_backend_v2(self):\n backend = FakeWashingtonV2()\n img_ref = path_to_diagram_reference(\"washington_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)", "def _brug_iter_error(epsbr, eps1, eps2, shape, L, f1, size):\n f2 = 1.0 - f1\n # If appropriate calculate a size effect using Equations 10.38 and 10.39 in Sihvola\n size_factor = calculate_size_factor(size)\n leps1 = np.dot(L, (eps1 - epsbr))\n leps2 = size_factor * np.dot(L, (eps2 - epsbr))\n leps1 = average_tensor(leps1)\n leps2 = average_tensor(leps2)\n a1 = np.linalg.inv(epsbr + leps1)\n a2 = np.linalg.inv(epsbr + leps2)\n # alpha1 and 2 are the polarisabilities of 1 and 2 in the effective medium\n eps1av = average_tensor(eps1)\n eps2av = average_tensor(eps2)\n alpha1 = np.dot((eps1av-epsbr), a1)\n alpha2 = np.dot((eps2av-epsbr), a2)\n # the error or residual matrix should be zero for a bruggeman solution\n error = f1*alpha1 + f2*alpha2\n error = np.linalg.norm(error)\n m1 = f1*np.dot(eps1, a1)+f2*np.dot(eps2, a2)\n m2 = np.linalg.inv(f1*a1 + f2*a2)\n damp = 0.0\n epsbr = (1.0 - damp)*np.dot(m1, m2) + damp*epsbr\n trace = np.trace(epsbr) / 3.0\n epsbr = np.array([[trace, 0, 0], [0, trace, 0], [0, 0, trace]])\n return epsbr, error", "def test_plot_error_map_backend_v2(self):\n backend = FakeKolkataV2()\n img_ref = path_to_diagram_reference(\"kolkata_v2_error.png\")\n fig = plot_error_map(backend)\n with BytesIO() as img_buffer:\n fig.savefig(img_buffer, format=\"png\")\n img_buffer.seek(0)\n self.assertImagesAreEqual(Image.open(img_buffer), img_ref, 0.2)\n plt.close(fig)", "def plot_errors(iwp_pred, 
iwp_pred_std, iwp_test):\n f, axs = plt.subplots(1, 2, figsize = (10, 6))\n\n bins = np.logspace(-5, 1, 11)\n mapes = np.zeros(bins.size - 1)\n mpes = np.zeros(bins.size - 1)\n stds = np.zeros(bins.size - 1)\n\n for i in range(10):\n x_l = bins[i]\n x_r = bins[i + 1]\n\n inds = (iwp_pred >= x_l) * (iwp_pred < x_r)\n mapes[i] = np.mean(mape(iwp_pred[inds], iwp_test[inds]))\n mpes[i] = np.mean(mpe(iwp_pred[inds], iwp_test[inds]))\n stds[i] = 100.0 * np.mean(iwp_pred_std[inds] / iwp_pred[inds])\n\n x = 0.5 * (bins[1:] + bins[:-1])\n\n axs[0].set_title(\"MAPE & Std. Dev.\")\n axs[0].plot(x, mapes, label = 'MAPE', lw = 2)\n axs[0].plot(x, stds, label = 'Std. Dev.', lw = 2)\n axs[0].set_xlabel(\"IWP $[kg / m^2]$\")\n axs[0].set_ylabel(\"Error [%]\")\n axs[0].set_xscale(\"log\")\n axs[0].set_yscale(\"log\")\n axs[0].set_ylim([1e1, 1e5])\n axs[0].legend()\n\n axs[1].plot(x, mpes, lw = 2)\n axs[1].set_title(\"MPE\")\n axs[1].set_xlabel(\"IWP $[kg / m^2]$\")\n axs[1].set_ylabel(\"Error [%]\")\n axs[1].set_xscale(\"log\")\n axs[1].set_ylim([-1000.0, 1000.0])\n axs[1].legend()\n\n plt.tight_layout()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Produce a symbol generator
def symbol_factory(packager,prefix): i=1 while True: yield packager(prefix+str(i)) i +=1
[ "def gen_symbol(length):\r\n\r\n def c():\r\n return chr(random.randint(ord('A'), ord('Z')))\r\n\r\n s = ''\r\n for i in range(length):\r\n s += c()\r\n return s", "def retr_symmetry_generators(struct,ini):\n #hall = struct.spacegroup_hall()\n ini[\"symgen\"] = struct.get_symmetry_generators()\n return ini", "def _terminal_symbols_generator(self):\n UPPAs = list(\n list(range(0xE000, 0xF8FF + 1)) + list(range(0xF0000, 0xFFFFD + 1)) + list(range(0x100000, 0x10FFFD + 1)))\n for i in UPPAs:\n yield (chr(i))\n raise ValueError(\"Too many input strings.\")", "def iter_symbols(self):\r\n for i in range(self.num_symbols()):\r\n yield self.get_symbol(i)", "def _terminalSymbolsGenerator(self):\n if self.end_symbol_set == \"unicode\":\n symbol_set = UNICODE_SYMBOLS\n else:\n symbol_set = ASCII_SYMBOLS\n\n for c in symbol_set:\n yield(c)\n raise ValueError(\"To many input strings.\")", "def prepare_symbols(self):", "def generate_symbols(self):\n\n logger.debug(f'- Generating symbols for {self.class_name}')\n\n # clear symbols storage\n self.f_list, self.g_list = list(), list()\n self.f_matrix, self.g_matrix = Matrix([]), Matrix([])\n\n # process tex_names defined in model\n # -----------------------------------------------------------\n for key in self.parent.tex_names.keys():\n self.tex_names[key] = Symbol(self.parent.tex_names[key])\n for instance in self.parent.discrete.values():\n for name, tex_name in zip(instance.get_names(), instance.get_tex_names()):\n self.tex_names[name] = tex_name\n # -----------------------------------------------------------\n\n for var in self.cache.all_params_names:\n self.inputs_dict[var] = Symbol(var)\n\n for var in self.cache.all_vars_names:\n tmp = Symbol(var)\n self.vars_dict[var] = tmp\n self.inputs_dict[var] = tmp\n if var in self.cache.vars_int:\n self.vars_int_dict[var] = tmp\n\n # store tex names defined in `self.config`\n for key in self.config.as_dict():\n tmp = Symbol(key)\n self.inputs_dict[key] = tmp\n if key in self.config.tex_names:\n self.tex_names[tmp] = Symbol(self.config.tex_names[key])\n\n # store tex names for pretty printing replacement later\n for var in self.inputs_dict:\n if var in self.parent.__dict__ and self.parent.__dict__[var].tex_name is not None:\n self.tex_names[Symbol(var)] = Symbol(self.parent.__dict__[var].tex_name)\n\n self.inputs_dict['dae_t'] = Symbol('dae_t')\n self.inputs_dict['sys_f'] = Symbol('sys_f')\n self.inputs_dict['sys_mva'] = Symbol('sys_mva')\n\n self.lambdify_func[0]['Indicator'] = lambda x: x\n self.lambdify_func[0]['imag'] = np.imag\n self.lambdify_func[0]['real'] = np.real\n self.lambdify_func[0]['im'] = np.imag\n self.lambdify_func[0]['re'] = np.real\n\n self.vars_list = list(self.vars_dict.values()) # useful for ``.jacobian()``", "def symbols(self):\n def _iter_symbols(symbol_values):\n # The initial charset doesn't matter, as the start codes have the same symbol values in all charsets.\n charset = 'A'\n\n shift_charset = None\n for symbol_value in symbol_values:\n if shift_charset:\n symbol = self._val2sym[shift_charset][symbol_value]\n shift_charset = None\n else:\n symbol = self._val2sym[charset][symbol_value]\n\n if symbol in (self.Special.START_A, self.Special.CODE_A):\n charset = 'A'\n elif symbol in (self.Special.START_B, self.Special.CODE_B):\n charset = 'B'\n elif symbol in (self.Special.START_C, self.Special.CODE_C):\n charset = 'C'\n elif symbol in (self.Special.SHIFT_A,):\n shift_charset = 'A'\n elif symbol in (self.Special.SHIFT_B,):\n shift_charset = 'B'\n\n yield symbol\n\n return 
list(_iter_symbols(self.symbol_values))", "def _next(self):\n s = Symbol(\"%s%i\" % (self._label, self._counterVar))\n self._counterVar += 1\n return s", "def gen_by_rules(self, indices, I):\n symbols = I\n\n for i in indices:\n symbols = self.gen(i, symbols)\n\n return symbols", "def make_lex(symbols):\n ...", "def _get_next_symbol(self):\n for i in range(0, len(self.symbol)):\n if self.symbol[i] >= 126:\n if i == len(self.symbol) - 1:\n self.symbol.append(33)\n self.symbol[i] = 33\n else:\n self.symbol[i] = self.symbol[i] + 1\n break\n symbol = \"\"\n for sym in self.symbol:\n symbol = symbol + chr(sym)\n return symbol", "def test_generate_symvar(self):\n mommy.make('eshop.Order')\n mommy.make('eshop.Order')\n order = Order()\n order.generate_symvar()\n self.assertEqual(order.symvar, '20173')", "def _generate_oscillator_seq_code(self):\n pass", "def symbols(self) -> Iterable[str]:\n\n symbols = set([context.symbol for context in self.map.keys()])\n yield from symbols", "def generate(self):\n alpha = self.alpha\n p = random.random() # get a random value between 0 and 1\n q = 0.0\n for sym in alpha: # pick a symbol with a frequency proportional to its probability\n q = q + self[sym]\n if p < q:\n return sym\n return alpha[len(alpha)]", "def getSymbol(id):", "def genPW():\n newPW = \"\"\n i = 1\n while i <= 12:\n newChar = genChar()\n newPW = newPW + newChar\n i += 1\n return newPW", "def get_symbol_map():\n functions = {}\n for ea in Segments():\n for funcea in Functions(SegStart(ea), SegEnd(ea)):\n size = FindFuncEnd(funcea) - funcea\n functions[funcea] = (GetFunctionName(funcea), size)\n # It may not be necessary to sort by ea, but be safe...\n output_lines = []\n for i, (ea, (name, size)) in enumerate(sorted(functions.items())):\n if len(name) > 255:\n print \"ClemSym: truncating name\", name\n name = name[:255]\n line = \"%d: %s @ %07x %d\" % (i, name, ea, size)\n output_lines.append(line)\n return '\\n'.join(output_lines)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Convert a maxima MRAT expression to Sage SR Maxima has an optimised representation for multivariate rational expressions. The easiest way to translate those to SR is by first asking maxima to give the generic representation of the object. That is what RATDISREP does in maxima.
def mrat_to_sage(expr): return max_to_sage(meval(EclObject([[ratdisrep],expr])))
[ "def frame_rms ( frame , expression , cuts = '' ) : \n return frame_central_moment ( frame , order = 2 , expression = expression , cuts = cuts )", "def _2MASS_query(ra_deg, dec_deg, rad_deg, maxmag=20,\n maxsources=-1):\n vquery = Vizier(columns=['+_r', '2MASS', 'RAJ2000', 'DEJ2000',\n 'Jmag','e_Jmag','Jsnr',\n 'Hmag','e_Hmag','Hsnr',\n 'Kmag','e_Kmag','Ksnr',\n 'dup', 'Ndet'],\n # column_filters={\"gmag\":\n # (\"<%f\" % maxmag)\n # \"imag\":\n # (\"<%f\" % maxmag)},\n row_limit = maxsources)\n field = SkyCoord(ra=ra_deg, dec=dec_deg,\n unit=(u.deg, u.deg),\n frame='icrs')\n return vquery.query_region(field,\n width=(\"%fd\" % rad_deg),\n catalog=\"II/246\")[0]", "def msr (riskfree_rate,er,cov):\r\n n=er.shape[0]\r\n init_guess=np.repeat(1/n,n)\r\n bounds=((0.0,1.0),)*n \r\n def neg_sharpe_ratio(weights,riskfree_rate,er,cov):\r\n \"\"\"\r\n Returns the negative of Sharpe Ratio, given weights\r\n \"\"\"\r\n r=portfolio_return(weights,er)\r\n vol=portfolio_vol(weights,cov)\r\n return -(r-riskfree_rate)/vol\r\n \r\n weights_sum_to_1={'type':'eq','fun':lambda weights:np.sum(weights)-1}\r\n results=minimize(neg_sharpe_ratio,init_guess,args=(riskfree_rate,er,cov,),method='SLSQP',options={'disp':False},constraints=(weights_sum_to_1),bounds=bounds)\r\n return results.x", "def _mr(moment, res):\n # check that at least some scans exist with the moments\n if len(scan_info[moment][res]['scans']) == 0:\n return None\n\n # determine scans, max_gates, dualpol_offset\n scans = scan_info[moment][res]['scans']\n max_gates = max(scan_info[moment][res]['ngates'])\n dualpol_offset = 0\n if moment == 'REF':\n moments = ['REF']\n else:\n moments = ['VEL', 'SW']\n if len(scan_info['ZDR'][res]['scans']) != 0:\n moments = ['VEL', 'SW', 'ZDR', 'PHI', 'RHO']\n dualpol_offset = (scan_info['ZDR'][res]['scans'][0] -\n scan_info['VEL'][res]['scans'][0])\n\n # read the radar from the archive file\n return _radar_from_nexradl2(nfile, moments, scans, max_gates,\n field_mapping, field_metadata,\n dualpol_offset)", "def nrml_from_shapefile(shapefile,\n shapefile_faultname_attribute,\n shapefile_dip_attribute,\n shapefile_sliprate_attribute,\n source_model_name,\n simple_fault_tectonic_region,\n magnitude_scaling_relation,\n rupture_aspect_ratio,\n upper_depth,\n lower_depth,\n a_value,\n b_value,\n min_mag,\n max_mag,\n rake,\n output_dir,\n quiet):\n # Get geometry\n fault_traces, faultnames, dips, \\\n sliprate, fault_lengths = parse_line_shapefile(shapefile,\n shapefile_faultname_attribute,\n shapefile_dip_attribute, \n shapefile_sliprate_attribute)\n\n # Output is written line-by-line to this list\n output_xml = []\n\n append_xml_header(output_xml, source_model_name)\n\n\n # Loop through each fault and add source specific info\n for i in range(len(fault_traces)):\n simple_fault_id = i\n A = fault_lengths[i]*(float(lower_depth)-float(upper_depth))\n # Calculate M_max from scaling relations\n scalrel = WC1994()\n max_mag = scalrel.get_median_mag(A, float(rake))\n# print A\n # Calculate GR a values from slip rate\n if sliprate[i] != '\"\"':\n print sliprate[i]\n a_value, moment_rate = fault_slip_rate_GR_conversion.slip2GR(sliprate[i], A,\n float(b_value), \n float(max_mag),\n M_min=0.0)\n append_rupture_geometry(output_xml, fault_traces[i],\n dips[i], simple_fault_id,\n faultnames[i], upper_depth,\n lower_depth, simple_fault_tectonic_region)\n\n append_earthquake_information(output_xml,\n magnitude_scaling_relation,\n rupture_aspect_ratio, a_value, b_value,\n min_mag, max_mag, rake)\n\n # Close xml\n output_xml.append(' 
</sourceModel>')\n output_xml.append('</nrml>')\n\n # Add newlines\n output_xml = [oxml + '\\n' for oxml in output_xml]\n\n return output_xml", "def protein_rmsd(grofile,trajfile,**kwargs):\n\n\t#---unpack\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\t\n\t#---prepare universe\t\n\tslice_name = kwargs['calc']['slice_name']\n\tgroup = kwargs['calc']['group']\n\tgrofile,trajfile = [work.slice(sn)[slice_name][group][i] for i in ['gro','xtc']]\n\tuni = MDAnalysis.Universe(work.postdir+grofile,work.postdir+trajfile)\n\tnframes = len(uni.trajectory)\n\tprotein = uni.select_atoms('protein and name CA')\n\n\t#---reference frame\n\tuni.trajectory[0]\n\tr0 = protein.coordinates()\n\tr0 -= mean(r0,axis=0)\n\n\t#---collect coordinates\n\tnframes = len(uni.trajectory)\n\tcoords = []\n\tfor fr in range(0,nframes):\n\t\tuni.trajectory[fr]\n\t\tr1 = protein.coordinates()\n\t\tcoords.append(r1)\n\n\t#---simple RMSD code\n\trmsds = []\n\tfor fr in range(nframes):\n\t\tstatus('RMSD',i=fr,looplen=nframes)\n\t\tr1 = coords[fr]\n\t\tr1 -= mean(r1,axis=0)\n\t\t#---computation of RMSD validated against VMD but no reflection\n\t\tU,s,Vt = linalg.svd(dot(r0.T,r1))\n\t\tsigner = identity(3)\n\t\tsigner[2,2] = sign(linalg.det(dot(Vt.T,U)))\n\t\tRM = dot(dot(U,signer),Vt)\n\t\trmsds.append(sqrt(mean(sum((r0.T-dot(RM,r1.T))**2,axis=0))))\n\n\t#---pack\n\tattrs,result = {},{}\n\tresult['rmsds'] = array(rmsds)\n\treturn result,attrs", "def protein_rmsd(grofile,trajfile,**kwargs):\n\n\t#---unpack\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\t\n\t#---prepare universe\t\n\tuni = MDAnalysis.Universe(grofile,trajfile)\n\tnframes = len(uni.trajectory)\n\tprotein = uni.select_atoms('protein and name CA')\n\n\t#---reference frame\n\tuni.trajectory[0]\n\tr0 = protein.positions\n\tr0 -= mean(r0,axis=0)\n\n\t#---collect coordinates\n\tnframes = len(uni.trajectory)\n\tcoords,times = [],[]\n\tfor fr in range(0,nframes):\n\t\tuni.trajectory[fr]\n\t\tr1 = protein.positions\n\t\tcoords.append(r1)\n\t\ttimes.append(uni.trajectory.time)\n\n\t#---simple RMSD code\n\trmsds = []\n\tfor fr in range(nframes):\n\t\tstatus('RMSD',i=fr,looplen=nframes)\n\t\tr1 = coords[fr]\n\t\tr1 -= mean(r1,axis=0)\n\t\t#---computation of RMSD validated against VMD but no reflection\n\t\tU,s,Vt = linalg.svd(dot(r0.T,r1))\n\t\tsigner = identity(3)\n\t\tsigner[2,2] = sign(linalg.det(dot(Vt.T,U)))\n\t\tRM = dot(dot(U,signer),Vt)\n\t\trmsds.append(sqrt(mean(sum((r0.T-dot(RM,r1.T))**2,axis=0))))\n\n\t#---pack\n\tattrs,result = {},{}\n\tresult['rmsds'] = array(rmsds)\n\tresult['timeseries'] = array(times)\n\treturn result,attrs", "def fraction_to_rational(fra):\n from pyexiv2.utils import Rational\n if fra.__class__.__name__=='Fraction':\n return Rational(fra.limit_denominator().numerator,fra.limit_denominator().denominator)\n else:\n return fra", "def msr(rf_rate, er, cov):\n n = er.shape[0]\n initial_weights = np.repeat(1/n, n)\n bounds = ((0.0, 1.0),) * n\n weights_sum_to_1 = {\n 'type': 'eq',\n 'fun': lambda w: w.sum() - 1 \n }\n def neg_sharpe_ratio(w):\n ret = portfolio_return(w, er)\n vol = portfolio_vol(w, cov)\n return -(ret - rf_rate) / vol\n results = minimize(neg_sharpe_ratio, initial_weights, method='SLSQP', \n options={'disp': False}, constraints=(weights_sum_to_1), \n bounds=bounds)\n return results.x", "def itkRegionalMaximaImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkRegionalMaximaImageFilterISS2ISS2 *\":\n return _itkRegionalMaximaImageFilterPython.itkRegionalMaximaImageFilterISS2ISS2_cast(obj)", "def MR(s, A, W):\n\n\n MRs 
= -float('inf')\n wstar = None\n\n for w in W.extreme_points:\n for t in A:\n val = Utils.dot(Utils.vector_sub(t, s), w)\n if val> MRs:\n MRs= val\n wstar=w\n\n return MRs, wstar", "def sdss_query(ra_deg, dec_deg, rad_deg, maxmag=20,\n maxsources=-1):\n vquery = Vizier(columns=['+_r', 'objID', 'RA_ICRS', 'e_RA_ICRS',\n 'DE_ICRS','e_DE_ICRS','umag','e_umag','gmag', 'e_gmag',\n 'rmag', 'e_rmag', 'imag', 'e_imag', 'zmag', 'e_zmag',\n 'zsp','spCl', 'subCl'],\n # column_filters={\"gmag\":\n # (\"<%f\" % maxmag),\n # \"imag\":\n # (\"<%f\" % maxmag)},\n row_limit = maxsources)\n field = SkyCoord(ra=ra_deg, dec=dec_deg,\n unit=(u.deg, u.deg),\n frame='icrs')\n return vquery.query_region(field,\n width=(\"%fd\" % rad_deg),\n catalog=\"V/147/sdss12\")[0]", "def AdaptiveResolutionGSM(s, coords):\n galCoords = SkyCoord(frame=\"icrs\", ra=coords.PSFRAs*u.rad, dec=coords.PSFDecs*u.rad).transform_to(\"galactic\")\n NSIDE = s.adaptiveHEALPixMinNSIDE\n interpoltedGSMRotated = np.zeros(coords.nPSFPixels) \n while NSIDE <= s.mapNSIDE:\n thisGSM = GlobalSkyModel(s.freq, s.GSMlocation, NSIDE).hpMap\n interpoltedGSMRotated[coords.newPSFNSIDEs==NSIDE] = hp.get_interp_val(thisGSM,-galCoords[coords.newPSFNSIDEs==NSIDE].b.radian+np.pi/2, galCoords[coords.newPSFNSIDEs==NSIDE].l.radian)\n NSIDE *= 2\n\n return interpoltedGSMRotated", "def SMR_epi(B, A, W):\n\n vars2 = W.vars.copy()\n vars2 = ['x'] + vars2\n formatted_constraints_epigraph = W.formatted_constraints.copy()\n for alpha in B:\n constraint = ''\n for i in range(len(alpha)):\n if i < len(alpha) - 1:\n constraint += str(-alpha[i]) + ' w' + str(i) + ' + '\n else:\n constraint += str(-alpha[i]) + ' w' + str(i) + ' + 1 x >= 0'\n\n formatted_constraints_epigraph.append(constraint)\n\n epigraph_B = Polytope.Polytope(vars2, frac=W.frac, epi_var='x')\n epigraph_B.add_formatted_constraints(formatted_constraints_epigraph)\n\n\n # computation of SMR(B,A) and update Wp := Wp ∪ WB; where WB is the projection in W of the extreme points of the\n # epigraph of B\n eps = []\n MR_W_A_B = -float('inf')\n for p in epigraph_B.extreme_points:\n #projection ext pints epigraph in W\n #W0_ep = p[1:]\n W0_ep = list(p[1:])\n eps.append(W0_ep)\n if W.frac:\n for j in range(len(W0_ep)):\n W0_ep[j]=float(W0_ep[j])\n\n #update Wp_valA and evaluating i-th ext point of epi_B fot the computation of SMR(B,A)\n valA = - float('inf')\n for i in range(len(A)):\n alpha = A[i]\n dot = Utils.dot(alpha, W0_ep)\n if dot > valA:\n valA = dot\n\n\n if valA - p[0] > MR_W_A_B:\n MR_W_A_B = valA - p[0]\n # print(W0_ep)\n\n\n return MR_W_A_B, eps", "def build_model(series, p, d, q, S, exog_data, P=None, D=None, Q=None):\n if P is None:\n P = p\n if D is None:\n D = d\n if Q is None:\n Q = q\n model = SARIMAX(series, order=(p,d,q),\n seasonal_order=(P,D,Q,S),\n exog=exog_data,\n enforce_invertibility=True)\n results = model.fit()\n return results", "def S(U, *args):\n\n kwargs = args[0] #hack to get kwargs back out..\n im = Immersion(**kwargs)\n return im.calc_S(U)", "def scale_spec(wave, flux, ivar, sn, wave_ref, flux_ref, ivar_ref, mask=None, mask_ref=None, scale_method='auto', min_good=0.05,\n ref_percentile=70.0, maxiters=5, sigrej=3, max_median_factor=10.0,\n npoly=None, hand_scale=None, sn_min_polyscale=2.0, sn_min_medscale=0.5, debug=False, show=False):\n\n if mask is None:\n mask = ivar > 0.0\n if mask_ref is None:\n mask_ref = ivar_ref > 0.0\n\n\n # Interpolate the reference spectrum onto the wavelengths of the spectrum that will be rescaled\n flux_ref_int, ivar_ref_int, mask_ref_int = 
interp_spec(wave, wave_ref, flux_ref, ivar_ref, mask_ref)\n\n # estimates the SNR of each spectrum and the stacked mean SNR\n #rms_sn, weights = sn_weights(wave, flux, ivar, mask, sn_smooth_npix)\n #sn = np.sqrt(np.mean(rms_sn**2))\n\n if scale_method == 'auto':\n if sn > sn_min_polyscale:\n method_used = 'poly'\n elif ((sn <= sn_min_polyscale) and (sn > sn_min_medscale)):\n method_used = 'median'\n else:\n method_used = 'none'\n else:\n method_used = scale_method\n\n # Estimate the scale factor\n if method_used == 'poly':\n # Decide on the order of the polynomial rescaling\n if npoly is None:\n if sn > 25.0:\n npoly = 5 # quintic, Is this stable?\n elif sn > 8.0:\n npoly = 3 # cubic\n elif sn >= 5.0:\n npoly = 2 # quadratic\n else:\n npoly = 1 # linear\n scale, fit_tuple, flux_scale, ivar_scale, outmask = solve_poly_ratio(\n wave, flux, ivar, flux_ref_int, ivar_ref_int, npoly,mask=mask, mask_ref=mask_ref_int,\n ref_percentile=ref_percentile, debug=debug)\n elif method_used == 'median':\n # Median ratio (reference to spectrum)\n med_scale = robust_median_ratio(flux, ivar, flux_ref_int, ivar_ref_int,ref_percentile=ref_percentile,min_good=min_good,\n mask=mask, mask_ref=mask_ref_int, maxiters=maxiters,\n max_factor=max_median_factor,sigrej=sigrej)\n # Apply\n flux_scale = flux * med_scale\n ivar_scale = ivar * 1.0/med_scale**2\n scale = np.full_like(flux,med_scale)\n elif method_used == 'hand':\n # Input?\n if hand_scale is None:\n msgs.error(\"Need to provide hand_scale parameter, single value\")\n flux_scale = flux * hand_scale\n ivar_scale = ivar * 1.0 / hand_scale ** 2\n scale = np.full(flux.size, hand_scale)\n elif method_used == 'none':\n flux_scale = flux.copy()\n ivar_scale = ivar.copy()\n scale = np.ones_like(flux)\n else:\n msgs.error(\"Scale method not recognized! Check documentation for available options\")\n # Finish\n if show:\n scale_spec_qa(wave, flux, ivar, wave_ref, flux_ref, ivar_ref, scale, method_used, mask = mask, mask_ref=mask_ref,\n title='Scaling Applied to the Data')\n\n return flux_scale, ivar_scale, scale, method_used", "def cast(obj: 'itkLightObject') -> \"itkMeanReciprocalSquareDifferenceImageToImageMetricISS2ISS2 *\":\n return _itkMeanReciprocalSquareDifferenceImageToImageMetricPython.itkMeanReciprocalSquareDifferenceImageToImageMetricISS2ISS2_cast(obj)", "def msr(riskfree_rate, er, cov):\n n = er.shape[0]\n initial_weights = np.repeat(1/n, n) # Equally distr. weights\n bounds = ((0.0, 1.0),)*n # n bounds of (0,1) tuples\n constraint_weight_sum_is_one = {\n 'type': 'eq',\n 'fun': lambda weights: np.sum(weights) - 1\n }\n\n def neg_sharpe_ratio(weights, riskfree_rate, er, cov):\n \"\"\"\n Returns the inverse of the Sharpe ratio given:\n * weights: allocation of the assets\n \"\"\"\n r = portfolio_return(weights, er)\n v = portfolio_vol(weights, cov)\n return -(r - riskfree_rate)/v\n\n results = minimize(neg_sharpe_ratio, initial_weights, args=(riskfree_rate, er, cov,), method=\"SLSQP\", options={\n 'disp': False}, constraints=(constraint_weight_sum_is_one), bounds=bounds)\n return results.x" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Translate a python object into a maxima object Mainly just a wrapper around EclObject, but needs to be in place because some objects might be translated better into maxima than just into lisp (think vectors and matrices).
def pyobject_to_max(obj): return EclObject(obj)
[ "def computeFrameToObject(*args):\n return _almathinternal.computeFrameToObject(*args)", "def convert(cls, obj: typ.Any, target_type: ScriptTypes):\n\n # TODO: Catch and report other parsing or conversion exceptions.\n\n source_type: ScriptTypes = cls.get_type_of(obj)\n\n if source_type is None:\n raise TypeConversionException('Source object is not a recognized scripting type')\n\n if source_type is target_type:\n # Return a the same object.\n return obj\n\n elif target_type is ScriptTypes.NULL:\n # Result is always null.\n return None\n\n elif target_type is ScriptTypes.STRING:\n if source_type is ScriptTypes.SEQUENCE:\n # join_func = cls.get_join_function()\n\n str_items = []\n for item in obj:\n item_conv = cls.convert(obj=item, target_type=ScriptTypes.STRING)\n str_items.append(item_conv)\n return ', '.join(str_items)\n\n elif source_type is ScriptTypes.MAPPING:\n # map_flat_func = cls.get_mapping_flatten_function()\n\n # seq = map_flat_func(obj)\n seq = tuple(obj.keys())\n return cls.convert(seq, target_type=ScriptTypes.STRING)\n\n elif source_type is ScriptTypes.NULL:\n # Null becomes an empty string.\n return ''\n\n # Anything else can convert to string.\n return str(obj)\n\n elif target_type is ScriptTypes.INTEGER:\n if source_type in {ScriptTypes.STRING, ScriptTypes.FLOAT, ScriptTypes.DECIMAL,\n ScriptTypes.BOOLEAN}:\n return int(obj)\n\n elif target_type is ScriptTypes.FLOAT:\n if source_type in {ScriptTypes.STRING, ScriptTypes.INTEGER, ScriptTypes.DECIMAL,\n ScriptTypes.BOOLEAN}:\n return float(obj)\n\n elif target_type is ScriptTypes.DECIMAL:\n if source_type in {ScriptTypes.STRING, ScriptTypes.INTEGER, ScriptTypes.FLOAT,\n ScriptTypes.BOOLEAN}:\n return decimal.Decimal(obj)\n\n elif target_type is ScriptTypes.BOOLEAN:\n if source_type is ScriptTypes.STRING:\n # TODO: Do some high-level coercion.\n return True\n\n if source_type in {ScriptTypes.INTEGER, ScriptTypes.FLOAT, ScriptTypes.DECIMAL,\n ScriptTypes.SEQUENCE, ScriptTypes.MAPPING}:\n return bool(obj)\n\n # TODO: Add option to allow None -> False conversion.\n\n elif target_type is ScriptTypes.DATE:\n if source_type is ScriptTypes.STRING:\n return dup.parse(obj).date()\n\n elif target_type is ScriptTypes.TIME:\n if source_type is ScriptTypes.STRING:\n return dup.parse(obj).time()\n\n elif target_type is ScriptTypes.SEQUENCE:\n if source_type is ScriptTypes.MAPPING:\n # TODO: Allow for using keys, values, or pairs.\n # TODO: Better to reshape, or encapsulate?\n return tuple(obj.keys())\n\n return (obj,)\n\n elif target_type is ScriptTypes.MAPPING:\n if source_type is ScriptTypes.SEQUENCE:\n # Treat as a mapping with integer indices as keys.\n return dict(enumerate(obj))\n\n # A singleton mapping, with a 0 as the key.\n return {0: obj}\n\n raise TypeConversionException(f'Unable to convert object of type {source_type.name} '\n f'to type {target_type.name}')", "def itkBoxSpatialObject2_cast(obj: 'itkLightObject') -> \"itkBoxSpatialObject2 *\":\n return _itkBoxSpatialObjectPython.itkBoxSpatialObject2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkBoxSpatialObject2 *\":\n return _itkBoxSpatialObjectPython.itkBoxSpatialObject2_cast(obj)", "def translate(obj, a0, a1):\n atoms = getats(obj)\n M = getmat(atoms)\n p0 = (a0.r if isinstance(a0, Atom) else a0)\n p1 = (a1.r if isinstance(a1, Atom) else a1)\n if len(p0)!=3 or len(p1)!=3:\n print('ERROR: invalid a0 or a1!')\n exit(1)\n M = matvec.translate(M, p0, p1)\n putmat(atoms, M)", "def WrappedObject(self) -> object:", "def to_object(inp, objtype='player'):\r\n obj, typ = 
identify_object(inp)\r\n if typ == objtype:\r\n return obj\r\n if objtype == 'player':\r\n if typ == 'object':\r\n return obj.player\r\n if typ == 'string':\r\n return _PlayerDB.objects.get(user_username__iexact=obj)\r\n if typ == 'dbref':\r\n return _PlayerDB.objects.get(id=obj)\r\n print objtype, inp, obj, typ, type(inp)\r\n raise CommError()\r\n elif objtype == 'object':\r\n if typ == 'player':\r\n return obj.obj\r\n if typ == 'string':\r\n return _ObjectDB.objects.get(db_key__iexact=obj)\r\n if typ == 'dbref':\r\n return _ObjectDB.objects.get(id=obj)\r\n print objtype, inp, obj, typ, type(inp)\r\n raise CommError()\r\n elif objtype == 'channel':\r\n if typ == 'string':\r\n return _ChannelDB.objects.get(db_key__iexact=obj)\r\n if typ == 'dbref':\r\n return _ChannelDB.objects.get(id=obj)\r\n print objtype, inp, obj, typ, type(inp)\r\n raise CommError()", "def _make_ospray_object(self, *args, **kwargs):\n\t\traise NotImplementedError", "def decode(obj):\n\n return BSON(obj).decode()[Serialiser.WRAPPER]", "def _make_object(obj, *args, **kwargs):\n return _BackendSelector._backend.__dict__[obj](*args, **kwargs)", "def itkRegionalMaximaImageFilterISS2ISS2_cast(obj: 'itkLightObject') -> \"itkRegionalMaximaImageFilterISS2ISS2 *\":\n return _itkRegionalMaximaImageFilterPython.itkRegionalMaximaImageFilterISS2ISS2_cast(obj)", "def _mapTransform(self, obj, tr):\n # convert to a type that can be mapped\n retType = None\n if isinstance(obj, (tuple, list)):\n retType = type(obj)\n if np.isscalar(obj[0]):\n if len(obj) == 2:\n obj = Qt.QPointF(*obj)\n elif len(obj) == 3:\n obj = Qt.QVector3D(*obj)\n else:\n raise TypeError(\"Cannot map %s of length %d.\" % (type(obj).__name__, len(obj)))\n elif isinstance(obj[0], np.ndarray):\n obj = np.concatenate([x[np.newaxis, ...] for x in obj])\n else:\n raise Exception ('Cannot map--object of type %s ' % str(type(obj[0])))\n\n if isinstance(obj, Qt.QPointF):\n ret = tr.map(obj)\n if retType is not None:\n return retType([ret.x(), ret.y()])\n return ret\n elif isinstance(obj, Qt.QVector3D):\n ret = tr.map(obj)\n if retType is not None:\n return retType([ret.x(), ret.y(), ret.z()])\n return ret\n\n elif isinstance(obj, np.ndarray):\n m2 = pg.transformCoordinates(tr, obj)\n return m2\n else:\n raise Exception('Cannot map--object of type %s ' % str(type(obj)))", "def _convert_laygo_object(objname, obj, scl, layermap, lib_export, sn, pin_label_height=0.1,\n pin_annotate_layer=['text', 'drawing'], text_height=0.1,\n abstract_instances=False, abstract_instances_layer=['prBoundary', 'drawing']):\n # TODO: this code is not very readable. 
Refactor it.\n\n if obj.__class__ == laygo2.object.Rect:\n xy = obj.xy * scl\n hext = obj.hextension * scl # extensions for routing wires.\n vext = obj.vextension * scl\n bx1, bx2 = sorted(xy[:, 0].tolist()) # really need to sort the coordinates?\n by1, by2 = sorted(xy[:, 1].tolist())\n ll = np.array([bx1, by1]) # lower-left\n ur = np.array([bx2, by2]) # upper-right\n _xy = np.vstack([ll,ur])\n c = [[round(_xy[0][0]-hext), round(_xy[0][1]-vext)], [round(_xy[0][0]-hext), round(_xy[1][1]+vext)],\n [round(_xy[1][0]+hext), round(_xy[1][1]+vext)], [round(_xy[1][0]+hext), round(_xy[0][1]-vext)],\n [round(_xy[0][0]-hext), round(_xy[0][1]-vext)]] # build list\n l = layermap[obj.layer[0]][obj.layer[1]]\n lib_export.add_boundary(sn, l[0], l[1], c)\n logging.debug('ExportGDS: Rect:' + objname + ' layer:' + str(l) + ' xy:' + str(c))\n elif obj.__class__ == laygo2.object.Path:\n xy = obj.xy * scl\n width = obj.width * scl\n extn = obj.extension * scl\n l = layermap[obj.layer[0]][obj.layer[1]]\n lib_export.add_path(sn, l[0], l[1], xy.tolist(), width, pathtype=4, bgnextn=extn, endextn=extn)\n logging.debug('ExportGDS: Path:' + objname + ' layer:' + str(l) + ' xy:' + str(xy))\n elif obj.__class__ == laygo2.object.Pin:\n if obj.elements is None:\n _objelem = [obj]\n else:\n _objelem = obj.elements\n for idx, _obj in np.ndenumerate(_objelem):\n xy = _obj.xy * scl\n bx1, bx2 = sorted(xy[:,0].tolist()) # again, let's check this.\n by1, by2 = sorted(xy[:,1].tolist())\n ll = np.array([bx1, by1]) # lower-left\n ur = np.array([bx2, by2]) # upper-right\n _xy = np.vstack([ll,ur])\n c = [[round(_xy[0][0]), round(_xy[0][1])], [round(_xy[0][0]), round(_xy[1][1])],\n [round(_xy[1][0]), round(_xy[1][1])], [round(_xy[1][0]), round(_xy[0][1])],\n [round(_xy[0][0]), round(_xy[0][1])]] # build list\n l = layermap[_obj.layer[0]][_obj.layer[1]]\n lib_export.add_boundary(sn, l[0], l[1], c)\n lib_export.add_text(sn, l[0], l[1], [[(_xy[0][0]+_xy[1][0])//2, (_xy[0][1]+_xy[1][1])//2]],\n string=_obj.netname, textHeight=pin_label_height * scl)\n if not _obj.name == _obj.netname: # if netname is different from pinname, create an annotate text\n if _obj.name is not None:\n l_ann = layermap[pin_annotate_layer[0]][pin_annotate_layer[1]]\n lib_export.add_text(sn, l_ann[0], l_ann[1],\n [[(_xy[0][0]+_xy[1][0])//2, (_xy[0][1]+_xy[1][1])//2]],\n string=_obj.name, textHeight=pin_label_height * scl)\n logging.debug('ExportGDS: Pin:' + objname + ' net:' + _obj.netname + ' layer:' + str(l) + ' xy:' + str(c))\n elif obj.__class__ == laygo2.object.physical.Text:\n xy = obj.xy * scl\n l = layermap[obj.layer[0]][obj.layer[1]]\n _xy = [round(_xy0) for _xy0 in xy]\n lib_export.add_text(sn, l[0], l[1], [_xy], string=obj.text, textHeight=round(text_height * scl))\n logging.debug('ExportGDS: Text:' + objname + ' text:' + obj.text + ' layer:' + str(l) + ' xy:' + str(_xy))\n elif obj.__class__ == laygo2.object.Instance:\n _convert_laygo_object_instance(lib_export, sn, objname, obj, scl, abstract_instances, abstract_instances_layer,\n layermap)\n elif obj.__class__ == laygo2.object.VirtualInstance: # virtual instance\n virt_struc_name = sn + '_VirtualInst_' + objname\n s_virt = lib_export.add_structure(virt_struc_name)\n for en, e in obj.native_elements.items():\n _convert_laygo_object(objname=objname+'_'+en, obj=e, scl=scl, layermap=layermap, lib_export=lib_export,\n sn=virt_struc_name, pin_label_height=pin_label_height, pin_annotate_layer=pin_annotate_layer,\n text_height=text_height, abstract_instances=abstract_instances,\n 
abstract_instances_layer=abstract_instances_layer)\n xy = obj.xy * scl\n xyl = xy.tolist()\n if np.array_equal(obj.shape, np.array([1, 1])) or (obj.shape is None): # single instance\n lib_export.add_instance(sn, virt_struc_name, [xyl], obj.transform)\n logging.debug('ExportGDS: VirtualInstance:' + objname + ' cellname:' + obj.cellname + ' xy:' + str(xy))\n else: # mosaic\n xy_mosaic = [[round(xyl[0]), round(xyl[1])],\n [round(xyl[0] + obj.shape[0] * (obj.spacing[0] * scl)), round(xyl[1])],\n [round(xyl[0]), round(xyl[1] + obj.shape[1] * (obj.spacing[1] * scl))]]\n\n lib_export.add_instance_array(sn, virt_struc_name, obj.shape[0], obj.shape[1], xy_mosaic,\n obj.transform)\n logging.debug('ExportGDS: VirtualInstance:' + objname + ' cellname:' + obj.cellname + ' xy:' + str(xy_mosaic)\n + ' shape:' + str(obj.shape.tolist()) + ' spacing:' + str(obj.spacing.tolist()))", "def _py2rpy(obj):\n raise NotImplementedError(\n \"Conversion 'py2rpy' not defined for objects of type '%s'\" %\n str(type(obj))\n )", "def cast(obj: 'itkLightObject') -> \"itkRegionalMaximaImageFilterISS2ISS2 *\":\n return _itkRegionalMaximaImageFilterPython.itkRegionalMaximaImageFilterISS2ISS2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkRegionalMaximaImageFilterIF2IF2 *\":\n return _itkRegionalMaximaImageFilterPython.itkRegionalMaximaImageFilterIF2IF2_cast(obj)", "def itkRegionalMaximaImageFilterIF2IF2_cast(obj: 'itkLightObject') -> \"itkRegionalMaximaImageFilterIF2IF2 *\":\n return _itkRegionalMaximaImageFilterPython.itkRegionalMaximaImageFilterIF2IF2_cast(obj)", "def cast(obj: 'itkLightObject') -> \"itkCompositeTransformD2 *\":\n return _itkCompositeTransformPython.itkCompositeTransformD2_cast(obj)", "def itkRegionalMaximaImageFilterISS3ISS3_cast(obj: 'itkLightObject') -> \"itkRegionalMaximaImageFilterISS3ISS3 *\":\n return _itkRegionalMaximaImageFilterPython.itkRegionalMaximaImageFilterISS3ISS3_cast(obj)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Convert a maxima expression to sage symbolic ring
def max_to_sage(expr):
    global op_sage_to_max, op_max_to_sage
    global sym_sage_to_max, sym_max_to_sage
    if expr.consp():
        op_max=caar(expr)
        if op_max in special_max_to_sage:
            return special_max_to_sage[op_max](expr)
        if not(op_max in op_max_to_sage):
            op=sageop.next()
            op_max_to_sage[op_max]=op
            op_sage_to_max[op]=op_max
        op=op_max_to_sage[op_max]
        max_args=cdr(expr)
        args=[]
        while not(max_args.nullp()):
            args.append(max_to_sage(car(max_args)))
            max_args=cdr(max_args)
        return op(*args)
    elif expr.symbolp():
        if not(expr in sym_max_to_sage):
            sym=sagesym.next()
            sym_max_to_sage[expr]=sym
            sym_sage_to_max[sym]=expr
        sym=sym_max_to_sage[expr]
        return sym
    else:
        return expr.python()
[ "def mrat_to_sage(expr):\n return max_to_sage(meval(EclObject([[ratdisrep],expr])))", "def frame_rms ( frame , expression , cuts = '' ) : \n return frame_central_moment ( frame , order = 2 , expression = expression , cuts = cuts )", "def stanley_reisner_ring(self, base_ring=ZZ):\n R = self._stanley_reisner_base_ring(base_ring)\n products = []\n for f in self.minimal_nonfaces():\n prod = 1\n for v in f:\n prod *= R(self._gen_dict[v])\n products.append(prod)\n return R.quotient(products)", "def ase_to_pyiron(ase_obj):\n try:\n import ase\n except ImportError:\n raise ValueError(\"ASE package not yet installed\")\n element_list = ase_obj.get_chemical_symbols()\n cell = ase_obj.cell\n positions = ase_obj.get_positions()\n pbc = ase_obj.get_pbc()\n spins = ase_obj.get_initial_magnetic_moments()\n if all(spins == np.array(None)) or sum(np.abs(spins)) == 0.0:\n pyiron_atoms = Atoms(\n elements=element_list, positions=positions, pbc=pbc, cell=cell\n )\n else:\n if any(spins == np.array(None)):\n spins[spins == np.array(None)] = 0.0\n pyiron_atoms = Atoms(\n elements=element_list,\n positions=positions,\n pbc=pbc,\n cell=cell,\n magmoms=spins,\n )\n if hasattr(ase_obj, \"constraints\") and len(ase_obj.constraints) != 0:\n for constraint in ase_obj.constraints:\n constraint_dict = constraint.todict()\n if constraint_dict[\"name\"] == \"FixAtoms\":\n if \"selective_dynamics\" not in pyiron_atoms._tag_list.keys():\n pyiron_atoms.add_tag(selective_dynamics=[True, True, True])\n pyiron_atoms.selective_dynamics[\n constraint_dict[\"kwargs\"][\"indices\"]\n ] = [False, False, False]\n elif constraint_dict[\"name\"] == \"FixScaled\":\n if \"selective_dynamics\" not in pyiron_atoms._tag_list.keys():\n pyiron_atoms.add_tag(selective_dynamics=[True, True, True])\n pyiron_atoms.selective_dynamics[\n constraint_dict[\"kwargs\"][\"a\"]\n ] = constraint_dict[\"kwargs\"][\"mask\"]\n else:\n warnings.warn(\"Unsupported ASE constraint: \" + constraint_dict[\"name\"])\n return pyiron_atoms", "def string_to_list_of_solutions(s):\n from sage.categories.all import Objects\n from sage.structure.sequence import Sequence\n from sage.calculus.calculus import symbolic_expression_from_maxima_string\n v = symbolic_expression_from_maxima_string(s, equals_sub=True)\n return Sequence(v, universe=Objects(), cr_str=True)", "def polygens(base_ring, names=\"x\"):\n from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\n return PolynomialRing(base_ring, names).gens()", "def AdaptiveResolutionGSM(s, coords):\n galCoords = SkyCoord(frame=\"icrs\", ra=coords.PSFRAs*u.rad, dec=coords.PSFDecs*u.rad).transform_to(\"galactic\")\n NSIDE = s.adaptiveHEALPixMinNSIDE\n interpoltedGSMRotated = np.zeros(coords.nPSFPixels) \n while NSIDE <= s.mapNSIDE:\n thisGSM = GlobalSkyModel(s.freq, s.GSMlocation, NSIDE).hpMap\n interpoltedGSMRotated[coords.newPSFNSIDEs==NSIDE] = hp.get_interp_val(thisGSM,-galCoords[coords.newPSFNSIDEs==NSIDE].b.radian+np.pi/2, galCoords[coords.newPSFNSIDEs==NSIDE].l.radian)\n NSIDE *= 2\n\n return interpoltedGSMRotated", "def make_sympy(self, xml=None): # lint-amnesty, pylint: disable=too-many-statements\n\n if self.the_sympy:\n return self.the_sympy\n\n if xml is None:\t # root\n if not self.is_mathml():\n return my_sympify(self.expr)\n if self.is_presentation_mathml():\n cmml = None\n try:\n cmml = self.cmathml\n xml = etree.fromstring(str(cmml))\n except Exception as err:\n if 'conversion from Presentation MathML to Content MathML was not successful' in cmml: # lint-amnesty, pylint: 
disable=unsupported-membership-test\n msg = \"Illegal math expression\"\n else:\n msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)\n raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n else:\n xml = etree.fromstring(self.expr)\n xml = self.fix_greek_in_mathml(xml)\n self.the_sympy = self.make_sympy(xml[0])\n return self.the_sympy\n\n def gettag(expr):\n return re.sub('{http://[^}]+}', '', expr.tag)\n\n def op_plus(*args):\n return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]\n\n def op_times(*args):\n return reduce(operator.mul, args)\n\n def op_minus(*args):\n if len(args) == 1:\n return -args[0]\n if not len(args) == 2: # lint-amnesty, pylint: disable=unneeded-not\n raise Exception('minus given wrong number of arguments!')\n #return sympy.Add(args[0],-args[1])\n return args[0] - args[1]\n\n opdict = {\n 'plus': op_plus,\n 'divide': operator.div, # lint-amnesty, pylint: disable=no-member\n 'times': op_times,\n 'minus': op_minus,\n 'root': sympy.sqrt,\n 'power': sympy.Pow,\n 'sin': sympy.sin,\n 'cos': sympy.cos,\n 'tan': sympy.tan,\n 'cot': sympy.cot,\n 'sinh': sympy.sinh,\n 'cosh': sympy.cosh,\n 'coth': sympy.coth,\n 'tanh': sympy.tanh,\n 'asin': sympy.asin,\n 'acos': sympy.acos,\n 'atan': sympy.atan,\n 'atan2': sympy.atan2,\n 'acot': sympy.acot,\n 'asinh': sympy.asinh,\n 'acosh': sympy.acosh,\n 'atanh': sympy.atanh,\n 'acoth': sympy.acoth,\n 'exp': sympy.exp,\n 'log': sympy.log,\n 'ln': sympy.ln,\n }\n\n def parse_presentation_symbol(xml):\n \"\"\"\n Parse <msub>, <msup>, <mi>, and <mn>\n \"\"\"\n tag = gettag(xml)\n if tag == 'mn':\n return xml.text\n elif tag == 'mi':\n return xml.text\n elif tag == 'msub':\n return '_'.join([parse_presentation_symbol(y) for y in xml])\n elif tag == 'msup':\n return '^'.join([parse_presentation_symbol(y) for y in xml])\n raise Exception('[parse_presentation_symbol] unknown tag %s' % tag)\n\n # parser tree for Content MathML\n tag = gettag(xml)\n\n # first do compound objects\n\n if tag == 'apply':\t\t# apply operator\n opstr = gettag(xml[0])\n if opstr in opdict:\n op = opdict[opstr] # pylint: disable=invalid-name\n args = [self.make_sympy(expr) for expr in xml[1:]]\n try:\n res = op(*args)\n except Exception as err:\n self.args = args # pylint: disable=attribute-defined-outside-init\n self.op = op # pylint: disable=attribute-defined-outside-init, invalid-name\n raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args)) # lint-amnesty, pylint: disable=raise-missing-from\n return res\n else:\n raise Exception('[formula]: unknown operator tag %s' % (opstr))\n\n elif tag == 'list':\t\t# square bracket list\n if gettag(xml[0]) == 'matrix':\n return self.make_sympy(xml[0])\n else:\n return [self.make_sympy(expr) for expr in xml]\n\n elif tag == 'matrix':\n return sympy.Matrix([self.make_sympy(expr) for expr in xml])\n\n elif tag == 'vector':\n return [self.make_sympy(expr) for expr in xml]\n\n # atoms are below\n\n elif tag == 'cn':\t\t\t# number\n return sympy.sympify(xml.text)\n\n elif tag == 'ci':\t\t\t# variable (symbol)\n if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'):\t # subscript or superscript\n usym = parse_presentation_symbol(xml[0])\n sym = sympy.Symbol(str(usym))\n else:\n usym = six.text_type(xml.text)\n if 'hat' in usym:\n sym = my_sympify(usym)\n else:\n if usym == 'i' and self.options is not None and 'imaginary' in self.options:\t # i = 
sqrt(-1)\n sym = sympy.I\n else:\n sym = sympy.Symbol(str(usym))\n return sym\n\n else:\t\t\t\t# unknown tag\n raise Exception('[formula] unknown tag %s' % tag)", "def function2():\r\n x = sp.Symbol('x')\r\n y = sp.Symbol('y')\r\n # f = 0.2 * x - 20 + 3 * y ** 2\r\n f = 6*x + y**2 - 100\r\n\r\n return f", "def gen_AngularMomentum (s):\n \n Sz = np.diag(np.arange(-s, s+1))\n eigenvalues = np.arange(-s, s+1)\n\n d = int(2*s) + 1\n I = np.identity(d)\n\n Splus = np.zeros((d, d))\n Sminus = np.zeros((d, d))\n\n for m in range(d - 1):\n splusfactor = sqrt(s*(s + 1) - eigenvalues[m]*(eigenvalues[m] + 1))\n sminusfactor = sqrt(s*(s + 1) - eigenvalues[m+1]*(eigenvalues[m+1] - 1))\n Splus = Splus + splusfactor * np.outer(I[m, :], I[m+1,:])\n Sminus = Sminus + sminusfactor * np.outer(I[m+1, :], I[m, :])\n\n Sx = 1/2 * (Splus + Sminus)\n Sy = -1j/2 * (Splus - Sminus)\n\n return Sx, Sy, Sz", "def simplify(expr): \n \n from symgp.superexpressions import SuperMatSymbol\n \n depth = get_max_depth(expand_to_fullexpr(expr))\n \n simps = [] # The simplified expressions we have obtained with the associated substitutions\n subs = {} # Pairs substituted expressions with the substitutions made\n usedSubs = [] # The expressions we have substituted we have used so far\n \n # Get the expressions at every depth\n #exprs_by_depth = get_exprs_at_depth(expr, range(depth+1))\n \n usedNames = SuperMatSymbol.getUsedNames()\n \n min_expr = expr \n for d in range(depth, -1, -1): \n # Get the exprs at each depth for the new shortest expressions\n exprs_by_depth = get_exprs_at_depth(min_expr, range(depth+1))\n \n sub_exprs = exprs_by_depth[d]\n \n min_syms = math.inf\n \n # For each sub expression at level d check for copies in other parts of expressions\n for s in sub_exprs:\n repetitions = 0\n \n # Find other similar expressions to s\n for k in exprs_by_depth.keys():\n if k == d:\n continue\n \n if s in exprs_by_depth[k]: \n repetitions += exprs_by_depth[k].count(s) \n \n # Make replacements if expression 's' appears more than twice throughout expression or\n # it corresponds to the special matrix inverse lemma\n if (repetitions > 0 or check_inv_lemma(s)) and s not in usedSubs:\n \n # Update the used substituted expressions\n usedSubs.append(s)\n \n # TODO: Allow for using best or range of simplified exprs from previous depths\n \n # Lower case for vectors and upper case for matrices\n if s.shape[0] != 1 and s.shape[1] != 1:\n avail_prefixes = string.ascii_uppercase\n else:\n avail_prefixes = string.ascii_lowercase\n \n # Keep on searching for available replacement names \n for c in avail_prefixes:\n i = 0\n r_name = c + '_{' + str(i) + '}'\n while r_name in usedNames and i < 99:\n i += 1\n r_name = c + '_{' + str(i) + '}'\n \n if not r_name in usedNames:\n r = SuperMatSymbol(s.shape[0], s.shape[1], r_name, expanded=s)\n \n repl_list = [(s,r)] \n simp_expr = replace(min_expr, repl_list).doit()\n \n if not subs.get(s):\n subs[s] = r\n \n simps.append(simp_expr.doit())\n \n num_syms = get_num_symbols(simp_expr)\n if num_syms < min_syms:\n min_syms = num_syms\n min_expr = simp_expr.doit()\n\n # Check if we can collect any symbols on simp_expr. 
If we can add to simps.\n if isinstance(simp_expr, MatAdd):\n ends_of_expr_collection = get_ends(simp_expr)\n \n for ends_of_expr in ends_of_expr_collection:\n ends_dict_left = defaultdict(list)\n ends_dict_right = defaultdict(list)\n ends_dict_both = defaultdict(list)\n \n # Collect left ends and right ends\n for l in range(len(ends_of_expr)):\n if len(ends_of_expr[l]) == 2:\n ends_dict_left[ends_of_expr[l][0]].append(l)\n ends_dict_right[ends_of_expr[l][1]].append(l)\n ends_dict_both[ends_of_expr[l]].append(l)\n else:\n ends_dict_left[ends_of_expr[l][0]].append(l)\n ends_dict_right[ends_of_expr[l][0]].append(l)\n \n # If there are two or more repetitions of a symbol, collect \n for key, val in ends_dict_left.items():\n simped = collect(simp_expr,key,'left').doit()\n if len(val) >= 2 and not simped in simps:\n simps.append(simped)\n \n for key, val in ends_dict_right.items():\n simped = collect(simp_expr,key,'right').doit()\n if len(val) >= 2 and not simped in simps:\n simps.append(simped)\n \n # For cases where both ends are repeated two or more times (e.g. A*P*A + A*Q*A + B), collect\n for key, val in ends_dict_both.items():\n simped = collect(simp_expr,[key[0],key[1]],['left','right']).doit()\n if len(val) >= 2 and not simped in simps:\n simps.append(simped) \n break\n\n simps = sorted(simps, key=lambda e: get_num_symbols(e))\n \n return simps, subs", "def traceXRS(smax,pmin,fun,L=200.,nodegap=25.,Nshell=1e3,energy=1000.,\\\n rough=1.,offaxis=0.):\n #Loop through sections and construct node positions\n N = len(smax)\n rsec = []\n for sec in range(N):\n #Establish radius vector\n rad = np.array([])\n rout = 0.\n #Compute shell gap\n gap = (pmin[sec]+L-1e4)*3e-3\n #First node position\n rad = np.append(rad,200.+(1300./N)*sec)\n rout = conic.primrad(pmin[sec]+L,rad[-1],\\\n np.sqrt(1e4**2-rad[-1]**2))\n while rout+gap < 200.+(1300./N)*(sec+1):\n rad = np.append(rad,rout+gap)\n rout = conic.primrad(pmin[sec]+L,rad[-1],\\\n np.sqrt(1e4**2-rad[-1]**2))\n #Add to list of node positions\n rsec.append(rad)\n\n #Trace through all shells, computing reflectivity and geometric area\n #for each shell\n for i in range(N):\n for r in rsec[i]:\n #Set up aperture\n z = np.sqrt(1e4**2-r**2)\n a0 = conic.primrad(pmin[i],r,z)\n a1 = conic.primrad(pmin[i]+L,r,z) \n rays = sources.annulus(a0,a1,Nshell)\n tran.transform(rays,0,0,-z,0,0,0)\n\n #Set up weights\n weights = np.repeat((a1**2-a0**2) * np.pi / 100. 
/ Nshell,Nshell)\n\n #Trace to primary\n psi = fun[i](r)\n surf.wsPrimary(rays,r,z,psi)\n rays[4] = rays[4]+np.sin(offaxis)\n rays[6] = -np.sqrt(1.-rays[4]**2)\n tran.reflect(rays)\n ang = anal.grazeAngle(rays)\n weights = weights*\\\n pol.computeReflectivities(ang,energy,rough,1,cons)[0]\n\n #Trace to secondary\n surf.wsSecondary(rays,r,z,psi)\n tran.reflect(rays)\n ang = anal.grazeAngle(rays)\n weights = weights*\\\n pol.computeReflectivities(ang,energy,rough,1,cons)[0]\n\n #Handle vignetting\n ind = np.logical_and(rays[3]>smax[i]-L,rays[3]<smax[i])\n if sum(ind) == 0:\n pdb.set_trace()\n rays = tran.vignette(rays,ind=ind)\n weights = weights[ind]\n\n #Go to focus\n try:\n surf.flat(rays)\n except:\n pdb.set_trace()\n\n #Accumulate master rays\n try:\n mrays = [np.concatenate([mrays[ti],rays[ti]]) for ti in range(10)]\n mweights = np.concatenate([mweights,weights])\n except:\n mrays = rays\n mweights = weights\n\n return mrays,mweights", "def src_to_sympy(src):\n a_ast = src_to_ast(src, translation_unit=False)\n a = ast_to_asr(a_ast)\n py_src = call_visitor(a)\n return py_src", "def sch(self, peg):\n return self.xyz(peg.ellipsoid).sch(peg)", "def __init__(self,xsym,ax,Lmax,Mmax,lmax,parity='natural',ax2=None,psum=None,Lmin=None,Mmin=None,lsum=None): \n\n # set the defaults\n def default_if_None(val,deflt): \n if val is None: \n return deflt\n else:\n return val\n\n self.xsym=xsym\n self.ax =ax\n self.bx =default_if_None(ax2,ax)\n self.ax.show('radial axis 1')\n self.bx.show('radial axis 2')\n L0=default_if_None(Lmin,Lmax)\n M0=default_if_None(Mmin,Mmax)\n ls=default_if_None(lsum,2*lmax)+1\n ks=default_if_None(psum,self.ax.order()+self.bx.order())\n\n self.gaunt=GauntCoeffTable(2*lmax)\n self.bang=[]\n self.len=-1\n block_i0=0\n count=0\n for L in range(L0,Lmax+1):\n for M in range(M0,Mmax+1):\n for l1 in range(lmax+1):\n for l2 in range(lmax+1):\n if l1+l2>ls: continue\n if parity=='natural' and (L+l1+l2)%2==1: continue\n if parity=='unnatural' and (L+l1+l2)%2==0: continue\n if xsym!=0 and l1<l2: continue # skip exchange symmetric angular part\n self.bang.append(BasTwoAngle(L,M,l1,l2))\n ba=self.bang[-1]\n\n # generate product basis\n for e1 in self.ax.e:\n for e2 in self.bx.e:\n ba.brad.append(BasTwoRadial(e1.centrifugal(l1),e2.centrifugal(l2)))\n br=ba.brad[-1]\n for k1 in range(e1.n):\n for k2 in range(e2.n):\n count+=1\n br.k1.append(k1)\n br.k2.append(k2)\n itotal=block_i0+e1.i0+k1+self.ax.len()*(e2.i0+k2)\n# print 'block',L,M,l1,l2,itotal,block_i0\n self.len=max(self.len,itotal+1)\n br.i.append(itotal)\n block_i0=block_i0+self.ax.len()*self.bx.len() \n print 'total',self.len", "def _gromacs_str(op_name, gro_name, sys_name, job):\n if op_name == 'em' and job.statepoint()['an_forcefield'] == 'lopes_flour' and job.statepoint()['cat_forcefield'] == 'lopes':\n mdp = signac.get_project().fn('util/mdp_files/{}.mdp'.format(op_name))\n cmd = (\n 'gmx_sp grompp -f {mdp} -c init.gro -p init.top -o em.tpr && srun -n 1 mdrun_mpi_sp -deffnm em')\n else:\n mdp = signac.get_project().fn(\n 'src/util/mdp_files/{}-{}.mdp'.format(op_name, job.sp.T))\n cmd = (\n 'gmx_sp grompp -f {mdp} -c {gro}.gro -p init.top -o {op}.tpr && srun -n 1 mdrun_mpi_sp -deffnm {op}')\n return workspace_command(cmd.format(mdp=mdp, op=op_name, gro=gro_name, sys=sys_name))", "def acres_to_edge_of_square(acres):\n # sqft=acres*43560\n sqmtr=acres*4046.86\n return sqmtr", "def editMainExpression(objectName=\"\", splashDisc=\"\", oceanShader=\"\", objectNameEmitter=\"\", wakeEmitter=\"\", splashDiscEmitter=\"\"):\r\n \r\n 
oldMainExoression = cmds.expression(\"ocean_MainExpression\",query=True, string=True)\r\n \r\n startExpression = \"\\n////////////////////////////// Start %s Expression //////////////////////////////\\n\"%objectName\r\n \r\n newExp = \"\"\r\n newExp += \"float $%s_particleSprayRate = 3000;\\n\"%objectName\r\n newExp += \"float $%s_particleBubblesRate = 100;\\n\" %objectName\r\n newExp += \"float $%s_fluidDisplacement = 6.0;\\n\" %objectName\r\n newExp += \"float $%s_fluidFoam = 2.0;\\n\" %objectName\r\n newExp += \"float $%s_u = .I[0];\\n\" %objectName\r\n newExp += \"float $%s_v = .I[1];\\n\" %objectName\r\n newExp += \"float $%s_disp[] = `colorAtPoint -u $%s_u -v $%s_v %s`;\\n\" %(objectName,objectName,objectName,oceanShader)\r\n newExp += \"float $%s_lastY = `getAttr -time (frame - 2) %s.translateY`;\\n\" %(objectName,objectName)\r\n newExp += \"float $%s_curY = %s.translateY;\\n\" %(objectName,objectName)\r\n newExp += \"float $%s_ydiff = $%s_lastY - $%s_curY;\\n\" %(objectName,objectName,objectName)\r\n \r\n newExp += \"if( $%s_curY < 0.5 ){\\n\\t\"%objectName\r\n newExp += \"%s.rate = $%s_particleBubblesRate;\"%(objectNameEmitter,objectName)\r\n newExp += \"\\n} else {\\n\\t\"\r\n newExp += \"%s.rate = 0;\\n}\\n\"%objectNameEmitter\r\n newExp += \"if( $%s_ydiff < 0 ){\\n\\t\"%objectName\r\n newExp += \"$%s_ydiff = -$%s_ydiff;\"%(objectName,objectName)\r\n newExp += \"\\n}\\n\"\r\n newExp += \"if( $%s_curY > -1 && $%s_curY < 0.6 ){\\n\\t\"%(objectName,objectName)\r\n newExp += \"%s.fluidDensityEmission = $%s_fluidDisplacement;\\n\\t\"%(wakeEmitter,objectName)\r\n newExp += \"%s.fluidHeatEmission = $%s_fluidFoam;\"%(wakeEmitter,objectName)\r\n newExp += \"\\n} else {\\n\\t\"\r\n newExp += \"%s.fluidDensityEmission = 0;\\n\\t\"%wakeEmitter\r\n newExp += \"%s.fluidHeatEmission = 0;\"%wakeEmitter\r\n newExp += \"\\n}\\n\"\r\n newExp += \"if( $%s_curY > -1 && $%s_curY < 0.5 && $%s_ydiff > 0.05 ){\\n\\t\"%(objectName,objectName,objectName)\r\n newExp += \"%s.rate = $%s_particleSprayRate * $%s_ydiff;\\n\\t\"%(splashDiscEmitter,objectName,objectName)\r\n newExp += \"float $%s_speed = $%s_ydiff * 10;\\n\\t\"%(objectName,objectName)\r\n newExp += \"if( $%s_speed > 10 ){\\n\\t\\t\"%objectName\r\n newExp += \"$%s_speed = 10;\"%objectName\r\n newExp += \"\\n\\t}\\n\\t\"\r\n newExp += \"%s.speed = $%s_speed;\"%(splashDiscEmitter,objectName)\r\n newExp += \"\\n} else {\\n\\t\"\r\n newExp += \"%s.rate = 0;\"%splashDiscEmitter\r\n newExp += \"\\n}\\n\"\r\n newExp += \"%s.translateY = $%s_disp[0];\\n\"%(splashDisc,objectName)\r\n newExp += \"float $%s_dummy = %s.displacement;\\n\"%(objectName,oceanShader)\r\n\r\n endExpression = \"////////////////////////////// End %s Expression //////////////////////////////\\n\\n\"%objectName\r\n\r\n totalExp = oldMainExoression+startExpression+newExp+endExpression\r\n \r\n cmds.expression (\"ocean_MainExpression\",string=totalExp ,edit=True, alwaysEvaluate=1, unitConversion=\"all\")\r\n \r\n return totalExp", "def _Biot_Savart_SingleRing(ring_wzr,fieldpoint_xyz):\n\n # Jackson's approximation only works far from the RING!!\n # See RingArray.py for a better way.\n raise Exception\n x=fieldpoint_xyz[0]\n y=fieldpoint_xyz[1]\n z=fieldpoint_xyz[2]-ring_wzr[1]\n R=ring_wzr[2]\n R2=R*R\n r=sqrt(x*x+y*y+z*z)\n r2=r*r\n R2pr2=R2+r2\n #print \"z: \",z,\" r: \",r\n if z==0.0 and r==0.0:\n costheta=1.0\n sintheta=0.0\n else:\n costheta=z/float(r)\n sintheta=sqrt(1-costheta*costheta)\n sin2theta=sintheta*sintheta\n 
Br=(mu_0*R2*costheta/(2*pow(R2pr2,1.5)))*(1+15*R2*r2*sin2theta/(4*R2pr2*R2pr2))\n Btheta=-((mu_0*R2*sintheta)/(4*pow(R2pr2,2.5)))*(2*R2-r2+((15*R2*r2*sin2theta)/(8*R2pr2*R2pr2))*(4*R2-3*r2))\n Bphi=0\n fact=ring_wzr[0]\n #print Br\n Bx,By,Bz=xyz_from_rtp_vect([fact*Br,fact*Btheta,fact*Bphi],xyz_to_rtp_point([x,y,z]))\n return [Bx,By,Bz]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the velocity search space ranges (Vs from DWA paper)
def calculate_Vs(self): return [self.robot.min_vel, self.robot.max_vel, self.robot.min_omega, self.robot.max_omega]
[ "def calculate_Vr(self, robot_state):\n ### Calculate Velocity spaces\n Vs = self.calculate_Vs()\n Vd = self.calculate_Vd(robot_state)\n\n ### Resulting search space range\n Vr_v_min = max(Vs[0], Vd[0]) # Resulting Minimum Linear velocity Vr_v_min\n Vr_v_max = min(Vs[1], Vd[1]) # Resulting Maximum Linear velocity Vr_v_max\n Vr_omega_min = max(Vs[2], Vd[2]) # Resulting Minimum Angular velocity Vr_omega_min\n Vr_omega_max = min(Vs[3], Vd[3]) # Resulting Maximum Angular velocity Vr_omega_max \n\n # Generate search space for velocities\n Vr_v = np.arange(Vr_v_min, Vr_v_max, self.robot.v_resolution)\n Vr_omega = np.arange(Vr_omega_min, Vr_omega_max, self.robot.omega_resolution)\n\n return Vr_v, Vr_omega", "def __filterVelocity(self):\n pass\n # windowedVelocity = dict()\n # maxTimestamp = max(self.angularVelocityDict.keys())\n # for t in self.angularVelocityDict:\n # index = int(t/step)\n # if index not in windowedVelocity:\n # windowedVelocity[index] = list()\n # windowedVelocity[index].append(self.angularVelocityDict[t])\n # self.angularVelocityWindow = (step, list())\n # angVel = self.angularVelocityWindow[1]\n # for index in windowedVelocity:\n # angVel.append(\n # sum(windowedVelocity[index])/len(windowedVelocity[index]))", "def compute_ground_truth_velocity(self):\n\n # initial velocity for timestep 0 is assumed as zero\n # from the given problem statement\n self.gt_vx = [0]\n self.gt_vy = [0]\n for timestep in range(1, len(self.gt_data[0])):\n self.gt_vx.append((self.gt_data[0][timestep] - self.gt_data[0][timestep-1])/0.1)\n self.gt_vy.append((self.gt_data[1][timestep] - self.gt_data[1][timestep-1])/0.1)", "def CalcVelocity(u,v,dxpsi,dypsi,psi,dx,dy):\n\t#Calculate gradients for air velocity\n\t#forward on bottom? Backward on top?\n\n\t#dypsi[0,:] = (psi[1,:] - psi[0,:])/dy\n\t#dypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy\n\t#dypsi[1:-2,:] = (psi[2:-1,:]-psi[0:-3,:])/(2*dy) #Centered difference\n\n\t\n\t#x = 0, x = L\n#\tdxpsi[:,0]=(psi[:,1]-psi[:,-2])/(2*dx)\n\t#dxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx)\n\t#\n\t#Interior\n\t#dxpsi[:,1:-2]= (psi[:,2:-1]-psi[:,0:-3])/(2*dx) #centered difference\n\n\t\n\t#y = 0\n\tdypsi[0,:] = (psi[1,:] - psi[0,:])/dy\n\t#y = L\n\tdypsi[-1,:] = (psi[-1,:] - psi[-2,:])/dy\n\t#Interior\n\tdypsi[1:-1,:] = (psi[2:,:]-psi[0:-2,:])/(2*dy) #Centered difference\n\n\t\n\t#x = 0, x = L\n\tdxpsi[:,0]=(psi[:,1]-psi[:,-2])/(2*dx)\n\tdxpsi[:,-1]= dxpsi[:,0]#(psi[:,0]-psi[:,-2])/(2*dx)\n\t\n\t#Interior\n\tdxpsi[:,1:-1]= (psi[:,2:]-psi[:,0:-2])/(2*dx) #centered difference\n\n\tu = -dypsi\n\tv = dxpsi\n\t\n\treturn u,v", "def computeRanges(self):\n self.axesRange.clear()\n for i in range(len(self.data[0])):\n minV = maxV = self.data[0][i]\n for j in range(len(self.data)):\n if self.data[j][i] < minV:\n minV = self.data[j][i]\n elif self.data[j][i] > maxV:\n maxV = self.data[j][i]\n self.axesRange.append([minV, maxV])", "def velocity(obs0, obs1, r0, r1):\n\tsigma = G/(np.linalg.norm(r0)**3)\n\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, 0)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\tfor _ in range(4): # Iterate to get tau\n\t\ttau = r0.dot(v0)/r0.dot(r0)\n\t\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, tau)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\treturn v0", "def velocity_function_space(self):\n return self.function_spaces.sub(1), self.function_spaces.sub(4)", "def get_velocities(self):\n w_l, w_r = self.drive_train.get_velocities()\n return w_l, w_r", "def action_scaling_vecs(self):\n vel_vec = np.arange(1, self.specs['velocity_limits'][1] + 1, 1)\n\n acc_pos_vec = 
self.calc_acceleration_from_power(\n vel_vec, self.specs['power_limits'][1])\n acc_neg_vec = self.calc_acceleration_from_power(\n vel_vec, self.specs['power_limits'][0])\n acc_0_vec = self.calc_acceleration_from_power(vel_vec, 0)\n\n acc_pos_vec = np.min([\n acc_pos_vec,\n np.ones(len(acc_pos_vec)) * self.specs['acceleration_limits'][1]\n ],\n axis=0)\n acc_neg_vec = np.max([\n acc_neg_vec,\n np.ones(len(acc_neg_vec)) * self.specs['acceleration_limits'][0]\n ],\n axis=0)\n\n # TODO: Find better solution :)\n # This is kind of a workaround. Roman got the values for 0 from the\n # data, which seems difficult to implement here. So the added 1.0 in\n # acc_pos_vec is handcrafted.\n self.vel_vec = np.append(0, vel_vec)\n self.acc_pos_vec = np.append(1.0, acc_pos_vec)\n self.acc_neg_vec = np.append(0.0, acc_neg_vec)\n self.acc_0_vec = np.append(0.0, acc_0_vec)", "def calculate_scan_variables(self):\n # Calcualte the # of evenly spaced 2D sweeps (base movements) required\n # to match resolutions\n num_sweeps = int(round(self.settings.get_resolution()\n * self.settings.get_scan_range())) #range degree\n\n # Caclulate the number of stepper steps covering the angular range\n num_stepper_steps = self.settings.get_scan_range() * self.base.get_steps_per_deg()\n\n # Calculate number of stepper steps per move (ie: between scans)\n num_stepper_steps_per_move = int(round(num_stepper_steps / num_sweeps))\n\n # Actual angle per move (between individual 2D scans)\n angle_between_sweeps = 1.0 * num_stepper_steps_per_move / \\\n self.base.get_steps_per_deg()\n\n # Correct the num_sweeps...\n # Account for the accumulated difference due to rounding\n num_sweeps = math.floor(\n 1.0 * self.settings.get_scan_range() / angle_between_sweeps)\n # Account for gap introduced from splitting scans\n num_sweeps = num_sweeps + 2\n\n return (num_sweeps, angle_between_sweeps, num_stepper_steps_per_move)", "def evaluate_wander_velocity(self, grid_size):\n\n # Define range of velocities either side of zero\n v_values = np.linspace(-0.4, +0.4, grid_size)\n\n # Define position of L4\n r_lagrange_point = np.array([constants.R * np.sin(np.pi / 6),\n constants.R * ((constants.MASS_SUN - constants.MASS_JUPITER) / (\n constants.MASS_SUN + constants.MASS_JUPITER)) * np.cos(np.pi / 6),\n 0]) # Asteroid vector displacement from COM\n\n # Define 2D arrays X, Y about the lagrange point for the wander to be evaluated at\n X, Y = np.meshgrid(v_values, v_values)\n\n # Generate input_list to supply initial conditions to worker processes\n input_list = []\n for i in range(grid_size):\n for j in range(grid_size):\n input_list.append([X[i][j], Y[i][j], r_lagrange_point])\n\n # Split input list into sections of length n\n n = int(len(input_list) / 4)\n input_list = [input_list[i:i + n] for i in range(0, len(input_list), n)]\n\n # Define pool and map input_list to the pooled processes\n pool = Pool()\n result = pool.map(pooled_process_velocity, input_list)\n\n # 'result' is a 2D array (i, j) corresponding to coordinates X[i][j], Y[i][j]\n result = np.concatenate(result).reshape((grid_size, grid_size))\n\n # Save result to disk\n self.save_results(result, X, Y)", "def compute_parameters(self):\n diameter = np.sqrt(np.sum([(self.bounds[i][1] - self.bounds[i][0])**2 for i in range(self.n)]))\n volume = np.array([self.bounds[i][1] - self.bounds[i][0] for i in range(self.n)]).prod()\n return diameter, volume, 1.0", "def compute_velocities(self, control):\r\n\r\n # Compute epsilon for ratio between divergent and rotational\r\n # components\r\n epsilon = 
control['Rossby']*max(1,control['Rossby'])/max(1, control['Burger'])\r\n\r\n # Compute x- and y- derivatives of the streamfunction\r\n psi_x = self.st.calc_derivative(self.psi, 'x')\r\n psi_y = self.st.calc_derivative(self.psi, 'y')\r\n\r\n # Compute x- and y- derivatives of the potential\r\n chi_x = self.st.calc_derivative(self.chi, 'x')\r\n chi_y = self.st.calc_derivative(self.chi, 'y')\r\n\r\n # Compute velocities by Helmholtz equation\r\n self.vx = -psi_y + epsilon*chi_x\r\n self.vy = psi_x + epsilon*chi_y", "def velocity_lims(rate, N):\n dt = N / rate\n vmin = 0.5 * PLATESCALE / dt\n vmax = np.minimum(8.2 / dt, 50 * (1 - OVERLAP_RATIO) * PLATESCALE / dt)\n return vmin, vmax", "def get_range(self):\n \n # add some process noise to the system\n vel = self.vel + 5*randn()\n alt = self.alt + 10*randn()\n self.pos = self.pos + vel*self.dt\n \n # add measurment noise\n err = self.pos * 0.05*randn()\n slant_dist = sqrt(self.pos**2 + alt**2)\n \n return slant_dist + err", "def ConvectiveVelocity(self):\n\n T = self.T[:,:,75]; P = self.P[:,:,75]; rho = self.rho[:,:,75]\n uy = self.vy[:,:,75]; ux = self.vx[:,:,75]\n d = 1.0 # delta\n self.vel_conv = np.nan_to_num(np.sqrt(self.F*d*self.dTdy(P,T,rho)/T)*self.dx)\n\n xx,yy = np.meshgrid(self.y, self.x)\n fig = plt.figure('conv vel')\n ax = fig.gca(projection='3d')\n ax.plot_surface(xx, yy, rho, cmap=cm.plasma)\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m/s]')\n plt.tight_layout()\n plt.savefig('Density.png')\n\n print '------'\n print 'The convective velocity is,'\n print self.vel_conv\n print '------'\n print 'Difference between convective velocity and vertical velocity'\n print self.vel_conv - uy\n\n \"\"\"\n Mass fraction moving with convective velocity +/- 10%. For each cell, the\n mass moving up with the given velocity range needs to be summed up. 
This\n gives the mass fraction moving with the given velocity range.\n \"\"\"\n\n mass_y = []; mass_x = []\n for i in range(self.nx):\n for j in range(self.ny):\n if uy[i,j] >= self.vel_conv[i,j]*0.9 and uy[i,j] <= self.vel_conv[i,j]*1.1:\n mass_y.append(rho[i,j])\n if ux[i,j] >= self.vel_conv[i,j]*0.9 and ux[i,j] <= self.vel_conv[i,j]*1.1:\n mass_x.append(rho[i,j])\n\n MassFraction_y = np.sum(mass_y)/np.sum(rho)\n MassFraction_x = np.sum(mass_x)/np.sum(rho)\n print 'Fraction of mass moving with velocities v_conv +/- 10% in x direction:',MassFraction_x\n print 'Fraction of mass moving with velocities v_conv +/- 10% in y direction:',MassFraction_y\n\n print '-----------'\n return self.vel_conv", "def get_velocity( b ):\n v = []\n for i in range(1,len(b)-1):\n D2 = b[i+1] - 2.0*b[i] + b[i-1]\n D1 = (b[i+1] - b[i-1])/2.0\n D1norm2 = D1[0]**2.0 + D1[1]**2.0\n v.append( D2/D1norm2 )\n return np.array(v)", "def range_reduction_get_objects(self):\n vlist = []\n x, y = self.problem._model.x, self.problem._model.y\n if (y.ub - y.lb) > self.improved_abstol:\n vlist.append(y)\n if (x.ub - x.lb) > self.improved_abstol:\n vlist.append(x)\n return vlist", "def calculate_scan_variables(self):\n # Calcualte the # of evenly spaced 2D sweeps (base movements) required\n # to match resolutions\n num_sweeps = int(round(self.settings.get_resolution()\n * self.settings.get_scan_range()))\n\n # Caclulate the number of stepper steps covering the angular range\n num_stepper_steps = self.settings.get_scan_range() * self.base.get_steps_per_deg()\n\n # Calculate number of stepper steps per move (ie: between scans)\n num_stepper_steps_per_move = int(round(num_stepper_steps / num_sweeps))\n\n # Actual angle per move (between individual 2D scans)\n angle_between_sweeps = 1.0 * num_stepper_steps_per_move / \\\n self.base.get_steps_per_deg()\n\n # Correct the num_sweeps...\n # Account for the accumulated difference due to rounding\n num_sweeps = math.floor(\n 1.0 * self.settings.get_scan_range() / angle_between_sweeps)\n # Account for gap introduced from splitting each scan\n num_sweeps = num_sweeps + 2\n\n return (num_sweeps, angle_between_sweeps, num_stepper_steps_per_move)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the resulting velocity search space. Implements EQN 16, INTERSECT(Vs, Vd). Note the admissible velocity Va is checked after iterating through the (v,omega) pairs of the search space (Vr_v, Vr_omega).
def calculate_Vr(self, robot_state):
    ### Calculate Velocity spaces
    Vs = self.calculate_Vs()
    Vd = self.calculate_Vd(robot_state)

    ### Resulting search space range
    Vr_v_min = max(Vs[0], Vd[0])          # Resulting Minimum Linear velocity Vr_v_min
    Vr_v_max = min(Vs[1], Vd[1])          # Resulting Maximum Linear velocity Vr_v_max
    Vr_omega_min = max(Vs[2], Vd[2])      # Resulting Minimum Angular velocity Vr_omega_min
    Vr_omega_max = min(Vs[3], Vd[3])      # Resulting Maximum Angular velocity Vr_omega_max

    # Generate search space for velocities
    Vr_v = np.arange(Vr_v_min, Vr_v_max, self.robot.v_resolution)
    Vr_omega = np.arange(Vr_omega_min, Vr_omega_max, self.robot.omega_resolution)

    return Vr_v, Vr_omega
[ "def veq(self):\n return self._veq / self._velocity_factor", "def get_Hv():\n \n vn = np.zeros((nx,ny+1)) \n vs = np.zeros((nx,ny+1))\n ve = np.zeros((nx,ny+1))\n vw = np.zeros((nx,ny+1))\n ue = np.zeros((nx,ny+1))\n uw = np.zeros((nx,ny+1))\n τyyn = np.zeros((nx,ny+1))\n τyys = np.zeros((nx,ny+1))\n τyxe = np.zeros((nx,ny+1))\n τyxw = np.zeros((nx,ny+1))\n Hv = np.zeros((nx,ny+1))\n \n j = np.arange(1,ny) # v-cell centers in domain interior\n \n vn[:,j] = (v[:,j+1] + v[:,j])/2\n vs[:,j] = (v[:,j] + v[:,j-1])/2\n \n i = np.arange(0,nx-1)\n ve[IJ(i,j)] = (v[IJ(i+1,j)] + v[IJ(i,j)])/2\n ve[nx-1,j] = vbc_r\n i = np.arange(1,nx)\n vw[IJ(i,j)] = (v[IJ(i,j)] + v[IJ(i-1,j)])/2\n vw[0,j] = vbc_l\n \n i = np.arange(0,nx)\n ue[IJ(i,j)] = (u[IJ(i+1,j-1)] + u[IJ(i+1,j)])/2\n uw[IJ(i,j)] = (u[IJ(i,j-1)] + u[IJ(i,j)]) /2\n \n τyyn[:,j] = -2*ν*(v[:,j+1] - v[:,j]) /Δy\n τyys[:,j] = -2*ν*(v[:,j] - v[:,j-1])/Δy\n \n i = np.arange(0,nx-1)\n τyxe[IJ(i,j)] = -ν*(v[IJ(i+1,j)]-v[IJ(i,j)])/Δx - ν*(u[IJ(i+1,j)]-u[IJ(i+1,j-1)])/Δy\n τyxe[nx-1,j] = -ν*(vbc_r-v[nx-1,j])/(Δx/2) - ν*(u[nx,j]-u[nx,j-1])/Δy \n \n i = np.arange(1,nx)\n τyxw[IJ(i,j)] = -ν*(v[IJ(i,j)]-v[IJ(i-1,j)])/Δx - ν*(u[IJ(i,j)]-u[IJ(i,j-1)])/Δy\n τyxw[0,j] = -ν*(v[0,j]-vbc_l)/(Δx/2) - ν*(u[0,j]-u[0,j-1])/Δy\n \n Hv[:,j] = -((vn[:,j]*vn[:,j] - vs[:,j]*vs[:,j])/Δy + (ve[:,j]*ue[:,j] - vw[:,j]*uw[:,j])/Δx) \\\n -((τyyn[:,j] - τyys[:,j])/Δy + (τyxe[:,j] - τyxw[:,j])/Δx)\n \n return Hv", "def intersection_segment_voxels(p0, p1, v, l_2, eps=1.0e-10):\n tmp = p1 - p0\n tmp[tmp == 0.0] = eps\n lt = (v + l_2 - p0) / tmp\n gt = (v - l_2 - p0) / tmp\n tmp = (p1 - p0) < 0\n tmp1 = lt[:, tmp].copy()\n tmp2 = gt[:, tmp].copy()\n lt[:, tmp] = tmp2\n gt[:, tmp] = tmp1\n intersection = gt.clip(min=0.0).max(1) < lt.clip(max=1.0).min(1)\n return intersection", "def calculate_Vs(self):\n return [self.robot.min_vel, self.robot.max_vel, self.robot.min_omega, self.robot.max_omega]", "def processCollisionDeltaV(self, deltav):\n return", "def svr(D):\n m = D.shape[0]\n r = ones(m-1)\n for i in xrange(m-1):\n v = svd(D[i:i+2,:],compute_uv=False)\n r[i] = v[0]/v[1]\n \n return r", "def ConvectiveVelocity(self):\n\n T = self.T[:,:,75]; P = self.P[:,:,75]; rho = self.rho[:,:,75]\n uy = self.vy[:,:,75]; ux = self.vx[:,:,75]\n d = 1.0 # delta\n self.vel_conv = np.nan_to_num(np.sqrt(self.F*d*self.dTdy(P,T,rho)/T)*self.dx)\n\n xx,yy = np.meshgrid(self.y, self.x)\n fig = plt.figure('conv vel')\n ax = fig.gca(projection='3d')\n ax.plot_surface(xx, yy, rho, cmap=cm.plasma)\n ax.set_xlabel('x [m]')\n ax.set_ylabel('y [m]')\n ax.set_zlabel('z [m/s]')\n plt.tight_layout()\n plt.savefig('Density.png')\n\n print '------'\n print 'The convective velocity is,'\n print self.vel_conv\n print '------'\n print 'Difference between convective velocity and vertical velocity'\n print self.vel_conv - uy\n\n \"\"\"\n Mass fraction moving with convective velocity +/- 10%. For each cell, the\n mass moving up with the given velocity range needs to be summed up. 
This\n gives the mass fraction moving with the given velocity range.\n \"\"\"\n\n mass_y = []; mass_x = []\n for i in range(self.nx):\n for j in range(self.ny):\n if uy[i,j] >= self.vel_conv[i,j]*0.9 and uy[i,j] <= self.vel_conv[i,j]*1.1:\n mass_y.append(rho[i,j])\n if ux[i,j] >= self.vel_conv[i,j]*0.9 and ux[i,j] <= self.vel_conv[i,j]*1.1:\n mass_x.append(rho[i,j])\n\n MassFraction_y = np.sum(mass_y)/np.sum(rho)\n MassFraction_x = np.sum(mass_x)/np.sum(rho)\n print 'Fraction of mass moving with velocities v_conv +/- 10% in x direction:',MassFraction_x\n print 'Fraction of mass moving with velocities v_conv +/- 10% in y direction:',MassFraction_y\n\n print '-----------'\n return self.vel_conv", "def vector_search(query, model, index, num_results=10):\n vector = model.encode(list(query))\n D, I = index.search(np.array(vector).astype(\"float32\"), k=num_results)\n return D, I", "def __equiv__(self, miller=None, csym=None,\n cdim=[1.,1.,1.], cang=[90.,90.,90.]):\n start = time.time()\n from .sym import cv\n from .sym import cubic, hexag\n #from sym_cy import cubic, hexag\n from . import sym #python compiled\n #import sym_cy #cython compiled\n #from sym.py cvec, cubic, and hexgonal modules are brought in\n if miller==None:\n print(\"Miller index should be given\")\n raise IOError\n vect = np.array(miller)\n norm = 0.; sneq = []\n temp = vect.copy()\n #norm = vect[0]**2 + vect[1]**2 + vect[2]**2\n #norm = np.sqrt(norm)\n #vect = vect/ norm\n #print 'elapsed time before v calculation: %8.6f'%\n #(time.time()-start)\n\n ##---------------------------------\n ##---------------------------------\n #start = time.time()\n if csym=='cubic':\n #H = sym_cy.cubic(1) #cython compiled\n H = sym.cubic() #operators\n for i in range(len(H)):\n sneq.append(np.dot(H[i], vect))\n pass\n pass\n elif csym=='hexag':\n #H = sym_cy.hexag(1) #cython compiled\n H = sym.hexag() #operators\n v = cv(pole=vect, cdim=cdim, cang=cang)\n for i in range(len(H)):\n sneq.append(np.dot(H[i], v))\n pass\n pass\n elif csym=='None':\n #H = [np.identity(3)]\n sneq = [vect]\n else:\n print('Given symmetry, %s is not prepared'%csym)\n input('Enter to raise an error and quits the job');\n raise IOError\n\n #print 'elapsed time during v calculation: %8.6f'%\n #(time.time()-start)\n #####-------------------------------\n #####--------------------------------\n\n start = time.time()\n stacked = [] #empty unique vectors\n # is cH in the already existing stacked list?\n # yes: pass\n # no : add\n\n ## filtering the sneq under whether or not it is unique\n for i in range(len(sneq)):\n cH = sneq[i].copy() #current vector\n if __isunique__(a=cH, b=stacked):\n stacked.append(cH)\n else: pass\n pass\n\n ## if v[2] is minus, mutiply minus sign to the vector.\n for i in range(len(stacked)):\n if stacked[i][2]<0:\n stacked[i] = stacked[i]*-1\n pass\n #print 'elapsed time during the rest: %8.6f'%\n #(time.time()-start)\n return np.array(stacked)", "def colider(v,crmax,tau,selxtra,coeff,sD) :\n \n ncell = sD.ncell \n col = 0 # Count number of collisions\n vrel = np.empty(3) # Relative velocity for collision pair\n \n #* Loop over cells, processing collisions in each cell\n for jcell in range(ncell) :\n \n #* Skip cells with only one particle\n number = sD.cell_n[jcell]\n if number > 1 : \n \n #* Determine number of candidate collision pairs \n # to be selected in this cell\n select = coeff*number*(number-1)*crmax[jcell] + selxtra[jcell]\n nsel = int(select) # Number of pairs to be selected\n selxtra[jcell] = select-nsel # Carry over any left-over 
fraction\n crm = crmax[jcell] # Current maximum relative speed\n \n #* Loop over total number of candidate collision pairs\n for isel in range(nsel) :\n \n #* Pick two particles at random out of this cell\n k = int( np.floor( np.random.uniform(0,number) ) )\n kk = int(np.ceil( k + np.random.uniform(0,number-1) ) % number )\n ip1 = sD.Xref[ k + sD.index[jcell] ] # First particle\n ip2 = sD.Xref[ kk + sD.index[jcell] ] # Second particle\n\n #* Calculate pair's relative speed\n cr = np.linalg.norm( v[ip1,:] - v[ip2,:] ) # Relative speed \n if cr > crm : # If relative speed larger than crm,\n crm = cr # then reset crm to larger value\n\n #* Accept or reject candidate pair according to relative speed\n if cr/crmax[jcell] > np.random.random() :\n #* If pair accepted, select post-collision velocities\n col += 1 # Collision counter\n vcm = 0.5*( v[ip1,:] + v[ip2,:] ) # Center of mass velocity\n cos_th = 1. - 2.*np.random.random() # Cosine and sine of \n sin_th = np.sqrt(1. - cos_th**2) # collision angle theta\n phi = 2*np.pi*np.random.random() # Collision angle phi\n vrel[0] = cr*cos_th # Compute post-collision \n vrel[1] = cr*sin_th*np.cos(phi) # relative velocity\n vrel[2] = cr*sin_th*np.sin(phi)\n v[ip1,:] = vcm + 0.5*vrel # Update post-collision\n v[ip2,:] = vcm - 0.5*vrel # velocities\n\n crmax[jcell] = crm # Update max relative speed \n \n return col", "def vecpot(arx,ary,arz, kf, lx=2*np.pi, ly=2*np.pi, lz=2*np.pi):\n nx,ny,nz=arx.shape\n\n # COMPUTE THE ARRAY SIZE\n kx, ky, kz, km = create_kgrid(*arx.shape, lx=lx, ly=ly, lz=lz)\n #kx, ky, kz, k2 = kx[:,nna,nna],ky[nna,:,nna],kz[nna,nna,:], km**2\n k2=km**2\n k2[nx/2,ny/2,nz/2]=1.\n\n # FOURIER TRANSFORM THE ARRAY\n farx = nf.fftshift(nf.fftn(arx))\n fary = nf.fftshift(nf.fftn(ary))\n farz = nf.fftshift(nf.fftn(arz))\n\n # SET VALUES ABOVE kf AS 0+0i\n farx = (np.sign(km - kf) - 1.)/(-2.)*farx\n fary = (np.sign(km - kf) - 1.)/(-2.)*fary\n farz = (np.sign(km - kf) - 1.)/(-2.)*farz\n\n # FIND THE CORRESPONDING VECTOR POTENTIAL A = -ik x B /k^2\n axf = -eye*(ky*farz-kz*fary)/k2\n ayf = -eye*(kz*farx-kx*farz)/k2\n azf = -eye*(kx*fary-ky*farx)/k2\n\n # BACK TRANSFORM TO REAL SPACE\n ax = np.real(nf.ifftn(nf.ifftshift(axf)))\n ay = np.real(nf.ifftn(nf.ifftshift(ayf)))\n az = np.real(nf.ifftn(nf.ifftshift(azf)))\n return ax,ay,az", "def sedov(t, E0, rho0, g, n=1000, nu=3):\n\n # the similarity variable\n v_min = 2.0 / ((nu + 2) * g)\n v_max = 4.0 / ((nu + 2) * (g + 1))\n\n v = v_min + arange(n) * (v_max - v_min) / (n - 1)\n\n a = calc_a(g, nu)\n beta = calc_beta(v, g=g, nu=nu)\n lbeta = log(beta)\n \n r = exp(-a[0] * lbeta[0] - a[2] * lbeta[1] - a[1] * lbeta[2])\n rho = ((g + 1.0) / (g - 1.0)) * exp(a[3] * lbeta[1] + a[5] * lbeta[3] + a[4] * lbeta[2])\n p = exp(nu * a[0] * lbeta[0] + (a[5] + 1) * lbeta[3] + (a[4] - 2 * a[1]) * lbeta[2])\n u = beta[0] * r * 4.0 / ((g + 1) * (nu + 2))\n p *= 8.0 / ((g + 1) * (nu + 2) * (nu + 2))\n\n # we have to take extra care at v=v_min, since this can be a special point.\n # It is not a singularity, however, the gradients of our variables (wrt v) are.\n # r -> 0, u -> 0, rho -> 0, p-> constant\n\n u[0] = 0.0; rho[0] = 0.0; r[0] = 0.0; p[0] = p[1]\n\n # volume of an n-sphere\n vol = (pi ** (nu / 2.0) / Gamma(nu / 2.0 + 1)) * power(r, nu)\n\n\n # note we choose to evaluate the integral in this way because the\n # volumes of the first few elements (i.e near v=vmin) are shrinking \n # very slowly, so we dramatically improve the error convergence by \n # finding the volumes exactly. 
This is most important for the\n # pressure integral, as this is on the order of the volume.\n\n # (dimensionless) energy of the model solution\n de = rho * u * u * 0.5 + p / (g - 1)\n\n # integrate (trapezium rule)\n q = inner(de[1:] + de[:-1], diff(vol)) * 0.5\n\n # the factor to convert to this particular problem\n fac = (q * (t ** nu) * rho0 / E0) ** (-1.0 / (nu + 2))\n shock_speed = fac * (2.0 / (nu + 2))\n r_s = shock_speed * t * (nu + 2) / 2.0\n\n\n r *= fac * t\n u *= fac\n p *= fac * fac * rho0\n rho *= rho0\n\n\n\n return r, p, rho, u, r_s", "def vels_from_mod(K, G, Rho):\r\n Vp = np.sqrt((K+4/3*G)/Rho)\r\n Vs = np.sqrt(G/Rho)\r\n\r\n return Vp, Vs", "def box_potential(N, v, vr=0):\n V = np.zeros(N)\n x = np.linspace(0, 1, N)\n V[(x < (2 / 3)) & (x > (1 / 3))] = v\n if vr != 0:\n V[(x > (2 / 3))] = vr\n return V", "def epv_cartesian(theta,pres,u,v,lats,deltax,deltay):\n iz, iy, ix = theta.shape\n \n dthdp, dthdy, dthdx = gradient_cartesian(theta, pres, deltax, deltay)\n dudp, dudy, dudx = gradient_cartesian(u, pres, deltax, deltay)\n dvdp, dvdy, dvdx = gradient_cartesian(v, pres, deltax, deltay)\n\n avort = np.zeros_like(theta).astype('f') \n for kk in range(0,iz): \n avort[kk,:,:] = vertical_vorticity_cartesian(u[kk,:,:].squeeze(), v[kk,:,:].squeeze(), lats, deltax, deltay, 1)\n\n epv = (-9.81*(-dvdp*dthdx - dudp*dthdy + avort*dthdp))*10**6\n\n\n return epv", "def intersection_segments_voxels_slow(p0, p1, v, l_2):\n intersection = np.zeros(v.shape[0], dtype=np.bool)\n for i in range(len(p0)):\n intersection += intersection_segment_voxels(p0[i], p1[i], v, l_2)\n\n return intersection", "def VRHavg(self):\n if self.compl: raise ValueError('Elastic tensor is compliance!')\n Cij = self.Cvoigt\n eCij= self.eCvoigt\n # Need compliances too:\n if eCij is None:\n sij = np.linalg.inv(Cij)\n else:\n complTensor = self.copy()\n complTensor.invert()\n sij = complTensor.Cvoigt\n eSij= complTensor.eCvoigt\n covSij = complTensor.vcovCvoigt\n # These equations are valid for all crystal systems (only 9 of \n # the 21 elastic constants ever have to be used, e.g. see Anderson \n # theory of the Earth, pg. 122 or the introduction to Hill, 1952).\n voigtB = (1.0/9)*(Cij[0,0] + Cij[1,1] + Cij[2,2] ) \\\n + (2.0/9)*(Cij[0,1] + Cij[0,2] + Cij[1,2])\n if eCij is not None:\n evB = np.sqrt( (1.0/81)*(eCij[0,0]**2 + eCij[1,1]**2 + eCij[2,2]**2) \\\n +(2.0/81)*(eCij[0,1]**2 + eCij[0,2]**2 + eCij[1,2]**2) )\n reussB = 1.0/((sij[0,0]+sij[1,1]+sij[2,2]) + 2*(sij[0,1]+sij[0,2]+sij[1,2]))\n if eCij is not None:\n # Note that COV(X+Z,Y) = COV(X,Y)+COV(Z,Y) and \n # COV(SUM(Xi),SUM(Yj)) = SUM(SUM(COV(Xi,Yj)\n # c.f. 
http://mathworld.wolfram.com/Covariance.html\n erB = (np.sqrt(eSij[0,0]**2 + eSij[1,1]**2 + eSij[2,2]**2 \\\n + 4*eSij[0,1]**2 + 4*eSij[0,2]**2 + 4*eSij[1,2]**2 \\\n + 2*covSij[0,0,1,1] + 2*covSij[0,0,2,2] + 2*covSij[1,1,2,2] \\\n + 4*covSij[0,0,0,1] + 4*covSij[0,0,0,2] + 4*covSij[0,0,1,2] \\\n + 4*covSij[1,1,0,1] + 4*covSij[1,1,0,2] + 4*covSij[1,1,1,2] \\\n + 4*covSij[2,2,0,1] + 4*covSij[2,2,0,2] + 4*covSij[2,2,1,2] \\\n + 8*covSij[0,1,0,2] + 8*covSij[0,1,1,2] + 8*covSij[0,2,1,2] )) \\\n * reussB**2\n voigtG = (1.0/15)*(Cij[0,0] + Cij[1,1] + Cij[2,2] - \\\n Cij[0,1] - Cij[0,2] - Cij[1,2]) + \\\n (1.0/5)*(Cij[3,3] + Cij[4,4] + Cij[5,5])\n if eCij is not None:\n evG = np.sqrt( (1.0/225)*(eCij[0,0]**2 + eCij[1,1]**2 + \\\n eCij[2,2]**2 + eCij[0,1]**2 + \\\n eCij[0,2]**2 + eCij[1,2]**2) + \\\n (1.0/25)*(eCij[3,3]**2 + eCij[4,4]**2 + eCij[5,5]**2) )\n reussG = 15.0/(4*(sij[0,0]+sij[1,1]+sij[2,2]) - \\\n 4*(sij[0,1]+sij[0,2]+sij[1,2]) + 3*(sij[3,3]+sij[4,4]+sij[5,5]))\n if eCij is not None:\n erG = np.sqrt( \\\n 16*(eSij[0,0]**2 + eSij[1,1]**2 + eSij[2,2]**2) \\\n + 16*(eSij[0,1]**2 + eSij[0,2]**2 + eSij[1,2]**2) \\\n + 9*(eSij[3,3]**2 + eSij[4,4]**2 + eSij[5,5]**2) \\\n + 32*covSij[0,0,1,1] + 32*covSij[0,0,2,2] + 32*covSij[1,1,2,2] \\\n + 32*covSij[0,0,0,1] + 32*covSij[0,0,0,2] + 32*covSij[0,0,1,2] \\\n + 32*covSij[1,1,0,1] + 32*covSij[1,1,0,2] + 32*covSij[1,1,1,2] \\\n + 32*covSij[2,2,0,1] + 32*covSij[2,2,0,2] + 32*covSij[2,2,1,2] \\\n + 32*covSij[0,1,0,2] + 32*covSij[0,1,1,2] + 32*covSij[0,2,1,2] \\\n + 24*covSij[0,0,3,3] + 24*covSij[0,0,4,4] + 24*covSij[0,0,5,5] \\\n + 24*covSij[1,1,3,3] + 24*covSij[1,1,4,4] + 24*covSij[1,1,5,5] \\\n + 24*covSij[2,2,3,3] + 24*covSij[2,2,4,4] + 24*covSij[2,2,5,5] \\\n + 24*covSij[0,1,3,3] + 24*covSij[0,1,4,4] + 24*covSij[0,1,5,5] \\\n + 24*covSij[0,2,3,3] + 24*covSij[0,2,4,4] + 24*covSij[0,2,5,5] \\\n + 24*covSij[1,2,3,3] + 24*covSij[1,2,4,4] + 24*covSij[1,2,5,5] \\\n + 18*covSij[3,3,4,4] + 18*covSij[3,3,5,5] + 18*covSij[4,4,5,5] \\\n ) * (reussG**2 / 15)\n if eCij is not None:\n return (voigtB, reussB, voigtG, reussG, ((voigtB+reussB)/2.0), ((voigtG+reussG)/2.0),\n evB, erB, evG, erG, ((evB+erB)/2), ((evG+erG)/2))\n else:\n return (voigtB, reussB, voigtG, reussG, ((voigtB+reussB)/2.0), ((voigtG+reussG)/2.0),\n None, None, None, None, None, None)", "def velocity(obs0, obs1, r0, r1):\n\tsigma = G/(np.linalg.norm(r0)**3)\n\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, 0)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\tfor _ in range(4): # Iterate to get tau\n\t\ttau = r0.dot(v0)/r0.dot(r0)\n\t\tv0 = (r1 - vel_f(obs1.JD, obs0.JD, sigma, tau)*r0)/vel_g(obs1.JD, obs0.JD, sigma)\n\treturn v0", "def calc_virt_critical_pt(self):\n # For brevity, \"dimensionless\" prefix omitted from \"position\" and \"motive\" variable names.\n # If the device is operating within the space charge limited mode boundary surface, we can immediately set the values and exit.\n if self.calc_interelectrode_spacing() <= self[\"motive_data\"][\"spclmbs_max_dist\"]:\n return {\"output_voltage\":self.calc_contact_potential(),\n \"output_current_density\":self[\"Emitter\"].calc_saturation_current_density()}\n \n output_current_density = optimize.brentq(self.virt_critical_point_target_function,\\\n self[\"Emitter\"].calc_saturation_current_density(),0)\n \n motive = np.log(self[\"Emitter\"].calc_saturation_current_density()/output_current_density)\n output_voltage = (self[\"Emitter\"][\"barrier\"] - self[\"Collector\"][\"barrier\"] + \\\n physical_constants[\"boltzmann\"] * self[\"Emitter\"][\"temp\"] * 
motive) / \\\n physical_constants[\"electron_charge\"]\n \n return {\"output_voltage\":output_voltage,\n \"output_current_density\":output_current_density}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the dynamic window approach control inputs required for path planning. Returns the best control inputs, best trajectory, and the resulting trajectory set found from evaluating the velocity search space.
def calc_dwa_control(self, robot_state, robot_goal, obstacles): # Best Metrics Initializer minimum_cost = np.inf # Initialize minimum cost to extremely large initially best_control_input = np.zeros(2) best_trajectory = deepcopy(robot_state) # Compute the resulting velocity search space Vr_v, Vr_omega = self.calculate_Vr(robot_state) # Trajectory set (store all possible circular trajectories for visualization) num_possible_trajectories = Vr_v.shape[0] * Vr_omega.shape[0] trajectory_set = np.zeros((0, self.n_horizon+1, robot_state.shape[0])) ### Evaluate (v,omega) pairs and searches for the best control input + trajectory x_init = deepcopy(robot_state) for v in Vr_v: for omega in Vr_omega: ### Generate predicted trajectories with (v, omega) pair from search space control_input = np.array([v, omega]) trajectory = self.generate_trajectory(x_init, control_input) ### Evaluate the paths for nearest obstacles nearest_obstacles = self.calculate_nearest_obstacles(robot_state, obstacles) ### Check if velocity is admissable if self.is_admissable(trajectory, control_input, nearest_obstacles): trajectory_set = np.vstack((trajectory_set, trajectory[None])) ### Cost calculation angle_cost = self.alpha * self.calc_goal_heuristic(trajectory, robot_goal) dist_cost = self.beta * self.calc_obs_dist_heuristic(trajectory, nearest_obstacles) vel_cost = self.gamma * self.calc_vel_heuristic(trajectory, self.robot.max_vel) # Total cost total_cost = angle_cost + dist_cost + vel_cost ### Update best costs & store the best control inputs + trajectory if minimum_cost >= total_cost: # print("[!] Best Found (v,w): ({:.3f}, {:.3f})".format(v, omega)) minimum_cost = total_cost best_control_input[:] = control_input best_trajectory = trajectory # print("[!] Best Found (v,w): ({:.3f}, {:.3f}) \tCost: {:.3f}".format(v, omega, minimum_cost)) ### Prevention of getting stuck in (v,omega) = 0 search space if (abs(best_control_input[0]) < self.stuck_space_tol and abs(robot_state[3]) < self.stuck_space_tol): print("[!] Robot stuck in 0 velocity, sending escape angular velocity to get out of region.") best_control_input[1] = self.escape_ang_vel # print("robot state: ", robot_state) # print("best_control_input: ", best_control_input) # print("minimum_cost: ", minimum_cost) # print("Vr_v: ", Vr_v) # print("Vr_omega: ", Vr_omega) return best_control_input, best_trajectory, trajectory_set
[ "def calculate_optimal_control(inputs):\n phi_init = 0\n numsteps, step_norm, ig_max, ig_min = inputs\n phi_osc_pos = phi_init\n phi_osc_neg = phi_init\n\n # define a function that integrates for step size\n def step_integrate(phi0, u_val, step):\n \"\"\" function that integrates one step forward. returns final phase,\n total shift value \"\"\"\n def dphidt(phi, t):\n return ((2*np.pi)/pmodel.T\n - u_val*prc_spl(start_time+(phi)*pmodel.T/(2*np.pi)))\n\n int_times = np.linspace(0,step,101) # in hours\n phis = integrate.odeint(dphidt, [phi0], int_times, hmax=0.01)\n return phis[-1][0], phis[-1][0]-phi0-2*np.pi/pmodel.T*step\n\n def total_shift(control_inputs, phi_init, maxmin):\n \"\"\" this is the function we maximize or minimize \"\"\"\n tot_shift = 0\n phi_i = phi_init\n for inp in control_inputs:\n new_phi, step_shift = step_integrate(phi_i, inp, step_norm)\n phi_i = new_phi\n tot_shift += step_shift\n if maxmin is 'max':\n return -tot_shift\n elif maxmin is 'min':\n return tot_shift\n\n def max_shift(us):\n return total_shift(us, phi_init, 'max')\n def min_shift(us):\n return total_shift(us, phi_init, 'min')\n\n\n # scipy optimization: multistart at either end\n max_opt1 = optimize.minimize(max_shift, # fcn to maximize\n np.hstack([ig_max,[0.00]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n max_opt2 = optimize.minimize(max_shift, # fcn to maximize\n np.hstack([ig_max,[0.06]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n max_opts = [max_opt1, max_opt2]\n max_opt = max_opts[np.argmin([max_opt1.fun, max_opt2.fun])]\n multi = False\n if max_opt1.fun != max_opt2.fun:\n multi=True\n maxopt = max_opt.x\n maxshift = -max_opt.fun\n else:\n maxopt = max_opt.x\n maxshift = -max_opt.fun\n\n\n min_opt1 = optimize.minimize(min_shift, # fcn to maximize\n np.hstack([ig_min,[0.00]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n min_opt2 = optimize.minimize(min_shift, # fcn to maximize\n np.hstack([ig_min,[0.06]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n min_opts = [min_opt1, min_opt2]\n min_opt = min_opts[np.argmin([min_opt1.fun, min_opt2.fun])]\n multi = False\n if min_opt1.fun != min_opt2.fun:\n multi=True\n minopt = min_opt.x\n minshift = min_opt.fun\n else:\n minopt = min_opt.x\n minshift = min_opt.fun\n\n return maxopt, maxshift, minopt, minshift", "def _reduce_control_problem(self):\n # Initialize which turbines to optimize for\n self.turbs_to_opt = (self.maximum_yaw_angle - self.minimum_yaw_angle >= 0.001)\n\n # Initialize subset variables as full set\n self.fi_subset = self.fi.copy()\n nwinddirections_subset = copy.deepcopy(self.fi.floris.flow_field.n_wind_directions)\n minimum_yaw_angle_subset = copy.deepcopy(self.minimum_yaw_angle)\n maximum_yaw_angle_subset = copy.deepcopy(self.maximum_yaw_angle)\n x0_subset = copy.deepcopy(self.x0)\n turbs_to_opt_subset = copy.deepcopy(self.turbs_to_opt)\n turbine_weights_subset = copy.deepcopy(self.turbine_weights)\n yaw_angles_template_subset = self._unpack_variable(0.0)\n yaw_angles_baseline_subset = copy.deepcopy(self.yaw_angles_baseline)\n\n # Define which turbines to optimize for\n if self.exclude_downstream_turbines:\n for iw, wd in enumerate(self.fi.floris.flow_field.wind_directions):\n # Remove turbines from turbs_to_opt that are downstream\n downstream_turbines = derive_downstream_turbines(self.fi, wd)\n downstream_turbines = np.array(downstream_turbines, dtype=int)\n self.turbs_to_opt[iw, 0, downstream_turbines] = False\n 
turbs_to_opt_subset = copy.deepcopy(self.turbs_to_opt) # Update\n\n # Reduce optimization problem through layout symmetry\n if (self.exploit_layout_symmetry) & (self._sym_df is not None):\n # Reinitialize floris with subset of wind directions\n wd_array = self.fi.floris.flow_field.wind_directions\n wind_direction_subset = wd_array[self._sym_mapping_reduce]\n self.fi_subset.reinitialize(wind_directions=wind_direction_subset)\n\n # Reduce control variables\n red_map = self._sym_mapping_reduce\n nwinddirections_subset = len(wind_direction_subset)\n minimum_yaw_angle_subset = minimum_yaw_angle_subset[red_map, :, :]\n maximum_yaw_angle_subset = maximum_yaw_angle_subset[red_map, :, :]\n x0_subset = x0_subset[red_map, :, :]\n turbs_to_opt_subset = turbs_to_opt_subset[red_map, :, :]\n turbine_weights_subset = turbine_weights_subset[red_map, :, :]\n yaw_angles_template_subset = yaw_angles_template_subset[red_map, :, :]\n yaw_angles_baseline_subset = yaw_angles_baseline_subset[red_map, :, :]\n\n # Set up a template yaw angles array with default solutions. The default\n # solutions are either 0.0 or the allowable yaw angle closest to 0.0 deg.\n # This solution addresses both downstream turbines, minimizing their abs.\n # yaw offset, and additionally fixing equality-constrained turbines to\n # their appropriate yaw angle.\n idx = (minimum_yaw_angle_subset > 0.0) | (maximum_yaw_angle_subset < 0.0)\n if np.any(idx):\n # Find bounds closest to 0.0 deg\n combined_bounds = np.concatenate(\n (\n np.expand_dims(minimum_yaw_angle_subset, axis=3),\n np.expand_dims(maximum_yaw_angle_subset, axis=3)\n ),\n axis=3\n )\n # Overwrite all values that are not allowed to be 0.0 with bound value closest to zero\n ids_closest = np.expand_dims(np.argmin(np.abs(combined_bounds), axis=3), axis=3)\n yaw_mb = np.squeeze(np.take_along_axis(combined_bounds, ids_closest, axis=3))\n yaw_angles_template_subset[idx] = yaw_mb[idx]\n\n # Save all subset variables to self\n self._nwinddirections_subset = nwinddirections_subset\n self._minimum_yaw_angle_subset = minimum_yaw_angle_subset\n self._maximum_yaw_angle_subset = maximum_yaw_angle_subset\n self._x0_subset = x0_subset\n self._turbs_to_opt_subset = turbs_to_opt_subset\n self._turbine_weights_subset = turbine_weights_subset\n self._yaw_angles_template_subset = yaw_angles_template_subset\n self._yaw_angles_baseline_subset = yaw_angles_baseline_subset", "def _compute_control_inputs(self, traj ):\n\n r = traj.u.copy() # reference is input of combined sys\n npts = traj.t.shape[0]\n u = np.zeros([npts ,self.cds.plant.m])\n\n # Compute internal input signal_proc\n for i in range(npts):\n\n ri = r[i,:]\n yi = traj.y[i,:]\n xi = traj.x[i,:]\n ti = traj.t[i]\n\n # extract internal controller states\n xi,zi = self.cds._split_states( xi ) \n\n ui = self.cds.controller.c( zi, yi , ri , ti )\n\n u[i,:] = ui\n\n return u", "def _compute_control_inputs(self, traj):\n\n r = traj.u.copy() # reference is input of combined sys\n npts = traj.t.shape[0]\n u = np.zeros([npts, self.cds.plant.m])\n\n # Compute internal input\n for i in range(npts):\n\n ri = r[i,:]\n yi = traj.y[i,:]\n ti = traj.t[i]\n\n ui = self.cds.controller.c( yi , ri , ti )\n\n u[i,:] = ui\n\n return u", "def compute_gains(Q, R, W, V, dt):\n\n data = np.empty((N,), dtype=controller_t)\n\n # Loop over all speeds for which we have system dynamics\n for i in range(N):\n data['theta_R_dot'][i] = theta_R_dot[i]\n data['dt'][i] = dt\n # Convert the bike dynamics to discrete time using a zero order hold\n data['A'][i], data['B'][i], _, _, 
_ = cont2discrete(\n (A_w[i], B_w[i, :], eye(4), zeros((4, 1))), dt)\n data['plant_evals_d'][i] = la.eigvals(data['A'][i])\n data['plant_evals_c'][i] = np.log(data['plant_evals_d'][i]) / dt\n \n # Bicycle measurement matrices\n # - steer angle\n # - roll rate\n data['C_m'][i] = C_w[i, :2, :]\n # - yaw rate\n data['C_z'][i] = C_w[i, 2, :]\n\n A = data['A'][i]\n B = data['B'][i, :, 2].reshape((4, 1))\n C_m = data['C_m'][i]\n C_z = data['C_z'][i]\n\n # Controllability from steer torque\n data['ctrb_plant'][i] = ctrb(A, B)\n u, s, v = la.svd(data['ctrb_plant'][i])\n assert(np.all(s > 1e-13))\n\n # Solve discrete algebraic Ricatti equation associated with LQI problem\n P_c = dare(A, B, R, Q)\n \n # Optimal feedback gain using solution of Ricatti equation\n K_c = -la.solve(R + dot(B.T, dot(P_c, B)),\n dot(B.T, dot(P_c, A)))\n data['K_c'][i] = K_c\n data['A_c'][i] = A + dot(B, K_c)\n data['B_c'][i] = B\n data['controller_evals'][i] = la.eigvals(data['A_c'][i])\n data['controller_evals_c'][i] = np.log(data['controller_evals'][i]) / dt\n assert(np.all(abs(data['controller_evals'][i]) < 1.0))\n\n # Observability from steer angle and roll rate measurement\n # Note that (A, C_m * A) must be observable in the \"current estimator\"\n # formulation\n data['obsv_plant'][i] = obsv(A, dot(C_m, A))\n u, s, v = la.svd(data['obsv_plant'][i])\n assert(np.all(s > 1e-13))\n\n # Solve Riccati equation\n P_e = dare(A.T, C_m.T, V, W)\n # Compute Kalman gain\n K_e = dot(P_e, dot(C_m.T, la.inv(dot(C_m, dot(P_e, C_m.T)) + V)))\n data['K_e'][i] = K_e\n data['A_e'][i] = dot(eye(4) - dot(K_e, C_m), A)\n data['B_e'][i] = np.hstack((dot(eye(4) - dot(K_e, C_m), B), K_e))\n data['estimator_evals'][i] = la.eigvals(data['A_e'][i])\n data['estimator_evals_c'][i] = np.log(data['estimator_evals'][i]) / dt\n # Verify that Kalman estimator eigenvalues are stable\n assert(np.all(abs(data['estimator_evals'][i]) < 1.0))\n\n # Closed loop state space equations\n A_cl = np.zeros((8, 8))\n A_cl[:4, :4] = A\n A_cl[:4, 4:] = dot(B, K_c)\n A_cl[4:, :4] = dot(K_e, dot(C_m, A))\n A_cl[4:, 4:] = A - A_cl[4:, :4] + A_cl[:4, 4:]\n data['A_cl'][i] = A_cl\n data['closed_loop_evals'][i] = la.eigvals(A_cl)\n assert(np.all(abs(data['closed_loop_evals'][i]) < 1.0))\n\n B_cl = np.zeros((8, 1))\n B_cl[:4, 0] = B.reshape((4,))\n B_cl[4:, 0] = dot(eye(4) - dot(K_e, C_m), B).reshape((4,))\n data['B_cl'][i] = B_cl\n\n C_cl = np.hstack((C_z, np.zeros((1, 4))))\n data['C_cl'][i] = C_cl\n\n # Transfer functions from r to yaw rate\n num, den = ss2tf(A_cl, B_cl, C_cl, 0)\n data['w_r_to_psi_dot'][i], y = freqz(num[0], den)\n data['w_r_to_psi_dot'][i] /= (dt * 2.0 * np.pi)\n data['mag_r_to_psi_dot'][i] = 20.0 * np.log10(abs(y))\n data['phase_r_to_psi_dot'][i] = np.unwrap(np.angle(y)) * 180.0 / np.pi\n\n # Open loop transfer function from e to yaw rate (PI loop not closed,\n # but LQR/LQG loop closed.\n inner_cl = ss(A_cl, B_cl, C_cl, 0)\n pi_block = ss([[1]], [[1]], [[data['Ki_fit'][i]*dt]], [[data['Kp_fit'][i]]])\n e_to_psi_dot = series(pi_block, inner_cl)\n num, den = ss2tf(e_to_psi_dot.A, e_to_psi_dot.B, e_to_psi_dot.C, e_to_psi_dot.D)\n data['w_e_to_psi_dot'][i], y = freqz(num[0], den)\n data['w_e_to_psi_dot'][i] /= (dt * 2.0 * np.pi)\n data['mag_e_to_psi_dot'][i] = 20.0 * np.log10(abs(y))\n data['phase_e_to_psi_dot'][i] = np.unwrap(np.angle(y)) * 180.0 / np.pi\n\n\n\n\n return data", "def optimizer(self, temperature):\n G_V = [] # G for each volume\n # G = E(V) + PV + A_vib(V, T)\n for i, v in enumerate(self.volumes):\n G_V.append(self.energies[i] +\n 
self.pressure * v * self.gpa_to_ev_ang +\n self.vibrational_free_energy(temperature, v))\n\n # fit equation of state, G(V, T, P)\n eos_fit = self.eos.fit(self.volumes, G_V)\n # minimize the fit eos wrt volume\n # Note: the ref energy and the ref volume(E0 and V0) not necessarily\n # the same as minimum energy and min volume.\n volume_guess = eos_fit.volumes[np.argmin(eos_fit.energies)]\n min_wrt_vol = minimize(eos_fit.func, volume_guess)\n # G_opt=G(V_opt, T, P), V_opt\n return min_wrt_vol.fun, min_wrt_vol.x[0]", "def get_dd_wind_field(Grids, u_init, v_init, w_init, vel_name=None,\n refl_field=None, u_back=None, v_back=None, z_back=None,\n frz=4500.0, Co=1.0, Cm=1500.0, Cx=0.0,\n Cy=0.0, Cz=0.0, Cb=0.0, Cv=0.0, Ut=None, Vt=None,\n filt_iterations=2, mask_outside_opt=False, \n max_iterations=200, mask_w_outside_opt=True, \n filter_window=9, filter_order=4, min_bca=30.0, \n max_bca=150.0, upper_bc=True):\n \n num_evaluations = 0\n \n if(Ut == None or Vt == None):\n if(Cv != 0.0):\n raise ValueError(('Ut and Vt cannot be None if vertical ' +\n 'vorticity constraint is enabled!'))\n \n # Disable background constraint if none provided\n if(u_back == None or v_back == None):\n u_back2 = np.zeros(u_init.shape[0])\n v_back2 = np.zeros(v_init.shape[0])\n C8 = 0.0\n else:\n # Interpolate sounding to radar grid\n print('Interpolating sounding to radar grid')\n u_interp = interp1d(z_back, u_back, bounds_error=False)\n v_interp = interp1d(z_back, v_back, bounds_error=False)\n u_back2 = u_interp(Grids[0].z['data'])\n v_back2 = v_interp(Grids[0].z['data'])\n print('Interpolated U field:')\n print(u_back2)\n print('Interpolated V field:')\n print(v_back2)\n print('Grid levels:')\n print(Grids[0].z['data'])\n\n # Parse names of velocity field\n if refl_field is None:\n refl_field = pyart.config.get_field_name('reflectivity')\n\n # Parse names of velocity field\n if vel_name is None:\n vel_name = pyart.config.get_field_name('corrected_velocity') \n winds = np.stack([u_init, v_init, w_init])\n wts = []\n vrs = []\n azs = []\n els = []\n \n # Set up wind fields and weights from each radar\n weights = np.zeros(\n (len(Grids), u_init.shape[0], u_init.shape[1], u_init.shape[2]))\n bg_weights = np.zeros(v_init.shape)\n bca = np.zeros(\n (len(Grids), len(Grids), u_init.shape[1], u_init.shape[2]))\n M = np.zeros(len(Grids))\n sum_Vr = np.zeros(len(Grids))\n\n for i in range(len(Grids)):\n wts.append(cost_functions.calculate_fall_speed(Grids[i], \n refl_field=refl_field))\n add_azimuth_as_field(Grids[i], dz_name=refl_field)\n add_elevation_as_field(Grids[i], dz_name=refl_field)\n vrs.append(Grids[i].fields[vel_name]['data'])\n azs.append(Grids[i].fields['AZ']['data']*np.pi/180)\n els.append(Grids[i].fields['EL']['data']*np.pi/180)\n \n if(len(Grids) > 1): \n for i in range(len(Grids)): \n for j in range(i+1, len(Grids)):\n print((\"Calculating weights for radars \" + str(i) +\n \" and \" + str(j)))\n bca[i,j] = get_bca(Grids[i].radar_longitude['data'],\n Grids[i].radar_latitude['data'],\n Grids[j].radar_longitude['data'],\n Grids[j].radar_latitude['data'],\n Grids[i].point_x['data'][0],\n Grids[i].point_y['data'][0],\n Grids[i].get_projparams())\n\n for k in range(vrs[i].shape[0]):\n cur_array = weights[i,k]\n cur_array[np.logical_and(\n vrs[i][k].mask == False,\n np.logical_and(\n bca[i,j] >= math.radians(min_bca), \n bca[i,j] <= math.radians(max_bca)))] += 1\n weights[i,k] = cur_array\n cur_array = weights[j,k]\n cur_array[np.logical_and(\n vrs[j][k].mask == False,\n np.logical_and(\n bca[i,j] >= 
math.radians(min_bca), \n bca[i,j] <= math.radians(max_bca)))] += 1\n weights[j,k] = cur_array\n cur_array = bg_weights[k]\n cur_array[np.logical_or(\n bca[i,j] >= math.radians(min_bca),\n bca[i,j] <= math.radians(max_bca))] = 1\n cur_array[vrs[i][k].mask == True] = 0\n bg_weights[i] = cur_array\n else:\n weights[0] = np.where(vrs[0].mask == False, 1, 0)\n bg_weights[0] = np.where(vrs[0].mask == False, 0, 1)\n \n weights[weights > 0] = 1 \n sum_Vr = np.sum(np.square(vrs*weights))\n\n rmsVr = np.sum(sum_Vr)/np.sum(weights)\n \n del bca\n grid_shape = u_init.shape\n # Parse names of velocity field\n\n winds = winds.flatten()\n #ones = np.ones(winds.shape)\n\n ndims = len(winds)\n\n print((\"Starting solver \"))\n dx = np.diff(Grids[0].x['data'], axis=0)[0]\n dy = np.diff(Grids[0].y['data'], axis=0)[0]\n dz = np.diff(Grids[0].z['data'], axis=0)[0]\n print('rmsVR = ' + str(rmsVr))\n print('Total points:' +str(weights.sum()))\n z = Grids[0].point_z['data']\n\n the_time = time.time()\n bt = time.time()\n \n # First pass - no filter\n wcurr = w_init\n wprev = 100*np.ones(w_init.shape)\n wprevmax = 99\n wcurrmax = w_init.max()\n iterations = 0\n warnflag = 99999\n coeff_max = np.max([Co, Cb, Cm, Cx, Cy, Cz, Cb])\n bounds = [(-x,x) for x in 100*np.ones(winds.shape)]\n while(iterations < max_iterations and \n (abs(wprevmax-wcurrmax) > 0.02)):\n wprevmax = wcurrmax\n winds = fmin_l_bfgs_b(J_function, winds, args=(vrs, azs, els, \n wts, u_back, v_back,\n Co, Cm, Cx, Cy, Cz, Cb,\n Cv, Ut, Vt,\n grid_shape, \n dx, dy, dz, z, rmsVr, \n weights, bg_weights,\n upper_bc),\n maxiter=10, pgtol=1e-3, bounds=bounds, \n fprime=grad_J, disp=1, iprint=-1)\n \n\n # Print out cost function values after 10 iterations\n J = J_function(winds[0], vrs, azs, els, wts, u_back, v_back,\n Co, Cm, Cx, Cy, Cz, Cb, Cv, Ut, Vt, grid_shape, \n dx, dy, dz, z, rmsVr, weights, bg_weights, upper_bc=True,\n print_out=True)\n J = grad_J(winds[0], vrs, azs, els, wts, u_back, v_back,\n Co, Cm, Cx, Cy, Cz, Cb, Cv, Ut, Vt, grid_shape, \n dx, dy, dz, z, rmsVr, weights, bg_weights, upper_bc=True,\n print_out=True)\n \n warnflag = winds[2]['warnflag']\n \n winds = np.reshape(winds[0], (3, grid_shape[0], grid_shape[1],\n grid_shape[2]))\n iterations = iterations+10\n print('Iterations before filter: ' + str(iterations))\n \n wcurrmax = winds[2].max()\n \n winds = np.stack([winds[0], winds[1], winds[2]])\n winds = winds.flatten()\n\n \n if(filt_iterations > 0):\n print('Applying low pass filter to wind field...')\n winds = np.reshape(winds, (3, grid_shape[0], grid_shape[1],\n grid_shape[2]))\n winds[0] = savgol_filter(winds[0], 9, 3, axis=0)\n winds[0] = savgol_filter(winds[0], 9, 3, axis=1)\n winds[0] = savgol_filter(winds[0], 9, 3, axis=2)\n winds[1] = savgol_filter(winds[1], 9, 3, axis=0)\n winds[1] = savgol_filter(winds[1], 9, 3, axis=1)\n winds[1] = savgol_filter(winds[1], 9, 3, axis=2)\n winds[2] = savgol_filter(winds[2], 9, 3, axis=0)\n winds[2] = savgol_filter(winds[2], 9, 3, axis=1)\n winds[2] = savgol_filter(winds[2], 9, 3, axis=2) \n \n winds = np.stack([winds[0], winds[1], winds[2]])\n winds = winds.flatten()\n iterations = 0\n while(iterations < filt_iterations):\n winds = fmin_l_bfgs_b(\n J_function, winds, args=(\n vrs, azs, els, wts, u_back, v_back, Co, Cm, Cx, Cy, Cz, Cb,\n Cv, Ut, Vt, grid_shape, dx, dy, dz, z, rmsVr, weights, \n bg_weights,upper_bc),\n maxiter=10, pgtol=1e-3, bounds=bounds, \n fprime=grad_J, disp=1, iprint=-1)\n\n warnflag = winds[2]['warnflag']\n \n winds = np.reshape(winds[0], (3, grid_shape[0], 
grid_shape[1],\n grid_shape[2]))\n iterations = iterations+1\n print('Iterations after filter: ' + str(iterations))\n \n winds = np.stack([winds[0], winds[1], winds[2]])\n winds = winds.flatten()\n \n print(\"Done! Time = \" + \"{:2.1f}\".format(time.time() - bt))\n\n # First pass - no filter\n\n the_winds = np.reshape(winds, (3, grid_shape[0], grid_shape[1],\n grid_shape[2]))\n u = the_winds[0]\n v = the_winds[1]\n w = the_winds[2]\n\n where_mask = np.sum(weights, axis=0)\n if(mask_outside_opt==True):\n u = np.ma.masked_where(where_mask < 1, u)\n v = np.ma.masked_where(where_mask < 1, v)\n w = np.ma.masked_where(where_mask < 1, w)\n if(mask_w_outside_opt==True):\n w = np.ma.masked_where(where_mask < 1, w)\n\n u_field = deepcopy(Grids[0].fields[vel_name])\n u_field['data'] = u\n u_field['standard_name'] = 'u_wind'\n u_field['long_name'] = 'meridional component of wind velocity'\n u_field['min_bca'] = min_bca\n u_field['max_bca'] = max_bca\n v_field = deepcopy(Grids[0].fields[vel_name])\n v_field['data'] = v\n v_field['standard_name'] = 'v_wind'\n v_field['long_name'] = 'zonal component of wind velocity' \n v_field['min_bca'] = min_bca\n v_field['max_bca'] = max_bca\n w_field = deepcopy(Grids[0].fields[vel_name])\n w_field['data'] = w\n w_field['standard_name'] = 'w_wind'\n w_field['long_name'] = 'vertical component of wind velocity' \n w_field['min_bca'] = min_bca\n w_field['max_bca'] = max_bca\n\n \n new_grid_list = []\n \n for grid in Grids:\n temp_grid = deepcopy(grid)\n temp_grid.add_field('u', u_field, replace_existing=True)\n temp_grid.add_field('v', v_field, replace_existing=True)\n temp_grid.add_field('w', w_field, replace_existing=True)\n \n new_grid_list.append(temp_grid)\n \n return new_grid_list", "def solve(self, time_steps: int, warm_start_method: str = mantrap.constants.WARM_START_HARD, **kwargs\n ) -> typing.Tuple[torch.Tensor, torch.Tensor]:\n ego_trajectory_opt = torch.zeros((time_steps + 1, 5))\n ado_trajectories = torch.zeros((self.env.num_ados, time_steps + 1, 1, 5))\n self.logger.log_reset()\n env_copy = self.env.copy()\n eval_env_copy = self.eval_env.copy()\n\n # Initialize trajectories with current state and environment time.\n ego_trajectory_opt[0] = self._env.ego.state_with_time\n for m_ado, ado in enumerate(self.env.ados):\n ado_trajectories[m_ado, 0, 0, :] = ado.state_with_time\n\n # Warm-start the optimization using a simplified optimization formulation.\n z_warm_start = self.warm_start(method=warm_start_method)\n\n logging.debug(f\"Starting trajectory optimization solving for planning horizon {time_steps} steps ...\")\n for k in range(time_steps):\n logging.debug(\"#\" * 30 + f\"solver {self.log_name} @k={k}: initializing optimization\")\n\n # Solve optimisation problem.\n z_k = self.optimize(z_warm_start, tag=mantrap.constants.TAG_OPTIMIZATION, **kwargs)\n ego_controls_k = self.z_to_ego_controls(z_k.detach().numpy())\n assert mantrap.utility.shaping.check_ego_controls(ego_controls_k, t_horizon=self.planning_horizon)\n\n # Warm-starting using the next optimization at time-step k+1 using the recent results k.\n # As we have proceeded one time-step, use the recent results for one-step ahead, and append\n # zero control actions to it.\n z_warm_start = self.ego_controls_to_z(torch.cat((ego_controls_k[1:, :], torch.zeros((1, 2)))))\n z_warm_start = torch.from_numpy(z_warm_start) # detached !\n\n # Forward simulate environment.\n ado_states, ego_state = self._eval_env.step(ego_action=ego_controls_k[0, :])\n self._env.step_reset(ego_next=ego_state, 
ado_next=ado_states)\n ego_trajectory_opt[k + 1, :] = ego_state\n ado_trajectories[:, k + 1, 0, :] = ado_states\n\n # If the goal state has been reached, break the optimization loop (and shorten trajectories to\n # contain only states up to now (i.e. k + 1 optimization steps instead of max_steps).\n if torch.norm(ego_state[0:2] - self.goal) < mantrap.constants.SOLVER_GOAL_END_DISTANCE:\n ego_trajectory_opt = ego_trajectory_opt[:k + 2, :].detach()\n ado_trajectories = ado_trajectories[:, :k + 2, :, :].detach()\n\n # Log a last time in order to log the final state, after the environment has executed it\n # its update step. However since the controls have not changed, but still the planned\n # trajectories should all have the same shape, the concatenate no action (zero controls).\n if self.logger.is_logging:\n ego_controls = torch.cat((ego_controls_k[1:, :], torch.zeros((1, 2))))\n ego_trajectory = self.env.ego.unroll_trajectory(ego_controls, dt=self.env.dt)\n self.__intermediate_log(ego_trajectory=ego_trajectory)\n break\n\n # Increment solver iteration.\n self.logger.increment()\n\n # Update the logging with the actual ego and ado trajectories (currently only samples).\n actual_dict = {f\"{mantrap.constants.LT_EGO}_actual\": ego_trajectory_opt,\n f\"{mantrap.constants.LT_ADO}_actual\": ado_trajectories}\n self.logger.log_append(**actual_dict, tag=mantrap.constants.TAG_OPTIMIZATION)\n\n # Cleaning up solver environment and summarizing logging.\n logging.debug(f\"solver {self.log_name}: logging trajectory optimization\")\n self.env.detach() # detach environment from computation graph\n self.logger.log_store(csv_name=f\"{self.log_name}.{self.env.log_name}\")\n\n # Reset environment to initial state. Some modules are also connected to the old environment,\n # which has been forward predicted now. 
Reset these to the original environment copy.\n self._env = env_copy\n self._eval_env = eval_env_copy\n for module in self.modules:\n module.reset_env(env=self.env)\n\n logging.debug(f\"solver {self.log_name}: finishing up optimization process\")\n return ego_trajectory_opt, ado_trajectories", "def geometry_optimization(self):\n input = self.sample_qe_inputs\n input[\"control_params\"][\"calculation\"] = \"'vc-relax'\"\n return input", "def InputVariables(parameters_dict, n_option = \"random\", nmin = 0.1, nmax = 0.2, m = 0.03):\n nx, ny = parameters_dict['nx'], parameters_dict['ny'] #retrieve grid size\n dx = parameters_dict['dx']\n\n # set cell initial distribution based on function input\n while n_option not in ['uniform', 'random', 'linear', 'sinusoidal']:\n print(\"Invalid initial cell distribution choice made (can be 'uniform', 'random', 'linear' or 'sinusoidal')\")\n exit()\n\n if n_option in ['uniform']: #selects uniform distribution n = nmin \n n = nmin * np.ones((nx, ny))\n\n if n_option in ['random']: #selects distribution with random fluctuations between cmin and cmax\n np.random.seed(42)\n n = nmin + ((nmax - nmin) * np.random.rand(nx, ny))\n \n if n_option in ['linear']: #selects linear distribution between cmin and cmax\n n = np.zeros((nx, ny))\n for i in range(ny):\n n[i, :] = nmin + ((nmax - nmin) / (ny-1)) * (i)\n \n if n_option in ['sinusoidal']:\n n = (nmin + ((nmax - nmin) / 2)) * np.ones((nx, ny))\n for i in range(ny):\n n[i, :] += ((nmax - nmin) / 2) * np.sin(20 * np.pi * i * dx)\n\n # amount of free volume\n phi = 1 - m\n\n # water volume fraction dependent on cell distribution via no voids constraint (n + w + m = 1)\n w = phi - n \n\n # water velocity \n uw = np.zeros((nx, ny))\n vw = np.zeros((nx, ny))\n\n # create variables dictionary\n\n # update parameters dictionary \n parameters_dict[\"phi\"] = phi\n parameters_dict[\"m\"] = m \n\n return n, w, uw, vw, parameters_dict", "def solve(self):\n \n # States used in value iteration\n states = self.cells\n state_container = {cell.get_name():cell for cell in states}\n \n # Values for the states\n value = {cell.get_name():0 for cell in states}\n value_new = {cell.get_name():value[cell.get_name()] for cell in states}\n \n # Current policy\n policy = {cell.get_name():\"\" for cell in states}\n \n # Maximum number of iterations\n N_max = 10000 # Should be sufficient for pretty much all scenarios\n \n # Value iteration\n for i in range(N_max):\n \n # Iterate each state\n for state in states:\n \n # Possible actions from current state\n actions = [\"north\",\"east\",\"south\",\"west\"]\n \n # Find best action for current state\n max_action = actions[0]\n max_value = -10000\n for action in actions:\n \n # The possible actual outcomes from current action\n # First one is always the correct outcome\n possible_outcomes = self.get_possible_outcomes(action)\n probs = [0.8, 0.1, 0.1]\n \n # Calculate the value of current action in current state\n current_value = 0\n \n # Three possible outcomes from current action\n for j in range(3):\n outcome = possible_outcomes[j]\n \n # Teleportation\n if state.get_name() == \"43\":\n next_cell = state_container[\"11\"]\n \n # Get next state if outcome is legit move\n elif outcome in state.get_moves():\n x_new = state.get_x() + self.MOVES[outcome][0]\n y_new = state.get_y() + self.MOVES[outcome][1]\n next_cell = state_container[str(x_new)+str(y_new)]\n \n # Remain in the current state if outcome is not legit\n else:\n next_cell = state\n \n # Update value of current action\n current_value += 
probs[j]* \\\n (self.get_immediate_reward(next_cell) + \\\n self.gamma*value[next_cell.get_name()])\n \n # If the value of current action is best so far,\n # update value function and policy\n if (current_value > max_value):\n max_action = action\n max_value = current_value\n \n # Store value and policy for this iteration in their dicts\n value_new[state.get_name()] = max_value\n policy[state.get_name()] = max_action\n \n # Check for convergence using the given threshold\n terminate = True\n for state in states:\n state_name = state.get_name()\n diff = value_new[state_name] - value[state_name]\n if diff > self.epsilon*(1-self.gamma)/(2*self.gamma) or \\\n diff < -self.epsilon*(1-self.gamma)/(2*self.gamma):\n terminate = False\n \n if terminate:\n if self.conv:\n print(\"Value iteration converged after \"+str(i), \\\n \" iterations.\")\n return [value_new, policy]\n else:\n value = {cell.get_name():value_new[cell.get_name()] \\\n for cell in states}", "def _solve(self):\n data = {}\n for n in xrange(NUM_SAMPLES):\n # accumulate and solve system of equations\n # component equations\n equations = reduce(list.__add__, (component.equations() for component in\n self.components), [])\n # one KCL equation per node in the circuit (excluding ground node)\n KCL = {}\n for component in self.components:\n component.KCL_update(KCL)\n equations.extend([KCL[node] for node in KCL if node is not self.gnd])\n # assert that ground voltage is 0\n equations.append([(1, self.gnd)])\n # solve system of equations\n data[n * T] = solve_equations(equations)\n # step components, providing them the solution for the current time step\n for component in self.components:\n component.step(data[n * T])\n return data", "def get_control_input(self):\n # Get outputs\n neighbor_outputs = self.__simulator.get_neighbor_outputs(self.__master_agent)\n master_output = self.__master_agent.send_output()\n\n # Calculate control input\n sines = [np.sin(neighbor_output - master_output) for neighbor_output in neighbor_outputs]\n control_input = np.sum(np.array(sines)) + self.__nat_freq\n control_input *= self.__simulator.get_dt()\n return control_input", "def loop_analysis(g, ctrl_src, tree, types={}):\n eqs = []\n vars = []\n\n # loop equations of the cobranch voltages\n cobranches = g.branches() - tree.branches()\n for cobranch in cobranches:\n if btype(cobranch, types) not in 'IFG':\n lpos, lneg = g.loop(cobranch, tree, include_cobranch=True)\n lhs_pos = [Calculus(f_u(b, ctrl_src, types)) for b in lpos]\n lhs_neg = [Calculus(f_u(b, ctrl_src, types)) for b in lneg]\n lhs = Add(*lhs_pos) - Add(*lhs_neg)\n eqs.append(lhs)\n if btype(cobranch, types) not in 'U':\n vars.append(Calculus('I_'+cobranch))\n if btype(cobranch, types) in 'N':\n vars.append(Calculus('V_'+cobranch))\n\n # cut equations of the tree currents (for substitution or additional)\n tcur = {}\n for tb in tree.branches():\n bpos, bneg = g.cut(tb, tree, include_tree_branch=False)\n # moving (bpos, bneg) from lhs to rhs by negation\n rhs_pos = [Calculus(branch_current(b, ctrl_src, types)) for b in bneg]\n rhs_neg = [Calculus(branch_current(b, ctrl_src, types)) for b in bpos]\n rhs = Add(*rhs_pos) - Add(*rhs_neg)\n if btype(tb, types) not in 'IFGU':\n tcur[Calculus('I_'+tb)] = rhs.expand()\n if btype(tb, types) in 'N':\n vars.append(Calculus('V_'+tb))\n else:\n eqs.append(Calculus(f_i(tb, ctrl_src, types)) - rhs)\n if btype(tb, types) not in 'U':\n vars.append(Calculus('V_'+tb))\n\n # finally add variables and equations of controlled sources\n for src, ctrl in 
ctrl_src.items():\n if btype(src, types) in 'EG' and btype(ctrl, types) != 'V':\n v_ctrl = Calculus('V_'+ctrl)\n if v_ctrl not in vars: # ctrl is no 'IFG' in tree\n # control voltage is unknown\n vars.append(v_ctrl)\n # try if control voltage can be substituted\n # then control branch is a (controlled) voltage source\n lhs = v_ctrl - f_u(ctrl, ctrl_src, types)\n if lhs == 0:\n # ctrl is a current source\n if ctrl in cobranches:\n # add missing loop equation for v_ctrl which\n # was omitted due to: btype(cobranch, types) not in 'IFG'\n lpos, lneg = g.loop(ctrl, tree, include_cobranch=False)\n lhs_pos = [Calculus(f_u(b, ctrl_src, types)) for b in lpos]\n lhs_neg = [Calculus(f_u(b, ctrl_src, types)) for b in lneg]\n lhs = v_ctrl + Add(*lhs_pos) - Add(*lhs_neg)\n # if v_ctrl is in tree then var and cut equation are\n # already added because ctrl is a current source\n eqs.append(lhs)\n elif btype(src, types) in 'F' and src in cobranches:\n if ctrl in tree.branches() and btype(ctrl, types) not in 'IFG':\n i_ctrl = Calculus('I_'+ctrl)\n # just a test avoiding duplication\n if i_ctrl not in vars:\n # control current is unknown\n vars.append(i_ctrl)\n # remove cut equation for ctrl from tcur and add it to eqs\n eqs.append(i_ctrl - tcur.pop(i_ctrl))\n\n # substitute tree currents by cobranch currents\n eqs = [e.subs(tcur).expand() for e in eqs]\n return eqs, vars", "def compute_inputs(end_vc, end_gr):\n vc = end_vc\n gr = end_gr\n\n # compute initial required inputs, traversing backwards\n while True:\n gr = input_range(vc, vc, gr)\n vc.input_gr = gr\n if vc.parent is None:\n break\n vc = vc.parent\n\n # compute actual inputs, arising from feeding first required input\n # forward\n while True:\n gr = output_range(vc, vc, vc.input_gr)\n if vc.child is None:\n break\n vc = vc.child\n if vc.do_trim_input:\n vc.input_trim = [vc.input_gr.sub[0] - gr.sub[0], gr.sub[1] - vc.input_gr.sub[1]]\n else:\n vc.input_gr = gr\n return gr", "def get_control_step(self,desired_u_step,ku,thetao,theta_goal,disc_len):\n delta_theta = np.abs(thetao-theta_goal)\n curvature_max = ku\n #Calculate the average arc length if we were to drill with full curvature\n #and half curvature capabilities\n avg_arc_length = 3/2*delta_theta/curvature_max*100\n u_solver_step = int(desired_u_step*disc_len/avg_arc_length)\n u_solver_step = max(u_solver_step,1)\n return u_solver_step", "def solve(self, maxiter=100, init_xs=None, init_us=None, isFeasible=False, regInit=None):\n \n # if not accounting for gaps rollout initial trajectory \n if not self.withGaps:\n init_xs = self.problem.rollout(init_us)\n isFeasible = True\n \n # set solver.xs and solver.us and solver.isFeasible \n self.setCandidate(init_xs, init_us, isFeasible)\n \n self.n_little_improvement = 0\n # set regularization values \n self.x_reg = regInit if regInit is not None else self.regMin\n self.u_reg = regInit if regInit is not None else self.regMin\n\n for i in range(maxiter):\n recalc = True # flag to recalculate dynamics & derivatives \n # backward pass and regularize \n backwardFlag = self.computeDirection(recalc=recalc)\n\n if not backwardFlag:\n # if backward pass fails after all regularization\n print(' Failed to compute backward pass at maximum regularization '.center(LINE_WIDTH,'#')) \n return self.xs, self.us, False\n\n \n for a in self.alphas:\n try:\n self.dV = self.tryStep(a)\n except:\n print('Try step failed ')\n continue\n\n if self.dV > 0.:\n # Accept step\n #TODO: find a better criteria to accept the step \n self.setCandidate(self.xs_try, self.us_try, 
self.isFeasible)\n self.cost = self.cost_try\n break\n # else:\n # self.n_little_improvement += 1 \n if a > self.th_step:\n self.decreaseRegularization()\n if a == self.alphas[-1] :\n self.n_little_improvement += 1 \n self.increaseRegularization()\n if self.x_reg == self.regMax:\n return self.xs, self.us, False\n # else:\n # self.n_little_improvement = 0\n self.stepLength = a\n self.iter = i\n self.stop = sum(self.stoppingCriteria())\n if self.callback is not None:\n [c(self) for c in self.callback]\n\n if self.stop < self.th_stop:\n print('Feedforward Norm %s, Solver converged '%self.stop)\n return self.xs, self.us, True\n \n if self.n_little_improvement == 10:\n print(' solver converged with little improvements in the last 6 steps ')\n return self.xs, self.us, self.isFeasible\n # Warning: no convergence in max iterations\n print('max iterations with no convergance')\n return self.xs, self.us, self.isFeasible", "def evaluation_step(self):\n current_step = self.n\n # first ode: d beta(t) = (beta0(t) + beta1(t)beta(t))dt\n beta0 = [-(self.b_f + self.c_f*self.p1_grid[current_step-1][t]**2) for t in range(len(self.time))]\n beta1 = [-(2*self.b + 2*self.c*self.p1_grid[current_step-1][t]) for t in range(len(self.time))]\n if self.solver=='Euler':\n self.beta.append(self._solve_ode_euler(beta0, beta1, self.gamma)) # beta is a funcation lambda\n else:\n self.beta.append(self._solve_ode_explicit(beta0, beta1, self.gamma)) # beta is a funcation lambda\n \n # second ode: d delta(t) = (delta0(t) + delta1(t)delta(t))dt\n delta0 = [-(2*self.c_f * self.p1_grid[current_step-1][t] * self.p2_grid[current_step-1][t] + 2*self.c*self.beta[current_step-1][t]*self.p2_grid[current_step-1][t]) for t in range(len(self.time))]\n delta1 = [-(self.b + self.c*self.p1_grid[current_step-1][t]) for t in range(len(self.time))]\n if self.solver == 'Euler':\n self.delta.append(self._solve_ode_euler(delta0, delta1, 0)) # delta is a function lambda\n else:\n self.delta.append(self._solve_ode_explicit(delta0, delta1, 0)) # delta is a function lambda\n \n # third ode: d phi = (phi0(t) + phi1(t)phi(t))dt\n phi0 = [-(self.sigma**2*self.beta[current_step-1][t] + self.c_f*self.p2_grid[current_step-1][t]**2 + self.c*self.delta[current_step-1][t]*self.p2_grid[current_step-1][t]) for t in range(len(self.time))]\n phi1 = [0]*len(self.time)\n if self.solver == 'Euler':\n self.phi.append(self._solve_ode_euler(phi0, phi1, 0)) # phi is a function lambda`A\n else:\n self.phi.append(self._solve_ode_explicit(phi0, phi1, 0)) # phi is a function lambda`A\n \n \n # we update p1 and p2:\n p1_new = np.array([-self.c/(2*self.c_f)*2*self.beta[current_step-1][t] for t in range(len(self.time))])\n p2_new = np.array([-self.c/(2*self.c_f)*self.delta[current_step-1][t] for t in range(len(self.time))])\n self.p1_grid.append(p1_new)\n self.p2_grid.append(p2_new)\n self.n += 1", "def response_to_control_inputs(veh, veh_next, dt):\n # variable lookup\n kappa = veh.kappa\n kappa_next = veh_next.kappa\n v = veh.v\n v_next = veh_next.v\n\n # compute curvature rate command\n kdot = (kappa_next - kappa)/dt\n\n # check against upper/lower bound on curvature rate\n kdot = min(kdot, DKMAX)\n kdot = max(kdot, DKMIN)\n\n # call speed control logic for safe speed\n veh_next = speed_control_logic(veh_next)\n\n # compute curvature at the next vehicle state\n kappa_next = kappa + kdot*dt\n\n # check upper/lower bound on curvature\n kappa_next = min(kappa_next, KMAX)\n kappa_next = max(kappa_next, KMIN)\n\n # compute acceleration command\n vdot = (v_next - v)/dt\n\n # 
check upper/lower bound on acceleration\n vdot = min(vdot, DVMAX)\n vdot = max(vdot, DVMIN)\n\n # compute velocity at next state\n veh_next.v = v + vdot*dt\n\n return veh_next" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the nearest obstacles w.r.t. the robot's current state and the obstacle positions
def calculate_nearest_obstacles(self, state, obstacles): robot_pos = state[0:2] nearest_obstacles_ind = np.where( np.linalg.norm(robot_pos - obstacles, axis=1) < self.obstacle_dist_tol) nearest_obstacles = obstacles[nearest_obstacles_ind] return nearest_obstacles
[ "def closest_obstacles(queen, obstacles):\n closest = [(0,0)]*8\n for obstacle in obstacles:\n t = obstacle_type(queen, obstacle)\n if t > -1 and closest[t] == (0,0):\n closest[t] = obstacle\n elif t > -1 and dist(queen,obstacle) < dist(queen,closest[t]):\n closest[t] = obstacle\n return closest", "def _get_coord_state(self):\n\n # Get the player coords\n blue_x, blue_y = self.blue_ball.position()\n red_x, red_y = self.red_ball.position()\n\n coords = [blue_x, blue_y, red_x, red_y]\n\n # Get the closest obstacle\n current_obstacle_set = self.obstacle_manager.oldest_obstacle_set()\n\n obs_1 = current_obstacle_set[0]\n left, right = obs_1.x_span()\n top, bottom = obs_1.get_top(), obs_1.get_bottom()\n obs_1_coords = [top, bottom, left, right]\n\n if len(current_obstacle_set) == 1:\n obs_2_coords = [0, 0, 0, 0]\n else:\n obs_2 = current_obstacle_set[1]\n left, right = obs_2.x_span()\n top, bottom = obs_2.get_top(), obs_2.get_bottom()\n obs_2_coords = [top, bottom, left, right]\n\n coords = coords + obs_1_coords + obs_2_coords\n\n \"\"\"\n # Get the second closest obstacle\n obstacles = self.obstacle_manager.get_obstacles()\n if len(obstacles) < 2:\n coords = coords + [0, 0, 0, 0, 0, 0, 0, 0]\n else:\n next_obstacle_set = obstacles[1]\n\n obs_1 = current_obstacle_set[0]\n left, right = obs_1.x_span()\n top, bottom = obs_1.get_top(), obs_1.get_bottom()\n obs_1_coords = [top, bottom, left, right]\n\n if len(next_obstacle_set) == 1:\n obs_2_coords = [0, 0, 0, 0]\n else:\n obs_2 = next_obstacle_set[1]\n left, right = obs_2.x_span()\n top, bottom = obs_2.get_top(), obs_2.get_bottom()\n obs_2_coords = [top, bottom, left, right]\n\n coords = coords + obs_1_coords + obs_2_coords\n\n \"\"\"\n coords = np.array(coords)\n\n return coords", "def calc_obs_dist_heuristic(self, trajectory, nearest_obstacles):\n trajectory_positions = trajectory[:, 0:2]\n obs_x = nearest_obstacles[:,0]\n obs_y = nearest_obstacles[:,1]\n\n dx = trajectory_positions[:, 0] - obs_x[:, None]\n dy = trajectory_positions[:, 1] - obs_y[:, None]\n euclidean_dist = np.hypot(dx, dy)\n\n if np.array(euclidean_dist <= self.robot.robot_radius + 0.2).any():\n return np.inf\n elif euclidean_dist.size == 0:\n # No nearest osbtacle therefore return cost of 0\n return 0.0\n else:\n # Return the inverse since we are trying to maximize the objective func for obstacles\n # Smaller the dist the higher the robot's desire to move around it\n min_dist = np.min(euclidean_dist)\n return 1.0 / min_dist", "def scan_obstacles(self, positions):\n # Calculate potential generated by obstacles\n mindistance = SCREEN_WIDTH\n v = pygame.math.Vector2(0,0) \n for position in positions:\n distance = (position - self.location).magnitude()\n if (self.mode == TRAINING and 0 < distance) or (self.mode == EVALUATION and 0 < distance < OBSERVABLE_RADIUS):\n if self.location[0] < position[0] and distance < mindistance:\n mindistance = distance\n # Get normalized direction of neighbor \n direction = position - self.location \n # Proporcional to the distance. 
The closer the stronger needs to be\n #direction = direction / sqrt(distance) \n v = direction\n # This gives the direction of the resulting potential \n self.obstacles = v.copy()", "def calc_dwa_control(self, robot_state, robot_goal, obstacles): \n # Best Metrics Initializer\n minimum_cost = np.inf # Initialize minimum cost to extremely large initially\n best_control_input = np.zeros(2) \n best_trajectory = deepcopy(robot_state)\n\n # Compute the resulting velocity search space\n Vr_v, Vr_omega = self.calculate_Vr(robot_state)\n\n # Trajectory set (store all possible circular trajectories for visualization)\n num_possible_trajectories = Vr_v.shape[0] * Vr_omega.shape[0]\n trajectory_set = np.zeros((0, self.n_horizon+1, robot_state.shape[0]))\n\n ### Evaluate (v,omega) pairs and searches for the best control input + trajectory\n x_init = deepcopy(robot_state)\n for v in Vr_v:\n for omega in Vr_omega:\n ### Generate predicted trajectories with (v, omega) pair from search space\n control_input = np.array([v, omega])\n trajectory = self.generate_trajectory(x_init, control_input)\n\n ### Evaluate the paths for nearest obstacles\n nearest_obstacles = self.calculate_nearest_obstacles(robot_state, obstacles)\n\n ### Check if velocity is admissable\n if self.is_admissable(trajectory, control_input, nearest_obstacles):\n trajectory_set = np.vstack((trajectory_set, trajectory[None]))\n\n ### Cost calculation\n angle_cost = self.alpha * self.calc_goal_heuristic(trajectory, robot_goal)\n dist_cost = self.beta * self.calc_obs_dist_heuristic(trajectory, nearest_obstacles)\n vel_cost = self.gamma * self.calc_vel_heuristic(trajectory, self.robot.max_vel)\n \n # Total cost\n total_cost = angle_cost + dist_cost + vel_cost\n\n ### Update best costs & store the best control inputs + trajectory\n if minimum_cost >= total_cost:\n # print(\"[!] Best Found (v,w): ({:.3f}, {:.3f})\".format(v, omega))\n minimum_cost = total_cost\n best_control_input[:] = control_input\n best_trajectory = trajectory\n\n # print(\"[!] Best Found (v,w): ({:.3f}, {:.3f}) \\tCost: {:.3f}\".format(v, omega, minimum_cost))\n\n ### Prevention of getting stuck in (v,omega) = 0 search space\n if (abs(best_control_input[0]) < self.stuck_space_tol and abs(robot_state[3]) < self.stuck_space_tol):\n print(\"[!] 
Robot stuck in 0 velocity, sending escape angular velocity to get out of region.\")\n best_control_input[1] = self.escape_ang_vel\n\n # print(\"robot state: \", robot_state)\n # print(\"best_control_input: \", best_control_input)\n # print(\"minimum_cost: \", minimum_cost)\n # print(\"Vr_v: \", Vr_v)\n # print(\"Vr_omega: \", Vr_omega)\n\n return best_control_input, best_trajectory, trajectory_set", "def findPathToClosestDot(self, gameState):\n # Here are some useful elements of the startState\n startPosition = gameState.getPacmanPosition(self.index)\n food = gameState.getFood()\n walls = gameState.getWalls()\n problem = AnyFoodSearchProblem(gameState, self.index)\n\n\n \"*** YOUR CODE HERE ***\"\n\n pacmanCurrent = [problem.getStartState(), [], 0]\n visitedPosition = set()\n # visitedPosition.add(problem.getStartState())\n fringe = util.PriorityQueue()\n fringe.push(pacmanCurrent, pacmanCurrent[2])\n while not fringe.isEmpty():\n pacmanCurrent = fringe.pop()\n if pacmanCurrent[0] in visitedPosition:\n continue\n else:\n visitedPosition.add(pacmanCurrent[0])\n if problem.isGoalState(pacmanCurrent[0]):\n return pacmanCurrent[1]\n else:\n pacmanSuccessors = problem.getSuccessors(pacmanCurrent[0])\n Successor = []\n for item in pacmanSuccessors: # item: [(x,y), 'direction', cost]\n if item[0] not in visitedPosition:\n pacmanRoute = pacmanCurrent[1].copy()\n pacmanRoute.append(item[1])\n sumCost = pacmanCurrent[2]\n Successor.append([item[0], pacmanRoute, sumCost + item[2]])\n for item in Successor:\n fringe.push(item, item[2])\n return pacmanCurrent[1]", "def naive_nearest_neighboor(set_rectangle, distance_used = distance):\n n = len(set_rectangle)\n \n if len(set_rectangle) == 1:\n return None\n \n #initialisation\n min_dist = distance(set_rectangle[0], set_rectangle[1])\n nearest_neighboor = (set_rectangle[0], set_rectangle[1])\n \n #testing every combination\n t1 = clock()\n for i in range(n):\n t2 = clock()\n for j in range(i+1,n):\n t3 = clock()\n dist = distance_used(set_rectangle[i], set_rectangle[j])\n if dist < min_dist:\n nearest_neighboor = (set_rectangle[i], set_rectangle[j])\n min_dist = dist\n t4 = clock()\n #print(\"calcul dans double boucle \", t4 - t3)\n t4 = clock()\n #print(\"calcul 1 ere boucle\", t4 - t2)\n t4 = clock()\n #print('calcul double boucle', t1 - t4)\n return nearest_neighboor", "def walk_and_avoid(self):\n\n # FIXME CIVILIAN move choosing does not consider criminals they can see more than one space away\n # doesn't consider moving towards a criminal they can see\n\n goal = self.current_route_goal()\n #print(\"goal: \" + str(goal))\n neighbourhoods = self.environment.grid.get_neighborhood(self.pos, moore=False, include_center=False, radius = 1)\n neighbors = self.environment.grid.get_neighbors(self.pos, moore=True, radius=self.vision, include_center=True)\n\n #Find nearby criminals for current location in the civilian's memory\n criminals_nearby = [crim for crim in filter(lambda agent: agent in self.memory, neighbors)]\n # if (len(criminals_nearby)):\n # print(\"criminals_nearby: \" + str(criminals_nearby))\n\n # The civilian should avoid buildings but should not avoid his home or workplace\n if len(criminals_nearby) == 0:\n #print(\"No criminals nearby!\")\n if not self.walk_across_buildings:\n next_moves = [cell for cell in filter(lambda x: (self.environment.can_agent_occupy_cell(x) or x == goal) and x != self.last_pos, neighbourhoods)]\n else:\n next_moves = [cell for cell in filter(lambda x: x != self.last_pos, neighbourhoods)]\n else:\n if not 
self.walk_across_buildings:\n next_moves = [cell for cell in filter(lambda x: self.environment.can_agent_occupy_cell(x) or x == goal, neighbourhoods)]\n else:\n next_moves = neighbourhoods\n\n if len(next_moves) == 0:\n next_moves.append(self.last_pos)\n\n random.shuffle(next_moves)\n\n # print(\"last_pos: \" + str(self.last_pos))\n self.last_pos = (self.pos[0], self.pos[1])\n\n\n\n # print(\"next_moves: \" + str(next_moves))\n\n\n\n #Make list of points nearby, calculate triangle inequality for ways to maximize min distance\n #from a criminal while walking route\n\n #if not criminals_nearby:\n #If not criminals nearby, then civilian walks to goal as usual\n # self.walk_route()\n # return True\n #else:\n #Calculate distance between nearby cells and the goal of the civilians route, choose the one that\n #maximizes distance from criminal and minimizes distance to goal\n\n #distance_from_cells_to_goal = [distance.euclidean(self.current_route_goal(), cell) for cell in next_moves]\n ###distance_from_self_to_criminal = [distance.euclidean(self.pos, criminal) for criminal in criminals_nearby]\n ###closest_criminal = next_moves[distance_from_self_to_criminal.index(min(distance_from_self_to_criminal))]\n #points_away_from_criminal = [distance.euclidean(cell, closest_criminal) for cell in next_moves]\n\n ###dist_between_criminal_and_goal = [distance.euclidean(distance.euclidean(cell, closest_criminal), distance.euclidean(cell, self.current_route_goal()))\n ###for cell in next_moves]\n ###cell_to_walk_to = next_moves[next_moves.index(max(dist_between_criminal_and_goal))]\n ###print(\"debug: \" + next_moves.index(max(dist_between_criminal_and_goal)))\n ###print(\"cell_to_walk_to: \" + str(cell_to_walk_to))\n\n ###self._walk_to(cell_to_walk_to)\n\n\n best_utility = -math.inf\n best_cell = (0, 0)\n\n # get the best utility based on larger distance from cell to criminals and shorter distance from cell to goal\n for cell in next_moves:\n utility = 0\n dist_to_criminals = 0\n\n # calculate total distance from cell to criminals\n for criminal in criminals_nearby:\n dist_to_criminals += distance.euclidean(cell, criminal.pos)\n\n # calculate utility\n # set 0.5 since to avoid criminals is more important\n utility = dist_to_criminals - 0.5 * distance.euclidean(cell, goal)\n if utility > best_utility:\n best_utility = utility\n best_cell = cell\n\n #print(\"best_cell: \" + str(best_cell))\n\n if best_cell != self.pos:\n self.environment.grid.move_agent(self, best_cell)\n # print(\"pos: \" + str(self.pos))\n return True\n else:\n return False\n\n\n #take the cell that minimizes the distance to my goal but that maxmizes my min distance from criminal\n\n\n\n\n #random.shuffle(next_moves)\n #for cell in next_moves:\n # if sum(agent in self.memory for agent in self.environment.grid.get_cell_list_contents(cell)):\n # continue\n\n # else:\n # Move to this cell where there is nobody we remember\n # self.walk_to(self, cell)\n # return True", "def get_move(self, neighbors: list[Agent],\n thermal_points: dict[str, float]) -> np.ndarray[int]:\n\n # Nothing to target when there is no neighors sensed\n best_pos = np.array(self.position) +np.random.randint(-3, 4, size=2, dtype=int)\n\n if (len(neighbors)==0):\n return best_pos\n\n # Collect relative position of neighbors\n neighbors_rpos = np.stack([n.position - self.position\n for n in neighbors])\n if self._movement_policy == \"average\":\n target_pos = np.sum(neighbors_rpos, axis=0)\n # Average policy takes means of all agent positions, set as target\n elif 
self._movement_policy == \"closest\":\n best_neighbor_i = np.argmin(\n np.abs(neighbors_rpos).sum(axis=1))\n target_pos = neighbors_rpos[best_neighbor_i]\n # Closest policy target the closest agent\n\n # Decide to move toward/away from target, or stay in place\n if self.core_temp < self._low_move_threshold:\n target_pos = target_pos + self.position\n elif self.core_temp > self._high_move_threshold:\n target_pos = -1*target_pos + self.position\n else:\n return best_pos\n\n # Calculate the optimal final position closest to target\n for step in range(self._movement_speed):\n distances = [\n abs(best_pos[0] + 0 - target_pos[0]) +\n abs(best_pos[1] + 0 - target_pos[1]), # stay\n abs(best_pos[0] + 0 - target_pos[0]) +\n abs(best_pos[1] + 1 - target_pos[1]), # up\n abs(best_pos[0] + 0 - target_pos[0]) +\n abs(best_pos[1] - 1 - target_pos[1]), # down\n abs(best_pos[0] - 1 - target_pos[0]) +\n abs(best_pos[1] + 0 - target_pos[1]), # left\n abs(best_pos[0] + 1 - target_pos[0]) +\n abs(best_pos[1] + 0 - target_pos[1]) # right\n ]\n direction = np.argmin(distances)\n if direction == 1:\n best_pos[1] += 1\n elif direction == 2:\n best_pos[1] += -1\n elif direction == 3:\n best_pos[0] += -1\n elif direction == 4:\n best_pos[0] += 1\n\n return best_pos", "def check_waypoints_obstacles(self,mission_waypoints,obstacles,\r\n moving_obstacle_direction,buffer_zone=1):\r\n # Copy of waypoints in case waypoints arg is attribute\r\n updated_mission_waypoints = mission_waypoints.copy()\r\n\r\n for i,n in enumerate(updated_mission_waypoints):\r\n for j,o in enumerate(obstacles):\r\n\r\n # If center of waypoint is center of obstacle, point towards\r\n # next waypoint on exterior of obstacle\r\n if (np.equal(o[:2],n).all()):\r\n\r\n # If there is a point next, point towards that waypoint\r\n if (i != len(updated_mission_waypoints)-1):\r\n if np.equal(moving_obstacle_direction[j],[0.0]).all():\r\n #if True:\r\n dx = updated_mission_waypoints[i+1][0]-o[0]\r\n dy = updated_mission_waypoints[i+1][1]-o[1]\r\n alpha = np.arctan(max(dy,1e-3)/max(dx,1e-3))\r\n else:\r\n\r\n v_dx = moving_obstacle_direction[j][0]\r\n v_dy = moving_obstacle_direction[j][1]\r\n v_alpha = np.arctan(max(v_dy,1e-3)/max(v_dx,1e-3))\r\n\r\n if (v_alpha >= 180):\r\n alpha = v_alpha-180\r\n else:\r\n alpha = v_alpha+180\r\n\r\n new_x = o[0]+(o[2]+buffer_zone)*np.cos(alpha)\r\n new_y = o[1]+(o[2]+buffer_zone)*np.sin(alpha)\r\n updated_mission_waypoints[i] = [new_x,new_y]\r\n\r\n # If at end, point towards previous waypoint\r\n else:\r\n if np.equal(moving_obstacle_direction[j],[0.0]).all():\r\n #if True:\r\n dx = updated_mission_waypoints[i+1][0]-o[0]\r\n dy = updated_mission_waypoints[i+1][1]-o[1]\r\n alpha = np.arctan(max(dy,1e-3)/max(dx,1e-3))\r\n else:\r\n\r\n v_dx = moving_obstacle_direction[j][0]\r\n v_dy = moving_obstacle_direction[j][1]\r\n v_alpha = np.arctan(max(v_dy,1e-3)/max(v_dx,1e-3))\r\n\r\n if (v_alpha >= 180):\r\n alpha = v_alpha-180\r\n else:\r\n alpha = v_alpha+180\r\n\r\n \r\n new_x = o[0]+(o[2]+buffer_zone)*np.cos(alpha)\r\n new_y = o[1]+(o[2]+buffer_zone)*np.sin(alpha)\r\n updated_mission_waypoints[i] = [new_x,new_y]\r\n\r\n # If not in center, move to closest point to waypoint\r\n elif (np.linalg.norm((n[1]-o[1],n[0]-o[0]))<=o[2]):\r\n if np.equal(moving_obstacle_direction[j],[0.0]).all():\r\n # if True:\r\n dx = updated_mission_waypoints[i+1][0]-o[0]\r\n dy = updated_mission_waypoints[i+1][1]-o[1]\r\n alpha = np.arctan(max(dy,1e-3)/max(dx,1e-3))\r\n else:\r\n dx = updated_mission_waypoints[i+1][0]-o[0]\r\n dy = 
updated_mission_waypoints[i+1][1]-o[1]\r\n #alpha = np.arctan(max(dy,1e-3)/max(dx,1e-3))\r\n v_dx = moving_obstacle_direction[j][0]\r\n v_dy = moving_obstacle_direction[j][1]\r\n v_alpha = np.arctan(max(v_dy,1e-3)/max(v_dx,1e-3))\r\n\r\n if (v_alpha >= 180):\r\n alpha = v_alpha-180\r\n else:\r\n alpha = v_alpha+180\r\n \r\n new_x = o[0]+(o[2]+buffer_zone)*np.cos(alpha)\r\n new_y = o[1]+(o[2]+buffer_zone)*np.sin(alpha)\r\n updated_mission_waypoints[i] = [new_x,new_y]\r\n\r\n return np.asarray(updated_mission_waypoints)", "def scan_neighbors(self, positions):\n # Calculate potential generated by topology while removing the current drone\n v = pygame.math.Vector2(0,0) \n for position in positions:\n distance = (position - self.location).magnitude()\n if (self.mode == TRAINING and 0 < distance) or (self.mode == EVALUATION and 0 < distance):# < OBSERVABLE_RADIUS):\n # Get normalized direction of neighbor \n direction = position - self.location \n # Proporcional to the distance. The closer the stronger needs to be\n #direction = direction / distance \n v += direction\n # Save last non-zero neighbours position\n if self.neighbors.magnitude() > 0:\n self.last_neighbors = self.neighbors \n # This gives the direction of the resulting potential \n self.neighbors = v.copy() / (len(positions) - 1)", "def closest(self):\n boxes = [[i, j] for i in range(len(self.grid)) for j in range(len(self.grid[i])) if\n (self.grid[i][j] == '$')]\n tot_min = 0\n for b in boxes:\n minim = 100\n for p in self.placement:\n minim = min(minim, abs(b[0] - p[0]) + abs(b[1] - p[1]))\n tot_min += minim\n\n return tot_min", "def update_pos2closest(self, pos_list, dstn_lim=0.5):\n self.neigh.fit(pos_list)\n ret = self.neigh.kneighbors(np.array([self.x, self.y]).reshape(-1, 2), return_distance=True)\n dist = ret[0][0][0]\n if dist >= dstn_lim: #no update if distance is large\n return self.x, self.y\n new_pos_index = ret[1][0][0]\n x, y = pos_list[new_pos_index]\n self.x = x\n self.y = y\n # print(\"New position = ({a}, {b})\".format(a=self.x, b=self.y))\n return x, y", "def find_near(start_pos, max_cost):\n cells = astar_o.solve_for_near_states(start_pos, max_cost)\n if len(cells) == 0:\n return cells\n else:\n return cells[1:]", "def __get_repulsive_force(self, robot_cell, robot_map):\n circle = filled_midpoint_circle(robot_cell.x, robot_cell.y, self.__radius_obs)\n closest_obstacles = [None] * self.__max_obs\n min_dists = [inf] * self.__max_obs\n for point in circle:\n if robot_map.is_in_bound(point) and robot_map.grid[point.x][point.y] >= 0.75:\n dist = hypot(robot_cell.x - point.x, robot_cell.y - point.y)\n for i in range(self.__max_obs):\n if dist < min_dists[i]:\n for ii in range(self.__max_obs - 1, i + 2, -1):\n min_dists[ii] = min_dists[ii - 1]\n closest_obstacles[ii] = closest_obstacles[ii - 1]\n min_dists[i] = dist\n closest_obstacles[i] = point\n break\n result = {'x': 0, 'y': 0}\n for obstacle in closest_obstacles:\n if obstacle != None:\n dist = hypot(robot_cell.x - obstacle.x, robot_cell.y - obstacle.y)\n rep_factor = min(0.9, abs(self.__radius_obs - dist) / self.__radius_obs)\n length = -2 * log10(1 - rep_factor) * self.__weight_rep\n dx = obstacle.x - robot_cell.x\n dy = obstacle.y - robot_cell.y\n angle = atan2(dy, dx)\n result['x'] += -length * cos(angle)\n result['y'] += -length * sin(angle)\n return result", "def generate_local_costmap_and_obstacles(self):\n rospy.wait_for_message('/scan', LaserScan)\n\n # Initilize point cloud for transformations of obstacles\n pointcloud_hokuyo = PointCloud()\n 
pointcloud_hokuyo.header.frame_id = 'hokuyo_link'\n\n # Initilize point cloud for transformations regarding the visualization of the local map.\n pointcloud_local_costmap = PointCloud()\n pointcloud_local_costmap.header.frame_id = 'hokuyo_link'\n\n # Set robot position to the middle of the grid map\n local_costmap_middle = int(self.local_costmap.info.height/2)\n robot_pos = (local_costmap_middle, local_costmap_middle)\n\n while not rospy.is_shutdown():\n start = time.time()\n\n # Get current values from subscribed topics\n ranges = self.scan.ranges \n current_pose = self.current_pose\n min_angle = self.scan.angle_min\n angle_inc = self.scan.angle_increment\n\n # Clear point clouds\n pointcloud_hokuyo.points.clear()\n pointcloud_local_costmap.points.clear()\n\n # Clear local costmap\n local_costmap = np.zeros((self.local_costmap.info.height, self.local_costmap.info.height), dtype=np.int8)\n\n for idx, element in enumerate(ranges):\n # Check if element would be in local_costmap\n if element < self.lc_length/2:\n angle = min_angle + idx * angle_inc\n\n # Get position of the sensed element in the frame of the laser scanner\n dx = np.cos(angle) * element\n dy = np.cos(np.pi/2 - angle) * element\n\n # Get position of the sensed element for visualization of the local costmap\n dx_local_map = np.cos(np.pi/2 - (angle + current_pose[2])) * element\n dy_local_map = np.sin(np.pi/2 - (angle + current_pose[2])) * element \n\n point_hokuyo_frame = Point()\n point_hokuyo_frame.x = dx\n point_hokuyo_frame.y = dy\n point_hokuyo_frame.z = 0\n\n point_local_costmap = Point()\n point_local_costmap.x = dx_local_map\n point_local_costmap.y = dy_local_map\n point_local_costmap.z = 0\n\n pointcloud_hokuyo.points.append(point_hokuyo_frame)\n pointcloud_local_costmap.points.append(point_local_costmap)\n\n # Transform point cloud into 'map' frame\n self.listener.waitForTransform('/hokuyo_link', '/base_link', rospy.Time(0), rospy.Duration(10))\n pointcloud_hokuyo.header.stamp = self.listener.getLatestCommonTime('/hokuyo_link', '/base_link')\n pointcloud_local_costmap.header.stamp = self.listener.getLatestCommonTime('/hokuyo_link', '/base_link')\n pointcloud_local_costmap = self.listener.transformPointCloud('/base_link', pointcloud_local_costmap)\n pointcloud_base_link = self.listener.transformPointCloud('/base_link', pointcloud_hokuyo)\n\n self.listener.waitForTransform('/odom', '/base_link', rospy.Time(0), rospy.Duration(10))\n pointcloud_base_link.header.stamp = self.listener.getLatestCommonTime('/base_link', '/odom')\n pointcloud_odom = self.listener.transformPointCloud('/odom', pointcloud_base_link)\n\n self.listener.waitForTransform('/map', '/odom', rospy.Time(0), rospy.Duration(10))\n pointcloud_odom.header.stamp = self.listener.getLatestCommonTime('/odom', '/map')\n pointcloud_map = self.listener.transformPointCloud('/map', pointcloud_odom)\n\n # Add points of the local costmap that have been transformed for visualization purposes\n for point in pointcloud_local_costmap.points:\n point = (int(np.floor(point.x / self.local_costmap.info.resolution)), \\\n int(np.floor(point.y / self.local_costmap.info.resolution)))\n try:\n local_costmap[robot_pos[0] + point[0], robot_pos[1] + point[1]] = 100\n except:\n pass\n\n # Publish local_costmap with robot in its center\n self.local_costmap.header.stamp = rospy.Time.now()\n self.local_costmap.info.origin.position.x = current_pose[0] - self.lc_length / 2\n self.local_costmap.info.origin.position.y = current_pose[1] - self.lc_length / 2\n self.local_costmap.data = 
local_costmap.ravel()\n self.pub_local_costmap.publish(self.local_costmap)\n\n # Publish local_obstacles\n self.local_obstacles = pointcloud_map\n self.pub_local_obstacles.publish(self.local_obstacles)\n\n end = time.time()\n # To ensure that desired frequency does not get affected by computation time.\n rospy.sleep((1/self.lc_freq) - end + start)", "def nearest_neighbor(input_forcings,ConfigOptions,MpiConfig):\n # If we are running CFSv2 with bias correction, bypass as temporal interpolation is done\n # internally (NWM-only).\n if ConfigOptions.runCfsNldasBiasCorrect and input_forcings.productName == \"CFSv2_6Hr_Global_GRIB2\":\n if MpiConfig.rank == 0:\n ConfigOptions.statusMsg = \"Bypassing temporal interpolation routine due to NWM bias correction for CFSv2\"\n err_handler.log_msg(ConfigOptions, MpiConfig)\n return\n\n # Calculate the difference between the current output timestep,\n # and the previous input forecast output step.\n dtFromPrevious = ConfigOptions.current_output_date - input_forcings.fcst_date1\n\n # Calculate the difference between the current output timesetp,\n # and the next forecast output step.\n dtFromNext = ConfigOptions.current_output_date - input_forcings.fcst_date2\n\n if abs(dtFromNext.total_seconds()) <= abs(dtFromPrevious.total_seconds()):\n # Default to the regridded states from the next forecast output step.\n if input_forcings.regridded_forcings2 is None:\n input_forcings.final_forcings[:, :, :] = ConfigOptions.globalNdv\n else:\n input_forcings.final_forcings[:,:,:] = input_forcings.regridded_forcings2[:,:,:]\n else:\n # Default to the regridded states from the previous forecast output\n # step.\n if input_forcings.regridded_forcings1 is None:\n input_forcings.final_forcings[:, :, :] = ConfigOptions.globalNdv\n else:\n input_forcings.final_forcings[:,:,:] = input_forcings.regridded_forcings1[:,:,:]", "def calculate_potential_field(self, pos_drones, pos_obstacles):\n alpha = beta = 0.005\n # --- Repulsion drones\n for position in pos_drones:\n distance = (self.location - position).magnitude()\n if 0 < distance < OBSERVABLE_RADIUS:\n # Proporcional to the distance. The closer the stronger needs to be\n f_repulsion = (position - self.location).normalize() / distance \n #f_repulsion = derivativeBivariate(alpha, beta, position, self.location) / SAMPLE_TIME\n #f_repulsion = limit(f_repulsion, SEEK_FORCE)\n self.applyForce(-f_repulsion)\n\n # --- Repulsion obstacles \n for position in pos_obstacles:\n distance = (self.location - position).magnitude()\n if 0 < distance < OBSERVABLE_RADIUS:\n # Proporcional to the distance. The closer the stronger needs to be\n f_repulsion = 2*(position - self.location).normalize() / sqrt(distance)\n #f_repulsion = derivativeBivariate(alpha, beta, position, self.location) / SAMPLE_TIME\n #f_repulsion = limit(f_repulsion, SEEK_FORCE)\n self.applyForce(-f_repulsion)\n\n # --- Repulsion walls\n # Distance to Bottom\n distance = UPPER_Y - self.location[1] \n # Proporcional to the distance. The closer the stronger needs to be\n if distance > 0:\n f_repulsion = pygame.math.Vector2(0,2) / sqrt(distance)\n else:\n f_repulsion = pygame.math.Vector2(0,2) * SEEK_FORCE\n self.applyForce(-f_repulsion)\n \n # Distance to Top\n distance = self.location[1] - LOWER_Y \n # Proporcional to the distance. 
The closer the stronger needs to be\n if distance > 0:\n f_repulsion = pygame.math.Vector2(0,-2) / sqrt(distance)\n else:\n f_repulsion = pygame.math.Vector2(0,-2) * SEEK_FORCE\n self.applyForce(-f_repulsion)", "def visible_targets(self):\r\n loc = self.loc\r\n obs = self.observation\r\n visible_targets = {}\r\n max_turn = self.settings.max_turn\r\n agent_radius = 7\r\n\r\n # Foes are possible targets if in range and in turnrange\r\n foes_in_range = [(foe, angles_plus_dist(loc, foe, agent_radius, max_turn)) for foe in obs.foes if point_dist(loc[:2], foe[:2]) < self.settings.max_range + agent_radius and abs(get_rel_angle(loc, foe)) < max_turn + 0.1]\r\n \r\n # Stop if no foes in range found\r\n if not foes_in_range or not self.ammo:\r\n return visible_targets\r\n \r\n # Same goes for friends\r\n friends_in_range = [angles_plus_dist(loc, friend, agent_radius, max_turn) for friend in obs.friends if point_dist(loc[:2], friend[:2]) < self.settings.max_range + agent_radius and abs(get_rel_angle(loc, friend)) < max_turn + 0.2]\r\n \r\n # Take corners into account as well\r\n wall_corners = corners_in_range(self.corners, loc)\r\n # Now a list [(a1, a1, dist, type), ...]\r\n wall_corners = [angles_plus_dist(loc, c[:2], 0, max_turn) + c[:-3:-1] for c in wall_corners]\r\n\r\n # obstacles is now a list of tuples for each object:\r\n # (rel_angle_left, rel_angle_right, distance, [cornertype])\r\n obstacles = friends_in_range + wall_corners\r\n\r\n # Check if foe-angles overlap with friend-angles or grid-angles\r\n for foe_loc, foe in foes_in_range:\r\n foe_a1, foe_a2, foe_d = foe\r\n\r\n # Alter shot if an object is in front of the foe, or if another\r\n # foe is in front of it\r\n for obstacle in ([o for o in obstacles if o[2] < foe_d] + [f for l,f in foes_in_range if f != foe]):\r\n\r\n # if this is a wall, check the type\r\n if len(obstacle) == 4:\r\n if obstacle[2] < 2:\r\n obstacle = (obstacle[0],\r\n obstacle[1] + max_turn,\r\n obstacle[3])\r\n else:\r\n obstacle = (obstacle[0] - max_turn,\r\n obstacle[1],\r\n obstacle[3])\r\n\r\n obst_a1, obst_a2, obst_dist = obstacle\r\n # Multiple cases for overlapping\r\n # - right-of-obstacle overlaps \r\n # - left-of-obstacle overlaps\r\n # - entire overlap\r\n if foe_a1 < obst_a2 < foe_a2:\r\n foe_a1 = obst_a2\r\n elif foe_a1 < obst_a1 < foe_a2:\r\n foe_a2 = obst_a1\r\n elif foe_a1 > obst_a1 and foe_a2 < obst_a2:\r\n foe_a1 = None\r\n foe_a2 = None\r\n \r\n \r\n if foe_a1 is not None and foe_a1 < foe_a2 and abs(foe_a1 - foe_a2) > 0.025 and not line_intersects_grid(loc[:2], foe_loc[:2], self.grid, self.settings.tilesize):\r\n visible_targets[foe_loc] = (foe_a1 + foe_a2) / 2.0\r\n\r\n return visible_targets" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates the Obstacle Heuristic cost function (dist(v,omega) from DWA paper)
def calc_obs_dist_heuristic(self, trajectory, nearest_obstacles):
    trajectory_positions = trajectory[:, 0:2]
    obs_x = nearest_obstacles[:,0]
    obs_y = nearest_obstacles[:,1]
    dx = trajectory_positions[:, 0] - obs_x[:, None]
    dy = trajectory_positions[:, 1] - obs_y[:, None]
    euclidean_dist = np.hypot(dx, dy)
    if np.array(euclidean_dist <= self.robot.robot_radius + 0.2).any():
        return np.inf
    elif euclidean_dist.size == 0:
        # No nearest obstacle, therefore return cost of 0
        return 0.0
    else:
        # Return the inverse since we are trying to maximize the objective func for obstacles
        # Smaller the dist the higher the robot's desire to move around it
        min_dist = np.min(euclidean_dist)
        return 1.0 / min_dist
[ "def heuristic_cost_estimate(self, node):\n # TODO: Return the heuristic cost estimate of a node\n \n d=self.distance(node,self.goal)\n \n return d", "def heuristic(current, goal):\n\n return ((goal[0][0] - current[0])**2 + (goal[0][1] - current[1])**2) ** (0.5) # Your code here", "def heuristic(g, h):\n (g1, h1) = g\n (g2, h2) = h\n return abs(g1 - g2) + abs(h1 - h2)", "def get_heuristic_cost(gameState, heuristic='manhattan'):\n cost = 0\n board = gameState.board\n for i in range(3):\n for j in range(3):\n cost = cost + get_heuristic_distance(board[i][j], i, j, heuristic)\n return cost", "def heuristic_cost(self, node):\n score = 0\n target_positions = self.tgt_positions[:]\n px, py = node.get_player_pos()\n mintx, minty = 0, 0\n\n for bx, by in node.box_positions:\n distance = float(\"inf\") # Set distance to be infinity to start\n for tx, ty in target_positions:\n man_dist = abs(bx - tx) + abs(by - ty)\n if man_dist < distance:\n distance = man_dist\n mintx, minty = tx, ty\n target_positions.remove((mintx, minty))\n score += distance\n return score", "def _cost(node_and_neighborhood):\n v, neighborhood = node_and_neighborhood\n return G.nodes[v].get(weight, 1) / len(neighborhood - dom_set)", "def calc_heuristic(self):\n layer = len(self.grid)\n row = len(self.grid[0])\n col = len(self.grid[0][0])\n\n self.heuristic = [[[0 for x in range(col)] for y in range(row)] for z in range(layer)]\n\n #print(len(self.heuristic) == len(self.grid))\n #print(len(self.heuristic[0]) == len(self.grid[0]))\n #print(len(self.heuristic[0][0]) == len(self.grid[0][0]))\n\n for i in range(layer):\n for j in range(row):\n for k in range(col):\n row_diff = abs(i - self.goal_node[0])\n col_diff = abs(j - self.goal_node[1])\n layer_diff = abs(k - self.goal_node[2])\n self.heuristic[i][j][k] = math.sqrt(row_diff**2 + col_diff**2 + layer_diff**2)\n\n #print(\"Heuristic:\")\n #print(self.heuristic)", "def calc_dwa_control(self, robot_state, robot_goal, obstacles): \n # Best Metrics Initializer\n minimum_cost = np.inf # Initialize minimum cost to extremely large initially\n best_control_input = np.zeros(2) \n best_trajectory = deepcopy(robot_state)\n\n # Compute the resulting velocity search space\n Vr_v, Vr_omega = self.calculate_Vr(robot_state)\n\n # Trajectory set (store all possible circular trajectories for visualization)\n num_possible_trajectories = Vr_v.shape[0] * Vr_omega.shape[0]\n trajectory_set = np.zeros((0, self.n_horizon+1, robot_state.shape[0]))\n\n ### Evaluate (v,omega) pairs and searches for the best control input + trajectory\n x_init = deepcopy(robot_state)\n for v in Vr_v:\n for omega in Vr_omega:\n ### Generate predicted trajectories with (v, omega) pair from search space\n control_input = np.array([v, omega])\n trajectory = self.generate_trajectory(x_init, control_input)\n\n ### Evaluate the paths for nearest obstacles\n nearest_obstacles = self.calculate_nearest_obstacles(robot_state, obstacles)\n\n ### Check if velocity is admissable\n if self.is_admissable(trajectory, control_input, nearest_obstacles):\n trajectory_set = np.vstack((trajectory_set, trajectory[None]))\n\n ### Cost calculation\n angle_cost = self.alpha * self.calc_goal_heuristic(trajectory, robot_goal)\n dist_cost = self.beta * self.calc_obs_dist_heuristic(trajectory, nearest_obstacles)\n vel_cost = self.gamma * self.calc_vel_heuristic(trajectory, self.robot.max_vel)\n \n # Total cost\n total_cost = angle_cost + dist_cost + vel_cost\n\n ### Update best costs & store the best control inputs + trajectory\n if minimum_cost >= 
total_cost:\n # print(\"[!] Best Found (v,w): ({:.3f}, {:.3f})\".format(v, omega))\n minimum_cost = total_cost\n best_control_input[:] = control_input\n best_trajectory = trajectory\n\n # print(\"[!] Best Found (v,w): ({:.3f}, {:.3f}) \\tCost: {:.3f}\".format(v, omega, minimum_cost))\n\n ### Prevention of getting stuck in (v,omega) = 0 search space\n if (abs(best_control_input[0]) < self.stuck_space_tol and abs(robot_state[3]) < self.stuck_space_tol):\n print(\"[!] Robot stuck in 0 velocity, sending escape angular velocity to get out of region.\")\n best_control_input[1] = self.escape_ang_vel\n\n # print(\"robot state: \", robot_state)\n # print(\"best_control_input: \", best_control_input)\n # print(\"minimum_cost: \", minimum_cost)\n # print(\"Vr_v: \", Vr_v)\n # print(\"Vr_omega: \", Vr_omega)\n\n return best_control_input, best_trajectory, trajectory_set", "def cost_obs(x, svh, svv, theta, unc=0.5):\n n_obs = svh.shape[0]\n A_vv, B_vv, R_vv, A_vh, B_vh, R_vh = x[:6]\n vsm = x[6 : (6 + n_obs)]\n lai = x[(6 + n_obs) :]\n sigma_vv, dvv = wcm(A_vv, lai, B_vv, lai, vsm, R_vv,\n pol=\"VV\", theta=theta)\n sigma_vh, dvh = wcm(A_vh, lai, B_vh, lai, vsm, R_vh,\n pol=\"HV\", theta=theta)\n diff_vv = svv - sigma_vv\n diff_vh = svh - sigma_vh\n cost = 0.5 * (diff_vv ** 2 + diff_vh ** 2) / (unc ** 2)\n jac = np.concatenate(\n [\n np.array(\n [\n np.sum(dvv[0] * diff_vv), # A_vv\n np.sum(dvv[2] * diff_vv), # B_vv\n np.sum(dvv[5] * diff_vv), # R_vv\n np.sum(dvh[0] * diff_vh), # A_vh\n np.sum(dvh[2] * diff_vh), # B_vh\n np.sum(dvh[5] * diff_vh), # R_vh\n ]\n ),\n dvv[4] * diff_vv + dvh[4] * diff_vh, # vsm\n (dvv[1] + dvv[3]) * diff_vv + (dvh[1] + dvh[3]) * diff_vh, # LAI\n ]\n )\n return cost.sum(), -jac / (unc ** 2)", "def heuristic(self, player):\n dist1 = self.board.dijkstra_distance(player, -1, -2)\n dist2 = self.board.dijkstra_distance(player, -2, -1)\n opponent1 = self.board.dijkstra_distance(next_player(player), -1, -2)\n opponent2 = self.board.dijkstra_distance(next_player(player), -2, -1)\n result = min(opponent1, opponent2) - min(dist1, dist2)\n limit = min(self.board.size())\n result = max(-limit, min(limit, result))\n return 1.0 * result / limit", "def get_estimated_cost(start_node, destination_node):\r\n delta_x = abs(start_node.x - destination_node.x)\r\n delta_y = abs(start_node.y - destination_node.y)\r\n if delta_x < delta_y:\r\n return math.sqrt(2 * delta_x^2) + delta_y - delta_x\r\n else:\r\n return math.sqrt(2 * delta_y^2) + delta_x - delta_y", "def compute_obstacle_cost_and_gradient(\n self, robot_pts, obstacle_points, radius=0.1, epsilon=0.02\n ):\n n_obstacle = obstacle_points.shape[0]\n n_dof = robot_pts.shape[0]\n n_collision_points = robot_pts.shape[1]\n\n # run through all obstacles and compute the potentials\n cost = np.zeros([n_dof, n_obstacle])\n for j in range(n_dof):\n for i in range(n_obstacle):\n # for k in range(n_collision_points):\n # field_distance = np.norm(obstacle_points[i,:]-robot_pts[j,k,:]) - radius\n field_distance = np.linalg.norm(\n obstacle_points[i, :] - robot_pts[j, 0, :]\n ) # single point\n D = field_distance - radius # D(x)\n\n if D >= epsilon:\n c = 0\n elif D >= 0:\n c = 1 / 2 * (D - epsilon) ** 2 / epsilon\n else:\n c = -D + 1 / 2 * epsilon\n\n cost[j, i] = c\n\n return 1, 2, 3", "def total_cost(self):\n return self.heuristic() + self.backward_cost", "def _construct_adv_cost(self):\n match_cost = self.GN.compute_log_prob(Xd=self.match_target)\n adv_cost = -T.sum(match_cost) / self.obs_count\n return adv_cost", "def estimate_cost(self, board):\n 
pass", "def compute_cost(self, u, v):\n pass", "def set_h_cost(self, heuristic):\n if heuristic == 'uniform':\n self.h_cost = 0\n elif heuristic == 'misplaced_tiles':\n self.h_cost = self.set_misplaced_tiles()\n elif heuristic == 'manhattan':\n self.h_cost = self.set_manhattan_distance()", "def costFunction(tracks, phantom_w, phantom_d):\n J = 0. # cost function\n # clear derivatives\n phantom_d = np.zeros((phantom_d.shape[0], phantom_d.shape[1]))\n\n for track in tracks:\n loss = track.loss_der() # calcs loss function J and modifies phantom_d\n J += loss * loss\n \n phantom_d /= len(tracks)\n return J / len(tracks) / 2", "def custom_heuristic(gameState):\r\n center_weight = 0.5\r\n lib_weight = 1.5\r\n own_loc = gameState.locs[self.player_id]\r\n opp_loc = gameState.locs[1- self.player_id]\r\n own_liberties = gameState.liberties(own_loc)\r\n opp_liberties = gameState.liberties(opp_loc)\r\n # Custom 1: distanceToCenter(own_loc)\r\n # Custom 2: len(own_liberties) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 3: len(own_liberties) - ( len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) ) \r\n # Custom 4: len(own_liberties) - ( lib_weight * len(opp_liberties) ) - ( center_weight * distanceToCenter(own_loc) )\r\n # Custom 5: ( lib_weight * (len(own_liberties) / len(opp_liberties)) - ( center_weight * distanceToCenter(own_loc)) )\r\n return ( lib_weight * (len(own_liberties) / len(opp_liberties)) - (center_weight * distanceToCenter(own_loc)) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that region of gcp util object could be specified via constructor.
def test_gcputil_init_region():
    gcp_util = gcp.GoogleCloudUtil(region_name="europe-west1")
    assert gcp_util._region_name == "europe-west1"
[ "def test_gcputil_init_region_config():\n test_region = \"europe-west3\"\n TEST_ENV_DATA = copy.deepcopy(config.ENV_DATA)\n TEST_ENV_DATA[\"region\"] = test_region\n with patch(\"ocs_ci.framework.config.ENV_DATA\", TEST_ENV_DATA):\n gcp_util = gcp.GoogleCloudUtil()\n assert gcp_util._region_name == test_region\n # but the config can specify only the default value\n gcp_util = gcp.GoogleCloudUtil(region_name=\"something_else\")\n assert gcp_util._region_name == \"something_else\"", "def is_region_flagged(self):\n element = self.driver.find_element(*ProjectFormLoc.ERROR_REGION)\n assert('Region is required' in element.text), 'Region error missing'", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['LoadBalancerProfileSecurityGroupsSupportedFixed', 'LoadBalancerProfileSecurityGroupsSupportedDependent']))\n raise Exception(msg)", "def is_gce_instance(cls):\n try:\n socket.getaddrinfo('metadata.google.internal', 80)\n except socket.gaierror:\n return False\n return True", "def region_check(self, field) -> None:\n if field.region() != (None, None) and field.region() != self._region:\n raise FieldOperationError('Cant operate on ' + str(type(self)) + ' and ' + str(type(field)) + '!')", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['InstancePlacementTargetDedicatedHostGroupReference', 'InstancePlacementTargetDedicatedHostReference']))\n raise Exception(msg)", "def __init__(__self__,\n resource_name: str,\n args: MultiRegionAccessPointArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['InstancePlacementTargetPrototypeDedicatedHostIdentity', 'InstancePlacementTargetPrototypeDedicatedHostGroupIdentity']))\n raise Exception(msg)", "def test_vmware_service_resources_regions_get(self):\n pass", "def testNoParameters(self):\n # Equivalent to: region = RegionFile()\n self.assertRaises(ValueError, RegionFile)", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['EndpointGatewayReservedIPReservedIPIdentity', 'EndpointGatewayReservedIPReservedIPPrototypeTargetContext']))\n raise Exception(msg)", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['DedicatedHostProfileVCPUFixed', 'DedicatedHostProfileVCPURange', 'DedicatedHostProfileVCPUEnum', 'DedicatedHostProfileVCPUDependent']))\n raise Exception(msg)", "def __init__(self) -> None:\n # pylint: disable=super-init-not-called\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityById', 'InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByCRN', 'InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByHref']))\n raise Exception(msg)", "def test_get_cloud_pool(self):\n pass", "def test_create_cloud_pool(self):\n pass", "def CheckBounds(self, ):\n ...", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. 
Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['CloudObjectStorageBucketIdentityByName']))\n raise Exception(msg)", "def _validate_instance(self, url):\n request = requests.get(url)\n if 'Google Fonts Regression' not in request.text and \\\n 'Compare fonts' not in request.text:\n raise Exception(('instance_url %s is not an instance of '\n 'GF Regression' % url))", "def __init__(self) -> None:\n msg = \"Cannot instantiate base class. Instead, instantiate one of the defined subclasses: {0}\".format(\n \", \".join(['InstanceProfileVCPUFixed', 'InstanceProfileVCPURange', 'InstanceProfileVCPUEnum', 'InstanceProfileVCPUDependent']))\n raise Exception(msg)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that region of gcp util object is loaded via ocsci config when not specified. Moreover, if the region is specified directly, the config value should not be used.
def test_gcputil_init_region_config():
    test_region = "europe-west3"
    TEST_ENV_DATA = copy.deepcopy(config.ENV_DATA)
    TEST_ENV_DATA["region"] = test_region
    with patch("ocs_ci.framework.config.ENV_DATA", TEST_ENV_DATA):
        gcp_util = gcp.GoogleCloudUtil()
        assert gcp_util._region_name == test_region
        # but the config can specify only the default value
        gcp_util = gcp.GoogleCloudUtil(region_name="something_else")
        assert gcp_util._region_name == "something_else"
[ "def test_gcputil_init_region():\n gcp_util = gcp.GoogleCloudUtil(region_name=\"europe-west1\")\n assert gcp_util._region_name == \"europe-west1\"", "def parse_region():\r\n\r\n if ARGS.get('os_rax_auth'):\r\n region = ARGS.get('os_rax_auth')\r\n auth_url = 'identity.api.rackspacecloud.com/v2.0/tokens'\r\n if region is 'LON':\r\n return ARGS.get('os_auth_url', 'https://lon.%s' % auth_url)\r\n elif region.lower() in info.__rax_regions__:\r\n return ARGS.get('os_auth_url', 'https://%s' % auth_url)\r\n else:\r\n raise turbo.SystemProblem('No Known RAX Region Was Specified')\r\n elif ARGS.get('os_hp_auth'):\r\n region = ARGS.get('os_hp_auth')\r\n auth_url = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens'\r\n if region.lower() in info.__hpc_regions__:\r\n return ARGS.get('os_auth_url', auth_url % region)\r\n else:\r\n raise turbo.SystemProblem('No Known HP Region Was Specified')\r\n elif ARGS.get('os_auth_url'):\r\n return ARGS.get('os_auth_url')\r\n else:\r\n raise turbo.SystemProblem(\r\n 'You Are required to specify an Auth URL, Region or Plugin'\r\n )", "def region_name(self) -> Optional[str]:\n aws_region = self._aws_region\n if aws_region:\n return aws_region\n if self._aws_credentials_dir:\n aws_credentials_file = os.path.join(self._aws_credentials_dir, \"credentials\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_credentials_file)\n if aws_region:\n return aws_region\n aws_config_file = os.path.join(self._aws_credentials_dir, \"config\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_config_file)\n if aws_region:\n return aws_region\n aws_region = os.environ.get(\"AWS_REGION\", os.environ.get(\"AWS_DEFAULT_REGION\"))\n if aws_region:\n return aws_region\n aws_credentials_file = os.environ.get(\"AWS_SHARED_CREDENTIALS_FILE\", \"~/.aws/credentials\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_credentials_file)\n if aws_region:\n return aws_region\n aws_config_file = os.environ.get(\"AWS_CONFIG_FILE\", \"~/.aws/config\")\n _, _, aws_region = self._read_aws_credentials_from_file(aws_config_file)\n return aws_region", "def _AutoDetectRegion(self):\n def _GetRegionContext(unused_object_type, context):\n if self._flags.region:\n return self.DenormalizeResourceName(self._flags.region)\n return self.GetRegionForResource(self.api.addresses,\n context['address'])\n\n self._context_parser.context_prompt_fxns['region'] = _GetRegionContext", "def region_check(self, field) -> None:\n if field.region() != (None, None) and field.region() != self._region:\n raise FieldOperationError('Cant operate on ' + str(type(self)) + ' and ' + str(type(field)) + '!')", "def test_get_cortx_s3_region_failure():\n with pytest.raises(KeyError):\n config = CORTXS3Config(use_cipher = False)\n del config._config['cortx_s3']['default_region']\n assert config.get_cortx_s3_region()", "def test_get_cortx_s3_region_success():\n config = CORTXS3Config(use_cipher = False)\n config._config['cortx_s3']['default_region'] = \"us-west2\"\n s3_region = config.get_cortx_s3_region()\n assert s3_region == \"us-west2\"", "def is_region_flagged(self):\n element = self.driver.find_element(*ProjectFormLoc.ERROR_REGION)\n assert('Region is required' in element.text), 'Region error missing'", "def test_vmware_service_resources_regions_get(self):\n pass", "def _should_skip_region(self, region_start):\n\n obj = self.project.loader.find_object_containing(region_start, membership_check=False)\n if obj is None:\n return False\n if isinstance(obj, PE):\n section = 
obj.find_section_containing(region_start)\n if section is None:\n return False\n if section.name in {\".textbss\"}:\n return True\n\n return False", "def ec2_current_region() -> Optional[str]:\n cfg = ec2_metadata()\n if cfg is None:\n return None\n return cfg.get(\"region\", None)", "def get_region(self):\n try:\n return self.meta_data['placement'][\n 'availability-zone'][:-1].strip()\n except KeyError:\n raise IcsMetaException(\n \"Cannot find the 'region info' in meta-data.\")", "def test_azure_service_api_regions_get(self):\n pass", "def this_region(self):\n _logger.debug('%s', where_am_i())\n if self._metadata is None:\n _logger.warning('metadata is None !')\n # TODO: should it severe error case instead ??\n return None\n try:\n return self._metadata['instance']['region']\n except Exception as e:\n _logger.warning('No region information in metadata: %s', str(e))\n return None", "def get_region_name(place_info: dict) -> Optional[str]:\n region: Optional[str]\n try:\n region = place_info['region'][0]['value']\n except Exception as ex:\n region = None\n logging.error(f\"Error in region reading {ex}\")\n return region", "def get_region_from_rclone(bucket_name: str, cloud: RcloneClouds) -> str:\n bucket_rclone_profile = Rclone.generate_rclone_bucket_profile_name(\n bucket_name, cloud)\n with open(Rclone._RCLONE_ABS_CONFIG_PATH) as file:\n bucket_profile_found = False\n for line in file:\n if line.lstrip().startswith('#'): # skip user's comments.\n continue\n if line.strip() == f'[{bucket_rclone_profile}]':\n bucket_profile_found = True\n elif bucket_profile_found and line.startswith('region'):\n return line.split('=')[1].strip()\n elif bucket_profile_found and line.startswith('['):\n # for efficiency stop if we've searched past the\n # requested bucket profile with no match\n return ''\n # segment for bucket and/or region field for bucket wasn't found\n return ''", "def set_region():\n\n region = None\n session = boto3.session.Session()\n region = session.region_name\n if region: # already defined in env var or config file\n return\n else:\n try:\n region = requests.get(\"http://169.254.169.254/latest/dynamic/instance-identity/document\").json()['region']\n boto3.setup_default_session(region_name=region)\n print(json.dumps({\"message\": \"set region to {}\".format(region)}))\n except:\n print(json.dumps({\"message\": \"getting region failed from instance metadata failed\"}))\n pass", "def get_aws_region():\n _name: str = \"get_aws_region()\"\n region: Optional[str] = None\n if 'AWS_DEFAULT_REGION' in os.environ:\n region = os.getenv('AWS_DEFAULT_REGION')\n elif 'AWS_REGION' in os.environ:\n region = os.getenv('AWS_REGION')\n else:\n msg: str = \"AWS_DEFAULT_REGION nor AWS_REGION defined in the environment.\"\n _logger.error(\"%s: %s\", _name, msg)\n raise RuntimeError(msg)\n\n return region", "def test_srname(self):\n londo = self.get_region(\"Orange Londo\")\n self.assertIsNotNone(londo)\n\n ct = self.get_region(\"ct_orangelondo\")\n self.assertIsNotNone(ct)\n self.assertEqual(londo, ct)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Just some array that can accept one of Customer or Int32
def someArray(self) -> Array[Customer, Int32]:
[ "def _validate(x):\n if not isinstance(x, int):\n raise TypeError(\"Only Integer Arrays are allowed\")", "def validate_integers(*nums):\n for num in nums:\n if not isinstance(num, int):\n raise TypeError(\"Sorry. The function only works with integers.\")", "def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]", "def acceptedTypeIds(*args, **kwargs):\n \n pass", "def validate_client_ids(arg_name: str, value) -> bool:\n\n if value is None:\n return True\n\n if not isinstance(value, list):\n raise ValueError(f\"Parameter '{arg_name}' should be 'list'. Actual type is {type(value)}\")\n\n if len(value) == 0:\n raise ValueError(f\"Parameter '{arg_name}' should be not empty list\")\n\n for client_id in value:\n if not isinstance(client_id, int):\n raise ValueError(\n f\"Parameter '{arg_name}' should contain only 'int' elements, but it contains '{client_id}', \"\n f\"which type is {type(value)}\")\n\n return True", "def _validate_index_type_is_int(self, index: Union[int, Int]) -> None:\r\n from apysc import Int\r\n if isinstance(index, (int, Int)):\r\n return\r\n raise ValueError(\r\n 'Currently indexing is only supported int or Int types.'\r\n ' If you need to slice array please use slice method.')", "def create_customers(self, cust_array):\n keys = []\n for cust in cust_array:\n c_key = keynamehelper.create_key_name(\"customer\", cust['id'])\n self.redis.hmset(c_key, cust)\n keys.append(c_key)\n return keys", "def integers_only(lst):\n try:\n _ = [int(i) for i in lst]\n except:\n return False\n return True", "def asArray( cls, value, typeCode=None ):\n return value", "def validate_set_contents(self, value):\n if self.item_type in (int, long):\n item_type = (int, long)\n else:\n item_type = self.item_type\n\n for item in value:\n if not isinstance(item, item_type):\n if item_type == (int, long):\n raise db.BadValueError('Items in the %s set must all be integers.' 
%\n self.name)\n else:\n raise db.BadValueError(\n 'Items in the %s set must all be %s instances' %\n (self.name, self.item_type.__name__))\n return value", "def generate_cust_lst(person_lst):\r\n cust_lst = []\r\n for index in range(0, len(person_lst)):\r\n cust = Customer(person_lst[index][0],person_lst[index][1],person_lst[index][2])\r\n cust_lst.append(cust)\r\n\r\n return cust_lst", "def inputArrayValue(*args, **kwargs):\n \n pass", "def check_type_get_example(self, in_types):\n pass", "def check_type_k(k: Union[int, list, range, np.ndarray]) -> None:\n\n if isinstance(k, list):\n if any(not (isinstance(ki,int)) for ki in k):\n raise AssertionError(\"Some element in k is not int.\")\n\n return None", "def count_user_types(data_list):\n subscriber = 0\n dependent = 0\n customer = 0\n\n for sample in range(len(data_list)):\n if data_list[sample] == \"Subscriber\":\n subscriber += 1\n elif data_list[sample] == \"Dependent\":\n dependent += 1\n elif data_list[sample] == \"Customer\":\n customer += 1\n\n return [subscriber, dependent, customer]", "def is_integer(obj):\n return isinstance(obj, (int, np.integer))", "def _check_min_num(self, obj):\n\n if obj.min_num is None:\n return []\n elif not isinstance(obj.min_num, int):\n return must_be(\"an integer\", option=\"min_num\", obj=obj, id=\"admin.E205\")\n else:\n return []", "def restrict_batch(self, batch, subset):\n\n self._validate(is_numeric=False, batch=batch)\n assert isinstance(subset, (list, tuple))\n\n if len(subset) == 1:\n idx, = subset\n return batch[idx]\n\n return tuple([batch[i] for i in subset])", "def gen_customer_list(num_orders):\n cust = Customer.query.all()\n cust_list_prob = apply_pareto(cust)\n return np.random.choice(cust, num_orders, cust_list_prob).tolist()", "def _convert_other_val_to_array(self, other: Any) -> Any:\r\n if isinstance(other, list):\r\n return Array(other)\r\n return other" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
An array that accepts anything
def anyArray(self) -> Array[...]:
[ "def array(self):\n raise NotImplementedError", "def arrayify(value):\n return value if _is_array(value) else [value]", "def IsArray(self) -> bool:", "def inputArrayValue(*args, **kwargs):\n \n pass", "def _arraytest(*args):\r\n\r\n rargs = []\r\n for a in args:\r\n if isinstance(a, (list, tuple)):\r\n rargs.append(scipy.array(a))\r\n else:\r\n rargs.append(a)\r\n if len(rargs) == 1:\r\n return rargs[0] # no unpacking if single value, return value i/o list\r\n else:\r\n return rargs", "def _is_array(v):\n return isinstance(v, list)", "def ea_array_frompointer(*args) -> \"ea_array *\":\n return _ida_pro.ea_array_frompointer(*args)", "def get_array(self): # real signature unknown; restored from __doc__\n pass", "def asArray( cls, value, typeCode=None ):\n return value", "def uval_array_frompointer(*args) -> \"uval_array *\":\n return _ida_pro.uval_array_frompointer(*args)", "def wrap_array(x):\n if isinstance(x, collections.Iterable):\n if isinstance(x, np.ndarray):\n return x\n else:\n return np.array(x)\n else:\n return np.array([x])", "def _make_1_element_array(data: float):\n return numpy.array([data])", "def _check_to_array(x):\n if(hasattr(x, '__iter__')):\n return(np.array(x, dtype=float))\n else:\n return(np.array([float(x)]))", "def _convert_other_val_to_array(self, other: Any) -> Any:\r\n if isinstance(other, list):\r\n return Array(other)\r\n return other", "def assert_is_any_array(a):\n if not is_any_array(a):\n raise TypeError('a is not an array or sparse array.')", "def addElementArray(*args, **kwargs):\n \n pass", "def test_sequence_array_conversion_type_checking():\n from Python.Test import ArrayConversionTest\n from Python.Test import Spam\n\n # This should work, because null / None is a valid value in an\n # array of reference types.\n\n items = UserList()\n for i in range(10):\n items.append(Spam(str(i)))\n items[1] = None\n\n result = ArrayConversionTest.EchoRange(items)\n\n assert result[0].__class__ == Spam\n assert result[1] is None\n assert len(result) == 10\n\n with pytest.raises(TypeError):\n items[1] = 1\n _ = ArrayConversionTest.EchoRange(items)\n\n with pytest.raises(TypeError):\n items[1] = \"spam\"\n _ = ArrayConversionTest.EchoRange(items)", "def unsafe_array(self):\n _DEPRECATION_ERROR_ATTRIBUTE(\n self,\n \"unsafe_array\",\n \"Use 'array' attribute instead.\",\n version=\"3.0.0\",\n ) # pragma: no cover", "def test_list_array_conversion_type_checking():\n from Python.Test import ArrayConversionTest\n from Python.Test import Spam\n\n # This should work, because null / None is a valid value in an\n # array of reference types.\n\n items = []\n for i in range(10):\n items.append(Spam(str(i)))\n items[1] = None\n\n result = ArrayConversionTest.EchoRange(items)\n\n assert result[0].__class__ == Spam\n assert result[1] is None\n assert len(result) == 10\n\n with pytest.raises(TypeError):\n items[1] = 1\n _ = ArrayConversionTest.EchoRange(items)\n\n with pytest.raises(TypeError):\n items[1] = \"spam\"\n _ = ArrayConversionTest.EchoRange(items)", "def test_optional_array_no_input():\n length = 10\n output = _validation._check_optional_array(length, None)\n\n assert isinstance(output, np.ndarray)\n assert_array_equal(output, np.ones(length))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read pin values from board. Return a dict.
def read(self, board, sensors=None, timestamp=False): keys = [] values = [] for sensor in sensors: keys.append(sensor) values.append(board.read(sensor)) return dict(zip(keys, values))
[ "def getAllBoardCoord(driver):\n board_list = ['hi', 'mid', 'lo']\n board_dict = {}\n for b in board_list: \n tmp = getTemplate(b)\n game_image = getGameImage(driver, 'layer2')\n board_coord = detectTemplate(game_image, tmp, False, -1)\n board_dict[b] = board_coord\n return board_dict", "def _get_gpio(tcl_name):\n pat1 = 'connect_bd_net -net processing_system7_0_GPIO_O'\n pat2 = 'connect_bd_net -net ps7_GPIO_O'\n result = {}\n gpio_list = []\n try:\n with open(tcl_name, 'r') as f:\n for line in f:\n if (pat1 in line) or (pat2 in line):\n gpio_list = re.findall('\\[get_bd_pins (.+?)/Din\\]',\n line, re.IGNORECASE)\n\n match1 = 0\n index = 0\n for i in range(len(gpio_list)):\n name = gpio_list[i].split('/')[0]\n pat3 = \"set \" + name\n pat4 = \"CONFIG.DIN_FROM {([0-9]+)}*\"\n with open(tcl_name, 'r') as f:\n for line in f:\n if pat3 in line:\n match1 = 1\n continue\n if match1 == 1:\n match2 = re.search(pat4, line, re.IGNORECASE)\n if match2:\n index = match2.group(1)\n match1 = 0\n break\n result[gpio_list[i]] = [int(index), None]\n except:\n pass\n return result", "def read_raw_values(self):\n [axes, buttons, mapped_values] = self._input_device.read(include_raw=True)\n dict_axes = {}\n dict_buttons = {}\n\n for i, a in enumerate(axes):\n dict_axes[i] = a\n\n for i, b in enumerate(buttons):\n dict_buttons[i] = b\n\n return [dict_axes, dict_buttons, mapped_values]", "def read(pin):\n normalized_pin = normalize_pin(pin)\n try:\n pin_map = pins[normalized_pin]\n except KeyError:\n return # Pin not supported\n\n with open(pin_map['path'], 'r') as f:\n if int(f.read()):\n return HIGH\n else:\n return LOW", "def read(self, pin):\n if type(pin) is list:\n return [self.read(p) for p in pin]\n\n pin_id = self._pin_mapping.get(pin, None)\n if pin_id:\n value = self._read(pin_id)\n lpin = self._pin_lin.get(pin, None)\n if lpin and type(lpin[\"read\"]) is tuple:\n read_range = lpin[\"read\"]\n value = self._linear_interpolation(value, *read_range)\n return value\n else:\n raise KeyError(\"Requested pin is not mapped: %s\" % pin)", "def arduino_boards() -> dict:\n with open(BOARDS) as f:\n board_properties = json.load(f)\n\n boards_model = {board: info[\"model\"] for board, info in board_properties.items()}\n return boards_model", "def get_board_dict(self):\n return {p: self.as_string(p) for p in board.positions()}", "def get_pincodes(state, city):\n pincode_data = STATEDATA[state][city]\n return pincode_data", "def read_scanner_data(handle): \n name = read_string(handle)\n pmt_green = read_int(handle)\n pmt_red = read_int(handle)\n scanner_version = read_string(handle)\n imaging_user = read_string(handle)\n return ScannerData(name, pmt_green, pmt_red, scanner_version, imaging_user)", "def get_data(_, __):\r\n power = rpi.get_power()\r\n brightness = rpi.get_brightness()\r\n\r\n logger.info('get_data: Power %d Brightness %d',\r\n power if power is not None else -1 , brightness)\r\n\r\n return {\r\n \"power\": power,\r\n \"brightness\": brightness,\r\n }", "def pins(self) -> Dict:\n if self.identifier is None:\n raise PinterestException(\"Section: pins() requires valid section identifier\")\n url = config.api_url + '/v1/board/sections/{identifier}/pins/'.format(identifier=self.identifier)\n params = {'access_token': self.token}\n return pinterest_request('put', url, params=params)", "def get_pins(self, layer=-1):\n layer = self._get_layer(layer)\n return self._pin_dict.get(layer, [])", "def pinState(gpio_pin):\r\n # With sysfs driver this is identical to digitalRead()\r\n return 
digitalRead(gpio_pin)", "def readtiles(self):\n\n data = {}\n for p in self.index:\n data[p] = self.readtile(p.x, p.y)\n\n return data", "def read_values(base, key):\r\n try:\r\n handle = RegOpenKeyEx(base, key)\r\n except RegError:\r\n return None\r\n d = {}\r\n i = 0\r\n while 1:\r\n try:\r\n name, value, type = RegEnumValue(handle, i)\r\n except RegError:\r\n break\r\n name = name.lower()\r\n d[convert_mbcs(name)] = convert_mbcs(value)\r\n i = i + 1\r\n return d", "def read_monitors_devices(self):\n response = self._txrx.send_recv_message(self._cmd_msg)\n read_maps = generate_register_maps(response)\n result = dict(list(zip(list(self._channel_data.keys()), read_maps)))\n for name, value in list(result.items()):\n self._channel_data[name].append(value)\n return result", "def getPinProperties(cardConnection, featureList=None, controlCode=None):\n if controlCode is None:\n if featureList is None:\n featureList = getFeatureRequest(cardConnection)\n controlCode = hasFeature(featureList, FEATURE_IFD_PIN_PROPERTIES)\n\n if controlCode is None:\n return {'raw': []}\n\n response = cardConnection.control(controlCode, [])\n d = {\n 'raw': response,\n 'LcdLayoutX': response[0],\n 'LcdLayoutY': response[1],\n 'EntryValidationCondition': response[2],\n 'TimeOut2': response[3]}\n\n return d", "def read_map(map_meta_path: str) -> dict:\n with open(map_meta_path, \"r\") as f:\n meta = yaml.load(f, yaml.SafeLoader)\n\n img_array = read_img(os.path.join(os.path.dirname(map_meta_path), meta['image']))\n grid = make_binary_grid(img_array, meta['occupied_thresh'], meta['negate'])\n return dict(meta, image=grid)", "def _read_calibration(self):\n url = 'http://docs.google.com/feeds/download/spreadsheets/Export?key=14VPabCGN6TftpdID9zgbFzsRx3mHq_iayQP6OTUrr3A&exportFormat=csv&gid=0'\n response = urllib2.urlopen(url)\n cr = csv.reader(response)\n\n result = {}\n labels = next(cr)\n for row in cr:\n mac_address = row[0].translate(None, ':').strip().lower()\n if mac_address == self.mac_address or row[0] == 'DEFAULT':\n direction = row[5].strip()\n for i in range(6, len(labels)):\n result[labels[i].strip(), direction] = row[i].strip()\n if direction == 'reverse':\n break\n\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Common setup for Vera devices.
def setup(hass, base_config): global VERA_CONTROLLER import pyvera as veraApi config = base_config.get(DOMAIN) base_url = config.get('vera_controller_url') if not base_url: _LOGGER.error( "The required parameter 'vera_controller_url'" " was not found in config" ) return False VERA_CONTROLLER, _ = veraApi.init_controller(base_url) def stop_subscription(event): """Shutdown Vera subscriptions and subscription thread on exit.""" _LOGGER.info("Shutting down subscriptions.") VERA_CONTROLLER.stop() hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_subscription) try: all_devices = VERA_CONTROLLER.get_devices( list(DEVICE_CATEGORIES.keys())) except RequestException: # There was a network related error connecting to the vera controller. _LOGGER.exception("Error communicating with Vera API") return False exclude = config.get(CONF_EXCLUDE, []) if not isinstance(exclude, list): _LOGGER.error("'exclude' must be a list of device_ids") return False lights_ids = config.get(CONF_LIGHTS, []) if not isinstance(lights_ids, list): _LOGGER.error("'lights' must be a list of device_ids") return False for device in all_devices: if device.device_id in exclude: continue dev_type = DEVICE_CATEGORIES.get(device.category) if dev_type is None: continue if dev_type == 'switch' and device.device_id in lights_ids: dev_type = 'light' VERA_DEVICES[dev_type].append(device) for component in 'binary_sensor', 'sensor', 'light', 'switch', 'lock': discovery.load_platform(hass, component, DOMAIN, {}, base_config) return True
[ "def SetupCommon(self):\n self.network_name = self._GetResourceName()\n self.subnetwork_name = self._GetResourceName()\n\n self.Run('compute networks create {} --subnet-mode=custom'.format(\n self.network_name))\n self.Run('compute networks subnets create {0} --network {1} '\n '--region {2} --range {3}'.format(self.subnetwork_name,\n self.network_name, self.region,\n self.SUBNET_RANGE))", "def prepare_emulator(self):", "def setup_class(self):\n self.dut = self.android_devices[0]\n required_params = dir(VPN_PARAMS)\n required_params = [x for x in required_params if not x.startswith('__')]\n self.unpack_userparams(required_params)\n wifi_test_utils.wifi_test_device_init(self.dut)\n wifi_test_utils.wifi_connect(self.dut, self.wifi_network)\n time.sleep(3)", "def setup_environment():", "def setUp(self):\n SelTestBase.setUp(self)\n self.addDevice()", "def _main():\n volttron_home = _os.path.normpath(expandall(\n _os.environ.get('VOLTTRON_HOME', '~/.volttron')))\n _os.environ['VOLTTRON_HOME'] = volttron_home\n if not _os.path.exists(volttron_home):\n _os.makedirs(volttron_home, 0o755)\n\n y_or_n = ('Y', 'N', 'y', 'n')\n y = ('Y', 'y')\n n = ('N', 'n')\n print('\\nYour VOLTTRON_HOME currently set to: {}'.format(volttron_home))\n t = ('\\nIs this the volttron you are attempting to setup? [Y]',\n y_or_n,\n 'Y')\n if not prompt_response(t) in y:\n print(\n '\\nPlease execute with VOLTRON_HOME=/your/path volttron-cfg to '\n 'modify VOLTTRON_HOME.\\n')\n return\n t = ('\\nIs this instance discoverable (Y/N)? [N] ', y_or_n, 'N')\n _explain_discoverable()\n is_discoverable = prompt_response(t) in y\n\n if is_discoverable:\n t = ('\\nWhat is the external ipv4 address for this instance? '\n '[127.0.0.1]: ', None, '127.0.0.1')\n external_ip = prompt_response(t)\n t = ('What is the vip port this instance? [22916] ',)\n vip_port = prompt_response(t)\n if not vip_port:\n vip_port = 22916\n\n t = ('\\nWhat is the port for discovery? [8080] ',)\n external_port = prompt_response(t)\n if not external_port:\n external_port = 8080\n t = (\n '\\nWhich IP addresses are allowed to discover this instance? '\n '[/127.*/] ', None, '/127.*/')\n ip_allowed_to_discover = prompt_response(t)\n AuthFile().add(AuthEntry(address=ip_allowed_to_discover,\n credentials='/CURVE:.*/'))\n\n t = ('\\nIs this instance a volttron central (Y/N)? [N] ', y_or_n, 'N')\n do_install_vc = prompt_response(t) in y\n do_vc_autostart = True\n do_platform_autostart = True\n if do_install_vc:\n t = ('\\nShould volttron central autostart(Y/N)? [Y] ',\n y_or_n, 'Y')\n do_vc_autostart = prompt_response(t) in y\n\n t = ('\\nInclude volttron central platform agent on '\n 'volttron central? [Y]', y_or_n, 'Y')\n do_install_platform = prompt_response(t) in y\n else:\n do_install_platform = True\n t = ('\\nAddress of volttron central? [127.0.0.1]: ', None,\n '127.0.0.1')\n vc_ipaddress = prompt_response(t)\n should_resolve = True\n first = True\n t = ('Port of volttron central? [8080] ',)\n vc_port = prompt_response(t)\n if not vc_port:\n vc_port = 8080\n while not _resolvable(vc_ipaddress, vc_port) and should_resolve:\n print(\"Couldn't resolve {}:{}\".format(vc_ipaddress, vc_port))\n t2 = (\n '\\nShould volttron central be resolvable now? [Y] ', y_or_n,\n 'Y')\n if first:\n should_resolve = prompt_response(t2) in ('y', 'Y')\n first = False\n\n if should_resolve:\n t = ('\\nAddress of volttron central? ',)\n vc_ipaddress = prompt_response(t)\n t = ('\\nPort of volttron central? 
',)\n vc_port = prompt_response(t)\n\n if do_install_platform:\n t = ('\\nShould platform agent autostart(Y/N)? [Y] ', y_or_n, 'Y')\n do_platform_autostart = prompt_response(t) in y\n\n external_uri = \"tcp://{}:{}\".format(external_ip, vip_port)\n bind_web_address = \"http://{}:{}\".format(external_ip,\n external_port)\n try:\n vc_web_address = \"http://{}:{}\".format(vc_ipaddress, vc_port)\n _make_configuration(external_uri, bind_web_address,\n vc_web_address)\n\n # if vc_ipaddres isn't defined\n # only happens on volttron central.\n except UnboundLocalError:\n _make_configuration(external_uri, bind_web_address)\n\n t = ('\\nShould install sqlite platform historian? [N]', y_or_n, n)\n do_install_platform_historian = prompt_response(t) in y\n\n do_historian_autostart = True\n if do_install_platform_historian:\n t = ('\\nShould historian agent autostart(Y/N)? [Y] ', y_or_n, 'Y')\n do_historian_autostart = prompt_response(t) in y\n\n # in order to install agents we need to start the platform.\n _start_platform()\n _install_agents((do_install_vc, do_vc_autostart),\n (do_install_platform, do_platform_autostart),\n (do_install_platform_historian,\n do_historian_autostart))\n _shutdown_platform()\n print('Finished configuration\\n')\n print('You can now start you volttron instance.\\n')\n print('If you need to change the instance configuration you can edit')\n print('the config file at {}/{}\\n'.format(volttron_home, 'config'))", "def mainSetup():\n setupGlobals()\n setupCallbacks()", "def setup(self, **KARGS):\n for attr in KARGS.keys():\n self.__setattr__(attr, KARGS[attr])\n if getattr(self, 'device', ''): zram_setup(**self.__dict__)\n if getattr(self, 'prefix', ''): tmpdir_setup(**self.__dict__)", "def setup_vagrant():\n require('hosts', provided_by=[vagrant]) # Sets the environment for Fabric\n sub_add_repos()\n sub_install_packages()\n sub_install_shiny()", "def setup(self):\n # Write sensitive variables from the environment to specified files.\n # NOTE: Both the grid cert and the grid key are necessary for using the grid, EOS, etc!\n self.writeGridCertFromVariableToFile()\n self.writeGridKeyFromVariableToFile()\n self.writeSSHKeyFromVariableToFile()\n\n logger.debug(\"Setting up environment variables.\")\n # Setup environment\n self.setupRoot()\n self.setupReceiverPath()\n self.setupEnvironmentVars()", "def _do_custom_setup(self):\n self._create_handle(\n hostname=self.configuration.ixsystems_server_hostname,\n port=self.configuration.ixsystems_server_port,\n login=self.configuration.ixsystems_login,\n password=self.configuration.ixsystems_password,\n apikey=self.configuration.ixsystems_apikey,\n api_version=self.configuration.ixsystems_api_version,\n transport_type=self.configuration.ixsystems_transport_type)\n\n if not self.handle:\n raise FreeNASApiError(\n \"Failed to create handle for FREENAS server\")", "def _setup_device_and_fuzzer(self):\n # These environment variables are set when start_qemu is run.\n # We need them in order to ssh / otherwise communicate with the VM.\n fuchsia_pkey_path = environment.get_value('FUCHSIA_PKEY_PATH')\n fuchsia_portnum = environment.get_value('FUCHSIA_PORTNUM')\n fuchsia_resources_dir = environment.get_value('FUCHSIA_RESOURCES_DIR')\n if (not fuchsia_pkey_path or not fuchsia_portnum or\n not fuchsia_resources_dir):\n raise fuchsia.errors.FuchsiaConfigError(\n ('FUCHSIA_PKEY_PATH, FUCHSIA_PORTNUM, or FUCHSIA_RESOURCES_DIR was '\n 'not set'))\n\n # Fuzzer objects communicate with the VM via a Device object,\n # which we set up here.\n 
fuchsia_resources_dir_plus_build = os.path.join(fuchsia_resources_dir,\n self.FUCHSIA_BUILD_REL_PATH)\n self.host = Host.from_dir(fuchsia_resources_dir_plus_build)\n self.device = Device(self.host, 'localhost', fuchsia_portnum)\n self.device.set_ssh_option('StrictHostKeyChecking no')\n self.device.set_ssh_option('UserKnownHostsFile=/dev/null')\n self.device.set_ssh_identity(fuchsia_pkey_path)\n\n # Fuchsia fuzzer names have the format {package_name}/{binary_name}.\n package, target = self.executable_path.split('/')\n test_data_dir = os.path.join(fuchsia_resources_dir_plus_build,\n self.FUZZER_TEST_DATA_REL_PATH, package,\n target)\n\n # Finally, we set up the Fuzzer object itself, which will run our fuzzer!\n sanitizer = environment.get_memory_tool_name(\n environment.get_value('JOB_NAME')).lower()\n self.fuzzer = Fuzzer(\n self.device,\n package,\n target,\n output=test_data_dir,\n foreground=True,\n sanitizer=sanitizer)", "def __init__(self, svariant=''):\r\n # Call the Super Class Constructor\r\n GnuArmDevice.__init__(self, 'LPC11U24')\r\n\r\n # Load the Specifics\r\n self.svariant = svariant\r\n self.defines = ['TARGET_LPC11U24', 'TOOLCHAIN_GCC_ARM', '__CORTEX_M0']\r\n self.libs = ['gcc', 'c', 'nosys']\r\n self.ldfile = 'LPC11U24.ld'\r\n self.ldpath = 'LPC11U24'\r\n self.package = 'armld/LPC11U24.7z'\r\n self.flash = 32\r\n self.ram = 8\r\n self.eeprom = 4\r\n self.arch = CPU_CORTEXM0", "def VISA_init():\n return hv_base.VISA_init()", "def init(verbose):\n\n\tif verbose:\n\t\tlog.basicConfig(format=\"%(levelname)s: %(message)s\", level=log.DEBUG)\n\telse:\n\t\tlog.basicConfig(format=\"%(levelname)s: %(message)s\")\n\n\tlog.info(\"Initializing SmartSpa subsystems.\")\n\n\tglobal real_time_config\n\treal_time_config = Config(\"real_time\")\n\n\tinit_sensing()\n\tinit_hardware()\n\tinit_control()\n\tinit_system()\n\tinit_ui()\n\tinit_db()", "def setup_system():\r\n # Install pre-requisites with package manager\r\n install_requirements()\r\n # Create user and target paths as necessary\r\n create_app_context()\r\n # Validate that requisites are in place\r\n check_system()", "def sixteen_ch_setup(self):\n\n print \"Setting up for generic 16 channel readout...\\n\"\n # initialize edt interface\n InitFile = eolib.getCfgVal(self.CfgFile,\"INIT_FILE\")\n if not self.CheckIfFileExists(InitFile):\n print \"Init File not found. Exiting sixteen channel setup\"\n return\n #self.runcmd([\"initrcx0\"]) # This script just does the following:\n self.runcmd([self.EDTdir+\"/initcam\", \"-u\", \"0\", \"-c\", \"0\", \"-f\", InitFile]) \n\n self.runcmd([self.edtsaodir+\"/crst\"]) # Camera reset\n # Turn off the greyscale generator\n print \"Turning greyscale generator off\\n\"\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"30400000\"]) # ad board #1 gray scale off\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"31400000\"]) # ad board #2 gray scale off\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"32400000\"]) # ad board #3 gray scale off\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"33400000\"]) # ad board #4 gray scale off\n\n # Set the system gain to high\n # Note that this gets over-ridden in ccd_setup.\n self.gain(\"HIGH\")\n\n # Set unidirectional mode\n print \"Setting unidirectional CCD serial shift mode\\n\"\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"43000001\"]) # uni on\n\n # Set split mode on. \"Why on?\" you ask. 
Beats me.\n print \"Setting CCD serial register shifts to split mode\\n\"\n self.runcmd([self.edtsaodir+\"/edtwriten\", \"-c\", \"41000001\"]) # split on \n\n self.ccd_channels()\n\n print \"Setting default ADC offsets\\n\"\n\n self.ccd_offsets()\n self.Check_Communications()\n print \"16ch_setup Done.\\n\"\n self.master.update()\n return", "def __init__(self, svariant=''):\r\n # Call the Super Class Constructor\r\n GnuArmDevice.__init__(self, 'LPC11U68')\r\n\r\n # Load the Specifics\r\n self.svariant = svariant\r\n self.defines = ['TARGET_LPC11U68', 'TOOLCHAIN_GCC_ARM', '__CORTEX_M0_PLUS']\r\n self.libs = ['gcc', 'c', 'nosys']\r\n self.ldfile = 'LPC11U24.ld'\r\n self.ldpath = 'LPC11U24'\r\n self.package = 'armld/LPC11U24.7z'\r\n self.flash = 256\r\n self.ram = 32\r\n self.eeprom = 4\r\n self.arch = CPU_CORTEXM0PLUS", "def init_s1ap_tester(self):\n # config ip first, because cloud tests will restart gateway\n self.configIpBlock()\n\n self._s1setup()\n self._configUEApp()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shutdown Vera subscriptions and subscription thread on exit.
def stop_subscription(event):
    _LOGGER.info("Shutting down subscriptions.")
    VERA_CONTROLLER.stop()
[ "async def cleanup(self) -> None:\n for pf in self._scheduled_functions.values():\n pf.stop()\n for t in self._in_stream.values():\n t.cancel()\n if self._cb_app_heartbeat:\n self._cb_app_heartbeat.stop()\n if self.name and self._red is not None:\n # Unregister app from redis.\n await self._red.hdel(REDIS_KEYS.APPS.value, self._app_rkey) # type: ignore\n if self._red is not None:\n await self._red.close()\n if self._red_sub is not None:\n await self._red_sub.close()", "def atexit(self):\n self.stop_listen()\n for driver, _ in self.drivers:\n if hasattr(driver, \"atexit\"):\n driver.atexit()\n try:\n self.listener_thread.join()\n except AttributeError:\n pass", "def stop_wemo(event):\n _LOGGER.info(\"Shutting down subscriptions.\")\n SUBSCRIPTION_REGISTRY.stop()", "def shutdown(self):\n\n self.logger.info(\"Shutting down %s\"%__name__)\n\n try:\n self.logger.info(\"Closing websocket\")\n self.websocketClient.close()\n except Exception as e:\n self.logger.error(\"Websocket close error : %s \" %e)\n\n self.alive = False\n \n self.threadProcessQueue.join()\n\n time.sleep(1)\n self.exit = True", "def shutdown(self):\n\n self.registered.clear()\n self.ultros = None", "def _shutdown(self, *args):\r\n\t\tfor callback in self.onShutdown:\r\n\t\t\ttry:\r\n\t\t\t\tcallback()\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\tsys.exit()", "def unsubscribe(self) -> None:\n if self.cdata is None:\n return\n if self.asyncio_register and self.fd != -1:\n self.loop.remove_reader(self.fd)\n try:\n check_call(lib.sr_unsubscribe, self.cdata)\n finally:\n self.cdata = None\n for t in list(self.tasks.values()):\n t.cancel()\n self.tasks.clear()", "def Destroy(self):\n info('Shutting down client')\n self.exiting.set()\n if hasattr(self, 'sensorsClient'):\n self.sensorsClient.StopMonitoring()\n if hasattr(self, 'schedulerEngine'):\n self.schedulerEngine.stop()\n if hasattr(self, 'updater'):\n self.updater.stop()\n if hasattr(self, 'writerThread'):\n self.writerThread.stop()\n if hasattr(self, 'processorThread'):\n self.processorThread.stop()\n ThreadPool.Shutdown()\n self.Disconnect()\n info('Client shut down')", "def stop(self):\n if self.run_matrx_api:\n if self.verbose:\n print(\"Shutting down Matrx api\")\n _ = requests.get(\"http://localhost:\" + str(api._port)\n + \"/shutdown_API\")\n self.api_info[\"api_thread\"].join()\n\n if self.run_matrx_visualizer:\n if self.verbose:\n print(\"Shutting down Matrx visualizer\")\n _ = requests.get(\"http://localhost:\"\n + str(visualization_server.port)\n + \"/shutdown_visualizer\")\n self.matrx_visualizer_thread.join()", "def cleanup() -> None:\n\n global _broadcaster\n _broadcaster = None", "def _destroy_event_subscriber(self, sub):\n #self.remove_endpoint(sub) -- why this is making tests fail?\n # TODO determine whether self.remove_endpoint is the appropriate call\n # here and if so, how it should be used. 
For now, calling sub.close()\n # (this only change made the difference between successful tests and\n # failing tests that actually never exited -- I had to kill them).\n sub.close()", "def close(self):\n PubSubManager.remove_subscriber(self)", "def shutdownHandler():\n closeall()\n zLOG.LOG('Z2', zLOG.INFO , \"Shutting down\")\n sys.exit(0)", "def stopSubscription(self) -> None:\n ...", "async def unsubscribe_all(self):\n while self.subscriptions:\n name, subscription = self.subscriptions.popitem()\n await subscription.force_unsubscribe()", "def _exitHandler():\r\n _activeScheduler.killAll()", "def collectd_shutdown(self):\n self.server.stop()", "def stop(self):\n # self.stop_watches()\n logging.debug(\"Sending stop signal to etcd watcher thread\")\n self.keep_running = False\n self.observer_thread.join()\n logging.info(\"Romana watcher plugin: Stopped\")", "async def stop(self):\n # pylint: disable=protected-access\n # Workaround until PR is accepted:\n # https://github.com/pubnub/python/pull/99\n # self._pubnub.stop()\n await self._pubnub._session.close()\n if self._pubnub._subscription_manager is not None:\n self._pubnub._subscription_manager.stop()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
IFieldWidget factory for SelectWidget.
def CollectionChoiceSelectFieldWidget(field, value_type, request):
    return SelectFieldWidget(field, None, request)
[ "def _createWidget(context, field, viewType, request):\n field = field.bind(context)\n return component.getMultiAdapter((field, request), viewType)", "def new_default_widget(self):\n if self.choices is not None:\n widget = QtWidgets.QComboBox()\n elif self.dtype in [int, float]:\n if self.si:\n widget = pg.SpinBox()\n else:\n widget = QtWidgets.QDoubleSpinBox()\n elif self.dtype in [bool]:\n widget = QtWidgets.QCheckBox() \n elif self.dtype in [str]:\n widget = QtWidgets.QLineEdit()\n self.connect_to_widget(widget)\n \n return widget", "def get_select_renderer():\n return RenderFieldValue(\"select\",\n view_renderer=Select_view_renderer(view_select),\n edit_renderer=Select_edit_renderer(edit_select),\n )", "def choiceWidget(field):\n label = field.verbose_name\n\n choices = []\n choices.append(('', label))\n for choice in field.choices:\n choices.append((str(choice), unicode(choice)))\n return widgets.Select(choices=choices)", "def choiceWidget(field):\n choices = _generateChoices(field)\n label = field.verbose_name\n choices = [('', label)] + choices\n return forms.Select(choices=choices)", "def __init__(self, attrs=None):\n# if attrs:\n# if 'choices' in attrs:\n# lang_choices=choices\n# else:\n# lang_choices = [\"Gherkin\", \"Tomato\" ] \n print \"in LanguageFieldsWidget.__init__()...\"\n lang_choices = (\n \n ('eggs', 'Eggs'),\n ('tomato', 'Tomato'),\n ('gherkin', 'Gherkin'),\n \n ) \n \n widgets = (\n forms.Select(attrs={'widget_name': 'language_name'}, choices=(lang_choices)), \n forms.Select(attrs={'widget_name': 'language_usage'}, choices=(UserLanguage.LANGUAGE_USE_CHOICES)),\n forms.Select(attrs={'widget_name': 'language_preferred'}, choices=(UserLanguage.LANGUAGE_PREFERENCE_CHOICES))\n )\n super(LanguageFieldsWidget, self).__init__(widgets, attrs)", "def BlockDataGridFieldObjectFactory(field, request):\n\n # Create a normal DataGridFieldObject widget\n widget = FieldWidget(field, BlockDataGridFieldObject(request))\n return widget", "def create_widget(self):\n self.widget = wx.ComboBox(self.parent_widget(), style=wx.CB_READONLY)", "def get_form(self, request, obj=None, **kwargs):\n form = super(ProjectAdmin, self).get_form(request, obj, **kwargs)\n form.base_fields[\"client\"] = df.ModelChoiceField(\n queryset=Client.objects.all(),\n widget=df.FilteringSelect(attrs={'queryExpr': '${0}*'}),\n empty_label='', required=False)\n return form", "def from_feat(cls, feat, parent=None):\n\n _get = cls._WRAPPERS.get\n\n if feat.values:\n if isinstance(feat.values, dict):\n tmp = set(feat.values.keys())\n else:\n tmp = set(feat.values)\n\n if tmp == {True, False}:\n widget = _get(QtGui.QCheckBox)\n else:\n widget = _get(QtGui.QComboBox)\n elif not feat.units is None or feat.limits:\n widget = _get(QtGui.QDoubleSpinBox)\n else:\n widget= _get(QtGui.QLineEdit)\n\n widget = widget(parent)\n cls.wrap(widget)\n\n return widget", "def createWidget( self, parent ):\n widget = self.widgetClass()(parent, self.uiFile())\n widget.setPlugin(self)\n return widget", "def get_add_field_selector(field=''):\n if field not in settings.multiple_choice_fields:\n return ''\n tagSelection = settings.multiple_choice_fields[field]\n return render_template('modals/select_gramm.html', tag_table=tagSelection)", "def get_choice_renderer():\n return RenderFieldValue(\"choice\",\n view_renderer=Select_view_renderer(view_choice),\n edit_renderer=Select_edit_renderer(edit_choice),\n )", "def option_widget(self, ):\n pass", "def formfield_for_choice_field(self, db_field, request, **kwargs):\n # If the field is named as a 
radio_field, use a RadioSelect\n if db_field.name in self.radio_fields:\n # Avoid stomping on custom widget/choices arguments.\n if \"widget\" not in kwargs:\n\n # BEGIN CUSTOMIZATION\n kwargs[\"widget\"] = SemanticRadioSelect(\n attrs={\n \"class\": get_ul_class(self.radio_fields[db_field.name]),\n }\n )\n # END CUSTOMIZATION\n\n if \"choices\" not in kwargs:\n kwargs[\"choices\"] = db_field.get_choices(\n include_blank=db_field.blank, blank_choice=[(\"\", _(\"None\"))]\n )\n\n # BEGIN CUSTOMIZATION\n if \"widget\" not in kwargs:\n kwargs[\"widget\"] = SemanticSelect()\n # END CUSTOMIZATION\n\n return db_field.formfield(**kwargs)", "def formfield_for_foreignkey(self, db_field, request, **kwargs):\n db = kwargs.get(\"using\")\n\n if db_field.name in self.get_autocomplete_fields(request):\n # BEGIN CUSTOMIZATION\n\n kwargs[\"widget\"] = SemanticAutocompleteSelect(\n db_field, self.admin_site, using=db\n )\n\n # END CUSTOMIZATION\n\n elif db_field.name in self.raw_id_fields:\n # TODO\n kwargs[\"widget\"] = widgets.ForeignKeyRawIdWidget(\n db_field.remote_field, self.admin_site, using=db\n )\n elif db_field.name in self.radio_fields:\n kwargs[\"widget\"] = SemanticRadioSelect(\n attrs={\"class\": get_ul_class(self.radio_fields[db_field.name])}\n )\n kwargs[\"empty_label\"] = _(\"None\") if db_field.blank else None\n\n if \"queryset\" not in kwargs:\n queryset = self.get_field_queryset(db, db_field, request)\n if queryset is not None:\n kwargs[\"queryset\"] = queryset\n\n # BEGIN CUSTOMIZATION\n if \"widget\" not in kwargs:\n kwargs[\"widget\"] = SemanticSelect()\n # END CUSTOMIZATION\n\n return db_field.formfield(**kwargs)", "def mailchimp_grouping_factory(self, grouping):\r\n field_type = grouping.get('form_field', None)\r\n name = grouping.get('name', None)\r\n groups = grouping.get('groups', [])\r\n choices = ((x['name'], x['name']) for x in groups)\r\n kwargs = {'label': name, 'choices': choices, 'required': False}\r\n\r\n if field_type == 'checkboxes':\r\n kwargs.update({'widget': forms.CheckboxSelectMultiple})\r\n return forms.MultipleChoiceField(**kwargs)\r\n\r\n if field_type == 'radio':\r\n kwargs.update({'widget': forms.RadioSelect})\r\n return forms.ChoiceField(**kwargs)\r\n\r\n if field_type == 'dropdown':\r\n kwargs.update({'widget': forms.Select})\r\n return forms.ChoiceField(**kwargs)\r\n\r\n if field_type == 'hidden':\r\n kwargs.update({'widget': forms.HiddenInput})\r\n return forms.ChoiceField(**kwargs)", "def make_choose_control(field_name,\n included_label,\n included_items,\n excluded_label,\n excluded_items,\n item_to_text=str,\n item_to_value=str,\n ordered=0):\n \n # We'll construct an array of buttons. Each element is an HTML\n # input control.\n buttons = []\n # Construct the encoding for the items initially included.\n initial_value = string.join(map(item_to_value, included_items), \",\")\n # The hidden control that will contain the encoded representation of\n # the included items.\n hidden_control = '<input type=\"hidden\" name=\"%s\" value=\"%s\">' \\\n % (field_name, initial_value)\n # Construct names for the two select controls.\n included_select_name = \"_inc_\" + field_name\n excluded_select_name = \"_exc_\" + field_name\n\n # The select control for included items. 
When the user selects an\n # item in this list, deselect the selected item in the excluded\n # list, if any.\n included_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (included_select_name, excluded_select_name)\n # Build options for items initially selected.\n for item in included_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n included_select = included_select + option\n included_select = included_select + '</select>\\n'\n\n # The select control for excluded items. When the user selects an\n # item in this list, deselect the selected item in the included\n # list, if any.\n excluded_select = '''\n <select name=\"%s\"\n width=\"160\"\n size=\"8\"\n onchange=\"document.form.%s.selectedIndex = -1;\">''' \\\n % (excluded_select_name, included_select_name)\n # Build options for items initially excluded.\n for item in excluded_items:\n option = '<option value=\"%s\">%s</option>\\n' \\\n % (item_to_value(item), item_to_text(item))\n excluded_select = excluded_select + option\n excluded_select = excluded_select + '</select>\\n'\n\n # The Add button.\n button = '''\n <input type=\"button\"\n value=\" << Add \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />\n ''' % (excluded_select_name, included_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n # The Remove button.\n button = '''\n &nbsp;<input\n type=\"button\"\n value=\" Remove >> \"\n onclick=\"move_option(document.form.%s, document.form.%s);\n document.form.%s.value =\n encode_select_options(document.form.%s);\" />&nbsp;\n ''' % (included_select_name, excluded_select_name,\n field_name, included_select_name)\n buttons.append(button)\n\n if ordered:\n # The Move Up button.\n button = '''\n <input type=\"button\"\n value=\" Move Up \"\n onclick=\"swap_option(document.form.%s, -1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n\n buttons.append(button)\n\n # The Move Down button.\n button = '''\n <input type=\"button\"\n value=\" Move Down \"\n onclick=\"swap_option(document.form.%s, 1);\n document.form.%s.value =\n encode_select_options(document.form.%s);\"/>\n ''' % (included_select_name, field_name, included_select_name)\n buttons.append(button)\n\n # Arrange everything properly.\n buttons = string.join(buttons, \"\\n<br />\\n\")\n return '''\n %(hidden_control)s\n <table border=\"0\" cellpadding=\"0\" cellspacing=\"0\">\n <tr valign=\"center\">\n <td>\n %(included_label)s:\n <br />\n %(included_select)s\n </td>\n <td align=\"center\">\n %(buttons)s\n </td>\n <td>\n %(excluded_label)s:<br />\n %(excluded_select)s\n </td>\n </tr>\n </table>\n ''' % locals()", "def create_widget(self):\n self.widget = UIControl()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test parsing and streaming session response KNX/IP packet.
def test_session_response(self):
    public_key = bytes.fromhex(
        "bd f0 99 90 99 23 14 3e"  # Diffie-Hellman Server Public Value Y
        "f0 a5 de 0b 3b e3 68 7b"
        "c5 bd 3c f5 f9 e6 f9 01"
        "69 9c d8 70 ec 1f f8 24"
    )
    message_authentication_code = bytes.fromhex(
        "a9 22 50 5a aa 43 61 63"  # Message Authentication Code
        "57 0b d5 49 4c 2d f2 a3"
    )
    raw = (
        bytes.fromhex(
            "06 10 09 52 00 38"  # KNXnet/IP header
            "00 01"  # Secure Session Identifier
        )
        + public_key
        + message_authentication_code
    )
    knxipframe, _ = KNXIPFrame.from_knx(raw)
    assert isinstance(knxipframe.body, SessionResponse)
    assert knxipframe.body.secure_session_id == 1
    assert knxipframe.body.ecdh_server_public_key == public_key
    assert (
        knxipframe.body.message_authentication_code == message_authentication_code
    )
    assert knxipframe.to_knx() == raw

    session_response = SessionResponse(
        secure_session_id=1,
        ecdh_server_public_key=public_key,
        message_authentication_code=message_authentication_code,
    )
    knxipframe2 = KNXIPFrame.init_from_body(session_response)
    assert knxipframe2.to_knx() == raw
[ "def test_connect_response(self):\n raw = (\n 0x06,\n 0x10,\n 0x02,\n 0x06,\n 0x00,\n 0x14,\n 0x01,\n 0x00,\n 0x08,\n 0x01,\n 0xC0,\n 0xA8,\n 0x2A,\n 0x0A,\n 0x0E,\n 0x57,\n 0x04,\n 0x04,\n 0x11,\n 0xFF,\n )\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n knxipframe.from_knx(raw)\n self.assertTrue(isinstance(knxipframe.body, ConnectResponse))\n self.assertEqual(knxipframe.body.communication_channel, 1)\n self.assertEqual(knxipframe.body.status_code, ErrorCode.E_NO_ERROR)\n self.assertEqual(\n knxipframe.body.control_endpoint, HPAI(ip_addr=\"192.168.42.10\", port=3671)\n )\n self.assertEqual(\n knxipframe.body.request_type, ConnectRequestType.TUNNEL_CONNECTION\n )\n self.assertEqual(knxipframe.body.identifier, 4607)\n\n knxipframe2 = KNXIPFrame(xknx)\n knxipframe2.init(KNXIPServiceType.CONNECT_RESPONSE)\n knxipframe2.status_code = ErrorCode.E_NO_ERROR\n knxipframe2.body.communication_channel = 1\n knxipframe2.body.request_type = ConnectRequestType.TUNNEL_CONNECTION\n knxipframe2.body.control_endpoint = HPAI(ip_addr=\"192.168.42.10\", port=3671)\n knxipframe2.body.identifier = 4607\n knxipframe2.normalize()\n\n self.assertEqual(knxipframe2.to_knx(), list(raw))", "def read_response(self, read_packet):\n pass", "def test_icmpv6_ping_controller(self):\n echo_replies = self.rcv_packet(\n 2,\n 0x200,\n {\n \"eth_src\": self.P2_V200_MAC,\n \"eth_dst\": FAUCET_MAC,\n \"vid\": 0x200,\n \"ipv6_src\": \"fc00::1:1\",\n \"ipv6_dst\": \"fc00::1:254\",\n \"echo_request_data\": self.ICMP_PAYLOAD,\n },\n )[self.DP_ID]\n packet_outs = ValveTestBases.packet_outs_from_flows(echo_replies)\n self.assertTrue(packet_outs)\n data = packet_outs[0].data\n self.assertTrue(data.endswith(self.ICMP_PAYLOAD), msg=data)", "def test_is_remote_response_parsed_as_io(self):\n # Build IO data\n # One sample, ADC 0 enabled\n # DIO 1,3,5,7 enabled\n header = b'\\x01\\x02\\xAA'\n\n # First 7 bits ignored, DIO8 low, DIO 0-7 alternating\n # ADC0 value of 255\n sample = b'\\x00\\xAA\\x00\\xFF'\n data = header + sample\n\n device = Serial()\n device.set_read_data(APIFrame(\n data=b'\\x97D\\x00\\x13\\xa2\\x00@oG\\xe4v\\x1aIS\\x00' + data).output()\n )\n\n xbee = XBee(device, io_loop=self._patch_io)\n\n xbee._process_input(None, None)\n info = yield xbee.wait_read_frame()\n expected_info = {'id': 'remote_at_response',\n 'frame_id': b'D',\n 'source_addr_long': b'\\x00\\x13\\xa2\\x00@oG\\xe4',\n 'source_addr': b'v\\x1a',\n 'command': b'IS',\n 'status': b'\\x00',\n 'parameter': [{'dio-1': True,\n 'dio-3': True,\n 'dio-5': True,\n 'dio-7': True,\n 'adc-0': 255}]}\n self.assertEqual(info, expected_info)", "def test_parsing_too_short_knxip(self):\n raw = bytes.fromhex(\"06 10 02 07 00 10 15 00 08 01 C0 A8 C8 0C C3\")\n with pytest.raises(IncompleteKNXIPFrame):\n KNXIPFrame.from_knx(raw)", "def test_split_short_at_response(self):\n\n data = b'\\x88DMY\\x01'\n info = self.xbee._split_response(data)\n expected_info = {'id': 'at_response',\n 'frame_id': b'D',\n 'command': b'MY',\n 'status': b'\\x01'}\n self.assertEqual(info, expected_info)", "def test_ip_input_icmp_reply(self):\n #\n # hop limit - ICMP replies\n #\n p_version = (\n Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6, hlim=1)\n / inet6.UDP(sport=1234, dport=1234)\n / Raw(b\"\\xa5\" * 100)\n )\n\n rxs = self.send_and_expect_some(self.pg0, p_version * NUM_PKTS, self.pg0)\n\n for rx in rxs:\n icmp = rx[ICMPv6TimeExceeded]\n # 0: \"hop limit exceeded in transit\",\n self.assertEqual((icmp.type, icmp.code), (3, 0))", "def 
test_connect_response_connection_error(self):\n raw = (0x06, 0x10, 0x02, 0x06, 0x00, 0x08, 0x00, 0x24)\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n knxipframe.from_knx(raw)\n self.assertTrue(isinstance(knxipframe.body, ConnectResponse))\n self.assertEqual(knxipframe.body.status_code, ErrorCode.E_NO_MORE_CONNECTIONS)\n self.assertEqual(knxipframe.body.communication_channel, 0)\n\n knxipframe2 = KNXIPFrame(xknx)\n knxipframe2.init(KNXIPServiceType.CONNECT_RESPONSE)\n knxipframe2.body.status_code = ErrorCode.E_NO_MORE_CONNECTIONS\n knxipframe2.normalize()\n\n self.assertEqual(knxipframe2.to_knx(), list(raw))", "def test_04_read_server_parsed(self):\n self.fake_sfile.set_reply_buf('x\\037y\\036\\r\\na\\037b\\037c\\036\\r\\n.\\r\\n')\n out = self.conn._read_server(True)\n self.assertEquals(len(out), 2)\n self.assertEquals(len(out[0]), 2)\n self.assertEquals(len(out[1]), 3)\n self.assertEquals(out[1][1], 'b')", "def run_test_ok():\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n cseq = 1\n session = None\n\n try:\n s.connect((host,port))\n except socket.error, (value,message):\n if s:\n s.close()\n print \"Could not open socket: \" + message\n sys.exit(-1)\n\n for y in range(msg_num):\n s.send(msg[y].format(cseq,session))\n cseq = cseq + 1\n data = s.recv(buffSize)\n\n if y == 0: print \"\"\n print \"Sending:\", msg_sem[y]\n print '\\n', data[:len(data)-3]\n\n if not session:\n session = parse_session_id(data)\n if session:\n print \"\\n>>> Parsed session ID:\", session\n\n print \"*\"*80\n if y == msg_num - 1: print \"\"\n\n s.close()", "def test_verify_session_count(self):\n self.cgn.get_xml_output = MagicMock()\n xml_tree = self.xml.xml_string_to_dict(xml_str=self.response[\"VERIFY_SESSION\"])\n xml_tree = xml_tree['rpc-reply']\n _xpath = 'usf-session-count-information/usf-session-count'\n for path in _xpath.split('/'):\n xml_tree = xml_tree[path]\n self.cgn.get_xml_output.return_value = xml_tree\n self.cgn.verify_session_count(application_dict ={'udp':'1'})\n self.cgn.fn_checkout.assert_called_with(False)", "def parse_packet(self, data):\n if data.find(self.packet_prefix) != 0:\n raise Exception('Malformed packet')\n\n first_line_length = data.find(b'\\n')\n if first_line_length == -1:\n raise Exception('Malformed packet')\n\n response_type = data[len(self.packet_prefix):first_line_length].decode()\n response_data = data[first_line_length + 1:].decode()\n return response_type, response_data", "def is_probe_response(data):\n\treturn len(data) == 390 and ord(data[25]) == 0x50 and ord(data[26]) in [0x00, 0x08]", "def test_12_get_reply_information_single(self):\n self.fake_sfile.set_reply_buf('350 Foo Info\\r\\n')\n self.assertEquals(self.conn._get_reply(False), ['Foo Info'])", "def test_next_header_anomaly(self):\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6, nh=44)\n / ICMPv6EchoRequest()\n )\n\n self.pg0.add_stream(pkt)\n self.pg_start()\n\n # wait for reassembly\n self.sleep(10)", "def recvPacket(self, packet):\n # log and mote stats\n self.log(\n SimEngine.SimLog.LOG_APP_RX,\n {\n u'_mote_id': self.mote.id,\n u'packet' : packet\n }\n )", "def test_is_response_parsed_as_io(self):\n # Build IO data\n # One sample, ADC 0 enabled\n # DIO 1,3,5,7 enabled\n header = b'\\x01\\x02\\xAA'\n\n # First 7 bits ignored, DIO8 low, DIO 0-7 alternating\n # ADC0 value of 255\n sample = b'\\x00\\xAA\\x00\\xFF'\n data = header + sample\n\n device = Serial()\n device.set_read_data(APIFrame(data=b'\\x88DIS\\x00' + 
data).output())\n xbee = XBee(device, io_loop=self._patch_io)\n\n xbee._process_input(None, None)\n info = yield xbee.wait_read_frame()\n expected_info = {'id': 'at_response',\n 'frame_id': b'D',\n 'command': b'IS',\n 'status': b'\\x00',\n 'parameter': [{'dio-1': True,\n 'dio-3': True,\n 'dio-5': True,\n 'dio-7': True,\n 'adc-0': 255}]}\n self.assertEqual(info, expected_info)", "def _process_session(self):\n try:\n for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True):\n if chunk_type == ChunkType.STDOUT:\n self._write_flush(self._stdout, payload)\n elif chunk_type == ChunkType.STDERR:\n self._write_flush(self._stderr, payload)\n elif chunk_type == ChunkType.EXIT:\n self._write_flush(self._stdout)\n self._write_flush(self._stderr)\n return int(payload)\n elif chunk_type == ChunkType.PID:\n self.remote_pid = int(payload)\n elif chunk_type == ChunkType.START_READING_INPUT:\n self._maybe_start_input_writer()\n else:\n raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload))\n finally:\n # Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in\n # NailgunProtocol.iter_chunks(). This ensures the NailgunStreamWriter is always stopped.\n self._maybe_stop_input_writer()", "def incoming_reply(pkt):\n return pkt[ARP].psrc != str(get_if_addr(conf.iface)) and pkt[ARP].op == 2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets a random node from the nodes list. If fixed is set, the node at nodes[fixed] will be returned; this may be useful in stress tests.
def random_node(self):
    rnd = random.randint(0, len(self.clients_lst)-1) if not self.fixed_node else self.fixed_node
    pod_ip, pod_name = self.clients_lst[rnd]['pod_ip'], self.clients_lst[rnd]['name']
    if not self.fixed_node:
        print("randomly ", end="")
    print(f"selected pod: ip = {pod_ip}, name = {pod_name}")
    return pod_ip, pod_name
[ "def getRandomNode(self, node_type=0):\n\n lists = (self.nodes_list, self.nodes_leaf, self.nodes_branch)\n cho = lists[node_type]\n if len(cho) <= 0:\n return None\n\n return prng.choice(cho)", "def pickNode(self,xrand):\n #setup nearest neighbor filters\n filters = [lambda pt,n: self.prune(n)]\n if self.dynamicDomain:\n filters.append(lambda pt,n:hasattr(n,'ddRadius') and self.metric(n.x,xrand) >= n.ddRadius)\n if self.successBiasing:\n filters.append(lambda pt,n: (random.random() > float(n.numExpansionsSuccessful+1) / float(n.numExpansionsAttempted+1)))\n #do the lookup\n res = self.nearestNeighbors.nearest(xrand,lambda pt,n:any(f(pt,n) for f in filters))\n if res == None: return None\n n = res[1]\n return n", "def sample_random_node(self):\n #Naive Approach \n return self.tree[int(self.rng.random()*len(self.tree))] # OUT OF BOUNDS ERRORS? Check this", "def getRandom(self):\n res = 0\n count = 1\n node = self.h\n while node:\n r = random.randint(1, count)\n if r == 1:\n res = node.val\n node = node.next\n count += 1\n return res", "def generate_random_node(self):\n if np.random.random_sample() > self.goal_sample_rate:\n x = np.random.uniform(self.sample_space.x_min, self.sample_space.x_max)\n y = np.random.uniform(self.sample_space.y_min, self.sample_space.y_max)\n z = np.random.uniform(self.sample_space.z_min, self.sample_space.z_max)\n node = Node(np.array((x, y, z)))\n else:\n node = self.goal_node\n\n return node", "def pickNode(self):\n\treturn sample_weighted([1.0/n.density for n in self.nodes],self.nodes)", "def chooseNode(possible_nodes, title, message): \n return hou.Node()", "def pick_nodes(self):\n if self.nodes == []:\n return []\n return self.nodes\n # return sample(self.nodes,1)", "def randomcell(grid, checkfree = True):\n if checkfree:\n lst = [i for i in range(9) if grid[i] == None]\n p = random.choice(lst)\n else:\n p = random.choice(grid)\n return p", "def getRandom(self, items=None):\n if items == None:\n items = self.organisms\n \n nitems = len(items)\n n2items = nitems * nitems\n \n # pick one parent randomly, favouring fittest\n idx = int(sqrt(randrange(n2items)))\n return items[nitems - idx - 1]", "def copy_random_list(head):\n\n if not head:\n return None\n\n node_dict = {}\n node = head\n while node:\n node_dict[id(node)] = RandomListNode(node.label)\n node = node.next\n\n node = head\n while node:\n if node.next:\n node_dict[id(node)].next = node_dict[id(node.next)]\n if node.random:\n node_dict[id(node)].random = node_dict[id(node.random)]\n node = node.next*<F19>`\n \n return node_dict[id(head)]", "def get_cheapest_nodes(self, num_nodes):\n with db_rlock:\n\n # retrieve sorted node list of \"up\" ips\n rows = []\n count = 0\n cheapest_price = 0\n for row in self.cursor.execute(\"SELECT * FROM nodes WHERE up=1 ORDER BY price ASC\"):\n\n # Check to see if we are over the requested count\n count = count + 1\n if count > num_nodes:\n # If the next node we are looking at has a higher price, then we can bail.\n # Otherwize we will add it to the list for consideration.\n if int(row[2]) > cheapest_price:\n break\n\n obj = {\n 'ip': row[0],\n 'up': row[1] > 0,\n 'price': int(row[2]),\n 'url': row[3],\n }\n rows.append(obj)\n cheapest_price = int(row[2])\n\n # Return a random subset of the cheapest nodes (count of num_nodes)\n return random.sample(rows, num_nodes)", "def random_nodes(G, n, prng):\n\tgraph_nodes = list(G.nodes)\n\tif n > len(graph_nodes):\n\t\tn = len(graph_nodes)\n\tnodes = prng.sample(graph_nodes, n)\n\tnodes = np.array(nodes)\n\n\treturn nodes", 
"def test_linked_list_search_in_many_returns_proper_node(n):\n from linked_list import LinkedList\n from random import randint\n l = LinkedList()\n for i in range(1, n + 1):\n l.push(i)\n search_me = randint(1, n)\n assert l.search(search_me).data == search_me", "def getRandomNeighbor(self):\n\n rdm1 = rdm2 = 0\n while rdm1 == rdm2:\n rdm1 = random.randint(0, int(self.nbObject) - 1)\n rdm2 = random.randint(0, int(self.nbObject) - 1)\n\n tmp = self.solution[rdm1]\n\n self.solution[rdm1] = self.solution[rdm2]\n self.solution[rdm2] = tmp", "def random_state(self):\n return np.random.randint(0, 2, self.nodes)", "def get_random_node_pair(n):\n i = np.random.randint(0, n)\n j = i\n while j == i:\n j = np.random.randint(0, n)\n return i, j", "def getRandomRootIP():\n pos = random.randint(0, len(ROOT_DNSES) - 1)\n return ROOT_DNSES[pos]", "def generate_rand_free_node(config: List[BaseJoint], collider: MatlabCollisionChecker, clim: List[float],\n qlim: List[float], q_margin: Optional[float] = 0.1) -> List[float]:\n for _ in range(MAX_ITERATIONS):\n # Generate random node\n jcurr = [uniform(jmin * (1 + q_margin), jmax * (1 - q_margin)) for jmin, jmax in zip(qlim[0::2], qlim[1::2])]\n\n if is_node_free_and_within(config, collider, jcurr, clim):\n return jcurr\n raise ValueError('Could not generate more nodes.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs commands on remote Linux instances.
def execute_commands_on_linux_instances(client, commands, instance_ids):
    resp = client.send_command(
        DocumentName="AWS-RunShellScript",  # One of AWS' preconfigured documents
        Parameters={'commands': commands},
        InstanceIds=instance_ids,
    )
    return resp
[ "def execute_commands_on_linux_instances(config: dict, commands: List[str], instance_ips: List[str]):\n for instance_ip in instance_ips:\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n logging.info(f\"Connecting to {instance_ip}...\")\n client.connect(\n hostname=instance_ip,\n username=config['SSH_USER'],\n password=config['SSH_PASS'],\n look_for_keys=False\n )\n except Exception as e:\n logging.error(\n f\"Could not connect to {instance_ip}. Please make sure that port 22 is open in the instance and \"\n f\"the ssh credentials in config.yml are correct before retrying.\"\n )\n continue\n\n for command in commands:\n logging.info(f\"Running {command} on {instance_ip}...\")\n try:\n client.exec_command(command)\n except Exception as e:\n logging.error(f\"Could not run '{command}' on '{instance_ip}'.\")\n logging.error(e)\n continue\n client.close()", "def run_ssh(target, commands):\n # cmd = \"ssh scion@%s '%s'\" % (target, commands)\n # res = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')\n res = call(['ssh', target, commands])\n return res", "def run(self):\r\n\r\n self.wait_answer(['vagrant', 'ssh', self.machine, '-c', self.cmd])", "def remote_exec(remote_cmd, hostname):\n return subprocess.Popen([\"ssh\",\"-p\", SSH_PORT, USER_DICT[hostname] + \"@\" + hostname,\n remote_cmd], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)", "def phone_ssh_cmd(self, cmd):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n if self.hq_rsa:\n ssh.connect(self.phone_info['ipAddress'], username=\"admin\", key_filename=self.hq_rsa_path)\n else:\n ssh.connect(self.phone_info['ipAddress'], username=\"root\", password=self.phone_info['ssh_password'])\n # if str(self.get_firmware_version()).startswith('5.1'):\n # ssh.connect(self.phone_info['ipAddress'], username=\"root\", password=self.phone_info['ssh_password'])\n # else:\n # ssh.connect(self.phone_info['ipAddress'], username=\"admin\", key_filename=self.hq_rsa_path)\n except (paramiko.BadHostKeyException, paramiko.AuthenticationException,paramiko.SSHException):\n ssh.close()\n raise Exception(\"SSH connection failed!! 
IP, uname, or rsa may be incorrect\")\n return\n\n logger.info(\"Running ssh cmd: \\\"%s\\\" on phone %s\" % (cmd, self.phone_info['ipAddress']))\n stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)\n result = stdout.readlines()\n\n if ssh:\n ssh.close()\n return result", "def phone_ssh_cmd(self, cmd):\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(self.phone_info['ipAddress'],username=\"admin\",key_filename=self.hq_rsa_path)\n\n logger.info(\"Running ssh cmd: \\\"%s\\\" on phone %s\" % (cmd, self.phone_info['ipAddress']))\n stdin, stdout, stderr = self.ssh.exec_command(cmd, get_pty=True)\n result = stdout.readlines()\n \n if self.ssh:\n self.ssh.close()\n return result", "def run_remote_guest(ip, domain, command):\n\n cmd = 'python %s %s \"%s\"' % (CONSOLE_APP_PATH, domain, command)\n\n return run_remote(ip, cmd)", "def do_ssh(self,args):\n parser = CommandArgumentParser(\"ssh\")\n parser.add_argument(dest='instance',help='instance index or name');\n parser.add_argument('-a','--address-number',default='0',dest='interface-number',help='instance id of the instance to ssh to');\n parser.add_argument('-ii','--ignore-host-key',dest='ignore-host-key',default=False,action='store_true',help='Ignore host key')\n parser.add_argument('-ne','--no-echo',dest='no-echo',default=False,action='store_true',help='Do not echo command')\n parser.add_argument('-L',dest='forwarding',nargs='*',help=\"port forwarding string of the form: {localport}:{host-visible-to-instance}:{remoteport} or {port}\")\n parser.add_argument('-R','--replace-key',dest='replaceKey',default=False,action='store_true',help=\"Replace the host's key. This is useful when AWS recycles an IP address you've seen before.\")\n parser.add_argument('-Y','--keyscan',dest='keyscan',default=False,action='store_true',help=\"Perform a keyscan to avoid having to say 'yes' for a new host. Implies -R.\")\n parser.add_argument('-B','--background',dest='background',default=False,action='store_true',help=\"Run in the background. (e.g., forward an ssh session and then do other stuff in aws-shell).\")\n parser.add_argument('-v',dest='verbosity',default=0,action=VAction,nargs='?',help='Verbosity. 
The more instances, the more verbose'); \n parser.add_argument('-m',dest='macro',default=False,action='store_true',help='{command} is a series of macros to execute, not the actual command to run on the host');\n parser.add_argument(dest='command',nargs='*',help=\"Command to run on all hosts.\") # consider adding a filter option later\n args = vars(parser.parse_args(args))\n\n interfaceNumber = int(args['interface-number'])\n forwarding = args['forwarding']\n replaceKey = args['replaceKey']\n keyscan = args['keyscan']\n background = args['background']\n verbosity = args['verbosity']\n ignoreHostKey = args['ignore-host-key']\n noEcho = args['no-echo']\n\n # Figure out the host to connect to:\n target = args['instance']\n try:\n index = int(args['instance'])\n instances = self.scalingGroupDescription['AutoScalingGroups'][0]['Instances']\n instance = instances[index]\n target = instance['InstanceId']\n except ValueError: # if args['instance'] is not an int, for example.\n pass\n \n if args['macro']:\n if len(args['command']) > 1:\n print(\"Only one macro may be specified with the -m switch.\")\n return\n else:\n macro = args['command'][0]\n print(\"Macro:{}\".format(macro))\n command = Config.config['ssh-macros'][macro]\n else:\n command = ' '.join(args['command'])\n \n ssh(target,interfaceNumber,forwarding,replaceKey,keyscan,background,verbosity,command,ignoreHostKey=ignoreHostKey,echoCommand = not noEcho)", "def instances_exec_all(cfg: Config, remote_cmd: Sequence[str]):\n escaped = shlex.join(remote_cmd)\n if not are_you_sure(f\"exec command {escaped} in all instances\", cfg):\n return\n\n print(\"Running '{}' on all instances\".format(escaped))\n exec_remote_all(pick_instances(cfg), remote_cmd)", "def remote_command(ip, cmd):\n result = subprocess.call(['./ssh-cmd.sh', ip, cmd])\n return result", "def executeMe(cmd, target):\n print \"Initiating connection to remote server %s\", target\n initiate_SSH(target)\n print \"Done!\"\n print \"Sending command '%s' to remote server %s\", (cmd, target)\n stdin, stdout, stderr = ssh.exec_command(cmd)\n channel_out = stdout.channel\n print \"--->Current clock time: \", now()\n print \"--->Checking channel exit ready status...\"\n print \"--->exit status ready ===>\", channel_out.exit_status_ready()\n print \"--\"\n print \"--->Checking recv exit status...\"\n status = channel_out.recv_exit_status()\n print \"--->recv exit status.. 
===>\", status\n print \"--\"\n print \"--->Checking channel exit ready status...\"\n print \"--->exit status ready ===>\", channel_out.exit_status_ready()\n print \"--\"\n print \"--->Receiving response from server...\"\n print \"--\"\n for line in stdout.readlines():\n print line\n print \"\"\n for line in stderr.readlines():\n print line", "def instances_login(cfg: Config):\n instance = pick_instance(cfg)\n run_remote_shell(instance)", "def command(self, command):\n current_dir = os.getcwd()\n os.chdir(self.vagrant_dir)\n vagrant_ssh = '/opt/vagrant/bin/vagrant ssh -c \"{0}\"'.format(command)\n subprocess.call(vagrant_ssh, shell=True)\n os.chdir(current_dir)", "def ssh(node):\n if len(node) == 2:\n # we received env and node name\n env = node[0]\n running = helpers.check_sim_running(env)\n node = node[1]\n elif len(node) == 1:\n # assume default env\n env = 'default'\n running = helpers.check_sim_running(env)\n node = node[0]\n else:\n exit(call(['virl', 'ssh', '--help']))\n\n if running:\n sim_name = running\n server = VIRLServer()\n details = server.get_sim_roster(sim_name)\n\n # default ssh username can be overriden\n username = server.config.get('VIRL_SSH_USERNAME', 'cisco')\n\n if node:\n try:\n node_dict = get_node_from_roster(node, details)\n node_name = node_dict.get(\"NodeName\")\n ip = node_dict['managementIP']\n proxy = node_dict.get(\"managementProxy\")\n\n if 'VIRL_SSH_COMMAND' in server.config:\n cmd = server.config['VIRL_SSH_COMMAND']\n cmd = cmd.format(host=ip, username=username)\n print(\"Calling user specified command: {}\".format(cmd))\n exit(call(cmd.split()))\n\n if proxy == 'lxc':\n lxc = get_mgmt_lxc_ip(details)\n if lxc:\n click.secho(\"Attemping ssh connection\"\n \"to {} at {} via {}\".format(node_name,\n ip, lxc))\n cmd = 'ssh -o \"ProxyCommand ssh -W %h:%p {}@{}\" {}@{}'\n cmd = cmd.format(server.user, lxc, username, ip)\n\n exit(call(cmd, shell=True))\n else:\n # handle the \"flat\" networking case\n click.secho(\"Attemping ssh connection\"\n \"to {} at {}\".format(node_name,\n ip))\n\n exit(call(['ssh', '{}@{}'.format(username, ip)]))\n\n except AttributeError:\n click.secho(\"Could not find management info\"\n \" for {}:{}\".format(env, node), fg=\"red\")\n\n except KeyError:\n click.secho(\"Unknown node {}:{}\".format(env, node), fg=\"red\")\n else:\n return details.json()", "def remote_console(ctx, app_name, ssh_opts, ssh_cmd):\n gigalixir_app.remote_console(ctx.obj['host'], app_name, ssh_opts, ssh_cmd)", "def run_in_remote_server(s,command):\n try:\n (_, stdout, stderr) = s.exec_command(command)\n error_msg = stderr.read()\n if len(error_msg) > 0:\n return (False, error_msg)\n except Exception,e:\n return (False, str(e))\n \n return (True, stdout)", "def start():\n local('aws ec2 start-instances --instance-ids %s'%(AWS_INSTANCE_ID))", "def execute(self, name, command):\n if name in [\"localhost\"]:\n r = '\\n'.join(sh.sh(\"-c\", command).split()[-1:])\n else:\n r = '\\n'.join(sh.ssh(name, command).split()[-1:])\n return r", "def execute(cluster_yaml, cmd):\n config = load_config(cluster_yaml)\n head_updater = get_head_updater(config)\n head_updater.ssh_cmd(\" \".join(cmd), verbose=True)", "def enable_ssh(self):\n\n\n host = get_platform()\n\n if host == \"windows\":\n Console.error(\"Not yet implemented for this OS\")\n return \"\"\n\n if host in ['raspberry', \"linux\"]:\n sudo = True\n else:\n sudo = False\n\n card = SDCard(card_os=\"raspberry\", host=host)\n if sudo:\n command = f'sudo touch {card.boot_volume}/ssh'\n else:\n command = f'touch 
{card.boot_volume}/ssh'\n\n self.system(command)\n\n return \"\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the event enters category X, given the tuple computed by eventCategory.
def isInCategory(category, categoryData):
    if category == 0:
        return categoryData[0]
    if category == 1:
        return categoryData[1]
    if category == 2:
        return categoryData[0] or categoryData[1]
    else:
        return False
[ "def event_in(event, widget):\n x, y = event.x_root, event.y_root\n x1, y1, x2, y2 = (\n widget.winfo_rootx(),\n widget.winfo_rooty(),\n widget.winfo_rootx() + widget.winfo_width(),\n widget.winfo_rooty() + widget.winfo_height(),\n )\n return x1 < x < x2 and y1 < y < y2", "def has_category(item):\n return item in DataHelper.item2category", "def checkEventConsistency(event):\n\tisValid = 1\n\tif (event):\n\t\tif (event.startDate >= event.endDate):\n\t\t\tisValid = 0\n\t\tif (not (isCallGroupMember(event.oncallPerson, event.callGroup.pk))):\n\t\t\tisValid = 0\n\telse:\n\t\tisValid = 0\n\treturn isValid", "def is_event_type(self, type_name):\n is_event_type = self.event_type.name == type_name\n event_category = self.event_type\n while (not is_event_type and\n event_category.parent_category is not None\n ):\n event_category = event_category.parent_category\n is_event_type = is_event_type or event_category.name == type_name\n return is_event_type", "def test_code_to_category_application_event(self):\n # Application Event Logging, PS3.4 Annex P\n c2c = code_to_category\n\n assert c2c(0x0000) == \"Success\"\n for code in [0xC101, 0xC102, 0xC103, 0xC104, 0xC10E, 0xC110, 0xC111]:\n assert c2c(code) == \"Failure\"\n for code in [0xB101, 0xB102, 0xB104]:\n assert c2c(code) == \"Warning\"", "def _validate_category(self, category):\n try:\n voc_term_title(self.fields['category'].field, category)\n return True\n except LookupError:\n return False", "def check_event(self, event):\n # pylint: disable=R0201, W0613\n return True", "def _is_cat_name_relevant(klass, cat_name):\n exceptions = [\n \"Sconosciuto\",\n \"Corridoio\",\n \"Atrio\",\n \"Corridoio\",\n \"Balcone\",\n \"Porticato\",\n \"Terrazzo\",\n \"Cortile\"\n ]\n return cat_name not in exceptions", "def check_events(self, event:Event):\n pass", "def is_battle_category(category_name):\n category_name = category_name.lower()\n return any(x in category_name for x in BATTLE_CATEGORY_KEYWORDS)", "def test_category_4_a(self):\n ex_indx = 4\n\n message_object = self.example_object[ex_indx]\n category = message_object.data_category\n\n self.assertIsNone(category)", "def varInEvent(self, var):\n\n return var in self.event.keys()", "def catAndMouse(x, y, z):\n return \"Cat A\" if abs(x-z) < abs(y-z) else \"Cat B\" if abs(x-z) > abs(y-z) \\\n else \"Mouse C\"", "def _category_exclude_func(self, test, result):\n if test.categories:\n category = self._callbacks.get_category()\n if category is None:\n self._callbacks.undefined_category(test, result)\n return True\n elif category not in test.categories:\n self._callbacks.unsupported_category(test, result)\n return True\n\n return False", "def test_case(self, case):\n return 0 <= case[0] < self.xmax and 0 <= case[1] < self.ymax and self.etat[case] == 0", "def _should_ignore_error_category(monitoring, error_category):\n if not monitoring:\n return False\n if monitoring.silenced:\n return True\n if (monitoring.silenced_until and\n monitoring.silenced_until >= utils.utcnow()):\n return True\n if (monitoring.threshold and len(error_category.events) <\n monitoring.threshold):\n return True\n return False", "def check_tag_category(self, cr, uid, ids, code=None, name=None, context=None):\n assert bool(code is not None) or bool(name is not None), \"code or name must not be None\"\n tag_domain = [('id', 'in', ids)]\n if code is not None:\n tag_domain.append(('tag_ids.category_id.code', '=', code))\n if name is not None:\n tag_domain.append(('tag_ids.category_id.name', '=', name))\n\n count = self.search(cr, uid, 
tag_domain, count=1)\n return bool(count == len(ids))", "def is_event_sample(sample):\n return \"event_type\" in sample[\"resource_metadata\"]", "def _any(self, category_name, min_score, image_items_list):\n\t\tfor item in image_items_list:\n\t\t\tif (item['class'] == category_name) and (item['score'] > min_score): return True\n\t\treturn False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adjust an item to store it the way we want to based on the format and other restrictions.
def adjustItemForStorage(item, format=None, ingestSource=None, service=None,
                         region=None):
    if (item['url'].startswith('http://instagram.com/p/') or
            item['url'].startswith('https://instagram.com/p/')):
        item['url'] = (
            'i/' + item['url'].split('://instagram.com/p/', 1)[1])
    if format == 'message' or format == 'json':
        item['msg_date'] = int(item['posted_date'])
        item['msg_date_ms'] = int(float(item['posted_date']) * 1000)
        if 'caption' in item:
            item['msg'] = item['caption']
    item['ingest_date'] = int(item.get('scraped_date', IngestTime))
    item['ingest_source'] = ingestSource
    item['service'] = service
    item['region'] = region
[ "def format_item(item):\n\n for key, value in item.items():\n if not value:\n continue\n if key in [\n 'cislo_lv', 'cislo_ku', 'cislo_obce', 'cislo_casti_obce',\n 'vymera', 'cislo_stavebniho_objektu', 'ext_id_parcely',\n 'pocet_bytu', 'zastavena_plocha', 'podlahova_plocha',\n 'pocet_podlazi', 'ext_id_stavebniho_objektu'\n ]:\n item[key] = common.string_to_int(value)\n\n elif key in ['datum_dokonceni']:\n item[key] = common.get_date_from_string(value)", "def prepare_item(self, item):\n # Checking all features in items, padding them and converting them to the right dtype\n for feature in item:\n assert feature in self.proto_fields, 'Feature %s is not in proto fields.' % feature\n proto_field = self.proto_fields[feature]\n\n # Converting sets to lists\n if isinstance(item[feature], set):\n item[feature] = list(item[feature])\n\n # Var Len - Converting and flattening\n # Scalar - Just converting\n # Fixed Len - Padding and converting\n if proto_field.dtype is None:\n continue\n elif isinstance(proto_field, VarProtoField):\n item[feature] = np.array(item[feature], proto_field.dtype).flatten()\n elif not proto_field.shape:\n item[feature] = np.array(item[feature], proto_field.dtype)\n elif isinstance(proto_field, FixedProtoField):\n item[feature] = np.array(pad_list(item[feature], proto_field.shape), proto_field.dtype)\n\n # Returning item\n return item", "def _validate_item(self, item):\n if item.quality < 0:\n item.quality = 0\n elif item.quality > 50:\n item.quality = 50", "def add_item(self, item):", "def PopulateItem(self, item):\n item_output = {}\n self.CopyToDict(item,\n item_output,\n ['name', 'type', 'min_size', 'max_size', 'multiplier'])\n\n if item['type'] == 'group':\n children = []\n for child_item in item['items']:\n child_item_output = self.PopulateItem(child_item)\n children.append(child_item_output)\n item_output['items'] = children\n\n labeled_values = []\n for value, label in item.get('labels', []):\n labeled_value_output = {\n 'value': value,\n 'label': label,\n }\n labeled_values.append(labeled_value_output)\n if labeled_values:\n item_output['enums'] = labeled_values\n\n ranges = []\n for min_value, max_value in item.get('range', []):\n range_output = {\n 'min': min_value,\n 'max': max_value,\n }\n ranges.append(range_output)\n if ranges:\n item_output['ranges'] = ranges\n return item_output", "def fix(\n self,\n items,\n apply_filters,\n apply_transforms,\n delete_fields,\n to_process_item,\n dry_run=True,\n ):\n for item in items:\n # If the number of keys is zero, it was a deleted entry, or\n # otherwise uninteresting.\n if len(item[\"meta\"].keys()) and to_process_item(item):\n data = item[\"data\"]\n self.apply_filters(data, apply_filters)\n self.apply_transforms(data, apply_transforms)\n self.delete_fields(data, delete_fields)\n if not dry_run:\n item[\"meta\"][\"fixed\"] = 1\n try:\n self.update_item(item)\n except zotero_errors.UnsupportedParams as e:\n print(item, e)\n if dry_run:\n print(\"WARNING: just a dry run\")", "def save_item(self, item: Any) -> None:\n item_rejected = False\n original_item = item\n for processor in self.config.item_processors:\n item = processor(item)\n if item is None:\n item_rejected = True\n break\n if item_rejected:\n logger.debug('item %s was rejected', original_item)\n return\n\n logger.debug('writing item %s to file %s', item, self.config.backup_filename)\n with self._lock:\n write_mp(self.config.backup_filename, item, mode='a', encoder=self.config.msgpack_encoder)", "def type_cast(cls, item: Union[str, Kwarg]) -> Union[int, 
float, str]:\n if isinstance(item, Kwarg):\n item.value(cls._type_cast(item.value()))\n else:\n item = cls._type_cast(item)\n return item", "def fill_item(self, item):\n fext = ''\n self.gdriveid = item['id']\n self.filename = item['name']\n if 'md5Checksum' in item:\n self.md5sum = item['md5Checksum']\n _temp = {}\n if 'modifiedTime' in item:\n _temp['st_mtime'] = int(parse(item['modifiedTime']).strftime(\"%s\"))\n if 'fileSize' in item:\n _temp['st_size'] = item['fileSize']\n if 'size' in item:\n _temp['st_size'] = item['size']\n self.fill_stat(**_temp)\n self.mimetype = item['mimeType']\n item_parents = item.get('parents', [])\n if len(item_parents) > 0:\n self.parentid = item['parents'][0]\n if self.mimetype == 'application/vnd.google-apps.folder':\n return\n if 'webContentLink' in item:\n self.urlname = item['webContentLink']\n if 'fileExtension' in item:\n fext = item['fileExtension']\n # this is meant to fix a very specific bug, not a good idea in general\n if self.filename.lower().endswith('.{f}.{f}'.format(f=fext)):\n self.filename = '.'.join(self.filename.split('.')[:-1])\n if fext.lower() not in self.filename.lower():\n print('file extension', self.filename, fext)\n self.filename += '.%s' % fext\n if 'owners' in item:\n self.owned_by_me = any(x.get('me', False) for x in item['owners'])", "def _to_pystac_item(item: Union[None, Dict, pystac.Item]) -> Union[None, pystac.Item]:\n if isinstance(item, Dict):\n return pystac.Item.from_dict(item)\n\n return item", "def normalize_shape(item):\n padding = 4 - len(item.shape) #len(DataRaw.DEFAULT_DIMS)\n if padding > 0:\n item.shape = ([1] * padding) + list(item.shape)\n return item", "def _format_item_entries_closeout(self):\n self.closeout_df['item'] = self.closeout_df['item'].apply(self._format_items)", "def process_item(self, item, spider):\n\n listing = GenericListings(**item)\n\n self.session.add(listing)\n\n try:\n self.session.commit()\n except:\n self.session.rollback()\n raise\n\n\n return item", "def make_item_from_raw(entry, smm_info):\n d = {}\n # map to internal labels and flip names\n d['idno'] = entry[u'Identifikationsnr']\n d['typ'] = entry[u'Typ av objekt']\n d['benamning'] = entry[u'Benämning']\n d['material'] = entry[u'Material']\n d['namn_konstnar'] = helpers.flip_name(entry[u'Namn-Konstnär'])\n namn_konstnar_knav = entry[u'Konstnär-KulturNav']\n d['namn_konstruktor'] = helpers.flip_names(entry[u'Namn-Konstruktör'])\n namn_konstruktor_knav = entry[u'Konstruktör-KulturNav']\n d['namn_fotograf'] = helpers.flip_name(entry[u'Namn-Fotograf'])\n d['namn_tillverkare'] = helpers.flip_names(entry[u'Namn-Tillverkare'])\n d['date_foto'] = entry[u'Datering-Fotografering']\n d['date_produktion'] = entry[u'Datering-Produktion']\n avbildad_namn = entry[u'Avbildade namn']\n avbildad_namn_knav = entry[u'Avbildade-KulturNav']\n d['avbildad_ort'] = entry[u'Avbildade - orter']\n d['amnesord'] = entry[u'Ämnesord']\n d['beskrivning'] = entry[u'Beskrivning']\n d['motiv_amnesord'] = entry[u'Motiv-ämnesord']\n d['motiv_beskrivning'] = entry[u'Motiv-beskrivning']\n d['rattighet'] = entry[u'Rättigheter']\n d['samling'] = entry[u'Samling']\n d['dimukod'] = entry[u'Dimukode']\n\n # handle kulturNav\n if namn_konstnar_knav:\n smm_info.add_to_k_nav_list(\n namn_konstnar_knav, d['namn_konstnar'])\n if namn_konstruktor_knav:\n smm_info.add_to_k_nav_list(\n namn_konstruktor_knav,\n d['namn_konstruktor'][0])\n if avbildad_namn_knav:\n smm_info.add_to_k_nav_list(\n avbildad_namn_knav,\n helpers.flip_name(avbildad_namn[0]))\n\n # split avbildad_namn into 
people and ships/boat types\n # a person is anyone with a name like Last, First\n d['avbildad_person'] = []\n d['avbildat_fartyg'] = []\n for a in avbildad_namn:\n if a != helpers.flip_name(a):\n d['avbildad_person'].append(helpers.flip_name(a))\n else:\n d['avbildat_fartyg'].append(a)\n # add to dict, now with flipped names\n d['avbildad_namn'] = d['avbildad_person'] + d['avbildat_fartyg']\n\n # cleanup lists\n d['avbildad_person'] = common.trim_list(d['avbildad_person'])\n d['avbildat_fartyg'] = common.trim_list(d['avbildat_fartyg'])\n d['avbildad_namn'] = common.trim_list(d['avbildad_namn'])\n\n # cleanup blacklisted\n if d['date_foto'].strip('.').lower() in smm_info.bad_date:\n d['date_foto'] = ''\n if d['date_produktion'].strip('.').lower() in smm_info.bad_date:\n d['date_produktion'] = ''\n if d['namn_konstnar'].lower() in smm_info.bad_namn:\n d['namn_konstnar'] = ''\n if d['namn_fotograf'].lower() in smm_info.bad_namn:\n d['namn_fotograf'] = ''\n\n return SMMItem(d)", "def export_item(self, item):\n pass", "def _item_to_value(_, item: str) -> str:\n return item", "def __setitem__(self, item, value):\n keys = None\n if \".\" in item:\n keys = item.split(\".\")\n d = self.data\n for i in keys[:-1]:\n d = d[i]\n d[keys[-1]] = value\n else:\n self.data[item] = value\n self.save()", "def handle_item(item):\n owner = item.metadata.labels.get('owner')\n if not owner:\n raise Rejection(\"Label 'owner' missing from {}:{}\".format(\n item.metadata.namespace, item.metadata.name), 'MissingOwner')\n\n # Update the item's template. All deployments should have a template with labels; we will\n # update the 'owner' label iff it's not present.\n # If the label is present and doesn't match the deployment's label, raise an error, since we\n # don't want to figure out if it's used in the deployment's selector before mutating.\n\n template_metadata = item.spec.template.metadata\n\n if 'owner' not in template_metadata.labels:\n # Set the template's owner label.\n template_metadata.labels['owner'] = owner\n elif template_metadata.labels['owner'] != owner:\n raise Rejection(\n 'Template label owner={} does not match Deployment label owner={}'.format(\n owner, template_metadata.labels['owner']), 'MismatchedOwner')\n\n # Return the updated / validated item.\n return item", "def putItem(self, container, item, quantity=1):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert an Instagram JSON object to our item format.
def convertInstagramJSONToItem(inst, partial):
    if (not inst.get('location', None) or
            'latitude' not in inst['location'] or
            'longitude' not in inst['location']):
        return None
    item = {
        'user_name': inst['user']['username'],
        'user_fullname': inst['user']['full_name'],
        'user_id': inst['user']['id'],
        'posted_date': inst['created_time'],
        'url': inst['link'],
        'latitude': inst['location']['latitude'],
        'longitude': inst['location']['longitude'],
        'comment_count': inst['comments']['count'],
        'comments': '',
        'like_count': inst['likes']['count'],
        'likes': '',
        'scraped_date': int(inst['created_time']),
        'msg_id': inst['link'].rsplit('/', 1)[-1]
    }
    if inst['caption']:
        item['caption'] = inst['caption']['text']
        item['scraped_date'] = max(item['scraped_date'],
                                   int(inst['caption'].get('created_time', 0)))
    if 'data' in inst['comments']:
        for comm in inst['comments']['data']:
            item['scraped_date'] = max(
                item['scraped_date'],
                int(comm.get('created_time', 0)))
    if not partial:
        if 'id' in inst['location']:
            item['location_id'] = inst['location']['id']
        if 'name' in inst['location']:
            item['location_name'] = inst['location']['name']
        if 'data' in inst['comments']:
            item['comments'] = '|'.join(['%s;%s;%s;%s' % (
                comm['from']['username'], comm['from']['id'],
                comm['created_time'], comm['text'].replace('|', ' ')
            ) for comm in inst['comments']['data']])
        if 'data' in inst['likes']:
            item['likes'] = '|'.join(['%s;%s' % (
                like['username'], like['id']
            ) for like in inst['likes']['data']])
    return item
[ "def convertTwitterJSONToItem(tw, decoder, line, partial):\n if 'created_at' not in tw or tw.get('coordinates', None) is None:\n return None\n if tw['created_at'] in DateLookup:\n date = DateLookup[tw['created_at']]\n else:\n try:\n date = int(calendar.timegm(time.strptime(\n tw['created_at'][4:], \"%b %d %H:%M:%S +0000 %Y\")))\n except ValueError:\n date = int(calendar.timegm(dateutil.parser.parse(\n tw['created_at']).utctimetuple()))\n DateLookup[tw['created_at']] = date\n item = {\n 'user_name': tw['user']['screen_name'],\n 'user_fullname': tw['user']['name'],\n 'user_id': tw['user']['id_str'],\n 'posted_date': date,\n 'caption': decoder.unescape(tw['text']),\n 'url': 't/' + tw['user']['id_str'] + '/' + tw['id_str'],\n 'msg_id': tw['id_str'],\n # 'comment_count': inst['comments']['count'],\n # 'comments': '',\n 'like_count': tw.get('retweet_count', None),\n 'favorites_count': tw.get('favourites_count', None),\n 'followers_count': tw.get('followers_count', None),\n 'friends_count': tw.get('friends_count', None),\n 'statuses_count': tw.get('statuses_count', None),\n # 'likes': '',\n 'latitude': tw['coordinates']['coordinates'][1],\n 'longitude': tw['coordinates']['coordinates'][0],\n 'scraped_date': date\n }\n if 'instagr.am\\\\/p\\\\/' in line:\n item['hash'] = line.split('instagr.am\\\\/p\\\\/', 1)[1].split(\n '\\\\/', 1)[0]\n if ('source' in tw and 'Instagram' in tw['source'] and\n 'entities' in tw and 'urls' in tw['entities'] and\n len(tw['entities']['urls']) >= 1 and\n 'display_url' in tw['entities']['urls'][0] and\n 'instagram' in tw['entities']['urls'][0]['display_url']):\n item['source'] = tw['entities']['urls'][0]['display_url']\n item['utc_offset'] = tw['user']['utc_offset']\n if not partial:\n if ('entities' in tw and 'media' in tw['entities'] and\n len(tw['entities']['media']) > 0 and\n 'media_url_https' in tw['entities']['media'][0]):\n item['image_url'] = tw['entities']['media'][0][\n 'media_url_https']\n if tw.get('place', None) and 'id' in tw['place']:\n item['location_id'] = tw['place']['id']\n if tw.get('place', None) and 'name' in tw['place']:\n item['location_name'] = tw['place']['name']\n return item", "def parse_item(self, item_json):\n track_id, source = item_json['track_id'], item_json['source']\n return get_or_create(db.session, Item, track_id=track_id, source=source)", "def adjustItemForStorage(item, format=None, ingestSource=None, service=None,\n region=None):\n if (item['url'].startswith('http://instagram.com/p/') or\n item['url'].startswith('https://instagram.com/p/')):\n item['url'] = (\n 'i/' + item['url'].split('://instagram.com/p/', 1)[1])\n if format == 'message' or format == 'json':\n item['msg_date'] = int(item['posted_date'])\n item['msg_date_ms'] = int(float(item['posted_date']) * 1000)\n if 'caption' in item:\n item['msg'] = item['caption']\n item['ingest_date'] = int(item.get('scraped_date', IngestTime))\n item['ingest_source'] = ingestSource\n item['service'] = service\n item['region'] = region", "def _convert_json_to_news_bite(self, news_item):\n return NewsBite(\n url=news_item['webUrl'],\n category=news_item['sectionName'],\n headline=news_item['webTitle'],\n publish_date=news_item['webPublicationDate']\n )", "def _parse_json(self):\n try:\n self.main_item = self.video_obj.get('items')[0]\n self.video_data = self.main_item.get('snippet')\n self.content = self.main_item.get('contentDetails')\n self.statistics = self.main_item.get('statistics')\n except Exception as e:\n raise Exception(e)", "def add_item_from_json(self, item_json):\n item_name 
= item_json[\"type\"]\n item_location = tuple(item_json[\"position\"])\n\n return self.add_item(item_name, item_location)", "def from_json(self, json_str):", "def to_representation(self, obj):\n return {\n 'id': obj.id,\n 'image_type': obj.image_type\n }", "def _to_pystac_item(item: Union[None, Dict, pystac.Item]) -> Union[None, pystac.Item]:\n if isinstance(item, Dict):\n return pystac.Item.from_dict(item)\n\n return item", "def from_json(self, value):\n raise NotImplementedError", "def _json_reddit_objecter(self, json_data):\n try:\n object_class = self.config.by_kind[json_data['kind']]\n except KeyError:\n if 'json' in json_data:\n if len(json_data) != 1:\n msg = 'Unknown object type: {0}'.format(json_data)\n warn_explicit(msg, UserWarning, '', 0)\n return json_data['json']\n else:\n return object_class.from_api_response(self, json_data['data'])\n return json_data", "def write(self, **kwargs: Any) -> ItemEntity:\r\n json_data = super()._write(data=self.get_json(), **kwargs)\r\n return self.from_json(json_data=json_data)", "def make_item_from_raw(entry, smm_info):\n d = {}\n # map to internal labels and flip names\n d['idno'] = entry[u'Identifikationsnr']\n d['typ'] = entry[u'Typ av objekt']\n d['benamning'] = entry[u'Benämning']\n d['material'] = entry[u'Material']\n d['namn_konstnar'] = helpers.flip_name(entry[u'Namn-Konstnär'])\n namn_konstnar_knav = entry[u'Konstnär-KulturNav']\n d['namn_konstruktor'] = helpers.flip_names(entry[u'Namn-Konstruktör'])\n namn_konstruktor_knav = entry[u'Konstruktör-KulturNav']\n d['namn_fotograf'] = helpers.flip_name(entry[u'Namn-Fotograf'])\n d['namn_tillverkare'] = helpers.flip_names(entry[u'Namn-Tillverkare'])\n d['date_foto'] = entry[u'Datering-Fotografering']\n d['date_produktion'] = entry[u'Datering-Produktion']\n avbildad_namn = entry[u'Avbildade namn']\n avbildad_namn_knav = entry[u'Avbildade-KulturNav']\n d['avbildad_ort'] = entry[u'Avbildade - orter']\n d['amnesord'] = entry[u'Ämnesord']\n d['beskrivning'] = entry[u'Beskrivning']\n d['motiv_amnesord'] = entry[u'Motiv-ämnesord']\n d['motiv_beskrivning'] = entry[u'Motiv-beskrivning']\n d['rattighet'] = entry[u'Rättigheter']\n d['samling'] = entry[u'Samling']\n d['dimukod'] = entry[u'Dimukode']\n\n # handle kulturNav\n if namn_konstnar_knav:\n smm_info.add_to_k_nav_list(\n namn_konstnar_knav, d['namn_konstnar'])\n if namn_konstruktor_knav:\n smm_info.add_to_k_nav_list(\n namn_konstruktor_knav,\n d['namn_konstruktor'][0])\n if avbildad_namn_knav:\n smm_info.add_to_k_nav_list(\n avbildad_namn_knav,\n helpers.flip_name(avbildad_namn[0]))\n\n # split avbildad_namn into people and ships/boat types\n # a person is anyone with a name like Last, First\n d['avbildad_person'] = []\n d['avbildat_fartyg'] = []\n for a in avbildad_namn:\n if a != helpers.flip_name(a):\n d['avbildad_person'].append(helpers.flip_name(a))\n else:\n d['avbildat_fartyg'].append(a)\n # add to dict, now with flipped names\n d['avbildad_namn'] = d['avbildad_person'] + d['avbildat_fartyg']\n\n # cleanup lists\n d['avbildad_person'] = common.trim_list(d['avbildad_person'])\n d['avbildat_fartyg'] = common.trim_list(d['avbildat_fartyg'])\n d['avbildad_namn'] = common.trim_list(d['avbildad_namn'])\n\n # cleanup blacklisted\n if d['date_foto'].strip('.').lower() in smm_info.bad_date:\n d['date_foto'] = ''\n if d['date_produktion'].strip('.').lower() in smm_info.bad_date:\n d['date_produktion'] = ''\n if d['namn_konstnar'].lower() in smm_info.bad_namn:\n d['namn_konstnar'] = ''\n if d['namn_fotograf'].lower() in smm_info.bad_namn:\n 
d['namn_fotograf'] = ''\n\n return SMMItem(d)", "async def transform_feed(self, data: dict) -> dict:\n if data[\"roles\"]:\n try:\n data[\"roles\"] = loads(data[\"roles\"])\n except TypeError:\n data[\"roles\"] = None\n else:\n data[\"roles\"] = list()\n if data[\"embed_structure\"]:\n try:\n data[\"embed_structure\"] = loads(data[\"embed_structure\"])\n except TypeError:\n data[\"embed_structure\"] = None\n else:\n data[\"embed_structure\"] = None\n if data[\"date\"]:\n data[\"date\"] = datetime.datetime.strptime(data[\"date\"], \"%Y-%m-%d %H:%M:%S\")\n data[\"added_at\"] = datetime.datetime.strptime(\n data[\"added_at\"], \"%Y-%m-%d %H:%M:%S\"\n )\n return data", "def _from_json(cls, obj):\n assert obj['type'] == 'recipe'\n\n if 'result' in obj or ('results' in obj and len(obj['results']) == 1):\n return SingleResultRecipe.from_json(obj)\n # TODO(brian@sweetapp.com): Should handle multiple result recipes.", "def parse_json(obj):\n return ensure_dict(obj, 'JWE')", "def to_as1(obj, type=None):\n if not obj:\n return {}\n\n type = obj.get('$type') or type\n if not type:\n raise ValueError('Bluesky object missing $type field')\n\n # TODO: once we're on Python 3.10, switch this to a match statement!\n if type in ('app.bsky.actor.defs#profileView',\n 'app.bsky.actor.defs#profileViewBasic'):\n images = [{'url': obj.get('avatar')}]\n banner = obj.get('banner')\n if banner:\n images.append({'url': obj.get('banner'), 'objectType': 'featured'})\n\n handle = obj.get('handle')\n did = obj.get('did')\n\n ret = {\n 'objectType': 'person',\n 'id': did,\n 'url': (Bluesky.user_url(handle) if handle\n else did_web_to_url(did) if did and did.startswith('did:web:')\n else None),\n 'displayName': obj.get('displayName'),\n 'summary': obj.get('description'),\n 'image': images,\n }\n\n elif type == 'app.bsky.feed.post':\n text = obj.get('text', '')\n\n # convert facets to tags\n tags = []\n for facet in obj.get('facets', []):\n tag = {}\n\n for feat in facet.get('features', []):\n if feat.get('$type') == 'app.bsky.richtext.facet#link':\n tag.update({\n 'objectType': 'article',\n 'url': feat.get('uri'),\n })\n elif feat.get('$type') == 'app.bsky.richtext.facet#mention':\n tag.update({\n 'objectType': 'mention',\n 'url': Bluesky.user_url(feat.get('did')),\n })\n\n index = facet.get('index', {})\n # convert indices from UTF-8 encoded bytes to Unicode chars (code points)\n # https://github.com/snarfed/atproto/blob/5b0c2d7dd533711c17202cd61c0e101ef3a81971/lexicons/app/bsky/richtext/facet.json#L34\n byte_start = index.get('byteStart')\n if byte_start is not None:\n tag['startIndex'] = len(text.encode()[:byte_start].decode())\n byte_end = index.get('byteEnd')\n if byte_end is not None:\n tag['displayName'] = text.encode()[byte_start:byte_end].decode()\n tag['length'] = len(tag['displayName'])\n\n tags.append(tag)\n\n in_reply_to = obj.get('reply', {}).get('parent', {}).get('uri')\n\n ret = {\n 'objectType': 'comment' if in_reply_to else 'note',\n 'content': text,\n 'inReplyTo': [{\n 'id': in_reply_to,\n 'url': at_uri_to_web_url(in_reply_to),\n }],\n 'published': obj.get('createdAt', ''),\n 'tags': tags,\n }\n\n elif type in ('app.bsky.feed.defs#postView', 'app.bsky.embed.record#viewRecord'):\n ret = to_as1(obj.get('record') or obj.get('value'))\n author = obj.get('author') or {}\n uri = obj.get('uri')\n ret.update({\n 'id': uri,\n 'url': (at_uri_to_web_url(uri, handle=author.get('handle'))\n if uri.startswith('at://') else None),\n 'author': to_as1(author, type='app.bsky.actor.defs#profileViewBasic'),\n })\n\n # 
convert embeds to attachments\n for embed in util.get_list(obj, 'embeds') + util.get_list(obj, 'embed'):\n embed_type = embed.get('$type')\n\n if embed_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(embed))\n\n elif embed_type in ('app.bsky.embed.external#view',\n 'app.bsky.embed.record#view'):\n ret.setdefault('attachments', []).append(to_as1(embed))\n\n elif embed_type == 'app.bsky.embed.recordWithMedia#view':\n ret.setdefault('attachments', []).append(to_as1(\n embed.get('record', {}).get('record')))\n media = embed.get('media')\n media_type = media.get('$type')\n if media_type == 'app.bsky.embed.external#view':\n ret.setdefault('attachments', []).append(to_as1(media))\n elif media_type == 'app.bsky.embed.images#view':\n ret.setdefault('image', []).extend(to_as1(media))\n else:\n assert False, f'Unknown embed media type: {media_type}'\n\n elif type == 'app.bsky.embed.images#view':\n ret = [{\n 'url': img.get('fullsize'),\n 'displayName': img.get('alt'),\n } for img in obj.get('images', [])]\n\n elif type == 'app.bsky.embed.external#view':\n ret = to_as1(obj.get('external'), type='app.bsky.embed.external#viewExternal')\n\n elif type == 'app.bsky.embed.external#viewExternal':\n ret = {\n 'objectType': 'link',\n 'url': obj.get('uri'),\n 'displayName': obj.get('title'),\n 'summary': obj.get('description'),\n 'image': obj.get('thumb'),\n }\n\n elif type == 'app.bsky.embed.record#view':\n record = obj.get('record')\n return to_as1(record) if record else None\n\n elif type == 'app.bsky.embed.record#viewNotFound':\n return None\n\n elif type in ('app.bsky.embed.record#viewNotFound',\n 'app.bsky.embed.record#viewBlocked'):\n return None\n\n elif type == 'app.bsky.feed.defs#feedViewPost':\n ret = to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n reason = obj.get('reason')\n if reason and reason.get('$type') == 'app.bsky.feed.defs#reasonRepost':\n ret = {\n 'objectType': 'activity',\n 'verb': 'share',\n 'object': ret,\n 'actor': to_as1(reason.get('by'), type='app.bsky.actor.defs#profileViewBasic'),\n }\n\n elif type == 'app.bsky.graph.follow':\n ret = {\n 'objectType': 'activity',\n 'verb': 'follow',\n 'actor': {\n 'url': obj.get('subject'),\n },\n }\n\n elif type == 'app.bsky.feed.defs#threadViewPost':\n return to_as1(obj.get('post'), type='app.bsky.feed.defs#postView')\n\n elif type == 'app.bsky.feed.defs#generatorView':\n uri = obj.get('uri')\n ret = {\n 'objectType': 'service',\n 'id': uri,\n 'url': at_uri_to_web_url(uri),\n 'displayName': f'Feed: {obj.get(\"displayName\")}',\n 'summary': obj.get('description'),\n 'image': obj.get('avatar'),\n 'author': to_as1(obj.get('creator'), type='app.bsky.actor.defs#profileView'),\n }\n\n else:\n raise ValueError(f'Bluesky object has unknown $type: {type}')\n\n return util.trim_nulls(ret)", "def from_json(cls: AnyClass, /, obj: Any) -> AnyClass:\n raise NotImplementedError", "def items(self, obj):\n posts = super(iTunesPodcastsFeed, self).items(obj)\n posts = [iTunesPodcastPost(item) for item in posts]\n return posts" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
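A minimal usage sketch for the Instagram conversion above. The input dict below is hypothetical (it carries only the fields that convertInstagramJSONToItem actually reads, not a full Instagram API payload), so treat it purely as an illustration of the expected input shape and the resulting item.

# Hypothetical, minimal Instagram-style record; only the keys the converter reads.
inst = {
    'user': {'username': 'someuser', 'full_name': 'Some User', 'id': '42'},
    'created_time': '1380000000',
    'link': 'http://instagram.com/p/abc123',
    'location': {'latitude': 40.7, 'longitude': -74.0, 'name': 'New York'},
    'caption': {'text': 'hello world', 'created_time': '1380000300'},
    'comments': {'count': 0},
    'likes': {'count': 3},
}

item = convertInstagramJSONToItem(inst, partial=False)
# item['msg_id'] == 'abc123', item['location_name'] == 'New York',
# item['scraped_date'] == 1380000300 (the later caption time wins)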
Convert a Twitter JSON object to our item format.
def convertTwitterJSONToItem(tw, decoder, line, partial):
    if 'created_at' not in tw or tw.get('coordinates', None) is None:
        return None
    if tw['created_at'] in DateLookup:
        date = DateLookup[tw['created_at']]
    else:
        try:
            date = int(calendar.timegm(time.strptime(
                tw['created_at'][4:], "%b %d %H:%M:%S +0000 %Y")))
        except ValueError:
            date = int(calendar.timegm(dateutil.parser.parse(
                tw['created_at']).utctimetuple()))
        DateLookup[tw['created_at']] = date
    item = {
        'user_name': tw['user']['screen_name'],
        'user_fullname': tw['user']['name'],
        'user_id': tw['user']['id_str'],
        'posted_date': date,
        'caption': decoder.unescape(tw['text']),
        'url': 't/' + tw['user']['id_str'] + '/' + tw['id_str'],
        'msg_id': tw['id_str'],
        # 'comment_count': inst['comments']['count'],
        # 'comments': '',
        'like_count': tw.get('retweet_count', None),
        'favorites_count': tw.get('favourites_count', None),
        'followers_count': tw.get('followers_count', None),
        'friends_count': tw.get('friends_count', None),
        'statuses_count': tw.get('statuses_count', None),
        # 'likes': '',
        'latitude': tw['coordinates']['coordinates'][1],
        'longitude': tw['coordinates']['coordinates'][0],
        'scraped_date': date
    }
    if 'instagr.am\\/p\\/' in line:
        item['hash'] = line.split('instagr.am\\/p\\/', 1)[1].split(
            '\\/', 1)[0]
    if ('source' in tw and 'Instagram' in tw['source'] and
            'entities' in tw and 'urls' in tw['entities'] and
            len(tw['entities']['urls']) >= 1 and
            'display_url' in tw['entities']['urls'][0] and
            'instagram' in tw['entities']['urls'][0]['display_url']):
        item['source'] = tw['entities']['urls'][0]['display_url']
    item['utc_offset'] = tw['user']['utc_offset']
    if not partial:
        if ('entities' in tw and 'media' in tw['entities'] and
                len(tw['entities']['media']) > 0 and
                'media_url_https' in tw['entities']['media'][0]):
            item['image_url'] = tw['entities']['media'][0][
                'media_url_https']
        if tw.get('place', None) and 'id' in tw['place']:
            item['location_id'] = tw['place']['id']
        if tw.get('place', None) and 'name' in tw['place']:
            item['location_name'] = tw['place']['name']
    return item
[ "def convertInstagramJSONToItem(inst, partial):\n if (not inst.get('location', None) or\n 'latitude' not in inst['location'] or\n 'longitude' not in inst['location']):\n return None\n item = {\n 'user_name': inst['user']['username'],\n 'user_fullname': inst['user']['full_name'],\n 'user_id': inst['user']['id'],\n 'posted_date': inst['created_time'],\n 'url': inst['link'],\n 'latitude': inst['location']['latitude'],\n 'longitude': inst['location']['longitude'],\n 'comment_count': inst['comments']['count'],\n 'comments': '',\n 'like_count': inst['likes']['count'],\n 'likes': '',\n 'scraped_date': int(inst['created_time']),\n 'msg_id': inst['link'].rsplit('/', 1)[-1]\n }\n if inst['caption']:\n item['caption'] = inst['caption']['text']\n item['scraped_date'] = max(item['scraped_date'],\n int(inst['caption'].get('created_time', 0)))\n if 'data' in inst['comments']:\n for comm in inst['comments']['data']:\n item['scraped_date'] = max(\n item['scraped_date'],\n int(comm.get('created_time', 0)))\n if not partial:\n if 'id' in inst['location']:\n item['location_id'] = inst['location']['id']\n if 'name' in inst['location']:\n item['location_name'] = inst['location']['name']\n if 'data' in inst['comments']:\n item['comments'] = '|'.join(['%s;%s;%s;%s' % (\n comm['from']['username'], comm['from']['id'],\n comm['created_time'], comm['text'].replace('|', ' ')\n ) for comm in inst['comments']['data']])\n if 'data' in inst['likes']:\n item['likes'] = '|'.join(['%s;%s' % (\n like['username'], like['id']\n ) for like in inst['likes']['data']])\n return item", "def parse_item(self, item_json):\n track_id, source = item_json['track_id'], item_json['source']\n return get_or_create(db.session, Item, track_id=track_id, source=source)", "def fromJSON(json_in: str):\n obj = json.loads(json_in)\n processed_tweet = ProcessedTweet(\n obj.get(\"id\"),\n obj.get(\"user_id\"),\n obj.get(\"text\")\n )\n\n return processed_tweet", "def fromTweepyJSON(json_in: Dict):\n id = json_in.get(\"id\")\n screen_name = json_in.get(\"screen_name\")\n name = json_in.get(\"name\")\n created_at = json_in.get(\"created_at\")\n followers_count = json_in.get(\"followers_count\")\n friends_count = json_in.get(\"friends_count\")\n listed_count = json_in.get(\"listed_count\")\n favourites_count = json_in.get(\"favourites_count\")\n statuses_count = json_in.get(\"statuses_count\")\n default_profile = json_in.get(\"default_profile\")\n default_profile_image = json_in.get(\"default_profile_image\")\n\n user = User(id=id, name=name, screen_name=screen_name,\n created_at=created_at, followers_count=followers_count,\n friends_count=friends_count, listed_count=listed_count,\n favourites_count=favourites_count, statuses_count=statuses_count,\n default_profile=default_profile,\n default_profile_image=default_profile_image)\n\n return user", "def add_item_from_json(self, item_json):\n item_name = item_json[\"type\"]\n item_location = tuple(item_json[\"position\"])\n\n return self.add_item(item_name, item_location)", "def from_json(self, json_str):", "async def transform_feed(self, data: dict) -> dict:\n if data[\"roles\"]:\n try:\n data[\"roles\"] = loads(data[\"roles\"])\n except TypeError:\n data[\"roles\"] = None\n else:\n data[\"roles\"] = list()\n if data[\"embed_structure\"]:\n try:\n data[\"embed_structure\"] = loads(data[\"embed_structure\"])\n except TypeError:\n data[\"embed_structure\"] = None\n else:\n data[\"embed_structure\"] = None\n if data[\"date\"]:\n data[\"date\"] = datetime.datetime.strptime(data[\"date\"], \"%Y-%m-%d 
%H:%M:%S\")\n data[\"added_at\"] = datetime.datetime.strptime(\n data[\"added_at\"], \"%Y-%m-%d %H:%M:%S\"\n )\n return data", "def make_item_from_raw(entry, smm_info):\n d = {}\n # map to internal labels and flip names\n d['idno'] = entry[u'Identifikationsnr']\n d['typ'] = entry[u'Typ av objekt']\n d['benamning'] = entry[u'Benämning']\n d['material'] = entry[u'Material']\n d['namn_konstnar'] = helpers.flip_name(entry[u'Namn-Konstnär'])\n namn_konstnar_knav = entry[u'Konstnär-KulturNav']\n d['namn_konstruktor'] = helpers.flip_names(entry[u'Namn-Konstruktör'])\n namn_konstruktor_knav = entry[u'Konstruktör-KulturNav']\n d['namn_fotograf'] = helpers.flip_name(entry[u'Namn-Fotograf'])\n d['namn_tillverkare'] = helpers.flip_names(entry[u'Namn-Tillverkare'])\n d['date_foto'] = entry[u'Datering-Fotografering']\n d['date_produktion'] = entry[u'Datering-Produktion']\n avbildad_namn = entry[u'Avbildade namn']\n avbildad_namn_knav = entry[u'Avbildade-KulturNav']\n d['avbildad_ort'] = entry[u'Avbildade - orter']\n d['amnesord'] = entry[u'Ämnesord']\n d['beskrivning'] = entry[u'Beskrivning']\n d['motiv_amnesord'] = entry[u'Motiv-ämnesord']\n d['motiv_beskrivning'] = entry[u'Motiv-beskrivning']\n d['rattighet'] = entry[u'Rättigheter']\n d['samling'] = entry[u'Samling']\n d['dimukod'] = entry[u'Dimukode']\n\n # handle kulturNav\n if namn_konstnar_knav:\n smm_info.add_to_k_nav_list(\n namn_konstnar_knav, d['namn_konstnar'])\n if namn_konstruktor_knav:\n smm_info.add_to_k_nav_list(\n namn_konstruktor_knav,\n d['namn_konstruktor'][0])\n if avbildad_namn_knav:\n smm_info.add_to_k_nav_list(\n avbildad_namn_knav,\n helpers.flip_name(avbildad_namn[0]))\n\n # split avbildad_namn into people and ships/boat types\n # a person is anyone with a name like Last, First\n d['avbildad_person'] = []\n d['avbildat_fartyg'] = []\n for a in avbildad_namn:\n if a != helpers.flip_name(a):\n d['avbildad_person'].append(helpers.flip_name(a))\n else:\n d['avbildat_fartyg'].append(a)\n # add to dict, now with flipped names\n d['avbildad_namn'] = d['avbildad_person'] + d['avbildat_fartyg']\n\n # cleanup lists\n d['avbildad_person'] = common.trim_list(d['avbildad_person'])\n d['avbildat_fartyg'] = common.trim_list(d['avbildat_fartyg'])\n d['avbildad_namn'] = common.trim_list(d['avbildad_namn'])\n\n # cleanup blacklisted\n if d['date_foto'].strip('.').lower() in smm_info.bad_date:\n d['date_foto'] = ''\n if d['date_produktion'].strip('.').lower() in smm_info.bad_date:\n d['date_produktion'] = ''\n if d['namn_konstnar'].lower() in smm_info.bad_namn:\n d['namn_konstnar'] = ''\n if d['namn_fotograf'].lower() in smm_info.bad_namn:\n d['namn_fotograf'] = ''\n\n return SMMItem(d)", "def _convert_json_to_news_bite(self, news_item):\n return NewsBite(\n url=news_item['webUrl'],\n category=news_item['sectionName'],\n headline=news_item['webTitle'],\n publish_date=news_item['webPublicationDate']\n )", "def parse_tweet(data):\r\n return Tweet01(\r\n id=data.get('id', None),\r\n created_at=data.get('created_at', None),\r\n user_id=data.get('user_id', None),\r\n user_name=data.get('user_name', None),\r\n tweet_text=data.get('tweet_text', None),\r\n url=data.get('url')\r\n )", "def write(self, **kwargs: Any) -> ItemEntity:\r\n json_data = super()._write(data=self.get_json(), **kwargs)\r\n return self.from_json(json_data=json_data)", "def from_json(self, value):\n raise NotImplementedError", "def adjustItemForStorage(item, format=None, ingestSource=None, service=None,\n region=None):\n if (item['url'].startswith('http://instagram.com/p/') or\n 
item['url'].startswith('https://instagram.com/p/')):\n item['url'] = (\n 'i/' + item['url'].split('://instagram.com/p/', 1)[1])\n if format == 'message' or format == 'json':\n item['msg_date'] = int(item['posted_date'])\n item['msg_date_ms'] = int(float(item['posted_date']) * 1000)\n if 'caption' in item:\n item['msg'] = item['caption']\n item['ingest_date'] = int(item.get('scraped_date', IngestTime))\n item['ingest_source'] = ingestSource\n item['service'] = service\n item['region'] = region", "def _to_pystac_item(item: Union[None, Dict, pystac.Item]) -> Union[None, pystac.Item]:\n if isinstance(item, Dict):\n return pystac.Item.from_dict(item)\n\n return item", "def accommodate_format(_tweet):\n if isinstance(_tweet, (Status, dict)):\n return PrepTweet(_tweet)\n elif isinstance(_tweet, PrepTweet):\n return _tweet", "def __to_format(self, response):\n if self.format == 'json':\n return response.json()", "def tweet2rest(tweets_json):\n for tweet in tweets_json:\n if not tweet['retweeted'] and tweet['in_reply_to_status_id_str'] == None and tweet['text'][0] != '@' and not 'Instagram' in tweet['source']:\n text = tweet['text']\n summary = text.split(\"\\n\")[0]\n data = \"####################\\n\"\n data += tweet['id_str'] + \"\\n\"\n data += \"####################\\n\"\n data += \"\\n\"\n date = datetime.strptime(tweet['created_at'], \"%a %b %d %H:%M:%S %z %Y\")\n data += \":date: \" + date.astimezone(tz.gettz('Europe/Paris')).strftime(\"%Y-%m-%d %H:%M:%S\") + \"\\n\"\n if \"entities\" in tweet.keys():\n if \"hashtags\" in tweet['entities'].keys():\n if len(tweet['entities']['hashtags']) > 0:\n data += \":tags: \"\n for tag in tweet['entities']['hashtags']:\n data += \"#\" + tag['text'] + \", \"\n data = data[:-2] # Remove last comma-space\n data += \"\\n\"\n if \"media\" in tweet['entities'].keys() and tweet['entities']['media'][0]['type'] == \"photo\":\n data += \":image: {photo}../images/tweets/\" + tweet['entities']['media'][0]['id_str'] + \".jpg\\n\"\n data += \":og_image: /images/tweets/\" + tweet['entities']['media'][0]['id_str'] + \".jpg\\n\"\n img = urllib.request.urlopen(tweet['entities']['media'][0]['media_url']).read()\n try:\n stat(\"./content/images\")\n except:\n mkdir(\"./content/images\")\n try:\n stat(\"./content/images/tweets\")\n except:\n mkdir(\"./content/images/tweets\")\n Image.open(io.BytesIO(img)).save(\"./content/images/tweets/\" + tweet['entities']['media'][0]['id_str'] + \".jpg\", quality=95, optimize=True)\n logging.debug(\"Image \" + tweet['entities']['media'][0]['id_str'] + \".jpg saved\")\n for img in tweet['entities']['media']:\n summary = summary.replace(img['url'], '')\n text = text.replace(img['url'], '')\n #TODO : Add gallery support for multiple photos in a tweet.\n for url in URLExtract().find_urls(text):\n text = text.replace(url, \"`\"+url+\" <\"+url+\">`_\")\n text_2 = list()\n for word in text.split():\n if word[0] == \"@\" or word[0:2] == \".@\" :\n if word[-1].isalnum(): # Take care of non alphanum at the end, like comma or point.\n word = word.replace(word, '`' + word + ' <https://twitter.com/' + word[1:] + '>`_')\n else:\n word = word.replace(word, '`' + word[:-1] + ' <https://twitter.com/' + word[1:-1] + '>`_' + word[-1])\n if word[0] == \"#\":\n if word[-1].isalnum():\n word = word.replace(word, '`' + word + ' <https://twitter.com/hashtag/' + word[1:] + '>`_')\n else:\n word = word.replace(word, '`' + word[:-1] + ' <https://twitter.com/hashtag/' + word[1:-1] + '>`_' + word[-1])\n text_2.append(word)\n text = ' '.join(text_2)\n data += 
\":summary: \" + summary + \"\\n\"\n data += \"\\n\"\n data += text\n try:\n stat(\"./content/SocialNetworks\")\n except:\n mkdir(\"./content/SocialNetworks\")\n f = open(\"./content/SocialNetworks/tweet_\" + tweet['id_str'] + \".rst\", \"w\", encoding=\"UTF-8\")\n f.write(data)\n logging.debug(\"Tweet number \" + tweet['id_str'] + \" saved !\")\n f.close()", "def parse_json(obj):\n return ensure_dict(obj, 'JWE')", "def item_to_row(item):\n track = item[\"track\"]\n\n track_id = track[\"id\"]\n track_name = track[\"name\"]\n track_artists = [artist[\"name\"] for artist in track[\"artists\"]]\n by, feature = track_artists[0], track_artists[1:]\n\n if not feature:\n feature = None\n\n album_id = track[\"album\"][\"id\"]\n album_name, album_image = track[\"album\"][\"name\"], track[\"album\"][\"images\"][0][\"url\"]\n release_date = track[\"album\"][\"release_date\"]\n album_popularity = track[\"popularity\"]\n duration = convert_ms(track[\"duration_ms\"])\n\n return [\n track_name,\n track_id,\n by,\n feature,\n album_id,\n album_name,\n album_image,\n release_date,\n album_popularity,\n duration,\n ]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
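A similar hedged sketch for the Twitter converter above. It assumes the surrounding ingest module is loaded (the DateLookup cache and the calendar/time/dateutil imports the function relies on); the tweet dict and the FakeDecoder class are illustrative stand-ins, not real Twitter API output.

# Hypothetical, minimal tweet; only fields the converter touches are present.
tweet = {
    'created_at': 'Wed Oct 10 20:19:24 +0000 2018',
    'coordinates': {'coordinates': [-74.0, 40.7]},  # [longitude, latitude]
    'user': {'screen_name': 'someuser', 'name': 'Some User',
             'id_str': '42', 'utc_offset': -14400},
    'id_str': '1050118621198921728',
    'text': 'hello &amp; welcome',
    'retweet_count': 0,
}


class FakeDecoder(object):
    # Stand-in for the HTML-entity decoder the real caller passes in.
    def unescape(self, text):
        return text.replace('&amp;', '&')


item = convertTwitterJSONToItem(tweet, FakeDecoder(), line='', partial=True)
# item['url'] == 't/42/1050118621198921728'
# item['caption'] == 'hello & welcome'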
Process the list of files, either determining the latest entry we have for each Instagram message or storing the latest entries for final conversion.
def processFiles(files, items, fileData, format='instagram'):
    processed = 0
    itemsStored = 0
    files_processed = 0
    keylist = (KeyList if format == 'instagram' else
               (JSONKeyList if format == 'json' else MessageKeyList))
    for filerecord in files:
        region = filerecord.get('region', None)
        (fptr, filename) = processFilesOpen(**filerecord)
        if filename.split('.')[-1].lower() == 'csv':
            itemlist = csvToItems(fptr, fileData is None)
            ingestSource = 'instagram_csv'
            service = 'i'
        else:
            line = fptr.readline()
            # Not all formats support seeking back to zero, so just reopen
            (fptr, filename) = processFilesOpen(**filerecord)
            if 'tweet' in line:
                itemlist = twitterToItems(fptr, fileData is None, fileData)
                ingestSource = 'twitter_json'
                service = 't'
            else:
                itemlist = jsonToItems(fptr, fileData is None, line.strip())
                ingestSource = 'instagram_json'
                service = 'i'
        for item in itemlist:
            if not processed % 1000:
                sys.stderr.write('%4d/%4d %9d/%9d\r' % (
                    files_processed + 1, len(files), itemsStored, processed))
                sys.stderr.flush()
            processed += 1
            try:
                # Check that these are reasonable and castable to the
                # expected data type
                lat = float(item['latitude'])
                lon = float(item['longitude'])
                if (not int(item['posted_date']) or not item['url'] or
                        lat < -90 or lat > 90 or lon < -180 or lon > 180):
                    continue
            except Exception:
                continue
            item['url'] = item['url'].rstrip('/')
            scrapedDate = int(item.get('scraped_date', item.get(
                'posted_date', 0)))
            # The same message is repeated often with just different likes or
            # comments.  We keep the latest message based on scraped_date or
            # the latest comment or caption date.
            key = item['url'].rsplit('/', 1)[-1]
            if 'hash' in item:
                # If we have a hash value, use it instead of the key, but
                # treat the data as a later addition.
                key = item['hash']
                scrapedDate -= 365 * 86400
            if fileData is None:
                items[key] = max(items.get(key, 0), scrapedDate)
                itemsStored = len(items)
                continue
            if key not in items or scrapedDate != items[key]:
                continue
            del items[key]
            trackMentions(fileData.get('mentions', None), item, service)
            trackLikes(fileData.get('mentions', None), item,
                       fileData.get('likes', False))
            adjustItemForStorage(item, format, ingestSource, service, region)
            if format == 'json':
                item = json.dumps({jkey: item[jkey] for jkey in keylist
                                   if item.get(jkey, None) is not None})
            else:
                item = [item.get(lkey, None) for lkey in keylist]
                # Escape for Postgres bulk import
                item = ['\\N' if col is None else unicode(col).replace(
                    '\t', ' ').replace('\r', ' ').replace('\n', ' ').replace(
                    '\v', ' ').replace('\f', ' ').replace('\b', ' ').replace(
                    '\x00', ' ').replace('\\', '\\\\') for col in item]
                item = '\t'.join(item)
            dataToFiles(fileData, item)
            itemsStored += 1
        files_processed += 1
        sys.stderr.write('%4d/%4d %9d %s\n' % (
            files_processed, len(files), itemsStored, filename[-59:]))
    return processed
[ "def process_files(self):\n while self.search_set:\n current_file = self.add_file(self.search_set.pop())\n self.save_file(current_file)", "def process_files(self):\n while self.running:\n current_time = time.time()\n # sort by created time descending\n for fastqfile, create_time in sorted(\n self.fastq_files_to_create.items(), key=lambda x: x[1]\n ):\n # remove the file from fastq files to create as we are doing that now\n if create_time > time.time() - 5:\n time.sleep(5)\n # file created 5 sec ago, so should be complete. For simulations we make the time longer.\n del self.fastq_files_to_create[fastqfile]\n parse_fastq_file(\n fastqfile,\n self.rundict,\n self.args,\n self.header,\n self.minotour_api,\n self.sequencing_statistic,\n )\n self.sequencing_statistic.files_processed += 1\n\n if not self.running:\n break\n if current_time + 5 > time.time():\n time.sleep(5)", "def handle_files(self, files):\n for f in files:\n if self.handle_file(f):\n self.processed_files.add(f.dest_file)\n else:\n self.ignored_files.add(f.dest_file)", "def loop():\n\n currentFiles = os.listdir(incoming_dir)\n print \"Current files:\", currentFiles\n\n kf = set(knownFiles)\n new = [fname for fname in currentFiles if fname not in kf]\n\n # the 'new' list SHOULD only have one filename in it, as this update should run more slowly than real time \n #for fname in new: \n #img = new[0]\n #print img\n\n # Now try to recognize a given photo\n testPhotoUrl = 'http://sphotos.xx.fbcdn.net/hphotos-ash3/559477_10100177363179781_22009713_42015310_339035254_n.jpg' # Eric M\n userId = extractUserIdByImage(testPhotoUrl, client, faceApi['namespace'])\n\n if (userId):\n dossier = Dossier(userId)\n if (dossier.getDossierData() == True):\n dossier.generateFile()\n \n # finally, track that these files are done\n knownFiles = currentFiles\n # end stuff to do every second", "def processFiles(self):\n\n\t\t# iterate over all files in self.input_dir, collecting all .shtml and .inc files\n\t\tfor f in listdir(self.input_dir):\n\t\t\tfName, fExt = path.splitext(f)\n\t\t\tif fExt == \".shtml\":\n\t\t\t\tself.shtml_files.append(f)\n\t\t\telif fExt == \".inc\":\n\t\t\t\tself.inc_files.append(f)\n\n\t\t# process each .shtml file individually via self.__processFile()\n\t\tfor f in self.shtml_files:\n\t\t\tfName = path.splitext(f)[0]\n\t\t\tinput_file = self.input_dir + '/' + f\n\t\t\toutput_file = self.output_dir + '/' + fName + \".html\"\n\t\t\tself.__processFile(input_file, output_file, False)", "def processFiles(pathList):\n lettersRegEx = re.compile('^\\D+', re.IGNORECASE)\n files = []\n masterList = []\n \n for i in range(len(pathList)):\n data = map(lambda l: l.strip().split('\\t'), open(pathList[i],'rU'))\n if data[0][0].startswith('#'): data.pop(0) # get rid of headers\n tmpDict = {}\n for i in range(len(data)):\n tmpDict[data[i][0]] = combinePos1and2(data[i][1:])\n \n \n # Add new hitDict to the master list\n masterList.append(tmpDict)\n return masterList", "def _fetch_files(self, fetch_all):\n\n photos = Photo.objects.filter(user=self.account.user)\n\n if not fetch_all:\n photos = photos.filter(original_file=\"\")\n\n error_messages = []\n\n for photo in photos:\n try:\n self._fetch_and_save_file(photo=photo, media_type=\"photo\")\n self.results_count += 1\n except FetchError as e:\n error_messages.append(str(e))\n\n if photo.media == \"video\":\n try:\n self._fetch_and_save_file(photo=photo, media_type=\"video\")\n self.results_count += 1\n except FetchError as e:\n error_messages.append(str(e))\n\n if len(error_messages) > 0:\n 
self.return_value[\"success\"] = False\n self.return_value[\"messages\"] = error_messages\n else:\n self.return_value[\"success\"] = True", "def process_emails():\n # i = 0\n for email in polite_files:\n tokens = tokenize_email(email)\n get_ngrams(tokens, True, True)\n # if i > 10:\n # break\n # i += 1\n \n for email in impolite_files:\n tokens = tokenize_email(email)\n get_ngrams(tokens, False, True)", "def process_dir(all_files, dirname, filenames):\n for filename in filenames:\n fullpath = os.path.join(dirname, filename)\n\n if not os.path.isfile(fullpath):\n continue\n\n mtime = os.path.getmtime(fullpath)\n file = File(fullpath, mtime)\n all_files.add(file)", "def process_files(files, dir):\n messages = {}\n for f in files:\n with open(join(dir, f)) as config_file:\n content = json.load(config_file)\n if content:\n from_add, from_name = '', ''\n if len(content.get('messages')):\n for message in content.get('messages'):\n if not message.get('sent') and message.get('inbox'):\n from_add = message.get('from').get('e')\n from_name = message.get('from').get('n')\n # if senders are from gmail, yahoo, hotmail - may be personal emails\n # and others would be apps and services - so split to get the service name\n if not ('gmail' in from_add or 'yahoo' in from_add or 'hotmail' in from_add):\n from_add = from_add.rsplit('@')[1]\n break\n if from_add:\n if messages.get(from_add):\n mails = messages[from_add]['mails']\n if mails and len(mails):\n mails.append((content.get('subject'), content.get('internalDate')))\n else:\n mails = [(content.get('subject'), content.get('internalDate'))]\n messages[from_add]['mails'] = mails\n else:\n messages.update({from_add: {}})\n messages[from_add]['mails'] = [(content.get('subject'), content.get('internalDate'))]\n messages[from_add]['from_name'] = from_name\n\n return messages", "def _read_files(self) -> None:\n for file in self.files:\n with open(file, 'r') as f:\n serialized = json.load(f)\n self.obj['avsc'].append(serialized)", "def on_added_handler(file_list):\n print_file_list(file_list, \"Added\")\n for f in file_list:\n logger.info('Sending file \"{0}\"...'.format(f));\n send_file(f)", "def getCandidateFiles(self, outputList, outputLFNs, fileMask):\n fileInfo = {}\n for outputFile in outputList:\n if outputFile.has_key('outputFile') and outputFile.has_key('outputDataSE') and outputFile.has_key('outputPath'):\n fname = outputFile['outputFile']\n fileSE = outputFile['outputDataSE']\n filePath = outputFile['outputPath']\n fileInfo[fname] = {'path' : filePath, 'workflowSE' : fileSE}\n else:\n self.log.error('Ignoring malformed output data specification', str(outputFile))\n\n for lfn in outputLFNs:\n if os.path.basename(lfn) in fileInfo.keys():\n fileInfo[os.path.basename(lfn)]['lfn']=lfn\n self.log.verbose('Found LFN %s for file %s' %(lfn, os.path.basename(lfn)))\n if len(os.path.basename(lfn))>127:\n self.log.error('Your file name is WAAAY too long for the FileCatalog. Cannot proceed to upload.')\n return S_ERROR('Filename too long')\n if len(lfn)>256+127:\n self.log.error('Your LFN is WAAAAY too long for the FileCatalog. 
Cannot proceed to upload.')\n return S_ERROR('LFN too long')\n \n #Check that the list of output files were produced\n for fileName, metadata in fileInfo.items():\n if not os.path.exists(fileName):\n self.log.error('Output data file %s does not exist locally' % fileName)\n if not self.ignoreapperrors:\n return S_ERROR('Output Data Not Found')\n del fileInfo[fileName]\n #Check the list of files against the output file mask (if it exists)\n #candidateFiles = {}\n #if fileMask:\n ##nothing to do yet, as FileMask is not used\n #for fileName,metadata in fileInfo.items():\n # if metadata['type'].lower() in fileMask or fileName.split('.')[-1] in fileMask:\n # candidateFiles[fileName]=metadata\n # else:\n # self.log.info('Output file %s was produced but will not be treated (outputDataFileMask is %s)' %(fileName,\n # string.join(self.outputDataFileMask,', ')))\n\n #if not candidateFiles.keys():\n # return S_OK({}) #nothing to do\n # candidateFiles = fileInfo\n #else:\n #do not apply mask to files\n \n candidateFiles = fileInfo\n #Sanity check all final candidate metadata keys are present (return S_ERROR if not)\n mandatoryKeys = ['path', 'workflowSE', 'lfn'] #filedict is used for requests\n for fileName, metadata in candidateFiles.items():\n for key in mandatoryKeys:\n if not metadata.has_key(key):\n return S_ERROR('File %s has missing %s' % (fileName, key))\n \n return S_OK(candidateFiles)", "def extract_latest_file(self, list_blobs):\n last_recent_file = None\n possible_recent_date_collision = False\n recent_date = datetime.strptime('01-01-1900', '%d-%m-%Y')\n for filename in list_blobs:\n date_file = extract_date_from_file(filename)\n if date_file:\n if date_file == recent_date:\n if not self.is_a_spark_directory(filename):\n possible_recent_date_collision = True\n else:\n # it is spark dir. Check if it is the same dir\n if os.path.dirname(filename) != os.path.dirname(last_recent_file):\n logger.debug(\"'{}' vs '{}'\".format(filename, last_recent_file))\n possible_recent_date_collision = True\n if date_file > recent_date:\n possible_recent_date_collision = False\n recent_date = date_file\n last_recent_file = filename\n if possible_recent_date_collision:\n # Raise an error. 
No filename is unique in the recent date selected.\n msg = \"Error TWO files with the same date: '{}' and '{}'\".format(last_recent_file,\n recent_date.strftime('%d-%m-%Y'))\n logger.error(msg)\n raise ValueError(msg)\n logger.info(\"Latest file: %s %s\", last_recent_file, recent_date.strftime('%d-%m-%Y'))\n return {\"latest_filename\": last_recent_file,\n \"suffix\": recent_date.strftime('%Y-%m-%d'), \"spark\": self.is_a_spark_directory(last_recent_file)}", "def process_files(files_directory):\n files = os.listdir(files_directory)\n files = [os.path.join(files_directory, file) for file in files]\n pool = Pool(10)\n all_text = pool.map(all_files, enumerate(files))\n pool.close()\n pool.join()\n transcript = []\n for t in sorted(all_text, key=lambda x: x['idx']):\n transcript.append(t['text'])\n return transcript", "def categorize_emails():\n for i in range(1,7): # 1 - 6, because we ignore 1.7 and 1.8\n filepath = os.path.join(\"enron_with_categories\",str(i))\n filelist = os.listdir(filepath)\n filelist.sort() # may not be necessary, do it anyway\n i = 0\n num_files = len(filelist)\n while i < num_files:\n cats_file = filelist[i]\n email_file = filelist[i+1]\n type = process_cats( os.path.join(filepath,cats_file) )\n email_filepath = os.path.join(filepath, email_file)\n if type == \"polite\":\n polite_files.append(email_filepath)\n elif type == \"impolite\":\n impolite_files.append(email_filepath)\n # else:\n # print \"ignoring file\", email_file\n i += 2", "def _walk_documents(self, files, conf):\n for id, file, backend_document in self._group_files_ids(files, self.ingest_batch_size, conf):\n if self.ignore_file(file):\n continue\n\n action = 'update' if backend_document else 'create'\n\n try:\n document = self.prepare_document(file, conf, backend_document)\n job = (id, action, document)\n except Exception as e:\n logging.exception(e)\n continue\n\n logging.info(job[1] + ' ' + file.url + ' (' + str(file.mimetype) + ')')\n\n yield job", "def checkFiles(filelist, ivmlist = None):\n #newfilelist = []\n removed_files = []\n translated_names = []\n newivmlist = []\n\n if ivmlist == None:\n ivmlist = [None for l in filelist]\n\n sci_ivm = zip(filelist, ivmlist)\n\n for file in sci_ivm:\n #find out what the input is\n # if science file is not found on disk, add it to removed_files for removal\n try:\n imgfits,imgtype = fileutil.isFits(file[0])\n except IOError:\n print(\"Warning: File %s could not be found\\n\" %file[0])\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file)\n continue\n if file[1] != None:\n #If an ivm file is not found on disk\n # Remove the corresponding science file\n try:\n ivmfits,ivmtype = fileutil.isFits(file[1])\n except IOError:\n print(\"Warning: File %s could not be found\\n\" %file[1])\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file)\n # Check for existence of waiver FITS input, and quit if found.\n # Or should we print a warning and continue but not use that file\n if imgfits and imgtype == 'waiver':\n newfilename = waiver2mef(file[0], convert_dq=True)\n if newfilename == None:\n print(\"Removing file %s from input list - could not convert waiver to mef\" %file[0])\n removed_files.append(file[0])\n else:\n translated_names.append(newfilename)\n\n # If a GEIS image is provided as input, create a new MEF file with\n # a name generated using 'buildFITSName()'\n # Convert the corresponding data quality file if present\n if not imgfits:\n newfilename = geis2mef(file[0], convert_dq=True)\n if newfilename == 
None:\n print(\"Removing file %s from input list - could not convert geis to mef\" %file[0])\n removed_files.append(file[0])\n else:\n translated_names.append(newfilename)\n if file[1] != None:\n if ivmfits and ivmtype == 'waiver':\n print(\"Warning: PyDrizzle does not support waiver fits format.\\n\")\n print(\"Convert the input files to GEIS or multiextension FITS.\\n\")\n print(\"File %s appears to be in waiver fits format \\n\" %file[1])\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file[0])\n\n if not ivmfits:\n newfilename = geis2mef(file[1], convert_dq=False)\n if newfilename == None:\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file[0])\n else:\n newivmlist.append(newfilename)\n\n newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files)\n\n if newfilelist == []:\n return [], []\n \"\"\"\n errormsg = \"\\n No valid input was found. Quitting ...\\n\"\n raise IOError, errormsg\n \"\"\"\n if translated_names != []:\n # Since we don't allow input from different instruments\n # we can abandon the original input list and provide as\n # input only the translated names\n removed_expt_files = check_exptime(translated_names)\n newfilelist, ivmlist = update_input(translated_names, newivmlist, removed_expt_files)\n else:\n # check for STIS association files. This must be done before\n # the check for EXPTIME in order to handle correctly stis\n # assoc files\n if pyfits.getval(newfilelist[0], 'INSTRUME') == 'STIS':\n newfilelist, ivmlist = checkStisFiles(newfilelist, ivmlist)\n #removed_files = check_exptime(newflist)\n\n removed_expt_files = check_exptime(newfilelist)\n newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_expt_files)\n if removed_expt_files:\n errorstr = \"#############################################\\n\"\n errorstr += \"# #\\n\"\n errorstr += \"# ERROR: #\\n\"\n errorstr += \"# #\\n\"\n errorstr += \"# The following files were excluded from #\\n\"\n errorstr += \"# Multidrizzle processing because their #\\n\"\n errorstr += \"# header keyword EXPTIME values were 0.0: #\\n\"\n for name in removed_expt_files:\n errorstr += \" \"+ str(name) + \"\\n\"\n errorstr += \"# #\\n\"\n errorstr += \"#############################################\\n\\n\"\n print(errorstr)\n\n removed_ngood_files = checkNGOODPIX(newfilelist)\n newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_ngood_files)\n if removed_ngood_files:\n msgstr = \"####################################\\n\"\n msgstr += \"# #\\n\"\n msgstr += \"# WARNING: #\\n\"\n msgstr += \"# NGOODPIX keyword value of 0 in #\\n\"\n for name in removed_ngood_files:\n msgstr += \" \"+ str(name) + \"\\n\"\n msgstr += \"# has been detected. Images with #\\n\"\n msgstr += \"# no valid pixels will not be #\\n\"\n msgstr += \"# used during processing. If you #\\n\"\n msgstr += \"# wish this file to be used in #\\n\"\n msgstr += \"# processing, please check its DQ #\\n\"\n msgstr += \"# array and reset driz_sep_bits #\\n\"\n msgstr += \"# and final_bits parameters #\\n\"\n msgstr += \"# to accept flagged pixels. #\\n\"\n msgstr += \"# #\\n\"\n msgstr += \"####################################\\n\"\n print(msgstr)\n\n return newfilelist, ivmlist", "def get_all_files_modif_date(basedir,ext='.h5'):\n for root, dirs, files in os.walk(basedir):\n files = glob.glob(os.path.join(root,'*'+ext))\n for f in files :\n mdate = file_modif_date(f)\n MODIFQUEUE.put_nowait( (-mdate,f) )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If we are tracking mentions and likes, parse likes and comments and add those users to the user mention dictionaries.
def trackLikes(mentions, item, likes=False):
    if (mentions is None or not likes or
            (not item.get('likes', None) and not item.get('comments', None))):
        return
    users = []
    likes = item.get('likes', None)
    if likes:
        users.extend([like.split(';', 1)[0] for like in likes.split('|')])
    comments = item.get('comments', None)
    if comments:
        users.extend([like.split(';', 1)[0] for like in comments.split('|')])
    if not len(users):
        return
    user = item['user_name'].lower()
    mentions[user] = mentions.get(user, {})
    for mention in users:
        name = mention.lower()
        mentions[user][name] = mentions[user].get(name, 0) + 1
[ "def trackMentions(mentions, item, service):\n if (mentions is None or not item.get('caption', None) or\n '@' not in item['caption']):\n return\n if service == 'i':\n users = InstagramMentionPattern.findall(item['caption'])\n else:\n users = TwitterMentionPattern.findall(item['caption'])\n if not len(users):\n return\n user = item['user_name'].lower()\n mentions[user] = mentions.get(user, {})\n for mention in users:\n name = mention[1:].lower()\n mentions[user][name] = mentions[user].get(name, 0) + 1", "def getmentions():\n\tuserid = input(\"User mentions, userid? [return]=me: \")\n\tif userid == '':\n\t\tuserid = \"me\"\n\tpostcontent = pnutpy.api.users_mentioned_posts(userid, count=retrievecount, include_raw=True)\n\tdisplaypost(postcontent)", "def test_tweet_user_mentions(self):\n user_mention = {\n 'id_str': '123',\n 'screen_name': 'fakeuser',\n 'name': 'Fake User',\n }\n msg = {\n 'entities': {'user_mentions': [user_mention]},\n 'id_str': '12345',\n 'text': 'This is a tweet mentioning @fakeuser.',\n 'user': {},\n }\n self.assertEqual(\n [user_mention], self.messagetools.tweet_user_mentions(msg))", "def add_mentions(self, tweet_id, mentioned_user_ids):\n if len(mentioned_user_ids) == 0: return\n\n self.body += 'TYPE=MN&ID=%d' % tweet_id\n for uid in mentioned_user_ids:\n self.body += '&RefID=%d' % uid\n self.body += '\\n'", "def analyze_tweet(tweet,results):\n \n # tweet body information\n if \"body_term_count\" not in results:\n results[\"body_term_count\"] = SimpleNGrams(\n char_lower_cutoff=3\n ,n_grams=1\n ,tokenizer=\"twitter\"\n )\n results[\"body_term_count\"].add(tweet[\"body\"])\n\n # which users are involved\n if \"at_mentions\" not in results:\n results[\"at_mentions\"] = defaultdict(constant_factory)\n #if \"mention_edges\" not in results:\n # results[\"mention_edges\"] = {}\n for u in [x for x in tweet[\"twitter_entities\"][\"user_mentions\"]]:\n \tresults[\"at_mentions\"][u[\"id_str\"]] = (results[\"at_mentions\"][u[\"id_str\"]][0] + 1, \n results[\"at_mentions\"][u[\"id_str\"]][1] | set([u[\"screen_name\"].lower()]))\n #if u not in results[\"mention_edges\"]:\n # results[\"mention_edges\"][u[\"id_str\"]] = {tweet[\"actor\"][\"id\"][15:]: 1}\n #else:\n # actor_id = tweet[\"actor\"][\"id\"][15:]\n # if actor_id not in results[\"mention_edges\"][u[\"id_str\"]]:\n # results[\"mention_edges\"][u[\"id_str\"]][actor_id] = 1\n # else:\n # results[\"mention_edges\"][u[\"id_str\"]][actor_id] += 1\n \n if \"inReplyTo\" in tweet:\n if \"in_reply_to\" not in results:\n results[\"in_reply_to\"] = defaultdict(int)\n #print tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()\n results[\"in_reply_to\"][tweet[\"inReplyTo\"][\"link\"].split(\"/\")[3].lower()] += 1\n\n if tweet[\"verb\"] == \"share\":\n if \"RT_of_user\" not in results:\n results[\"RT_of_user\"] = defaultdict(constant_factory)\n rt_of_name = tweet[\"object\"][\"actor\"][\"preferredUsername\"].lower()\n rt_of_id = tweet[\"object\"][\"actor\"][\"id\"][15:]\n results[\"RT_of_user\"][rt_of_id] = (results[\"RT_of_user\"][rt_of_id][0] + 1, \n results[\"RT_of_user\"][rt_of_id][1] | set([rt_of_name]))\n\n if \"hashtags\" not in results:\n results[\"hashtags\"] = defaultdict(int)\n if \"hashtags\" in tweet[\"twitter_entities\"]:\n for h in [x[\"text\"].lower() for x in tweet[\"twitter_entities\"][\"hashtags\"]]:\n results[\"hashtags\"][h] += 1\n\n if \"local_timeline\" not in results:\n results[\"local_timeline\"] = defaultdict(int)\n utcOffset = tweet[\"actor\"][\"utcOffset\"]\n if utcOffset is not None:\n posted = 
tweet[\"postedTime\"]\n hour_and_minute = (datetime.datetime.strptime(posted[0:16], \"%Y-%m-%dT%H:%M\") + \n datetime.timedelta(seconds = int(utcOffset))).time().strftime(\"%H:%M\")\n results[\"local_timeline\"][hour_and_minute] += 1\n\n if \"urls\" not in results:\n results[\"urls\"] = defaultdict(int)\n if \"urls\" in tweet[\"gnip\"]:\n try:\n for url in [x[\"expanded_url\"] for x in tweet[\"gnip\"][\"urls\"]]:\n results[\"urls\"][url.split(\"/\")[2]] += 1\n except KeyError:\n pass\n\n if \"user_ids_user_freq\" not in results:\n results[\"user_ids_user_freq\"] = defaultdict(int)\n results[\"user_ids_user_freq\"][tweet[\"actor\"][\"id\"][15:]] += 1", "async def expand_user_mentions(cls, app_data: AppData, tweets: Iterable):\n # with cls.lock:\n cls.filter()\n missing_users = set([])\n for tweet in tweets:\n for user in collect_key_values('user', tweet):\n cls.add(user)\n mention_sections = collect_key_values('user_mentions', tweet)\n for mention_section in mention_sections:\n for mention in mention_section:\n if not mention['id'] in cls.users:\n missing_users.add(mention['id'])\n if not missing_users:\n return\n missing_users = list(missing_users)\n n = 100\n chunks = [\n missing_users[i:i + n] for i in range(0, len(missing_users), n)\n ]\n for chunk in chunks:\n await UserQuery(\n app_data,\n 'twicorder',\n user_id=','.join([str(u) for u in chunk])\n ).start()\n\n for tweet in tweets:\n mention_sections = collect_key_values('user_mentions', tweet)\n for mention_section in mention_sections:\n for mention in mention_section:\n full_user = cls.users.get(mention['id'])\n if not full_user:\n continue\n mention.update(full_user.data)\n return tweets", "def countMentions(tweet_list):\n\n mention_count = {}\n mention_pattern = re.compile(\"@[\\w]+\")\n \n for tweet in tweet_list:\n mentions = re.findall(mention_pattern, tweet[\"full_text\"])\n\n for mention in mentions:\n # update mention_count\n mention_count[mention] = mention_count.setdefault(mention, 0)\n mention_count[mention] += 1\n\n return mention_count", "def process_message_stats(stats, msg, users):\n if (msg['sender_id']) not in stats.keys():\n stats[msg['sender_id']] = {'name': '', 'messages_sent': 0, 'likes_received': 0, 'likes_given': 0, 'pct_msgs_liked': 0, 'self_likes': 0, 'words_sent': 0, 'images_sent': 0}\n users.update([msg['sender_id']])\n if not bool(stats[msg['sender_id']]['name'] and stats[msg['sender_id']]['name'].strip()):\n stats[msg['sender_id']]['name'] = msg['name']\n stats[msg['sender_id']]['messages_sent'] += 1\n stats[msg['sender_id']]['likes_received'] += len(msg['favorited_by'])\n if len(msg['favorited_by']) > 0:\n for usr in msg['favorited_by']:\n if usr not in stats.keys():\n stats[usr] = {'name': '', 'messages_sent': 0, 'likes_received': 0, 'likes_given': 0, 'pct_msgs_liked': 0, 'self_likes': 0, 'words_sent': 0, 'images_sent': 0}\n users.update(msg['favorited_by'])\n stats[usr]['likes_given'] += 1\n if msg['sender_id'] in msg['favorited_by']:\n stats[msg['sender_id']]['self_likes'] += 1\n if msg['text'] is not None:\n msg['text'].split(' ')\n stats[msg['sender_id']]['words_sent'] += len(msg['text'].split(' '))\n if msg['attachments']:\n for attachment in msg['attachments']:\n if attachment['type'] == 'image':\n stats[msg['sender_id']]['images_sent'] += 1", "def flatten_comment_mentioned_user_keys_for_fetch_incident(comment: Dict[str, Any]) -> None:\n if 'mentionedUsers' in comment:\n comment['mentionedUsers'] = [flatten_user_dict(user) for user in\n comment['mentionedUsers']]", "def 
test_dm_user_mentions(self):\n user_mention = {\n 'id_str': '123',\n 'screen_name': 'fakeuser2',\n 'name': 'Fake User 2',\n }\n msg = {\n \"id_str\": \"1\",\n 'text': 'This is a dm mentioning @fakeuser2.',\n \"sender_id\": 1,\n \"sender_id_str\": \"1\",\n \"sender_screen_name\": \"fakeuser\",\n \"sender\": {},\n \"recipient_id\": 2,\n \"recipient_id_str\": \"2\",\n \"recipient_screen_name\": \"fakeuser2\",\n \"recipient\": {},\n 'entities': {'user_mentions': [user_mention]},\n }\n self.assertEqual(\n [user_mention], self.messagetools.dm_user_mentions(msg))", "def like_by_users(self, usernames, amount=10, randomize=False, media=None):\n if self.aborting:\n return self\n\n total_liked_img = 0\n already_liked = 0\n inap_img = 0\n commented = 0\n followed = 0\n usernames = usernames or []\n upper_follower_limit = None\n lower_follower_limit = None\n\n for index, username in enumerate(usernames):\n self.logger.info(\n 'Username [{}/{}]'.format(index + 1, len(usernames)))\n self.logger.info('--> {}'.format(str(username.encode('utf-8'))[self.bye_b]))\n following = random.randint(0, 100) <= self.follow_percentage\n\n valid_user = validate_username(self.browser,\n username,\n self.ignore_users,\n self.blacklist,\n upper_follower_limit,\n lower_follower_limit)\n if valid_user is not True:\n self.logger.info(valid_user)\n continue\n\n try:\n links = get_links_for_username(\n self.browser,\n username,\n amount,\n self.logger,\n randomize,\n media)\n except NoSuchElementException:\n self.logger.error('Element not found, skipping this username')\n continue\n\n if (self.do_follow and\n username not in self.dont_include and\n following and\n self.follow_restrict.get(username, 0) < self.follow_times):\n followed += follow_user(self.browser,\n self.follow_restrict,\n self.username,\n username,\n self.blacklist,\n self.logger,\n self.logfolder)\n else:\n self.logger.info('--> Not following')\n sleep(1)\n\n if links is False:\n continue\n\n # Reset like counter for every username\n liked_img = 0\n jumped = 0\n\n for i, link in enumerate(links):\n # Check if target has reached\n if liked_img >= amount:\n self.logger.info('-------------')\n self.logger.info(\"--> Total liked image reached it's \"\n \"amount given: {}\".format(liked_img))\n break\n\n elif jumped >= 1:\n self.logger.info('-------------')\n self.logger.info(\"--> Like quotient reached! 
Total liked images: {}\".format(liked_img))\n break\n\n self.logger.info('Post [{}/{}]'.format(liked_img + 1, amount))\n self.logger.info(link)\n\n try:\n inappropriate, user_name, is_video, reason = (\n check_link(self.browser,\n link,\n self.dont_like,\n self.ignore_if_contains,\n self.ignore_users,\n self.username,\n upper_follower_limit,\n lower_follower_limit,\n self.logger,\n self.bye_b)\n )\n\n if not inappropriate:\n liked = like_image(self.browser,\n user_name,\n self.blacklist,\n self.logger,\n self.logfolder)\n\n if liked == True:\n total_liked_img += 1\n liked_img += 1\n checked_img = True\n temp_comments = []\n commenting = random.randint(\n 0, 100) <= self.comment_percentage\n\n if self.use_clarifai and (following or commenting):\n try:\n checked_img, temp_comments = (\n check_image(self.browser,\n self.clarifai_api_key,\n self.clarifai_img_tags,\n self.logger,\n self.clarifai_full_match)\n )\n except Exception as err:\n self.logger.error(\n 'Image check error: {}'.format(err))\n if (self.do_comment and\n user_name not in self.dont_include and\n checked_img and\n commenting):\n\n if temp_comments:\n # use clarifai related comments only!\n comments = temp_comments\n elif is_video:\n comments = (self.comments +\n self.video_comments)\n else:\n comments = (self.comments +\n self.photo_comments)\n commented += comment_image(self.browser,\n user_name,\n comments,\n self.blacklist,\n self.logger,\n self.logfolder,\n self.bye_b)\n else:\n self.logger.info('--> Not commented')\n sleep(1)\n\n elif liked == False:\n already_liked += 1\n elif liked == 'jumped':\n jumped += 1\n\n else:\n self.logger.info(\n '--> Image not liked: {}'.format(str(reason.encode('utf-8'))[self.bye_b]))\n inap_img += 1\n except NoSuchElementException as err:\n self.logger.error('Invalid Page: {}'.format(err))\n\n if liked_img < amount:\n self.logger.info('-------------')\n self.logger.info(\"--> Given amount not fullfilled, \"\n \"image pool reached its end\\n\")\n\n self.logger.info('Liked: {}'.format(total_liked_img))\n self.logger.info('Already Liked: {}'.format(already_liked))\n self.logger.info('Inappropriate: {}'.format(inap_img))\n self.logger.info('Commented: {}'.format(commented))\n\n self.liked_img += liked_img\n self.already_liked += already_liked\n self.inap_img += inap_img\n self.commented += commented\n\n return self", "def process_tweet(self, message):\n try:\n # self.list_mention.append(\n self.processedTweet.run(message, self.loop_number)\n except Exception:\n pass", "def test_tweet_user_mentions_no_mentions(self):\n msg = {\n 'entities': {'user_mentions': []},\n 'id_str': '12345',\n 'text': 'This is a tweet mentioning @fakeuser.',\n 'user': {},\n }\n self.assertEqual([], self.messagetools.tweet_user_mentions(msg))", "def genStats(self, user_name):\n\n #create SQL query to get all tweets from user\n q = \"SELECT created_at,text FROM tweets WHERE from_user=%(user_name)s ORDER BY created_at\"\n vals = {'user_name':user_name}\n tweets = self.sql.q(q,vals)\n\n #declare all counts\n num_days = 0.0\n\n num_per_time = [0.0]*6\n\n num_per_weekday = [0.0]*7\n num_at = 0.0\n num_rt = 0.0\n num_hash = 0.0\n num_links = 0.0\n\n mentions = []\n hashes = []\n\n if (len(tweets) > 0):\n cur_datetime = tweets[0][0]\n num_days+=1\n\n for tweet in tweets:\n created = tweet[0]\n text = tweet[1]\n\n #update day count\n if created.day != cur_datetime.day or created.month != cur_datetime.month or created.year != cur_datetime.year:\n cur_datetime = created\n num_days+=1\n\n #update num_per_time count\n 
num_per_time[math.floor(created.hour / 4)] += 1\n\n #update num_per_weekday count\n num_per_weekday[created.weekday()]+=1\n\n #Get RT @ and # counts\n link = False\n mention = False\n rt = False\n has = False\n for word in text.split(\" \"):\n if \"http://\" in word and not link:\n num_links+=1\n link = True\n \n if len(word) > 0 and word[0] == \"@\" and word[1:] != user_name:\n mentions.append(word)\n if not mention:\n num_at +=1\n mention = True\n\n if \"RT\" == word and not rt:\n num_rt+=1\n rt = True\n \n if len(word) > 0 and word[0] == \"#\":\n hashes.append(word)\n if not has:\n num_hash +=1\n has = True\n\n mention_count = collections.Counter(mentions)\n unique_mentions = -1.0\n if len(mentions)!=0:\n unique_mentions = float(len(mention_count))/len(mentions)\n\n hash_count = collections.Counter(hashes)\n unique_hashes = -1.0\n if len(hashes)!=0:\n unique_hashes = float(len(hash_count))/len(hashes)\n\n total_tweets = len(tweets)\n dicvals ={}\n if total_tweets != 0:\n dicvals = {\"tr_day\": float(total_tweets)/num_days,\n \"tr_monday\": num_per_weekday[0]/total_tweets,\n \"tr_tuesday\": num_per_weekday[1]/total_tweets,\n \"tr_wednesday\": num_per_weekday[2]/total_tweets,\n \"tr_thursday\": num_per_weekday[3]/total_tweets,\n \"tr_friday\": num_per_weekday[4]/total_tweets,\n \"tr_saturday\": num_per_weekday[5]/total_tweets,\n \"tr_sunday\": num_per_weekday[6]/total_tweets,\n \"tr_latenight\": num_per_time[0]/total_tweets,\n \"tr_earlymorning\": num_per_time[1]/total_tweets,\n \"tr_morning\": num_per_time[2]/total_tweets,\n \"tr_afternoon\": num_per_time[3]/total_tweets,\n \"tr_evening\": num_per_time[4]/total_tweets,\n \"tr_night\": num_per_time[5]/total_tweets,\n \"mention_rate\": float(num_at)/total_tweets,\n \"retweet_rate\": float(num_rt)/total_tweets,\n \"hash_rate\": float(num_hash)/total_tweets,\n \"link_rate\": float(num_links)/total_tweets,\n \"unique_hash\": unique_hashes,\n \"unique_mention\": unique_mentions,\n \"user\":user_name,\n \"ph\":0\n }\n else:\n dicvals = {\"tr_day\": -1.0,\n \"tr_monday\": -1.0,\n \"tr_tuesday\": -1.0,\n \"tr_wednesday\": -1.0,\n \"tr_thursday\":-1.0,\n \"tr_friday\": -1.0,\n \"tr_saturday\": -1.0,\n \"tr_sunday\": -1.0,\n \"tr_latenight\": -1.0,\n \"tr_earlymorning\": -1.0,\n \"tr_morning\": -1.0,\n \"tr_afternoon\": -1.0,\n \"tr_evening\": -1.0,\n \"tr_night\": -1.0,\n \"mention_rate\": -1.0,\n \"retweet_rate\": -1.0,\n \"hash_rate\": -1.0,\n \"link_rate\": -1.0,\n \"unique_hash\": -1.0,\n \"unique_mention\": -1.0,\n \"user\":user_name,\n \"ph\":0\n }\n\n #insert dictionary into DB\n print(\"inserting user \",user_name)\n \n #the query needs to be REPLACE if unique key already existS! 
\n dicq= \"\"\"INSERT INTO celeb_stats VALUES(%(tr_day)s,\n %(tr_monday)s,\n %(tr_tuesday)s,\n %(tr_wednesday)s,\n %(tr_thursday)s,\n %(tr_friday)s,\n %(tr_saturday)s,\n %(tr_sunday)s,\n %(tr_latenight)s,\n %(tr_earlymorning)s,\n %(tr_morning)s,\n %(tr_afternoon)s,\n %(tr_evening)s,\n %(tr_night)s,\n %(mention_rate)s,\n %(retweet_rate)s,\n %(hash_rate)s,\n %(link_rate)s,\n %(unique_hash)s,\n %(unique_mention)s,\n %(user)s,\n %(ph)s,\n %(ph)s,\n %(ph)s,\n %(ph)s,\n %(ph)s,\n %(ph)s,\n %(ph)s,\n %(ph)s,\n %(ph)s)\n ON DUPLICATE KEY UPDATE tr_day=%(tr_day)s,\n tr_monday=%(tr_monday)s,\n tr_tuesday=%(tr_tuesday)s,\n tr_wednesday=%(tr_wednesday)s,\n tr_thursday=%(tr_thursday)s,\n tr_friday=%(tr_friday)s,\n tr_saturday=%(tr_saturday)s,\n tr_sunday=%(tr_sunday)s,\n tr_latenight=%(tr_latenight)s,\n tr_earlymorning=%(tr_earlymorning)s,\n tr_morning=%(tr_morning)s,\n tr_afternoon=%(tr_afternoon)s,\n tr_evening=%(tr_evening)s,\n tr_night=%(tr_night)s,\n mention_rate=%(mention_rate)s,\n retweet_rate=%(retweet_rate)s,\n hash_rate=%(hash_rate)s,\n link_rate=%(link_rate)s,\n unique_hash=%(unique_hash)s,\n unique_mention=%(unique_mention)s,\n P_m=%(ph)s,\n P_h=%(ph)s,\n P_l=%(ph)s,\n P_um=%(ph)s,\n P_uh=%(ph)s,\n dim_1=%(ph)s,\n dim_2=%(ph)s,\n dim_3=%(ph)s,\n dim_4=%(ph)s \"\"\"\n\n\n succeeded = False\n try:\n self.sql.q(dicq,dicvals)\n succeeded = True\n print (\"Success\")\n except UnicodeEncodeError:\n try:\n print(\"UNIDECODE ERROR, trying decode...\")\n for k in dicvals:\n dicvals[k] = unidecode(dicvals[k])\n self.sql.q(dicq,dicvals)\n succeeded = True\n except:\n print(\"Unidecode failed :(\")\n\n return succeeded", "def countMention(file):\n\n with open(file, \"r\") as tweet_corpus:\n mention_count = {}\n mention_pattern = re.compile(\"@[\\w]+\")\n \n for line in tweet_corpus.readlines():\n tweet = json.loads(line)\n mentions = re.findall(mention_pattern, tweet[\"full_text\"])\n\n for mention in mentions:\n # update mention_count\n mention_count[mention] = mention_count.setdefault(mention, 0)\n mention_count[mention] += 1\n return mention_count", "def get_tagged_users_handles_dict():\n tagged_users_dict = {}\n with open('tagged_users_handle.txt', 'r') as f:\n lines = f.readlines()\n current_tag = \"\"\n current_user_list = []\n for line in lines:\n colon_index = line.find(\":\") \n if colon_index >= 0:\n if len(current_user_list) > 0:\n tagged_users_dict[current_tag] = current_user_list\n current_tag = line[:colon_index].lower()\n current_user_list = []\n elif len(line) > 0 and line != \"\\n\" and line != \"\":\n current_user_list.append(line[1:])\n if len(current_user_list) > 0:\n tagged_users_dict[current_tag] = current_user_list\n return tagged_users_dict", "def post_list_mentions(db, usernick, limit=50):\n\n #create cursor to the database\n cursor = db.cursor()\n\n #if user nick is specified or not, find mentions accordingly\n if usernick==None:\n sqlQuery = \"\"\"SELECT p.id, p.timestamp, p.usernick, u.avatar, p.content\n FROM posts p, users u\n WHERE p.usernick=u.nick AND p.content = '%@%'\n ORDER BY timestamp DESC\"\"\"\n\n #execute sql command, search post list mentions\n cursor.execute(sqlQuery)\n else:\n sqlQuery = \"\"\"SELECT p.id, p.timestamp, p.usernick, u.avatar, p.content\n FROM posts p, users u\n WHERE p.usernick=u.nick AND p.content LIKE ?\n ORDER BY timestamp DESC\"\"\"\n\n #add tags to the usernick to search, saerch post list mentions with specified usernick\n usernick = '%'+'@'+usernick+'%'\n\n #execute sql command\n cursor.execute(sqlQuery, (usernick,))\n\n #return all 
fetched results\n return cursor.fetchall()[:limit]", "def getUserHandles( tweet ):\n return re.findall(r'(@[a-zA-Z0-9_]+)',tweet);", "def _get_mentions(self, sents):\n mentions = []\n word_offset = 0\n # Iterate over sentences in the doc.\n for i in range(len(sents)):\n if i != 0:\n word_offset += len(sents[i-1])\n # Iterate over words/rows in the sentence.\n sent = sents[i]\n for j in range(len(sent)):\n vals = sent[j]\n coref_parts = vals['coref'].split('|')\n for part in coref_parts:\n singleton_match = re.match('^\\((\\d+)\\)$', part)\n multi_match = re.match('^\\((\\d+)$', part)\n if singleton_match:\n label = singleton_match.group(1)\n new_mention = Mention(j, j+1, sents[i], i, word_offset, label=label)\n mentions.append(new_mention)\n elif multi_match:\n label = multi_match.group(1)\n mention_end = part[1:] + ')'\n k = j + 1\n found_end = False\n while not found_end:\n coref = sent[k]['coref']\n if mention_end in coref.split('|'):\n found_end = True\n else:\n k += 1\n new_mention = Mention(j, k+1, sents[i], i, word_offset, label=label)\n mentions.append(new_mention)\n return mentions" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
If we are tracking mentions, parse a message string for mentions and build user mention dictionaries.
def trackMentions(mentions, item, service):
    if (mentions is None or not item.get('caption', None) or
            '@' not in item['caption']):
        return
    if service == 'i':
        users = InstagramMentionPattern.findall(item['caption'])
    else:
        users = TwitterMentionPattern.findall(item['caption'])
    if not len(users):
        return
    user = item['user_name'].lower()
    mentions[user] = mentions.get(user, {})
    for mention in users:
        name = mention[1:].lower()
        mentions[user][name] = mentions[user].get(name, 0) + 1
[ "def test_tweet_user_mentions(self):\n user_mention = {\n 'id_str': '123',\n 'screen_name': 'fakeuser',\n 'name': 'Fake User',\n }\n msg = {\n 'entities': {'user_mentions': [user_mention]},\n 'id_str': '12345',\n 'text': 'This is a tweet mentioning @fakeuser.',\n 'user': {},\n }\n self.assertEqual(\n [user_mention], self.messagetools.tweet_user_mentions(msg))", "def test_dm_user_mentions(self):\n user_mention = {\n 'id_str': '123',\n 'screen_name': 'fakeuser2',\n 'name': 'Fake User 2',\n }\n msg = {\n \"id_str\": \"1\",\n 'text': 'This is a dm mentioning @fakeuser2.',\n \"sender_id\": 1,\n \"sender_id_str\": \"1\",\n \"sender_screen_name\": \"fakeuser\",\n \"sender\": {},\n \"recipient_id\": 2,\n \"recipient_id_str\": \"2\",\n \"recipient_screen_name\": \"fakeuser2\",\n \"recipient\": {},\n 'entities': {'user_mentions': [user_mention]},\n }\n self.assertEqual(\n [user_mention], self.messagetools.dm_user_mentions(msg))", "def parse_direct_mention(message_text):\n matches = search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def countMentions(tweet_list):\n\n mention_count = {}\n mention_pattern = re.compile(\"@[\\w]+\")\n \n for tweet in tweet_list:\n mentions = re.findall(mention_pattern, tweet[\"full_text\"])\n\n for mention in mentions:\n # update mention_count\n mention_count[mention] = mention_count.setdefault(mention, 0)\n mention_count[mention] += 1\n\n return mention_count", "def _get_mentions(self, sents):\n mentions = []\n word_offset = 0\n # Iterate over sentences in the doc.\n for i in range(len(sents)):\n if i != 0:\n word_offset += len(sents[i-1])\n # Iterate over words/rows in the sentence.\n sent = sents[i]\n for j in range(len(sent)):\n vals = sent[j]\n coref_parts = vals['coref'].split('|')\n for part in coref_parts:\n singleton_match = re.match('^\\((\\d+)\\)$', part)\n multi_match = re.match('^\\((\\d+)$', part)\n if singleton_match:\n label = singleton_match.group(1)\n new_mention = Mention(j, j+1, sents[i], i, word_offset, label=label)\n mentions.append(new_mention)\n elif multi_match:\n label = multi_match.group(1)\n mention_end = part[1:] + ')'\n k = j + 1\n found_end = False\n while not found_end:\n coref = sent[k]['coref']\n if mention_end in coref.split('|'):\n found_end = True\n else:\n k += 1\n new_mention = Mention(j, k+1, sents[i], i, word_offset, label=label)\n mentions.append(new_mention)\n return mentions", "def getmentions():\n\tuserid = input(\"User mentions, userid? 
[return]=me: \")\n\tif userid == '':\n\t\tuserid = \"me\"\n\tpostcontent = pnutpy.api.users_mentioned_posts(userid, count=retrievecount, include_raw=True)\n\tdisplaypost(postcontent)", "def process_message_stats(stats, msg, users):\n if (msg['sender_id']) not in stats.keys():\n stats[msg['sender_id']] = {'name': '', 'messages_sent': 0, 'likes_received': 0, 'likes_given': 0, 'pct_msgs_liked': 0, 'self_likes': 0, 'words_sent': 0, 'images_sent': 0}\n users.update([msg['sender_id']])\n if not bool(stats[msg['sender_id']]['name'] and stats[msg['sender_id']]['name'].strip()):\n stats[msg['sender_id']]['name'] = msg['name']\n stats[msg['sender_id']]['messages_sent'] += 1\n stats[msg['sender_id']]['likes_received'] += len(msg['favorited_by'])\n if len(msg['favorited_by']) > 0:\n for usr in msg['favorited_by']:\n if usr not in stats.keys():\n stats[usr] = {'name': '', 'messages_sent': 0, 'likes_received': 0, 'likes_given': 0, 'pct_msgs_liked': 0, 'self_likes': 0, 'words_sent': 0, 'images_sent': 0}\n users.update(msg['favorited_by'])\n stats[usr]['likes_given'] += 1\n if msg['sender_id'] in msg['favorited_by']:\n stats[msg['sender_id']]['self_likes'] += 1\n if msg['text'] is not None:\n msg['text'].split(' ')\n stats[msg['sender_id']]['words_sent'] += len(msg['text'].split(' '))\n if msg['attachments']:\n for attachment in msg['attachments']:\n if attachment['type'] == 'image':\n stats[msg['sender_id']]['images_sent'] += 1", "def trackLikes(mentions, item, likes=False):\n if (mentions is None or not likes or (not item.get('likes', None) and\n not item.get('comments', None))):\n return\n users = []\n likes = item.get('likes', None)\n if likes:\n users.extend([like.split(';', 1)[0] for like in likes.split('|')])\n comments = item.get('comments', None)\n if comments:\n users.extend([like.split(';', 1)[0] for like in comments.split('|')])\n if not len(users):\n return\n user = item['user_name'].lower()\n mentions[user] = mentions.get(user, {})\n for mention in users:\n name = mention.lower()\n mentions[user][name] = mentions[user].get(name, 0) + 1", "def add_mentions(self, tweet_id, mentioned_user_ids):\n if len(mentioned_user_ids) == 0: return\n\n self.body += 'TYPE=MN&ID=%d' % tweet_id\n for uid in mentioned_user_ids:\n self.body += '&RefID=%d' % uid\n self.body += '\\n'", "async def on_mention(self):\n await self.bot.wait_until_ready()\n\n def check(message):\n content = message.content\n valid = self.bot.user.mention in content and \\\n not content.startswith(self.bot.command_prefix)\n return valid\n\n while not self.bot.is_closed():\n\n message = await self.bot.wait_for('message', check=check)\n\n with open('cogs/Responses/mentions.json', 'r') as f:\n mentions = json.load(f)['mentions']\n out = np.random.choice(mentions)\n await self.send_typing_delay(message.channel)\n await message.channel.send(out)", "def test_dm_user_mentions_no_mentions(self):\n msg = {\n \"id_str\": \"1\",\n 'text': 'This is a dm.',\n \"sender_id\": 1,\n \"sender_id_str\": \"1\",\n \"sender_screen_name\": \"fakeuser\",\n \"sender\": {},\n \"recipient_id\": 2,\n \"recipient_id_str\": \"2\",\n \"recipient_screen_name\": \"fakeuser2\",\n \"recipient\": {},\n 'entities': {'user_mentions': []},\n }\n self.assertEqual([], self.messagetools.dm_user_mentions(msg))", "def test_tweet_user_mentions_no_mentions(self):\n msg = {\n 'entities': {'user_mentions': []},\n 'id_str': '12345',\n 'text': 'This is a tweet mentioning @fakeuser.',\n 'user': {},\n }\n self.assertEqual([], self.messagetools.tweet_user_mentions(msg))", "def 
parse_user_input(sentence):\n tracks = {}\n tokens = nlp(sentence)\n for token in tokens:\n track_id = spotify_uri(token.text)\n if token.text not in tracks:\n tracks[token.text] = track_id\n return tracks", "async def mentioned(message: discord.Message, member: discord.Member=Annotate.Self):\n after = datetime.utcnow() - timedelta(hours=24)\n was_found = False\n await client.send_typing(message.channel)\n\n # Go through all messages since 24 hours ago\n async for mention_message in client.logs_from(message.channel, limit=5000, before=message, after=after):\n if member not in mention_message.mentions:\n continue\n\n was_found = True\n\n # Format the message when it's found, along with messages from prior 15 seconds and latter 15 seconds\n after = mention_message.timestamp - timedelta(seconds=15)\n message_content = []\n async for nm in client.logs_from(message.channel, limit=50, after=after, before=after + timedelta(seconds=30)):\n if nm.author == mention_message.author:\n # Add an invisible separator and some spaces for an indent effect\n message_content.append(\"\\N{INVISIBLE SEPARATOR}\" + \" \" * 4 + nm.clean_content)\n\n found_message = await client.say(message, \"**{0} - {1:%A, %d %B %Y %H:%M}**\\n{2}\".format(\n mention_message.author.display_name, after, \"\\n\".join(reversed(message_content))))\n\n # The member will be able to search for another mention by typing next\n next_message = await client.say(message, \"Type `next` to expand your search.\")\n reply = await client.wait_for_message(timeout=30, author=member, channel=message.channel, content=\"next\")\n\n # Remove the previously sent help message and break if there was no response\n if reply is None:\n await client.delete_message(next_message)\n break\n\n await client.delete_messages([found_message, reply, next_message])\n await client.send_typing(message.channel)\n else:\n await client.say(message, \"{} mentioning you in the last 24 hours.\".format(\n \"Found no more messages\" if was_found else \"Could not find a message\"))", "def prepare_message_mention(guild_id, channel_id, message_id):\n return f\"https://discordapp.com/channels/{guild_id}/{channel_id}/{message_id}\"", "def countMention(file):\n\n with open(file, \"r\") as tweet_corpus:\n mention_count = {}\n mention_pattern = re.compile(\"@[\\w]+\")\n \n for line in tweet_corpus.readlines():\n tweet = json.loads(line)\n mentions = re.findall(mention_pattern, tweet[\"full_text\"])\n\n for mention in mentions:\n # update mention_count\n mention_count[mention] = mention_count.setdefault(mention, 0)\n mention_count[mention] += 1\n return mention_count", "def parse_email(message):\n\n pass", "def process_tweet(self, message):\n try:\n # self.list_mention.append(\n self.processedTweet.run(message, self.loop_number)\n except Exception:\n pass", "def flatten_comment_mentioned_user_keys_for_fetch_incident(comment: Dict[str, Any]) -> None:\n if 'mentionedUsers' in comment:\n comment['mentionedUsers'] = [flatten_user_dict(user) for user in\n comment['mentionedUsers']]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read yspec.in from the directory
def read_yspec_in(path1):
    yspec_in_fio = open(os.path.join(path1, 'yspec.in'))
    yspec_in_fi = yspec_in_fio.readlines()
    yspec_in_fi = yspec_in_fi[86:]
    for i in range(len(yspec_in_fi)):
        yspec_in_fi[i] = yspec_in_fi[i].split()
    for i in range(len(yspec_in_fi)):
        for j in range(len(yspec_in_fi[i])):
            yspec_in_fi[i][j] = float(yspec_in_fi[i][j])
    return yspec_in_fi
[ "def read_specs(folder):\n specfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\n specs = []\n for file in specfiles:\n if file.startswith(\".\"):\n continue\n print(f\"Parsing spec file: {file}\")\n # Only use the first part of the filename as spec name\n name = file.split(\".\")[0]\n with open(os.path.join(folder, file), \"r\") as f:\n spec = safe_load(f)\n testspec = TestSpec(spec[\"spec\"])\n specs.append((name, testspec))\n return specs", "def read_specs_file(data_flag, data_dir=data_dir):\n\n\tfilename = '%s/specs/%s.txt' % (data_dir, data_flag)\t\n\ttry:\n\t\tos.stat(filename)\n\texcept:\n\t\tprint (\"There is no input file %s/specs/%s.txt\" \n\t\t\t\t% (data_dir, data_flag))\n\t\texit()\n\tspecs_file = open(filename, 'r')\n\n\tdata_vars = dict()\n\test_vars = dict()\n\test_specs = dict()\n\t\n\tline_number = 0\n\tfor line in specs_file:\n\t\tline_number += 1\n\t\tif line.strip():\n\t\t\tif not line.startswith(\"#\"):\n\t\t\t\tkeys = line.split()\n\t\t\t\tvar_type = keys[0]\n\t\t\t\tif var_type == 'data_var':\n\t\t\t\t\tvar_name = keys[1]\n\t\t\t\t\tdata_vars[var_name] = str(keys[2])\n\t\t\t\telif var_type == 'est_var':\n\t\t\t\t\tvar_name = keys[1]\n\t\t\t\t\test_vars[var_name] = str(keys[2])\n\t\t\t\telif var_type == 'est_spec':\n\t\t\t\t\tvar_name = keys[1]\n\t\t\t\t\test_specs[var_name] = keys[2:]\n\t\t\t\telse:\n\t\t\t\t\tprint (\"Unidentified input on line %s of %s.txt: %s\" \n\t\t\t\t\t\t\t%(line_number, data_flag, line))\n\t\t\t\t\tquit()\n\t\t\n\tspecs_file.close()\n\tprint ('\\n -- Input vars and params loaded from %s.txt\\n' % data_flag)\n\t\n\tlist_dict = dict()\n\tfor i in ('data_vars', 'est_vars', 'est_specs'):\n\t\tlist_dict[i] = locals()[i]\n\t\n\treturn list_dict", "def read_specification(self):\n try:\n self._cfg_yaml = load(self._cfg_file, Loader=FullLoader)\n except YAMLError as err:\n raise PresentationError(msg=u\"An error occurred while parsing the \"\n u\"specification file.\",\n details=repr(err))\n\n self._parse_env()\n self._parse_configuration()\n self._parse_input()\n self._parse_output()\n self._parse_static()\n self._parse_elements()\n\n logging.debug(f\"Specification: \\n{pformat(self._specification)}\")", "def _read_config():\n\n import configparser\n import os\n\n basepath = os.getcwd()\n prev = None\n while basepath != prev:\n prev = basepath\n path = os.path.join(basepath, 'uriconfig.ini')\n if os.path.exists(path):\n break\n basepath = os.path.split(basepath)[0]\n\n parser = configparser.ConfigParser()\n parser.read(path)\n return parser", "def readin(self):\n \n if self.filename.endswith('.fits'):\n # Assumes Science Verification data\n self.read_SV_fits()\n elif self.filename.endswith('.npz'): \n # Assumes DES Y3 Gold data\n self.read_Y3_2_2_npz()\n else: \n print('Unrecognized file type: ' + self.filename)", "def _read_spec(src):\n extension = os.path.splitext(src)[1]\n if extension.lower() == '.json':\n try:\n with open(src, 'r') as f:\n data = f.read()\n return json.loads(data)\n except json_parse_exception as e:\n error_and_quit(\"Invalid JSON provided in spec\\n{}\".format(e))\n else:\n raise ValueError('The provided file extension for the spec is not supported')", "def src_spec_file(name, path=None):\n from os.path import dirname, join\n if path is None:\n global main_spec_file_path\n path = dirname(main_spec_file_path)\n path = join(path, name + \".pktinc\")\n logging.info(u\"Spec: \\\"%s\\\" (included)\", path)\n return Spec.from_file(path).payload.instantiate()", "def generate(yamldir, includes):\n data = dict()\n for 
yamlfile in gen_yamlfile_locations(yamldir, includes):\n try:\n data.update(yaml.safe_load(yamlfile.read()))\n except AttributeError: # empty file\n sys.exit('No data found inside: %s' % yamlfile)\n except yaml.scanner.ScannerError as e: # syntax error\n sys.exit('Error while loading YAML-file: %s' % e)\n finally:\n yamlfile.close()\n return data", "def read_reqs_file(reqs_name):\n path_reqs_file = os.path.join(\n \"requirements\", \"requirements-{}.txt\".format(reqs_name)\n )\n with open(path_reqs_file, \"r\") as reqs_file:\n return [\n pkg.rstrip() for pkg in reqs_file.readlines() if not pkg.startswith(\"#\")\n ]", "def load_tyk2_example():\n module_path = Path(__file__).parent\n data = {'complex': list(map(str, module_path.glob('tyk2_ejm_47~ejm_31/complex/*/ti-*.out.bz2'))),\n 'solvated': list(map(str, module_path.glob('tyk2_ejm_47~ejm_31/solvated/*/ti-*.out.bz2')))\n }\n\n with open(module_path / 'tyk2_ejm_47~ejm_31' / 'descr.rst') as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=data,\n DESCR=fdescr)", "def _load_and_save_yadage_spec(workflow: Workflow, operational_options: Dict):\n operational_options.update({\"accept_metadir\": True})\n toplevel = operational_options.get(\"toplevel\", \"\")\n workflow.reana_specification = yadage_load_from_workspace(\n workflow.workspace_path,\n workflow.reana_specification,\n toplevel,\n )\n Session.commit()", "def load_spec_file(filepath: str) -> dict:\n build_spec = {}\n\n logger.debug(f\"Loading file {filepath}\")\n\n with open(filepath, \"r\") as spec_file:\n try:\n build_spec = yaml.safe_load(spec_file)\n except yaml.YAMLError as exc:\n logger.error(f\"Error loading build spec file:\\n\\n{exc}\")\n sys.exit(1)\n\n logger.debug(f\"build spec: {build_spec}\")\n\n return build_spec", "def testReadDirectory(self):\n definitions_directory = self._GetTestFilePath([u'definitions'])\n\n definitions_registry = registry.DataTypeDefinitionsRegistry()\n definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()\n\n definitions_reader.ReadDirectory(\n definitions_registry, definitions_directory)", "def read_make_examples_run_info(path):\n with tf.gfile.GFile(path) as f:\n return text_format.Parse(f.read(), deepvariant_pb2.MakeExamplesRunInfo())", "def _load_spec_file(self, filename):\n if not filename.startswith('/'):\n filename = os.path.join(\n self.app.root_path,\n filename\n )\n with open(filename) as file:\n if filename.endswith(\".yml\") or filename.endswith(\".yaml\"):\n spec = yaml.safe_load(file)\n else:\n spec = json.load(file)\n\n self._openapi_json = spec", "def concrete_specs_from_file(args):\n result = []\n for file in args.specfiles:\n with open(file, \"r\") as f:\n if file.endswith(\"yaml\") or file.endswith(\"yml\"):\n s = spack.spec.Spec.from_yaml(f)\n else:\n s = spack.spec.Spec.from_json(f)\n\n concretized = s.concretized()\n if concretized.dag_hash() != s.dag_hash():\n msg = 'skipped invalid file \"{0}\". '\n msg += \"The file does not contain a concrete spec.\"\n tty.warn(msg.format(file))\n continue\n result.append(concretized)\n return result", "def get_yaml(yamlfile):\n # FIXME test this sub\n config_file = open(\"%s/etc/%s\" % (os.environ['KERRIGAN_ROOT'], yamlfile), 'r')\n y = yaml.load(config_file)\n return y", "def load_slybot_spec(self, project):\n return open_project_from_dir(self.projectdir)", "def test_read_file(self):\n self.assertIsNotNone(read_file(os.path.join(dir_path, 'input.txt')))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Cut a time window around the theoretical arrival time. It is meant to be run in parallel.
def cut_time_window(i, all_files_i, req_phase, forward_code, path1):
    tr = read(os.path.join(all_files_i))[0]
    (phase_flag, O, A, B, E, GCARC, tr_sliced) = epi_dist(tr, req_phase=req_phase, tb=20, ta=100, forward_code=forward_code)
    if phase_flag == 'Y':
        tr_sliced.stats.sac.o = O
        tr_sliced.stats.sac.a = A
        tr_sliced.stats.sac.b = B
        tr_sliced.stats.sac.e = E
        #tr_sliced.stats.sac.gcarc = GCARC
        tr_sliced.write(os.path.join(path1, 'grf_cut', 'grf.%s.%s.%s.x00.%s' % (tr_sliced.stats.network, tr_sliced.stats.station, tr_sliced.stats.location, tr_sliced.stats.channel)), format='SAC')
[ "def cutting_time_prediction(self, job):\r\n # print('job :', job)\r\n cutting_time = 10\r\n return cutting_time", "def syncronize_movie(movie,times,t_window=1e-3,N_interval=3,t_start=1e-4,bgsub=0): \n \n frames = movie[:]\n times = movie.timestamps\n \n frames_sub = np.zeros(frames.shape)\n \n if bgsub > 0:\n bgsub = backgroundSubtractorMin(bgsub)\n \n for i in np.arange(frames.shape[0]):\n frames_sub[i] = bgsub.apply(frames[i])\n frames = frames_sub\n \n \n synced_frames = []\n synced_times = []\n \n for i in np.arange(N_inter):\n \n synced_frames.append([])\n synced_times.append([])\n \n for time in times:\n \n try:\n start_ind = np.where(times >= time + t_exclude)[0][0]\n except:\n raise Error(\"Error: time outside movie time window\")\n try:\n end_ind = np.where(times >= time + t_window)[0][0]\n except:\n if time + t_window > times[-1]:\n end_ind = frames.shape[0] - 1\n else:\n raise Error(\"Error: end_ind calculation failed\")\n \n chunked_frames = np.array_split(frames[start_ind:end_ind],N_inter,axis=0)\n chunked_times = np.array_split(times[start_ind:end_ind],N_inter,axis=0)\n \n for i in np.arange(N_inter):\n synced_frames[i].append(chunked_frames[i])\n synced_times[i].append(chunked_times[i])\n \n for i in np.arange(N_inter):\n \n synced_frames[i] = np.concatenate(synced_frames[i],axis=0)\n synced_times[i] = np.concatenate(synced_times[i],axis=0)\n \n return synced_frames,synced_times", "def schedule(t, k=20, lam=0.005, limit=10000):\n #return (k * np.exp(-lam * t))\n return (k * np.exp(-lam * t) if t < limit else 0)", "def timeFlow(self):", "def test_sliding_time_window(self):\n dst = \"ngc5921.split.sliding_time_window.ms\"\n ref = 'ngc5921_statwt_ref_test_sliding_time_window.ms'\n timebin = \"300s\"\n \"\"\"\n row_to_rows = []\n row_to_rows.append([0, 6])\n row_to_rows.append([0, 7])\n row_to_rows.append([0, 8])\n row_to_rows.append([0, 9])\n row_to_rows.append([0, 9])\n row_to_rows.append([0, 10])\n row_to_rows.append([1, 12])\n row_to_rows.append([2, 12])\n row_to_rows.append([3, 12])\n row_to_rows.append([5, 12])\n row_to_rows.append([6, 12])\n row_to_rows.append([6, 12])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([12, 17])\n row_to_rows.append([17, 20])\n row_to_rows.append([17, 21])\n row_to_rows.append([17, 22])\n row_to_rows.append([18, 23])\n row_to_rows.append([19, 24])\n row_to_rows.append([20, 25])\n row_to_rows.append([21, 26])\n row_to_rows.append([22, 27])\n row_to_rows.append([23, 28])\n row_to_rows.append([24, 29])\n row_to_rows.append([25, 30])\n row_to_rows.append([26, 31])\n row_to_rows.append([27, 32])\n row_to_rows.append([28, 33])\n row_to_rows.append([29, 33])\n row_to_rows.append([30, 33])\n row_to_rows.append([33, 35])\n row_to_rows.append([33, 35])\n row_to_rows.append([35, 38])\n row_to_rows.append([35, 38])\n row_to_rows.append([35, 38])\n row_to_rows.append([38, 41])\n row_to_rows.append([38, 42])\n row_to_rows.append([38, 43])\n row_to_rows.append([39, 44])\n row_to_rows.append([40, 45])\n row_to_rows.append([41, 46])\n row_to_rows.append([42, 47])\n row_to_rows.append([43, 48])\n row_to_rows.append([44, 49])\n row_to_rows.append([45, 50])\n row_to_rows.append([46, 51])\n row_to_rows.append([47, 52])\n row_to_rows.append([48, 53])\n row_to_rows.append([49, 54])\n row_to_rows.append([50, 55])\n row_to_rows.append([51, 56])\n row_to_rows.append([52, 56])\n row_to_rows.append([53, 56])\n row_to_rows.append([56, 60])\n row_to_rows.append([56, 60])\n 
row_to_rows.append([56, 60])\n row_to_rows.append([56, 60])\n \"\"\"\n shutil.copytree(src, dst)\n myms.open(dst, nomodify=False)\n myms.statwt(timebin=timebin, slidetimebin=True)\n myms.done()\n # self._check_weights(\n # dst, row_to_rows, 'c', None, False, None, None\n # )\n self.compare(dst, ref)\n shutil.rmtree(dst)", "def cut_p(st, bf=0, tafp=0.8, time_after='relative_time', sta_shift=dict(), refine_window=False):\n\n stas=0\n\n for tr in st:\n\n stas = get_sta_shift(tr.stats.station, sta_shift)\n\n relps = tr.stats['s_time'] - tr.stats['p_time']\n\n p_start = tr.stats['p_time']-bf+stas\n\n if time_after == 'absolute_time':\n p_end = p_start + tafp\n\n if time_after == 'relative_time':\n p_end = p_start + tafp*relps\n\n if p_end > tr.stats['endtime']:\n p_end = tr.stats['endtime']\n\n\n tr.trim(p_start, p_end)\n\n\n if refine_window:\n rw_start, rw_end = signal_intensity(tr)\n\n p_start = p_start + rw_start\n p_end = p_start + rw_end\n\n tr.trim(p_start, p_end)\n\n link_window_to_trace(tr, p_start, p_end)", "def _timeseriessplit(self, trainstart='201301', holdoutstart='201511', holdoutmonths = 1, final=False):\n print(f\"Train Start : {trainstart}\")\n self.df_train = self.rawfeatures[(self.rawfeatures.period >= trainstart) & (self.rawfeatures.period < holdoutstart)]\n\n if not final:\n holdoutend = addmonth(holdoutstart, holdoutmonths)\n self.df_holdout = self.rawfeatures[(self.rawfeatures.period >= holdoutstart) & (self.rawfeatures.period < holdoutend)]\n\n print(f\"Holdout Start : {holdoutstart}\")\n print(f\"Holdout End : {holdoutend}\")\n self.flow.log_status(logmessage=f\"Train Start : {trainstart}, Holdout Start : {holdoutstart}, Holdout End : {holdoutend}\")\n else:\n self.flow.log_status(logmessage=f\"Train Start : {trainstart}\")", "def time_slice(self, t_start, t_stop):\n idx = numpy.where((self._spike_times >= t_start) & (self._spike_times <= t_stop))[0]\n if self.spikes: \n return SpikeTrain(spikes=self._spikes[idx], t_start=t_start, t_stop=t_stop, neuron=self.neuron) \n else:\n return SpikeTrain(spike_times = self._spike_times[idx], t_start=t_start, t_stop=t_stop, neuron=self.neuron)", "def _flow_time_step(self, dt: float, **kwargs):\n ...", "def cut_s(st, bf=2, rafp=0.8, tafs=20, time_after='absolute_time', sta_shift=dict(), refine_window=True):\n stas=0\n\n for tr in st:\n\n stas = get_sta_shift(tr.stats.station, sta_shift)\n relps = tr.stats['s_time'] - tr.stats['p_time']\n p_end = tr.stats['p_time'] + relps*rafp + stas\n\n if time_after == 'absolute_time':\n s_end = p_end + tafs\n\n if time_after == 'relative_ps':\n s_end = p_end + tafs*relps\n\n\n if s_end > tr.stats['endtime']:\n s_end = tr.stats['endtime']\n\n\n tr.trim(p_end, s_end)\n\n if refine_window:\n rw_start, rw_end = signal_intensity(tr)\n\n s_end = p_end + rw_end\n p_end = p_end + rw_start\n\n tr.trim(p_end, s_end)\n\n link_window_to_trace(tr, p_end, s_end)", "def getObsTime(vscan,area,mode='optimal'):\n \n # Verify the mode\n if (mode != 'optimal'):\n print(\"Observing mode parameter is not in allowed values\")\n print(\"Allowed values are [optimal]\")\n return\n \n if (mode == 'optimal'):\n pixSizeBand = getPixSizeBand(over=useDef)\n bbopFoV = getGeomQuant(pixSizeBand)[0]\n # we're in the mode were we have found the optimal compromise between on-source\n # time and map execution time\n # Formulas used below come from document BBOP-DAp-RP-002\n # The formula for the obsTime may be slightly wrong because \n # scanlegs do not come in fraction so somewhere there should be rounding\n # to the integer 
immediately above. Not considered major here.\n onSrcTime = bbopFoV / (0.84*vscan)\n obsTime = area * (3600./vscan) * (1.2 * (3600./bbopFoV+0.54))\n # determines the number of scan legs required for this observation\n # the +2 is because I cannot accept a fractional number of legs spacings\n # so the map is overdimensioned (+1 w.r.t to the integral part of the ratio\n # between map widths and leg spacing), and then for a given nunber of spacings\n # we have to make +1 number of legs\n nLegs = int(3600.*math.sqrt(area)/(bbopFoV*0.84))+2\n #print nLegs\n # add the turn around overhead\n # for each leg of the scan we need to accelerate to it and brake at the end\n # this assumes that spacecraft is delivered standing still to begin the \n # observation, and is positioned standing still at the end of the obs.\n # We need then one step motion betwen the legs, so one less than the \n # number of legs. \n # Since here we are computing the time for 1 map, we assume the observing \n # time clock stops at the end of the map.\n obsTimeFull = obsTime + (nLegs-1)*timeStep + nLegs*2*timeBrakeAccel\n # add the initial overhead\n obsTimeFull += initOverhead\n \n return [onSrcTime,obsTimeFull, obsTime/obsTimeFull,nLegs]", "def segregate_timestamps(self) -> None:\r\n self.first = self.timestamps[0]\r\n self.last = self.timestamps[-1]\r\n period = self.last - self.first\r\n period = period.total_seconds() / ( TIMESTEP_MINUTES * 60 ) # total TIMESTEP_MINUTES minute blocks\r\n segregated = {\r\n step: [\r\n x for x in self.timestamps \r\n if \r\n x >= (self.first + (timedelta(minutes = step * TIMESTEP_MINUTES))) \r\n and\r\n x < (self.first + (timedelta(minutes = (step + 1) * TIMESTEP_MINUTES)))\r\n ]\r\n for step in range( ceil(period) )\r\n }\r\n self.segregated = segregated", "def fit_turnout_times(data, prios=[1, 2, 3], vehicle_types=[\"TS\", \"RV\", \"HV\", \"WO\"],\n rough_lower_bound=30, rough_upper_bound=600, stations_to_exclude=None,\n station_col=\"inzet_kazerne_groep\", volunteer_stations=None):\n\n # filter stations\n data[station_col] = data[station_col].str.upper()\n if stations_to_exclude:\n data = data[~np.isin(data[station_col], stations_to_exclude)].copy()\n\n # add full time or part time indicator\n data = add_parttime_fulltime_indicator(data, station_col=station_col,\n volunteer_stations=volunteer_stations)\n\n # calculate dispatch times\n data[\"turnout_time\"] = (pd.to_datetime(data[\"inzet_uitgerukt_datumtijd\"], dayfirst=True) -\n pd.to_datetime(data[\"inzet_gealarmeerd_datumtijd\"], dayfirst=True)\n ).dt.seconds\n\n # filter out unrealistic values\n data = data[(data[\"turnout_time\"] > rough_lower_bound) & \n (data[\"turnout_time\"] <= rough_upper_bound)].copy()\n\n # fit variables per incident type (use backup rv if not enough samples)\n rv_dict = {}\n for appointment in [\"fulltime\", \"parttime\"]:\n df = data[data[\"fulltime\"] == (appointment == \"fulltime\")].copy()\n backup_rv = fit_gamma_rv(df[\"turnout_time\"], scale=100)\n\n df_rv_dict = {}\n\n for prio in prios:\n df_prio = df[df[\"dim_prioriteit_prio\"] == prio]\n prio_backup_rv = fit_gamma_rv(df[\"turnout_time\"], scale=100)\n\n prio_rv_dict = {}\n for vtype in vehicle_types:\n X = df[df[\"voertuig_groep\"] == vtype][\"turnout_time\"]\n if sample_size_sufficient(X):\n prio_rv_dict[vtype] = fit_gamma_rv(X, scale=100)\n else:\n prio_rv_dict[vtype] = copy.deepcopy(prio_backup_rv)\n\n df_rv_dict[prio] = prio_rv_dict.copy()\n\n rv_dict[appointment] = df_rv_dict.copy()\n\n return rv_dict", "def 
update_travtime_bounds(self):\n\t\t#pass\n\t\tld = len(self.data[0])\n\t\tfor i in range(ld):\n\t\t\ttt_min = self.t_min[i]\n\t\t\tdel_lb = self.del_lb[i]\n\t\t\tdel_ub = self.del_ub[i]\n\t\t\ttt_lb = max(0, tt_min-del_lb)\n\t\t\ttt_ub = max(0, tt_min+del_ub)\n\t\t\tself.t_lb[i] = tt_lb\n\t\t\tself.t_ub[i] = tt_ub", "def getRunningTraj(self, threshspeed=0.01, window_len=51):\n\n # get running speed and smooth it a bit\n speed_dummy = signale.smooth(self.getSpeed()[1], window_len=window_len, window='hanning')\n\n indices = numpy.where(speed_dummy < threshspeed)[0]\n self.run_times = numpy.delete(self.times, indices, 0)\n self.run_places = numpy.delete(self.places, indices, 0)\n\n return self.run_times, self.run_places", "def run(self):\n time = 0\n while time <= self.max_time:\n self.step()\n time += self.time_step", "def stop_points_based_segmentation(trajectories, identifier='second_pass', speed_threshold=2.0, distance_threshold=5.0, time_threshold=300):\n \n temp = []\n traj_id_ = 1\n\n for traj_id, sdf in trajectories.groupby(identifier, group_keys=False):\n grp_copy = sdf.copy(deep=False).reset_index(drop=True).sort_values(by='timestamp', ascending=True)\n \n # Stop points for each group\n stop_points = [0] \n # Candidates points\n slow_speed_points = grp_copy[grp_copy['calc_speed'] <= speed_threshold].index\n candidates_index = 0\n \n while not slow_speed_points.empty and candidates_index < len(slow_speed_points):\n center = slow_speed_points[candidates_index]\n center_row = grp_copy.iloc[center]\n \n # Left Search\n li = side_search(grp_copy.iloc[stop_points[-1]:center], center_row, distance_threshold)\n # Right Search\n ri = side_search(grp_copy.iloc[center + 1:], center_row, distance_threshold)\n \n # If there is no right or left point closer that satisfies the side_search conditions\n if li is None or ri is None:\n candidates_index = candidates_index + 1\n continue\n\n \n left_limit = grp_copy.iloc[li]\n right_limit = grp_copy.iloc[ri]\n if (right_limit['timestamp'] - left_limit['timestamp']) >= time_threshold: \n stop_points.append(center)\n try:\n # If we are not at the end of the data stream\n _next = grp_copy.iloc[ri + 1:][grp_copy['calc_speed'] > speed_threshold]['timestamp'].idxmin()\n slow_speed_points = grp_copy.iloc[_next:][grp_copy['calc_speed'] <= speed_threshold].index\n candidates_index = 0\n except ValueError:\n break\n else:\n candidates_index = candidates_index + 1\n \n\n stop_points.pop(0)\n # Mark stop points\n if len(stop_points) == 0:\n continue\n grp_copy.loc[stop_points, 'stop'] = 'Yes'\n\n # Segment trips based on stop - points index position\n if grp_copy.iloc[:stop_points[0]][grp_copy['calc_speed'] > speed_threshold]['timestamp'].empty:\n last_check = 0\n else:\n last_check = grp_copy.iloc[:stop_points[0]][grp_copy['calc_speed'] > speed_threshold]['timestamp'].idxmin()\n \n sdfs = []\n for ind in stop_points:\n sdfs.append(grp_copy.iloc[last_check:ind + 1])\n try:\n last_check = grp_copy.iloc[ind + 1:][grp_copy['calc_speed'] > speed_threshold]['timestamp'].idxmin()\n except ValueError:\n last_check = ind + 1\n\n for i in range(0,len(sdfs)):\n if sdfs[i].empty:\n continue\n sdfs[i]['traj_id'] = traj_id_\n traj_id_ = traj_id_ + 1\n \n temp.extend(sdfs) \n \n return pd.concat(temp)", "def run(self,t):\r\n while t>0:\r\n update(min(t,dt))\r\n t -= dt", "def calculate_trajectory_cutoff(trajectories, window):\n ma = np.mean(rolling_window(trajectories, window), -1)\n ma_mean = np.mean(ma, axis=1)\n ma_std = np.std(ma, axis=1)\n cutoff = ma_mean + ma_std\n\n 
return cutoff.reshape(-1, 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Schema resulting from merging the subschemas. NOT equivalent to the schema defined for the item type itself (use the TypeInfo contained in registry[TYPES].by_abstract_type[item_type] for that)
def schema(self):
    subschemas = (self.types[name].schema for name in self.subtypes)
    return reduce(combine_schemas, subschemas)
[ "def merge_schemas(subschema, types):\n if not subschema:\n return None\n # handle arrays by simply jumping into them\n # we don't care that they're flattened during mapping\n ref_types = None\n subschema = subschema.get('items', subschema)\n if 'linkFrom' in subschema:\n _ref_type, _ = subschema['linkFrom'].split('.', 1)\n ref_types = [_ref_type]\n elif 'linkTo' in subschema:\n ref_types = subschema['linkTo']\n if not isinstance(ref_types, list):\n ref_types = [ref_types]\n if ref_types is None:\n curr_s = subschema\n else:\n embedded_types = [types[t].schema for t in ref_types\n if t in types.all]\n if not embedded_types:\n return None\n curr_s = reduce(combine_schemas, embedded_types)\n return curr_s", "def allof_schema(context_jsonschema=None):\n schema = json.loads(UNION_SCHEMA_TEMPLATE)\n default_jsonschema = json.load(open(DEFAULT_CONTEXT_JSONSCHEMA, 'r'))\n # Performs an outer merge on the provided schemas\n schema['allOf'].append(default_jsonschema)\n schema[ID_PROPERTY] = default_jsonschema[ID_PROPERTY]\n if context_jsonschema is not None:\n # TODO - enforce that context_jsonschema is valid JSONschema\n schema['allOf'].append(context_jsonschema)\n schema[ID_PROPERTY] = context_jsonschema[ID_PROPERTY]\n return schema", "def _calculate_x_to_one_schema(\n *, parent: str, schema: oa_types.Schema, schemas: oa_types.Schemas\n) -> oa_types.ObjectRefSchema:\n return_schema: oa_types.ObjectRefSchema = {\n oa_types.OpenApiProperties.TYPE.value: \"object\",\n oa_types.ExtensionProperties.DE_REF.value: parent,\n }\n\n description = peek.prefer_local(\n get_value=peek.description, schema=schema, schemas=schemas\n )\n if description is not None:\n return_schema[oa_types.OpenApiProperties.DESCRIPTION.value] = description\n nullable = peek.prefer_local(\n get_value=peek.nullable, schema=schema, schemas=schemas\n )\n if nullable is not None:\n return_schema[oa_types.OpenApiProperties.NULLABLE.value] = nullable\n write_only = peek.prefer_local(\n get_value=peek.write_only, schema=schema, schemas=schemas\n )\n if write_only is not None:\n return_schema[oa_types.OpenApiProperties.WRITE_ONLY.value] = write_only\n\n return return_schema", "def get_schema(self):\r\n schema = {}\r\n schema[\"type\"] = self.type\r\n if self.type == \"string\":\r\n schema[\"blank\"] = True # allow blank strings\r\n if self.optional:\r\n schema[\"required\"] = False\r\n\r\n return schema", "def merge_all_of_schemas(original_data: dict) -> dict:\n if ALL_OF_SCHEMAS_KEY not in original_data.keys():\n return original_data\n\n logger.debug(f\"Merging 'allOf' schemas\")\n\n schema_dict: dict = {}\n\n for nested_schema_dict in original_data[ALL_OF_SCHEMAS_KEY]:\n merged_nested_schema = merge_all_of_schemas(nested_schema_dict)\n schema_dict = merge_schema(schema_dict, merged_nested_schema)\n\n return schema_dict", "def schema(self):\n if self.parent is None:\n return self.bus_client.schema\n else:\n # TODO: Implement getting schema of child nodes if there is demand\n raise AttributeError(\n 'Schema only available on root node. 
Use bus.schema, not bus.my_api.schema'\n )", "def get_schema(self) -> typing.Optional[\"MMSchema\"]:\n return self.root", "def _rebuild_internal_schema(self):\n\t\tself.columns = OrderedDict()\n\t\tself.primary_cgroup = None\n\n\t\tfor cgroup, schema in self._cgroups.iteritems():\n\t\t\tfor colname, dtype in schema['columns']:\n\t\t\t\tassert colname not in self.columns\n\t\t\t\tself.columns[colname] = ColumnType()\n\t\t\t\tself.columns[colname].name = colname\n\t\t\t\tself.columns[colname].dtype = np.dtype(dtype)\n\t\t\t\tself.columns[colname].cgroup = cgroup\n\n\t\t\tif self.primary_cgroup is None and not self._is_pseudotablet(cgroup):\n\t\t\t\tself.primary_cgroup = cgroup\n\t\t\t\tif 'primary_key' in schema:\n\t\t\t\t\tself.primary_key = self.columns[schema['primary_key']]\n\t\t\t\tif 'temporal_key' in schema:\n\t\t\t\t\tself.temporal_key = self.columns[schema['temporal_key']]\n\t\t\t\tif 'spatial_keys' in schema:\n\t\t\t\t\t(lon, lat) = schema['spatial_keys']\n\t\t\t\t\tself.spatial_keys = (self.columns[lon], self.columns[lat])\n\t\t\telse:\n\t\t\t\t# If any of these are defined, they must be defined in the\n\t\t\t\t# primary cgroup\n\t\t\t\tassert 'primary_key' not in schema\n\t\t\t\tassert 'spatial_keys' not in schema\n\t\t\t\tassert 'temporak_key' not in schema\n\n\t\t\tif 'blobs' in schema:\n\t\t\t\tfor colname in schema['blobs']:\n\t\t\t\t\tassert self.columns[colname].dtype.base == np.int64, \"Data structure error: blob reference columns must be of int64 type\"\n\t\t\t\t\tself.columns[colname].is_blob = True", "def test_nested_schemas_splitted() -> None:\n\n class A(AvroModel):\n class Meta:\n namespace = \"namespace\"\n\n class B(AvroModel):\n a: A\n\n class C(AvroModel):\n b: B\n a: A\n\n # first the B schema is generated\n assert parse_schema(B.avro_schema_to_python())\n\n # then check that the C schema is valid\n assert parse_schema(C.avro_schema_to_python())", "def subschema(self, sub, deps=None):\n\n # Inherit path and fragment\n new_schema_path = self._schema_path\n new_schema_fragment = \"#/definitions/\"+sub # SEE NOTE 2\n\n # Shallow copy of the subschema\n subschema = dict(self._schema[\"definitions\"][sub])\n\n # Add schema metadata\n subschema[\"$schema\"] = self._schema[\"$schema\"] # Same version, I hope\n\n # Add dependencies (if any)\n if deps is not None and len(deps) > 0:\n # Shallow copy the subschema's definitions (if any)\n if \"definitions\" in subschema:\n subschema[\"definitions\"] = dict(subschema[\"definitions\"])\n else:\n subschema[\"definitions\"] = dict()\n\n # Add references to dependent subschemas into definitions\n definitions = subschema[\"definitions\"]\n for dep in deps:\n definitions[dep] = self._schema[\"definitions\"][dep]\n\n return Schema(subschema,\n _schema_path=new_schema_path,\n _schema_fragment=new_schema_fragment)", "def get_schemas(self):\n query = self.osqlqry.get_schemas()\n logger.info(u'Schemas query: {0}'.format(query))\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def finalizeSchema(schema):\n schema.moveField('businessOldLocation', after='workLocations')\n schema.moveField('foldermanagers', after='businessOldLocation')\n schema.moveField('rubrics', after='folderCategory')\n schema.moveField('description', after='additionalLegalConditions')\n return schema", "def merge_schema(*schema_list):\n new_schema = {\"@context\": {}, \"@graph\": [], \"@id\": \"merged\"}\n _ids = [\"merged\"]\n for schema in schema_list:\n new_schema[\"@context\"].update(schema.get(\"@context\", {}))\n 
new_schema[\"@graph\"].extend(schema.get(\"@graph\", []))\n _id = schema.get(\"@id\", None)\n if _id:\n _ids.append(_id)\n new_schema[\"@id\"] = \"_\".join(_ids)\n return new_schema", "def test_schema(parent_schema, property_schema, schemas):\n _, returned_schema = association.calculate_schema(\n property_schema=property_schema,\n parent_schema=parent_schema,\n schemas=schemas,\n )\n\n assert returned_schema == {\n \"type\": \"object\",\n \"x-tablename\": \"association\",\n \"properties\": {\n \"parent_table_parent_column\": {\n \"type\": \"parent type\",\n \"x-primary-key\": True,\n \"x-foreign-key\": \"parent_table.parent_column\",\n },\n \"ref_table_ref_column\": {\n \"type\": \"ref type\",\n \"x-primary-key\": True,\n \"x-foreign-key\": \"ref_table.ref_column\",\n },\n },\n \"required\": [\"parent_table_parent_column\", \"ref_table_ref_column\"],\n }", "def schema(self):\n return BookUpdateSchema()", "def type_mapping(types, item_type, embed=True):\n type_info = types[item_type]\n schema = type_info.schema\n # use top_level parameter here for schema_mapping\n mapping = schema_mapping('*', schema, True)\n embeds = add_default_embeds(item_type, types, type_info.embedded_list, schema)\n embeds.sort()\n if not embed:\n return mapping\n for prop in embeds:\n single_embed = {}\n curr_s = schema\n curr_m = mapping\n split_embed_path = prop.split('.')\n for curr_e in split_embed_path:\n # if we want to map all fields (*), do not drill into schema\n if curr_e != '*':\n # drill into the schemas. if no the embed is not found, break\n subschema = curr_s.get('properties', {}).get(curr_e, None)\n curr_s = merge_schemas(subschema, types)\n if not curr_s:\n break\n curr_m = update_mapping_by_embed(curr_m, curr_e, curr_s)\n return mapping", "def _jsonschema_type_mapping(self):\n oneOf = {\"oneOf\": [], \"description\": description, \"default\": default, \"title\": self.name}\n\n for idx, option in enumerate(field_options):\n mfield_meta = option.metadata[\"marshmallow_field\"]\n\n # Necessary for key/name de-duplication in case a name is not supplied by the user:\n mfield_meta_class_name = str(mfield_meta.__class__).split(\".\")[-1].split(\"'\")[0].lower()\n\n # If the option inherits from a custom dataclass-field, then use the custom jsonschema:\n if hasattr(mfield_meta, \"_jsonschema_type_mapping\"):\n oneOf[\"oneOf\"].append(mfield_meta._jsonschema_type_mapping())\n # Otherwise, extract the jsonschema using a dummy dataclass as intermediary:\n else:\n\n @m_dataclass\n class DummyClass:\n tmp: Any = option\n\n dummy_schema = unload_jsonschema_from_marshmallow_class(DummyClass)\n tmp_json_schema = dummy_schema[\"properties\"][\"tmp\"]\n # Manually set the title, otherwise it would be 'tmp':\n tmp_json_schema[\"title\"] = f\"{self.name}_{mfield_meta_class_name}_option\"\n oneOf[\"oneOf\"].append(tmp_json_schema)\n\n # Add null as an option if we want to allow none but none of the field options allow none.\n any_field_options_allow_none = any(\n option.metadata[\"marshmallow_field\"].allow_none for option in field_options\n )\n if allow_none and not any_field_options_allow_none:\n oneOf[\"oneOf\"] += [{\"type\": \"null\", \"title\": \"null_option\", \"description\": \"Disable this parameter.\"}]\n\n return oneOf", "def fill_schema(schema_to, schemas):\n def fill(schema):\n \"\"\"\n :param schema to fill: A partial part of a schema\n \"\"\"\n # Rescursivly search schema for schema references\n for key, v in schema.items():\n if isinstance(v, dict):\n schema[key] = fill(v, schemas)\n if isinstance(v, 
list):\n for node in v:\n if isinstance(node, dict):\n schema[key] = fill(node, schemas)\n\n # Find references to schemas\n for option, constraints in schema.items():\n if not option in [\"extends\", \"items\"]:\n continue\n\n # Make a list of constraints if we don't have one\n if not isinstance(constraints, list):\n constraints = [constraints]\n\n # Schema names have to be strings\n filled = []\n\n for constraint in constraints:\n if isinstance(constraint, basestring):\n filled.append(schemas[constraint])\n\n schema[option] = filled\n\n fill(schema_to)", "def resolve_nested_schema(self, schema):\n try:\n schema_instance = resolve_schema_instance(schema)\n # If schema is a string and is not found in registry,\n # assume it is a schema reference\n except marshmallow.exceptions.RegistryError:\n return schema\n schema_key = make_schema_key(schema_instance)\n if schema_key not in self.refs:\n name = self.schema_name_resolver(schema)\n if not name:\n try:\n json_schema = self.schema2jsonschema(schema_instance)\n except RuntimeError as exc:\n raise APISpecError(\n \"Name resolver returned None for schema {schema} which is \"\n \"part of a chain of circular referencing schemas. Please\"\n \" ensure that the schema_name_resolver passed to\"\n \" MarshmallowPlugin returns a string for all circular\"\n \" referencing schemas.\".format(schema=schema)\n ) from exc\n if getattr(schema, \"many\", False):\n return {\"type\": \"array\", \"items\": json_schema}\n return json_schema\n name = get_unique_schema_name(self.spec.components, name)\n self.spec.components.schema(name, schema=schema)\n return self.get_ref_dict(schema_instance)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if item is in array and returns its index; if not found, returns -1
def index_of(array, item):
    try:
        return array.index(item)
    except ValueError:
        return -1
[ "def find_index(array, value):\n for index, val in enumerate(array):\n if val == value:\n return index\n return -1", "def linear_search(arr, value):\r\n\r\n for i in range(len(arr)): # O(n)\r\n if arr[i] == value:\r\n return i\r\n return -1", "def find(target, items):\n for i in range(len(items)):\n if target == items[i]:\n return i\n return -1", "def indice(elt, liste):\n for n, x in enumerate(liste):\n if x == elt:\n return n\n return -1", "def better_linear_search(l, item):\n for i in range(len(l)):\n if l[i]==item:\n return i\n return False", "def array_search(A: int, N: int, x: int):\n for index in range(N):\n if A[index] == x:\n return index\n return -1", "def linear_search(ls: list, item: object) -> bool:\n assert pythonic_is_sorted(ls)\n for index, e in enumerate(ls):\n if e == item:\n return index\n return -1", "def linear(self,search_item=None):\n for index,element in enumerate(self.data):\n if element == search_item:\n return index\n break\n return -1", "def find_index_by_entry(self, entry: int) -> int:\n return self.entries.index(entry, 0, self.list_size)", "def index(self, value):\n for i, k in enumerate(self):\n if k == value:\n return i", "def seqsearch(data, item):\n\tindex = 0\n\tfound = False\n\twhile index < len(data) and data[index] <= item and not found:\n\t\tif data[index] == item:\n\t\t\tfound = True\n\t\telse:\n\t\t\tindex += 1\n\tif found:\n\t\treturn index\n\telse:\n\t\treturn \"item not in list\"", "def last_index_of(item, arr):\n i = len(arr) - 1\n if i == 0:\n return -1\n if arr[i] == item:\n return i\n return last_index_of(item, arr[:i])", "def indexOf(list, predicate):\n for i, x in enumerate(list):\n if predicate(x):\n return i\n return -1", "def index_or_none(l, item, *args):\n\n try:\n idx = l.index(item, *args)\n except ValueError:\n idx = None\n return idx", "def find_item_index(self, user_list, urn):\n for i in range(0, len(user_list)):\n user = user_list[i]\n if str(urn) == user[i][1:user.find(':')]:\n return i\n\n return -1", "def search_sorted_1(data, target):\n count = 0\n for item in data:\n count += 1\n if item == target:\n return count # Found it\n return count # Didn't find it", "def first_occ_index(array, n_at_least):\n curr_found_false = 0\n curr_index = 0\n for index, elem in enumerate(array):\n if not elem:\n if curr_found_false == 0:\n curr_index = index\n curr_found_false += 1\n if curr_found_false == n_at_least:\n return curr_index\n else:\n curr_found_false = 0", "def find_in_list(_value, _list):\n return np.array([i for i, v in enumerate(_list) if v == _value], dtype=int)", "def linear_search(L, v):\n\tfor i in range(len(L)):\n\t\tif L[i] == v:\n\t\t\treturn i\n\treturn -1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fix given path according to os path sep
def fix_path(path):
    not_sep = "/" if os.path.sep == "\\" else "\\"
    return path.replace(not_sep, os.path.sep)
[ "def convert_directory_separator(path):\n if os.path.sep != '/':\n path = path.replace(os.path.sep, '/')\n\n return '/' + path", "def fix_path(path):\n correct_path = '/' + path if not path.startswith('/') else path\n correct_path = correct_path + '/' if not correct_path.endswith('/') else correct_path\n return correct_path", "def normalize_file_path(path):\n return path.rstrip('/')", "def normalize(path:str, force:bool=False) -> str:\r\n other = ''.join(i for i in '\\/' if not i==os.sep)\r\n if force:\r\n if sys.platform == 'win32':\r\n forbiddens\r\n new = ''.join(i for i in path if not i in forbiddens)\r\n else:\r\n new = path[:]\r\n if other in path:\r\n terms = []\r\n for term in path.split(os.sep):\r\n if other in term:\r\n for part in term.split(other):\r\n terms.append(part)\r\n else:\r\n terms.append(term)\r\n new = os.path.join(*terms)\r\n return new", "def __good_path(path):\n # type: (str) -> str\n path = path.replace(\"\\\\\", \"/\")\n path = path.replace(\"//\", \"/\")\n return path", "def fix_path_hacks(cls, path):\n return path", "def clean_path(self, path):\n return path.replace('../', '').lstrip('/')", "def _normalize(path):\n return path.replace(\" \", \"_\").lower().replace(\"//\", \"/\")", "def _path_for_main_spec(path):\n return path.replace(os.path.sep, \"/\")", "def clean_path(self, pth):\n pth = os.path.normpath(os.path.join(self._cwd, pth))\n return pth", "def parse_config_path(fpath):\n if not(('{CORE}' in fpath) or ('{DATA}' in fpath) or ('{PARENT}' in fpath)):\n return fpath\n # Default path; go to work.\n # Make all separators the same.\n fpath = fpath.replace('\\\\', '/')\n fpath_s = fpath.split('/')\n new_path = os.path.sep.join(fpath_s)\n new_path = new_path.replace('{PARENT}', str(pathlib.Path(os.path.dirname(__file__)).parent))\n new_path = new_path.replace('{DATA}', os.path.join('{CORE}', 'data'))\n new_path = new_path.replace('{CORE}', os.path.dirname(__file__))\n return new_path", "def ensure_slash(path):\n return path.rstrip('/') + '/'", "def _norm_with_dir(path):\n normed = normpath(path)\n if path.endswith(os_sep):\n return normed + os_sep\n return normed", "def _normalise_path(self, path: str) -> str:\n return os.path.normpath(os.path.normcase(path))", "def sanitize_path(path):\n home_folder = os.path.expanduser('~')+\"/\"\n\n if path.endswith('/'):\n path = path[:-1]\n\n return path.replace(\"~\", home_folder)", "def path_trans(path):\n\n\tglobal is_cygwin\n\n\tif not is_cygwin or not path.startswith('/cygdrive/'):\n\t\treturn path\n\n\t# too slow:\n\t# subprocess.check_output('/usr/bin/cygpath -w ' + shlex.quote(path), shell = True, universal_newlines = True).strip()\n\n\tpath = path[10] + ':\\\\' + path[12:].replace('/', '\\\\')\n\treturn path", "def clean_path(path):\n if sys.platform in [\"win32\", \"cygwin\", \"msys\"]:\n path_clean = re.sub(r\"[<>:|?*\\\"\\/\\\\]\", \"-\", path)\n # This checks for strings that end in ... 
or similar,\n # weird corner case that affects fewer than 0.1% of titles\n path_clean = re.sub(r\"(.)[.]\\1+$\", \"-\", path_clean)\n return path_clean\n return path", "def translate_path(self, path):\n # abandon query parameters\n path = path.split('?',1)[0]\n path = path.split('#',1)[0]\n path = posixpath.normpath(urllib.unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = os.path.dirname(__file__)\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir): continue\n path = os.path.join(path, word)\n return path", "def translate_path(self, path):\n # abandon query parameters\n path = path.split('?',1)[0]\n path = path.split('#',1)[0]\n path = posixpath.normpath(urllib.unquote(path))\n path=path.decode('utf-8')\n words = path.split('/')\n words = filter(None, words)\n path=self.prefixpath\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word in (os.curdir, os.pardir): continue\n path = os.path.join(path, word)\n return path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Single-target Dijkstra's will terminate once a path is found from the source s to the target t. It has the same worst-case complexity but can be faster than normal Dijkstra's
def single_target_dijkstra(adjacency_list, weights, s, t):
    unfinished = {}
    for u in adjacency_list:
        unfinished[u] = float('inf')
    unfinished[s] = 0
    predecessors = {s: None}
    path_costs = {}
    while unfinished:
        u = min(unfinished, key=unfinished.get)
        path_costs[u] = unfinished[u]
        del unfinished[u]
        if u == t:  # terminate early if you find the target
            return path_costs[t], predecessors
        for v in adjacency_list[u]:
            if path_costs[u] + weights[(u, v)] < unfinished[v]:
                unfinished[v] = path_costs[u] + weights[(u, v)]
                predecessors[v] = u
    return path_costs[t], predecessors
[ "def shortest_path(source, target):\n ##for testing\n # source=person_id_for_name(\"Lupita Nyong'o\")\n # target=person_id_for_name(\"Joan Cusack\")\n ## \n explored=[]\n frontier=QueueFrontier()\n init_state=Node(state=source,parent=None,action=None)\n frontier.add(init_state)\n success=None\n while frontier.empty ==False or success is None:\n if frontier.contains_state(target) == True:\n success=frontier.search(target)\n print(\"success\")\n else:\n removal=frontier.remove_all()\n for node in removal:\n for i in neighbors_for_person(node.state):\n n=Node(i[1],node.state,i[0])\n if any(node==n for node in explored)==False and\\\n frontier.contains_state(n.state)==False:\n frontier.add(n)\n explored.append(node)\n removal.clear()\n if frontier.empty==True:\n return None\n elif success.parent==source:\n return [(success.action,success.state)]\n else:\n movie_path=[(success.action,success.state)]\n node_path=[success]\n while node_path[0].parent != source:\n p_node=search_for_parent(explored,node_path[0].parent) \n movie_path.insert(0,(p_node.action,p_node.state))\n node_path.insert(0,p_node)\n return movie_path", "def dijkstra(adj, cost, s, t):\n\n V = range(len(adj)) # set of nodes, sequentially numbered\n # Note!!: this is not entirely general - there is no quarantee that\n # the graph node list is sequentially numbered from 0 to n-1\n\n # for all u∈V:\n # dist[u] ← ∞, prev[u] ← nil\n # dist[v] will be an upper bound on the actual distance from s to v.\n dist = [approxInf for u in V] # initialize dist to completely unknown for all u∈V\n prev = [None for u in V]\n # visited = [False for u in V] # this is represented as dist[u] = infinite\n\n dist[s] = 0 # zero distance to start node\n\n # H ← MakeQueue(V ) {dist-values as keys} # this is the Unknown region, not(R)\n # the set of unknown (unvisited, or not fully visited) vertices\n H = make_queue(V) #, dist)\n\n while len(H) > 0: # H, set of unknown vertices is not empty:\n # On each iteration we take a vertex outside of R (in H) with the minimal dist-value,\n # add it to R, and relax all its outgoing edges.\n u = extract_min(H, dist) # [u, d] = extract_min(H)\n # Lemma: When a node u is selected via ExtractMin, dist[u] = d(S,u), actual minimum distance.\n # First node to be extracted will be the source s (since dist[s]==0)\n # Should we stop early if min node u == t (t is moved to known set R before unknown H is exhausted)?\n for i in range(len(adj[u])): # for all (u,v) ∈ E: Relax(u,v) # relax all _outgoing_ edges from u\n # edge relaxation procedure for an edge (u,v) just checks whether\n # going from s to v through u improves the current value of dist[v].\n v = adj[u][i] # v in adj[u]\n if dist[v] > (dist[u] + cost[u][i]): # + w(u,v):\n dist[v] = dist[u] + cost[u][i] # update the distance\n prev[v] = u # update the predecessor node\n # ChangePriority(H , v , dist[v]) # rather than priority queue, update dist and scan array for min dist\n\n return dist[t]", "def _compute_all_shortest_paths(graph, source, target, exclude_edge=False):", "def dijkstra(graph, start, end):\n\n\n #init S ensemble with start_node inside\n S = [start]\n #defin V ensemble with all node of graph\n V = [x for x in range(len(graph))]\n #init distance dictionnary\n distance = {}\n #init previous history dictionnary\n previous = {}\n\n #init all of node distances to inf exept for start node\n for v in V:\n if v != start:\n distance[v] = inf\n\n #loop until S != V\n while len(S) != len(V):\n #for all element of V exept for the element which are in S\n for v in 
(set(V)-set(S)):\n #init uc as the last element added in S\n uc = S[-1]\n\n #if uc == 0 that signified we are in the start node\n if uc == 0:\n\n #add set uc as previous[v] if the new distance if shortest than the current\n if 0+graph[uc][v] < distance[v]:\n previous[v] = uc\n\n #set the v distance as the min beetween the current v distance and the edge of uc and v.\n distance[v] = min(distance[v], 0+graph[uc][v])\n\n else:\n #add set uc as previous[v] if the new distance if shortest than the current\n if distance[uc]+graph[uc][v] <distance[v]:\n previous[v] = uc\n #set the v distance as the min beetween the current v distance and the distance of u + the edge of uc and v.\n distance[v] = min(distance[v], distance[uc]+graph[uc][v])\n\n #find the node with the shortest distance\n #init vmin as inf\n vmin = inf\n x = inf\n #loop for all v in V / S\n for v in (set(V)-set(S)):\n #if v distance < vmin\n if distance[v] < vmin:\n vmin = distance[v]\n # x = the node with the shortest distance\n x = v\n\n\n # UPDATE STATEMENT\n # define new uc as x\n uc = x\n # add new uc to S\n S.append(uc)\n\n #define total_cost to cost of the ending distance\n total_cost= distance[end]\n #init shortest path\n path = []\n\n #loop to insert in path the previous node from end's node\n while(end != start):\n path.insert(0, end)\n end = previous[end]\n path.insert(0, start)\n\n #return the shortest_way and total cost of dijkstra from start to end\n return path, total_cost", "def quickestSafePath(self,source,target):\r\n try:\r\n source = int(source)\r\n target = int(target)\r\n except:\r\n print(\"source and target must be numbers\")\r\n possible=False\r\n discovered=[[source,0]]\r\n finalized=[]\r\n vertices=[-2 for i in range (self.largest+1)]\r\n vertices[source]=0\r\n while len(discovered)>0:\r\n if vertices[target]==-1:\r\n possible=True\r\n break\r\n u=discovered[0][0]\r\n for edgeIndex in range (len(self.graph[u].adj)):\r\n edge=self.graph[u].adj[edgeIndex]\r\n vPos = vertices[edge.v]\r\n uPos = vertices[edge.u]\r\n #if u and v don't have cameras and this edge doesn't have a toll\r\n valid=self.graph[u].hasCamera==False and edge.hasToll==False and self.graph[edge.v].hasCamera==False\r\n\r\n if vPos==-2 and valid:\r\n item=[edge.v,discovered[uPos][1]+edge.w,edge.u]\r\n discovered=self.heapInsert(discovered,item,vertices)\r\n else:\r\n if valid:\r\n if discovered[vPos][1]>discovered[uPos][1]+edge.w:\r\n if vPos!=-1:\r\n discovered[vPos][1]=discovered[uPos][1]+edge.w\r\n discovered[vPos][2]=edge.u\r\n finalized.append(discovered[0])\r\n self.heapRemoveSmallest(discovered,vertices)\r\n if vertices[target] == -1:\r\n possible = True\r\n if possible:\r\n quickest=self.findPath(finalized,source)\r\n else:\r\n return [[],-1]\r\n time=finalized[len(finalized)-1][1]\r\n return (quickest,time)", "def shortest_path(self, source, target):\n if source not in self.d or target not in self.d:\n raise KeyError(\"one or both nodes are not in Graph\")\n path = dict()\n visited = []\n queue = [source]\n marked = {source}\n #include endpoints in FINAL_PATH\n FINAL_PATH = []\n currentNode = source\n\n #### Perform the search\n while currentNode is not target:\n currentNode = queue.pop(0)\n neighborNodeSet = self.d.get(currentNode)\n #add neighbors of currentNodvisited.append(currentNode)e to Queue if they aren't in marked,\n #add the neighbors of currentNode to the marked list if they aren't in it already\n # pdb.set_trace()\n for i in neighborNodeSet:\n if i not in marked:\n marked.add(i)\n queue.append(i)\n #when a element goes 
into marked, add it to the path dictionary backwards\n # I could use add attribute for the path dictionary, not update, if I don't want it to overwrite which node arrived first to a given node\n path.update({i:currentNode})\n\n #add currentNode to visited and reset currentNode\n visited.append(currentNode)\n # print(path)\n\n #### Evaluate the search using the dictionary\n currentNode = target\n while True:\n if currentNode == source:\n FINAL_PATH.append(currentNode)\n break\n FINAL_PATH.append(currentNode)\n currentNode = path.get(currentNode)\n\n #reverse the path\n return FINAL_PATH[::-1]", "def dijkstra(self):\n\n # Initialise the needed variables\n graphs, edges = self.maze_to_graph()\n start = graphs[str(self.maze.start[0]) + \":\" + str(self.maze.start[1])]\n target = graphs[str(self.maze.target[0]) + \":\" + str(self.maze.target[1])]\n\n # In actual_ay all possible next nodes are stored\n actual_way = {\n str(start): NodeGraph(start, None, None)\n }\n # node_way contains all already visited nodes\n node_way = {}\n\n while str(target) not in actual_way.keys():\n # Takes the node with smallest length, that isn't visited\n neares_node = actual_way[min(actual_way, key=lambda k: actual_way[k].get_length())]\n # Create all next possible Nodes, from the actual Node, with the edges that can be go from the actual node\n for edge in neares_node.itself.edges:\n node_to_add = neares_node.itself.edges[edge].node_two\n new_node = NodeGraph(node_to_add, neares_node, neares_node.itself.edges[edge])\n\n # Add only if not in nodes to visit and not in visited nodes so no node get's visited two times.\n # If it is already visited there is an shorter way to reach this Node and cause the algorithm looks for\n # the shortest way its not in need to visit this node again\n if str(new_node.itself) not in list(actual_way.keys()) and \\\n str(new_node.itself) not in list(node_way.keys()):\n new_node.add_length(neares_node.itself.edges[edge].get_length())\n actual_way[str(new_node.itself)] = new_node\n\n # Add the actual node to node_way and remove it from possible next waypoints\n node_way[str(neares_node.itself)] = neares_node\n actual_way.pop(str(neares_node.itself))\n\n # For visualisation makes. 
Start by target, because the linked List works with previous Nodes\n way = []\n point = actual_way[str(target)]\n\n # Starts to search for start of maze\n while str(point.itself) != str(start):\n way.append(point)\n point = point.privious\n\n # Add the start to way\n way.append(node_way[str(start)])\n\n # Change value of target, only for visualisation\n self.maze.maze[self.maze.target[0]][self.maze.target[1]] = 4\n\n # Reverse the list of waypoints and go through it, that means start at start and at end\n for node in way[::-1]:\n if node.itself and node.privious:\n # Visualise each edge with time delay.\n edge_way = node.edge.get_way()\n self.maze.maze[node.edge.node_one.y][node.edge.node_one.x] = 2\n for wp in edge_way:\n self.maze.maze[wp[0]][wp[1]] = 5\n time.sleep(self.maze.delay)", "def k_shortest_paths(G, source, target, k=1, weight='weight'):\n if source == target:\n return ([0], [[source]])\n\n length, path = nx.single_source_dijkstra(G, source, weight=weight)\n# print(length,path)\n if target not in length:\n print(\"node %s not reachable from %s\" % (target, source))\n return [],[]\n # raise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n\n lengths = [length[target]]\n paths = [path[target]]\n c = count()\n B = []\n G_original = G.copy()\n\n for i in range(1, k):\n for j in range(len(paths[-1]) - 1):\n spur_node = paths[-1][j]\n root_path = paths[-1][:j + 1]\n\n edges_removed = []\n for c_path in paths:\n if len(c_path) > j and root_path == c_path[:j + 1]:\n u = c_path[j]\n v = c_path[j + 1]\n if G.has_edge(u, v):\n edge_attr = G.edges[u,v]\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n for n in range(len(root_path) - 1):\n node = root_path[n]\n for u in G.nodes:\n if [u,node] in G.edges:\n edge_attr = G.edges[u,node]\n G.remove_edge(u, node)\n edges_removed.append((u, node, edge_attr))\n if [node,u] in G.edges:\n edge_attr = G.edges[node,u]\n G.remove_edge(node,u)\n edges_removed.append((node,u, edge_attr))\n spur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, weight=weight)\n if target in spur_path and spur_path[target]:\n total_path = root_path[:-1] + spur_path[target]\n total_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]\n heappush(B, (total_path_length, next(c), total_path))\n\n for e in edges_removed:\n u, v, edge_attr = e\n if weight:\n G.add_edge(u, v, weight = edge_attr[weight])\n\n if B:\n (l, _, p) = heappop(B)\n lengths.append(l)\n paths.append(p)\n else:\n break\n G = G_original\n\n return (lengths, paths)", "def _compute_shortest_path(graph, source, target, distance=None,\n exclude_edge=False):\n pass", "def yens_shortest_paths(G, start, target, max_paths=10):\n letters = list(string.ascii_letters)\n shortestPaths = []\n k = 0\n try:\n paths = list(itertools.islice(nx.shortest_simple_paths(G, start, target), max_paths))\n except Exception:\n raise PyeMapShortestPathException(\"No paths between \" + str(start) + \" and \" + str(target) + \" were found.\")\n for k in range(0, len(paths)):\n path = paths[k]\n sum = 0\n weights = []\n for i in range(0, len(path) - 1): # sum up edge weights\n sum += (G[path[i]][path[i + 1]]['weight'])\n weights.append(G[path[i]][path[i + 1]]['weight'])\n path = ShortestPath(path, weights, sum)\n shortestPaths.append(path)\n shortestPaths = sorted(shortestPaths)\n for i in range(0, len(shortestPaths)):\n path = shortestPaths[i].path\n if i == 0: # shortest path gets bolder edges\n for j in range(len(path) - 1):\n G[path[j]][path[j + 
1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['style'] = 'solid'\n G.nodes[path[j]]['penwidth'] = 6.0\n G.nodes[path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['color'] = '#778899FF'\n # make the nodes look opaque if they are connected to the source\n if len(G.nodes[path[j]]['fillcolor']) != 9:\n G.nodes[path[j]]['fillcolor'] += 'FF'\n G.nodes[path[j]]['color'] = '#708090FF'\n if len(G.nodes[path[j + 1]]['fillcolor']) != 9:\n G.nodes[path[j + 1]]['fillcolor'] += 'FF'\n G.nodes[path[j + 1]]['color'] = '#708090FF'\n else:\n for j in range(len(path) - 1):\n G[path[j]][path[j + 1]]['penwidth'] = 6.0\n G[path[j]][path[j + 1]]['style'] = 'solid'\n G.nodes[path[j]]['penwidth'] = 6.0\n G.nodes[path[j + 1]]['penwidth'] = 6.0\n if G[path[j]][path[j + 1]]['color'] != '#778899FF':\n G[path[j]][path[j + 1]]['color'] = '#7788997F'\n # make the nodes look opaque if they are connected to the source\n if len(G.nodes[path[j]]['fillcolor']) != 9:\n G.nodes[path[j]]['fillcolor'] += '7F'\n G.nodes[path[j]]['color'] = '#7080907F'\n if len(G.nodes[path[j + 1]]['fillcolor']) != 9:\n G.nodes[path[j + 1]]['fillcolor'] += '7F'\n G.nodes[path[j + 1]]['color'] = '#7080907F'\n shortestPaths[i].set_id(\"1\" + letters[i])\n br = Branch(1, shortestPaths[0].path[-1])\n for pt in shortestPaths:\n br.add_path(pt)\n return [br]", "def dijkstra(self,source:int,end:int,delivery_path:tuple) -> tuple:\r\n delivery_path_used = False\r\n self.vertices[source].cost = 0\r\n discovered = MinHeap(self.vertices) #create MinHeap and add all vertices into MinHeap\r\n discovered.rise(discovered.indx[source]) #rise the source vertex\r\n while (len(discovered)) > 0 :\r\n u = discovered.serve() \r\n\r\n if u.id == end: #reached our end, terminate early\r\n path,delivery_path_used = self.get_path(source,end,delivery_path) #backtrack to get path \r\n return (u.cost,path,delivery_path_used)\r\n\r\n u.visited = True\r\n for edge in u.edges: #edge relaxation\r\n v = self.vertices[edge.v]\r\n if v.discovered == False:\r\n v.discovered = True\r\n v.cost = u.cost + edge.w\r\n v.previous = u\r\n position = discovered.indx[v.id] #calculate positon of vertex v in heap\r\n discovered.rise(position)\r\n elif v.visited == False:\r\n if v.cost > u.cost + edge.w:\r\n v.cost = u.cost + edge.w\r\n v.previous = u\r\n position = discovered.indx[v.id] #calculate positon of vertex v in heap\r\n discovered.rise(position) \r\n \r\n path,delivery_path_used = self.get_path(source,end,delivery_path) #backtrack to get path\r\n return (self.vertices[end].cost, path, delivery_path_used)", "def option0_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n return Shortest_path \n else:\n self.logger.info('No path from %s to %s', S, D)\n Shortest_path = []\n return Shortest_path", "def _compute_yen_shortest_paths(graph, target, n,\n distance, exclude_edge=False):\n pass", "def test_shortest_path(self):\n self.func2()\n for x in self.list_algo:\n nodeSize = x.get_graph().v_size()\n for v in x.get_graph().get_v_keys():\n for ni in x.get_graph().get_v_keys():\n if v != ni and ni in x.get_graph().all_out_edges_of_node(v).keys():\n id = v\n run = 0\n for theList in x.shortest_path(v,ni)[1]:\n if run > 0:\n self.assertTrue(theList in x.get_graph().all_out_edges_of_node(id).keys())\n id = theList\n run += 1\n id = v\n run = 0\n run = 0\n for theList in x.shortest_path(v,nodeSize*5):\n if run == 0:\n self.assertEqual(theList,-1)\n if run == 1:\n self.assertEqual(len(theList),0)\n run += 1", "def 
find_shortest_path(self, start_id, target_id):\n if not self.contains_id(start_id) or not self.contains_id(target_id):\n raise KeyError(\"One or both vertices are not in the graph!\")\n\n # vertex keys we've seen before and their paths from the start vertex\n vertex_id_to_path = {\n start_id: [start_id] # only one thing in the path\n }\n\n # queue of vertices to visit next\n queue = deque() \n queue.append(self.get_vertex(start_id))\n\n # while queue is not empty\n while queue:\n current_vertex_obj = queue.popleft() # vertex obj to visit next\n current_vertex_id = current_vertex_obj.get_id()\n\n # found target, can stop the loop early\n if current_vertex_id == target_id:\n break\n\n neighbors = current_vertex_obj.get_neighbors()\n for neighbor in neighbors:\n if neighbor.get_id() not in vertex_id_to_path:\n current_path = vertex_id_to_path[current_vertex_id]\n # extend the path by 1 vertex\n next_path = current_path + [neighbor.get_id()]\n vertex_id_to_path[neighbor.get_id()] = next_path\n queue.append(neighbor)\n # print(vertex_id_to_path)\n\n if target_id not in vertex_id_to_path: # path not found\n return None\n\n return vertex_id_to_path[target_id]", "def DAG_shortest_path(G, s):\n\tp, topological_sorted = CDFS(G)\n\tp = None\n\tinitialize_single_source(G, s)\n\tprint(topological_sorted)\n\tfor vertex in topological_sorted:\n\t\tfor v in G.adj_list[vertex]:\n\t\t\tRelax(G, vertex, v)\n\treturn topological_sorted[-1]", "def computeShortestPath(self):\n for row in range(len(self.graph)):\n # track row, which vertices to compute greedy Dijkstra\n v = self.graph[row][0][0] # key to sd list\n\n for ele in range(1, len(self.graph[row])):\n if len(self.graph[row][ele]) == 2:\n self.computeGreedyDijkstra(v, self.graph[row][ele])", "def find_fastest_path(file_name,src,dest): \n #initilized parameters\n visited=[]\n distances={}\n predecessors={}\n\n #create dic that represent the graph edges for each vertex\n graph = create_graph(file_name)\n \n #sanity checks\n if sanity_checks(graph,src,dest)==False:\n return\n\n #initial run, initializes the cost of source node\n distances[src]=0\n pq = [(0, src)] \n \n while len(pq) > 0:\n current_distance, current_vertex = heapq.heappop(pq) \n if current_vertex == dest :\n break\n\n # visit the neighbors\n for neighbor, weight in graph[current_vertex].items():\n if neighbor not in visited: \n new_distance = current_distance + int(weight)\n #check if new distance are shorter then calculate before \n if new_distance < distances.get(neighbor,float('inf')):\n distances[neighbor] = new_distance \n predecessors[neighbor] = current_vertex \n heapq.heappush(pq, (new_distance, neighbor)) \n # mark as visited\n visited.append(current_vertex)\n \n path=reversed_path(predecessors,dest)\n readable =create_path_string(path,graph) \n print(\"path: \"+readable+\", cost=\"+str(distances[dest]))", "def shortest_path_search(start, successors, is_goal):\n if is_goal(start):\n return [start]\n explored = set()\n frontier = [[start]]\n while frontier:\n path = frontier.pop(0)\n s = path[-1]\n for (state, action) in successors(s).items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if is_goal(state):\n return path2\n else:\n frontier.append(path2)\n return Fail" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that a plot update is requested (scheduled for the next cycle of the timer) and that the timer is active, which means the updates are going to happen. Once that is assured, do the updates immediately instead of waiting one cycle (that would slow down the test)
def check_update_is_requested_and_apply(self):
    # check
    self.assertTrue(self.plot.update_required)
    self.assertTrue(self.plot.plot_updater.active)
    # update
    self.plot._check_scheduled_updates()
[ "def update_plotting_chart(self, extraInfo = None):\n \n self.plot_lock.acquire()\n desired_value = self.Monitor.desired_value # The desired value of the sensor\n range_warning = self.Monitor.range_warning # The range which if crosses we send email\n range_stop = self.Monitor.range_stop # The range which if crosses we stop\n \n# print (\"Gonna plot\")\n self.data_lock.acquire()\n data, time = np.array(self.data_buffer), np.array(self.time_buffer)\n self.data_lock.release()\n \n if (type(data) == type(None)):\n self.plot_lock.release()\n return True ; ## For the task manager ?\n if (type(time) == type(None)):\n self.plot_lock.release()\n return True ; ## For the task manager ?\n ## Select the start and end index to plot\n s_indx = max([data.size - self.show_window, 0])\n e_indx = data.size -1\n \n if(self.first_plot_flag):\n ## Remove the text box\n self.initial_text_data.set_visible(False)\n \n if(len(data) < 2): # Plot 2 data minimum\n self.plot_lock.release()\n return True ; ## For the task manager ?\n \n self.first_plot_flag = False\n \n ##------------------------------------------\n #### Warning bands \n ax_aux, plots_data_upper_warning_band = gl.plot([time[s_indx],time[e_indx]], [desired_value + range_warning, desired_value + range_warning], ax = self.data_axes,\n color = \"y\", lw = 3, ls=\"--\", return_drawing_elements = True, legend = [\"Warning email\"], loc = \"upper right\"); #, legend = [\"Warning area\"]\n \n ax_aux, plots_data_lower_warning_band = gl.plot([time[s_indx],time[e_indx]], [desired_value - range_warning, desired_value - range_warning], ax = self.data_axes,\n color = \"y\", lw = 3, ls=\"--\", return_drawing_elements = True);\n \n #### Error bands \n ax_aux, plots_data_upper_error_band = gl.plot([time[s_indx],time[e_indx]], [desired_value + range_stop, desired_value + range_stop], ax = self.data_axes,\n color = \"r\", lw = 3, ls=\"--\", return_drawing_elements = True, legend = [\"Stop\"], loc = \"upper right\"); #, legend = [\"Warning area\"]\n \n ax_aux, plots_data_lower_error_band = gl.plot([time[s_indx],time[e_indx]], [desired_value - range_stop, desired_value - range_stop], ax = self.data_axes,\n color = \"r\", lw = 3, ls=\"--\", return_drawing_elements = True);\n \n \n ax_aux, plot_time_series = gl.plot(time[s_indx:e_indx+1], data[s_indx:e_indx+1], ax = self.data_axes,\n labels = [\"Cleaning Procedure: \" + self.cleaning_ID, self.time_now.strftime(\"%B %d, %Y\"), \"PH\"], color = \"k\", xaxis_mode = \"intraday\", return_drawing_elements = True,\n loc = \"upper right\");\n \n gl.set_fontSizes(ax = self.data_axes, title = 25, xlabel = 20, ylabel = 20, \n legend = 15, xticks = 15, yticks = 15)\n \n ## Save the elements so that we can modify them later\n \n self.plots_data = [plot_time_series[0], plots_data_upper_warning_band[0], plots_data_lower_warning_band[0], plots_data_upper_error_band[0], plots_data_lower_error_band[0]]\n \n\n \n else:\n# print self.plots_data\n self.plots_data[0].set_xdata(time[s_indx:e_indx+1])\n self.plots_data[0].set_ydata(data[s_indx:e_indx+1])\n \n ## Warning bands\n self.plots_data[1].set_xdata([time[s_indx],time[e_indx]])\n self.plots_data[1].set_ydata([desired_value + range_warning, desired_value + range_warning])\n \n self.plots_data[2].set_xdata([time[s_indx],time[e_indx]])\n self.plots_data[2].set_ydata([desired_value - range_warning, desired_value - range_warning])\n \n ## Error bands\n self.plots_data[3].set_xdata([time[s_indx],time[e_indx]])\n self.plots_data[3].set_ydata([desired_value + range_stop, desired_value + 
range_stop])\n self.plots_data[4].set_xdata([time[s_indx],time[e_indx]])\n self.plots_data[4].set_ydata([desired_value - range_stop, desired_value - range_stop])\n# gl.set_xlim(ax = self.data_axes, X = time[s_indx:e_indx+1], xmin = np.min(time[s_indx:e_indx+1]), xmax = np.max(time[s_indx:e_indx+1]))\n# gl.set_ylim(ax = self.data_axes, Y = data[s_indx:e_indx+1], ymin =np.min(data[s_indx:e_indx+1]),ymax = np.max(data[s_indx:e_indx+1]))\n \n\n# gl.set_zoom(X = time[s_indx:e_indx+1],Y = data[s_indx:e_indx+1],xlimPad = [0.2,0.2] ,ylimPad = [0.1, 0.1])\n# gl.set_zoom(X = time[s_indx:e_indx+1],Y = data[s_indx:e_indx+1],xlimPad = [0.2,0.2] ,ylimPad = [0.1, 0.1])\n# gl.set_zoom(X = time[s_indx:e_indx+1],Y = data[s_indx:e_indx+1],xlimPad = [0.2,0.2] ,ylim = [0, 14])\n gl.set_zoom(X = time[s_indx:e_indx+1],Y = data[s_indx:e_indx+1],xlimPad = [0.2,0.2] ,ylim = [0, 10])\n pass\n# self.data_axes.update()\n# self.data_axes.draw(self.plots_data[0])\n plt.draw()\n# l.set_ydata(ydata)\n# ax.set_ylim(np.min(ydata), np.max(ydata))\n# plt.draw()\n \n# self.fig.canvas.draw()\n# #### RETOQUES ########\n# if (len(self.data_buffer) > 1):\n# gl.set_zoom(X = time[s_indx:e_indx+1],Y = data[s_indx:e_indx+1],xlimPad = [0.2,0.2] ,ylimPad = [0.1, 0.1])\n \n#\n # if (update_data.index == 1000):\n # rt.stop()\n # information.serial.close()\n self.check_monitoring(data[s_indx:e_indx+1])\n \n self.plot_lock.release()\n return True ; ## For the task manager ?", "def _configure_timers(self):\n self._timer_plot = QtCore.QTimer(self)\n self._timer_plot.timeout.connect(self._update_plot)\n # self.timer = QtCore.QTimer()", "def testRefresherLoop(self):\n interval = 0\n self.testRefresh = QtCore.QTimer(interval=interval,timeout=self.testRefresh_exec)", "def on_redraw_timer(self, event):\r\n if ((not (self.paused or not self.running))\r\n and (len(RAW_Q)%frequency == 0)):\r\n readPort()\r\n nxt = len(self.data) #this is set for the case that\r\n #PLOT_ARRAY is updating faster than the graph\r\n #is being drawn\r\n self.data.append(PLOT_ARRAY[nxt])\r\n self.draw_plot()", "def waitForRealTime(self):\n\t\tpass", "def _monitor_loop(self):\n # First check if monitor is allowed to start\n if self._busy or not self._allow_monitor:\n self.logger.warning('Monitor should only be run from GUI and not while Operator is busy')\n return\n try:\n # Preparations before running the monitor\n self.analog_monitor_1 = np.zeros(self.properties['monitor']['plot_points'])\n self.analog_monitor_2 = np.zeros(self.properties['monitor']['plot_points'])\n # self.analog_monitor_time = np.zeros(self.properties['monitor']['plot_points'])\n self.analog_monitor_time = np.arange(1-self.properties['monitor']['plot_points'], 1)*self.properties['monitor']['time_step']\n except:\n self.logger.error(\"'plot_points' or 'time_step' missing or invalid in config\")\n return\n self._busy = True # set flag to indicate operator is busy\n self._monitor_start_time = time()\n next_time = 0\n while not self._stop:\n timestamp = time() - self._monitor_start_time\n analog_in = self.instrument.read_analog() # read the two analog in channels\n # To keep the length constant, roll/shift the buffers and add the new datapoints\n self.analog_monitor_1 = np.roll(self.analog_monitor_1, -1)\n self.analog_monitor_2 = np.roll(self.analog_monitor_2, -1)\n self.analog_monitor_time = np.roll(self.analog_monitor_time, -1)\n self.analog_monitor_1[-1] = analog_in[0]\n self.analog_monitor_2[-1] = analog_in[1]\n self.analog_monitor_time[-1] = timestamp\n self._new_monitor_data = True\n # in stead of 
sleep, calculate when the next datapoint should be acquired and wait until that time arrives\n # this allows to keep the timing correct\n next_time += self.properties['monitor']['time_step']\n while time()-self._monitor_start_time < next_time:\n if self._stop: break # check for stop flag while waiting to move to next point\n self._stop = False # reset stop flag to false\n self._busy = False # indicate the operator is not busy anymore", "async def test_update(hass: HomeAssistant) -> None:\n entry = MockConfigEntry(domain=islamic_prayer_times.DOMAIN, data={})\n entry.add_to_hass(hass)\n\n with patch(\n \"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times\"\n ) as FetchPrayerTimes, freeze_time(NOW):\n FetchPrayerTimes.side_effect = [\n PRAYER_TIMES,\n NEW_PRAYER_TIMES,\n ]\n\n await hass.config_entries.async_setup(entry.entry_id)\n await hass.async_block_till_done()\n\n pt_data = hass.data[islamic_prayer_times.DOMAIN]\n assert pt_data.data == PRAYER_TIMES_TIMESTAMPS\n\n future = pt_data.data[\"Midnight\"] + timedelta(days=1, minutes=1)\n\n async_fire_time_changed(hass, future)\n await hass.async_block_till_done()\n assert pt_data.data == NEW_PRAYER_TIMES_TIMESTAMPS", "def delayed_update_flags(self):\n if self._update_list_timer.isActive():\n return\n\n self._update_list_timer.start(REFRESH_RATE)", "def start_timer(self):\n if not self.running:\n if self.powerSpectra.is_running:\n print('Cant update while power spectra is running.')\n else:\n conditions = {}\n # Starts the timer for updating the GUI\n conditions['devs'] = self.devices\n conditions['accuracy'] = self._session.monitorTimeresol/1000 # In seconds\n self.trap.startMonitor(conditions)\n self.ctimer.start(self._session.monitorRefresh)\n\n self.running = True\n else:\n self.stop_timer()", "def timer_update(self):\n if self.play_using_button:\n self.next_frame()", "def run_update(status_update):\n status = next(status_update)\n sleep(1)\n if status != 100:\n print_progress(status)\n run_update(status_update)\n elif status == 100:\n print_progress(100)\n _print('DONE')\n return", "def check_update(self):\n\n if time.time() - self._last_update_check >= self.frametime:\n # A framerate occurs! 
Check if it was too long ago\n if time.time() - self._last_update_check >= self._reset_timeout:\n # Reset it\n self._last_update_check = time.time()\n else:\n self._last_update_check += self.frametime\n return True\n return False", "def mpUpdateLoop( self ):\n assert self.multproc, \"only to be used when multiprocessing\"\n \n q= self.multiprocQueue\n# plt.ion()\n# plt.draw()\n# plt.pause(.001)\n while True:\n try:\n print(\"here\")\n try:\n frame= q.get(timeout= 2)\n except:\n continue\n \n self.updatePlots( frame )\n if len( self._updateList ) == 0:\n sys.exit()\n print(\"here2\")\n except:\n traceback.print_exc()\n sys.exit()", "def _cron(self):\n while True:\n self.check_update()\n sleep(60)", "def set_update_timer(self):\n update_timer = qc.QTimer(self)\n update_timer.timeout.connect(self.refresh_camera_frames)\n update_timer.start(5)", "def tick(self):\n\n # Run activities whose time has come\n for act in self.__activities:\n if not act.iteratorHolder[0]:\n continue\n\n try:\n next(act.iteratorHolder[0])\n except StopIteration:\n act.cb()\n if act.repeating:\n act.iteratorHolder[0] = iter(xrange(act.period))\n else:\n act.iteratorHolder[0] = None\n\n return True", "def refresh(self):\n self.action = None\n self.check_clocks()\n if self.action is not None:\n self.step(self.action)", "def test_settle_time(self):\n self.do_configure(axes_to_scan=[\"x\", \"y\"], settle=0.01)\n self.do_check_output_settle()", "def tick(self):\n # Run activities whose time has come\n for act in self.__activities:\n if not act.iteratorHolder[0]:\n continue\n\n try:\n next(act.iteratorHolder[0])\n except StopIteration:\n act.cb()\n if act.repeating:\n act.iteratorHolder[0] = iter(xrange(act.period-1))\n else:\n act.iteratorHolder[0] = None\n\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test register intent files using register_intent.
def test_register_intent_intent_file(self):
    self._test_intent_file(SimpleSkill6())
[ "async def test_send_intent(self):\n with patchers.patch_connect(True)[self.PATCH_KEY], patchers.patch_shell('output\\r\\nretcode')[self.PATCH_KEY]:\n result = await self.ftv._send_intent(\"TEST\", constants.INTENT_LAUNCH)\n self.assertEqual(getattr(self.ftv._adb, self.ADB_ATTR).shell_cmd, \"monkey -p TEST -c android.intent.category.LAUNCHER 1; echo $?\")\n self.assertDictEqual(result, {'output': 'output', 'retcode': 'retcode'})\n\n with patchers.patch_connect(True)[self.PATCH_KEY], patchers.patch_shell(None)[self.PATCH_KEY]:\n result = await self.ftv._send_intent(\"TEST\", constants.INTENT_LAUNCH)\n self.assertEqual(getattr(self.ftv._adb, self.ADB_ATTR).shell_cmd, \"monkey -p TEST -c android.intent.category.LAUNCHER 1; echo $?\")\n self.assertDictEqual(result, {})", "def test_update_asset_device_registration(self):\n pass", "def batch_register(filename, test, update_if_exists):\n batch_register = commands.get_client().batch_register(filename, test,\n update_if_exists=update_if_exists)\n click.echo(json.dumps(batch_register, indent=2))", "def test_patch_asset_device_registration(self):\n pass", "def test_a_register_device_for_loan_license(self):\n self.status.register(self.status.DEVICEID1, self.status.DEVICENAME1)", "def register(filename, title, locations, replaces, test, json):\n mc = commands.get_client()\n kwargs = parse_none_values([\n ('replaces', replaces, None),\n ('locations', locations.split(',') if locations else None, []),\n ])\n minid = mc.register_file(filename, title=title, test=test, **kwargs)\n print_minids(minid.data, output_json=json)", "def test_register_route_request(self):\n pass", "def test_install_files(runpath):\n binary = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"example_binary.py\"\n )\n config = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"config.yaml\"\n )\n bfile = os.path.join(\n os.path.abspath(os.path.dirname(__file__)), \"binary_file\"\n )\n stdout_regexps = [\n re.compile(r\".*binary=(?P<binary>.*)\"),\n re.compile(r\".*command=(?P<command>.*)\"),\n re.compile(r\".*app_path=(?P<app_path>.*)\"),\n ]\n dst = runpath\n app = App(\n name=\"App\",\n binary=binary,\n pre_args=[sys.executable],\n install_files=[\n config,\n bfile,\n (config, os.path.join(dst, \"config.yaml\")),\n (config, os.path.join(\"rel_path\", \"config.yaml\")),\n ],\n stdout_regexps=stdout_regexps,\n shell=True,\n runpath=runpath,\n )\n with app:\n assert os.path.exists(app.extracts[\"binary\"])\n assert bool(json.loads(app.extracts[\"command\"]))\n assert os.path.exists(app.extracts[\"app_path\"])\n assert os.path.exists(os.path.join(app.app_path, \"etc\", \"binary_file\"))\n assert os.path.exists(os.path.join(app.app_path, \"etc\", \"config.yaml\"))\n assert os.path.exists(os.path.join(dst, \"config.yaml\"))\n assert os.path.exists(\n os.path.join(app.app_path, \"etc\", \"rel_path\", \"config.yaml\")\n )", "def test_register_calls_aggregator(self):\n self.registry.register_resource(\"a\", 1, \"flow\", \"flow_a_key\", {\"label\": \"flow_a\"})\n expected_args = [('resource', 'flow', 'flow_a_key'), {'label': 'flow_a'}]\n self.assertEqual(self.mock_aggregator.register_invocations, [expected_args])", "def test_generate_app_twice(self):\n app_name = 'test_generate_app_twice'\n self._generator.generate_new(app_name, self._project_dir)\n\n # Generate an app and add customized files\n app_folder_path = os.path.join(self._project_dir, app_name)\n with open(os.path.join(app_folder_path, 'new_file'), 'w') as new_file:\n new_file.write('123')\n\n # Generate the 
same app again. This should not have any effects.\n self._generator.generate_new(app_name, self._project_dir)\n files_list = os.listdir(app_folder_path)\n self.assertIn('new_file', files_list)", "def test_put_install_item(self):\n pass", "async def test_intent(\n self, intent: Text, translation: i18n.Translations = None, **kwargs\n ) -> Response:\n try:\n handler = self.intents[intent]\n except KeyError:\n raise KeyError(f\"Intent {intent} not found\")\n\n r = util.create_request(intent, **kwargs).with_translation(\n translation if translation else i18n.Translations()\n )\n\n return await invoke(handler, r)", "def test_post_installs(self):\n pass", "def test_sender_manifest_returns_http(self):\n with mock.patch(\"nmosnode.registry.PROTOCOL\", \"http\"):\n self.registry.register_resource(\"a\", 1, \"sender\", \"sender_a_key\", {\"manifest_href\": \"http://some-url.com\"})\n sender_resources = self.registry.list_resource(\"sender\")\n scheme = sender_resources[\"sender_a_key\"][\"manifest_href\"].split(\"://\")[0]\n self.assertEqual(scheme, \"http\")", "def add_intent(self, intent):\n self.intents.append(intent)", "def _install_test_actions(ctx):\n test_actions = []\n\n # For files, we run the file from the build tree.\n for test in ctx.attr.install_tests:\n for f in _depset_to_list(test.files):\n test_actions.append(\n struct(src = f, cmd = f.path),\n )\n\n return test_actions", "def test_create_asset_managed_device(self):\n pass", "def test_install_signal_handlers():\n install_signal_handlers()\n install_signal_handlers()\n install_signal_handlers()", "def register_callback():\n query_params = bp.current_request.query_params\n query_params = query_params if query_params else dict()\n\n code = query_params.get(\"code\", None)\n if code is None:\n return Response(\n body='{\"error\": \"The GitHub application-manifest code is missing.\"}',\n status_code=HTTPStatus.EXPECTATION_FAILED,\n headers={\"Content-Type\": \"text/html\"},\n )\n\n o = get_configured_octokit()\n o = o.apps.create_from_manifest(code=code)\n\n config.save_app_registration(o.json)\n\n return Response(\n body=\"\",\n status_code=HTTPStatus.SEE_OTHER,\n headers={\"Location\": urljoin(f'{o.json[\"html_url\"]}/', \"installations/new\")},\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Assert that a translatable list can be loaded from dialog and locale.
def test_translate_locations(self):
    # Check that translatables can be loaded from the dialog directory
    s = SimpleSkill1()
    s.root_dir = abspath(join(dirname(__file__), 'translate', 'in-dialog/'))
    lst = s.translate_list('good_things')
    self.assertTrue(isinstance(lst, list))
    vals = s.translate_namedvalues('named_things')
    self.assertTrue(isinstance(vals, dict))
    template = s.translate_template('test', data={'thing': 'test framework'})
    self.assertEqual(template, ['Oh look it\'s my favourite test framework'])

    # Check that translatables can be loaded from locale folder
    s = SimpleSkill1()
    s.root_dir = abspath(join(dirname(__file__), 'translate', 'in-locale'))
    lst = s.translate_list('good_things')
    self.assertTrue(isinstance(lst, list))
    vals = s.translate_namedvalues('named_things')
    self.assertTrue(isinstance(vals, dict))
    template = s.translate_template('test', data={'thing': 'test framework'})
    self.assertEqual(template, ['Oh look it\'s my favourite test framework'])

    # Check loading in a non-en-us language
    s = SimpleSkill1()
    s.config_core['lang'] = 'de-de'
    s.root_dir = abspath(join(dirname(__file__), 'translate', 'in-locale'))
    lst = s.translate_list('good_things')
    self.assertEqual(lst, ['sonne', 'mycroft', 'zahne'])
    vals = s.translate_namedvalues('named_things')
    self.assertEqual(vals['blau'], '2')
    template = s.translate_template('test', data={'thing': 'test framework'})
    self.assertEqual(template, ['Aber setzen sie sich herr test framework'])

    # Check fallback to english
    lst = s.translate_list('not_in_german')
    self.assertEqual(lst, ['not', 'in', 'German'])

    # Restore lang to en-us
    s.config_core['lang'] = 'en-us'
[ "def test_language_chooser_available_language_with_translated_page(self):\n content = {\"en\": \"Language menu test\", \"fr\": \"Test du menu de langues\"}\n page = create_i18n_page(\n content, published=True, template=\"richie/single_column.html\"\n )\n url = page.get_absolute_url(language=\"fr\")\n response = self.client.get(url)\n\n # Every available language is present\n for item in [\"en\", \"fr\"]:\n self.assertContains(response, page.get_absolute_url(language=item))\n\n # Current language item is marked active according to user language\n # choice (from i18n url prefix)\n self.assertContains(\n response,\n (\n '<li class=\"topbar__menu__list__item '\n \"topbar__menu__list__item--language \"\n 'topbar__menu__list__item--en\">'\n ),\n )\n self.assertContains(\n response,\n (\n '<li class=\"topbar__menu__list__item '\n \"topbar__menu__list__item--language \"\n \"topbar__menu__list__item--fr \"\n 'topbar__menu__list__item--active\">'\n ),\n )", "def test_form_template_i18n():", "def test_localized_bundle_languages(self):\n strings = dotstrings.load_all_strings(self.bundle_path)\n self.assertEqual(sorted(strings.languages()), [\"en\", \"fr\"])", "def test_localized_bundle_tables_for_language(self):\n strings = dotstrings.load_all_strings(self.bundle_path)\n self.assertEqual(sorted(strings.tables_for_language(\"en\")), [\"One\", \"Two\"])\n self.assertEqual(sorted(strings.tables_for_language(\"fr\")), [\"One\", \"Two\"])", "def test_unready_for_l10n(self):\n locale = settings.WIKI_DEFAULT_LANGUAGE\n d = TemplateDocumentFactory()\n RevisionFactory(document=d, is_ready_for_localization=True)\n ApprovedRevisionFactory(\n document=d, is_ready_for_localization=False, significance=MAJOR_SIGNIFICANCE\n )\n\n row = self.row(locale=locale)\n\n self.assertEqual(row[\"title\"], d.title)\n self.assertEqual(str(row[\"status\"]), \"Changes Not Ready For Localization\")", "def test_app_locales(self):\n filenames = list(gen_filenames())\n self.assertIn(os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),\n filenames)", "def test_django_locales(self):\n filenames = list(gen_filenames())\n locales = []\n\n basedir = os.path.join(os.path.dirname(conf.__file__), 'locale')\n for dirpath, dirnames, locale_filenames in os.walk(basedir):\n for filename in locale_filenames:\n if filename.endswith('.mo'):\n locales.append(os.path.join(dirpath, filename))\n\n self.assertTrue(len(locales) > 10) # assume a few available locales\n for filename in locales:\n self.assertIn(filename, filenames)", "def test_localized_bundle_table_for_languages(self):\n strings = dotstrings.load_all_strings(self.bundle_path)\n\n one_languages = strings.table_for_languages(\"One\")\n self.assertEqual(sorted(list(one_languages.keys())), [\"en\", \"fr\"])\n\n one_english = one_languages[\"en\"]\n self.assertEqual(len(one_english), 2)\n\n one_french = one_languages[\"fr\"]\n self.assertEqual(len(one_french), 2)\n\n two_languages = strings.table_for_languages(\"Two\")\n self.assertEqual(sorted(list(two_languages.keys())), [\"en\", \"fr\"])\n\n two_english = two_languages[\"en\"]\n self.assertEqual(len(two_english), 1)\n\n two_french = two_languages[\"fr\"]\n self.assertEqual(len(two_french), 1)", "def test_language_chooser_available_language(self):\n page = create_page(\n language=\"en\",\n menu_title=\"Language menu test\",\n title=\"Language menu test\",\n slug=\"language-test-page\",\n template=\"richie/single_column.html\",\n published=True,\n )\n\n url = page.get_absolute_url(language=\"en\")\n response = self.client.get(url)\n\n 
# Every available language is present\n for item in [\"en\", \"fr\"]:\n self.assertContains(response, page.get_absolute_url(language=item))", "def test_word_translations(self, linked_words):\n\n words = Word.objects.word_translations(self.russian_word.pk)\n\n assert self.english_word in words\n assert self.spanish_word in words\n assert len(words) == 2", "def test_LangsAreAvail(self):\r\n for lang in self.broker.list_languages():\r\n if not self.broker.dict_exists(lang):\r\n assert False, \"language '\"+lang+\"' advertised but non-existent\"", "def test_resource_available_languages(self):\r\n self.assertEqual(len(self.resource.available_languages), 3)\r\n self.assertEqual(len(self.resource.available_languages_without_teams), 2)", "def test_valid_configuration(self):\n self.assertIsNotNone(CONFIGURATION)\n locales = CONFIGURATION.locales\n self.assertIsNotNone(locales)\n self.assertIsInstance(locales, list)\n self.assertIn('en', locales)\n self.assertEqual('fr', CONFIGURATION.dummy_locale)\n self.assertEqual('en', CONFIGURATION.source_locale)", "def test_texts(self):\n self.assertEqual(self.dlg.texts(), ['WPF Sample Application'])", "def test_available_langs_per_resource(self):\r\n self.assertEqual(type(self.resource.available_languages.count()), int)\r\n for user in ['anonymous', 'registered','team_member', 'maintainer']:\r\n resp = self.client[user].get(self.urls['resource'])\r\n self.assertContains(\r\n resp, \"Available languages (%s)\" % (\r\n self.resource.available_languages.count()\r\n ))", "def test_snippet_translation_data(self):\r\n # Set some custom data\r\n self.entity.translations.create(string=\"StringTrans1\",\r\n language=self.language, user=self.user[\"team_member\"],\r\n resource=self.resource\r\n )\r\n # Test the response contents\r\n resp = self.client['team_member'].get(self.snippet_url)\r\n self.assertContains(resp, '0 minutes', status_code=200)", "def test_load_local_data__languages(self):\n services = {'Abbr1': 'Service 1', 'Abbr2': 'Service 2'}\n languages = {'Lang1': 'Language 1', 'Lang2': 'Language 2', 'Lang3': 'Language 3'}\n self.cmd.languages = languages\n self.cmd.services = services\n self.cmd.save_data()\n self.cmd.services = None\n self.cmd.languages['Lang4'] = 'Language 4'\n self.cmd.load_local_data()\n self.assertTrue(len(self.cmd.languages) == 4)\n self.assertTrue('Lang4' in self.cmd.languages)\n self.assertTrue(len(self.cmd.services) == 2)", "def test_general_po(self):\r\n\r\n # Empty our resource\r\n SourceEntity.objects.filter(resource=self.resource).delete()\r\n\r\n # Import file with two senteces\r\n handler = POHandler('%s/general/test.pot' %\r\n os.path.split(__file__)[0])\r\n handler.bind_resource(self.resource)\r\n handler.set_language(self.resource.source_language)\r\n handler.parse_file(is_source=True)\r\n handler.save2db(is_source=True)\r\n exported_file = polib.pofile(handler.compile())\r\n for entry in exported_file:\r\n se = SourceEntity.objects.get(\r\n string = entry.msgid,\r\n context = entry.msgctxt or 'None',\r\n resource = self.resource\r\n )\r\n\r\n if se.pluralized:\r\n plurals = Translation.objects.filter(\r\n source_entity__resource = self.resource,\r\n language = self.resource.source_language,\r\n source_entity__string = entry.msgid\r\n ).order_by('rule')\r\n\r\n plural_keys = {}\r\n # last rule excluding other(5)\r\n lang_rules = self.resource.source_language.get_pluralrules_numbers()\r\n # Initialize all plural rules up to the last\r\n for p,n in enumerate(lang_rules):\r\n plural_keys[str(p)] = \"\"\r\n for n,p in 
enumerate(plurals):\r\n plural_keys[str(n)] = p.string\r\n\r\n self.assertEqual(entry.msgstr_plural, plural_keys)\r\n\r\n else:\r\n trans = se.get_translation(\r\n self.resource.source_language.code, rule=5\r\n )\r\n\r\n self.assertEqual(entry.msgstr, trans.string.encode('utf-8'), \"Source '%s'\"\\\r\n \" differs from translation %s\" % (entry.msgstr,\r\n trans.string.encode('utf-8')))", "def test_plural_data(self):\r\n\r\n self.assertEqual(Translation.objects.filter(\r\n source_entity=self.source_entity_plural,\r\n language=self.language_en).count(), 2)\r\n\r\n self.assertEqual(Translation.objects.filter(\r\n source_entity=self.source_entity_plural,\r\n language=self.language_ar).count(), 6)\r\n\r\n resp = self.client['maintainer'].post(\r\n self.translate_content_arabic_url, self.DataTable_params)\r\n self.assertContains(resp, 'ArabicTrans1', status_code=200)\r\n self.assertContains(resp, 'ArabicTrans2')\r\n self.assertContains(resp, 'ArabicTrans3')\r\n self.assertContains(resp, 'ArabicTrans4')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Point-by-point path planning in "airplane" mode ("mode avion"). In fact, the drone always turns toward its target point before advancing toward it.
def goToPoint(self,point, pas): point1 = [self.positionX[-1], self.positionY[-1], self.positionZ[-1]] #position actuelle du drone if distanceXY(point, point1) >= pas and denivellation(point,point1)**2 >= pas: print("going to point") theta = math.atan2(point[1],point[0]) #angle du point à atteindre phi = math.atan2(point1[1], point1[0]) #angle absolu du drone en tant que point print("theta",theta) #angle := angle relatif selon lequel le drone doit se tourner pour faire face au point cible angle = math.atan2(distanceXY(point,[0,0])*math.sin(theta-self.alpha)-distanceXY(point1,[0,0])*math.sin(phi-self.alpha), distanceXY(point,[0,0])*math.cos(theta-self.alpha)-distanceXY(point1,[0,0])*math.cos(phi-self.alpha)) print("angle", angle) print(int((angle/self.maxSpeedRotation)*10000)/10000.0) if angle < 0: self.clockwise(1,int(-(angle/self.maxSpeedRotation)*10000)/10000.0) else : self.counterClockwise(1,int((angle/self.maxSpeedRotation)*10000)/10000.0) if denivellation(point1,point)==0: rate=0 else: rate = distanceXY(point1, point)/denivellation(point1, point) print("rate",rate) niveau = denivellation(point1, point) if rate > 0: self.frontUp(1,rate, niveau) elif rate< 0: self.frontDown(1, rate, niveau) else: self.front(1, int(((distanceXY(point1, point) /self.maxSpeed)*10000)/10000.0)) self.fidelity += 1 else: print("skipping point") self.goneToX += [self.positionX[-1]] self.goneToY += [self.positionY[-1]] self.goneToZ += [self.positionZ[-1]]
[ "def velocidad_promedio(self): \n u_x = 0\n u_y = 0\n u_z = 0\n for i in range(self.N):\n u_x += self.particulas[i].v[0]\n u_y += self.particulas[i].v[1]\n u_z += self.particulas[i].v[2]\n self.p_vx = u_x /self.N\n self.p_vy = u_y /self.N\n self.p_vz = u_z /self.N\n self.p_v = self.p_vx + self.p_vy + self.p_vz", "def CalculPointage(self):\n \n noir = 12\n blanc = 12\n for piece in self.partie.damier.cases.values():\n if str(piece) == \"x\" or str(piece) == \"X\":\n blanc = blanc - 1\n elif str(piece) == \"o\" or str(piece) == \"O\":\n noir = noir - 1\n self.pointBlanc[\"text\"] = str(blanc)\n self.pointNoir[\"text\"] = str(noir)", "def enveloppe_convexe(points):\n if len(points) <= 3:\n return points.copy()\n\n #on recherche le pivot, c'est-à-dire le point de plus petite abscisse et de\n #plus petite ordonnée à défaut.\n pivot = points[0]\n for p in points:\n if p[0] < pivot[0] or (p[0] == pivot[0] and p[1] < pivot[1]):\n pivot = p\n\n points.sort(key=lambda p: Vecteur(pivot, p))\n #Tri des points en fonction de leur angle.\n\n pile = [pivot, points[1]]\n for i in range(2, len(points)):\n p = points[i]\n while len(pile) >= 2 and \\\n Vecteur(pile[-2], p).det(Vecteur(pile[-2], pile[-1])) <= 0:\n #Si les points pile[-2], pile[-1], p forment un tour à droite :\n pile.pop()\n pile.append(p)\n \n return pile", "def afficher_avions(self):\n ihm.afficher(\"Il y a {} avion(s) au sol à l'aéroport\"\n .format(len(self._avions)))\n ihm.afficher_paginer(self._avions, \"Avions au sol\", pas=10)\n return", "def iva(self,precio):\n if precio<=0 or precio>100:\n return False\n self.precio+= (self.precio*0.16) \n return True", "def _carta_a_pila(self, origen, destino):#igual a solitario_claseico.py(no hace falta cambiarlo, se los dejo como comentario)\n if origen.es_vacia():\n raise SolitarioError(\"El origen está vacío\")\n\n destino.apilar(origen.tope())\n origen.desapilar()\n\n if origen in self.mesa.pilas_tablero and not origen.es_vacia() and origen.tope().boca_abajo:\n origen.tope().voltear()", "def aplica(self, estado, accion):\n pass", "def aptitudPromedio(v):\n\n ac = 0\n\n for i in range(len(v)):\n ac += v[i][1]\n \n p = ac/len(v)\n\n return p", "def antipodal_points(p1, p2, p3):\n # First we produce normal vecors to the planes going through the\n # perpedicular bisectors of p1-p2 and p1-p3.\n normal_p12 = p2 - p1\n normal_p13 = p3 - p1\n antipodal_vector = cross(normal_p12, normal_p13)\n res_p = antipodal_vector.normalize()\n return res_p, -res_p", "def point_appartient_obstacle(p, obstacle):\n \n if (((p[0] >= obstacle.x)and(p[0] <= obstacle.x1))and((p[1] >= obstacle.y)and(p[1] <= obstacle.y1))):\n return True\n else :\n return False", "def isPlanar(*args, **kwargs):\n \n pass", "def presion_teorica(self): \n self.velocidad_promedio2()\n V = self.cubo.volumen\n m = self.particulas[0].m\n N = self.N\n v2 = self.p_v2\n self.P_teorica = (N*m*v2)/(3*V)\n return self.P_teorica", "def on_point_goal(self, *args: list) -> None:\n self.goals.points.text = f'Points goal: {self.point_goal:,}'\n set_text_to_fit(self.goals.points)\n if self.parent:\n solo_game = [screen for screen in self.parent.screens if screen.name == 'solo'][0]\n solo_game.point_goal = int(self.point_goal)\n if self.turn_limit and self.point_goal:\n self.set_difficulty()", "def project_point_onto_line(o, v, p):\n return o + dv.vector_projection(p - o, v)", "def charge_de_travail_point_de_fonction(self):\n\n taille_projet = TailleProjet.objects.get(taille_projet=self.taille_projet)\n\n charge_de_travail = 
taille_projet.charge_de_travail\n\n return charge_de_travail * self.point_de_fonction_net", "def op_point(self, v: dict) -> dict:\n\n vds = self.polarity * (v['d'] - v['s'])\n vds = min(vds, 1.0)\n vgs = self.polarity * (v['g'] - v['s'])\n vgs = min(vgs, 1.0)\n vov = vgs - self.vth\n\n reversed = bool(vds < 0)\n if reversed: vds = -1 * vds\n\n if vov <= 0: # Cutoff\n mode = 'CUTOFF'\n ids = 0\n gm = 0\n gds = 0\n elif vds >= vov: # Saturation\n mode = 'SAT'\n ids = self.beta / 2 * (vov ** 2) * (1 + self.lam * vds)\n gm = self.beta * vov * (1 + self.lam * vds)\n gds = self.lam * self.beta / 2 * (vov ** 2)\n else: # Triode\n mode = 'TRIODE'\n ids = self.beta * ((vov * vds) - (vds ** 2) / 2) * (1 + self.lam * vds)\n gm = self.beta * vds * (1 + self.lam * vds)\n gds = self.beta * ((vov - vds) * (1 + self.lam * vds) + self.lam * ((vov * vds) - (vds ** 2) / 2))\n\n rds = np.NaN if gds == 0 else 1 / gds\n d_ = {\"ids\": ids, \"gds\": gds, \"gm\": gm, \"rds\": rds, \"mode\": mode, 'rev': reversed}\n # print(f'Op Point: {d_}')\n return d_", "def idudeddvivacanampragrhyam(self):\n self.Pragrhya = False\n # PMS: 1.1.11. IdUdeddvivacanam pragfhyam\n if self.External and self.Pada1 in pragrhya_set:\n self.Pragrhya = True", "def evalutationLineAdv(jeu):\n if(monJoueur == 1):\n advCases = jeu[0][1]\n else:\n advCases = jeu[0][0]\n cpt=0\n for i in advCases:\n if(i in [1,2]):\n cpt+=1\n \n return cpt", "def pivprocess(filename,colorcode,stopframe,pixperm,fps,numpysaveto,graphsaveto,startframe=0,contouralpha=0,vertvelmin=-10,vertvelmax=10,hozvelmin=0,hozvelmax=5):\n\n\n\n #####Import the video#####\n vc = cv2.VideoCapture(filename)\n c=1\n\n\n ######Get frames for use in the PIV#####\n\n if vc.isOpened():\n rval , frame1 = vc.read()\n rval , frame2 = vc.read()\n \n else:\n rval = False\n\n\n\n #####Make Lists for Later#####\n\n U=[]\n V=[]\n\n #####Cuts out the green layer so that plant movement is not a factor in the velocity determinations#####\t\n\n GreenOption = colorcode\t\t\t\t\t\t\t\t\t\t\t\t\n if GreenOption==1: # use avg of red and blue\n frame1[:,:,1] = 0.5 * (frame1[:,:,0]+frame1[:,:,2])\n frame2[:,:,1] = 0.5 * (frame2[:,:,0]+frame2[:,:,2])\n elif GreenOption==2: #replace green with red\n frame1[:,:,1] = frame1[:,:,0]\n frame2[:,:,1] = frame2[:,:,0]\n elif GreenOption==0:\n frame1=frame1\n\tframe2=frame2\n else: #replace green with blue\n frame1[:,:,1] = frame1[:,:,2]\n frame2[:,:,1] = frame2[:,:,2]\n\n #####Starts with horizontal components calculation#####\n while rval:\n rval, frame3 = vc.read()\n if startframe < c < stopframe:\t\t\t\t\t\t\t\t\t\t\t\t\n myimage = frame3.copy()\n \n if GreenOption==1: # use avg of red and blue\n frame3[:,:,1] = 0.5 * (frame3[:,:,0]+frame3[:,:,2])\n elif GreenOption==2: #replace green with red\n frame3[:,:,1] = frame3[:,:,0]\n elif GreenOption==0:\n frame3=frame3\n else: #replace green with blue\n frame3[:,:,1] = frame3[:,:,2]\n\n f1 = frame1.mean(axis=2)\n f2 = frame2.mean(axis=2)\n f3 = frame3.mean(axis=2)\n\n vold = (f2-f1) * (f2-f1) / (f2+f1+1)\n vnew = (f3-f2) * (f3-f2) / (f3+f2+1)\n\t \n vold = 255.*(vold - vold.min() ) / (vold.max()-vold.min()+1)\n vnew = 255.*(vnew - vnew.min() ) / (vnew.max()-vnew.min()+1)\n\n oldimg = (255*vold).astype('int32')\n newimg = (255*vnew).astype('int32')\n\n u, v, sig2noise = openpiv.process.extended_search_area_piv( oldimg, newimg, window_size=24, overlap=12, dt=1./fps, search_area_size=64, sig2noise_method='peak2peak' ) \n x, y = openpiv.process.get_coordinates( image_size=newimg.shape, window_size=24, overlap=12 )\n u, 
v, mask = openpiv.validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )\n u, v = openpiv.filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)\n x, y, u, v = openpiv.scaling.uniform(x, y, u, v, scaling_factor = pixperm )\t\t\t\t\n \n scalef = pixperm\n\n U.append(u)\n V.append(v)\n if len(U)>fps:\t\t\t\t\t\t\t\t\t\t\t\t\n junk = U.pop(0)\n junk = V.pop(0)\n for ui in U:\n if len(U)==1:\n UU=ui\n else:\n UU=UU+ui\n UU = UU / float(len(U))\n for vi in V:\n if len(V)==1:\n VV=vi\n else:\n VV=VV+vi\n VV = VV / float(len(V))\n magnitude = np.sqrt( UU*UU+VV*VV )\n \n\n ######Vertical component calculations#####\n dvdy,dvdx = np.gradient( VV )\n dudy,dudx = np.gradient( UU )\n Vort = dvdx-dudy\n divergence = dudx+dvdy \n WW = -2*divergence\t\n\n\n #####Making the plots#####\n plt.figure()\n levels = np.arange(vertvelmin,vertvelmax+1,1)\t\t\t\t\t\t\t\t\t\t\n\n\n plt.contourf(x*scalef,y*scalef,WW,levels=levels,alpha=contouralpha,cmap='seismic')#, norm=clr.Normalize(vmin=vertvelmin,vmax=vertvelmax))\n plt.colorbar(ticks = levels, label='Vertical Velocities (m/s)', alpha = contouralpha)\n plt.streamplot(scalef*x,scalef*y, UU, VV, color=magnitude , density=2, linewidth = 1, arrowsize=1,cmap='nipy_spectral') #, norm=clr.Normalize(vmin=hozvelmin,vmax=hozvelmax) )\n plt.colorbar(extend = 'max',label='Horizontal Velocity(m/s)')\n \n \n\n\n\n\n #####Putting the image from the video in the background (Q is there to make sure the colors are normal)#####\n # plt.streamplot(scalef*x,scalef*y, UU, VV, color='b' , density=2, linewidth= 1, arrowsize=1)\n Q = np.ones( frame3.shape ) * 1.0\n Q[:,:,2] = myimage[:,:,0] / np.float( myimage[:,:,0].max() )\n Q[:,:,1] = myimage[:,:,1] / np.float( myimage[:,:,1].max() )\n Q[:,:,0] = myimage[:,:,2] / np.float( myimage[:,:,2].max() )\n \n #####This saves the numpy arrays and the images so that they can be analyzed later on#####\n ####This particular command saves the velocities####\n\n if numpysaveto != None:\n np.savez(numpysaveto %c,x=x,y=y,UU=UU,VV=VV,WW=WW)\t\t\n plt.imshow(Q, aspect = 'auto') \t\t\t\t\t\t\n plt.tight_layout() \n ####This particular command saves the images with the vector plots and vertical velocity contours####\t \n plt.title('Frame %05d'%c)\t\t\t\t\t\t\t\t\t\t\n plt.savefig( graphsaveto %c )\t\t\t\t\t\t\t\n plt.close()\n # break\n frame1 = frame2\n frame2 = frame3\n\n\n c += 1\n cv2.waitKey(1)\n vc.release()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }