query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | negatives (list, lengths 19 to 20) | metadata (dict) |
---|---|---|---|
Initialize the handler. Reads all config variables and creates a connection to OpenStack.
|
def __init__(self, config):
self.USERNAME = os.environ["OS_USERNAME"]
self.PASSWORD = os.environ["OS_PASSWORD"]
self.PROJECT_NAME = os.environ["OS_PROJECT_NAME"]
self.PROJECT_ID = os.environ["OS_PROJECT_ID"]
self.USER_DOMAIN_NAME = os.environ["OS_USER_DOMAIN_NAME"]
self.AUTH_URL = os.environ["OS_AUTH_URL"]
self.PROJECT_DOMAIN_ID = os.environ["OS_PROJECT_DOMAIN_ID"]
        self.USE_APPLICATION_CREDENTIALS = (
            os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true"
        )
if self.USE_APPLICATION_CREDENTIALS:
self.LOG.info("APPLICATION CREDENTIALS will be used!")
try:
self.APPLICATION_CREDENTIAL_ID = os.environ["APPLICATION_CREDENTIAL_ID"]
self.APPLICATION_CREDENTIAL_SECRET = os.environ[
"APPLICATION_CREDENTIAL_SECRET"
]
except KeyError:
self.LOG.error(
"Usage of Application Credentials enabled - but no credential id or/and secret provided in env!"
)
sys.exit(1)
self.SSH_PORT = 22
with open(config, "r") as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
self.LOG = setup_logger(config=cfg)
self.DEFAULT_SECURITY_GROUP_NAME = "defaultSimpleVM"
self.DEFAULT_SECURITY_GROUPS = [self.DEFAULT_SECURITY_GROUP_NAME]
self.GATEWAY_SECURITY_GROUP_ID = cfg["openstack_connection"][
"gateway_security_group_id"
]
self.USE_GATEWAY = cfg["openstack_connection"]["use_gateway"]
self.NETWORK = cfg["openstack_connection"]["network"]
self.FLOATING_IP_NETWORK = cfg["openstack_connection"][
"floating_ip_network"
]
self.PRODUCTION = cfg["openstack_connection"]["production"]
self.CLOUD_SITE = cfg["cloud_site"]
            # connection to redis via a connection pool
self.REDIS_HOST = cfg["redis"]["host"]
self.REDIS_PORT = cfg["redis"]["port"]
self.REDIS_PASSWORD = cfg["redis"].get("password", None)
self.LOG.info(
f"Connecting to Redis at {self.REDIS_HOST}:{self.REDIS_PORT}.."
)
self.pool = redis.ConnectionPool(
host=self.REDIS_HOST, port=self.REDIS_PORT, password=self.REDIS_PASSWORD
)
self.redis = redis.Redis(connection_pool=self.pool, charset="utf-8")
try:
self.redis.ping()
self.LOG.info("Connected to Redis!")
except redis.ConnectionError:
self.LOG.exception("Could not connect to Redis!")
sys.exit(1)
            # try to load the bibigrid configuration
try:
self.SUB_NETWORK = cfg["bibigrid"]["sub_network"]
self.BIBIGRID_MODES = cfg["bibigrid"]["bibigrid_modes"]
self.BIBIGRID_HOST = cfg["bibigrid"]["host"]
self.BIBIGRID_PORT = cfg["bibigrid"]["port"]
if cfg["bibigrid"].get("https", False):
self.BIBIGRID_URL = (
f"https://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}/bibigrid/"
)
self.BIBIGIRD_EP = (
f"https://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}"
)
else:
self.BIBIGRID_URL = (
f"http://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}/bibigrid/"
)
self.BIBIGIRD_EP = (
f"http://{self.BIBIGRID_HOST}:{self.BIBIGRID_PORT}"
)
self.BIBIGRID_DEACTIVATE_UPRADES_SCRIPT = (
self.create_deactivate_update_script()
)
self.BIBIGRID_ANSIBLE_ROLES = cfg["bibigrid"].get(
"ansibleGalaxyRoles", []
)
self.BIBIGRID_LOCAL_DNS_LOOKUP = cfg["bibigrid"].get(
"localDnsLookup", False
)
self.LOG.info(
f"Loaded Ansible Galaxy Roles for Bibigrid:\n {self.BIBIGRID_ANSIBLE_ROLES}"
)
self.LOG.info(msg=f"Bibigrd url loaded: {self.BIBIGRID_URL}")
except Exception as e:
self.LOG.exception(e)
self.LOG.info("Bibigrid not loaded.")
self.BIBIGRID_URL = None
self.SUB_NETWORK = None
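            # try to load the Forc backend configuration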
try:
self.RE_BACKEND_URL = cfg["forc"]["forc_url"]
backend_url_host = self.RE_BACKEND_URL.split(":")
self.FORC_URL = (
cfg["forc"].get("openresty_url", None)
or f"https:{backend_url_host[1]}/"
)
self.FORC_API_KEY = os.environ.get("FORC_API_KEY", None)
self.FORC_ALLOWED = {}
self.FORC_HTTPS = cfg["forc"].get("forc_https", True)
self.FORC_REMOTE_ID = cfg["forc"]["forc_remote_id"]
self.GITHUB_PLAYBOOKS_REPO = cfg["forc"]["github_playbooks_repo"]
if (
not self.RE_BACKEND_URL
or not self.FORC_API_KEY
or not self.GITHUB_PLAYBOOKS_REPO
):
raise ValueError
self.LOG.info(msg=f"Forc-Backend url loaded: {self.RE_BACKEND_URL}")
self.LOG.info(msg=f"Forc-Frontend Url loaded: {self.FORC_URL}")
except ValueError as ve:
self.LOG.exception(ve)
self.LOG.info(
"Forc-Backend not loaded as one of the configurations was empty."
)
self.RE_BACKEND_URL = None
self.FORC_API_KEY = None
self.FORC_ALLOWED = None
self.GITHUB_PLAYBOOKS_REPO = None
except Exception as e:
self.LOG.exception(e)
self.LOG.info("Forc-Backend not loaded.")
self.RE_BACKEND_URL = None
self.FORC_API_KEY = None
self.FORC_ALLOWED = None
self.GITHUB_PLAYBOOKS_REPO = None
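            # load gateway settings (IP and SSH/UDP port calculation formulas) if a gateway is used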
if self.USE_GATEWAY:
self.GATEWAY_IP = cfg["openstack_connection"]["gateway_ip"]
self.SSH_FORMULAR = cfg["openstack_connection"][
"ssh_port_calc_formular"
]
self.UDP_FORMULAR = cfg["openstack_connection"][
"udp_port_calc_formular"
]
self.LOG.info(f"Gateway IP is {self.GATEWAY_IP}")
self.conn = self.create_connection()
self._validate_forc_security_group()
self.update_playbooks()
self.validate_gateway_security_group()
self.create_or_get_default_ssh_security_group()
|
[
"def _do_custom_setup(self):\n self._create_handle(\n hostname=self.configuration.ixsystems_server_hostname,\n port=self.configuration.ixsystems_server_port,\n login=self.configuration.ixsystems_login,\n password=self.configuration.ixsystems_password,\n apikey=self.configuration.ixsystems_apikey,\n api_version=self.configuration.ixsystems_api_version,\n transport_type=self.configuration.ixsystems_transport_type)\n\n if not self.handle:\n raise FreeNASApiError(\n \"Failed to create handle for FREENAS server\")",
"def initialize(self) -> None:\n conn = self.optionally_wrap_socket(self.client.connection)\n conn.setblocking(False)\n if self.encryption_enabled():\n self.client = TcpClientConnection(conn=conn, addr=self.client.addr)\n if b'HttpProtocolHandlerPlugin' in self.flags.plugins:\n for klass in self.flags.plugins[b'HttpProtocolHandlerPlugin']:\n instance = klass(\n self.uid,\n self.flags,\n self.client,\n self.request,\n self.event_queue)\n self.plugins[instance.name()] = instance\n logger.debug('Handling connection %r' % self.client.connection)",
"def __init__(self, path=None, env=''):\n self.env = env\n filename = 'config/tornado_skeleton{}.yaml'.format(('.' + self.env) if self.env else '')\n if path:\n filename = os.path.join(path, filename)\n\n super().__init__(filename, raise_none=False)\n\n self.base_url = self.get('api:base_url')\n self.port = self.get('api:port')\n self.handlers_initializer = {\n 'env': self.env,\n 'contact': self.get('contact'),\n 'base_url': self.get('api:base_url'),\n 'api_version': self.get('api:version')\n }",
"def __init__(self):\n self.config = ConfigUtil.ConfigUtil('../../../data/ConnectedDevicesConfig.props')\n self.config.loadConfig()\n print('Configuration data...\\n' + str(self.config)) \n print('============= Setting Done! =============')\n self.host = self.config.getProperty(ConfigConst.COAP_GATEWAY_SECTION, ConfigConst.DEFAULT_HOST )\n self.port = int(self.config.getProperty(ConfigConst.COAP_GATEWAY_SECTION, ConfigConst.DEFAULT_COAP_PORT))\n self.serverAddr = (self.host, self.port)\n print('URL(IP): ' + str(self.serverAddr))\n self.url = \"coap://\" + self.host + \":\" + str(self.port) + \"/temp\"",
"def _initialize():\n\n\t\t# Read the configuration from file:\n\t\tdbType = Config.get(\"localdb\", \"type\")\n\t\tdbName = Config.get(\"localdb\", \"name\")\n\t\tdbHost = Config.get(\"localdb\", \"hostname\")\n\t\tdbUser = Config.get(\"localdb\", \"username\")\n\t\tdbPass = Config.get(\"localdb\", \"password\")\n\t\t\n\t\t# Construct the dbPath string, or rais an exception if the dbtype is unknown.\n\t\tif(dbType == \"sqlite\"):\n\t\t\tdbpath = 'sqlite:///' + dbName\n\t\telif(dbType == \"mysql\"):\n\t\t\tdbpath = dbType + \"://\" + dbUser + \":\" + dbPass + \"@\" + dbHost + \"/\" + dbName\n\t\telse:\n\t\t\traise Exception(\"DatabaseConfiguration is not correct\")\n\t\t\n\t\t# Create a dbengine, and depending on the configfile maybe turn on the debug.\n\t\tif(Config.get(\"localdb\", \"debug\") == \"0\"):\n\t\t\tSession.engine = create_engine(dbpath)\n\t\telse:\n\t\t\tSession.engine = create_engine(dbpath, echo=True)\n\t\t\n\t\t# Create a session, and bind it to the engine.\n\t\tSession.session = sessionmaker(bind=Session.engine)\n\t\t\n\t\t# Making sure that the dbSchema is created.\n\t\tBase.metadata.create_all(Session.engine)",
"def __init__(self):\n\n self.config = {\n 'debug': False,\n 'enable': False,\n 'secret': '',\n 'timeout': 120,\n 'delay': 3,\n 'drift_backward': 1,\n 'drift_forward': 1,\n }\n self.config_path = os.path.join(os.environ['HOME'], '.ssh', 'otp')\n self.load()",
"def _init_client(self):\n pass",
"def __init__ (self):\n # Create a connection to S3\n self.handle = self.connect()",
"def populate_initial_config(self):\n try:\n with openstack.OpenStack() as client:\n self._populate_system_config(client)\n self._populate_load_config(client)\n self._populate_network_config(client)\n if self.kubernetes:\n self._populate_dns_config(client)\n self._populate_docker_config(client)\n controller = self._populate_controller_config(client)\n # ceph_mon config requires controller host to be created\n self._inventory_config_complete_wait(client, controller)\n self._populate_interface_config(client, controller)\n self._populate_default_storage_backend(client, controller)\n\n except (KeystoneFail, SysInvFail) as e:\n LOG.exception(e)\n raise ConfigFail(\"Failed to provision initial system \"\n \"configuration\")",
"def setUpClass(cls):\n super(NeutronNetworkingBase, cls).setUpClass(\n application_name='neutron-api')\n cls.neutron_client = (\n openstack_utils.get_neutron_session_client(cls.keystone_session))",
"def connection_setup(self):\n\n self.logger.debug(\"Create the connection to the mgr....\")\n # Create a connection to Hal driver mgr\n self.mgrConnection = HalTransport(HalTransport.HalTransportClientMgr,\n HalTransport.HalClientMode,\n disconnectHandlerCb=self.connectionDisconnectCb)\n\n # create the poller\n if self.poller is None:\n self.poller = self.dispatcher.get_poll()\n\n # register the mgr socket\n self.dispatcher.fd_register(self.mgrConnection.socket, self.dispatcher.EV_FD_IN, self.host_management_cb)\n self.dispatcher.fd_register(self.mgrConnection.monitor, self.dispatcher.EV_FD_IN, self.host_management_cb)",
"def init(args):\n Configuration.load_config(vars(args).get(\"config\"))",
"def prepare_openstack(self, setup):\n # init variables\n exist_networks = self.os_conn.list_networks()['networks']\n ext_network = [x for x in exist_networks\n if x.get('router:external')][0]\n self.zone = self.os_conn.nova.availability_zones.find(zoneName=\"nova\")\n self.hosts = self.zone.hosts.keys()[:2]\n self.instance_keypair = self.os_conn.create_key(key_name='instancekey')\n self.security_group = self.os_conn.create_sec_group_for_ssh()\n self.networks = []\n\n # create router\n self.router = self.os_conn.create_router(name=\"router01\")['router']\n self.os_conn.router_gateway_add(router_id=self.router['id'],\n network_id=ext_network['id'])\n logger.info('router {} was created'.format(self.router['id']))\n\n self.dhcp_agent_ids = [agt['id'] for agt in\n self.os_conn.neutron.list_agents(\n binary='neutron-dhcp-agent')['agents']]",
"def init_app(self, app, config_group=\"flask_keystone\"):\n cfg.CONF.register_opts(RAX_OPTS, group=config_group)\n\n self.logger = logging.getLogger(__name__)\n try:\n logging.register_options(cfg.CONF)\n except cfg.ArgsAlreadyParsedError: # pragma: no cover\n pass\n logging.setup(cfg.CONF, \"flask_keystone\")\n\n self.config = cfg.CONF[config_group]\n self.roles = self._parse_roles()\n self.User = self._make_user_model()\n self.Anonymous = self._make_anonymous_model()\n self.logger.debug(\"Initialized keystone with roles: %s and \"\n \"allow_anonymous: %s\" % (\n self.roles,\n self.config.allow_anonymous_access\n ))\n app.wsgi_app = auth_token.AuthProtocol(app.wsgi_app, {})\n\n self.logger.debug(\"Adding before_request request handler.\")\n app.before_request(self._make_before_request())\n self.logger.debug(\"Registering Custom Error Handler.\")\n app.register_error_handler(FlaskKeystoneException, handle_exception)",
"def __init__(self, config='config.json'):\n self.read_config(config)",
"def init():\n click.echo(\"Enter the profile name, server, token and path to vault secrets\")\n profile_name = click.prompt(\"Profile Name\", type=str)\n vault_server = click.prompt(\"Vault Server\", type=str)\n vault_token = click.prompt(\"Vault Token\", type=str)\n vault_secret_path = click.prompt(\"Path to vault secret\", type=str)\n\n config_file = config.create_config_file(\n vault_server, vault_token, vault_secret_path, profile_name\n )\n\n yaml.dump_data_to_yml(config_file)\n\n click.echo(\n \"\"\"\n Following information is saved.\n name: {name}\n vault_server: {server}\n vault_token: {token}\n vault_secret_path: {secret_path}\n \"\"\".format(\n name=profile_name,\n server=vault_server,\n token=vault_token,\n secret_path=vault_secret_path,\n )\n )",
"def __init__(self):\n self.host = CONF.AGENT.zvm_xcat_server\n self.port = 443\n self.xcat_timeout = CONF.AGENT.zvm_xcat_timeout\n try:\n self.conn = HTTPSClientAuthConnection(self.host, self.port,\n CONF.AGENT.zvm_xcat_ca_file,\n timeout=self.xcat_timeout)\n except Exception:\n LOG.error(\"Connect to xCat server %s failed\" % self.host)\n raise exception.zVMxCatConnectionFailed(xcatserver=self.host)",
"def initialize_credentials():\n\n # import credentials\n # https://kedro.readthedocs.io/en/stable/04_kedro_project_setup/02_configuration.html\n from kedro.config import ConfigLoader\n\n # conf_paths = [\"../conf/base\", \"../conf/local\"]\n conf_paths = [Path(BASE_DIR, \"conf/local\")]\n print(f\"conf_paths are: {conf_paths}\")\n\n print\n conf_loader = ConfigLoader(conf_paths)\n credentials = conf_loader.get(\"credentials*\", \"credentials*/**\")\n\n # Environment setup\n os.environ[\"X_NFER_BASEURL\"] = \"https://preview.nferx.com\"\n os.environ[\"NFERENCE_USER\"] = credentials[\"nfer_access_key\"] # \"yash@nference.net\"\n os.environ[\"NFERENCE_TOKEN\"] = credentials[\"nfer_secret_key\"] # \"<api_token>\"\n\n print(\"Loaded credentials. Nference SDK ready to use.\\n\")",
"def setup(self):\n # Create an underlying uwsgi app to handle the setup and execution.\n if \"nginx\" in self.config:\n if self.config[\"nginx\"].get(\"enabled\", False) is True:\n self.nginx = nginx(self.config[\"nginx\"])\n self.nginx.run()\n\n # Create an underlying uwsgi app to handle the setup and execution.\n self = uwsgi.createObject(self)\n\n # We call this last here because we are going to update variables if we use ``uwsgi`` for execution.\n super().setup()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get Image with Tags.
|
def get_Image_with_Tag(self, id):
self.LOG.info(f"Get Image {id} with tags")
try:
img = self.conn.get_image(name_or_id=id)
if not img:
return Image()
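            # properties may be missing on some images; fall back to an empty dict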
properties = img.get("properties")
if not properties:
properties = {}
self.LOG.warning(f"Could not get properties for image: {img}")
description = properties.get("description", "")
tags = img.get("tags", [])
image = Image(
name=img["name"],
min_disk=img["min_disk"],
min_ram=img["min_ram"],
status=img["status"],
os_version=img.get("os_version", ""),
os_distro=img.get("os_distro", ""),
created_at=img["created_at"],
updated_at=img["updated_at"],
openstack_id=img["id"],
description=description,
tag=tags,
)
return image
except Exception as e:
self.LOG.exception(f"Get Image {id} with Tag Error: {e}")
return Image()
|
[
"def getOGTagsImage(self):",
"def retrieve_pictures_by_tag(tag_):\n database = get_db()\n return database.retrieve_pictures_by_tag(tag_)",
"def get_photos_by_tag(tag, **args):\n args.update({\n 'access_key': ACCESS_KEY\n })\n\n url = API_BASE + \"/by_tag/\" + str(tag) + '?' + urllib.urlencode(args)\n \n if('format' in args and args['format'] == 'xml'):\n result = urllib2.urlopen(url).read()\n else:\n result = simplejson.load(urllib.urlopen(url))\n\n return result",
"def _get_image_data(self, image_name):\n endpoint = \"/\".join([\"repository\", image_name])\n return self._get(endpoint)['tags']",
"def sample_img_from_tag(self, img_tag) :\n cat_id = coco.getCatIds(catNms=img_tag)\n img_id = np.random.choice(coco.getImgIds(catIds=cat_id), 1)\n img_path = utils.get_img_path(coco.loadImgs(int(img_id))[0]['file_name'])\n return utils.load_image(img_path), img_path, img_tag",
"def get_images(self):\n pass",
"def retrieve_tags_for_picture(picture):\n database = get_db()\n return database.retrieve_tags_for_picture(picture)",
"async def danr(self, *, tags):\n image = await self.helper.lookup_tags(tags,limit='1',random='true')\n await self.bot.say(image[0]['file_url'])",
"def tags_query(self):\n return Q(picture = self)",
"def find_img_tags(document):\n return document.findAll('img')",
"def dog_picture_get(self, slug):\n url = self._build_api_url(\"picture/dog/{slug}\".format(slug=slug))\n return self.make_request(url)",
"def get_image(self):",
"def get_image(self, image_id):\r\n images = self.list_images(ex_image_ids=[image_id])\r\n image = images[0]\r\n\r\n return image",
"def image_tags(self):\n return Versions(SystemCommand(self.cmd.image_tags).output).sorted",
"def get_tag(self, exif, tag, format=repr):\n\n try:\n tag_id = INV_TAGS[tag]\n tag_val = exif[tag_id]\n return tag_val\n except KeyError: # Exif field missing\n return None\n except TypeError: # No exif data with img, is None\n return None",
"def getImage(i):\n print \"Not implemented\"",
"def list(self):\n with self.alternate_service_type('image', allowed_types=('image',)):\n return self._list('/v2/images', 'images')",
"def pull_image(image=image_tag):\n run(f'docker pull {image}')",
"def get_image(self, image_name):\n return self._image[image_name]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the limits (maxTotalVolumes, maxTotalVolumeGigabytes, maxTotalInstances, totalRamUsed, totalInstancesUsed) of the OpenStack project from the client.
|
def get_limits(self):
self.LOG.info("Get Limits")
limits = {}
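        # merge the compute limits and the absolute volume limits into a single dict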
limits.update(self.conn.get_compute_limits())
limits.update(self.conn.get_volume_limits()["absolute"])
return {
"max_total_cores": str(limits["max_total_cores"]),
"max_total_instances": str(limits["max_total_instances"]),
"max_total_ram_size": str(math.ceil(limits["max_total_ram_size"] / 1024)),
"total_cores_used": str(limits["total_cores_used"]),
"total_instances_used": str(limits["total_instances_used"]),
"total_ram_used": str(math.ceil(limits["total_ram_used"] / 1024)),
"maxTotalVolumes": str(limits["maxTotalVolumes"]),
"maxTotalVolumeGigabytes": str(limits["maxTotalVolumeGigabytes"]),
"totalVolumesUsed": str(limits["totalVolumesUsed"]),
"totalGigabytesUsed": str(limits["totalGigabytesUsed"]),
}
|
[
"def ex_limits(self):\r\n\r\n result = self._sync_request(command='listResourceLimits',\r\n method='GET')\r\n\r\n limits = {}\r\n resource_map = {\r\n 0: 'max_instances',\r\n 1: 'max_public_ips',\r\n 2: 'max_volumes',\r\n 3: 'max_snapshots',\r\n 4: 'max_images',\r\n 5: 'max_projects',\r\n 6: 'max_networks',\r\n 7: 'max_vpc',\r\n 8: 'max_cpu',\r\n 9: 'max_memory',\r\n 10: 'max_primary_storage',\r\n 11: 'max_secondary_storage'\r\n }\r\n\r\n for limit in result.get('resourcelimit', []):\r\n # We will ignore unknown types\r\n resource = resource_map.get(int(limit['resourcetype']), None)\r\n if not resource:\r\n continue\r\n limits[resource] = int(limit['max'])\r\n\r\n return limits",
"def get_project_quotas(context, resources, project_id):\n\n # init with defaults\n project_quota = dict((key, resource.default)\n for key, resource in resources.items())\n\n # update with project specific limits\n quota_objs = quota_obj.Quota.get_objects(context,\n project_id=project_id)\n for item in quota_objs:\n project_quota[item['resource']] = item['limit']\n\n return project_quota",
"def getOpenstackProjectUsage(connection, project_id):\n\n quota = {}\n\n # Determine the compute-usage\n compute = connection.get_compute_limits(project_id)\n quota['compute'] = {\n 'instances': compute['total_instances_used'],\n 'cpu': compute['total_cores_used'],\n 'ram_mb': compute['total_ram_used'],\n 'ram_human': '%sB' % humanReadable(compute['total_ram_used'], 'm'),\n }\n\n try:\n quota['compute']['instances_percent'] = int(\n (compute['total_instances_used'] * 100) / \n compute['max_total_instances']\n )\n except ZeroDivisionError:\n quota['compute']['instances_percent'] = 100\n\n try:\n quota['compute']['cpu_percent'] = int(\n (compute['total_cores_used'] * 100) / compute['max_total_cores'])\n except ZeroDivisionError:\n quota['compute']['cpu_percent'] = 100\n\n try:\n quota['compute']['ram_percent'] = int(\n (compute['total_ram_used'] * 100) / compute['max_total_ram_size'])\n except ZeroDivisionError:\n quota['compute']['ram_percent'] = 100\n\n\n # Determine cinder usage\n volumequota = connection.get_volume_quotas(project_id)\n quota['volumes'] = {\n 'gigabytes': 0, \n 'volumes': 0, \n }\n\n # For each volume belonging to project, sum up the size and number of volumes\n for volume in connection.volume.volumes(**{\n 'all_projects': True, 'project_id': project_id}):\n quota['volumes']['gigabytes'] += volume.size\n quota['volumes']['volumes'] += 1 \n\n # Calculate human readable and percentages.\n quota['volumes']['gigabytes_human'] = '%sB' % \\\n humanReadable(quota['volumes']['gigabytes'], 'g')\n\n try:\n quota['volumes']['gigabytes_percent'] = int(\n (quota['volumes']['gigabytes'] * 100) / volumequota['gigabytes'])\n except ZeroDivisionError:\n quota['volumes']['gigabytes_percent'] = 100\n\n try:\n quota['volumes']['volumes_percent'] = int(\n (quota['volumes']['volumes'] * 100) / volumequota['volumes'])\n except ZeroDivisionError:\n quota['volumes']['volumes_percent'] = 100\n \n return quota",
"def get_detailed_project_quotas(self, context, resources, project_id):\n res_reserve_info = quota_api.get_reservations_for_resources(\n context, project_id, resources.keys())\n project_quota_ext = {}\n for key, resource in resources.items():\n if isinstance(resource, res.TrackedResource):\n used = self.get_resource_count(context, project_id, resource)\n else:\n # NOTE(ihrachys) .count won't use the plugin we pass, but we\n # pass it regardless to keep the quota driver API intact\n plugins = directory.get_plugins()\n plugin = plugins.get(key, plugins[constants.CORE])\n used = resource.count(context, plugin, project_id)\n\n project_quota_ext[key] = {\n 'limit': resource.default,\n 'used': used,\n 'reserved': res_reserve_info.get(key, 0),\n }\n # update with specific project limits\n quota_objs = quota_obj.Quota.get_objects(context,\n project_id=project_id)\n for item in quota_objs:\n project_quota_ext[item['resource']]['limit'] = item['limit']\n return project_quota_ext",
"def limits(self):\n return self.discretization.limits",
"def list_limit_range(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_limit_range\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/limitranges'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1LimitRangeList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def showLimits():\r\n limits = css.serviceInfo.limits\r\n print limits",
"def limits_params(self):\n LIMITS = []\n if self.LIMITS:\n for user in ['default', 'root', 'oracle']:\n limits_stanza = self.__limits_stanza_read(self.LIMITS, user + ':')\n if limits_stanza:\n curlimits = []\n for limit in range(len(limits_stanza)):\n curlimits.append({'limname' : limits_stanza[limit].split()[0],\n 'limval' : limits_stanza[limit].split()[2]})\n LIMITS.append({'username' : user, 'limitlist' : curlimits})\n else:\n return None\n return LIMITS",
"def DescribeNetworkInterfaceLimit(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeNetworkInterfaceLimit\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeNetworkInterfaceLimitResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))",
"async def get_limits(self):\r\n lo = await trio.to_thread.run_sync(self.handle.get_travel_range_min)\r\n hi = await trio.to_thread.run_sync(self.handle.get_travel_range_max)\r\n return lo, hi",
"def job_limit(self) -> Dict[str, Any]:\n url = self.get_url('jobs_limit')\n return map_jobs_limit_response(self.session.get(url).json())",
"def ex_get_limits(self):\r\n raise NotImplementedError(self._not_implemented_msg)",
"def get_authorization_divisions_limit(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_authorization_divisions_limit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/authorization/divisions/limit'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='int',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def volume_get_limits(self, space_guarantee=None, data_aggr_list=None, enable_snapdiff=None, namespace_mirror_aggr_list=None, max_data_constituent_size=None, max_namespace_constituent_size=None, namespace_aggregate=None):\n return self.request( \"volume-get-limits\", {\n 'space_guarantee': [ space_guarantee, 'space-guarantee', [ basestring, 'None' ], False ],\n 'data_aggr_list': [ data_aggr_list, 'data-aggr-list', [ basestring, 'aggr-name' ], True ],\n 'enable_snapdiff': [ enable_snapdiff, 'enable-snapdiff', [ bool, 'None' ], False ],\n 'namespace_mirror_aggr_list': [ namespace_mirror_aggr_list, 'namespace-mirror-aggr-list', [ basestring, 'aggr-name' ], True ],\n 'max_data_constituent_size': [ max_data_constituent_size, 'max-data-constituent-size', [ int, 'None' ], False ],\n 'max_namespace_constituent_size': [ max_namespace_constituent_size, 'max-namespace-constituent-size', [ int, 'None' ], False ],\n 'namespace_aggregate': [ namespace_aggregate, 'namespace-aggregate', [ basestring, 'None' ], False ],\n }, {\n 'max-infinitevol-size': [ int, False ],\n 'min-infinitevol-size': [ int, False ],\n } )",
"def quota_get_all_by_project_id(self, project_id):",
"def getArgLimits(self):\n minargs, maxargs = getattr(self.method, 'arglimits', (None, None))\n return getCommandArgLimits(self.method, minargs, maxargs)",
"def watchLimitRangelist(self, **kwargs):\n\n allParams = ['fieldSelector', 'labelSelector', 'resourceVersion', 'watch']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method watchLimitRangelist\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/api/v1beta3/watch/limitranges'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = '*/*,'\n\n \n if ('fieldSelector' in params):\n queryParams['fieldSelector'] = self.apiClient.toPathValue(params['fieldSelector'])\n \n if ('labelSelector' in params):\n queryParams['labelSelector'] = self.apiClient.toPathValue(params['labelSelector'])\n \n if ('resourceVersion' in params):\n queryParams['resourceVersion'] = self.apiClient.toPathValue(params['resourceVersion'])\n \n if ('watch' in params):\n queryParams['watch'] = self.apiClient.toPathValue(params['watch'])\n \n\n \n\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n \n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'json_WatchEvent')\n return responseObject",
"def operational_limits(self):\n\n try:\n _ = getattr(self, \"crane\")\n max_windspeed = self._crane_specs[\"max_windspeed\"]\n\n except MissingComponent:\n max_windspeed = self._transport_specs[\"max_windspeed\"]\n\n _dict = {\n \"windspeed\": le(max_windspeed),\n \"waveheight\": le(self._transport_specs[\"max_waveheight\"]),\n }\n\n return _dict",
"def absolute_limits(nova_client):\r\n limits = nova_client.limits.get()\r\n return dict([(limit.name, limit.value) for limit in list(limits.absolute)])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A lightweight dummy request. This request is ultra-lightweight and should be used only when the request itself is not a major focus of the call stack. It is much easier to mock and control side effects using this object. It does not have request extensions applied. Threadlocals are not properly pushed.
|
def dummy_request(app):
request = DummyRequest()
request.registry = app.registry
request.host = 'example.com'
return request
|
[
"def _mock_request():\n return _MockRequestClient().request()",
"def dummy_request(db_session):",
"def test_request_init(self):\n\n\t\tself.assertEqual(self.request.path, '/index')\n\t\tself.assertEqual(self.request.method, 'GET')\n\t\tself.assertEqual(self.request._get_data, None)\n\t\tself.assertEqual(self.request._post_data, None)",
"async def request_impl(self: Client, request: Request) -> Response:\n raise NotImplementedError(\n f\"{self.__class__.__name__}.request_impl not implemented.\"\n )",
"def construct_dummy_request(json_body: dict = None, match_dict: dict = None) -> DummyRequest:\n if json_body is None:\n json_body = dict()\n if match_dict is None:\n match_dict = dict()\n return DummyRequest(json_body=json_body, matchdict=match_dict, validated={}, errors=Errors(), mailer=DummyMailer,\n cookies={'_LOCALE_': 'en'})",
"def mock_request():\n request = RequestFactory().get('/')\n\n # We have to manually add a session since we'll be bypassing\n # the middleware chain.\n session_middleware = SessionMiddleware()\n session_middleware.process_request(request)\n\n return request",
"def test_request_init(cls, json_kwargs, api_request):\n\trs = cls(request=api_request, json_kwargs=json_kwargs)\n\t# Not loaded at first\n\tassert rs.loaded is False\n\tassert rs.request == api_request\n\tresp = api_request()\n\tassert rs.response == resp\n\tassert rs.results == resp.results(**json_kwargs)",
"def _simulate_request(self, path, **kwargs):\n\n if not path:\n path = '/'\n\n return self.api(create_environ(path=path, **kwargs),\n self.srmock)",
"def api_request(api_response):\n\n\tclass FakeApiRequestObject:\n\t\tdef __call__(self, *args, **kwargs):\n\t\t\treturn api_response\n\n\t\tsend = __call__\n\n\treturn FakeApiRequestObject()",
"def create_blank_request(*args, **kwargs):\n if isinstance(kwargs.get('body'), str):\n kwargs['body'] = kwargs['body'].encode('utf8')\n return webob.Request.blank(*args, **kwargs)",
"def _request(self, url):\n return Request(url)",
"def test_construction_empty():\n req = DataRequest()\n assert req.headers == {}\n assert req.form_data == {}\n assert req.url == ''\n assert not req.can_send",
"def test_call_makes_request_with_required_parameters(self):\n base.call(\"GET\", self.url, self.req_ctx)\n self.session.request.assert_called_once_with(\n \"GET\", self.url, auth=None, **self.OPTIONAL_REQUEST_ARGS)",
"def __init__(self, request_data=None, request_handler=None):\n self.request_data = request_data or {} # data for the request\n self.request_handler = request_handler or \\\n get_default_request_handler() # handler to fullfil the request\n self.response = None # response is stored here",
"def _start_watching_request(req=None, name=None):\n req = req or request\n name = name or request.endpoint\n w_data = _start_watch(name, 'flask_request', None, None, False)\n g.w_data = w_data # pylint: disable=assigning-non-slot",
"def test_standard_requests(self):\n get_msgs = self.client.message_recorder(\n blacklist=self.BLACKLIST, replies=True)\n nomid_req = partial(self.client.blocking_request, use_mid=False)\n nomid_req(katcp.Message.request(\"watchdog\"))\n nomid_req(katcp.Message.request(\"restart\"))\n nomid_req(katcp.Message.request(\"log-level\"))\n nomid_req(katcp.Message.request(\"log-level\", \"trace\"))\n nomid_req(katcp.Message.request(\"log-level\", \"unknown\"))\n nomid_req(katcp.Message.request(\"help\"))\n nomid_req(katcp.Message.request(\"help\", \"watchdog\"))\n nomid_req(katcp.Message.request(\"help\", \"unknown-request\"))\n nomid_req(katcp.Message.request(\"client-list\"))\n nomid_req(katcp.Message.request(\"version-list\"))\n nomid_req(katcp.Message.request(\"sensor-list\"))\n nomid_req(katcp.Message.request(\"sensor-list\", \"an.int\"))\n nomid_req(katcp.Message.request(\"sensor-list\", \"an.unknown\"))\n nomid_req(katcp.Message.request(\"sensor-value\"))\n nomid_req(katcp.Message.request(\"sensor-value\", \"an.int\"))\n nomid_req(katcp.Message.request(\"sensor-value\",\n \"an.unknown\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\",\n \"differential\", \"2\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\",\n \"event-rate\", \"2\", \"3\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\",\n \"an.unknown\", \"auto\"))\n nomid_req(katcp.Message.request(\"sensor-sampling\", \"an.int\", \"unknown\"))\n\n def tst():\n self.server.log.trace(\"trace-msg\")\n self.server.log.debug(\"debug-msg\")\n self.server.log.info(\"info-msg\")\n self.server.log.warn(\"warn-msg\")\n self.server.log.error(\"error-msg\")\n self.server.log.fatal(\"fatal-msg\")\n self.server.ioloop.add_callback(tst)\n\n self.assertEqual(self.server.restart_queue.get_nowait(), self.server)\n expected_msgs = [\n (r\"!watchdog ok\", \"\"),\n (r\"!restart ok\", \"\"),\n (r\"!log-level ok warn\", \"\"),\n (r\"!log-level ok trace\", \"\"),\n (r\"!log-level fail Unknown\\_logging\\_level\\_name\\_'unknown'\", \"\"),\n (r\"#help cancel-slow-command Cancel\\_slow\\_command\\_request,\\_\"\n \"resulting\\_in\\_it\\_replying\\_immediately\", \"\"),\n (r\"#help client-list\", \"\"),\n (r\"#help halt\", \"\"),\n (r\"#help help\", \"\"),\n (r\"#help log-level\", \"\"),\n (r\"#help new-command\", \"\"),\n (r\"#help raise-exception\", \"\"),\n (r\"#help raise-fail\", \"\"),\n (r\"#help restart\", \"\"),\n (r\"#help sensor-list\", \"\"),\n (r\"#help sensor-sampling\", \"\"),\n (r\"#help sensor-sampling-clear\", \"\"),\n (r\"#help sensor-value\", \"\"),\n (r\"#help slow-command\", \"\"),\n (r\"#help version-list\", \"\"),\n (r\"#help watchdog\", \"\"),\n (r\"!help ok %d\" % NO_HELP_MESSAGES, \"\"),\n (r\"#help watchdog\", \"\"),\n (r\"!help ok 1\", \"\"),\n (r\"!help fail\", \"\"),\n (r\"#client-list\", \"\"),\n (r\"!client-list ok 1\", \"\"),\n (r\"#version-list katcp-protocol\", \"\"),\n (r\"#version-list katcp-library\", \"\"),\n (r\"#version-list katcp-device\", \"\"),\n (r\"!version-list ok 3\", \"\"),\n (r\"#sensor-list an.int An\\_Integer. count integer -5 5\", \"\"),\n (r\"!sensor-list ok 1\", \"\"),\n (r\"#sensor-list an.int An\\_Integer. 
count integer -5 5\", \"\"),\n (r\"!sensor-list ok 1\", \"\"),\n (r\"!sensor-list fail\", \"\"),\n (r\"#sensor-value 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-value ok 1\", \"\"),\n (r\"#sensor-value 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-value ok 1\", \"\"),\n (r\"!sensor-value fail\", \"\"),\n (r\"!sensor-sampling ok an.int none\", \"\"),\n (r\"#sensor-status 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-sampling ok an.int differential 2\", \"\"),\n (r\"#sensor-status 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-sampling ok an.int event-rate 2 3\", \"\"),\n (r\"!sensor-sampling fail No\\_sensor\\_name\\_given.\", \"\"),\n (r\"!sensor-sampling fail Unknown\\_sensor\\_name:\\_an.unknown.\", \"\"),\n (r\"!sensor-sampling fail Unknown\\_strategy\\_name:\\_unknown.\", \"\"),\n (r\"#log trace\", r\"root trace-msg\"),\n (r\"#log debug\", r\"root debug-msg\"),\n (r\"#log info\", r\"root info-msg\"),\n (r\"#log warn\", r\"root warn-msg\"),\n (r\"#log error\", r\"root error-msg\"),\n (r\"#log fatal\", r\"root fatal-msg\"),\n ]\n self._assert_msgs_like(get_msgs(min_number=len(expected_msgs)),\n expected_msgs)",
"def test_no_process_request(self):\n request = HttpRequest()\n response = HttpResponse()\n self.mw.process_response(request, response)",
"def create_req(self):\n \n pass",
"def set_request(r: http.Request):\n _requests[threading.get_id()] = r\n\n return r"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse the specified handle into an NBRF record.
|
def parse(self, handle):
self._consumer = _RecordConsumer()
self._scanner.feed(handle, self._consumer)
return self._consumer.data
|
[
"def create_rec_parser(self, file_handle):\n parser = AdcptMLog9Parser(self.rec_config,\n file_handle,\n self.exception_callback)\n return parser",
"def parse(cls, buf):\n kwargs = {}\n\n # Splits buf containing the record info into rows\n for row in buf.split('\\n'):\n # Skips empty.\n if not row:\n continue\n\n column, value = _record_row_parser(row)\n value = _record_value_parser(value)\n kwargs[column] = value\n\n return cls(**kwargs)",
"def parse_record(self, nmea_record):\n if not nmea_record:\n return None\n if not isinstance(nmea_record, str):\n logging.info('Record is not NMEA string: \"%s\"', nmea_record)\n return None\n try:\n (data_id, raw_ts, message) = nmea_record.strip().split(maxsplit=2)\n ts = timestamp(raw_ts, time_format=self.time_format)\n except ValueError:\n logging.info('Record not in <data_id> <timestamp> <NMEA> format: \"%s\"',\n nmea_record)\n return None\n\n # Figure out what kind of message we're expecting, based on data_id\n sensor = self.sensors.get(data_id, None)\n if not sensor:\n logging.error('Unrecognized data_id (\"%s\") in record: %s',\n data_id, nmea_record)\n return None\n\n model_name = sensor.get('model', None)\n if not model_name:\n logging.error('No \"model\" for sensor %s', sensor)\n return None\n\n # If something goes wrong during parsing, we'll get a ValueError\n try:\n (fields, message_type) = self.parse_nmea(sensor_model_name=model_name,\n message=message)\n except ValueError as e:\n logging.error(str(e))\n return None\n\n # Finally, convert field values to variable names specific to sensor\n sensor_fields = sensor.get('fields', None)\n if not sensor_fields:\n logging.error('No \"fields\" definition found for sensor %s', data_id)\n return None\n\n named_fields = {}\n for field_name in fields:\n var_name = sensor_fields.get(field_name, None)\n if var_name:\n named_fields[var_name] = fields[field_name]\n\n record = DASRecord(data_id=data_id, message_type=message_type,\n timestamp=ts, fields=named_fields)\n logging.debug('created DASRecord: %s', str(record))\n return record",
"def parse_record(record_string):\n type_search_string = \"@.*{\"\n type_re = re.compile(type_search_string)\n type_match = type_re.match(record_string)\n record_type = record_string[type_match.start()+1:type_match.end()-1]\n record_string = record_string[type_match.end()-1:]\n test_record_types({record_type})\n nested_ex = nestedExpr(\"{\", \"}\")\n token_list = nested_ex.parseString(record_string)[0]\n label = token_list.pop(0).strip(\",\")\n _LOGGER.info(\"Parsing %s...\" % label)\n if record_type == \"article\":\n return Article(label, token_list)\n elif record_type == \"report\":\n return Report(label, token_list)\n elif record_type == \"incollection\":\n return InCollection(label, token_list)\n elif record_type == \"inproceedings\":\n return InProceedings(label, token_list)\n else:\n errstr = \"Unknown record type: %s\" % record_type\n raise ValueError(errstr)",
"def FieldHandle(self) -> _n_2_t_10:",
"def parse(self, obj, target=None):\n super().parse(obj, target)\n\n # Log object for debugging.\n logger.info(\"Parsing NBA PBP Object: %s\" % self.o)\n srid_pbp_desc = self.o.get('id', None)\n pbp_desc = self.get_pbp_description_by_srid(srid_pbp_desc)\n\n # TODO: (zach) I'm not sure if any of this description stuff is needed.\n if pbp_desc:\n # DataDenNba.parse() | nba.event pbp {\n # 'updated': '2015-06-17T03:58:49+00:00',\n # 'parent_list__id': 'events__list',\n # 'possession': '583ec825-fb46-11e1-82cb-f4ce4684ea4c',\n # 'dd_updated__id': 1441316758302,\n # 'parent_api__id': 'pbp',\n # 'clock': '00:00',\n # 'description': 'End of 4th Quarter.',\n # 'event_type': 'endperiod',\n # 'quarter__id': '37d8a2b0-eb65-431d-827f-1c25396a3f1f',\n # 'game__id': '63aa3abe-c1c2-4d69-8d0f-5e3e2f263470',\n # 'id': '3688ff8b-f056-412f-9189-7f123073217f',\n # '_id': 'cGFyZW50X2FwaV9faWRwYnBnYW1lX19pZDYzYWEzYWJlLWMxYzItNGQ2OS04ZDBmLTVlM2U...'\n # }\n\n # pbp_description_model: <class 'sports.nba.models.PbpDescription'>\n\n description = self.o.get('description', None)\n logger.debug('description: %s' % description)\n\n if pbp_desc.description != description:\n # only save it if its changed\n logger.debug(\n '..saving it because it doesnt match the description we currently have (must '\n 'have changed)')\n pbp_desc.description = description\n pbp_desc.save()\n logger.debug('before: %s' % pbp_desc.description)\n pbp_desc.refresh_from_db()\n logger.debug('after: %s' % pbp_desc.description)\n else:\n logger.debug('..not saving description because it matches what we currently have.')\n pass\n else:\n logger.debug('pbp_desc not found by srid %s' % srid_pbp_desc)\n pass",
"def parse_srt(file_handle):\n TIMECODE_SEP = re.compile('[ \\->]*') \n \n state = 'waiting' # or timerange or lines\n \n doc = SRTDocument()\n\n start = None\n end = None\n lines = []\n\n for line in file_handle:\n line = line.strip()\n \n if state == 'waiting':\n #assume its a valid SRT\n if line:\n state = 'time'\n elif state == 'time':\n start, end = map(Timecode.from_string, TIMECODE_SEP.split(line))\n state = 'text'\n elif state == 'text':\n if line == '':\n # switch \n doc = doc.add_frame(SRTFrame(start, end, lines))\n start = None\n end = None\n lines = []\n state = 'waiting'\n else:\n lines.append(line)\n \n if start:\n doc = doc.add_frame(SRTFrame(start, end, lines))\n return doc",
"def parse_record(record):\n return {'abstract': get_abstract(record),\n 'accession_num': get_accession_num(record),\n 'date': get_date(record),\n 'edition': get_edition(record),\n 'internal_pdf': get_internal_pdf(record),\n 'journal': get_journal(record),\n 'database': get_remote_database(record),\n 'title': get_title(record),\n 'year': get_year(record)}",
"def parse_record(self, record):\n # Extract start and end timestamps\n start_timestamp_string = record.attributes['startDate'].value\n end_timestamp_string = record.attributes['endDate'].value\n try:\n start_time = datetime.strptime(start_timestamp_string, '%Y-%m-%d %H:%M:%S -0500')\n end_time = datetime.strptime(end_timestamp_string, '%Y-%m-%d %H:%M:%S -0500')\n except ValueError:\n start_time = datetime.strptime(start_timestamp_string, '%Y-%m-%d %H:%M:%S -0400')\n end_time = datetime.strptime(end_timestamp_string, '%Y-%m-%d %H:%M:%S -0400')\n\n # Extract biometric data\n try:\n # convert to float for numerical values\n biometric = float(record.attributes['value'].value)\n except:\n biometric = record.attributes['value'].value\n\n return start_time, end_time, biometric",
"def __record(self):\r\n f = self.__getFileObj(self.dbf)\r\n recFmt = self.__recordFmt()\r\n recordContents = unpack(recFmt[0], f.read(recFmt[1]))\r\n if recordContents[0] != ' ':\r\n # deleted record\r\n return None\r\n record = []\r\n for (name, typ, size, deci), value in zip(self.fields,\r\n recordContents):\r\n if name == 'DeletionFlag':\r\n continue\r\n elif not value.strip():\r\n record.append(value)\r\n continue\r\n elif typ == \"N\":\r\n value = value.replace('\\0', '').strip()\r\n if value == '':\r\n value = 0\r\n elif deci:\r\n value = float(value)\r\n else:\r\n value = int(value)\r\n elif typ == 'D':\r\n try:\r\n y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])\r\n value = [y, m, d]\r\n except:\r\n value = value.strip()\r\n elif typ == 'L':\r\n value = (value in 'YyTt' and 'T') or \\\r\n (value in 'NnFf' and 'F') or '?'\r\n else:\r\n value = value.strip()\r\n record.append(value)\r\n return record",
"def test_lazy_parse_sff_handle(self):\n flows, head = lazy_parse_sff_handle(self.rec)\n flows = list(flows)\n self.assertEqual(len(flows),2)\n self.assertEqual(len(head), 11)\n self.assertEqual(head['Key Length'], '4')\n self.assertEqual(head['Key Sequence'], 'TCAG')\n self.assertEqual(flows[0].Name, 'FIQU8OX05GCVRO')\n self.assertEqual(flows[1].Name, 'FIQU8OX05F8ILF')",
"def parseRecord(fields, record):\n\n start = end = issued = None # Initialize start, end, and issued to None\n out = {} # Emtpy dict\n for ID, field in enumerate( fields ): # Iterate over all fields\n key = None # Set key to None by default\n val = record[ID-1] # Set val to record that corresponds with field\n if field == 'VALID': # If field is VALID\n start = datetime.strptime(val, DATEFMT) # Parse start time\n elif field == 'EXPIRE':\n end = datetime.strptime(val, DATEFMT) # Parse end time\n elif field == 'ISSUE':\n issued = datetime.strptime(val, DATEFMT) # Parse issued time\n elif field == 'LABEL':\n key = 'label' # Set key value\n val = convert2Percent( val ) # Update val value\n elif field == 'stroke':\n key = 'edgecolor' # Set key val\n elif field == 'fill':\n key = 'facecolor' # Set key val\n if key: # If the key is set\n out[key] = val # Add value to the out dict\n\n label = out.get('label', None)\n if label == 'SIGN':\n out.update( {'fill' : False, 'hatch' : '..', 'linestyle' : '--'} )\n\n return start, end, issued, out # Return values",
"def parse_flir_record_metadata(stream: BinaryIO, record_nr: int) -> Optional[Tuple[int, int, int, int]]:\n # FLIR record entry (ref 3):\n # 0x00 - int16u record type\n # 0x02 - int16u record subtype: RawData 1=BE, 2=LE, 3=PNG; 1 for other record types\n # 0x04 - int32u record version: seen 0x64,0x66,0x67,0x68,0x6f,0x104\n # 0x08 - int32u index id = 1\n # 0x0c - int32u record offset from start of FLIR data\n # 0x10 - int32u record length\n # 0x14 - int32u parent = 0 (?)\n # 0x18 - int32u object number = 0 (?)\n # 0x1c - int32u checksum: 0 for no checksum\n entry = 32 * record_nr\n stream.seek(entry)\n record_type = int.from_bytes(stream.read(2), 'big')\n if record_type < 1:\n return None\n\n _ = int.from_bytes(stream.read(2), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n record_offset = int.from_bytes(stream.read(4), 'big')\n record_length = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n _ = int.from_bytes(stream.read(4), 'big')\n return entry, record_type, record_offset, record_length",
"def _record_row_parser(buf):\n column, value = buf.split(':', 1)\n\n return column.strip(), value.strip()",
"def read_record(self, address, fileIdentifier, start_record, record_count):\n raise NotImplementedError(\"read_record\")",
"def parse_flir_record_metadata(stream: BinaryIO, record_nr: int) -> Optional[Tuple[int, int, int, int]]:\n # FLIR record entry (ref 3):\n # 0x00 - int16u record type\n # 0x02 - int16u record subtype: RawData 1=BE, 2=LE, 3=PNG; 1 for other record types\n # 0x04 - int32u record version: seen 0x64,0x66,0x67,0x68,0x6f,0x104\n # 0x08 - int32u index id = 1\n # 0x0c - int32u record offset from start of FLIR data\n # 0x10 - int32u record length\n # 0x14 - int32u parent = 0 (?)\n # 0x18 - int32u object number = 0 (?)\n # 0x1c - int32u checksum: 0 for no checksum\n entry = 32 * record_nr\n stream.seek(entry)\n record_type = int.from_bytes(stream.read(2), \"big\")\n if record_type < 1:\n return None\n\n _ = int.from_bytes(stream.read(2), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n record_offset = int.from_bytes(stream.read(4), \"big\")\n record_length = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n _ = int.from_bytes(stream.read(4), \"big\")\n return (entry, record_type, record_offset, record_length)",
"def read_scanner_data(handle): \n name = read_string(handle)\n pmt_green = read_int(handle)\n pmt_red = read_int(handle)\n scanner_version = read_string(handle)\n imaging_user = read_string(handle)\n return ScannerData(name, pmt_green, pmt_red, scanner_version, imaging_user)",
"def _parse_bidding_record(raw_bidding_record: List[str], record_dict: dict) -> Tuple[List[str], List[BidMetadata]]:\n bid_index = 0\n bidding_record = []\n bidding_metadata = []\n for raw_bid in raw_bidding_record:\n canonical_bid = canonicalize_bid(raw_bid)\n if canonical_bid:\n bidding_record.append(canonical_bid)\n bid_index += 1\n elif raw_bid.upper() == \"AP\":\n bidding_record.extend([\"PASS\"] * 3)\n bid_index += 3\n else:\n _update_bidding_metadata(bid_index, raw_bid, bidding_record, bidding_metadata, record_dict)\n return bidding_record, bidding_metadata",
"def read(filehandle, header=False):\n for line_raw in filehandle:\n line = line_raw.strip()\n # skip empty lines (whitespace-only)\n if len(line) == 0:\n continue\n fields = line.split('\\t')\n # skip header lines by default\n if fields[0].startswith('@') and re.search(HEADER_REGEX, fields[0]):\n if header:\n yield fields\n else:\n continue\n if len(fields) < 11:\n raise FormatError('Fewer than 11 fields on line:\\n'+line_raw)\n yield _dictify(fields, FIELD_NAMES)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assert that all keywords with the given name and args have the given status. Keyword names need to be passed in as fully qualified names, exactly as they appear in the logs. expected_status should be either PASS or FAIL.

Example:
| Log | Hello, world |
| Assert keyword status | PASS | BuiltIn.log | Hello, world |
|
def assert_keyword_status(self, expected_status, keyword_name, *args):
keyword_was_found = False
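        # scan the recorded keyword log for calls matching the given name and args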
for name, attrs in self.keyword_log:
if name == keyword_name and args == tuple(attrs["args"]):
keyword_was_found = True
if attrs["status"] != expected_status:
message = (
f"Status of keyword {keyword_name} with args {args} "
f"expected to be {expected_status} but was {attrs['status']}"
)
raise AssertionError(message)
if not keyword_was_found:
raise AssertionError(
f"No keyword with name '{keyword_name}' with args '{args}' was found"
)
|
[
"def test_find_words_by_status(self):\n pass",
"def assert_job_status(job: BatchJob, expected_status: str):\n # If the next assert is going to fail, then first show the logs of the job.\n actual_status = job.status()\n message = (\n f\"job {job}: did not end with expected status '{expected_status}', \"\n + f\"but ended with status '{actual_status}'\"\n )\n assert_batch_job(job, actual_status == expected_status, extra_message=message)",
"def reportKeywordStatus(status, kw_name):\n\n TCOBJ.report_keyword_status(status, kw_name)",
"def _check_statuses(self, expected_status):\n statuses = self.json['statuses']\n if len(self.json['statuses']) == 0:\n raise TestFailedError(\n 'FIELD STATUSES: Expected: At least one status,'\n ' Actual: No status')\n for status in self.json['statuses']:\n for status_key in expected_status.keys():\n if status_key == 'created_before':\n created_before = expected_status['created_before']\n created_at = status['created_at']\n created_at = dateutil_parser.parse(created_at).date()\n if created_at > created_before:\n raise TestFailedError(\n 'STATUSES FIELD {}: Expected: Before {},'\n ' Actual: {}'.format(created_at, created_before,\n created_at))\n else:\n expected = expected_status[status_key]\n actual = status[status_key] if\\\n status.has_key(status_key)\\\n else 'Not present'\n if not self._check_value(expected, actual):\n raise TestFailedError(\n 'STATUSES FIELD {}: Expected: {},'\n ' Actual: {}'.format(status_key, expected,\n actual.encode('utf-8')))",
"def run_keyword(self, name, args):\n func = getattr(self, name, None)\n result = {'error': '', 'return': ''}\n try:\n retval = func(*args)\n except Exception, e:\n result['status'] = 'FAIL'\n result['error'] = str(e)\n else:\n result['status'] = 'PASS'\n result['return'] = retval\n result['output'] = retval\n return result",
"def test_get_loglevel_from_name(self):\n # the function should just pass down integers\n self.assertEqual(misc.get_loglevel_from_name(1), 1)\n \n # strings should be translated to their respective log-level\n self.assertEqual(misc.get_loglevel_from_name('info'), logging.INFO)\n self.assertEqual(misc.get_loglevel_from_name('warn'), logging.WARN)\n self.assertEqual(misc.get_loglevel_from_name('debug'), logging.DEBUG)\n \n self.assertRaises(ValueError, lambda: misc.get_loglevel_from_name('a'))",
"def test_check_name_param_not_set(self):\n\n config = {\n 'init_config': {},\n 'instances': [\n {\n 'url': 'http://localhost:13001',\n 'authentication': {\n 'token_auth': {\n 'initial_token': \"dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx\",\n 'audience': \"search\",\n 'renewal_days': 10\n }\n },\n 'saved_searches': [{\n \"name\": \"minimal_metrics\",\n \"parameters\": {}\n }],\n 'tags': []\n }\n ]\n }\n # This is done to avoid going in the commit_succeeded call after the check runs\n self.collect_ok = False\n\n check = False\n try:\n self.run_check(config, mocks={\n '_dispatch_saved_search': _mocked_dispatch_saved_search,\n '_search': _mocked_search,\n '_saved_searches': _mocked_saved_searches\n })\n except CheckException:\n check = True\n\n self.assertTrue(check, msg='Splunk metric instance missing \"authentication.token_auth.name\" value')",
"def test_loglevel_badvalue():\n expected = 'loglevel expected LogLevel, got str'\n with pytest.raises(TypeError) as err:\n BaseLoop(loglevel='bad value')\n assert err.value.args == (expected, )",
"def assertAllStatuses(self, changes, status):\n action_history = self.fake_db.GetActionsForChanges(changes)\n progress_map = clactions.GetPreCQProgressMap(changes, action_history)\n for change in changes:\n for config in progress_map[change]:\n self.assertEqual(progress_map[change][config][0], status)",
"def test_basic_macro_status(capsys):\n output = Output()\n with pytest.raises(NotImplementedError):\n output.log(OutputMethod.MACRO_STATUS)\n\n output = Basic()\n # Only the directive.\n output.log(OutputMethod.MACRO_STATUS, 'BUILD')\n captured = capsys.readouterr()\n assert captured.out == '2021-01-02T01:06:34 build-magic [ DONE ] BUILD \\n'\n\n # Default status code.\n output.log(OutputMethod.MACRO_STATUS, 'BUILD', 'tar -czf hello.tar.gz')\n captured = capsys.readouterr()\n assert captured.out == '2021-01-02T01:06:34 build-magic [ DONE ] BUILD : tar -czf hello.tar.gz\\n'\n\n # No command but failing status code.\n output.log(OutputMethod.MACRO_STATUS, 'BUILD', status_code=1)\n captured = capsys.readouterr()\n assert captured.out == '2021-01-02T01:06:34 build-magic [ FAIL ] BUILD \\n'\n\n # Command with failing status code.\n output.log(OutputMethod.MACRO_STATUS, 'BUILD', 'tar -czf hello.tar.gz', 1)\n captured = capsys.readouterr()\n assert captured.out == '2021-01-02T01:06:34 build-magic [ FAIL ] BUILD : tar -czf hello.tar.gz\\n'",
"def test_pytest_all_tests_pass_status_propagates(self):\n py_file = self.testdir.makepyfile(\n \"\"\"\n def test_ok():\n assert True\n\n def test_ok_2():\n assert True\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = self.inline_run(\"--ddtrace\", file_name)\n rec.assertoutcome(passed=2)\n spans = self.pop_spans()\n for span in spans:\n assert span.get_tag(\"test.status\") == \"pass\"",
"def test_b_check_status_is_returned(self):\n self.assertTrue(self.status.is_returned(), \"The awaited status is returned, the current status is {}\".format(self.status.get_status()))",
"def assertion_summary(self, name, status, output=False):\n if status == 0 or status == 'pass':\n self._passes += 1\n self.message('pass', \"'%s' passed\" % name)\n else:\n self._failures += 1\n if output:\n self.message('debug', \"%s returned: \\n%s\" %\n (name, output.rstrip()))\n self.message('fail', \"'%s' failed\" % name)",
"def test_history_bad_param(self):\n self.assertRaises(NameError, self._params.get_history, \"status\")",
"def test_result_logging(self):\n self.instance.result_tracker = mock_tracker = Mock()\n self.instance.should_stop(epoch=0)\n log_metrics = mock_tracker.log_metrics\n self.assertIsInstance(log_metrics, Mock)\n log_metrics.assert_called_once()\n _, call_args = log_metrics.call_args_list[0]\n self.assertIn(\"step\", call_args)\n self.assertEqual(0, call_args[\"step\"])\n self.assertIn(\"prefix\", call_args)\n self.assertEqual(\"validation\", call_args[\"prefix\"])",
"def test_add_loglevel_arg(key):\n ap = argparse.ArgumentParser()\n liblog.add_loglevel_arg(ap)\n cmd = ap.parse_args(f'--log-level {key}'.split())\n assert cmd.log_level",
"def test_create_run_status(self):\n pass",
"def test_gcb_change_rule_alerting_status_command_when_valid_args_provided(client):\n from GoogleChronicleBackstory import gcb_change_rule_alerting_status_command\n\n with open('test_data/gcb_change_rule_alerting_status_ec.json', 'r') as f:\n expected_ec = json.loads(f.read())\n\n with open('test_data/gcb_change_rule_alerting_status_hr.md', 'r') as f:\n expected_hr = f.read()\n\n mock_response = (\n Response(dict(status=200)),\n '{}'\n )\n args = {\"rule_id\": \"ru_ab4d76c1-20d2-4cde-9825-3fb1c09a9b62\", \"alerting_status\": \"enable\"}\n client.http_client.request.return_value = mock_response\n hr, ec, json_data = gcb_change_rule_alerting_status_command(client, args)\n\n assert ec == expected_ec\n assert hr == expected_hr",
"def test_validate_ticket_track_arguments_failed_execution():\n # Verify invalid value\n with pytest.raises(ExtraHop_v2.InvalidValueError) as err:\n ExtraHop_v2.validate_ticket_track_arguments(\"4\")\n\n assert (\n str(err.value)\n == \"4 is an invalid value for incident_status. Possible values are: ['0', '1', '2', '3']\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the current working directory as a `remote path` object
|
def getpath(self):
return RemotePath(self.remote, self)
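A hypothetical usage sketch follows. The surrounding names (RemotePath, SshTunnel, ShellSession) suggest a plumbum-style remote machine, where this method would live on the remote working-directory object; the host name below is a placeholder.

from plumbum import SshMachine

rem = SshMachine("example.com")         # placeholder host
remote_cwd = rem.cwd.getpath()          # RemotePath for the current remote directory
print(remote_cwd)
rem.close()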
|
[
"def remote_path(self) -> str:\n return self._remote_path",
"def get_local_directory(self):\n \n # Gives Local Direcory path equivalent to URL Path in server\n rval = os.path.join(self.rootdir, self.domain)\n\n for diry in self.dirpath:\n if not diry: continue\n rval = os.path.abspath( os.path.join(rval, self.make_valid_filename(diry)))\n\n return os.path.normpath(rval)",
"def get_current_dir():\n return os.getcwd()",
"def local_path(self) -> str:\n\n return self.__local_path",
"def get_current_path():\n return os.path.join(\".deploy\", \"current\")",
"def get_current_dir() -> str:\n return os.getcwd()",
"def remote_fs_path(filename):\n return fs_testdir[\"remote_fs_basepath\"]+'/'+filename",
"def import_path(self):\n return os.path.join(self.remote_root, self.pkg) if self.pkg else self.remote_root",
"def cwd() -> str:\n return os.path.abspath(os.getcwd())",
"def abs_path(self, remote_path):\n if os.path.isabs(remote_path):\n logger.warning('use absolute path as dfs remote path is not recommended, '\n 'which may lead to privilege bugs:', remote_path)\n return os.path.join(self.root, remote_path)",
"def get_working_dir(self, gerrit, project):\n return os.path.join(\n os.getcwd(), '%s-%s-tmp' % (gerrit['host'], project))",
"def host_dir(self):\n\n return self._sysroot.host_dir",
"def get_deploy_dir() -> str:\n deploydir = local(\"pwd\", capture=True)\n return deploydir",
"def world_path(self) -> str:\n return self._directory",
"def getcwd():\r\n try:\r\n a = os.stat(os.environ['PWD'])\r\n b = os.stat(os.getcwd())\r\n if a.ino == b.ino and a.dev == b.dev:\r\n working_dir = os.environ['PWD']\r\n else:\r\n working_dir = os.getcwd()\r\n except:\r\n working_dir = os.getcwd()\r\n return working_dir",
"def local_backup_dir(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"local_backup_dir\")",
"def path_on_server(self):\n\n # change dev_base if necessary\n if ConfigHandler.cfg.wb_new == \"True\":\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI41\n else:\n oPB.DEV_BASE = oPB.DEV_BASE_OPSI40\n\n # if on Linux, we have to subtract local share base from development folder\n # -> the local share base acts like the drive letter on windows\n if platform.system() == 'Linux':\n tmp = self.projectfolder.replace(ConfigHandler.cfg.local_share_base, \"\")\n else:\n tmp = self.projectfolder\n\n if platform.system() == \"Windows\":\n # remove drive letter\n return oPB.DEV_BASE + tmp[2:].replace(\"\\\\\", \"/\")\n else:\n # replace possible double '/' with single '/'\n return (oPB.DEV_BASE + \"/\" + tmp).replace(\"//\", \"/\")\n\n \"\"\"\n if tmp.startswith(repo_base):\n return tmp\n else:\n if tmp.strip() != \"\":\n ret = (repo_base + \"/\" + tmp + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"a\", ret)\n return ret\n else:\n ret = (repo_base + \"/\" + self.id).replace(\"//\", \"/\")\n print(\"b\", ret)\n return ret\n \"\"\"",
"def _get_local_repo_base_path(self):\n return os.path.join(os.path.expanduser('~'), \".localcache\")",
"def get_current_dir():\n return os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))",
"def cwd(self, path):\n self._nc.rpc.set_cli_working_directory(directory=path)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Uploads a local file/directory (``src``) to a remote destination (``dst``).
|
def upload(self, src, dst):
raise NotImplementedError()
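Since upload() is left abstract here, the following is a minimal sketch of how one concrete backend might implement it using paramiko's SFTP client; the function name and the recursive layout are illustrative assumptions, not the library's own code.

import os
import paramiko

def sftp_upload(sftp: paramiko.SFTPClient, src: str, dst: str) -> None:
    # Recursively upload a local file or directory tree to the remote path.
    if os.path.isdir(src):
        try:
            sftp.mkdir(dst)
        except IOError:
            pass  # remote directory may already exist
        for name in os.listdir(src):
            sftp_upload(sftp, os.path.join(src, name), dst.rstrip("/") + "/" + name)
    else:
        sftp.put(src, dst)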
|
[
"def upload(self, source, dest=None, overwrite=False, fs=None):\n from ..filesystems.local import LocalFsClient\n\n if fs is None or isinstance(fs, LocalFsClient):\n logger.info('Copying file from local...')\n dest = dest or posixpath.basename(source)\n cmd = (\n \"scp -r -o ControlPath={socket} '{local_file}' {login}:'{remote_file}'\".format(\n socket=self._socket_path,\n local_file=source.replace('\"', r'\\\"'), # quote escaped for bash\n login=self._login_info,\n remote_file=dest.replace('\"', r'\\\"'),\n )\n )\n proc = run_in_subprocess(cmd, check_output=True)\n logger.info(proc.stderr or 'Success')\n else:\n return super(RemoteClient, self).upload(source, dest, overwrite, fs)",
"def copy_remote(src_path, dst_path):\n assert ':' not in src_path, src_path\n idx = dst_path.find(':')\n dst = dst_path[:idx]\n file_path = dst_path[idx+1:]\n assert ':' not in file_path, dst_path\n if os.path.isfile(src_path):\n cmd = 'scp %s %s' % (src_path, dst_path)\n else:\n cmd = 'scp -r %s %s' % (src_path, dst_path)\n res = run(cmd, shell=True, stdout=PIPE).stdout.decode('utf-8')\n return res",
"def copy_file(server, source, target):\n with setup_server_connection(server) as connection:\n Transfer(connection).put(local=source, remote=target)",
"def upload_file(self, src, dest):\n k = Key(self.bucket)\n k.key = dest\n k.set_contents_from_filename(src)",
"def copyin(self, src, dst):\n if self.error:\n sys.stderr.write(self.error)\n return 7\n if not dst.startswith('/'):\n dst = '/' + dst\n\n cmd = [self.conf.adb_command, 'push', src, dst]\n if self.conf.debug:\n sys.stderr.write(' '.join(cmd) + '\\n')\n\n with open(os.devnull, 'w') as fnull:\n try:\n err = subprocess.call(cmd, stdout=fnull, stderr=fnull)\n except subprocess.CalledProcessError:\n sys.stderr.write('Error executing adb shell')\n return 8\n\n if err != 0:\n sys.stderr.write('Cannot push the file, '\n '%s, error %d' % (dst, err))\n return 9\n return 0",
"def cp(src_filename, dst_filename):\n src_is_remote = is_remote_path(src_filename)\n dst_is_remote = is_remote_path(dst_filename)\n if src_is_remote == dst_is_remote:\n return auto(copy_file, src_filename, dst_filename)\n filesize = auto(get_filesize, src_filename)\n if src_is_remote:\n with open(dst_filename, 'wb') as dst_file:\n return remote(send_file_to_host, src_filename, dst_file, filesize,\n xfer_func=recv_file_from_remote)\n with open(src_filename, 'rb') as src_file:\n return remote(recv_file_from_host, src_file, dst_filename, filesize,\n xfer_func=send_file_to_remote)",
"def copy_files(dir_loc, dir_name, src_host, dest_host, username=\"ubuntu\"):\n dir_path = dir_loc + dir_name\n #First src -> local\n os.system(\"scp -r {}@{}:{} . >/dev/null 2>&1\".format(username, src_host, dir_path))\n #Then local -> dest\n os.system(\"scp -r ./{} {}@{}:{} >/dev/null 2>&1\".format(dir_name, username, dest_host, dir_path))\n #rm local\n os.system(\"rm -rf ./{} >/dev/null 2>&1\".format(dir_name))",
"def upload(src, dest_bucket, dest_object):\n # TODO\n pass",
"def copyfile(src, dst, **kwargs):\n norm_src = ntpath.normpath(src)\n norm_dst = ntpath.normpath(dst)\n\n if not is_remote_path(norm_src):\n raise ValueError(\"src must be an absolute path to where the file should be copied from.\")\n\n if not is_remote_path(norm_dst):\n raise ValueError(\"dst must be an absolute path to where the file should be copied to.\")\n\n src_host = ntpath.splitdrive(norm_src)[0].split(\"\\\\\")[2]\n dst_host = ntpath.splitdrive(norm_dst)[0].split(\"\\\\\")[2]\n if src_host.lower() != dst_host.lower():\n raise ValueError(\"Cannot copy a file to a different host than the src.\")\n\n with open_file(norm_src, mode=\"rb\", share_access=\"r\", buffering=0, **kwargs) as src_fd:\n with SMBFileTransaction(src_fd) as transaction_src:\n ioctl_request(\n transaction_src,\n CtlCode.FSCTL_SRV_REQUEST_RESUME_KEY,\n flags=IOCTLFlags.SMB2_0_IOCTL_IS_FSCTL,\n output_size=32,\n )\n\n resume_response = SMB2SrvRequestResumeKey()\n resume_response.unpack(transaction_src.results[0])\n resume_key = resume_response[\"resume_key\"].get_value()\n\n chunks = []\n offset = 0\n while offset < src_fd.fd.end_of_file:\n copychunk_struct = SMB2SrvCopyChunk()\n copychunk_struct[\"source_offset\"] = offset\n copychunk_struct[\"target_offset\"] = offset\n copychunk_struct[\"length\"] = min(MAX_COPY_CHUNK_SIZE, src_fd.fd.end_of_file - offset)\n\n chunks.append(copychunk_struct)\n offset += MAX_COPY_CHUNK_SIZE\n\n with open_file(norm_dst, mode=\"wb\", share_access=\"r\", buffering=0, **kwargs) as dst_fd:\n for i in range(0, len(chunks), MAX_COPY_CHUNK_COUNT):\n batch = chunks[i : i + MAX_COPY_CHUNK_COUNT]\n with SMBFileTransaction(dst_fd) as transaction_dst:\n copychunkcopy_struct = SMB2SrvCopyChunkCopy()\n copychunkcopy_struct[\"source_key\"] = resume_key\n copychunkcopy_struct[\"chunks\"] = batch\n\n ioctl_request(\n transaction_dst,\n CtlCode.FSCTL_SRV_COPYCHUNK_WRITE,\n flags=IOCTLFlags.SMB2_0_IOCTL_IS_FSCTL,\n output_size=12,\n input_buffer=copychunkcopy_struct,\n )\n\n for result in transaction_dst.results:\n copychunk_response = SMB2SrvCopyChunkResponse()\n copychunk_response.unpack(result)\n if copychunk_response[\"chunks_written\"].get_value() != len(batch):\n raise OSError(\n f\"Failed to copy all the chunks in a server side copyfile: '{norm_src}' -> '{norm_dst}'\"\n )",
"async def push(self, src, dest, mode, progress=None):\n exists, timestamp, total_size = await get_running_loop().run_in_executor(None, _get_src_info, src)\n\n if not exists:\n raise FileNotFoundError(\"Can't find the source file {}\".format(src))\n\n sent_size = 0\n\n # SEND\n mode = mode | S_IFREG\n args = \"{dest},{mode}\".format(dest=dest, mode=mode)\n await self._send_str(Protocol.SEND, args)\n\n # DATA\n async with aiofiles.open(src, 'rb') as stream:\n while True:\n chunk = await stream.read(self.DATA_MAX_LENGTH)\n if not chunk:\n break\n\n sent_size += len(chunk)\n await self._send_length(Protocol.DATA, len(chunk))\n await self.connection.write(chunk)\n\n if progress is not None:\n progress(src, total_size, sent_size)\n\n # DONE\n await self._send_length(Protocol.DONE, timestamp)\n await self.connection._check_status()",
"def push_to_hdfs(self, src, dst):\n self.logger.info(\"push_to_hdfs starts\")\n os.system(\"hadoop fs -cp %s %s\" % (src, dst))\n #shutil.copy(src, dst)\n self.logger.info(\"push_to_hdfs finished\")\n pass",
"def copy_s3_bucket(src_bucket_name, src_bucket_secret_key, src_bucket_access_key,\n dst_bucket_name, dst_bucket_secret_key, dst_bucket_access_key):\n with cd(env.remote_path):\n tmp_dir = \"s3_tmp\"\n sudo('rm -rf %s' % tmp_dir, warn_only=True, user=env.remote_user)\n sudo('mkdir %s' % tmp_dir, user=env.remote_user)\n sudo('s3cmd --recursive get s3://%s/upload/ %s --secret_key=%s --access_key=%s' % (\n src_bucket_name, tmp_dir, src_bucket_secret_key, src_bucket_access_key),\n user=env.remote_user)\n sudo('s3cmd --recursive put %s/ s3://%s/upload/ --secret_key=%s --access_key=%s' % (\n tmp_dir, dst_bucket_name, dst_bucket_secret_key, dst_bucket_access_key),\n user=env.remote_user)\n\n sudo('s3cmd setacl s3://%s/upload --acl-public --recursive --secret_key=%s --access_key=%s' % (\n dst_bucket_name, dst_bucket_secret_key, dst_bucket_access_key),\n user=env.remote_user)\n # cleanup\n sudo('rm -rf %s' % tmp_dir, warn_only=True, user=env.remote_user)",
"def upload(self,local_file_path,remote_file_path):\n if os.path.isdir(local_file_path):\n return self.upload_dir(local_file_path,remote_file_path)\n with self._get_sftp() as sftp:\n logging.info(\"Upload %r=>%r\" % (local_file_path, remote_file_path))\n sftp.put(local_file_path,remote_file_path)",
"def copyfile(self, src, dst):\n self.logger.debug('Copying file %s to %s.', src, dst)\n shutil.copy2(src, dst)",
"def download(self, src, dst):\n pass",
"def copy_to(self, src_file, dest_file, user='root'):\n args = ['rsync', '-qrLptH', '-e', 'ssh ' + ' '.join(self.ssh_opts)]\n args.extend([src_file, user + '@' + self.ip + ':' + dest_file])\n self.__call_subprocess(args, allowed_retvals=[0, 24])",
"def sync(src, dst, src_bucket=False):\n if src_bucket is False:\n system(\"aws s3 sync {src} s3://{dst}\".format(src=src, dst=dst))\n print(\n \"Local directory '{src}' and S3 bucket '{dst}' synced successfully\".format(src=src, dst=dst))\n else:\n system(\"aws s3 sync s3://{src} s3://{dst}\".format(src=src, dst=dst))\n print(\n \"S3 buckets '{src}' and '{dst}' synced successfully\".format(src=src, dst=dst))",
"def copy(self, src, dst, label=None):\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n shutil.copyfile(src, os.path.join(self.chroot, dst))",
"def rsync(self, src = None, dst = None,\n persistent_name = None,\n persistent_dir = '/persistent.tcf.d'):\n target = self.target\n target.shell.run(\"mkdir -p /mnt/%s\" % persistent_dir)\n # upload the directory to the persistent area\n if persistent_name == None:\n assert src != None, \\\n \"no `src` parameter is given, `persistent_name` must \" \\\n \"then be specified\"\n persistent_name = os.path.basename(src)\n if src != None:\n target.report_info(\n \"rsyncing %s to target's persistent area /mnt%s/%s\"\n % (src, persistent_dir, persistent_name))\n target.shcmd_local(\n # don't be verbose, makes it too slow and timesout when\n # sending a lot of files\n \"time rsync -aAX --numeric-ids --delete\"\n \" --port %%(rsync_port)s \"\n \" %s/. %%(rsync_server)s::rootfs/%s/%s\"\n % (src, persistent_dir, persistent_name))\n target.testcase._targets_active()\n if dst != None:\n # There is a final destination specified, so now, in the\n # target, make a copy from the persistent area to the final\n # destination\n parent_dirs = os.path.dirname(dst)\n if parent_dirs != '':\n target.shell.run(\"mkdir -p /mnt/%s\" % parent_dirs)\n target.shell.run(\n # don't be verbose, makes it too slow and timesout when\n # sending a lot of files\n \"time rsync -aAX --delete /mnt/%s/%s/. /mnt/%s\"\n % (persistent_dir, persistent_name, dst))",
"def ftp_copy(addr, username, password, src_dir, tgt_dir, fnames):\n temp_binary = Path('/tmp/temp_binary_stream.bin')\n with ftplib.FTP(addr) as ftp:\n ftp.login(user=username, passwd=password)\n for subdir in tgt_dir.split('/'):\n if ftp_directory_exists(ftp, subdir):\n ftp.cwd(subdir)\n else:\n ftp.mkd(subdir)\n ftp.cwd(subdir)\n ftp.cwd('/')\n filelist = []\n ftp.retrlines('LIST ' + src_dir, filelist.append)\n filelist = [f.split()[-1] for f in filelist]\n for f in filelist:\n if f in fnames:\n L.info('Copying {}'.format(f))\n # Change directory to src_dir\n ftp.cwd(src_dir)\n with temp_binary.open('wb') as io:\n ftp.retrbinary(\"RETR \" + f, io.write)\n # Change to target dir and upload the file there\n ftp.cwd('/')\n ftp.cwd(tgt_dir)\n with temp_binary.open('rb') as io:\n ftp.storbinary(\"STOR \" + f, io)\n ftp.cwd('/')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
r"""Creates an SSH tunnel from the TCP port (``lport``) of the local machine (``lhost``, defaults to ``"localhost"``, but it can be any IP you can ``bind()``) to the remote TCP port (``dport``) of the destination machine (``dhost``, defaults to ``"localhost"``, which means this remote machine). The returned
|
def tunnel(self, lport, dport, lhost = "localhost", dhost = "localhost"):
opts = ["-L", "[%s]:%s:[%s]:%s" % (lhost, lport, dhost, dport)]
return SshTunnel(ShellSession(self.popen((), opts), self.encoding))
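For concreteness, the ssh -L argument this method assembles can be previewed directly from the format string above; the values below are arbitrary examples.

lport, dport, lhost, dhost = 6379, 5432, "localhost", "db.internal"
opts = ["-L", "[%s]:%s:[%s]:%s" % (lhost, lport, dhost, dport)]
print(opts)  # ['-L', '[localhost]:6379:[db.internal]:5432']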
|
[
"def ssh_port_forward(context, port, host=None, local_port=None):\n\n # Load the SSH config\n ssh_config = aws_infrastructure.tasks.ssh.SSHConfig.load(ssh_config_path=ssh_config_path)\n\n # Remote port is required\n remote_port = int(port)\n\n # If no remote host is provided, use 'localhost'\n if host:\n remote_host = host\n else:\n remote_host = 'localhost'\n\n # If no local port is provided, use the same as the remote port\n if local_port:\n local_port = int(local_port)\n else:\n local_port = remote_port\n\n # Connect via SSH\n with aws_infrastructure.tasks.ssh.SSHClientContextManager(ssh_config=ssh_config) as ssh_client:\n # Initiate port forwarding\n with aws_infrastructure.tasks.ssh.SSHPortForwardContextManager(\n ssh_client=ssh_client,\n local_port=local_port,\n remote_host=remote_host,\n remote_port=remote_port\n ) as port_forward:\n port_forward.serve_forever()",
"def _create_ssh_tunnel(self, kernel_channel, local_port, remote_port, remote_ip, server, port, key):\n channel_name = kernel_channel.value\n self.log.debug(\"Creating SSH tunnel for '{}': 127.0.0.1:'{}' to '{}':'{}'\"\n .format(channel_name, local_port, remote_ip, remote_port))\n try:\n process = self._spawn_ssh_tunnel(kernel_channel, local_port, remote_port, remote_ip, server, port, key)\n self.tunnel_processes[channel_name] = process\n except Exception as e:\n self.log_and_raise(http_status_code=500, reason=\"Could not open SSH tunnel for port {}. Exception: '{}'\"\n .format(channel_name, e))",
"def _tunnel_to_port(self, kernel_channel, remote_ip, remote_port, server, port=ssh_port, key=None):\n local_port = self.select_ports(1)[0]\n self._create_ssh_tunnel(kernel_channel, local_port, remote_port, remote_ip, server, port, key)\n return local_port",
"def port_forward(host_port_pair: str, host_port: int) -> int:\n\n ssh_prefix = get_ssh_prefix(host_port_pair)\n\n # Allow a tunnel to be established.\n subprocess.run(ssh_prefix + ['echo', 'true'], check=True)\n\n forward_cmd = [\n '-O',\n 'forward', # Send SSH mux control signal.\n '-R',\n '0:localhost:%d' % host_port,\n '-v', # Get forwarded port info from stderr.\n '-NT' # Don't execute command; don't allocate terminal.\n ]\n forward_proc = subprocess.run(ssh_prefix + forward_cmd,\n capture_output=True,\n check=False,\n text=True)\n if forward_proc.returncode != 0:\n raise Exception(\n 'Got an error code when requesting port forwarding: %d' %\n forward_proc.returncode)\n\n output = forward_proc.stdout\n parsed_port = int(output.splitlines()[0].strip())\n logging.debug('Port forwarding established (local=%d, device=%d)',\n host_port, parsed_port)\n return parsed_port",
"def __init__(self, ssh_host, username, password, ssh_port=22):\n \n log.info(\"Initializing SSH tunnel to {0}:{1}\".format(ssh_host, ssh_port))\n \n # A flag we can use to abort all port forwards while in transfer mode\n self.__shut_down = threading.Event()\n \n # Remember the tunnel threads we launch\n self.__tunnel_servers = []\n \n # Set up the TCP tunnel to the remote end\n self.__transport = paramiko.Transport((ssh_host, ssh_port))\n self.__transport.connect(hostkey=None, username=username, password=password, pkey=None)",
"def createTunnel(self):\n self.ssh.createTunnel()",
"def _tunnel_to_kernel(self, connection_info, server, port=ssh_port, key=None):\n cf = connection_info\n\n lports = self.select_ports(5)\n\n rports = cf['shell_port'], cf['iopub_port'], cf['stdin_port'], cf['hb_port'], cf['control_port']\n\n channels = KernelChannel.SHELL, KernelChannel.IOPUB, KernelChannel.STDIN, \\\n KernelChannel.HEARTBEAT, KernelChannel.CONTROL\n\n remote_ip = cf['ip']\n\n if not tunnel.try_passwordless_ssh(server + \":\" + str(port), key):\n self.log_and_raise(http_status_code=403, reason=\"Must use password-less scheme by setting up the \"\n \"SSH public key on the cluster nodes\")\n\n for lp, rp, kc in zip(lports, rports, channels):\n self._create_ssh_tunnel(kc, lp, rp, remote_ip, server, port, key)\n\n return tuple(lports)",
"def get_random_lport():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('127.0.0.1', 0))\n addr, localport = sock.getsockname()\n sock.close()\n return localport",
"def create_tunnel():\n if utils.print_debug_messages(args):\n print('Connecting to {0} via SSH').format(instance)\n\n cmd = ['ssh']\n if args.zone:\n cmd.extend(['--zone', args.zone])\n port_mapping = 'localhost:' + str(args.port) + ':localhost:8080'\n if os.name == 'posix':\n # The '-o' flag is not supported by all SSH clients (notably,\n # PuTTY does not support it). To avoid any potential issues\n # with it, we only add that flag when we believe it will\n # be supported. In particular, checking for an os name of\n # 'posix' works for both Linux and Mac OSX, which do support\n # that flag.\n cmd.extend([\n '--ssh-flag=-o',\n '--ssh-flag=LogLevel=' + args.ssh_log_level])\n cmd.extend([\n '--ssh-flag=-4',\n '--ssh-flag=-N',\n '--ssh-flag=-L',\n '--ssh-flag=' + port_mapping])\n cmd.append('datalab@{0}'.format(instance))\n if args.internal_ip:\n cmd.extend(['--internal-ip'])\n return gcloud_compute(args, cmd, wait=False)",
"def connect(cls, host, port):\n return cls(socket.create_connection((host, port)))",
"def test_localToRemoteForwarding(self):\n localPort = self._getFreePort()\n process = ConchTestForwardingProcess(localPort, 'test\\n')\n d = self.execute('', process,\n sshArgs='-N -L%i:127.0.0.1:%i'\n % (localPort, self.echoPort))\n d.addCallback(self.assertEqual, 'test\\n')\n return d",
"def forward(local_port, pid):\n return _adb_command(\"forward tcp:{} jdwp:{}\".format(local_port, pid))",
"def add_vxlan_port(self, name, remote_ip,\n local_ip=None, key=None, ofport=None):\n self.add_tunnel_port(name, 'vxlan', remote_ip,\n local_ip=local_ip, key=key, ofport=ofport)",
"def test_remoteToLocalForwarding(self):\n localPort = self._getFreePort()\n process = ConchTestForwardingProcess(localPort, 'test\\n')\n d = self.execute('', process,\n sshArgs='-N -R %i:127.0.0.1:%i'\n % (localPort, self.echoPort))\n d.addCallback(self.assertEqual, 'test\\n')\n return d",
"def add_forward(self, host_port, guest_port):\n raise NotImplementedError()",
"def new_remote(cls, gateway, hostport=None): \r\n if hostport is None: \r\n host, port = ('', 0) # XXX works on all platforms? \r\n else: \r\n host, port = hostport \r\n socketserverbootstrap = py.code.Source(\r\n mypath.dirpath('script', 'socketserver.py').read('rU'), \"\"\"\r\n import socket\r\n sock = bind_and_listen((%r, %r)) \r\n port = sock.getsockname()\r\n channel.send(port) \r\n startserver(sock)\r\n \"\"\" % (host, port)\r\n ) \r\n # execute the above socketserverbootstrap on the other side\r\n channel = gateway.remote_exec(socketserverbootstrap)\r\n (realhost, realport) = channel.receive()\r\n #gateway._trace(\"new_remote received\" \r\n # \"port=%r, hostname = %r\" %(realport, hostname))\r\n return py.execnet.SocketGateway(host, realport)",
"def _create_tunnel(name, ip, gre_local, gre_remote, route_dst=None):\n\n logging.debug(\"Creating %s interface.\", name)\n _ipr.link(\"add\", ifname=name, kind=\"gre\",\n gre_local=gre_local,\n gre_remote=gre_remote,\n gre_ttl=255)\n\n logging.debug(\"Assigning %s address to %s interface.\", ip, name)\n index = _ipr.link_lookup(ifname=name)[0]\n _ipr.link(\"set\", index=index, state=\"down\")\n _ipr.addr(\"add\", index=index, address=ip)\n _ipr.link(\"set\", index=index, state=\"up\")\n\n if route_dst is not None:\n # Adding new route\n _add_route(route_dst, name)",
"def new_socket(self, host, port):\n\n raise NotImplementedError()",
"def _open_tunnel(tunnel_cmd) -> None:\n\n # The following doesnt appear to work on ubuntu\n if not 'ubuntu' in platform.platform().lower():\n FNULL = open(os.devnull, 'w')\n s = subprocess.call('lsof -ti:3306',shell=True,stdout=FNULL,stderr=subprocess.STDOUT)\n # Only if the tunnel isnt already open, attempt to open a new one.\n if s == 1:\n os.system(tunnel_cmd)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the first position (``i``) at which a given threshold (``thresh``) is exceeded (for ``switch=1``), or not exceeded (for ``switch=0``), in a given list (``sequence``), evaluated over the moving windows produced by the ``moving_window`` module. For example, if you want to see in which week of a year the temperature first exceeds 15 degrees, this will do that for you.
|
def first_threshold(sequence, winsize, step, thresh, switch):
    # Assumes the moving_window module exposes a moving_window() function that
    # yields windows of length 'winsize' from 'sequence', advancing by 'step'.
    from moving_window import moving_window
    if switch not in (0, 1):
        raise ValueError("**ERROR** switch must be 0 or 1")
    try:
        chunks = moving_window(sequence, winsize, step)
    except TypeError:
        raise Exception("**ERROR** moving_window poorly specified**")
    for i, chunk in enumerate(chunks):
        if switch == 1:
            hit = all(value > thresh for value in chunk)
        else:
            hit = all(value < thresh for value in chunk)
        if hit:
            return i
    return -1
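For example, assuming moving_window yields consecutive windows of length winsize advanced by step elements, the temperature use case from the description looks like this (the data values are made up):

# Find the first 7-day window in which every temperature exceeds 15 degrees.
daily_temps = [10, 12, 11, 14, 13, 12, 15,   # window 0: 15 is not > 15
               16, 17, 18, 16, 19, 17, 16,   # window 1: all above 15
               14, 13, 15, 16, 12, 11, 10]
week = first_threshold(daily_temps, winsize=7, step=7, thresh=15, switch=1)
print(week)  # 1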
|
[
"def startFinder( xyBinStds, threshold ):\n\n for i in range(1,len(xyBinStds)):\n if (xyBinStds[i] - xyBinStds[i-1]) > threshold:\n return i",
"def get_thresh(data,threshold):\r\n for i in range(len(data)):\r\n if np.abs(data[i]) >= threshold:\r\n return i\r\n return math.nan",
"def localMinima(sequence) -> Tuple[List, List]:\n localminima = ([], [])\n if sequence[0] < sequence[1]: # boundary value\n localminima[0].append(0)\n localminima[1].append(sequence[0])\n for index, center in enumerate(sequence[1:-1], 1):\n before = sequence[index - 1]\n after = sequence[index + 1]\n if before > center < after or numpy.isnan(before) and center < after:\n localminima[0].append(index)\n localminima[1].append(center)\n if sequence[-1] < sequence[-2]: # boundary value\n localminima[0].append(len(sequence) - 1)\n localminima[1].append(sequence[-1])\n return localminima",
"def when_threshold_reached(self, max_nr_legs=100000):\n\t\tmax_i = None\n\t\tfor (i, w) in enumerate(self.when):\n\t\t\tif w[2] >= max_nr_legs:\n\t\t\t\tbreak\n\t\t\tmax_i = i\n\t\treturn max_i",
"def findIndex(sequence, function):\n return next(__builtin__.filter(lambda x: function(x[1]), enumerate(sequence)), None)[0]",
"def find_trigger_start(self, trigger_trace):\r\n\r\n if not self.is_trigger(trigger_trace):\r\n print('not trigger')\r\n return 0\r\n\r\n p0 = -1\r\n\r\n for i, p in enumerate(trigger_trace):\r\n if p != p0:\r\n if all(trigger_trace[i:i+10] != p0):\r\n return max(0, i)\r\n else:\r\n continue",
"def get_indexes(r_peak_times, window):\n\n indexes = []\n multiplier = 1\n for i in range(0, len(r_peak_times)):\n if r_peak_times[i] >= multiplier*window:\n indexes.append(i)\n multiplier += 1\n return indexes",
"def _detect_bout(source, window=10, threshold=0.08, bout_dis=80,\n bout_dur=300, show_flag=False, debug_flag=False) -> list:\n\n # calculate sd for window\n n = len(source)\n n_source = np.reshape(source[:n//window*window], (n//window, window))\n sd_source = np.std(n_source, axis=1)\n windowid = np.arange(len(sd_source))\n\n boutid = windowid[np.where(sd_source > threshold)]\n if (debug_flag): print(boutid)\n bout_list = []\n\n if (len(boutid) > 0):\n # detect continous bout (inter distance 100 windows)\n n_boutid = np.zeros(len(boutid)+2)\n n_boutid[0] = -1000\n n_boutid[-1] = boutid[-1] + 1000\n n_boutid[1:-1] = boutid\n ii = [i for i in range(len(n_boutid)-1) if (n_boutid[i+1] - n_boutid[i]) > bout_dis]\n last_window = n_boutid[ii]\n ii = [i for i in range(1, len(n_boutid)) if (n_boutid[i] - n_boutid[i-1]) > bout_dis]\n first_window = n_boutid[ii]\n\n for i in range(len(first_window)-1):\n if (last_window[i+1] - first_window[i] > bout_dur):\n bout_list.append((first_window[i], last_window[i+1]))\n if (debug_flag): print(bout_list)\n\n # show in time series\n if show_flag and (n < 5000):\n f = figure(width=950, height=200, y_range=[min(sd_source), max(sd_source)],\n title='standard deviation in window size {}, interdistance {}'.format(window, window*bout_dis))\n f.line(windowid, sd_source, color='navy')\n f.circle(boutid, sd_source[boutid], size=7, color='red', alpha=0.5)\n for i in range(len(bout_list)):\n bouts_start = Span(location=bout_list[i][0], dimension='height', line_color='green',\n line_dash='dashed', line_width=1.5)\n f.add_layout(bouts_start)\n bouts_stop = Span(location=bout_list[i][1], dimension='height', line_color='blue',\n line_dash='dashed', line_width=1.5)\n f.add_layout(bouts_stop)\n\n show(f)\n\n for i in range(len(bout_list)):\n bout_list[i] = (bout_list[i][0]*window, bout_list[i][1]*window)\n\n return bout_list",
"def minimum_value(sequence):\r\n low = sequence[0] # need to start with some value\r\n for i in sequence:\r\n if i < low:\r\n low = i\r\n return low",
"def remove_above_threshold(thresh, ls, running_ls_thresh=None, i=0):\n # Initialize running_ls_thresh in first pass\n if running_ls_thresh is None:\n running_ls_thresh = []\n\n if i == len(ls):\n return running_ls_thresh\n else:\n if ls[i] <= thresh:\n running_ls_thresh.append(ls[i])\n return remove_above_threshold(thresh, ls, running_ls_thresh, i + 1)",
"def get_filt(self, elem, ion, thresh = 1e-20):\n #Remember this is not in log...\n met = np.max(self.get_density(elem, ion), axis=1)\n ind = np.where(np.logical_and(met > thresh, np.max(self.get_observer_tau(elem, ion), axis=1) > 0.1))\n print(\"Sightlines with rotating absorption: \",np.size(ind))\n return ind",
"def compute_acceleration_floor_method(position,jump_number=0):\n \"\"\" First find the indices that are not good due to a mistake with alphapose\"\"\"\n confidences = []\n with open(\"Trajectory/Confidences.txt\") as file:\n for line in file:\n confidences.append(float(line))\n confidences = np.array(confidences)\n good_values = np.where(confidences >2)\n outliers = np.where(confidences <=2)\n\n index_floor_before_jump = 0\n index_floor_after_jump = 0\n\n # Keep only the indices that are correctly detected with Alphapose\n indices_kept_pos = []\n for index1 in range(0,len(position)):\n for index2 in good_values[0]:\n if index1==index2:\n indices_kept_pos.append(index1)\n\n \"\"\" Select the range 0:30 floor the floor position and find the maximum in the trajectory\n Compute the distance from floor to maximum \"\"\"\n indices_values = 30\n floor = np.median(position[0:indices_values][np.where(np.array(indices_kept_pos)<indices_values)])\n\n total_max_index = scipy.signal.argrelmax(position[indices_kept_pos],order = len(position))[0] # Global maxium\n\n total_max_value = position[total_max_index]\n distance_floor_max = abs(total_max_value-floor)\n\n \"\"\" The threshold is useful to select the floor indices even if there is noise\"\"\"\n threshold = 0.15*distance_floor_max\n index_floor = np.where((position > floor-threshold) & (position < floor+threshold))[0]\n\n position[index_floor] = floor\n\n jump_positions = scipy.signal.argrelmax(position[indices_kept_pos],order = 10)[0]\n min_positions = scipy.signal.argrelmax(-position[indices_kept_pos],order = 10)[0]\n\n\n # We also need to remove the points that are not height enough (false true)\n index_to_delete = np.where(position[jump_positions] < floor+threshold*2)\n jump_positions = np.delete(jump_positions,index_to_delete) # remove the indices of the values that were wrongly detected\n\n # We have the maxima. Now we need to compute the values for the jump\n air_fly_time = np.where(position > floor)\n # First we get the index of the first element on the floor and take the next one, same for after the jump\n index_floor_before_jump = index_floor[np.where(index_floor < jump_positions[jump_number])][-1] +1\n index_floor_after_jump = index_floor[np.where(index_floor > jump_positions[jump_number])][0] -1\n\n # We can now fit a polynom of degree two to the curve.\n index_fly = range(index_floor_before_jump,index_floor_after_jump+1)\n\n return index_fly,good_values",
"def find_top_left(paragraph_list, prev_top_left_x_val):\n paragraph_list = [paragraph for paragraph in paragraph_list if paragraph['bounding_box']['top_left']['x'] >= prev_top_left_x_val ]\n x_val_list = [ paragraph['bounding_box']['top_left']['x'] for paragraph in paragraph_list[:5]]\n if not x_val_list:\n return None\n\n min_x = min(x_val_list)\n top_left_val = x_val_list.index(min_x)\n return x_val_list.index(min_x)",
"def next_example(w, b, start_i):\n for i in list(range(start_i, n)) + list(range(0, start_i)):\n if not classified_correctly(w, b, i):\n return i\n return -1",
"def calculate_trajectory_cutoff(trajectories, window):\n ma = np.mean(rolling_window(trajectories, window), -1)\n ma_mean = np.mean(ma, axis=1)\n ma_std = np.std(ma, axis=1)\n cutoff = ma_mean + ma_std\n\n return cutoff.reshape(-1, 1)",
"def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None], \n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n x_start_stop[0] = x_start_stop[0] or 0\n x_start_stop[1] = x_start_stop[1] or img.shape[1]\n y_start_stop[0] = y_start_stop[0] or 0\n y_start_stop[1] = y_start_stop[1] or img.shape[0]\n\n # Compute the span of the region to be searched \n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n \n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0]*(1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1]*(1 - xy_overlap[1]))\n \n # Compute the number of windows in x/y\n nx_windows = np.int(xspan/nx_pix_per_step) - 1\n ny_windows = np.int(yspan/ny_pix_per_step) - 1\n \n window_list = []\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs*nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys*ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n \n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list",
"def check_landmarks(windows, landmarks):\n \n # The landmarks have to be inside windows and at least a margin away from the edges of that window \n margin = (windows[:, 2] // 10)\n # print(type(margin))\n # print(margin.shape)\n # print(margin.dtype)\n # print(margin)\n\n # windows_ is the smaller windows when we cut the margin off\n windows_ = np.zeros_like(windows)\n windows_[:, :2] = windows[:, :2] + margin.reshape((-1, 1))\n windows_[:, 2] = windows[:, 2] - 2 * margin\n # print(windows, windows_)\n\n # tic = time.time()\n result = []\n for i, window_ in enumerate(windows_):\n if np.count_nonzero(np.min(landmarks, 0) < window_[:2]) == 0 and np.count_nonzero(np.max(landmarks, 0) > window_[:2] + window_[2] - 1) == 0:\n result.append(windows[i])\n # print(window_)\n # print(windows[i])\n # toc = time.time()\n # print(toc - tic)\n\n return np.asarray(result)",
"def findPosition(self,i): # TEST\n return self.abstract.findPosition(self.notes[i])",
"def get_window(stats,g_speed,params):\n # Properties of trace\n s_0 = int((stats.npts-1)/2)\n dist = stats.sac.dist\n \n Fs = stats.sampling_rate\n n = stats.npts\n\n if g_speed is not None and params['hw'] is not None:\n \n # Find indices for window bounds\n ind_lo = int((dist/g_speed - params['hw'])*Fs) + s_0\n ind_hi = int((dist/g_speed + params['hw'])*Fs) + s_0\n ind_lo_n = ind_hi + int(params['sep_noise']*params['hw']*Fs)\n ind_hi_n = ind_lo_n + int(2*params['hw']*Fs)\n\n else:\n\n ind_lo = s_0\n ind_hi = n - 1\n ind_lo_n = s_0\n ind_hi_n = n-1\n \n \n # Checks..overlap, out of bounds\n scs = window_checks(ind_lo,ind_hi,ind_lo_n,ind_hi_n,n,params['win_overlap'])\n \n if scs:\n \n # Fill signal window\n win_signal = window(params['wtype'],n,ind_lo,ind_hi)\n # Fill noise window\n win_noise = window(params['wtype'],n,ind_lo_n,ind_hi_n)\n \n \n return win_signal, win_noise, scs\n\n else:\n return [],[],scs"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Draws the mean ``mu`` and its standard deviation ``std`` against ``x``, with an optional ``title``.
|
def draw_mean_std(x, mu, std, title = None):
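The snippet above ends at the signature; what follows is a minimal sketch of a plausible body, assuming matplotlib and that std is meant to be drawn as a shaded band of plus/minus one standard deviation around the mean. It is an illustration, not the original implementation.

import matplotlib.pyplot as plt
import numpy as np

def draw_mean_std(x, mu, std, title=None):
    # Plot the mean curve and shade +/- one standard deviation around it.
    x, mu, std = np.asarray(x), np.asarray(mu), np.asarray(std)
    plt.plot(x, mu, label="mean")
    plt.fill_between(x, mu - std, mu + std, alpha=0.3, label="+/- 1 std")
    if title is not None:
        plt.title(title)
    plt.legend()
    plt.show()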
|
[
"def gaussian(x, mean, std):\n return (1/(std*np.sqrt(2*np.pi))) * np.exp(-0.5*np.square((x-mean)/std))",
"def normalize(X, mu=None, stdev=None):\n ### START YOUR CODE ###\n if mu == None:\n mu = np.mean(X)\n if stdev == None:\n stdev = np.std(X, ddof=1)\n X1 = (X - mu)/stdev\n ### END YOUR CODE ###\n \n return X1,mu,stdev",
"def gaussian_distribution(mean, stdev, num_pts=50):\n xstart = mean - (4.0 * stdev)\n xend = mean + (4.0 * stdev)\n x = np.linspace(xstart,xend,num_pts)\n y = (1.0/np.sqrt(2.0*np.pi*stdev*stdev)) * np.exp(-1.0 * ((x - mean)**2)/(2.0*stdev*stdev))\n return x,y",
"def t_statistic(x1,x2,x3,x4,mu):\r\n return (sample_mean(x1,x2,x3,x4) - mu) / standard_error(x1,x2,x3,x4)",
"def plot_std(data, sensor_cols, setting_cols):\n data[sensor_cols + setting_cols].std().plot(kind='bar', title=\"Feature STD\")\n plt.show()",
"def gaussian( x, mu, var):\n\treturn np.exp(-np.power(x - mu, 2.) / (2 * np.power(var, 2.)))",
"def normal_x(self) -> float:\n pass",
"def profile_swath(data, val, npix=3, ax=0):\n nrow, ncol = data.shape\n fig = plt.figure(figsize=(11,8.5))\n #im = plt.imshow(data, cmap=plt.cm.jet)\n\n if ax == 1:\n plt.axvline(color='gray',linestyle='dashed')\n data = data[:,val-npix:val+npix]\n else:\n plt.axhline(color='gray',linestyle='dashed')\n data = data[val-npix:val+npix,:]\n\n swath_mean = np.mean(data, axis=ax)\n #swath_median = np.median(data, axis=ax)\n swath_std = np.std(data, axis=ax)\n\n ax = fig.add_subplot(111)\n ax.plot(swath_mean, 'k-', lw=2, label='mean')\n ax.plot(swath_mean + swath_std, 'k--', label='+/- 1std')\n ax.plot(swath_mean - swath_std, 'k--')\n\n plt.show()\n\n return swath_mean",
"def standard_2dnormal(x, y, _sigma):\n return np.exp(-0.5 / _sigma ** 2 * (x ** 2 + y ** 2)) / (2 * np.pi * _sigma ** 2)",
"def plot_gmm_solution(X, mu, sigma, title='', ax=None):\n ps2_test = False\n if ax is None:\n ps2_test = True\n fig, ax = plt.subplots(figsize=(6, 6))\n ax.scatter(X[:, 0], X[:, 1], s=50, c='tab:blue')\n # ax.scatter(mu[:, 0].A1, mu[:, 1].A1, c='r', s=150, marker='x',lw=2)\n ax.scatter(np.ravel(mu[:, 0]), np.ravel(mu[:, 1]), c='r', s=150, marker='x', lw=2)\n t = np.linspace(0, 2 * np.pi, 100)\n for i in range(np.shape(mu)[0]):\n u = mu[i, 0] # x-position center\n v = mu[i, 1] # y-position center\n\n p = .9\n s = -2 * np.log(1 - p)\n #print(sigma)\n D, V = np.linalg.eig(sigma[i] * s)\n a = (V * np.sqrt(D)) @ [np.cos(t), np.sin(t)]\n ax.plot(a[0, :] + u, a[1, :] + v, c='g', lw=2)\n\n ax.set_title(title)\n ax.grid(color='lightgray', linestyle='--')\n custom_lines = [Line2D([0], [0], color='tab:blue', lw=1, marker='o'),\n Line2D([0], [0], color='g', lw=4),\n Line2D([0], [0], color='r', lw=1, marker='x')]\n ax.legend(custom_lines, ['Data points', 'GMM Covariance', 'Mean vectors'])\n if ps2_test:\n plt.show()",
"def gaussian(self, mu, sigma, x):\n return np.exp(- ((mu - x) ** 2) / (sigma ** 2) / 2.0) / np.sqrt(2.0 * np.pi * (sigma ** 2))",
"def standard_units(nums):\n \n return (nums - np.mean(nums))/np.std(nums)",
"def std(self) -> float:\n return math.sqrt(self.var())",
"def stddev(self):\n m = self.mean()\n n = np.sum(self.counts)\n dx = self.axis().center - m \n return np.sqrt(np.sum(self.counts*dx**2)/n)",
"def getSTD(self):\r\n return np.std(self.members)",
"def std_error(self, x, y):\n std, _, binnum = binned_statistic(x, y, statistic='std', bins=self.bin_edges)\n num_points = np.array([len(binnum[binnum==i+1]) for i in range(len(self))])\n\n return std / np.sqrt(num_points)",
"def gaussian(x, mu, sigma):\r\n return exp(- ((x - mu) ** 2) / (2 * (sigma ** 2)))",
"def gauss(sigma):\n\n return Gx, x",
"def MSTD(X , m , M , step , n_runs , max_iter = 2000 , n_jobs = -1):\n fig, ax = plt.subplots(1 , 2 , figsize = (20 , 7))\n mean = []\n for i in tqdm(range(m , M+step , step)):\n #for i in range(m , M+step , step): #uncomment if you don't want to use tqdm (and comment the line above !)\n s = StabilizedICA(i , max_iter ,n_jobs)\n Index,*_ = s.fit(X , n_runs)\n mean.append(np.mean(Index))\n ax[0].plot(range(1 , len(Index)+1) , Index , 'k')\n \n ax[1].plot(range(m , M+step , step) , mean) \n \n ax[1].set_title(\"Mean stability\")\n ax[1].set_xlabel(\"Number of components\")\n ax[0].set_title(\"Index stability distribution\")\n ax[0].set_xlabel(\"Number of components\") \n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This should return a button with appropriate classes for an action in a given context. This will typically be called by a get_buttons function, which will call get_actions to get the actions and then make the corresponding buttons.
|
def make_button(action, id, context='std', rectype='quest', eventid=0, questid=0):
# Below is result for call to link question to event
session = current.session
stdclass = "btn btn-primary btn-xs btn-group-xs"
warnclass = "btn btn-warning btn-xs btn-group-xs"
successclass = "btn btn-success btn-xs btn-group-xs"
if action == 'Other':
stringlink = XML("ajax('" + URL('viewquest', 'agree', args=[id, 1]) + "' , ['quest'], ':eval')")
buttonhtml = TAG.INPUT(_TYPE='BUTTON', _class="btn btn-success btn-xs btn-group-xs", _onclick=stringlink, _VALUE="Review")
elif action == 'Disagree':
stringlink = XML("ajax('" + URL('viewquest', 'agree', args=[id, 2]) + "' , ['quest'], ':eval')")
buttonhtml = TAG.INPUT(_TYPE='BUTTON', _class="btn btn-danger btn-xs btn-group-xs", _onclick=stringlink, _VALUE="Disagree")
elif action == 'Approve':
stringlink = XML("ajax('" + URL('answer','quickanswer', args=[id, 0]) + "', ['quest'], ':eval')")
buttonhtml = TAG.INPUT(_TYPE='BUTTON', _class="btn btn-success btn-xs btn-group-xs", _onclick=stringlink, _VALUE="Approve")
elif action == 'Not Review':
stringlink = XML("ajax('" + URL('answer', 'quickanswer', args=[id, 1]) + "', ['quest'], ':eval')")
buttonhtml = TAG.INPUT(_TYPE='BUTTON', _class="btn btn-danger btn-xs btn-group-xs", _onclick=stringlink, _VALUE="Disapprove")
elif action == 'Review':
stringlink = XML(
"parent.location='" + URL('answer', 'answer_question', args=[id], extension='html') + "'")
buttonhtml = TAG.INPUT(_TYPE='BUTTON', _class=stdclass, _onclick=stringlink, _VALUE="Review")
elif action == 'Create_Action':
stringlink = XML("parent.location='" + URL('submit', 'new_question', args=['action'], extension='html') + "'")
buttonhtml = TAG.INPUT(_TYPE='BUTTON', _class=stdclass, _onclick=stringlink, _VALUE="Create Action")
else:
buttonhtml = XML("<p>Button not setup</p>")
return buttonhtml
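As described above, this helper is typically driven by a get_buttons-style caller; a minimal sketch of such a caller is shown below, where get_actions is a hypothetical helper that returns the action names valid for a record.

def get_buttons(quest_id, context='std'):
    # get_actions is assumed to return action names such as ['Approve', 'Review'].
    actions = get_actions(quest_id, context)
    return [make_button(action, quest_id, context) for action in actions]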
|
[
"def translateButtonsFromKupu(self, context, buttons):\n return_buttons = []\n\n for button in buttons:\n if button == 'save-button':\n try:\n if not context.checkCreationFlag():\n return_buttons.append('save')\n except AttributeError:\n pass\n elif button == 'bg-basicmarkup':\n pass\n elif button == 'bold-button':\n return_buttons.append('bold')\n elif button == 'italic-button':\n return_buttons.append('italic')\n elif button == 'bg-supsuper-button':\n pass\n elif button == 'subscript':\n return_buttons.append('sub')\n elif button == 'supscript':\n return_buttons.append('sup')\n elif button == 'bg-colorchooser':\n pass\n elif button == 'forecolor-button':\n return_buttons.append('forecolor')\n elif button == 'hilitecolor-button':\n return_buttons.append('backcolor')\n elif button == 'bg-justify':\n pass\n elif button == 'justifyleft-button':\n return_buttons.append('justifyleft')\n elif button == 'justifycenter-button':\n return_buttons.append('justifycenter')\n elif button == 'justifyright-button':\n return_buttons.append('justifyright')\n elif button == 'bg-list':\n pass\n elif button == 'list-ol-addbutton':\n return_buttons.append('numlist')\n elif button == 'list-ul-addbutton':\n return_buttons.append('bullist')\n elif button == 'definitionlist':\n pass\n elif button == 'bg-indent':\n pass\n elif button == 'outdent-button':\n return_buttons.append('outdent')\n elif button == 'indent-button':\n return_buttons.append('indent')\n elif button == 'bg-drawers':\n pass\n elif button == 'imagelibdrawer-button':\n return_buttons.append('image')\n elif button == 'linklibdrawer-button' or button == 'linkdrawer-button' or button == 'anchors-button':\n if 'link' not in return_buttons:\n return_buttons.append('link')\n elif button == 'embed-tab':\n return_buttons.append('media')\n elif button == 'manage-anchors-tab':\n return_buttons.append('anchor')\n elif button == 'toc-tab':\n pass\n elif button == 'tabledrawer-button':\n return_buttons.append('tablecontrols')\n elif button == 'bg-remove':\n pass\n elif button == 'removeimage-button':\n pass\n elif button == 'removelink-button':\n return_buttons.append('unlink')\n elif button == 'bg-undo':\n pass\n elif button == 'undo-button':\n return_buttons.append('undo')\n elif button == 'redo-button':\n return_buttons.append('redo')\n elif button == 'spellchecker':\n return_buttons.append('iespell')\n elif button == 'source':\n return_buttons.append('code')\n elif button == 'styles' or button == 'ulstyles' or button == 'olstyles':\n if 'style' not in return_buttons:\n return_buttons.append('style')\n elif button == 'zoom':\n return_buttons.append('fullscreen')\n else:\n if button not in return_buttons:\n return_buttons.append(button)\n return return_buttons",
"def _get_action_frame(self):\n return self.ActionFrame(self.master)",
"def actions(self):\n isinst = isinstance\n return [c.widget() for c in self.children() if isinst(c, WxAction)]",
"def workbenchButtons(workbench):\n clearList(menuList)\n clearList(buttonList)\n\n g = None\n uid = None\n actions = cpc.actionList()\n base = p.GetGroup(\"User\").GetGroup(workbench)\n cpc.defaultGroup(base)\n if base.GetBool(\"default\", 0):\n uid = base.GetString(\"default\")\n g = cpc.findGroup(base, uid)\n if g:\n commands = g.GetString(\"commands\")\n if commands:\n commands = commands.split(\",\")\n commands = menuCommands(base, commands)\n else:\n commands = []\n for cmd in commands:\n btn = buttonFactory()\n if cmd.startswith(\"CP_Collapse_\"):\n a = QtGui.QAction(btn)\n try:\n gUid = cmd.split(\"CP_Collapse_\", 1)[1]\n except IndexError:\n gUid = \"No_UID\"\n data = \",\".join([workbench, gUid, str(0)])\n a.setData(data)\n a.setText(\"Collapse\")\n a.setIcon(QtGui.QIcon(path + \"CommandPanelCollapse.svg\"))\n a.setToolTip(\"Collapse menu\")\n btn.setDefaultAction(a)\n mapperExpandCollapse.setMapping(btn, data)\n btn.clicked.connect(mapperExpandCollapse.map)\n elif cmd == \"CP_Separator\":\n btn.setEnabled(False)\n btn.setObjectName(\"CP_Separator\")\n elif cmd == \"CP_Spacer\":\n btn.setEnabled(False)\n btn.setObjectName(\"CP_Spacer\")\n elif cmd == \"CP_Menu\":\n menu = QtGui.QMenu()\n btn.setMenu(menu)\n btn.setIcon(QtGui.QIcon(\":/icons/freecad\"))\n # Themes support\n btn.setObjectName(\"qt_toolbutton_menubutton\")\n btn.setPopupMode(QtGui.QToolButton\n .ToolButtonPopupMode.MenuButtonPopup)\n btn.setToolTip(\"Empty menu\")\n elif cmd.startswith(\"CP_Menu_\"):\n menu = menuButton(workbench, base, cmd, btn, actions)\n btn.setMenu(menu)\n # Theme support\n btn.setObjectName(\"qt_toolbutton_menubutton\")\n btn.setPopupMode(QtGui.QToolButton\n .ToolButtonPopupMode.MenuButtonPopup)\n elif cmd in actions:\n btn.setDefaultAction(actions[cmd])\n if btn.icon().isNull():\n btn.setIcon(QtGui.QIcon(\":/icons/freecad\"))\n else:\n btn.setEnabled(False)\n btn.setToolTip(\"Command \" +\n cmd +\n \" is currently not available\")\n btn.setIcon(QtGui.QIcon(\":/icons/freecad\"))\n\n if p.GetString(\"Layout\") == \"Grid\" and btn.objectName() == \"CP_Spacer\":\n pass\n else:\n buttonList.append(btn)\n\n for m in menuList:\n m.triggered.connect(onMenuTriggered)\n\n return buttonList",
"def getAction( self, pointclicked ):\n if pointclicked is None:\n return \"None\"\n for i in range( len( self.buttons.getButtons() ) ):\n if self.buttons.getButtons()[i].clicked( pointclicked ):\n return self.buttons.getButtons()[i].getLabel()",
"def _create_buttons(self, share_button, move_buttons, jump_button, \n top_label):\n if top_label:\n self.top_label = Gtk.Label(label=top_label)\n self.top_label.set_use_markup(True)\n self.track_ref_for_deletion(\"top_label\")\n\n self.add_btn = SimpleButton(Gtk.STOCK_ADD, self.add_button_clicked)\n self.edit_btn = SimpleButton(Gtk.STOCK_EDIT, self.edit_button_clicked)\n self.del_btn = SimpleButton(Gtk.STOCK_REMOVE, self.del_button_clicked)\n self.track_ref_for_deletion(\"add_btn\")\n self.track_ref_for_deletion(\"edit_btn\")\n self.track_ref_for_deletion(\"del_btn\")\n\n self.add_btn.set_tooltip_text(self._MSG['add'])\n self.edit_btn.set_tooltip_text(self._MSG['edit'])\n self.del_btn.set_tooltip_text(self._MSG['del'])\n \n if share_button:\n self.share_btn = SimpleButton(Gtk.STOCK_INDEX, self.share_button_clicked)\n self.share_btn.set_tooltip_text(self._MSG['share'])\n self.track_ref_for_deletion(\"share_btn\")\n else:\n self.share_btn = None\n \n if move_buttons:\n self.up_btn = SimpleButton(Gtk.STOCK_GO_UP, self.up_button_clicked)\n self.up_btn.set_tooltip_text(self._MSG['up'])\n self.down_btn = SimpleButton(Gtk.STOCK_GO_DOWN, \n self.down_button_clicked)\n self.down_btn.set_tooltip_text(self._MSG['down'])\n self.track_ref_for_deletion(\"up_btn\")\n self.track_ref_for_deletion(\"down_btn\")\n else:\n self.up_btn = None\n self.down_btn = None\n\n if jump_button:\n self.jump_btn = SimpleButton(Gtk.STOCK_JUMP_TO, self.jump_button_clicked)\n self.track_ref_for_deletion(\"jump_btn\")\n self.jump_btn.set_tooltip_text(self._MSG['jump'])\n else:\n self.jump_btn = None\n\n hbox = Gtk.HBox()\n hbox.set_spacing(6)\n if top_label:\n hbox.pack_start(self.top_label, False, True, 0)\n hbox.pack_start(self.add_btn, False, True, 0)\n if share_button:\n hbox.pack_start(self.share_btn, False, True, 0)\n hbox.pack_start(self.edit_btn, False, True, 0)\n hbox.pack_start(self.del_btn, False, True, 0)\n if move_buttons:\n hbox.pack_start(self.up_btn, False, True, 0)\n hbox.pack_start(self.down_btn, False, True, 0)\n\n if self.jump_btn:\n hbox.pack_start(self.jump_btn, False, True, 0)\n hbox.show_all()\n self.pack_start(hbox, False, True, 0)\n\n if self.dbstate.db.readonly:\n self.add_btn.set_sensitive(False)\n self.del_btn.set_sensitive(False)\n if share_button:\n self.share_btn.set_sensitive(False)\n if jump_button and self.jump_btn:\n self.jump_btn.set_sensitive(False)\n if move_buttons:\n self.up_btn.set_sensitive(False)\n self.down_btn.set_sensitive(False)",
"def button_views(category=None, show_disabled=False):",
"def create_multi_action(self):\n return MultiAction(self._selenium_web_driver())",
"def actions_to_control(self, actions):\n raise NotImplementedError",
"def CreateButtons(self):\r\n \r\n # Build a couple of fancy and useless buttons \r\n okBmp = self.MainFrame.CreateBitmap(\"ok\")\r\n cancelBmp = self.MainFrame.CreateBitmap(\"file_error\")\r\n self.okButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_OK, okBmp, \"Ok\")\r\n self.cancelButton = buttons.ThemedGenBitmapTextButton(self, wx.ID_CANCEL, cancelBmp, \"Cancel\")",
"def create_buttons(self):\n self.log.info(__name__ + ': ' + 'def ' + self.create_buttons.__name__ + '(): ' + self.create_buttons.__doc__)\n\n for index, phrase in enumerate(self.phrases['menu_buttons']):\n _x = self.offset[0]\n _y = self.offset[1] + (self.button_y + self.size_font * 2) * index\n text = Text(self.font_obj, phrase, _x, _y, Colors.ORANGE)\n self.buttons.append(Button(_x, _y, self.textures['button'], text))",
"def create_buttons(self):\n self.create_button(\"ADD\", self.add_contact)\n self.create_button(\"EDIT\", self.edit, y=260)\n self.create_button(\"DELETE\", self.delete, y=210)\n self.create_button(\"VIEW\", self.view, y=160)\n self.create_button(\"EXIT\", self.exit_book, bg='tomato', x=300, y=320)\n self.create_button(\"RESET\", self.reset, y=310)",
"def get_by_type(cls, context, action_type):\n\n db_action = cls.dbapi.get_action_description_by_type(\n context, action_type)\n action = cls._from_db_object(cls(context), db_action)\n return action",
"def buttons(self, state):\n pass",
"def as_html(self):\n tag = 'div'\n attrs = {'class': 'pmd-card-actions'}\n content = ''\n for btn in self.items:\n if isinstance(btn, Button):\n content = text_concat(content, mark_safe(btn.as_html()))\n return render_tag(tag, attrs=attrs, content=mark_safe(content), )",
"def select_template_action(self,name,action):\n locator=npsp_lex_locators[\"gift_entry\"][\"actions_dropdown\"].format(name)\n self.selenium.click_element(locator)\n element=self.selenium.get_webelement(locator)\n status=element.get_attribute(\"aria-expanded\")\n if status==\"false\":\n self.selenium.wait_until_page_contains(\"Clone\") \n self.selenium.click_link(action)\n if action==\"Edit\" or action==\"Clone\":\n self.selenium.wait_until_page_contains(\"Gift Entry Template Information\")\n elif action==\"Delete\":\n self.selenium.wait_until_page_does_not_contain(name)",
"def get_actions_toolbar(self):\n return []",
"def _create_command_menu(self):\n f1 = urwid.Button('Jump', on_press=self.button_show_jump)\n f2 = urwid.Button('Sell', on_press=self.button_show_sell)\n f3 = urwid.Button('Buy', on_press=self.button_show_buy)\n f4 = urwid.Button('Upgrade', on_press=self.button_show_equip)\n f5 = urwid.Button('Galaxy', on_press=self.button_show_galaxy)\n f6 = urwid.Button('Locals', on_press=self.button_show_locals)\n f7 = urwid.Button('System', on_press=self.button_show_planet_info)\n f8 = urwid.Button('Market', on_press=self.button_show_market)\n f9 = urwid.Button('Status', on_press=self.button_show_status)\n f0 = urwid.Button('Cargo', on_press=self.button_show_cargo)\n buttons = [f1, f2, f3, f4, f5, f6, f7, f8, f9, f0]\n buttons = (urwid.AttrMap(b, 'button') for b in buttons)\n menu = urwid.Columns(buttons)\n menu.focus_position = 8\n return menu",
"def action_buttons(node: pipelines.Node):\n path = node.path()\n return [\n response.ActionButton(\n action=flask.url_for('mara_pipelines.run_page', path='/'.join(path[:-1]),\n with_upstreams=True, ids=path[-1]),\n label='Run with upstreams', icon='play',\n title=f'Run the task and all its upstreams in the pipeline \"{node.parent.id}\"'),\n response.ActionButton(\n action=flask.url_for('mara_pipelines.run_page', path='/'.join(path[:-1]),\n with_upstreams=False, ids=path[-1]),\n label='Run', icon='play',\n title=f'Run only this task, without upstreams')]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the points ``xs``, ``ys`` with labels drawn on each point
|
def plot(self, xs, ys, labels, colours=None):
plt.scatter(xs, ys, c=colours)
if labels is not None:
for label, x, y in zip(labels, xs, ys):
plt.annotate(
label,
xy=(x, y), xytext=(-30, 30),
textcoords='offset points', ha='right', va='bottom',
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
self.cl += 1
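A standalone illustration of the same annotation technique (scatter plus plt.annotate with offset labels); the data values are arbitrary.

import matplotlib.pyplot as plt

xs, ys, labels = [0.1, 0.5, 0.9], [1.0, 0.2, 0.7], ["a", "b", "c"]
plt.scatter(xs, ys, c=["red", "green", "blue"])
for label, x, y in zip(labels, xs, ys):
    plt.annotate(label, xy=(x, y), xytext=(-30, 30),
                 textcoords="offset points", ha="right", va="bottom",
                 arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0"))
plt.show()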
|
[
"def plot(X,Ys,labels,xlabel=\"\",ylabel=\"\",title=\"\"):\n for Y,label in zip(Ys,labels):\n plt.plot(X,Y,label=label)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.suptitle(title)\n plt.legend()\n plt.show()",
"def draw_points(self, pts_x, pts_y):\n pylab.clf()\n pylab.plot(pts_x, [1-y for y in pts_y], marker='o', color='r', ls='')\n pylab.xlim(-.05, 1.05)\n pylab.ylim(-.05, 1.05)\n pylab.axis('off')\n pylab.savefig(os.path.join(self.work_dir, 'points.png'),\n bbox_inches='tight')",
"def plot_pts(pts, pts_labels=None, plus1_format='co', minus1_format='yo',\n other_format='ko'):\n\n if pts_labels is None:\n plot(pts[:,1], pts[:,2], other_format)\n\n else:\n for i in xrange(len(pts_labels)):\n if pts_labels[i] == 1:\n plot(pts[i,1], pts[i,2], plus1_format)\n elif pts_labels[i] == -1:\n plot(pts[i,1], pts[i,2], minus1_format)\n else:\n plot(pts[i,1], pts[i,2], other_format)",
"def plot_points(points: list, path: list) -> None:\n x = [x[0] for x in points]\n y = [y[1] for y in points]\n\n plot.plot(x, y, '.', color='black') \n\n path_points = []\n for v in path:\n path_points.append(points[v])\n plot.text(points[v][0], points[v][1], str(v + 1), fontsize=8)\n\n data = np.array(path_points)\n plot.plot(data[:, 0], data[:, 1])\n\n plot.show()",
"def plot_point(points: Union[Point, List[Point]], display=True):\n if isinstance(points, Point):\n points = [points]\n\n for point in points:\n plt.plot(point.x, point.y, \"-o\")\n plt.axes().set_aspect(\"equal\", \"datalim\")\n if display:\n plt.show()",
"def visualize(X, Y):\n plt.plot(X, Y, \"bx\")\n plt.show()",
"def show(self):\n self._ax.coords[self.x].set_axislabel_position('b')\n self._ax.coords[self.y].set_axislabel_position('l')",
"def plotPoints(ptList, x1,x2,y1,y2):\r\n w = GraphWin(\"Point plot\", 500, 500)\r\n w.setCoords(x1, y1, x2, y2)\r\n xAxis = Line(Point(0,y1), Point(0,y2))\r\n xAxis.draw(w)\r\n yAxis = Line(Point(x1,0), Point(x2,0))\r\n yAxis.draw(w)\r\n r = (y2-y1)/100.0 #a radius for the point depending on the size of window\r\n for (x,y) in ptList:\r\n p = Point(x,y)\r\n c = Circle(p,r)\r\n c.setFill(\"blue\")\r\n c.draw(w)\r\n time.sleep(.5)\r\n p1 = w.getMouse()\r\n w.close()",
"def plot_positions(self):\n colors = ['k', 'b', 'g', 'r', 'm']\n positions = [self.tx_coords] + self.updates.get_rx_positions()\n for i in range(len(positions)):\n plt.plot(positions[i].long, positions[i].lat, 'x' + colors[i])\n\n # Get the actual target coords (sent from the Rxs for plotting).\n actual_target_coords = self.updates.get_actual_target_coords()\n plt.plot(actual_target_coords.long, actual_target_coords.lat, 'oc')\n\n # Call pause to render the changes.\n plt.pause(0.0000001)",
"def visual_graph(self, point_list):\n x = []\n y = []\n # # print len(e)\n for index in point_list:\n for i in index:\n x.append(i[0])\n y.append(i[2])\n plt.scatter(x, y, label=\"stars\", color=\"green\",\n marker=\"*\", s=50)\n plt.plot(x, y)\n plt.legend()\n plt.show()",
"def plotPoints(a):\n n = len(a)\n stddraw.setXscale(-1, n)\n stddraw.setPenRadius(1.0 / (3.0 * n))\n for i in range(n):\n stddraw.point(i, a[i])",
"def plot_points(coordAmp):\n xValues = coordAmp.loc[:, 'xPos 1'::8]\n yValues = coordAmp.loc[:, 'yPos 1'::8]\n plt.scatter(xValues, yValues)\n plt.show()",
"def plot_generated_data_points(features: np.array, targets: np.array) -> None:\n d2l.set_figsize((3.5, 2.5))\n d2l.plt.scatter(features[:, 1].asnumpy(), targets.asnumpy(), 1)\n d2l.plt.savefig(\"generated_data\")",
"def _plot_points(self, tags: bool = False):\n for point in self._data:\n if tags:\n tag_dot(self._canvas, *self._get_px(point), tag=str(point), radius=DOT_RADIUS_PX)\n else:\n dot(self._canvas, *self._get_px(point), radius=DOT_RADIUS_PX)",
"def plot(data_dict, x_data, y_data):\n data = featureFormat(data_dict, [x_data, y_data, 'poi'])\n\n for value in data:\n x = value[0]\n y = value[1]\n poi = value[2]\n color = 'blue' if poi else 'grey'\n plt.scatter(x, y, color=color)\n plt.xlabel(x_data)\n plt.ylabel(y_data)\n plt.show()",
"def display_2D_scatter_plot(dataset, title, xlabel, ylabel, labels = None):\n \n plt.figure()\n plt.scatter(dataset[:,0], dataset[:,1], c = labels)\n plt.suptitle(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.show()",
"def create_scatter_plot(self):\n xy = self.get_x_and_y_as_dict()\n x = xy[\"x\"]\n y = xy[\"y\"]\n plt.scatter(x, y)\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.title(\"Scatter plot of x and y values\")\n plt.savefig(f\"{self.save_directory}/task_2_scatter_plot.png\")",
"def plot_labeled_lines(points, *args):\n # Draw points and label them with their index number\n plot_lines(points, 'bo')\n for (label, p) in enumerate(points):\n plt.text(p.x, p.y, ' '+str(label))\n # Draw lines indicated by args\n style = 'bo-'\n for arg in args:\n if isinstance(arg, str):\n style = arg\n else: # arg is a list of indexes into points, forming a line\n Xs = [points[i].x for i in arg]\n Ys = [points[i].y for i in arg]\n plt.plot(Xs, Ys, style)\n plt.axis('scaled'); plt.axis('off'); plt.show()",
"def plot(self): \n # Forwards plot\n self.__plot_vals(self.xin, self.yin, self.__eval_forward)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots the centroids of the cluster
|
def plot_cluster(self, centroids):
self.plot(centroids[:, 0], centroids[:, 1], labels=None, colours=['g'] * centroids.shape[1])
|
[
"def plotClusters(data_points, centroids, labels):\r\n plt.scatter(data_points[:, 0], data_points[:, 1], c=labels)\r\n for i, _ in enumerate(centroids):\r\n label = \"Centroid \" + str(i)\r\n colors = [\"red\", \"green\", \"blue\"]\r\n plt.scatter(centroids[i][0], centroids[i][1], s=50,\r\n c=colors[i], label=label)\r\n plt.legend(loc=\"best\", fancybox=True)\r\n fig = plt.gcf()\r\n plt.show()\r\n directory = \"../images\"\r\n if not os.path.isdir(directory):\r\n os.makedirs(directory)\r\n fig.savefig(\"../images/clusters.png\")",
"def _annotate_centers(self, ax):\n try:\n # Plot the centroids as a white X\n centroids = self.clusterer.cluster_centers_\n except AttributeError:\n return\n\n ax.scatter(centroids[:, 0], centroids[:, 1],\n marker='x', s=169, linewidths=3,\n color='k', zorder=10)\n for i in range(self.n_clusters):\n try:\n ax.annotate(str(i),\n (centroids[i, 0], centroids[i, 1]),\n fontsize=24, xytext=(-20, 0),\n textcoords='offset points')\n except:\n pass",
"def plot_centroids(self, title=\"Centroids\", **kwargs):\n\n # create plot and setup the plot\n with post.plotting_context(title=title, **kwargs) as (fig, ax):\n # plot the finite element mesh\n self.plot_mesh(**dict(kwargs, ax=ax))\n\n # if the elastic centroid has been calculated\n if self.section_props.cx is not None:\n ax.scatter(\n self.section_props.cx,\n self.section_props.cy,\n edgecolors=\"r\",\n facecolors=\"none\",\n marker=\"o\",\n s=100,\n label=\"Elastic centroid\",\n )\n\n # if the shear centre has been calculated\n if self.section_props.x_se is not None:\n (x_s, y_s) = self.get_sc()\n ax.scatter(x_s, y_s, c=\"r\", marker=\"+\", s=100, label=\"Shear centre\")\n\n # if the global plastic centroid has been calculated\n if self.section_props.x_pc is not None:\n (x_pc, y_pc) = self.get_pc()\n ax.scatter(\n x_pc,\n y_pc,\n c=\"r\",\n marker=\"x\",\n s=100,\n label=\"Global plastic centroid\",\n )\n\n # if the principal plastic centroid has been calculated\n if self.section_props.x11_pc is not None:\n (x11_pc, y22_pc) = self.get_pc_p()\n ax.scatter(\n x11_pc,\n y22_pc,\n edgecolors=\"r\",\n facecolors=\"none\",\n marker=\"s\",\n s=100,\n label=\"Principal plastic centroid\",\n )\n\n # if the principal axis has been calculated\n if self.section_props.phi is not None:\n post.draw_principal_axis(\n ax,\n self.section_props.phi * np.pi / 180,\n self.section_props.cx,\n self.section_props.cy,\n )\n\n # display the legend\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n\n return ax",
"def plot_cluster_map(self, color_scheme='random'):\r\n plt.figure()\r\n plt.title('Cluster Map')\r\n if color_scheme=='random':\r\n #Randomly generate a color for each cluster. Usually provides more visual contrast between clusters.\r\n # np.random.seed(68454)\r\n colors = np.random.rand(self.cluster_model.n_clusters,3).astype('float32')\r\n color_map = colors[self.cluster_map.reshape(-1)].reshape(*self.cluster_map.shape, 3)\r\n plt.imshow(color_map)\r\n elif color_scheme=='classification':\r\n colors = (pltcolors.to_rgba_array(self.cluster_colors)*255).astype('uint8')[:,0:3]\r\n color_map = colors[self.cluster_map.reshape(-1)].reshape(*self.cluster_map.shape,3)\r\n # color_map = colors[self.cluster_map.reshape(-1)].reshape(*self.cluster_map.shape)\r\n # color_map = pltcolors.to_rgba_array(color_map)\r\n plt.imshow(color_map)\r\n elif color_scheme=='heatmap':\r\n #Simply use the cluster indexes as heatmap values for an easy way to make an image.\r\n plt.imshow(self.cluster_map)\r\n else:\r\n print('Invalid color scheme.')",
"def plot_clusters(self, clustered_data, file_path='visualization/clusters.png'):\n number_of_clusters = nx.number_connected_components(self.network)\n plt.clf()\n plt.title('Cluster affectation')\n color = ['r', 'b', 'g', 'k', 'm', 'r', 'b', 'g', 'k', 'm']\n for i in range(number_of_clusters):\n observations = []\n for observation, s in clustered_data:\n if s.any() == i:\n observations.append(observation)\n #observations = [observation for observation, s in clustered_data if s == i]\n if len(observations) > 0:\n observations = np.array(observations)\n plt.scatter(observations[:, 0], observations[:, 1], color=color[i], label='cluster #'+str(i))\n plt.legend()\n plt.savefig(file_path)",
"def plot_centers(self, _class):\n ins = self.instance_matrix[_class,:,:]\n import open3d as o3d\n pcd = o3d.geometry.PointCloud()\n c = ins[ins != self.no_instance].reshape((-1, 3))\n if c.shape[0] == 1:\n c = np.vstack([np.array([0,0,0]), c])\n\n pcd.points = o3d.utility.Vector3dVector(c)\n o3d.visualization.draw_geometries([pcd])",
"def gen_cluster_plot(x,y,clusters):\n fig = plt.figure()\n ax = plt.subplot(111)\n ax.scatter(x,y,s=5,c=clusters, linewidths = 0)\n ax.axis('equal')\n plt.show()\n plt.close()\n return",
"def plot_kmeans_clustering(x, y, titles_list, sc_output,\n output_file_name):\n fig= plt.figure(figsize=(15, 4))\n for i in range(3): \n plt.subplot(1, 3, i+1)\n plt.tight_layout()\n plt.scatter(x,y,c=sc_output[i].labels_)\n plt.xlabel(\"X\")\n plt.ylabel(\"Y\")\n plt.title(titles_list[i])\n plt.savefig(output_file_name) \n plt.show()",
"def visualize_cluster2D(self, xlabel=None, ylabel=None, titre=\"Clustering\"):\n if xlabel is None:\n xlabel = self.df.columns[0]\n\n if ylabel is None:\n ylabel = self.df.columns[1]\n color = ['0.75', 'b', 'g', 'r', 'c', 'm', '0.75', 'y', 'k', '0.45', 'b', 'g', 'r', 'c', 'm', '0.75', 'y',\n 'k']\n for i in self.clusters:\n x = self.df[self.df['cluster'] == i][xlabel]\n y = self.df[self.df['cluster'] == i][ylabel]\n pyplot.xlabel(xlabel)\n pyplot.ylabel(ylabel)\n pyplot.title(titre)\n pyplot.plot(x, y, 'o', c=color[i], markersize=6)\n pyplot.show()",
"def visualization_clusters(self, cmethod):\r\n \r\n # colors used to label data points\r\n colors = [\"red\",\"blue\",\"yellow\"]\r\n \r\n legends = [None, None, None]\r\n \r\n fig = plt.figure(cmethod)\r\n ax_cluster = Axes3D(fig)\r\n \r\n if (cmethod == self._kmeans):\r\n labels = self.kmeans.labels_\r\n smethod = \"kmeans\"\r\n elif (cmethod == self._spectral):\r\n labels = self.spectral.labels_\r\n smethod = \"Spectral clustering\"\r\n else:\r\n labels = self.hac.labels_\r\n smethod = \"HAC\"\r\n \r\n for i,v in enumerate(self.val_hist):\r\n # select cluster color/label\r\n l = labels[i]\r\n \r\n if (l == 0):\r\n cls = 'class 1'\r\n elif (l == 1):\r\n cls = 'class 2'\r\n else:\r\n cls = 'class 3'\r\n \r\n if (legends[l] == None):\r\n ax_cluster.scatter(v[0], v[1], v[2], marker='o', color=colors[l], s=20, label=cls)\r\n legends[l] = l\r\n else:\r\n ax_cluster.scatter(v[0], v[1], v[2], marker='o', color=colors[l], s=20)\r\n \r\n ax_cluster.text(v[0], v[1], v[2], '{0}'.format(i), size=5, zorder=1, color='k') \r\n \r\n ax_cluster.set_title(\"{0}\".format(smethod)) \r\n ax_cluster.set_xlabel('X')\r\n ax_cluster.set_ylabel('Y')\r\n ax_cluster.set_zlabel('Z')\r\n ax_cluster.legend()",
"def plot_clusters(clusters, tjcs, title, dims): \n wm = plt.get_current_fig_manager()\n wm.window.wm_geometry(dims)\n plt.axis([0,16,0,12])\n plt.xlabel('X (mts)')\n plt.ylabel('Y (mts)')\n plt.title(title)\n\n clusterLenght = clusters.shape[1]\n palette = generate_palette(clusterLenght)\n clustersTot = np.sum(clusters)\n for n, traj in enumerate(tjcs):\n traj = np.array(traj)\n c = np.array( [0., 0., 0.] )\n if np.sum(clusters[n, :]) / clustersTot > 1E-4:\n clusterTot = np.sum(clusters[n, :])\n for m in xrange(clusterLenght):\n c += palette[m] * clusters[n, m]/clusterTot\n plt.plot(traj[:,0], traj[:,1], \"-\", color = c)\n else:\n c = np.array([0.8, 0.8, 0.8])\n plt.plot(traj[:,0], traj[:,1], \"-\", color = c, alpha = 0.3)",
"def plot_k(self, k, xlabel=\"Feature 1\", ylabel=\"Feature 2\"):\n fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True, figsize=(12,9))\n axes = [ax1, ax2, ax3, ax4, ax5, ax6]\n\n for i in range(2, k+1):\n kmeans = Kmeans(i)\n kmeans.fit(self._data)\n\n cycol = cycle('bgrcmk')\n\n # Plot points belonging to clusters\n for j in range(kmeans._k):\n cluster_i_inds = kmeans._groups[kmeans._groups[:, 2] == j]\n axes[i-2].scatter(cluster_i_inds[:, 0], cluster_i_inds[:, 1], c=next(cycol), s=8)\n\n # Plot cluster centres\n for j in range(i):\n axes[i-2].scatter(kmeans._centroids[:, 0], kmeans._centroids[:, 1], c='w', marker='x', s=100)\n\n axes[i-2].set_title(f'K-means clustering with {i} clusters')\n axes[i-2].set_xlabel(xlabel)\n axes[i-2].set_ylabel(ylabel)\n\n plt.savefig(f\"./plots/clustering_results/kmeans_clusters_2-{k}_plots.pdf\")\n plt.show()\n plt.close()",
"def visualize(kmeans, x,y, title):\n plt.scatter(x, y, s=10, c=kmeans.labels_)\n plt.title(title)\n plt.show()",
"def _update_cluster_center(self):\n # Update the center of each cluster if there are points into it\n for cluster in self._clusters:\n\n # Get the number of points into this cluster\n nb_points = len(cluster.points)\n if nb_points > 0:\n\n # Update the way of getting sums and centers for 3D points\n\n # Add all x and y values of each point of this cluster\n x_sum, y_sum = 0, 0\n for point in cluster.points:\n x_sum += point.x\n y_sum += point.y\n\n # Reassign the center of this cluster by getting the mean\n cluster.center.x = x_sum / nb_points\n cluster.center.y = y_sum / nb_points\n\n # DEBUG: Display the new centers approximations\n # print(\n # 'center.x=%s and center.y=%s' %\n # (cluster.center.x, cluster.center.y)\n # )",
"def get_cluster_centers(self):\n pass",
"def plotProgresskMeans(X, centroids, previous, idx, K, i):\n util.plotDataPoints(X, idx)\n plt.plot(centroids[:, 0], centroids[:, 1], 'kx')\n for j in range(len(centroids)):\n # plt.plot([centroids[j, 0], previous[j, 0]],\n # [centroids[j, 1], previous[j, 1]], 'k')\n util.drawLine(centroids[j, :], previous[j, :], 'k')\n plt.title('Iteration number %d' % (i+1))",
"def draw_centers_hypercube(num_clusters, dim, min_sep):\n X = []\n p = 4 * (np.random.rand(dim) - 0.5)\n X.append(p)\n counter = 0\n for i1 in range(num_clusters - 1):\n min_sep_p = min_sep - 1\n while min_sep_p < min_sep:\n p = 4 * (np.random.rand(dim) - 0.5)\n min_sep_p = 100000 # Just a very large number...\n for x in X:\n sep = norm(np.array(x) - p)\n min_sep_p = min(min_sep_p, sep)\n counter = counter + 1\n X.append(p)\n X = np.array(X)\n # print(\"minimum cluster separation allowed: \" + str(min_sep))\n from scipy.spatial.distance import pdist\n # print(\"minimum cluster separation generated: \" + str(np.min(pdist(X))))\n return np.array(X)",
"def k_means(data):\n kmeans = KMeans(init='k-means++', n_clusters=n_clusters_, n_init=10)\n output = kmeans.fit(data)\n plt.subplot(1,1,1)\n plt.title('Clusters identified using K-means: %d' % n_clusters_)\n plt.scatter(data[:, 0], data[:, 1], c=output.labels_)\n plt.show()",
"def plot_mean_images(numbers, clusters,data):\n\n fig = plt.figure(figsize=(10,8))\n A = []\n for i in range(1,len(numbers)):\n A.append(fig.add_subplot(520+i))\n A.append(fig.add_subplot(5,2,10))\n\n for i,a in enumerate(A):\n a.imshow(compute_mean_image(i,clusters,data),cmap='gray')\n a.set_title(numbers[i])\n fig.suptitle(\"Mean image of each cluster\")\n plt.show()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plots documents on the maps (similar documents clustered together)
|
def plot_documents(self, svd, names, doc_clusters, no_clusters):
u, vt = svd
pts = vt
# each cluster gets a different colour
colormap = plt.get_cmap("hsv")
norm = matplotlib.colors.Normalize(vmin=0, vmax=no_clusters)
scalarMap = matplotlib.cm.ScalarMappable(cmap=colormap, norm=norm)
self.plot(pts[1], pts[2], names, colours=[scalarMap.to_rgba(i) for i in doc_clusters])
|
[
"def plot_1d_all(self, map_data):\r\n import plotly\r\n import plotly.graph_objs as go\r\n import numpy as np\r\n\r\n nx = self.reservoir.nx\r\n nc = self.physics.n_components\r\n\r\n data = []\r\n for i in range(nc - 1):\r\n data.append(go.Scatter(x=np.linspace(0, 1, nx), y=map_data[i + 1::nc][1:nx], dash='dash'))\r\n\r\n plotly.offline.plot(data, filename='Compositions.html')",
"def plot_cloud(words_dist,size):\n mpl.rcParams['figure.figsize'] = (10.0, 10.0)\n mpl.rcParams['font.size'] = 12\n mpl.rcParams['savefig.dpi'] = 300\n mpl.rcParams['figure.subplot.bottom'] = .1\n \n wordcloud = WordCloud(width=1600, height=800,\n background_color='black').generate_from_frequencies(words_dist)\n \n fig = plt.figure(figsize=size, facecolor='k')\n plt.imshow(wordcloud)\n plt.axis('off')\n #plt.title(title, fontsize=50, color='y')\n plt.tight_layout()\n # plt.savefig('{}.png'.format(title), format='png', dpi=300)\n plt.show()",
"def visualize(data_dict, feature_x, feature_y):\n import matplotlib.pyplot as plt\n data = featureFormat(data_dict, [feature_x, feature_y, 'poi'], remove_all_zeroes=False)\n\n for datum in data:\n x = datum[0]\n y = datum[1]\n poi = datum[2]\n color = 'blue' if not poi else 'red'\n plt.scatter(x, y, color=color)\n plt.xlabel(feature_x)\n plt.ylabel(feature_y)\n #plt.show() \n plt.savefig('../final_project/plots/plot_{0}_{1}'.format(feature_x, feature_y)) # save the figure to file\n plt.close()",
"def plot_data(shard_dict, x_units, y_scale, show=False, append_to_title=\"\"):\n\n if not show:\n return\n\n for shard in shard_dict.values():\n plot_shard_data(shard, y_scale, x_units, append_to_title)",
"def _visualize():\n viz_words = len(model.wv.vectors)\n word_vector = model.wv.vectors\n tsne = sklearn.manifold.TSNE(n_components=2)\n embed_tsne = tsne.fit_transform(word_vector)\n fig, ax = plt.subplots(figsize=(6, 6))\n for i in range(viz_words):\n plt.scatter(*embed_tsne[i, :], s=2, alpha=0.6, color='b')\n plt.annotate(model.wv.index2word[i], (embed_tsne[i, 0], embed_tsne[i, 1]), alpha=0.6, fontsize=7)\n plt.savefig('tsne_zones_{}.png'.format(self.store_id))",
"def plot_map(self, words, total_count):\n for word in words[::-1]:\n labelsize = self.calc_label_size(word.count, total_count)\n if word.count < 1 or labelsize < self.min_font_size:\n continue\n (x, y) = self.adjust_label(int(labelsize), word.surface, word.x, word.y)\n logger.debug('%s %f %f %s' % (word.surface, word.x, word.y, labelsize))\n args = {'size': labelsize, 'color': 'white', 'ha': 'center', 'va': 'center',\n 'bbox': self.bbox, 'fontproperties': self.prop}\n plt.text(word.x, word.y, word.surface, **args)",
"def AtlasPlots(cf, p, atlas, m_array, EnergyHistory): \n\n fig = plt.figure(1)\n fig.patch.set_facecolor('white')\n\n TE = [sum(x) for x in EnergyHistory] \n VE = [row[0] for row in EnergyHistory] \n IE = [row[1] for row in EnergyHistory] \n\n plt.subplot(1,3,1)\n plt.plot(TE)\n plt.title('Total Energy')\n plt.hold(False)\n plt.subplot(1,3,2)\n plt.plot(VE)\n plt.title('Vector Energy')\n plt.hold(False)\n plt.subplot(1,3,3)\n plt.plot(IE)\n plt.title('Image Energy')\n plt.hold(False)\n plt.draw()\n plt.show()\n\n if cf.io.outputPrefix != None: \n energyFilename = cf.io.outputPrefix + \"Energy.pdf\"\n plt.savefig(energyFilename)",
"def add_images(self):\n occupancy = np.zeros(self.map.shape, dtype=float)\n self.occupancy_axes = show_map(occupancy, self.res, cmap=red_cm, ax=self.axes, zorder=11)\n # initialize plots with map\n map_ = self.map if self.plot_map else np.zeros_like(self.map)\n self.map_axes = show_map(map_, self.res, cmap=black_cm, ax=self.axes, zorder=12)\n if self.plot_seen:\n # add seen image\n self.seen_axes = show_map(occupancy, self.res, cmap=black_cm, alpha=0.2, ax=self.axes)",
"def plot_marked_region(self,boundary_parts, bulk_parts,plot_index_1=1,view_elev=0, view_azim=0):\n ax_1= self.axes[str(plot_index_1+1)]\n ax_1.view_init(view_elev , view_azim)\n ax_1.set_title('Mesh bulk', fontsize=20)\n plot(bulk_parts)\n plt.show()\n plt.savefig('/test_'+str(0)+'.png', dpi=100)",
"def feature_maps(embeddings, labels):\n\n _, N = np.shape(embeddings)\n e_map = embeddings.reshape((16, 16, int(N/2)))\n fig = plt.figure(figsize=(12, 12))\n columns = 2\n rows = 1\n for i in range(1, columns * rows + 1):\n img = e_map[:, :, i - 1]\n fig.add_subplot(rows, columns, i)\n plt.imshow(img)\n plt.gca().set_title(labels[i - 1])\n plt.colorbar()\n plt.show()",
"def Display_List_Dist(self):\n # Get the neccessary distributions\n p,lh = self.get_p()\n low = lh[0]\n high = lh[1]\n N = len(p)\n clr = ['g','c','b','r'] \n fig , subplt = plt.subplots(nrows=N, figsize=(8, 9))\n x_grid = np.arange(low,high,self.get_precision())\n for i in range(N):\n subplt[i].plot(x_grid,p[i](x_grid),\\\n clr[i%4], linewidth=2.5,\\\n label = 'PDF {}'.format(i))\n subplt[i].legend()\n plt.show(block = False)",
"def display_heatmap(self,key1='leiden_clusters',key2='leiden_clusters',colorbar=True,**kwargs):\n sam1=self.sam1\n sam2=self.sam2\n samap=self.samap\n from samap import q, pd\n import matplotlib.pyplot as plt\n cl1 = q(sam1.adata.obs[key1])\n cl2 = q(sam2.adata.obs[key2])\n species = q(samap.adata.obs['species']).astype('object')\n samap.adata.obs['mapping_labels'] = pd.Categorical(species + '_' + np.append(cl1,cl2).astype('str').astype('object'))\n _,labels1,labels2,mapping_scores = _compute_csim(samap,key='mapping_labels')\n mapping_scores/=20\n fig,ax = plt.subplots(nrows=1,ncols=1)\n im = ax.imshow(mapping_scores,**kwargs)\n ax.set_xticks(np.arange(labels2.size))\n ax.set_yticks(np.arange(labels1.size))\n ax.set_xticklabels(labels2,rotation=90)\n ax.set_yticklabels(labels1)\n h = 10\n w = labels2.size*10/labels1.size\n fig.set_size_inches((w,h))\n if colorbar:\n fig.colorbar(im,shrink=0.5)\n fig.tight_layout()\n return fig,pd.DataFrame(data=mapping_scores,index=labels1,columns=labels2)",
"def plot_ELmaps(linefile, map_vmin=-0.03, map_vmax=0.06, wht_vmin=-0.01, wht_vmax=20000, zoom=None, colormap='viridis', verbose=True):\n line = afits.open(linefile)\n maps = line[0].header['HASLINES'].split()\n line.info()\n\n latexnames = gw.linelatexnames()\n\n if zoom is None:\n xmin,ymin = 0,0\n xmax,ymax = line['DSCI'].data.shape\n else:\n xmin,xmax,ymin,ymax = zoom\n\n lowcorner_x = np.abs(xmax-xmin)*0.05\n lowcorner_y = np.abs(ymax-ymin)*0.05\n\n Nmaps = len(maps)\n Nrows = Nmaps+1\n Ncols = 4\n FS = 8\n fig = plt.figure(figsize=[4,Nmaps+1])\n\n ax = fig.add_subplot(Nrows, Ncols, 1)\n ax.imshow(line['DSCI'].data[ymin:ymax,xmin:xmax], vmin=map_vmin, vmax=map_vmax, cmap=colormap, origin='lower')\n ax.text(lowcorner_x, lowcorner_y,'Direct '+line['DSCI'].header['FILTER'], ha='left', va='bottom', fontsize=FS)\n # ax.set_title('Direct '+line['DSCI'].header['FILTER'], fontsize=FS, color='black')\n\n ax = fig.add_subplot(Nrows, Ncols, 2)\n ax.imshow(line['DWHT'].data[ymin:ymax,xmin:xmax], vmin=wht_vmin, vmax=wht_vmax, cmap='gray', origin='lower')\n ax.text(lowcorner_x, lowcorner_y,r'Direct weight', ha='left', va='bottom', color='w', fontsize=FS)\n # ax.set_title(r'Direct weight', fontsize=FS, color='black')\n\n for mm, map in enumerate(maps):\n ax = fig.add_subplot(Nrows, Ncols, 1+4*(mm+1))\n ax.imshow(line['LINE', map].data[ymin:ymax,xmin:xmax], vmin=map_vmin, vmax=map_vmax, cmap=colormap, origin='lower')\n ax.text(lowcorner_x, lowcorner_y,latexnames[map][0], ha='left', va='bottom', fontsize=FS)\n # ax.set_title(latexnames[map][0], fontsize=FS, color='black')\n\n ax = fig.add_subplot(Nrows, Ncols, 2+4*(mm+1))\n ax.imshow(line['LINEWHT', map].data[ymin:ymax,xmin:xmax], vmin=wht_vmin, vmax=wht_vmax, cmap='gray', origin='lower')\n ax.text(lowcorner_x, lowcorner_y,latexnames[map][1], ha='left', va='bottom', color='w', fontsize=FS)\n # ax.set_title(latexnames[map][1], fontsize=FS, color='black')\n\n ax = fig.add_subplot(Nrows, Ncols, 3+4*(mm+1))\n ax.imshow(line['CONTINUUM', map].data[ymin:ymax,xmin:xmax], vmin=map_vmin, vmax=map_vmax, cmap=colormap, origin='lower')\n ax.text(lowcorner_x, lowcorner_y,latexnames[map][2], ha='left', va='bottom', fontsize=FS)\n # ax.set_title(latexnames[map][2], fontsize=FS, color='black')\n\n ax = fig.add_subplot(Nrows, Ncols, 4+4*(mm+1))\n ax.imshow(line['CONTAM', map].data[ymin:ymax,xmin:xmax], vmin=map_vmin, vmax=map_vmax, cmap=colormap, origin='lower')\n ax.text(lowcorner_x, lowcorner_y,latexnames[map][3], ha='left', va='bottom', fontsize=FS)\n # ax.set_title(latexnames[map][3], fontsize=FS, color='black')\n\n for ax in fig.axes:\n ax.set_xticklabels([]); ax.set_yticklabels([])\n ax.set_xticks([]); ax.set_yticks([])\n\n fig.tight_layout(pad=0.1)\n\n plotname = linefile.replace('.fits','_emissionlinemaps.pdf')\n if verbose: print(' - Saving figure to '+plotname)\n plt.savefig(plotname)\n plt.clf()\n plt.close('all')",
"def plot(corpus,fname):\n tfidf_vectorizer = TfidfVectorizer()\n tfidf_matrix = tfidf_vectorizer.fit_transform(corpus)\n print('Begin prepare matrices:',tfidf_matrix.shape)\n def print_tfidf():\n # print words and tfidfs:\n words = tfidf_vectorizer.get_feature_names()\n for i in range(len(corpus)):\n if i<5:\n print('----Document %d----' % (i))\n for j in range(len(words)):\n if tfidf_matrix[i, j] > 1e-5:\n print(words[j], tfidf_matrix[i, j]) # .encode('utf-8')\n print_tfidf()\n\n dist = 1 - cosine_similarity(tfidf_matrix)\n\n print('\\nBegin print img:')\n linkage_matrix = ward(dist) #define the linkage_matrix using ward clustering pre-computed distances\n print('line 68: linkage_matrix = ward(dist)')\n fig, ax = plt.subplots(figsize=(20, 600)) # set size\n ax = dendrogram(\n linkage_matrix,\n leaf_font_size=6,\n orientation=\"right\"\n # ,\n # truncate_mode='lastp', # show only the last p merged clusters\n # p=12, # show only the last p merged clusters\n # leaf_rotation=90.,\n # show_contracted=True, # to get a distribution impression in truncated branches\n\n ) # labels=[i for i in range(len(corpus))]\n\n print('line 71: ax = dendrogram(...)')\n plt.tick_params(\\\n axis= 'x', # changes apply to the x-axis\n which='both', # both major and minor ticks are affected\n bottom='off', # ticks along the bottom edge are off\n top='off', # ticks along the top edge are off\n labelbottom='off')\n print('line 78: plt.tick_params')\n plt.tight_layout() #show plot with tight layout # plt.show()\n print('line 80: plt.tight_layout()')\n\n plt.savefig('../pic/ward_clusters-%s.png'%(fname), dpi=100) #save figure as ward_clusters\n return ax",
"def plot_projected_cluster(ss_pos, companions_pos):\n plt.figure(figsize=(10,10))\n plt.plot(ss_pos['x'], ss_pos['y'],linestyle='none',marker='o' )\n plt.plot(companions_pos['x'], companions_pos['y'],linestyle='none',marker='.' )\n \n #makes lines between companion and primary star\n for i in companions_pos:\n plt.plot([i['x'], ss_pos[i['system_idx']]['x']],[i['y'], ss_pos[i['system_idx']]['y']],color='grey',linewidth=1)\n \n plt.xlabel(\"x (AU)\")\n plt.ylabel(\"y (AU)\")\n plt.show()\n \n return",
"def PlotTomoMap(fname, dlon=0.5, dlat=0.5, title='', datatype='ph', outfname='', browseflag=False, saveflag=True):\n if title=='':\n title=fname;\n if outfname=='':\n outfname=fname;\n Inarray=np.loadtxt(fname)\n LonLst=Inarray[:,0]\n LatLst=Inarray[:,1]\n ZValue=Inarray[:,2]\n llcrnrlon=LonLst.min()\n llcrnrlat=LatLst.min()\n urcrnrlon=LonLst.max()\n urcrnrlat=LatLst.max()\n Nlon=int((urcrnrlon-llcrnrlon)/dlon)+1\n Nlat=int((urcrnrlat-llcrnrlat)/dlat)+1\n fig=plt.figure(num=None, figsize=(8, 12), dpi=80, facecolor='w', edgecolor='k')\n m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat, \\\n rsphere=(6378137.00,6356752.3142), resolution='l', projection='merc')\n \n lon = LonLst\n lat = LatLst\n x,y = m(lon, lat)\n xi = np.linspace(x.min(), x.max(), Nlon)\n yi = np.linspace(y.min(), y.max(), Nlat)\n xi, yi = np.meshgrid(xi, yi)\n \n #-- Interpolating at the points in xi, yi\n zi = griddata(x, y, ZValue, xi, yi)\n # m.pcolormesh(xi, yi, zi, cmap='seismic_r', shading='gouraud')\n cmap=matplotlib.cm.seismic_r\n cmap.set_bad('w',1.)\n m.imshow(zi, cmap=cmap)\n m.drawcoastlines()\n m.colorbar(location='bottom',size='2%')\n # m.fillcontinents()\n # draw parallels\n m.drawparallels(np.arange(-90,90,10),labels=[1,1,0,1])\n # draw meridians\n m.drawmeridians(np.arange(-180,180,10),labels=[1,1,1,0])\n plt.suptitle(title,y=0.9, fontsize=22);\n if browseflag==True:\n plt.draw()\n plt.pause(1) # <-------\n raw_input(\"<Hit Enter To Close>\")\n plt.close('all')\n if saveflag==True:\n fig.savefig(outfname+'.ps', format='ps')\n return",
"def plot_2d_cosine_similarity(self, \n documents: List[Dict], anchor_documents: List[Dict], vector_fields: List[str], \n label: str, mode='markers+text', textposition='top center', show_spikes=True,\n text_label_font_size: int=12, text_label_font_family = \"Rockwell\",\n text_label_bgcolor=\"white\",\n marker_colors=['purple', 'aquamarine'], metric='cosine',\n plot_bgcolor=\"#e6e6fa\", spikedash='dot', spikethickness=1.5, include_diagonal_line: bool=True\n ):\n if metric != 'cosine':\n raise NotImplementedError(\"Cosine similarity score is currently not implemented.\")\n\n assert (\n len(anchor_documents) == 2\n ), \"You need 2 anchor documents for a 2d cosine similarity plot.\"\n\n fig = go.FigureWidget()\n \n if len(vector_fields) > len(marker_colors):\n num_of_extra_fields = len(vector_fields) - len(marker_colors)\n marker_colors += self.random_colour(num_of_extra_fields)\n\n for vector_field_counter, vector_field in enumerate(vector_fields):\n scores_x = self.get_cosine_similarity_scores(\n documents, anchor_documents[0], vector_field\n )\n scores_y = self.get_cosine_similarity_scores(\n documents, anchor_documents[1], vector_field\n )\n\n for i, doc in enumerate(documents):\n doc[\"cos_score_x\"] = scores_x[i]\n doc[\"cos_score_y\"] = scores_y[i]\n x = [round(x[\"cos_score_x\"], 3) for x in documents]\n y = [round(x[\"cos_score_y\"], 3) for x in documents]\n labels = self.get_field_across_documents(label, documents)\n comparisons = [x_i > y[i] for i, x_i in enumerate(x)]\n text_comparisons = [\"has <b>lower</b> cosine similarity with\" if c == True \n else \"has <b>higher</b> cosine similarity with\" for c in comparisons]\n fig.add_trace(go.Scatter(x=x, y=y, \n text=labels,\n mode=mode, \n name=vector_field,\n customdata=text_comparisons,\n hovertemplate = label +\": <b>%{text}<extra></extra><br></b>\" + \\\n \"%{customdata} <br>\" + \\\n \"<b>\" + anchor_documents[1][label] + \"</b> (%{y})<br>\" + \\\n \"compared to <br>\" + \\\n \"<b>\" + anchor_documents[0][label] + \"</b> (%{x})</b>\",\n marker=dict(\n color=marker_colors[vector_field_counter],\n size=5,\n line=dict(width=0.5, color='DarkSlateGrey')\n )))\n\n self.add_labels_to_figure(fig, x_axis_label=f\"Cosine Similarity With {anchor_documents[0][label]}\",\n y_axis_label=f\"Cosine Similarity WIth {anchor_documents[1][label]}\",\n title_text=f\"2D Cosine Similarity Comparison With {anchor_documents[0][label]} and {anchor_documents[1][label]}\")\n\n fig.update_traces(textposition=textposition)\n\n fig.update_xaxes(showspikes=show_spikes, \n spikedash=spikedash, spikethickness=spikethickness)\n fig.update_yaxes(showspikes=show_spikes, \n spikedash=spikedash, spikethickness=spikethickness)\n \n fig.update_layout(plot_bgcolor=plot_bgcolor)\n fig.update_layout(\n hoverlabel=dict(\n bgcolor=text_label_bgcolor,\n font_size=text_label_font_size,\n font_family=text_label_font_family\n )\n )\n if include_diagonal_line:\n fig = self.add_line_to_fig(fig, x0=0, y0=0, x1=1, y1=1)\n return fig",
"def plot_replica_maps_grid(dataset, plotspecs):\n cwd = os.getcwd()\n grid_dims = plotspecs[\"grid_dims\"]\n for t in range(len(dataset.topologies)):\n names = dataset.top_names[t]\n for n in range(len(names)):\n # Plot whatever for a protein\n pairs = dataset.pairs[t][n]\n N = dataset.prot_sizes[t][n]\n print dataset.top_names[t][n]\n for j in range(len(dataset.b_values)):\n print \" b-values:\", dataset.b_values[j]\n fig, axes = plt.subplots(*grid_dims, sharex=True, sharey=True, figsize=(12,10))\n if len(dataset.ydata[t][n][j]) > 0:\n for rep in range(len(dataset.ydata[t][n][j])):\n ax = axes[rep / grid_dims[0], rep % grid_dims[0]]\n\n vals = dataset.ydata[t][n][j][0]\n C = np.zeros((N, N))\n for m in range(len(pairs)):\n if m < dataset.prot_n_native[t][n]:\n C[pairs[m, 1], pairs[m, 0]] = vals[m]\n else:\n C[pairs[m, 1], pairs[m, 0]] = -vals[m]\n\n # plot native and non-native contacts in different colors\n vmin, vmax = plotspecs[\"vminmax\"]\n pa = ax.pcolormesh(np.ma.array(C, mask=(C == 0)), cmap=\"bwr_r\", vmin=vmin, vmax=vmax)\n\n ax.annotate(\"rep = \" + str(rep + 1),\n xy=(0,0), xytext=plotspecs[\"xytext\"],\n bbox={\"boxstyle\":\"square\",\"facecolor\":\"w\",\"edgecolor\":\"k\"},\n xycoords=\"axes fraction\", textcoords=\"axes fraction\")\n ax.plot(np.arange(0, N), np.arange(0, N), 'k', lw=2)\n\n ax.set_xlim(0, N)\n ax.set_ylim(0, N)\n ax.set_aspect(\"equal\")\n\n plt.subplots_adjust(wspace=0, hspace=0)\n big_ax = fig.add_subplot(111)\n big_ax.grid(False)\n big_ax.set_axis_bgcolor('none')\n big_ax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')\n big_ax.set_ylabel(plotspecs[\"ylabel\"])\n big_ax.set_xlabel(plotspecs[\"xlabel\"])\n big_ax.set_title(plotspecs[\"title\"] + \" b = \" + dataset.b_values[j])\n\n if not (plotspecs[\"saveas\"] is None):\n savedir = \"{}/{}/b_{}/plots\".format(dataset.topologies[t], \n dataset.top_names[t][n], dataset.b_values[j])\n \n if not os.path.exists(savedir):\n os.mkdir(savedir)\n os.chdir(savedir)\n for format in plotspecs[\"saveas_formats\"]:\n plt.savefig(plotspecs[\"saveas\"] + \".\" + format, bbox_inches=\"tight\")\n os.chdir(cwd)",
"def plots(pdframe_kmers, filename):\r\n proportion_kmers_oberserved = p9.ggplot(data = pdframe_kmers_df,\r\n mapping = p9.aes(x = 'k' , y = 'kemers_obs/kmers_poss')) + p9.geom_point()\r\n\r\n proportion_kmers_oberserved.save(filename + \"proportion_kmers_oberserved.pdf\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a PR curve summary op for a single binary classifier. Computes true/false positive/negative values for the given `predictions` against the ground truth `labels`, against a list of evenly distributed threshold values in `[0, 1]` of length `num_thresholds`. Each number in `predictions`, a float in `[0, 1]`, is compared with its corresponding boolean label in `labels`, and counts as a single tp/fp/tn/fn value at each threshold. This is then multiplied by `weights`, which can be used to reweight certain values or, more commonly, to mask values.
|
def op(
name,
labels,
predictions,
num_thresholds=None,
weights=None,
display_name=None,
description=None,
collections=None,
):
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if num_thresholds is None:
num_thresholds = _DEFAULT_NUM_THRESHOLDS
if weights is None:
weights = 1.0
dtype = predictions.dtype
with tf.name_scope(name, values=[labels, predictions, weights]):
tf.assert_type(labels, tf.bool)
# We cast to float to ensure we have 0.0 or 1.0.
f_labels = tf.cast(labels, dtype)
# Ensure predictions are all in range [0.0, 1.0].
predictions = tf.minimum(1.0, tf.maximum(0.0, predictions))
# Get weighted true/false labels.
true_labels = f_labels * weights
false_labels = (1.0 - f_labels) * weights
# Before we begin, flatten predictions.
predictions = tf.reshape(predictions, [-1])
# Shape the labels so they are broadcast-able for later multiplication.
true_labels = tf.reshape(true_labels, [-1, 1])
false_labels = tf.reshape(false_labels, [-1, 1])
# To compute TP/FP/TN/FN, we are measuring a binary classifier
# C(t) = (predictions >= t)
# at each threshold 't'. So we have
# TP(t) = sum( C(t) * true_labels )
# FP(t) = sum( C(t) * false_labels )
#
# But, computing C(t) requires computation for each t. To make it fast,
# observe that C(t) is a cumulative integral, and so if we have
# thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
# where n = num_thresholds, and if we can compute the bucket function
# B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
# then we get
# C(t_i) = sum( B(j), j >= i )
# which is the reversed cumulative sum in tf.cumsum().
#
# We can compute B(i) efficiently by taking advantage of the fact that
# our thresholds are evenly distributed, in that
# width = 1.0 / (num_thresholds - 1)
# thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
# Given a prediction value p, we can map it to its bucket by
# bucket_index(p) = floor( p * (num_thresholds - 1) )
# so we can use tf.scatter_add() to update the buckets in one pass.
# Compute the bucket indices for each prediction value.
bucket_indices = tf.cast(
tf.floor(predictions * (num_thresholds - 1)), tf.int32
)
# Bucket predictions.
tp_buckets = tf.reduce_sum(
input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds)
* true_labels,
axis=0,
)
fp_buckets = tf.reduce_sum(
input_tensor=tf.one_hot(bucket_indices, depth=num_thresholds)
* false_labels,
axis=0,
)
# Set up the cumulative sums to compute the actual metrics.
tp = tf.cumsum(tp_buckets, reverse=True, name="tp")
fp = tf.cumsum(fp_buckets, reverse=True, name="fp")
# fn = sum(true_labels) - tp
# = sum(tp_buckets) - tp
# = tp[0] - tp
# Similarly,
# tn = fp[0] - fp
tn = fp[0] - fp
fn = tp[0] - tp
precision = tp / tf.maximum(_MINIMUM_COUNT, tp + fp)
recall = tp / tf.maximum(_MINIMUM_COUNT, tp + fn)
return _create_tensor_summary(
name,
tp,
fp,
tn,
fn,
precision,
recall,
num_thresholds,
display_name,
description,
collections,
)
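The comment block above is the core of the method: with evenly spaced thresholds, each prediction falls into exactly one bucket, and the per-threshold counts follow from a reversed cumulative sum over those buckets. As a standalone illustration only (not part of the original op; the function name `pr_counts` and the `1e-7` floor standing in for `_MINIMUM_COUNT` are assumptions), the same trick in plain NumPy might look like this:

import numpy as np

def pr_counts(labels, predictions, num_thresholds=11, weights=1.0):
    # Weighted positive/negative indicators; predictions clipped to [0, 1].
    labels = np.asarray(labels, dtype=float)
    predictions = np.clip(np.asarray(predictions, dtype=float), 0.0, 1.0)
    # bucket_index(p) = floor(p * (num_thresholds - 1)), as in the comments above.
    bucket_indices = np.floor(predictions * (num_thresholds - 1)).astype(int)
    tp_buckets = np.bincount(bucket_indices, weights=labels * weights,
                             minlength=num_thresholds)
    fp_buckets = np.bincount(bucket_indices, weights=(1.0 - labels) * weights,
                             minlength=num_thresholds)
    # C(t_i) = sum(B(j), j >= i): a reversed cumulative sum over the buckets.
    tp = np.cumsum(tp_buckets[::-1])[::-1]
    fp = np.cumsum(fp_buckets[::-1])[::-1]
    tn = fp[0] - fp
    fn = tp[0] - tp
    precision = tp / np.maximum(1e-7, tp + fp)
    recall = tp / np.maximum(1e-7, tp + fn)
    return tp, fp, tn, fn, precision, recall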
|
[
"def compute_roc_curve(labels, predictions, num_thresholds=None, weights=None):\n if isinstance(labels, list):\n labels = np.array(labels)\n if isinstance(predictions, list):\n predictions = np.array(predictions)\n _MINIMUM_COUNT = 1e-7\n\n if weights is None:\n weights = 1.0\n\n bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))\n float_labels = labels.astype(np.float)\n histogram_range = (0, num_thresholds - 1)\n tp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=float_labels * weights)\n fp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=(1.0 - float_labels) * weights)\n\n # Obtain the reverse cumulative sum.\n tp = np.cumsum(tp_buckets[::-1])[::-1]\n fp = np.cumsum(fp_buckets[::-1])[::-1]\n tn = fp[0] - fp\n fn = tp[0] - tp\n tpr = tp / np.maximum(_MINIMUM_COUNT, tn + fp)\n fpr = fp / np.maximum(_MINIMUM_COUNT, tn + fp)\n data = {\n 'tp': tp.astype(int).tolist(),\n 'fp': fp.astype(int).tolist(),\n 'tn': tn.astype(int).tolist(),\n 'fn': fn.astype(int).tolist(),\n 'tpr': tpr.astype(float).tolist(),\n 'fpr': fpr.astype(float).tolist()\n }\n return data",
"def evaluate(labels, predictions):\n positive_total, positive_rate = 0, 0\n negative_total, negative_rate = 0, 0\n for i in range(len(labels)):\n \n # sensitivity\n if labels[i] == 1:\n positive_total += 1\n if labels[i] == predictions[i]:\n positive_rate += 1\n \n # specificity\n else:\n negative_total += 1\n if labels[i] == predictions[i]:\n negative_rate += 1\n\n sensitivity = float(positive_rate / positive_total)\n specificity = float(negative_rate / negative_total)\n \n return (sensitivity, specificity)",
"def evaluate(labels, predictions):\n identifiedTruePositives = 0\n identifiedTrueNegatives = 0\n totalPositives = 0\n totalNegatives = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n totalPositives += 1\n if labels[i] == predictions[i]:\n identifiedTruePositives += 1\n else:\n totalNegatives += 1\n if labels[i] == predictions[i]:\n identifiedTrueNegatives += 1\n sensitivity = identifiedTruePositives / totalPositives\n specificity = identifiedTrueNegatives / totalNegatives\n\n return (sensitivity, specificity)",
"def evaluate(labels, predictions):\n i=0\n j=0\n total_true = 0\n total_wrong = 0\n for label,prediction in zip(labels,predictions):\n if label==1:\n total_true = total_true + 1\n if prediction == 1:\n i = i + 1\n else:\n total_wrong = total_wrong + 1\n if prediction == 0:\n j = j + 1\n sensitivity = float(i/total_true)\n specificity = float(j/total_wrong)\n return(sensitivity, specificity)\n\n\n\n\n raise NotImplementedError",
"def count_tp_fp(ious_list, scores_list, score_thresholds, iou_threshold,\n pred_labels_list, gt_labels_list, **kwargs):\n tp_fp_fn_dict = defaultdict(tp_fp_fn_counter)\n for ious_mat, scores_vec, pred_labels, gt_labels in zip(\n ious_list, scores_list, pred_labels_list, gt_labels_list):\n gt_labels = np.array(gt_labels)\n pred_labels = np.array(pred_labels)\n scores_vec = np.array(scores_vec)\n for thresh in score_thresholds:\n counter = tp_fp_fn_dict[thresh]\n pred_indices = scores_vec >= thresh.score\n if len(ious_mat) == 0:\n # no gt, therefore all predictions are fp\n fp = np.sum(pred_indices)\n counter.fp += fp\n counter.fn += len(gt_labels)\n continue\n # when there's no gt, ious_mat is [], hence size assetion fails\n assert len(ious_mat) == len(scores_vec), \"ious_mat and \"\n \"scores_vec mismatch\"\n pred_ious = ious_mat[pred_indices]\n num_pred, num_gt = pred_ious.shape\n if num_pred == 0:\n # no predictions, therefore all gt are fn\n counter.fn += num_gt\n continue\n fp = np.sum(pred_ious.max(axis=1) < iou_threshold)\n tp = np.sum(pred_ious.max(axis=0) >= iou_threshold)\n fn = num_gt - tp\n counter.tp += tp\n counter.fp += fp\n counter.fn += fn\n return tp_fp_fn_dict",
"def evaluate(labels, predictions):\n labels = np.array(labels)\n predictions = np.array(predictions)\n\n total_positives = len(labels[labels == 1])\n correctly_predicted = np.sum(np.logical_and(labels == 1, predictions == 1))\n\n sensitivity = correctly_predicted/total_positives\n\n total_negatives = len(labels[labels == 0])\n correctly_predicted = np.sum(np.logical_and(labels == 0, predictions == 0))\n\n specificty = correctly_predicted/total_negatives\n\n precision, recall, fscore, _ = precision_recall_fscore_support(y_pred=predictions, y_true=labels, average='binary')\n print(f'Precision: {precision:.4f}\\nRecall: {recall:.4f}\\nfscore: {fscore:.4f}')\n\n return (sensitivity, specificty)",
"def fit_thresholds(y_true, \n\t\t\t\t y_pred_raw, \n\t\t\t\t metric, \n\t\t\t\t verbose=True,\n\t\t\t\t thresholds=np.arange(0.1, 1, 0.1),\n\t\t\t\t return_top_threshold=False):\n\t# Set up functions dict.\n\tmetrics_dct = {\n\t\t'f1_score': f1_score, \n\t\t'accuracy_score': accuracy_score, \n\t\t'recall': recall_score, \n\t\t'precision': precision_score\n\t}\n\n\tif metric not in metrics_dct:\n\t\traise Exception('''\n`metric` value must be one of:\n\t['f1_score', 'accuracy_score', 'recall', 'precision']\n''')\n\n\t# Set up top lists.\n\ttop_thresh_val = None\n\ttop_score = 0\n\ttop_pred = None\n\n\t# Iterate through thresholds and keep top score.\n\tfor threshold in thresholds:\n\t\tpred = y_pred_raw.copy()\n\n\t\t# Set predictions based on given threshold.\n\t\tpred[pred >= threshold] = 1\n\t\tpred[pred < threshold] = 0\n\n\t\t# Determine whether the top score should be replaced.\n\t\tscore = metrics_dct[metric](y_true, pred)\n\t\tif score > top_score:\n\t\t\ttop_thresh_val = threshold\n\t\t\ttop_score = score\n\t\t\ttop_pred = pred\n\n\tresults = f'Optimal Threshold: {top_thresh_val}. Top Score: {top_score}'\n\tprint(headerize(results))\n\tif verbose:\n\t\tshow_scores(y_true, top_pred, header=metric.upper())\n\tif return_top_threshold:\n\t\treturn top_thresh_val",
"def abstract_ROC(classifier, labels, num_points = 1000): \n\n # array for holding predictions based on the threshold\n preds_by_thresh = np.zeros((num_points, len(labels)))\n\n anti_labels = 1 - labels\n class1_count = np.sum(labels)\n class0_count = len(labels) - class1_count\n\n min_thresh = np.min(classifier)\n max_thresh = np.max(classifier)\n\n # iterate through linspace of thresholds\n for i, thresh in enumerate(np.linspace(min_thresh, max_thresh, num_points)):\n\n # make predictions based on classifier and threshold\n preds_by_thresh[i, classifier > thresh] = 1\n\n # compute fraction of class 1 we got correct for each threshold\n class1_eff = np.dot(preds_by_thresh, labels)/class1_count\n\n # compute fraction of class 0 we got correct for each threshold\n class0_eff = 1 - np.dot(preds_by_thresh, anti_labels)/class0_count\n \n return class0_eff, class1_eff",
"def get_optimal_threshold(y_true, y_pred, label_list, eval_func, independent=True, lower_better=False,\n **eval_fn_kwargs):\n assert isinstance(y_true, pd.DataFrame) and isinstance(y_pred, pd.DataFrame)\n assert y_true.shape[0] == y_pred.shape[0]\n\n thresholds = [0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50, 0.55, 0.60, 0.65]\n\n if independent:\n result = {}\n result_eval = {}\n for label in label_list:\n truth = y_true[label]\n preds = y_pred[label]\n\n best_score = 0\n best_thr = 0\n for thr in thresholds:\n hard_preds = np.array(preds > thr).astype(int)\n score = eval_func(truth, hard_preds, **eval_fn_kwargs)\n\n if (lower_better and score < best_score) or (\n not lower_better and score > best_score):\n best_score = score\n best_thr = thr\n\n result[label] = best_thr\n result_eval[label] = best_score\n\n else:\n print(eval_fn_kwargs)\n best_score = 0\n best_thr = 0\n for thr in thresholds:\n y_pred_discrete = logits_to_discrete(y_pred, label_list, thr)\n score = eval_func(y_true[label_list], y_pred_discrete[label_list], **eval_fn_kwargs)\n\n if (lower_better and score < best_score) or (not lower_better and score > best_score):\n best_score = score\n best_thr = thr\n result = best_thr\n result_eval = best_score\n\n return result, result_eval",
"def __predict_label(self, label_probs):\n def driver(prob):\n candidate = np.argmax(prob)\n if candidate == 0 and prob[0] > self.model_paras['threshold_positive']:\n return 0\n elif candidate == 2 and prob[2] > self.model_paras['threshold_negative']:\n return 2\n else:\n return 1\n\n labels = list(map(driver, label_probs))\n return labels",
"def threshold_predictions(preds, classes, prediction_threshold=0.5):\n labelled_preds = [' '.join([classes[i] for i, p in enumerate(pred) if p > prediction_threshold])\n for pred in preds]\n return labelled_preds",
"def binarize_preds(predictions: torch.Tensor, threshold=0.5) -> torch.Tensor:\n return predictions.__ge__(threshold).int()",
"def make_predictions(main_dict, param, pheno_df, threshold, features, output):\n\n print('\\nMaking predictions...')\n\n # Split training and test set for cross-validation\n pheno_df, x, y, x_train, x_test, y_train, y_test, phenotypes = split_labeled_set(pheno_df, features,\n param['k_fold_cv'])\n\n # Initialize arrays for NN runs\n identifier_index = len(pheno_df.columns.values) - len(features)\n df_output = pheno_df.iloc[:, :identifier_index]\n sum_prob_labeled = np.zeros([y.shape[0], y.shape[1]])\n sum_prob_test = np.zeros([y.shape[0], y.shape[1]])\n\n # Train NN with cross validation for evaluating performance\n performance = pd.DataFrame()\n divide = x.shape[0] // param['k_fold_cv']\n run = 1\n for cv in range(param['k_fold_cv']):\n start = cv * divide\n end = (cv + 1) * divide\n if cv == (param['k_fold_cv'] - 1):\n end = x.shape[0]\n # Train and make predictions for each fold for a number of runs\n for n in range(param['runs']):\n runn = n + cv * param['runs']\n # Train NN with training set\n model, performance = neural_network(x_train[cv], y_train[cv], param, phenotypes, performance, runn,\n x_test[cv], y_test[cv])\n # Predictions on test data\n probabilities_test = model.predict(x_test[cv], batch_size=param['batch_size'])\n sum_prob_test[start:end] += probabilities_test\n\n # Predictions on labeled data\n probabilities_labeled = model.predict(x, batch_size=param['batch_size'])\n predictions_labeled = np.argmax(probabilities_labeled, axis=1)\n sum_prob_labeled += probabilities_labeled\n df_output['Run-%d' % run] = [phenotypes[i] for i in predictions_labeled]\n run += 1\n\n # Save training performance of cross-validation\n num_runs = param['k_fold_cv'] * param['runs']\n plot_training_performance(performance, output['TrainingCV'], num_runs)\n\n # Train NN with the complete labeled set\n performance = pd.DataFrame()\n sum_prob_all = np.zeros([main_dict['data_scaled'].shape[0], y.shape[1]])\n for n in range(param['runs']):\n model, performance = neural_network(x, y, param, phenotypes, performance, n)\n # Predictions on all data\n probabilities_all = model.predict(main_dict['data_scaled'], batch_size=param['batch_size'])\n sum_prob_all += probabilities_all\n plot_training_performance(performance, output['Training'], param['runs'])\n\n # Labeled set single cell accuracies\n cell_accuracy(df_output, sum_prob_labeled, phenotypes, num_runs, output)\n\n # Test-set predictions\n y_pred = np.argmax(sum_prob_test, axis=1)\n y_true = np.argmax(y, axis=1)\n plot_confusion_matrix(y_true, y_pred, phenotypes, output['Confusion'])\n\n # Make predictions for the complete data\n y_all = sum_prob_all / param['runs']\n y_prob_all = (y_all >= threshold).astype('int')\n y_pred_all = np.argmax(y_all, axis=1)\n phenotype_all = []\n for i in range(len(y_pred_all)):\n pred = phenotypes[y_pred_all[i]]\n # If none of the probabilities pass the threshold, predict as None phenotype\n if sum(y_prob_all[i]) == 0:\n pred = 'none'\n phenotype_all.append(pred)\n\n # Save phenotype predictions for cell_IDs provided\n cell_id = pd.DataFrame(columns=['CellID', 'Prediction'] + list(phenotypes))\n cell_id['CellID'] = main_dict['cell_id']\n cell_id['Prediction'] = np.array(phenotype_all)\n for i in range(len(phenotypes)):\n cell_id[phenotypes[i]] = y_all[:, i]\n cell_id = cell_id.sort_values('CellID', ascending=True).reset_index(drop=True)\n cell_id.to_csv(path_or_buf=output['PhenotypeCellIDs'], index=False)\n\n # Save predictions and inlier state in the combined dictionary\n main_dict['phenotype'] = 
np.array(phenotype_all)\n    main_dict['is_inlier'] = np.array([p == 'negative' for p in main_dict['phenotype']])\n\n    return main_dict",
"def _optimal_thresholds(precisions, recalls, confidence_thresholds, name, eps=1e-6):\n\n def compute_f(beta):\n return (beta**2.0+1.0)*precisions*recalls/(beta**2.0*precisions+recalls+eps)\n\n f1_scores = compute_f(beta=1.0)\n fdot5_scores = compute_f(beta=0.5)\n f2_scores = compute_f(beta=2.0)\n\n index_max_f1 = np.argmax(f1_scores)\n index_max_fdot5 = np.argmax(fdot5_scores)\n index_max_f2 = np.argmax(f2_scores)\n\n print(\"'{}':\".format(name))\n\n print(' Threshold: {:.2f}'.format(confidence_thresholds[index_max_f1]))\n print(' F1 : {:.2f}%'.format(100.0*f1_scores[index_max_f1]))\n print(' P : {:.2f}%'.format(100.0*precisions[index_max_f1]))\n print(' R : {:.2f}%'.format(100.0*recalls[index_max_f1]))\n\n print(' Threshold: {:.2f}'.format(confidence_thresholds[index_max_fdot5]))\n print(' F.5 : {:.2f}%'.format(100.0*fdot5_scores[index_max_fdot5]))\n print(' P : {:.2f}%'.format(100.0*precisions[index_max_fdot5]))\n print(' R : {:.2f}%'.format(100.0*recalls[index_max_fdot5]))\n\n print(' Threshold: {:.2f}'.format(confidence_thresholds[index_max_f2]))\n print(' F2 : {:.2f}%'.format(100.0*f2_scores[index_max_f2]))\n print(' P : {:.2f}%'.format(100.0*precisions[index_max_f2]))\n print(' R : {:.2f}%'.format(100.0*recalls[index_max_f2]))\n\n return {'P1': precisions[index_max_f1].item(),\n 'R1': recalls[index_max_f1].item(),\n 'P.5': precisions[index_max_fdot5].item(),\n 'R.5': recalls[index_max_fdot5].item(),\n 'P2': precisions[index_max_f2].item(),\n 'R2': recalls[index_max_f2].item()}",
"def test_binary_neg_threshold():\n vals = np.zeros((2, 2))\n vals[0, :] = -5\n vals[1, :] = -10\n # create classifier\n C = classifier.BinaryClassifier(vals, -7.5)\n assert np.all(C.data[0, :] == -5)\n assert np.all(C.data[1, :] == -10)\n assert np.all(C.classified[0, :] == 1)\n assert np.all(C.classified[1, :] == 0)\n # check threshold value\n assert C.threshold == -7.5\n # re-classify then check values\n C.classify(0)\n assert np.all(C.data[0, :] == -5)\n assert np.all(C.data[1, :] == -10)\n assert np.all(C.classified[0, :] == 0)\n assert np.all(C.classified[1, :] == 0)",
"def precision(gold_labels, classified_labels, pos_label='1', neg_label='0'):\n # precision = tp/(tp + fp)\n true_positives = 0\n false_positives = 0\n \n for i in range(len(gold_labels)):\n if gold_labels[i] == pos_label and classified_labels[i] == pos_label:\n true_positives += 1\n elif gold_labels[i] == neg_label and classified_labels[i] == pos_label:\n false_positives += 1\n \n if true_positives + false_positives == 0:\n return 0\n \n return true_positives / (true_positives + false_positives)",
"def compute_precision_scores(self, y_pred, y_true, prob_thresholds):\n precisions = []\n for prob_thres in prob_thresholds:\n flagged_idxes = filter(lambda idx: y_pred[idx] >= prob_thres, range(len(y_pred)))\n true_flagged_idxes = filter(lambda idx: y_pred[idx] >= prob_thres and y_true[idx] == 1, range(len(y_pred)))\n precision = (len(true_flagged_idxes) / float(len(flagged_idxes))) if len(flagged_idxes) else 0.0\n precisions.append((precision, prob_thres))\n\n return sorted(precisions, key=lambda (prec, prob): prec)",
"def calculate_metrics(true_test_labels, pred_test_labels):\n weighted_f1 = precision_recall_fscore_support(true_test_labels,\n pred_test_labels,\n beta=3,\n average='binary')\n\n precision, recall, fbeta_score, _ = weighted_f1\n\n return precision, recall, fbeta_score",
"def __compute_non_binary_metrics(class_probability_matrix, target_values):\n\n probability_threshold = 0.611\n predicted_labels = eval_utils.determinize_probabilities(\n class_probability_matrix=class_probability_matrix,\n binarization_threshold=probability_threshold)\n\n contingency_matrix = eval_utils.get_contingency_table(\n predicted_labels=predicted_labels, observed_labels=target_values,\n num_classes=3)\n\n print 'Contingency table:\\n{0:s}\\n'.format(str(contingency_matrix))\n\n accuracy = eval_utils.get_accuracy(contingency_matrix)\n peirce_score = eval_utils.get_peirce_score(contingency_matrix)\n heidke_score = eval_utils.get_heidke_score(contingency_matrix)\n gerrity_score = eval_utils.get_gerrity_score(contingency_matrix)\n\n print (\n 'Accuracy = {0:.4f} ... Peirce score = {1:.4f} ... Heidke score = '\n '{2:.4f} ... Gerrity score = {3:.4f}\\n'\n ).format(accuracy, peirce_score, heidke_score, gerrity_score)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a PR curves summary protobuf from raw data values.
|
def raw_data_pb(
name,
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
num_thresholds=None,
display_name=None,
description=None,
):
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
if display_name is None:
display_name = name
summary_metadata = metadata.create_summary_metadata(
display_name=display_name if display_name is not None else name,
description=description or "",
num_thresholds=num_thresholds,
)
tf_summary_metadata = tf.SummaryMetadata.FromString(
summary_metadata.SerializeToString()
)
summary = tf.Summary()
data = np.stack(
(
true_positive_counts,
false_positive_counts,
true_negative_counts,
false_negative_counts,
precision,
recall,
)
)
tensor = tf.make_tensor_proto(np.float32(data), dtype=tf.float32)
summary.value.add(
tag="%s/pr_curves" % name, metadata=tf_summary_metadata, tensor=tensor
)
return summary
|
[
"def pr_curve_raw(tag, tp, fp, tn, fn, precision, recall, step, walltime):\n \"\"\"\n if isinstance(tp, np.ndarray):\n tp = tp.astype(int).tolist()\n if isinstance(fp, np.ndarray):\n fp = fp.astype(int).tolist()\n if isinstance(tn, np.ndarray):\n tn = tn.astype(int).tolist()\n if isinstance(fn, np.ndarray):\n fn = fn.astype(int).tolist()\n if isinstance(precision, np.ndarray):\n precision = precision.astype(int).tolist()\n if isinstance(recall, np.ndarray):\n recall = recall.astype(int).tolist()\n \"\"\"\n prcurve = Record.PRCurve(\n TP=tp, FP=fp, TN=tn, FN=fn, precision=precision, recall=recall)\n return Record(values=[\n Record.Value(id=step, tag=tag, timestamp=walltime, pr_curve=prcurve)\n ])",
"def roc_curve_raw(tag, tp, fp, tn, fn, tpr, fpr, step, walltime):\n \"\"\"\n if isinstance(tp, np.ndarray):\n tp = tp.astype(int).tolist()\n if isinstance(fp, np.ndarray):\n fp = fp.astype(int).tolist()\n if isinstance(tn, np.ndarray):\n tn = tn.astype(int).tolist()\n if isinstance(fn, np.ndarray):\n fn = fn.astype(int).tolist()\n if isinstance(tpr, np.ndarray):\n tpr = tpr.astype(int).tolist()\n if isinstance(fpr, np.ndarray):\n fpr = fpr.astype(int).tolist()\n \"\"\"\n roc_curve = Record.ROC_Curve(TP=tp, FP=fp, TN=tn, FN=fn, tpr=tpr, fpr=fpr)\n return Record(values=[\n Record.Value(\n id=step, tag=tag, timestamp=walltime, roc_curve=roc_curve)\n ])",
"def translate_summary(data):\n headers = sorted(data.get(\"Headers\", []))\n summary = '\\\\FloatBarrier \\n \\\\section{$NAME} \\n'.replace('$NAME', data.get(\"Title\", \"table\"))\n summary += ' \\\\begin{table}[!ht] \\n \\\\begin{center}'\n\n # Set the number of columns\n n_cols = len(headers)\n col_str = \"l\" + \"c\" * n_cols\n summary += '\\n \\\\begin{tabular}{$NCOLS} \\n'.replace(\"$NCOLS\", col_str)\n spacer = ' &' * n_cols + r'\\\\[.5em]'\n\n for header in headers:\n summary += '& $HEADER '.replace('$HEADER', header).replace('%', '\\%')\n summary += ' \\\\\\\\ \\hline \\n'\n\n names = sorted(six.iterkeys(data.get(\"Data\", [])))\n for name in names:\n summary += '\\n\\n \\\\textbf{{{}}} {} \\n'.format(name, spacer)\n cases = data.get(\"Data\", []).get(name, {})\n for case, c_data in cases.items():\n summary += ' $CASE & '.replace('$CASE', str(case))\n for header in headers:\n h_data = c_data.get(header, \"\")\n if list is type(h_data) and len(h_data) == 2:\n summary += (' $H_DATA_0 of $H_DATA_1 &'\n .replace('$H_DATA_0', str(h_data[0]))\n .replace('$H_DATA_1', str(h_data[1]))\n .replace('%', '\\%'))\n else:\n summary += ' $H_DATA &'.replace('$H_DATA', str(h_data)).replace('%', '\\%')\n\n # This takes care of the trailing & that comes from processing the headers.\n summary = summary[:-1] + r' \\\\'\n\n summary += '\\n \\end{tabular} \\n \\end{center} \\n \\end{table}\\n'\n return summary",
"def compute_roc_curve(labels, predictions, num_thresholds=None, weights=None):\n if isinstance(labels, list):\n labels = np.array(labels)\n if isinstance(predictions, list):\n predictions = np.array(predictions)\n _MINIMUM_COUNT = 1e-7\n\n if weights is None:\n weights = 1.0\n\n bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))\n float_labels = labels.astype(np.float)\n histogram_range = (0, num_thresholds - 1)\n tp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=float_labels * weights)\n fp_buckets, _ = np.histogram(\n bucket_indices,\n bins=num_thresholds,\n range=histogram_range,\n weights=(1.0 - float_labels) * weights)\n\n # Obtain the reverse cumulative sum.\n tp = np.cumsum(tp_buckets[::-1])[::-1]\n fp = np.cumsum(fp_buckets[::-1])[::-1]\n tn = fp[0] - fp\n fn = tp[0] - tp\n tpr = tp / np.maximum(_MINIMUM_COUNT, tn + fp)\n fpr = fp / np.maximum(_MINIMUM_COUNT, tn + fp)\n data = {\n 'tp': tp.astype(int).tolist(),\n 'fp': fp.astype(int).tolist(),\n 'tn': tn.astype(int).tolist(),\n 'fn': fn.astype(int).tolist(),\n 'tpr': tpr.astype(float).tolist(),\n 'fpr': fpr.astype(float).tolist()\n }\n return data",
"def format_message(self, evr_hist_data):\n size_formatter_info = {\n \"s\": -1,\n \"c\": 1,\n \"i\": 4,\n \"d\": 4,\n \"u\": 4,\n \"x\": 4,\n \"hh\": 1,\n \"h\": 2,\n \"l\": 4,\n \"ll\": 8,\n \"f\": 8,\n \"g\": 8,\n \"e\": 8,\n }\n type_formatter_info = {\n \"c\": \"U{}\",\n \"i\": \"MSB_I{}\",\n \"d\": \"MSB_I{}\",\n \"u\": \"MSB_U{}\",\n \"f\": \"MSB_D{}\",\n \"e\": \"MSB_D{}\",\n \"g\": \"MSB_D{}\",\n \"x\": \"MSB_U{}\",\n }\n\n formatters = re.findall(r\"%(?:\\d+\\$)?([cdieEfgGosuxXhlL]+)\", self._message)\n\n cur_byte_index = 0\n data_chunks = []\n\n for f in formatters:\n # If the format string we found is > 1 character we know that a length\n # field is included and we need to adjust our sizing accordingly.\n f_size_char = f_type = f[-1]\n if len(f) > 1:\n f_size_char = f[:-1]\n\n fsize = size_formatter_info[f_size_char.lower()]\n\n try:\n if f_type != \"s\":\n end_index = cur_byte_index + fsize\n fstr = type_formatter_info[f_type.lower()].format(fsize * 8)\n\n # Type formatting can give us incorrect format strings when\n # a size formatter promotes a smaller data type. For instnace,\n # 'hhu' says we'll promote a char (1 byte) to an unsigned\n # int for display. Here, the type format string would be\n # incorrectly set to 'MSB_U8' if we didn't correct.\n if fsize == 1 and \"MSB_\" in fstr:\n fstr = fstr[4:]\n\n d = dtype.PrimitiveType(fstr).decode(\n evr_hist_data[cur_byte_index:end_index]\n )\n\n # Some formatters have an undefined data size (such as strings)\n # and require additional processing to determine the length of\n # the data and decode data.\n else:\n end_index = evr_hist_data.find(0x00, cur_byte_index)\n d = str(evr_hist_data[cur_byte_index:end_index], \"utf-8\")\n\n data_chunks.append(d)\n # TODO: Make this not suck\n except Exception:\n msg = \"Unable to format EVR Message with data {}\".format(evr_hist_data)\n log.error(msg)\n raise ValueError(msg)\n\n cur_byte_index = end_index\n\n # If we were formatting a string we need to add another index offset\n # to exclude the null terminator.\n if f == \"s\":\n cur_byte_index += 1\n\n # Format and return the EVR message if formatters were present, otherwise\n # just return the EVR message as is.\n if len(formatters) == 0:\n return self._message\n else:\n # Python format strings cannot handle size formatter information. So something\n # such as %llu needs to be adjusted to be a valid identifier in python by\n # removing the size formatter.\n msg = self._message\n for f in formatters:\n if len(f) > 1:\n msg = msg.replace(\"%{}\".format(f), \"%{}\".format(f[-1]))\n\n return msg % tuple(data_chunks)",
"def _build_parsed_values(self):\n log.debug('VectorVelocityHeaderDataParticle: raw data =%r', self.raw_data)\n\n try:\n unpack_string = '<4s6sH8B20sH'\n sync, timestamp, number_of_records, noise1, noise2, noise3, _, correlation1, correlation2, correlation3, _,\\\n _, cksum = struct.unpack(unpack_string, self.raw_data)\n\n if not validate_checksum('<20H', self.raw_data):\n log.warn(\"Failed checksum in %s from instrument (%r)\", self._data_particle_type, self.raw_data)\n self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED\n\n timestamp = NortekProtocolParameterDict.convert_time(timestamp)\n self.set_internal_timestamp((timestamp-datetime(1900, 1, 1)).total_seconds())\n\n except Exception as e:\n log.error('Error creating particle vel3d_cd_data_header, raw data: %r', self.raw_data)\n raise SampleException(e)\n\n result = [{DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.TIMESTAMP, DataParticleKey.VALUE: str(timestamp)},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NUMBER_OF_RECORDS, DataParticleKey.VALUE: number_of_records},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE1, DataParticleKey.VALUE: noise1},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE2, DataParticleKey.VALUE: noise2},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.NOISE3, DataParticleKey.VALUE: noise3},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION1, DataParticleKey.VALUE: correlation1},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION2, DataParticleKey.VALUE: correlation2},\n {DataParticleKey.VALUE_ID: VectorVelocityHeaderDataParticleKey.CORRELATION3, DataParticleKey.VALUE: correlation3}]\n\n log.debug('VectorVelocityHeaderDataParticle: particle=%s', result)\n return result",
"def make_summary(value_dict):\n return tf.Summary(value=[tf.Summary.Value(tag=k, simple_value=v) for k, v in value_dict.items()])",
"def create_rate_figure(max_len=60):\n latency_tracker.update()\n keys = sorted(latency_tracker.time.keys())\n data = []\n now = datetime.datetime.now()\n i = 0\n rate_axis_max = 3\n for key in keys:\n time_vals = [datetime.datetime.fromtimestamp(t) for t in latency_tracker.time[key][-max_len:]]\n rate = np.array(latency_tracker.rate[key][-max_len:])\n if key == 'all':\n name = 'Total'\n style = {'width': 2, 'color': 'black'}\n else:\n name = 'Worker ' + str(i)\n style = {}\n i += 1\n data.append(go.Scatter(\n x=time_vals,\n y=rate,\n line=style,\n mode='lines',\n name=name\n ))\n if key == 'all':\n # Add ingestion rate data\n inrate = np.array(latency_tracker.ingestion_rate['all'][-max_len:])\n data.append(go.Scatter(x=time_vals, y=inrate, line={'width':3, 'color':'red', 'dash': 'dash'},\n name='Ingestion', mode='lines'))\n if rate.size and inrate.size:\n rate_axis_max = max(rate_axis_max, np.max(rate) + 0.9)\n rate_axis_max = max(rate_axis_max, np.max(inrate) + 0.9)\n return {\n 'data': data \n ,\n 'layout': {\n 'height': 210,\n 'margin': {'l': 70, 'b': 40, 'r': 10, 't': 10},\n 'yaxis': {'type': 'linear', 'range': [0,rate_axis_max], 'autorange': False, 'title': 'Rate (x1000msg/s)'},\n 'xaxis': {'range': [now-datetime.timedelta(seconds=60), now], 'autorange': False, 'title': 'Date'},\n 'legend':{'x':0.05,'y':0.95, 'borderwidth':1, 'bgcolor':'white' }\n }\n }",
"def make_curve(report, success_name, fail_names):\n success_results = report[success_name]\n fail_name = None # pacify pylint\n found = False\n for fail_name in fail_names:\n if fail_name in report:\n found = True\n break\n if not found:\n raise ValueError(fail_name + \" not in report.\"\n \"Available keys: \" + str(report.keys()))\n fail_results = report[fail_name]\n\n # \"good\" means drawn from the distribution where we measure success rate.\n # \"bad\" means drawn from the distribution where we measure failure rate.\n # From here on out we use those terms, to avoid confusion between examples\n # that actually failed and examples that were drawn from the distribution\n # where we measured failure rate.\n\n old_all_probs_version = False\n if isinstance(success_results, dict):\n # This dictionary key lookup will trigger a deprecation warning if `success_results` is not the old dictionary\n # style of report, so we don't want to do a dictionary lookup unless we really are using the old version.\n old_all_probs_version = 'all_probs' in success_results\n\n if old_all_probs_version:\n warnings.warn(\"The 'all_probs' key is included only to support \"\n \" old files from a private development codebase. \"\n \"Support for this key can be dropped at any time \"\n \" without warning.\")\n good_probs = success_results['all_probs']\n bad_probs = fail_results['all_probs']\n bad_corrects = fail_results['correctness_mask']\n good_corrects = success_results['correctness_mask']\n else:\n if isinstance(success_results, dict):\n # Still using dict, but using newer key names\n warnings.warn(\"Support for dictionary confidence reports is deprecated. Switch to using the classes in \"\n \"cleverhans.confidence_report. Support for old dictionary-style reports may be removed \"\n \"on or after 2019-07-19.\")\n good_probs = success_results['confidence']\n bad_probs = fail_results['confidence']\n good_corrects = success_results['correctness']\n bad_corrects = fail_results['correctness']\n else:\n # current version\n good_probs = success_results.confidence\n bad_probs = fail_results.confidence\n good_corrects = success_results.correctness\n bad_corrects = fail_results.correctness\n good_triplets = [(prob, correct, True) for prob, correct\n in safe_zip(good_probs, good_corrects)]\n bad_triplets = [(prob, correct, False) for prob, correct\n in safe_zip(bad_probs, bad_corrects)]\n total_good = len(good_triplets)\n total_bad = len(bad_triplets)\n if total_good != 10000:\n warnings.warn(\"Not using full test set? 
Found \" + str(total_good) +\n \" examples for measuring success rate\")\n if total_bad != 10000:\n warnings.warn(\"Not using full test set for adversarial examples?\")\n all_triplets = good_triplets + bad_triplets\n all_triplets = sorted(all_triplets, key=lambda x: -x[0])\n\n # Start with the case for threshold t = 1.\n # Examples are covered only if prob > t (strict inequality)\n # So initially nothing is covered\n good_covered_and_correct = 0\n bad_covered_and_incorrect = 0\n\n # Number of examples that are bad, incorrect, and covered by\n # a t >= 0.5, or that were merely covered by a t < 0.5\n failure_opportunities = 0\n\n next_idx = 0\n\n fail_optimal = []\n success_optimal = []\n fail_upper_bound = []\n fail_lower_bound = []\n success_bounded = []\n\n bounded = False\n\n # NOTE: the loop always exits via an internal break statement.\n # Copied the termination condition to the while statement for ease\n # of reading.\n while next_idx < len(all_triplets):\n gs = float(good_covered_and_correct) / total_good\n bf = float(bad_covered_and_incorrect) / total_bad\n # Add results for current threshold to the list\n if not bounded:\n\n # Sometimes when there are big jumps the failure rate it makes\n # artifacts in the plot, where there's a long linear track.\n # This implies the real success-fail curve is linear when\n # actually it just isn't sampled by the data.\n # To avoid implying that the model reaches a higher success\n # rate than it actually does, we avoid these plotting artifacts\n # by introducing extra points that make the graph move horizontally\n # to the right first, then vertically.\n if len(fail_optimal) > 0:\n prev_bf = fail_optimal[-1]\n prev_gs = success_optimal[-1]\n\n if gs > prev_gs and bf > prev_bf:\n fail_optimal.append(bf)\n success_optimal.append(prev_gs)\n\n success_optimal.append(gs)\n fail_optimal.append(bf)\n else:\n success_bounded.append(gs)\n fail_lower_bound.append(bf)\n fail_upper_bound.append(float(failure_opportunities) / total_bad)\n\n if next_idx == len(all_triplets):\n break\n\n # next_prob_to_include is not quite the same thing as the threshold.\n # The threshold is infinitesimally smaller than this value.\n next_prob_to_include = all_triplets[next_idx][0]\n\n # Process all ties\n while next_prob_to_include == all_triplets[next_idx][0]:\n _prob, correct, is_good = all_triplets[next_idx]\n if is_good:\n good_covered_and_correct += correct\n else:\n if next_prob_to_include <= .5:\n failure_opportunities += 1\n else:\n failure_opportunities += 1 - correct\n bad_covered_and_incorrect += 1 - correct\n next_idx += 1\n if next_idx == len(all_triplets):\n break\n\n if next_prob_to_include <= .5:\n bounded = True\n\n out = (fail_optimal, success_optimal, fail_lower_bound, fail_upper_bound,\n success_bounded)\n return out",
"def get_roc_curve_single_subject(raw_path, results_path):\n if type(results_path) == str:\n res_df = pd.read_csv(results_path, index_col=0)\n else:\n res_df = results_path\n\n mean_tpr = 0.0\n mean_fpr = np.linspace(0, 1, 100)\n\n fprs, tprs, roc_aucs = [], [], []\n\n best_fpr, best_tpr, best_roc_auc = None, None, 0.0\n\n for test_type in range(1, 6):\n # choose best test score out of top 20 best validation scores\n best_res = res_df[res_df.test_type == '[' + str(test_type) + ']'].sort_values(['mean_test_score'], ascending=False).head(1)\n # best_res = best_res.sort_values(['best_estimator_test_score'], ascending=False).head(1)\n\n data_df = extract_features(\n raw_path,\n best_res['au_method'].values.tolist()[0],\n int(best_res['au_top'].values.tolist()[0]),\n best_res['fe_method'].values.tolist()[0],\n int(best_res['fe_top'].values.tolist()[0]),\n best_res['pca_method'].values.tolist()[0],\n int(best_res['pca_dim'].values.tolist()[0]),\n best_res['learning_method'].values.tolist()[0],\n '.garbage'\n )\n\n data = data_df.iloc[:, len(META_COLUMNS):].values\n target = (data_df[TARGET_COLUMN] == RecordFlags.RECORD_FLAG_ANSWER_TRUE).values\n\n train_idx, test_idx = (data_df[data_df.question_type != test_type].index, data_df[data_df.question_type == test_type].index)\n\n best_estimator = svm.SVC(C=best_res['param_C'].values.tolist()[0], probability=True, kernel='linear')\n\n class_weight = dict(Counter(target))\n class_weight_sum = max(list(class_weight.values()))\n\n for x in class_weight.keys():\n class_weight[x] = 1. * class_weight[x] / class_weight_sum\n\n best_estimator.set_params(class_weight=class_weight)\n\n best_estimator.fit(data[train_idx, :], target[train_idx])\n\n y_test = target[test_idx]\n probas = best_estimator.predict_proba(data[test_idx, :])\n\n # Compute ROC curve and ROC area for each class\n fpr, tpr, thresholds = roc_curve(y_test, probas[:, 1])\n roc_auc = auc(fpr, tpr)\n\n mean_tpr += np.interp(mean_fpr, fpr, tpr)\n mean_tpr[0] = 0.0\n\n fprs.append(fpr)\n tprs.append(tpr)\n roc_aucs.append(roc_auc)\n\n if roc_auc > best_roc_auc:\n best_fpr = fpr\n best_tpr = tpr\n best_roc_auc = roc_auc\n\n mean_tpr /= 5\n mean_tpr[-1] = 1.0\n mean_roc_auc = auc(mean_fpr, mean_tpr)\n\n return fprs, tprs, roc_aucs, mean_fpr, mean_tpr, mean_roc_auc, best_fpr, best_tpr, best_roc_auc",
"def _read_data_msg(self):\n\t\tself.values.clear()\n\t\tdata_pattern = re.compile('(\\w+)-(\\w+):(\\w+).(\\w+).(\\w+)*(\\w+)?')\n\t\tvalue_pattern = re.compile('\\((.*?)\\)')\n\t\tend = b'\\x03'\n\t\twhile True:\n\t\t\tbinary_line = self.ser.readline()\n\t\t\t_logger.debug(binascii.hexlify(binary_line))\n\t\t\tline = binary_line.decode('ascii')\n\t\t\t_logger.debug(line)\n\n\t\t\tosis_data = data_pattern.match(line)\n\t\t\tvalue_data = value_pattern.search(line)\n\t\t\tif not value_data:\n\t\t\t\t_logger.warning('No value match')\n\t\t\t\tcontinue\n\t\t\tvalue_groups = value_data.groups()\n\t\t\tif not value_groups:\n\t\t\t\t_logger.warning('No value groups')\n\t\t\t\tcontinue\n\t\t\tv = value_groups[0].split('*')\n\t\t\t_value = v[0]\n\t\t\t_unit = v[1] if len(v) == 2 else None\n\n\t\t\tif not osis_data:\n\t\t\t\t_logger.warning('No osis data')\n\t\t\t\tcontinue\n\t\t\tvalue = Value(\n\t\t\t\tmedium=osis_data.group(1),\n\t\t\t\tchannel=osis_data.group(2),\n\t\t\t\tmeasure=osis_data.group(3),\n\t\t\t\tmode=osis_data.group(4),\n\t\t\t\trate=osis_data.group(5),\n\t\t\t\tprevious=osis_data.group(6),\n\t\t\t\tvalue=_value,\n\t\t\t\tunit=_unit\n\t\t\t)\n\t\t\tself.values.append(value)\n\t\t\t_logger.debug(str(value))\n\t\t\tif end in binary_line:\n\t\t\t\t_logger.info('end of data message detected')\n\t\t\t\t# bcc = line[1]\n\t\t\t\tbreak",
"def cpr_curve_creator(description='.2 ramp 6 for 30, 6'):\n\n periods = str(description).split(',')\n nperiods = 360\n end_period = False\n\n cpr_curve = []\n\n current_period = 1\n\n for period in periods:\n start_cpr = 0\n end_cpr = 0\n period_duration = 0\n cpr_increment = 0\n period_curve = None\n\n if period == periods[-1]:\n end_period = True\n\n period_duration = nperiods + current_period\n words = period.strip().split(' ')\n\n for i in range(len(words)):\n if i == 0:\n start_cpr = float(words[i]) / 100.\n end_cpr = float(words[i]) / 100.\n elif words[i] == 'ramp':\n end_cpr = float(words[i + 1]) / 100.\n elif words[i] == 'for':\n period_duration = float(words[i + 1])\n\n period_curve = np.linspace(start_cpr, end_cpr, period_duration)\n\n cpr_curve.extend(list(period_curve))\n current_period += period_duration\n\n return cpr_curve",
"def _generate_curve(losses, probs_of_exceedance):\n\n mean_losses = collect(loop(losses, lambda x, y: mean([x, y])))\n return shapes.Curve(zip(mean_losses, probs_of_exceedance))",
"def createData(self):\n print(\"Creating Data...\")\n #--- Draw CMB_T Random Field S --->\n self.s = self.config.C_T.get_random_field(domain=self.config.domain)\n# print(self.s)\n self.s_power = self.s.power()\n #--- Draw Noise --->\n self.n = self.config.N.get_random_field(domain=self.config.domain)\n #--- Create Data: Signal to Lensing Response --->\n self.Rs = self.R(self.s)\n d = self.Rs + self.n\n return d",
"def _parse_raw_data(self, raw_data: bytes) -> dict:\n if len(self.presets) != len(raw_data) // 4:\n raise ValueError(f\"Incorrect raw_data length\\nExpected: {len(self.presets)}, Got: {len(raw_data) // 4}\")\n\n registers = struct.unpack(f\"<{len(raw_data) // 4}L\", raw_data)\n customs = {name: format_value(registers[i], 32) for i, name in enumerate(self.presets)}\n return customs",
"def _make_feature_stats_proto(\n stats_values,\n feature_name):\n\n result = statistics_pb2.FeatureNameStatistics()\n result.name = feature_name\n\n # Sort alphabetically by statistic name to have deterministic ordering\n stat_names = sorted(stats_values.keys())\n for stat_name in stat_names:\n result.custom_stats.add(name=stat_name, num=stats_values[stat_name])\n return result",
"def summary(self):\n\n init_str = (\n f\"pypcurve v. {self.__version__} is based on Uri Simonsohn's \"\n f\"P-Curve's app v. {self.__pcurve_app_version__}.\\n\"\n )\n print(init_str)\n self.plot_pcurve(dpi=100)\n plt.show()\n summary_str = (\"------------- Summary of p-curve tests -------------\\n\\n\"\n + self.pcurve_analysis_summary().to_string())\n print(summary_str)\n return None",
"def calc_roc(test_data, probs_data):\n return roc_curve(test_data, probs_data)",
"def format_data(desc, data):\n first, last = desc[FIRST_VALID_PNT],desc[LAST_VALID_PNT]\n offset, gain = desc[VERTICAL_OFFSET],desc[VERTICAL_GAIN]\n data = data[first:last] * gain - offset\n xoff, xint = desc[HORIZ_OFFSET],desc[HORIZ_INTERVAL]\n x = np.arange(xoff,xoff + len(data)*xint, xint)\n return x, data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get Apache Beam pipeline options to run with Dataflow on the cloud
|
def get_cloud_pipeline_options():
options = {
'runner': 'DataflowRunner',
'job_name': ('relation-extraction-{}'.format(
datetime.now().strftime('%Y%m%d%H%M%S'))),
'staging_location': "gs://relation_extraction/beam/binaries/",
'temp_location': "gs://relation_extraction/beam/tmp/",
'project': "iotpubsub-1536350750202",
'region': 'europe-west1',
'zone': 'europe-west1-b',
'autoscaling_algorithm': 'THROUGHPUT_BASED',
'save_main_session': True,
'setup_file': './setup.py',
}
return beam.pipeline.PipelineOptions(flags=[], **options)
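A brief sketch of how these options might be wired into a job; the read/write paths below are hypothetical, while the bucket and project values come from the function above:

import apache_beam as beam

def run():
    # Build the Dataflow options defined above and run a trivial count job.
    options = get_cloud_pipeline_options()
    with beam.Pipeline(options=options) as p:
        _ = (
            p
            | "Read" >> beam.io.ReadFromText("gs://relation_extraction/input/*.txt")  # hypothetical path
            | "Count" >> beam.combiners.Count.Globally()
            | "Write" >> beam.io.WriteToText("gs://relation_extraction/beam/output/count")  # hypothetical path
        )

if __name__ == "__main__":
    run()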
|
[
"def pipeline_options_local(argv):\n\n from google.cloud.dataflow import Pipeline\n from google.cloud.dataflow.utils.options import PipelineOptions\n\n options = PipelineOptions(flags=argv)\n\n # [START pipeline_options_define_custom_with_help_and_default]\n class MyOptions(PipelineOptions):\n\n @classmethod\n def _add_argparse_args(cls, parser):\n parser.add_argument('--input',\n help='Input for the dataflow pipeline',\n default='gs://my-bucket/input')\n parser.add_argument('--output',\n help='Output for the dataflow pipeline',\n default='gs://my-bucket/output')\n # [END pipeline_options_define_custom_with_help_and_default]\n\n my_options = options.view_as(MyOptions)\n\n my_input = my_options.input\n my_output = my_options.output\n\n # [START pipeline_options_local]\n # Create and set your Pipeline Options.\n options = PipelineOptions()\n p = Pipeline(options=options)\n # [END pipeline_options_local]\n\n lines = p | df.io.Read('ReadFromText', df.io.TextFileSource(my_input))\n lines | df.io.Write('WriteToText', df.io.TextFileSink(my_output))\n p.run()",
"def create_beam_pipeline():\n # Define Beam pipeline options.\n options = {\n 'runner': FLAGS.runner\n }\n # Define Dataflow-specific options.\n if 'dataflow' in FLAGS.runner.lower():\n temp_location = os.path.join(FLAGS.output_dir.rstrip('/'), 'tmp')\n options.update({\n 'project': FLAGS.project_id,\n 'job_name': 'astronet-preprocess-{}'.format(\n datetime.datetime.now().strftime('%Y%m%d%H%M%S')),\n 'temp_location': temp_location,\n 'max_num_workers': 5,\n 'region': 'us-east1',\n 'setup_file':\n os.path.abspath(\n os.path.join(os.path.dirname(__file__), '../../', 'setup.py'))\n })\n pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options)\n\n pipeline = beam.Pipeline(options=pipeline_options)\n\n return pipeline",
"def cloud_build_options(self) -> Optional[pulumi.Input['FlexibleAppVersionDeploymentCloudBuildOptionsArgs']]:\n return pulumi.get(self, \"cloud_build_options\")",
"def bigquery_options(self) -> Sequence['outputs.GetSinkBigqueryOptionResult']:\n return pulumi.get(self, \"bigquery_options\")",
"def gcloud_config():\n return {\n \"account\": \"test@hail.is\",\n \"project\": \"hailctl-dataproc-tests\",\n \"dataproc/region\": \"us-central1\",\n \"compute/zone\": \"us-central1-b\",\n }",
"def get_pip_cli_options(self):\n options = [\n \"--python-version\",\n self.python_version,\n \"--implementation\",\n self.implementation,\n ]\n for abi in self.abis:\n options.extend([\"--abi\", abi])\n\n for platform in self.platforms:\n options.extend([\"--platform\", platform])\n\n return options",
"def custom_arg_options(self) -> Dict[str, Any]:\n return self.field.metadata.get(\"custom_args\", {})",
"def run(argv=None):\n # type: (List[str]) -> None\n logging.info('Command: %s', ' '.join(argv or sys.argv))\n known_args, pipeline_args = vcf_to_bq_common.parse_args(argv,\n _COMMAND_LINE_OPTIONS)\n # Note VepRunner creates new input files, so it should be run before any\n # other access to known_args.input_pattern.\n if known_args.run_annotation_pipeline:\n runner = vep_runner.create_runner_and_update_args(known_args, pipeline_args)\n runner.run_on_all_files()\n runner.wait_until_done()\n logging.info('Using VEP processed files: %s', known_args.input_pattern)\n\n variant_merger = _get_variant_merge_strategy(known_args)\n pipeline_mode = vcf_to_bq_common.get_pipeline_mode(\n known_args.input_pattern, known_args.optimize_for_large_inputs)\n\n # Starts a pipeline to merge VCF headers in beam if the total files that\n # match the input pattern exceeds _SMALL_DATA_THRESHOLD\n _merge_headers(known_args, pipeline_args, pipeline_mode)\n\n # Retrieve merged headers prior to launching the pipeline. This is needed\n # since the BigQuery schema cannot yet be dynamically created based on input.\n # See https://issues.apache.org/jira/browse/BEAM-2801.\n header_fields = vcf_header_parser.get_vcf_headers(\n known_args.representative_header_file)\n counter_factory = metrics_util.CounterFactory()\n processed_variant_factory = processed_variant.ProcessedVariantFactory(\n header_fields,\n known_args.split_alternate_allele_info_fields,\n known_args.annotation_fields,\n known_args.use_allele_num,\n known_args.minimal_vep_alt_matching,\n counter_factory)\n\n partitioner = None\n if ((known_args.optimize_for_large_inputs and variant_merger) or\n known_args.partition_config_path):\n partitioner = variant_partition.VariantPartition(\n known_args.partition_config_path)\n\n beam_pipeline_options = pipeline_options.PipelineOptions(pipeline_args)\n pipeline = beam.Pipeline(options=beam_pipeline_options)\n variants = _read_variants(pipeline, known_args)\n variants |= 'FilterVariants' >> filter_variants.FilterVariants(\n reference_names=known_args.reference_names)\n if partitioner:\n num_partitions = partitioner.get_num_partitions()\n partitioned_variants = variants | 'PartitionVariants' >> beam.Partition(\n partition_variants.PartitionVariants(partitioner), num_partitions)\n variants = []\n for i in range(num_partitions):\n if partitioner.should_keep_partition(i):\n variants.append(partitioned_variants[i])\n else:\n num_partitions -= 1\n else:\n # By default we don't partition the data, so we have only 1 partition.\n num_partitions = 1\n variants = [variants]\n\n for i in range(num_partitions):\n if variant_merger:\n variants[i] |= ('MergeVariants' + str(i) >>\n merge_variants.MergeVariants(variant_merger))\n variants[i] |= (\n 'ProcessVariants' + str(i) >>\n beam.Map(processed_variant_factory.create_processed_variant).\\\n with_output_types(processed_variant.ProcessedVariant))\n if partitioner and partitioner.should_flatten():\n variants = [variants | 'FlattenPartitions' >> beam.Flatten()]\n num_partitions = 1\n\n for i in range(num_partitions):\n table_suffix = ''\n if partitioner and partitioner.get_partition_name(i):\n table_suffix = '_' + partitioner.get_partition_name(i)\n table_name = known_args.output_table + table_suffix\n _ = (variants[i] | 'VariantToBigQuery' + table_suffix >>\n variant_to_bigquery.VariantToBigQuery(\n table_name,\n header_fields,\n variant_merger,\n processed_variant_factory,\n append=known_args.append,\n update_schema_on_append=known_args.update_schema_on_append,\n 
allow_incompatible_records=known_args.allow_incompatible_records,\n omit_empty_sample_calls=known_args.omit_empty_sample_calls,\n num_bigquery_write_shards=known_args.num_bigquery_write_shards,\n null_numeric_value_replacement=(\n known_args.null_numeric_value_replacement)))\n\n result = pipeline.run()\n result.wait_until_finish()\n\n metrics_util.log_all_counters(result)",
"def experimental_options(self):\n ...",
"def get_pipeline():",
"def createOptionParameter(self):\n p = package.EnumParameter(\"dataFlow\", \"Data flow\")\n p.isInit = True\n for opt in self.m.options:\n desc = package.EnumDescription(opt.ident.constant(), str(opt.name))\n desc.name = opt.name\n p.descriptions.append(desc)\n return p",
"def setup(parser):\n\n parser.add_argument(\n '--sink_write_disposition',\n help='How to merge the output of this process with whatever records are already there in the sink tables. Might be WRITE_TRUNCATE to remove all existing data and write the new data, or WRITE_APPEND to add the new date without. Defaults to WRITE_APPEND.',\n default='WRITE_APPEND',\n )\n parser.add_argument(\n '--wait',\n help='When present, waits until the dataflow job is done before returning.',\n action='store_true',\n default=False,\n )\n\n required = parser.add_argument_group('remote required arguments')\n required.add_argument(\n '--sink',\n help='BigQuery table names to which the processed data is uploaded.',\n required=True,\n )",
"def test_terraform_cloud_from_options(self):\n collector = Collector(\n project_name=\"project_name\",\n terraform_backend=\"terraform-cloud\",\n terraform_cloud_hostname=\"app.terraform.io\",\n terraform_cloud_token=\"mytfcT0k3N\",\n terraform_cloud_organization=\"myTFCOrg\",\n terraform_cloud_organization_create=True,\n terraform_cloud_admin_email=\"admin@test.com\",\n )\n self.assertEqual(collector.terraform_cloud_hostname, \"app.terraform.io\")\n self.assertEqual(collector.terraform_cloud_token, \"mytfcT0k3N\")\n self.assertEqual(collector.terraform_cloud_organization, \"myTFCOrg\")\n self.assertTrue(collector.terraform_cloud_organization_create)\n self.assertEqual(collector.terraform_cloud_admin_email, \"admin@test.com\")\n with mock.patch(\"bootstrap.collector.click\") as mocked_click:\n collector.set_terraform_cloud()\n self.assertEqual(collector.terraform_cloud_hostname, \"app.terraform.io\")\n self.assertEqual(collector.terraform_cloud_token, \"mytfcT0k3N\")\n self.assertEqual(collector.terraform_cloud_organization, \"myTFCOrg\")\n self.assertTrue(collector.terraform_cloud_organization_create)\n self.assertEqual(collector.terraform_cloud_admin_email, \"admin@test.com\")\n mocked_click.prompt.assert_not_called()",
"def launcher_argv(self, is_geopmctl):\n result = []\n result.extend(self.num_node_option())\n result.extend(self.exclude_list_option())\n result.extend(self.num_rank_option(is_geopmctl))\n if self.config and self.config.do_affinity:\n result.extend(self.affinity_option(is_geopmctl))\n result.extend(self.preload_option())\n result.extend(self.timeout_option())\n result.extend(self.time_limit_option())\n result.extend(self.job_name_option())\n result.extend(self.node_list_option())\n result.extend(self.host_file_option())\n result.extend(self.partition_option())\n result.extend(self.reservation_option())\n result.extend(self.performance_governor_option())\n return result",
"def get_pipelines_providers():\n gqlapi = gql.get_api()\n pipelines_providers = gqlapi.query(PIPELINES_PROVIDERS_QUERY)[\"pipelines_providers\"]\n\n for pp in pipelines_providers:\n defaults = pp.pop(\"defaults\")\n for k, v in defaults.items():\n if k not in pp or not pp[k]:\n pp[k] = v\n\n return pipelines_providers",
"def dataflow_executable(self):\n pass",
"def GetCommandLineOptions(self):\n return self.args_",
"def get_pipeline_base_data(self):\n\n #yaml_path\n yaml_path = os.path.join(data_path, 'pipeline_base_data.yaml')\n \n #sandbox\n if(self.command_line_args_dict.get('sandbox', None)):\n #yaml_path\n yaml_path = os.path.join(data_path, 'pipeline_base_data_sandbox.yaml')\n \n #command line option (-cyp, --custom_yaml_path)\n if(self.command_line_args_dict.get('custom_yaml_path', None)):\n #check if path exists\n if(os.path.isfile(self.command_line_args_dict.get('custom_yaml_path', None))):\n #yaml_path\n yaml_path = self.command_line_args_dict.get('custom_yaml_path', None)\n\n \n \n \n #pipeline_base_data_dict\n pipeline_base_data_dict = self.load_yaml(yaml_path)\n\n \n return pipeline_base_data_dict",
"def test_gitlab_options(self):\n collector = Collector(\n project_name=\"project_name\",\n gitlab_url=\"https://gitlab.custom-domain.com\",\n gitlab_token=\"input-G1tl4b_Tok3n!\",\n gitlab_namespace_path=\"inputnamespacepath\",\n )\n self.assertEqual(collector.gitlab_url, \"https://gitlab.custom-domain.com\")\n self.assertEqual(collector.gitlab_token, \"input-G1tl4b_Tok3n!\")\n self.assertEqual(collector.gitlab_namespace_path, \"inputnamespacepath\")\n collector.set_gitlab()\n self.assertEqual(collector.gitlab_url, \"https://gitlab.custom-domain.com\")\n self.assertEqual(collector.gitlab_token, \"input-G1tl4b_Tok3n!\")\n self.assertEqual(collector.gitlab_namespace_path, \"inputnamespacepath\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Processes a track file, extracting its features
|
def process_track(filename):
    # Load the first track in the GPX file and compute its per-point metrics.
    track = Track.from_gpx(filename)[0]
    track.compute_metrics()
    # Features are recomputed for each segment, so only the last
    # segment's features are returned.
    for segment in track.segments:
        features = extract_features_2(segment.points)
    return features
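A small driver sketch built on top of process_track; the directory path is hypothetical, and Track / extract_features_2 are assumed to come from the same module as the function above:

import glob

def process_directory(gpx_dir):
    # Map each GPX file to the feature vector process_track extracts for it.
    features_per_file = {}
    for path in sorted(glob.glob(gpx_dir + "/*.gpx")):
        features_per_file[path] = process_track(path)
    return features_per_file

# features = process_directory("data/tracks")  # hypothetical directory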
|
[
"def get_features(track_id: str, sp: ...) -> ...: # TODO ***************\n features = sp.audio_features('spotify:track:' + track_id)\n return([features[0]['acousticness'], features[0]['danceability'], features[0]['energy'],\n features[0]['duration_ms'], features[0]['instrumentalness'], features[\n 0]['valence'], features[0]['tempo'], features[0]['liveness'],\n features[0]['loudness'], features[0]['speechiness'], features[0]['key']])",
"def track_infos(cue_fname: str) -> Iterable[Tuple[str, str]]:\n for line in run('cueprint', '--track-template', '%n %t\\n', cue_fname):\n track_num, title = line.split(' ', maxsplit=1)\n yield track_num, title",
"def _parse_track(self, obj: Dict, add_features: bool = True) -> SongInformation:\n\n def _fetch_features(track_id: str) -> Dict[str, float]:\n feature_dict = self.api.audio_features([track_id])[0]\n return {k: v for k, v in feature_dict.items() if k in FEATURES}\n\n track_id: str = obj['id']\n\n name: str = obj.get('name', None)\n album: str = obj.get('album', {}).get('name', None)\n\n _release_date: str = obj.get('album', {}).get('release_date', None)\n year: int = int(_release_date.split(\"-\")[0]) if _release_date is not None else None\n\n _track_number: str = obj.get('track_number', None)\n track_number: int = int(_track_number) if _track_number is not None else None\n\n total_tracks: int = obj.get('album', {}).get('total_tracks', 0)\n\n _artists: Tuple[str, ...] = tuple(art['name'] for art in obj.get('artists', {}))\n artists: Tuple[str, ...] = _artists if len(_artists) > 0 else None\n\n links: Dict[str, str] = obj.get('external_urls', None)\n\n image_url: str = obj.get('album', {}).get('images', [{}])[0].get('url', None)\n image: Optional[Union[PNGSongImage, JPEGSongImage]] = self._fetch_image(image_url) \\\n if image_url is not None else None\n\n _additional_information = _fetch_features(track_id) if add_features else {}\n additional_information = \"\\n\".join(f\"{k} {v}\" for k, v in _additional_information.items())\n\n return SongInformation(name, album, (track_number, total_tracks), artists, image, year, links=links,\n additional_information=additional_information)",
"def fetch_features():\n sp = get_client()\n\n raw_data = sys.stdin.read()\n tracks = jsonpickle.decode(raw_data)\n\n # get track features\n from span.tasks.features import get_audio_features\n\n features = get_audio_features(sp, tracks)\n\n # export data\n sys.stdout.write(jsonpickle.encode(features))",
"def get_all_features(track_list = list, artist_list = list, sp=None):\n\n track_features = []\n artist_features = []\n\n track_iters = int(len(track_list)/50)\n track_remainders = len(track_list)%50\n\n start = 0\n end = start+50\n\n for i in range(track_iters):\n track_features.extend(sp.audio_features(track_list[start:end]))\n artist_features.extend(sp.artists(artist_list[start:end]).get('artists'))\n start += 50\n end = start+50\n\n\n if track_remainders:\n end = start + track_remainders\n track_features.extend(sp.audio_features(track_list[start:end]))\n artist_features.extend(sp.artists(artist_list[start:end]).get('artists'))\n\n\n return track_features, artist_features",
"def build_tracks(track_1, track_2, track_3, track_4, file_name):\r\n note_file = open(file_name, 'r')\r\n note_file.readline() # Buffer to eliminate bpm data\r\n\r\n line1 = note_file.readline().strip()\r\n read_note_line(track_1, line1)\r\n line2 = note_file.readline().strip()\r\n read_note_line(track_2, line2)\r\n line3 = note_file.readline().strip()\r\n read_note_line(track_3, line3)\r\n line4 = note_file.readline().strip()\r\n read_note_line(track_4, line4)\r\n\r\n note_file.close()",
"def get_track_features(track_id, sp):\n\n feature_filter = ['danceability', 'energy', 'instrumentalness', 'loudness', 'speechiness', 'tempo', 'valence']\n return_features = []\n\n # Get features from this track.\n features = sp.audio_features([track_id])\n\n if None in features:\n return []\n\n # Add desired features of track.\n for feature in features[0]:\n if feature in feature_filter:\n return_features.append(features[0][feature])\n\n return return_features",
"def extractTrackTrack(track, pid, pname):\n # Return\n piste = []\n # Function\n if track['track']:\n piste.append(\n (pid,\n pname,\n track['track']['id'])\n )\n else:\n pass\n # Retrun\n return piste",
"def extract_features(video):\r\n\r\n # Extract video features\r\n input_file = \"Extracted_Features/\" + video[:len(video) - 4] + \"_Features/\" + video[:len(video) - 4] + \".csv\"\r\n\r\n file = open(input_file)\r\n reader = csv.DictReader(file)\r\n video_feat = {}\r\n\r\n for row in reader:\r\n # Taking only good frames where faces have been detected with a confidence higher than 0.8 (Openface standard)\r\n if int(row[' success']) == 1 and float(row[' confidence']) > 0.8:\r\n face_id = int(row[' face_id'])\r\n frame = int(row['frame']) - 1\r\n\r\n video_feat.setdefault(frame, {})\r\n face_features = []\r\n\r\n # mouth action units\r\n au = [\"10\", \"12\", \"14\", \"15\", \"17\", \"20\", \"23\", \"25\", \"26\"]\r\n for i in au:\r\n face_features.append(float(row[' AU' + i + '_r']))\r\n for i in au:\r\n face_features.append(float(row[' AU' + i + '_c']))\r\n\r\n # LandMarks\r\n for i in range(0, 68):\r\n face_features.append(float(row[' x_' + str(i)]))\r\n\r\n for i in range(0, 68):\r\n face_features.append(float(row[' y_' + str(i)]))\r\n\r\n video_feat[frame][face_id] = face_features\r\n\r\n # Extract audio features\r\n output_audio = video[:len(video) - 4] + \".wav\"\r\n\r\n os.system(\"ffmpeg -i dataset/\" + video + \" -loglevel panic -ac 1 -vn -y\\\r\n Extracted_Features/\" + video[:len(video) - 4] + \"_Features/audio.wav\")\r\n\r\n input = \"Extracted_Features/\" + video[:len(video) - 4] + \"_Features/audio.wav\"\r\n\r\n (rate, sig) = wav.read(input)\r\n\r\n audio_feat = mfcc(sig, rate, winstep=1 / fps, numcep=12)\r\n\r\n return video_feat, audio_feat",
"def parse_file(self):\n \n # define tokens which delimit specific parts of the collector_probe file\n TK_SUMMARY='COLLECTOR PROBE SUMMARY INFORMATION:'\n TK_LOCATION='LOCATION:'\n TK_DIAMETER_DPERP='DIAMETER,DPERP:'\n TK_ABSFAC='ABSFAC:'\n TK_HEADER='INDEX'\n \n try:\n fptr = open(self.filename,'r')\n except:\n print 'collector_probe.parse file: failed to open file: {0}'.format(self.filename)\n return\n \n # count occurences of tk_summary in order to count number of separate probes in file\n wholefile=fptr.read()\n result=re.findall(TK_SUMMARY,wholefile)\n self.num_probes=len(result)\n print 'number of probes found in file:',self.num_probes\n \n # go back to start of file, and loop over fixed structure for each probe\n fptr.seek(0)\n probes=[]\n curline=fptr.readline()\n try:\n while(len(curline)>0):\n if self.find_token(curline,TK_SUMMARY) >= 0:\n # found a new probe, so initialize a new data dict\n data = OrderedDict()\n if self.find_token(curline,TK_LOCATION) >= 0:\n locline=curline.split()\n data['R1P']=float(locline[1])\n data['Z1P']=float(locline[2])\n data['R2P']=float(locline[3])\n data['Z2P']=float(locline[4])\n elif self.find_token(curline,TK_DIAMETER_DPERP) >= 0:\n dialine=curline.split()\n data['DIAMETER']=float(dialine[1])\n data['DPERP']=float(dialine[2])\n elif self.find_token(curline,TK_ABSFAC) >= 0:\n absline=curline.split()\n data['ABSFAC']=float(absline[1])\n data['ABSFAC_NEUT']=float(absline[2])\n elif self.find_token(curline,TK_HEADER) >= 0:\n # found the main data block, which is of unknown length\n header=curline.split()\n numheader=len(header)\n for value in header: # initialize dictionary entries\n data[value]=[]\n idxline=header\n while(len(idxline)==numheader):\n idxline=fptr.readline().split()\n if len(idxline)>0: \n for j,value in enumerate(header,0):\n data[value].append(float(idxline[j]))\n else: # reached end of data block for this probe\n for value in header: # convert all arrays to ndarrays\n data[value]=np.array(data[value])\n probes.append(data) # save data dict for this probe\n break\n curline=fptr.readline()\n except Exception, Ex:\n print 'collector_probe.parse file: failed to read data, exception: {0}'.format(Ex)\n return\n fptr.close()\n \n # sanity check\n if len(probes)!=self.num_probes:\n print 'collector_probe.parse file: something went wrong in loading multiple probes'\n \n self.probes=probes\n self.absfac_applied=False\n \n # apply absfac to convert divimp units to physical fluxes/densities\n self.apply_absfac()\n \n return probes",
"def read_tracks(self):\n # Each track is a bs4 Tag object\n track_soup = self.find_track_holder()\n data_attrs = [\"startTime\", \"duration\", \"name\"]\n for track in track_soup.children:\n # Initialize data storage\n data_keys = []\n data_vals = []\n if track.name is None:\n continue\n # For each of the child elements in the track,\n for child in track.children:\n # If the name isn't None (emptystr) and the name starts with\n # \"xmpdm:\", the prefix on all of the data tags,\n if child.name is not None and child.name in data_attrs:\n # Append the name (minus the prefix) to the keys list\n data_keys.append(child.name.lower())\n # Append the value to the values list\n data_vals.append(\n self.number_normalizer(child.contents[0])\n )\n # if child.name == \"xmpdm:name\":\n # print(\"Reading %s...\" % child.contents[0])\n # This looks like\n # {\n # 'name':'Wolfgun - Road to Jupiter',\n # 'starttime':10300,\n # 'duration':347000\n # }\n data = dict(zip(data_keys, data_vals))\n self.tracklist.append(data)",
"def dirWavFeatureExtraction(dirName, mtWin, mtStep, stWin, stStep, computeBEAT=False):\n\n allMtFeatures = numpy.array([])\n processingTimes = []\n\n types = ('*.wav', '*.aif', '*.aiff', '*.mp3','*.au')\n wavFilesList = []\n for files in types:\n wavFilesList.extend(glob.glob(os.path.join(dirName, files)))\n\n wavFilesList = sorted(wavFilesList) \n wavFilesList2 = []\n for i, wavFile in enumerate(wavFilesList): \n print \"Analyzing file {0:d} of {1:d}: {2:s}\".format(i+1, len(wavFilesList), wavFile.encode('utf-8'))\n if os.stat(wavFile).st_size == 0:\n print \" (EMPTY FILE -- SKIPPING)\"\n continue \n [Fs, x] = audioBasicIO.readAudioFile(wavFile) # read file \n if isinstance(x, int):\n continue \n\n t1 = time.clock() \n x = audioBasicIO.stereo2mono(x) # convert stereo to mono \n if x.shape[0]<float(Fs)/10:\n print \" (AUDIO FILE TOO SMALL - SKIPPING)\"\n continue\n wavFilesList2.append(wavFile)\n if computeBEAT: # mid-term feature extraction for current file\n [MidTermFeatures, stFeatures] = mtFeatureExtraction(x, Fs, round(mtWin * Fs), round(mtStep * Fs), round(Fs * stWin), round(Fs * stStep))\n [beat, beatConf] = beatExtraction(stFeatures, stStep)\n else:\n [MidTermFeatures, _] = mtFeatureExtraction(x, Fs, round(mtWin * Fs), round(mtStep * Fs), round(Fs * stWin), round(Fs * stStep))\n\n MidTermFeatures = numpy.transpose(MidTermFeatures)\n MidTermFeatures = MidTermFeatures.mean(axis=0) # long term averaging of mid-term statistics\n if (not numpy.isnan(MidTermFeatures).any()) and (not numpy.isinf(MidTermFeatures).any()): \n if computeBEAT:\n MidTermFeatures = numpy.append(MidTermFeatures, beat)\n MidTermFeatures = numpy.append(MidTermFeatures, beatConf)\n if len(allMtFeatures) == 0: # append feature vector\n allMtFeatures = MidTermFeatures\n else:\n allMtFeatures = numpy.vstack((allMtFeatures, MidTermFeatures))\n t2 = time.clock()\n duration = float(len(x)) / Fs\n processingTimes.append((t2 - t1) / duration)\n if len(processingTimes) > 0:\n print \"Feature extraction complexity ratio: {0:.1f} x realtime\".format((1.0 / numpy.mean(numpy.array(processingTimes))))\n return (allMtFeatures, wavFilesList2)",
"def parse_feature(basedir: str, filename: str, encoding: str = \"utf-8\") -> Feature:\n __tracebackhide__ = True\n abs_filename = os.path.abspath(os.path.join(basedir, filename))\n rel_filename = os.path.join(os.path.basename(basedir), filename)\n feature = Feature(\n scenarios=OrderedDict(),\n filename=abs_filename,\n rel_filename=rel_filename,\n line_number=1,\n name=None,\n tags=set(),\n background=None,\n description=\"\",\n )\n scenario: ScenarioTemplate | None = None\n mode: str | None = None\n prev_mode = None\n description: list[str] = []\n step = None\n multiline_step = False\n prev_line = None\n\n with open(abs_filename, encoding=encoding) as f:\n content = f.read()\n\n for line_number, line in enumerate(content.splitlines(), start=1):\n unindented_line = line.lstrip()\n line_indent = len(line) - len(unindented_line)\n if step and (step.indent < line_indent or ((not unindented_line) and multiline_step)):\n multiline_step = True\n # multiline step, so just add line and continue\n step.add_line(line)\n continue\n else:\n step = None\n multiline_step = False\n stripped_line = line.strip()\n clean_line = strip_comments(line)\n if not clean_line and (not prev_mode or prev_mode not in TYPES_WITH_DESCRIPTIONS):\n # Blank lines are included in feature and scenario descriptions\n continue\n mode = get_step_type(clean_line) or mode\n\n allowed_prev_mode = (types.BACKGROUND, types.GIVEN, types.WHEN)\n\n if not scenario and prev_mode not in allowed_prev_mode and mode in types.STEP_TYPES:\n raise exceptions.FeatureError(\n \"Step definition outside of a Scenario or a Background\", line_number, clean_line, filename\n )\n\n if mode == types.FEATURE:\n if prev_mode is None or prev_mode == types.TAG:\n _, feature.name = parse_line(clean_line)\n feature.line_number = line_number\n feature.tags = get_tags(prev_line)\n elif prev_mode == types.FEATURE:\n # Do not include comments in descriptions\n if not stripped_line.startswith(\"#\"):\n description.append(clean_line)\n else:\n raise exceptions.FeatureError(\n \"Multiple features are not allowed in a single feature file\",\n line_number,\n clean_line,\n filename,\n )\n\n prev_mode = mode\n\n # Remove Feature, Given, When, Then, And\n keyword, parsed_line = parse_line(clean_line)\n\n if mode in [types.SCENARIO, types.SCENARIO_OUTLINE]:\n # Lines between the scenario declaration\n # and the scenario's first step line\n # are considered part of the scenario description.\n if scenario and not keyword:\n # Do not include comments in descriptions\n if not stripped_line.startswith(\"#\"):\n scenario.add_description_line(clean_line)\n continue\n tags = get_tags(prev_line)\n scenario = ScenarioTemplate(\n feature=feature,\n name=parsed_line,\n line_number=line_number,\n tags=tags,\n templated=mode == types.SCENARIO_OUTLINE,\n )\n feature.scenarios[parsed_line] = scenario\n elif mode == types.BACKGROUND:\n feature.background = Background(feature=feature, line_number=line_number)\n elif mode == types.EXAMPLES:\n mode = types.EXAMPLES_HEADERS\n scenario.examples.line_number = line_number\n elif mode == types.EXAMPLES_HEADERS:\n scenario.examples.set_param_names([l for l in split_line(parsed_line) if l])\n mode = types.EXAMPLE_LINE\n elif mode == types.EXAMPLE_LINE:\n scenario.examples.add_example([l for l in split_line(stripped_line)])\n elif mode and mode not in (types.FEATURE, types.TAG):\n step = Step(name=parsed_line, type=mode, indent=line_indent, line_number=line_number, keyword=keyword)\n if feature.background and not scenario:\n 
feature.background.add_step(step)\n else:\n scenario = cast(ScenarioTemplate, scenario)\n scenario.add_step(step)\n prev_line = clean_line\n\n feature.description = \"\\n\".join(description).strip()\n return feature",
"def get_ts_features_to_preprocess(self):",
"def onReadTracks(self, event):\n\t\tfilename = self.parameters[\"ResultsFile\"]\n\t\tif not os.path.exists(filename):\n\t\t\treturn\n\t\tself.track = lib.Track.TrackReader()\n\t\tself.track.readFromFile(filename)\n\t\tself.tracks = self.track.getTracks(self.parameters[\"MinLength\"])\n\t\tself.trackGrid.showTracks(self.tracks)",
"def computeFeaturesForFullSong(file_path, feature_list, pack_size):\n\n # will hold the evaluated feature values\n feature_values = []\n\n raw_data, fs, enc = wavread(file_path)\n raw_chunks = chunks(raw_data, pack_size)\n\n for feature_name in feature_list:\n # print \"Computing \" + feature_name\n class_ = getattr(features, feature_name)\n if class_.requireFullSong is False: # ensure full song\n raise \"Every feature must be a full song feature\"\n\n feature = class_(raw_chunks)\n feature_values.append(feature.value)\n\n return feature_values",
"def get_audio_features(track_id=None):\n\n # connect to MongoDB\n mongo = MongoDatabase()\n mongo.connect()\n db = mongo.db\n\n if track_id is None:\n # get Spotify Tracks ids\n spotify_ids = db.songs.find({}, {\"spotify_id\" : 1, \"_id\" : 0})\n all_ids = [item[\"spotify_id\"] for item in spotify_ids]\n limit = len(all_ids)\n \n # the url only accept 100 ids at a time, so we will use batch ids\n for i in range(0, 1370, 100):\n if i + 100 < limit:\n batch = ','.join(all_ids[i:i+100])\n else:\n batch = ','.join(all_ids[i:limit])\n\n url = f\"https://api.spotify.com/v1/audio-features/?ids={batch}\"\n \n # get audio features\n result = fetch_data(url)\n\n # keep relevant info \n for audio_feat in result[\"audio_features\"]:\n save_audio_features(audio_feat, db.songs)\n else:\n # get audio features\n url = f\"https://api.spotify.com/v1/audio-features/{track_id}\"\n result = fetch_data(url)\n return result",
"def extract(self):\n tags = mutagen.File(self.input_file)\n \n ext = os.path.splitext(self.input_file)[1].lower()\n if ext in self.exts:\n for tag, key in self.__tag_mapping[ext].items():\n if key in tags:\n self.tags[tag] = tags[key][0]\n elif tag == 'lyrics' and key == 'USLT':\n for id3tag in tags:\n if id3tag.startswith(key):\n self.tags[tag] = tags[id3tag].text\n \n # Handle info tags specially\n self.tags['length'] = int(tags.info.length)\n self.tags['bitrate'] = (tags.info.bitrate \n if hasattr(tags.info, 'bitrate') \n else int(os.path.getsize(path) * 8 / tags.info.length)) / 1000\n \n # Convert string values to integers for certain tags, ignoring \n # any non-integer characters.\n for key in ['year', 'tracknumber', 'discnumber']:\n if self.tags[key] is not None:\n match = re.match('\\d+', str(self.tags[key]))\n if match:\n self.tags[key] = int(match.group(0))\n \n for key in ['title', 'artist', 'album']:\n self.tags[key] = self.tags[key].strip()",
"def get_audio_features( tracks, tracks_artistnames):\n if not tracks:\n print('No tracks provided.')\n return\n\n \n track_map = {track.get('id'): track for track in tracks}\n\n # Request the audio features for the chosen tracks (limited to 50)\n \n tracks_features_response = spotify.audio_features(tracks=track_map.keys())\n\n desired_features = [\n 'tempo',\n 'time_signature',\n 'key',\n 'mode',\n 'loudness',\n 'energy',\n 'danceability',\n 'acousticness',\n 'instrumentalness',\n 'liveness',\n 'speechiness',\n 'valence'\n ]\n\n tracks_features_list = []\n for track_features in tracks_features_response:\n \n features_dict = dict()\n for feature in desired_features:\n \n feature_value = track_features.get(feature)\n\n \n if feature == 'key':\n feature_value = translate_key_to_pitch(feature_value)\n \n features_dict[feature] = feature_value\n \n tracks_features_list.append(features_dict)\n\n\n\n tracks_features_map = {f.get('id'): [tracks_artistnames[i], tracks_features_list[i], \"https://open.spotify.com/track/\" + f.get('id')] for i, f in enumerate(tracks_features_response)}\n\n \n \n \n \n \n\n return tracks_features_map"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that the get_capacities_and_distinct_transports method returns two dicts with all values set to 0 for empty data
|
def test_get_capacities_and_distinct_transports_returns_0_for_empty_data(self):
capacities, distinct_transports = \
transport.get_capacities_and_distinct_transports(self.empty_data)
expected_capacities = {
'cars': 0,
'trains': 0,
'planes': 0
}
expected_distinct_transports = {
'distinct-cars': 0,
'distinct-trains': 0,
'distinct-planes': 0
}
self.assertDictEqual(capacities, expected_capacities)
self.assertDictEqual(distinct_transports, expected_distinct_transports)
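The transport module under test is not shown in this record. Purely as an illustration of one data shape these assertions would accept, a sketch consistent with the expected dictionaries might look like this (the field names and structure are assumptions, not the real module):

def get_capacities_and_distinct_transports(data):
    # `data` is assumed to map each transport kind to a list of vehicle dicts
    # carrying a 'capacity' field; empty input yields all-zero dictionaries.
    kinds = ('cars', 'trains', 'planes')
    capacities = {
        kind: sum(vehicle.get('capacity', 0) for vehicle in data.get(kind, []))
        for kind in kinds
    }
    distinct_transports = {
        'distinct-' + kind: len(data.get(kind, []))
        for kind in kinds
    }
    return capacities, distinct_transports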
|
[
"def test_get_capacities_and_distinct_transports_returns_totals(self):\n capacities, distinct_transports = \\\n transport.get_capacities_and_distinct_transports(self.test_data)\n\n expected_capacities = {\n 'cars': 14,\n 'trains': 150,\n 'planes': 524\n }\n\n expected_distinct_transports = {\n 'distinct-cars': 3,\n 'distinct-trains': 1,\n 'distinct-planes': 2\n }\n\n self.assertDictEqual(capacities, expected_capacities)\n self.assertDictEqual(distinct_transports, expected_distinct_transports)",
"def test_sort_values_returns_list_of_set_sorted_by_value(self):\n sorted_capacities = transport.sort_values(self.test_capacities)\n expected_capacities = [\n (6, 'cars'),\n (3, 'planes'),\n (1, 'trains')\n ]\n\n self.assertListEqual(sorted_capacities, expected_capacities)",
"def getOptionTwoAdresses(self, start_pos, end_pos, time, transportation_type):\n\n try:\n\n routingPublicTransportApi = herepy.public_transit_api.PublicTransitApi(self.HERE_ID, self.HERE_PASSWD)\n routingApi = herepy.routing_api.RoutingApi(self.HERE_ID, self.HERE_PASSWD)\n\n characteristics = dict()\n\n if transportation_type.lower() == \"car\":\n response = routingApi.car_route(start_pos, end_pos)\n characteristics['type'] = \"car\"\n characteristics['distance'] = int(response.as_dict()['response']['route'][0]['summary']['distance'] / 1609) # in miles\n characteristics['time'] = int(response.as_dict()['response']['route'][0]['summary']['baseTime'] / 60) # in minutes\n characteristics['price'] = 2 + 0.21*characteristics['time'] + 1.10*characteristics['distance']\n elif transportation_type.lower() == \"by foot\":\n response = routingApi.pedastrian_route(start_pos, end_pos)\n characteristics['type'] = \"by foot\"\n characteristics['price'] = 0\n elif transportation_type.lower() == \"public transport\":\n response = routingApi.public_transport(start_pos, end_pos, False)\n\n characteristics['type'] = \"public transport\"\n\n time = '2019-07-21T08:00:00'\n\n route_public_transport = routingPublicTransportApi.calculate_route(start_pos, end_pos, time)\n\n list_of_fares = route_public_transport.as_dict()['Res']['Connections']['Connection'][0]['Tariff']['Fares'][0]['Fare']\n pricePublicTransport = 0\n\n for fare_obj in list_of_fares:\n pricePublicTransport += fare_obj['price']\n\n characteristics['price'] = pricePublicTransport\n else:\n raise NotImplementedError\n\n characteristics['distance'] = int(response.as_dict()['response']['route'][0]['summary']['distance'] / 1609) # in miles\n characteristics['time'] = int(response.as_dict()['response']['route'][0]['summary']['baseTime'] / 60) # in minutes\n\n return characteristics\n\n except:\n return None",
"def test_organization_networks_traffic(self):\n req = MerakiAPI(KEY).organizations(ORGANIZATION_ID).networks(NETWORK_ID).lazy().traffic({\n \"timespan\": 7200,\n \"deviceType\": \"wireless\"\n })\n\n self.assertEqual(\n \"https://dashboard.meraki.com/api/v0/organizations/\"\n + ORGANIZATION_ID\n + \"/networks/\"\n + NETWORK_ID\n + \"/traffic\"\n , req\n .cached\n .url\n )\n self.assertEqual(\n {'deviceType': 'wireless', 'timespan': 7200}\n , req\n .cached\n .data\n)",
"def _process_unique_data(data, operator_info):\n unique_data = OrderedDict()\n for ability in operator_info.unique_abilities:\n # try to match each ability to the data returned from the API\n # currently hard-coded to only return PVP stats\n match = \"{stat_name}:{index}:infinite\".format(stat_name=ability.pvp_stat_name, index=operator_info.index)\n if match in data:\n unique_data[ability] = data[match]\n else:\n unique_data[ability] = 0 # the stupid API just doesnt return anything if we have zero of that stat\n if \"aruni\" in match:\n logging.warning(\"aruni unique stat may not work. I haven't been able to find the correct API name \"\n \"so 0 will be returned. Use aruni's unique stat with caution\")\n\n return unique_data",
"def number_transports(self) -> int:",
"def get_client_round_trip_times(self) -> dict[str, RoundTripData]:\n # first step: collect all round trip times of subscriptions, group them by notify_to_address\n tmp = defaultdict(list)\n ret = {}\n with self._subscriptions.lock:\n for subscription in self._subscriptions.objects:\n if subscription.max_roundtrip_time > 0:\n tmp[subscription.notify_to_address].append(subscription.get_roundtrip_stats())\n for key, stats in tmp.items():\n all_values = [stat.values for stat in stats]\n ret[key] = RoundTripData(all_values, max([s.max for s in stats]))\n return ret",
"def test_get_daily_data_req_empty(self):\n output = self.main.get_daily_data(self.request_empty)\n self.assertIsInstance(\n json.loads(output)[0],\n dict,\n )",
"def get_distribution(cls) -> Dict[ModeOfTransport, Dict[ModeOfTransport, float]]:\n fractions = {\n mode_of_transport_i: {\n mode_of_transport_j: cls._get_fraction(mode_of_transport_i, mode_of_transport_j)\n for mode_of_transport_j in ModeOfTransport\n }\n for mode_of_transport_i in ModeOfTransport\n }\n distributions = {}\n for mode_of_transport_i in ModeOfTransport:\n sum_over_mode_of_transport_i = sum(fractions[mode_of_transport_i].values())\n distributions[mode_of_transport_i] = {\n mode_of_transport_j: 0 if fractions[mode_of_transport_i][mode_of_transport_j] == 0\n else fractions[mode_of_transport_i][mode_of_transport_j] / sum_over_mode_of_transport_i\n for mode_of_transport_j in ModeOfTransport\n }\n return distributions",
"def get_potential_city_stats(self, unit: Settler) -> dict:\n surroundings = self.get_city_area(unit)\n return City.calculate_goods_no_city(surroundings)",
"def test_get_asset_device_contract_information_list(self):\n pass",
"def get_subscription_round_trip_times(self) -> dict[tuple[str, tuple[str]], RoundTripData]:\n ret = {}\n with self._subscriptions.lock:\n for subscription in self._subscriptions.objects:\n if subscription.max_roundtrip_time > 0:\n ret[(subscription.notify_to_address,\n subscription.short_filter_names())] = subscription.get_roundtrip_stats()\n return ret",
"def _get_avaliable_vr(self):\n\n sql = \"SELECT DISTINCT VENDOR_KEY, RETAILER_KEY FROM AP_ALERT_CYCLE_MAPPING \" \\\n \"UNION \" \\\n \"SELECT DISTINCT VENDOR_KEY, RETAILER_KEY FROM AP_ALERT_CYCLE_RC_MAPPING\"\n self._logger.debug(sql)\n dct_vendor_retailer_hub = {}\n # dct_vendor_retailer_hub = dict(self._db.query(sql))\n for v_r in self._db.query(sql):\n try:\n config = Config(meta=self.context[\"meta\"], vendor_key=v_r.VENDOR_KEY, retailer_key=v_r.RETAILER_KEY)\n hub_id = config.get_hub_id()\n _key = str(v_r.VENDOR_KEY) + ',' + str(v_r.RETAILER_KEY)\n dct_vendor_retailer_hub[_key] = hub_id\n # in case there is no config returned for given vendor & retailer, then skip this vendor & retailer.\n except Exception as e:\n # self._logger.warning(str(e))\n self._logger.warning(\"Seems there is no silo configed for vendor: %s and retailer: %s\" %\n (str(v_r.VENDOR_KEY), str(v_r.RETAILER_KEY)))\n continue\n\n return dct_vendor_retailer_hub",
"def debug_transports_combos(c, transport_id):\n import cargonet.preprocessing.tasks.debug_transport as dt\n from cargonet.preprocessing.datalake.retrieval import Retriever\n\n r = Retriever()\n s = r.retrieve_stations(keep_ids=True)\n t_raw = r.retrieve_transport(transport_id=transport_id, raw_sections=True)\n all_live = []\n for sec in t_raw.get(\"sections\"):\n for l in sec.get(\"live\"):\n all_live.append(l)\n dt.debug_combinations(t_raw.get(\"sections\"), s, all_live)",
"def test_portals_count_get(self):\n pass",
"def test_should_return_none_distances_if_no_distance_is_known(self):\n satellites_info = Satellite.objects.all()\n\n result = get_distances(satellites_info)\n \n assert result == {'kenobi': None, 'skywalker': None, 'sato': None}",
"def _test_map_all_zero(vv, phash_map_lu):\n all_zero_map= collections.defaultdict(bool) # Default False\n for xmap in phash_map_lu.keys():\n omap = phash_map_lu[xmap]\n if omap == None:\n all_zero_map[xmap]=True\n mbuild.msgb(\"ALL ZEROS\", \"VV={} MAP={}\".format(vv, xmap))\n return all_zero_map",
"def test_empty_circ(self):\n qc = QuantumCircuit()\n self.assertDictEqual(final_measurement_mapping(qc), {})",
"def test_cultivatedvarietys_get(self):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the get_capacities_and_distinct_transports method returns two dicts with correct totals for example data
|
def test_get_capacities_and_distinct_transports_returns_totals(self):
capacities, distinct_transports = \
transport.get_capacities_and_distinct_transports(self.test_data)
expected_capacities = {
'cars': 14,
'trains': 150,
'planes': 524
}
expected_distinct_transports = {
'distinct-cars': 3,
'distinct-trains': 1,
'distinct-planes': 2
}
self.assertDictEqual(capacities, expected_capacities)
self.assertDictEqual(distinct_transports, expected_distinct_transports)
|
[
"def test_get_capacities_and_distinct_transports_returns_0_for_empty_data(self):\n capacities, distinct_transports = \\\n transport.get_capacities_and_distinct_transports(self.empty_data)\n\n expected_capacities = {\n 'cars': 0,\n 'trains': 0,\n 'planes': 0\n }\n\n expected_distinct_transports = {\n 'distinct-cars': 0,\n 'distinct-trains': 0,\n 'distinct-planes': 0\n }\n\n self.assertDictEqual(capacities, expected_capacities)\n self.assertDictEqual(distinct_transports, expected_distinct_transports)",
"def test_sort_values_returns_list_of_set_sorted_by_value(self):\n sorted_capacities = transport.sort_values(self.test_capacities)\n expected_capacities = [\n (6, 'cars'),\n (3, 'planes'),\n (1, 'trains')\n ]\n\n self.assertListEqual(sorted_capacities, expected_capacities)",
"def cost_usage_data():\n\n return {\n \"ResultsByTime\": [\n {\n \"Estimated\": True,\n \"TimePeriod\": {\n \"Start\": \"2019-11-06\",\n \"End\": \"2019-11-07\"\n },\n \"Total\": {\n \"BlendedCost\": {\n \"Amount\": \"2.523114075\",\n \"Unit\": \"USD\"\n },\n \"UnblendedCost\": {\n \"Amount\": \"2.5219729944\",\n \"Unit\": \"USD\"\n },\n \"UsageQuantity\": {\n \"Amount\": \"470666.3868981499\",\n \"Unit\": \"N/A\"\n }\n },\n \"Groups\": []\n }\n ]\n}",
"def get_client_round_trip_times(self) -> dict[str, RoundTripData]:\n # first step: collect all round trip times of subscriptions, group them by notify_to_address\n tmp = defaultdict(list)\n ret = {}\n with self._subscriptions.lock:\n for subscription in self._subscriptions.objects:\n if subscription.max_roundtrip_time > 0:\n tmp[subscription.notify_to_address].append(subscription.get_roundtrip_stats())\n for key, stats in tmp.items():\n all_values = [stat.values for stat in stats]\n ret[key] = RoundTripData(all_values, max([s.max for s in stats]))\n return ret",
"def number_transports(self) -> int:",
"def test_portals_count_get(self):\n pass",
"def test_organization_networks_traffic(self):\n req = MerakiAPI(KEY).organizations(ORGANIZATION_ID).networks(NETWORK_ID).lazy().traffic({\n \"timespan\": 7200,\n \"deviceType\": \"wireless\"\n })\n\n self.assertEqual(\n \"https://dashboard.meraki.com/api/v0/organizations/\"\n + ORGANIZATION_ID\n + \"/networks/\"\n + NETWORK_ID\n + \"/traffic\"\n , req\n .cached\n .url\n )\n self.assertEqual(\n {'deviceType': 'wireless', 'timespan': 7200}\n , req\n .cached\n .data\n)",
"def getOptionTwoAdresses(self, start_pos, end_pos, time, transportation_type):\n\n try:\n\n routingPublicTransportApi = herepy.public_transit_api.PublicTransitApi(self.HERE_ID, self.HERE_PASSWD)\n routingApi = herepy.routing_api.RoutingApi(self.HERE_ID, self.HERE_PASSWD)\n\n characteristics = dict()\n\n if transportation_type.lower() == \"car\":\n response = routingApi.car_route(start_pos, end_pos)\n characteristics['type'] = \"car\"\n characteristics['distance'] = int(response.as_dict()['response']['route'][0]['summary']['distance'] / 1609) # in miles\n characteristics['time'] = int(response.as_dict()['response']['route'][0]['summary']['baseTime'] / 60) # in minutes\n characteristics['price'] = 2 + 0.21*characteristics['time'] + 1.10*characteristics['distance']\n elif transportation_type.lower() == \"by foot\":\n response = routingApi.pedastrian_route(start_pos, end_pos)\n characteristics['type'] = \"by foot\"\n characteristics['price'] = 0\n elif transportation_type.lower() == \"public transport\":\n response = routingApi.public_transport(start_pos, end_pos, False)\n\n characteristics['type'] = \"public transport\"\n\n time = '2019-07-21T08:00:00'\n\n route_public_transport = routingPublicTransportApi.calculate_route(start_pos, end_pos, time)\n\n list_of_fares = route_public_transport.as_dict()['Res']['Connections']['Connection'][0]['Tariff']['Fares'][0]['Fare']\n pricePublicTransport = 0\n\n for fare_obj in list_of_fares:\n pricePublicTransport += fare_obj['price']\n\n characteristics['price'] = pricePublicTransport\n else:\n raise NotImplementedError\n\n characteristics['distance'] = int(response.as_dict()['response']['route'][0]['summary']['distance'] / 1609) # in miles\n characteristics['time'] = int(response.as_dict()['response']['route'][0]['summary']['baseTime'] / 60) # in minutes\n\n return characteristics\n\n except:\n return None",
"def get_distribution(cls) -> Dict[ModeOfTransport, Dict[ModeOfTransport, float]]:\n fractions = {\n mode_of_transport_i: {\n mode_of_transport_j: cls._get_fraction(mode_of_transport_i, mode_of_transport_j)\n for mode_of_transport_j in ModeOfTransport\n }\n for mode_of_transport_i in ModeOfTransport\n }\n distributions = {}\n for mode_of_transport_i in ModeOfTransport:\n sum_over_mode_of_transport_i = sum(fractions[mode_of_transport_i].values())\n distributions[mode_of_transport_i] = {\n mode_of_transport_j: 0 if fractions[mode_of_transport_i][mode_of_transport_j] == 0\n else fractions[mode_of_transport_i][mode_of_transport_j] / sum_over_mode_of_transport_i\n for mode_of_transport_j in ModeOfTransport\n }\n return distributions",
"def _generate_compliance_breakdown(per_tac_results):\n results = {\n 'num_compliant_imeis': 0,\n 'num_noncompliant_imeis': 0,\n 'num_noncompliant_imeis_blocking': 0,\n 'num_noncompliant_imeis_info_only': 0,\n 'num_compliant_triplets': 0,\n 'num_noncompliant_triplets': 0,\n 'num_noncompliant_triplets_blocking': 0,\n 'num_noncompliant_triplets_info_only': 0,\n 'num_compliant_imei_imsis': 0,\n 'num_noncompliant_imei_imsis': 0,\n 'num_noncompliant_imei_imsis_blocking': 0,\n 'num_noncompliant_imei_imsis_info_only': 0,\n 'num_compliant_imei_msisdns': 0,\n 'num_noncompliant_imei_msisdns': 0,\n 'num_noncompliant_imei_msisdns_blocking': 0,\n 'num_noncompliant_imei_msisdns_info_only': 0\n }\n\n for tac, combinations in per_tac_results.items():\n for combination, data in combinations.items():\n if data['compliance_level'] == 0:\n results['num_noncompliant_imeis_blocking'] += data['num_imeis']\n results['num_noncompliant_imeis'] += data['num_imeis']\n results['num_noncompliant_triplets_blocking'] += data['num_subscriber_triplets']\n results['num_noncompliant_triplets'] += data['num_subscriber_triplets']\n results['num_noncompliant_imei_imsis_blocking'] += data['num_imei_imsis']\n results['num_noncompliant_imei_imsis'] += data['num_imei_imsis']\n results['num_noncompliant_imei_msisdns_blocking'] += data['num_imei_msisdns']\n results['num_noncompliant_imei_msisdns'] += data['num_imei_msisdns']\n elif data['compliance_level'] == 1:\n results['num_noncompliant_imeis_info_only'] += data['num_imeis']\n results['num_noncompliant_imeis'] += data['num_imeis']\n results['num_noncompliant_triplets_info_only'] += data['num_subscriber_triplets']\n results['num_noncompliant_triplets'] += data['num_subscriber_triplets']\n results['num_noncompliant_imei_imsis_info_only'] += data['num_imei_imsis']\n results['num_noncompliant_imei_imsis'] += data['num_imei_imsis']\n results['num_noncompliant_imei_msisdns_info_only'] += data['num_imei_msisdns']\n results['num_noncompliant_imei_imsis'] += data['num_imei_msisdns']\n else:\n results['num_compliant_imeis'] += data['num_imeis']\n results['num_compliant_triplets'] += data['num_subscriber_triplets']\n results['num_compliant_imei_imsis'] += data['num_imei_imsis']\n results['num_compliant_imei_msisdns'] += data['num_imei_msisdns']\n\n return results",
"def consolidate_results(self) -> List[Tuple[Any, ...]]:\n for client in self.clients:\n self.result.append(\n (self.experiment_number,) + client.client_transaction_summary\n )\n return self.result",
"def test_get_account_asset_size_agg_all_using_get(self):\n pass",
"def calc_total(group_metrics):\n tenanted = defaultdict(Metric)\n total = Metric()\n for gm in group_metrics:\n total.desired += gm.desired\n total.actual += gm.actual\n total.pending += gm.pending\n tenanted[gm.tenant_id].desired += gm.desired\n tenanted[gm.tenant_id].actual += gm.actual\n tenanted[gm.tenant_id].pending += gm.pending\n return tenanted, total",
"def get_planet_metrics():\n import pandas as pd\n print('Retrieving planet metrics from NEXSCI...')\n NEXSCI_ENDPOINT = 'http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI'\n KEPLER_DISCOVERY = \"pl_facility+like+%27%25Kepler%25%27\"\n K2_DISCOVERY = \"pl_facility+like+%27%25K2%25%27\"\n\n metrics = collections.OrderedDict()\n # Count the number of Kepler candidate planets\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=cumulative&select=count(*)'\n '&where=koi_pdisposition+like+%27CANDIDATE%27')\n metrics['kepler_candidates_count'] = int(df['count(*)'][0])\n # Count Kepler confirmed planets\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY)\n metrics['kepler_confirmed_count'] = int(df['count(*)'][0])\n # Count Kepler confirmed planets with mass estimates\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY + '+and+pl_masse+is+not+null')\n metrics['kepler_confirmed_with_mass_count'] = int(df['count(*)'][0])\n\n # Count K2 candidate planets\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=k2candidates&select=count(*)'\n '&where=k2c_disp+like+%27C%25%27+and+k2c_recentflag=1')\n metrics['k2_candidates_count'] = int(df['count(*)'][0])\n # Count K2 confirmed planets\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY)\n metrics['k2_confirmed_count'] = int(df['count(*)'][0])\n # Count K2 confirmed planets with mass estimates\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY + '+and+pl_masse+is+not+null')\n metrics['k2_confirmed_with_mass_count'] = int(df['count(*)'][0])\n\n # Count number of Kepler planets by size bin\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY + '+and+pl_rade<1.25')\n metrics['kepler_earth_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY + '+and+pl_rade>=1.25+and+pl_rade<2.0')\n metrics['kepler_super_earth_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY + '+and+pl_rade>=2.0+and+pl_rade<6.0')\n metrics['kepler_neptune_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY + '+and+pl_rade>=6.0+and+pl_rade<15.0')\n metrics['kepler_jupiter_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + KEPLER_DISCOVERY + '+and+pl_rade>=15.0')\n metrics['kepler_larger_size_count'] = int(df['count(*)'][0])\n\n # Count number of K2 planets by size bin\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY + '+and+pl_rade<1.25')\n metrics['k2_earth_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY + '+and+pl_rade>=1.25+and+pl_rade<2.0')\n metrics['k2_super_earth_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY + '+and+pl_rade>=2.0+and+pl_rade<6.0')\n metrics['k2_neptune_size_count'] = int(df['count(*)'][0])\n df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY + '+and+pl_rade>=6.0+and+pl_rade<15.0')\n metrics['k2_jupiter_size_count'] = int(df['count(*)'][0])\n 
df = pd.read_csv(NEXSCI_ENDPOINT + '?table=exoplanets&select=count(*)&where='\n + K2_DISCOVERY + '+and+pl_rade>=15.0')\n metrics['k2_larger_size_count'] = int(df['count(*)'][0])\n\n # Combined planet counts\n for name in ['candidates', 'confirmed', 'confirmed_with_mass', 'earth_size',\n 'super_earth_size', 'neptune_size', 'jupiter_size', 'larger_size']:\n metrics[name + '_count'] = metrics['kepler_' + name + '_count'] + \\\n metrics['k2_' + name + '_count']\n\n return metrics",
"def get_subscription_round_trip_times(self) -> dict[tuple[str, tuple[str]], RoundTripData]:\n ret = {}\n with self._subscriptions.lock:\n for subscription in self._subscriptions.objects:\n if subscription.max_roundtrip_time > 0:\n ret[(subscription.notify_to_address,\n subscription.short_filter_names())] = subscription.get_roundtrip_stats()\n return ret",
"def test_get_user_totals(self):\n response = base.get_totals(self.credentials)\n self.assertEqual(response.status_code, 200)",
"def aggregate_requirements(groups):\n requirements = Counter()\n for group in groups:\n quantity, item = parse_deal_code(group)\n requirements.update({item: quantity})\n\n return requirements",
"def debug_transports_combos(c, transport_id):\n import cargonet.preprocessing.tasks.debug_transport as dt\n from cargonet.preprocessing.datalake.retrieval import Retriever\n\n r = Retriever()\n s = r.retrieve_stations(keep_ids=True)\n t_raw = r.retrieve_transport(transport_id=transport_id, raw_sections=True)\n all_live = []\n for sec in t_raw.get(\"sections\"):\n for l in sec.get(\"live\"):\n all_live.append(l)\n dt.debug_combinations(t_raw.get(\"sections\"), s, all_live)",
"def gatherCrypto():\n for key, value in coinDict.items():\n apiURL = 'https://api.coinmarketcap.com/v2/ticker/{}.json'.format(key)\n jsonData = requests.get(apiURL).json()\n value['name'] = jsonData['data']['name']\n value['rank'] = jsonData['data']['rank']\n value['circulation'] = jsonData['data']['circulating_supply']\n\n if jsonData['data']['quotes']['USD']['price'] < 0.0001:\n price = '{0:.8f}'.format(jsonData['data']['quotes']['USD']['price'])\n value['USD'] = str(price)\n else:\n price = '{0:.4f}'.format(jsonData['data']['quotes']['USD']['price'])\n value['USD'] = str(price)\n\n value['volume_24h'] = round(jsonData['data']['quotes']['USD']['volume_24h'], 2)\n value['change_1h'] = jsonData['data']['quotes']['USD']['percent_change_1h']\n value['change_24h'] = jsonData['data']['quotes']['USD']['percent_change_24h']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test the sort_values method returns a list of tuples sorted by value, given a dict with totals returned by the get_capacities_and_distinct_transports method
|
def test_sort_values_returns_list_of_set_sorted_by_value(self):
sorted_capacities = transport.sort_values(self.test_capacities)
expected_capacities = [
(6, 'cars'),
(3, 'planes'),
(1, 'trains')
]
self.assertListEqual(sorted_capacities, expected_capacities)
|
[
"def test_get_capacities_and_distinct_transports_returns_totals(self):\n capacities, distinct_transports = \\\n transport.get_capacities_and_distinct_transports(self.test_data)\n\n expected_capacities = {\n 'cars': 14,\n 'trains': 150,\n 'planes': 524\n }\n\n expected_distinct_transports = {\n 'distinct-cars': 3,\n 'distinct-trains': 1,\n 'distinct-planes': 2\n }\n\n self.assertDictEqual(capacities, expected_capacities)\n self.assertDictEqual(distinct_transports, expected_distinct_transports)",
"def scrape_sortings(self):\n return [self.scrape_data()]",
"def collect_reducer_set(values):\n return sorted(list(set(values)))",
"def sort_value_list_pyxb(obj_pyxb):\n obj_pyxb.sort(key=lambda x: x.value())",
"def sort_by_value_and_groups(transactions):\n # calculate overall sums\n all_groups = transactions[0].values()[0]\n all_sums = defaultdict(float)\n for month_tr in transactions:\n for group,value in month_tr.values()[0].items():\n all_sums[group] += value\n # devide by grouped and ungrouped\n grouped = [ (k,v) for k,v in all_sums.items() if not k.startswith('NOT GROUPED')]\n ungrouped = [ (k,v) for k,v in all_sums.items() if k.startswith('NOT GROUPED')]\n # sort\n grouped.sort(key=lambda x: x[1])\n ungrouped.sort(key=lambda x: x[1])\n return grouped + ungrouped",
"def _sorted(dictionary):\n d = dictionary.copy()\n case_insensitive = lambda k: str.lower(k) if isinstance(k, str) else k\n nonevalue = d.pop(None) if None in d else None\n values = [d[key] for key in sorted(d.keys(), key=case_insensitive)]\n if nonevalue:\n values.insert(0, nonevalue)\n return values",
"def test_order_by(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly\"\n query_params = self.mocked_query_params(url, AzureCostView)\n handler = AzureReportQueryHandler(query_params)\n\n unordered_data = [\n {\"date\": self.dh.today, \"delta_percent\": 8, \"total\": 6.2, \"rank\": 2},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 1},\n {\"date\": self.dh.today, \"delta_percent\": 7, \"total\": 8.2, \"rank\": 1},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 2},\n ]\n\n order_fields = [\"date\", \"rank\"]\n expected = [\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 1},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 2},\n {\"date\": self.dh.today, \"delta_percent\": 7, \"total\": 8.2, \"rank\": 1},\n {\"date\": self.dh.today, \"delta_percent\": 8, \"total\": 6.2, \"rank\": 2},\n ]\n\n ordered_data = handler.order_by(unordered_data, order_fields)\n self.assertEqual(ordered_data, expected)\n\n order_fields = [\"date\", \"-delta\"]\n expected = [\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 1},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 2},\n {\"date\": self.dh.today, \"delta_percent\": 8, \"total\": 6.2, \"rank\": 2},\n {\"date\": self.dh.today, \"delta_percent\": 7, \"total\": 8.2, \"rank\": 1},\n ]\n\n ordered_data = handler.order_by(unordered_data, order_fields)\n self.assertEqual(ordered_data, expected)",
"def values_sorted_by_key(dist):\n return [dist[k] for k in sorted(dist.keys())]",
"def sort_items(self):\n print('Sorting items')\n for timestamp, border_measures in self.report_dict.items():\n new_border_measures = OrderedDict(sorted(border_measures.items(),\n key=lambda x: [x[1]['sum'], x[0][1], x[0][0]],\n reverse=True)\n )\n self.report_dict[timestamp] = new_border_measures",
"def sort(settings):\n\tfilter = settings.format(settings.content)\n\tfilter.sort()\n\tsettings.content = filter.content",
"def sort(activities):\n def sort_order(value):\n \"\"\"\n Sort activities by the number of connected members.\n \"\"\"\n return -(value.get('members', 0) or 0)\n\n return sorted(activities.values(), key=sort_order)",
"def sort_by_value(d):\n return sorted(d.iteritems(), key=lambda (k, v): (v, k), reverse=True)",
"def test_sort_debts(self):\n results = ds.sort_by_payoff_time({'row_count': '3',\n 'debt_name_1': 'debt a', 'balance_1':'10000',\n 'payment_1': '200', 'apr_1': '12',\n 'debt_name_2': 'debt b', 'balance_2':'10000',\n 'payment_2': '300', 'apr_2': '12',\n 'debt_name_3': 'debt c', 'balance_3':'10000',\n 'payment_3': '150', 'apr_3': '12'})\n self.assertEqual('debt b', results[0]['debt_name'])\n self.assertEqual('debt a', results[1]['debt_name'])\n self.assertEqual('debt c', results[2]['debt_name'])",
"def sortByValues(self):\n\t\tself._dKeys = sorted(self._dKeys, key=lambda tupl: self._dValues[tupl[1]])\n\t\treturn self",
"def test_query_order_by_several_fields(self):\n data = {\n \"object_name\": \"Regulation\",\n \"order_by\": [{\"name\": \"notes\", \"desc\": True}, {\"name\": \"title\"}],\n \"filters\": {\"expression\": {}},\n }\n regulations = self._get_first_result_set(data,\n \"Regulation\", \"values\")\n\n data_unsorted = {\n \"object_name\": \"Regulation\",\n \"filters\": {\"expression\": {}},\n }\n regulations_unsorted = self._get_first_result_set(data_unsorted,\n \"Regulation\", \"values\")\n\n self.assertListEqual(\n regulations,\n sorted(sorted(regulations_unsorted,\n key=itemgetter(\"title\")),\n key=itemgetter(\"notes\"),\n reverse=True),\n )",
"def get_analysed_sorted(self):\n\n\t\treturn sorted(self.analysed, key = lambda a: a.address)",
"def sort(self):",
"def sort_entries(cart):\n\n pass",
"def test_order_by(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly\"\n query_params = self.mocked_query_params(url, OCICostView)\n handler = OCIReportQueryHandler(query_params)\n\n unordered_data = [\n {\"date\": self.dh.today, \"delta_percent\": 8, \"total\": 6.2, \"rank\": 2},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 1},\n {\"date\": self.dh.today, \"delta_percent\": 7, \"total\": 8.2, \"rank\": 1},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 2},\n ]\n\n order_fields = [\"date\", \"rank\"]\n expected = [\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 1},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 2},\n {\"date\": self.dh.today, \"delta_percent\": 7, \"total\": 8.2, \"rank\": 1},\n {\"date\": self.dh.today, \"delta_percent\": 8, \"total\": 6.2, \"rank\": 2},\n ]\n\n ordered_data = handler.order_by(unordered_data, order_fields)\n self.assertEqual(ordered_data, expected)\n\n order_fields = [\"date\", \"-delta\"]\n expected = [\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 1},\n {\"date\": self.dh.yesterday, \"delta_percent\": 4, \"total\": 2.2, \"rank\": 2},\n {\"date\": self.dh.today, \"delta_percent\": 8, \"total\": 6.2, \"rank\": 2},\n {\"date\": self.dh.today, \"delta_percent\": 7, \"total\": 8.2, \"rank\": 1},\n ]\n\n ordered_data = handler.order_by(unordered_data, order_fields)\n self.assertEqual(ordered_data, expected)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert list_maze to nested list
|
def convert_maze(self):
self.maze = [[line[i]
for i in range(len(line))] for line in self.list_line]
return 0
|
[
"def generate_maze(self):\n with open(self.maze, \"r\") as file:\n global_maze = []\n # We iterate on each row contained in our file .txt\n for row in file:\n maze_row = []\n # We iretate on each sprite from rows, to create lists with each value\n for sprite in row:\n if sprite != \"\\n\":\n maze_row.append(sprite)\n\n # We append each list (row) to our global list which represent our maze\n global_maze.append(maze_row)\n\n self.maze = global_maze",
"def build_adjlist(self):\n adj_list = {}\n for i in range(0, self.maze_numrows):\n for j in range (0, self.maze_numcols):\n adjacent_cells = []\n if(self.paths[i][j] == 0):\n # check if connected to northwest cell\n if(i - 1 >= 0) and (j - 1 >= 0):\n if(self.paths[i - 1][j - 1] == 0):\n adjacent_cells.append(str(i - 1) + \" \" + str(j - 1))\n # check if connected to north cell\n if(i - 1 >= 0):\n if(self.paths[i - 1][j] == 0):\n adjacent_cells.append(str(i - 1) + \" \" + str(j))\n # check if connected to northeast cell\n if(i - 1 >= 0) and (j + 1 < self.maze_numcols):\n if(self.paths[i - 1][j + 1] == 0):\n adjacent_cells.append(str(i - 1) + \" \" + str(j + 1))\n # check if connected to west cell\n if(j - 1 >= 0):\n if(self.paths[i][j - 1] == 0):\n adjacent_cells.append(str(i) + \" \" + str(j - 1))\n # check if connected to east cell\n if(j + 1 < self.maze_numcols):\n if(self.paths[i][j + 1] == 0):\n adjacent_cells.append(str(i) + \" \" + str(j + 1))\n # check if connected to southwest cell\n if(i + 1 < self.maze_numrows) and (j - 1 >= 0):\n if(self.paths[i + 1][j - 1] == 0):\n adjacent_cells.append(str(i + 1) + \" \" + str(j - 1))\n # check if connected to south cell\n if(i + 1 < self.maze_numrows):\n if(self.paths[i + 1][j] == 0):\n adjacent_cells.append(str(i + 1) + \" \" + str(j))\n # check if connected to southeast cell\n if(i + 1 < self.maze_numrows) and (j + 1 < self.maze_numcols):\n if(self.paths[i + 1][j + 1] == 0):\n adjacent_cells.append(str(i + 1) + \" \" + str(j + 1))\n adj_list[str(i) + \" \" + str(j)] = adjacent_cells\n setattr(self, 'adj_list', adj_list)",
"def mazeToGraph(mazeList):\n openSquares, startPos, goalPos = collectOpenSquares(mazeList)\n startNode = openSquares.index(startPos)\n goalNode = openSquares.index(goalPos)\n \n \n numOpen = len(openSquares)\n mazeGraph = ListGraph(numOpen)\n \n for sqPos in range(numOpen):\n (r, c) = openSquares[sqPos]\n for neigh in [(r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c)]:\n if neigh in openSquares:\n neighPos = openSquares.index(neigh)\n mazeGraph.addEdge(sqPos, neighPos)\n return mazeGraph, startNode, goalNode",
"def list_2dimension_convert(self, lst):\r\n self.matrix_value = []\r\n for sub_list in lst:\r\n self.matrix_value.append(sub_list)",
"def convert_block_to_list(grid: list, _row: int, _col: int) -> list:\n block = []\n _col0 = (_col // 3) * 3\n _row0 = (_row // 3) * 3\n for i in range(3):\n for j in range(3):\n block.append(grid[_row0 + i][_col0 + j])\n return block",
"def flatten(self) -> list:\n board = self.board_list\n return_list = []\n for row in board:\n for column in row:\n if column is None:\n column = 0\n elif column == 'X':\n column = 1\n elif column == 'O':\n column = -1\n return_list.append(column)\n return return_list",
"def flatten_board(board):\n return [n for row in board for n in row]",
"def lane_map(self):\n lanes = []\n for i in range(self.lane_num):\n lanes.append([])\n for obstacle in self.map.obs:\n left_lane = max(self.get_lane(obstacle.lx), 0)\n left_depth = obstacle.ly\n right_lane = min(self.get_lane(obstacle.rx), self.lane_num - 1)\n right_depth = obstacle.ry\n for j in range(right_lane - left_lane + 1):\n depth_delta = right_depth - left_depth\n depth_interval = depth_delta / (right_lane - left_lane)\n lanes[left_lane + j].append(left_depth + (depth_interval * j))\n for lane in lanes:\n lane.sort()\n return lanes",
"def encode_nested_list_recursive(seq):\n if not isinstance(seq, list):\n return [seq]\n return [\"up\"] + [x for y in seq for x in encode_nested_list(y)] + [\"down\"]",
"def _flatten(block: Block) -> List[List[Tuple[int, int, int]]]:\r\n # size of unit cell\r\n unit_size = 2 ** block.max_depth // 2 ** block.level\r\n # if block does not have children (block.children == [])\r\n lst = []\r\n if block.colour is not None and not block.children:\r\n # populating lst with unit cells with only block.colour\r\n for i in range(unit_size):\r\n # adding empty column\r\n lst.append([])\r\n for _ in range(unit_size):\r\n # populating that column with unit_size items\r\n lst[i].append(block.colour)\r\n\r\n else: # block has children\r\n\r\n # flattening each child first\r\n lst0 = _flatten(block.children[0]) # upper right\r\n lst1 = _flatten(block.children[1]) # upper left\r\n lst2 = _flatten(block.children[2]) # lower left\r\n lst3 = _flatten(block.children[3]) # lower right\r\n\r\n # combining the sublists of each child\r\n # left blocks\r\n for i in range(len(lst1)):\r\n # adding the columns together\r\n # upper left then lower left\r\n lst.append(lst1[i] + lst2[i])\r\n # right blocks\r\n for i in range(len(lst0)):\r\n # adding the rows together\r\n # upper right then lower right\r\n lst.append(lst0[i] + lst3[i])\r\n\r\n return lst",
"def list_flat(l, depth=1):\n buffer = []\n _list_flat_impl(l, buffer, depth)\n return buffer",
"def make_raw_maze(length, breadth, depth, start=(0.5,0.5,0.5)):\n #make maze full of walls and unvisited cells (this works, don't touch)\n maze = [[[1+((i%2)*(j%2)*(k%2)) for i in range(2*length+1)] for j in range(2*breadth+1)] for k in range(2*depth+1)]\n\n #make top and bottom borders\n for i in range(len(maze[0])):\n for j in range(len(maze[0][0])):\n maze[0][i][j]=3\n maze[-1][i][j]=3\n #make front and back borders\n for i in range(len(maze)):\n for j in range(len(maze[0][0])):\n maze[i][0][j]=3\n maze[i][-1][j]=3\n #make right and left borders\n for i in range(len(maze)):\n for j in range(len(maze[0])):\n maze[i][j][0]=3\n maze[i][j][-1]=3\n\n wall_list=[]\n finished=0\n \n #mark start cell as visited\n start_depth=2*int(start[2]*(depth-1))+1\n start_breadth=2*int(start[1]*(breadth-1))+1\n start_length=2*int(start[0]*(length-1))+1\n maze[start_depth][start_breadth][start_length] = 0\n\n while finished==0:\n #append the six walls of start cell to wall list\n #top\n if maze[start_depth+1][start_breadth][start_length]==1:\n wall_list.append((start_depth+1,start_breadth,start_length))\n #bottom\n if maze[start_depth-1][start_breadth][start_length]==1:\n wall_list.append((start_depth-1,start_breadth,start_length))\n #front\n if maze[start_depth][start_breadth+1][start_length]==1:\n wall_list.append((start_depth,start_breadth+1,start_length))\n #back\n if maze[start_depth][start_breadth-1][start_length]==1:\n wall_list.append((start_depth,start_breadth-1,start_length))\n #right\n if maze[start_depth][start_breadth][start_length+1]==1:\n wall_list.append((start_depth,start_breadth,start_length+1))\n #left\n if maze[start_depth][start_breadth][start_length-1]==1:\n wall_list.append((start_depth,start_breadth,start_length-1))\n\n #randomly select wall to check\n active_wall = random.choice(wall_list)\n wall_list.remove(active_wall)\n\n #look for adjacent, unvisited cells\n adjacent = (-1,-1,-1)\n if maze[active_wall[0]+1][active_wall[1]][active_wall[2]]==2:\n adjacent = (active_wall[0]+1,active_wall[1],active_wall[2])\n if maze[active_wall[0]-1][active_wall[1]][active_wall[2]]==2:\n adjacent = (active_wall[0]-1,active_wall[1],active_wall[2])\n if maze[active_wall[0]][active_wall[1]+1][active_wall[2]]==2:\n adjacent = (active_wall[0],active_wall[1]+1,active_wall[2])\n if maze[active_wall[0]][active_wall[1]-1][active_wall[2]]==2:\n adjacent = (active_wall[0],active_wall[1]-1,active_wall[2])\n if maze[active_wall[0]][active_wall[1]][active_wall[2]+1]==2:\n adjacent = (active_wall[0],active_wall[1],active_wall[2]+1)\n if maze[active_wall[0]][active_wall[1]][active_wall[2]-1]==2:\n adjacent = (active_wall[0],active_wall[1],active_wall[2]-1)\n\n #if there are any adjacent unvisited cells, knock the wall down and treat the unvisited cell as a new cell\n if adjacent != (-1,-1,-1):\n maze[active_wall[0]][active_wall[1]][active_wall[2]]=0\n start_depth=adjacent[0]\n start_breadth=adjacent[1]\n start_length=adjacent[2]\n maze[start_depth][start_breadth][start_length]=0\n\n #check if finished by looking for unvisited cells\n finished=1\n for i in range(len(maze)):\n for j in range(len(maze[0])):\n if 2 in maze[i][j]:\n finished=0\n\n #rewrite top and bottom borders\n for i in range(len(maze[0])):\n for j in range(len(maze[0][0])):\n maze[0][i][j]=1\n maze[-1][i][j]=1\n #rewrite front and back borders\n for i in range(len(maze)):\n for j in range(len(maze[0][0])):\n maze[i][0][j]=1\n maze[i][-1][j]=1\n #rewrite right and left borders\n for i in range(len(maze)):\n for j in range(len(maze[0])):\n 
maze[i][j][0]=1\n maze[i][j][-1]=1\n\n # for floor in maze:\n # for row in floor:\n # print row\n # print\n print(\"Maze generated.\")\n return maze",
"def array2graph(maze_array):\n start = tuple()\n end = tuple()\n weight = {}\n graph = {}\n #imax is max index of rows and jmax is max index of columns\n imax = len(maze_array) - 1\n jmax = len(maze_array[0]) - 1\n for i in range(imax + 1):\n for j in range(jmax + 1):\n if maze_array[i][j] == '#':\n continue\n elif maze_array[i][j] == 's':\n start = (\n i, j)\n weight[start] = 1\n elif maze_array[i][j] == 'e':\n end = (\n i, j)\n weight[end] = 1\n else:\n weight[i, j] = int(maze_array[i][j])\n #record available neighbours for each node in graph dictionary\n #why didn't put prackets here (i,j) TODO\n graph[i, j] = []\n if i > 0:\n if maze_array[i - 1][j] != '#':\n graph[(i, j)] += [(i - 1, j)]\n if j > 0:\n if maze_array[i][j - 1] != '#':\n graph[(i, j)] += [(i, j - 1)]\n if i < imax:\n if maze_array[i + 1][j] != '#':\n graph[(i, j)] += [(i + 1, j)]\n if j < jmax:\n if maze_array[i][j + 1] != '#':\n graph[(i, j)] += [(i, j + 1)]\n\n return (\n graph, weight, start, end)",
"def encode_nested_list(seq):\n # use recursive generator to avoid creating a zillion intermediate-level lists\n def encode(L):\n if not isinstance(L, list):\n yield L\n else:\n yield \"up\"\n for y in L:\n yield from encode(y)\n yield \"down\"\n\n return list(encode(seq))",
"def get_maze_list(rat, day):\n return sorted(metadata[rat]['days'][day]['sessions'].keys())",
"def flatten(l):\n return [item for sublist in l for item in sublist]",
"def expand_list(list_list):\n list_in_list = [i for i in list_list if type(i) == list].copy()\n\n try:\n nbr_ans_list = max([len(i) for i in list_in_list])\n\n ans = [list_list.copy() for i in range(nbr_ans_list)]\n for (i,l) in enumerate(ans):\n for (j,e) in enumerate(l):\n if type(e) == list:\n ans[i][j] = e[min(i,len(e)-1)]\n # S'il n'y a pas de liste dans la liste (2e exemple)\n except ValueError:\n ans = [list_list]\n\n return ans",
"def traverse_grid(start_cell, direction, num_steps):\r\n new_list = []\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * direction[0]\r\n col = start_cell[1] + step * direction[1]\r\n new_list.append([row,col])\r\n return new_list",
"def mine_depth(nodes_list):\r\n return [node.get_depth() for node in nodes_list]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Figure out the position of the IA (AI player) in the maze
|
def get_IA_position(self, maze):
for y in range(len(maze)):
for x in range(len(maze[y])):
if maze[y][x] == self.letter:
self.posx = x
self.posy = y
break
return 0
|
[
"def agent_initial_position(i: int, total: int) -> Position:\n layout_base = int(np.ceil(np.sqrt(total)))\n idx_map = np.arange(layout_base ** 2).reshape(layout_base, layout_base)\n (rows, cols) = np.where(idx_map == i)\n row, col = rows[0], cols[0]\n return Position(row, col) + (1, 1)\n # return Position(3 * i, 3 * i) + (1, 1)",
"def return_pos_of_DNA(self,snap,input_params):\n monomer_to_chain_map = input_params['MC_map'];\n chain_type = input_params['CT_map'];\n N_monomers = len(monomer_to_chain_map.keys())\n pos_A = [x for x in numpy.arange(N_monomers) if chain_type[monomer_to_chain_map[x]]=='A'];\n\n return (pos_A);",
"def get_aim_pos(self):\n\n left_top_client = (self._window_rect.left, self._window_rect.top)\n right_bottom_client = (self._window_rect.right, self._window_rect.bottom)\n left_top_screen = win32gui.ClientToScreen(self._process_handle, left_top_client)\n right_bottom_screen = win32gui.ClientToScreen(self._process_handle, right_bottom_client)\n\n x_pos = 0.5 * (right_bottom_screen[0] + left_top_screen[0])\n y_pos = 0.5 * (right_bottom_screen[1] + left_top_screen[1])\n\n return x_pos, y_pos",
"def calc_sag_offset_idx(self):\n return self.offset_pnt-1",
"def position(self):\n\t\t\n\t\tx_all,y_all,z_all = list(),list(),list()\n\t\tfor ant in self.antennas:\n\t\t\tx,y,z = ant.position\n\t\t\tx_all.append(x)\n\t\t\ty_all.append(y)\n\t\t\tz_all.append(z)\n\t\t\n\t\treturn (x_all,y_all,z_all)",
"def interiorpoint(self):",
"def getPos(self):\n\t\treturn self.__robot.x(), self.__robot.y(), self.__robot.a()",
"def _get_pos(self):\r\n \r\n return (self.rect.midbottom[0]-(MAP_TILE_WIDTH/2))/MAP_TILE_WIDTH, (self.rect.midbottom[1]-(MAP_TILE_HEIGHT))/MAP_TILE_HEIGHT",
"def get_loc(x0, xc, radius):\n indices = N.where(x-radius <= xc <= x+radius)\n \n if N.size(indices[0]) == 0:\n return -1, 0\n else:\n i0 = indices[0][0]\n i1 = indices[0][-1]\n return i0, i1",
"def findPosition(self,i): # TEST\n return self.abstract.findPosition(self.notes[i])",
"def find_pos(self):\n self.y = 0\n for d in self.data:\n try:\n self.x = d.index('m')\n return\n except ValueError:\n self.y += 1",
"def CalcSlicePosition(self):\n result = (0, 0, 0)\n core = self.dmgr.GetCore()\n if core and self.meshLevels:\n # -- Data matrix is z, x, y(reversed)\n # --\n # z = self.meshLevels[ self.axialValue[ 1 ] ]\n z = self.meshLevels[self.axialValue.pinIndex]\n\n assy_col = self.assemblyAddr[1] - self.coreExtent[0]\n # xxxxx channel? track with mode flag?\n x = core.npinx * assy_col + self.subAddr[0]\n\n assy_row = self.assemblyAddr[2] - self.coreExtent[1]\n # xxxxx channel?\n y = \\\n core.npiny * (self.coreExtent[-1] - assy_row) - \\\n self.subAddr[1]\n\n result = (z, x, y)\n # end if core\n\n return result",
"def position_to_index(obj):\n position = obj.position\n if position == 'gauche':\n position = 0\n elif position == 'centre':\n position = 1\n else:\n position = 2\n return position",
"def index_to_position(self,index):\n x = index // self.grid_size\n y = index % self.grid_size\n return x,y",
"def gridCoord2index( iparlist ) :\n iz,icp,icl,ilp,ipm,ifs = iparlist \n isn = iz + nlogz*(icp + ncolorpar*(icl + ncolorlaw*(ilp + nlumipar*(ipm + npkmjd*(ifs) ) ) ) )\n return( isn )",
"def _identify_all_possible_position(self):\n lign = 1\n index_number = 1\n while lign < 16:\n column = 1\n while column < 16:\n self.all_position.append(index_number)\n column += 1\n index_number += 1\n index_number += 85\n lign += 1",
"def get_char_position(char):\n i = CHAR_SET.index(char)\n if args.vertical:\n y = i % SHEET_HEIGHT\n x = i // SHEET_HEIGHT\n else:\n x = i % SHEET_WIDTH\n y = i // SHEET_WIDTH\n return (x, y)",
"def get_element_location(element):\n element_coord = element.location\n return int(element_coord['y']), int(element_coord['x'])",
"def calculate_placeholder_position(self):\n x_sum = 0\n y_sum = 0\n leftmost = 0\n topmost = 0\n\n for op_data in self.c.selection:\n\n op_ = td.op(op_data[\"path\"])\n x_sum += op_.nodeCenterX\n y_sum += op_.nodeCenterY\n\n leftmost = leftmost if leftmost > op_.nodeX else op_.nodeX\n topmost = topmost if topmost > op_.nodeY else op_.nodeY\n\n mean_x = x_sum / len(self.c.selection)\n mean_y = y_sum / len(self.c.selection)\n\n return int(mean_x), int(mean_y)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the resistance. It first ensures that the next measurement reading is up to date by sending the "ADC" command.
|
def resistance(self):
# First make sure the mux is on the correct channel
if self._parent.mux_channel != self._idx:
self._parent.input_source = self._parent.InputSource.ground
self._parent.mux_channel = self._idx
self._parent.input_source = self._parent.InputSource.actual
# Next, prep a measurement with the ADC command
self._parent.sendcmd("ADC")
return float(self._parent.query("RES?")) * pq.ohm
|
[
"def __calculate_resistance(self, voltage, resistance=None):\n\n resistance = resistance if resistance else self.LOAD_RESISTANCE\n\n return float(resistance * (1023.0 - voltage) / float(voltage))",
"def read_RADC(self):\n return self._read_single_frame(FrameCode.RADC)",
"def ultrasonic_get(self):\n return int(self.comm('ultrasonic_get'))",
"def read(self):\n\n\t# Send a measurement request\n\tself.bus.write_quick(self.addr)\n\t# allow time for the conversion\n\ttime.sleep(0.050)\n\t# This, technically, sends an incorrect command. This issues an additional\n\t# measurement request, which causes the sensor to make another reading. As\n\t# the write is built into this, there is no delay and thus the result is\n\t# considered stale. The result it returns, however, is from moments ago so\n\t# it's fine.\n\tval = self.bus.read_i2c_block_data( 0X27, 0, 4)\n\n\t# Status is 2 bits\n\tstatus = val[0] >> 6\n\n\t# humidity is 14 bits, between 0 and 100%\n\thumidity_d = ((val[0] & (2**6-1)) << 8 )+ val[1]\n\thumidity = (humidity_d / (2**14-1.0)) * 100\n\n\t# temperature is 14 bits, between -40 and 125 deg C\n\ttemperature_d = (val[2] << 6) + (val[3] >> 2)\n\ttemperature = (temperature_d / (2**14-1.0)) * 165 - 40\n\n\treturn (humidity, temperature, status)",
"def resistance(self):\n R = self.V / self.current()\n return R",
"def load_resistance(self):\n value = self.query(\"OUTP:LOAD?\")\n try:\n return int(value) * pq.ohm\n except ValueError:\n return self.LoadResistance(value.strip())",
"def get_adc_value(self, channel):\r\n if channel == 1:\r\n command = 0x10\r\n elif channel == 2:\r\n command = 0x20\r\n elif channel == 3:\r\n command = 0x40\r\n elif channel == 4:\r\n command = 0x80\r\n else:\r\n raise ValueError(\"channel must be 1, 2, 3, or 4\")\r\n\r\n # Tell the ADC to convert a specific channel\r\n self.bus.write_byte(self.ADC_ADDRESS, command)\r\n\r\n # Get the conversion (read always gets most recent conversion)\r\n data = self.bus.read_word_data(self.ADC_ADDRESS, 0x00)\r\n\r\n # Process the data\r\n data = self.endian_swap(data)\r\n data = self.mask_high(data)\r\n # 52 is the value we got for 0.5V, so subtract this so we can work from 0 rather than 52.\r\n data -= 52\r\n\r\n # 1538 is the range in the values the ADC can give us between 0.5V and 2.5V\r\n # If were out of the scaled range we specified, set to the max or min value for our range.\r\n if data < 0:\r\n data = 0\r\n elif data > 1538:\r\n data = 1538\r\n \r\n return data",
"def get_distance():\n return sensor.distance_cm()",
"def rpms(self):\n # If encoder is not being used, return -1\n if self.outA < 0:\n return -1\n\n # Comput RPM\n prevState = 0\n ticks = 0\n startTime = time.clock()\n while ticks < MIN_TICKS:\n currState = GPIO.input(self.outA)\n if ((prevState is 0) and (currState is 1)) or \\\n ((prevState is 1) and (currState is 0)):\n ticks += 1\n prevState = currState\n if (time.clock() - startTime) > MAX_WAIT:\n return 0.0\n timeDiff = time.clock() - startTime\n return (ticks / CPR) / (timeDiff / SPM)",
"def readValue(self):\n\t\tglobal coefficient\n\t\tglobal addr_G\n\t\tdata = bus.read_i2c_block_data(addr_G, ADS1115_REG_POINTER_CONVERT, 2)\n\t\t\n\t\t# Convert the data\n\t\traw_adc = data[0] * 256 + data[1]\n\n\t\tif raw_adc > 32767:\n\t\t\traw_adc -= 65535\n\t\traw_adc = int(float(raw_adc)*coefficient)\n\t\treturn {'r' : raw_adc}",
"def readValue(self):\n\n data = self.bus.read_i2c_block_data(self.addr, ADS1115_REG_CONVERT, 2)\n \n # Convert the data\n raw_adc = data[0] * 256 + data[1]\n\n if raw_adc > 32767:\n raw_adc -= 65535\n raw_adc = int(float(raw_adc)*self.coefficient)*4\n return raw_adc",
"def resistances(self, c):\n dev = self.selectedDevice(c)\n return dev.getResistances()",
"def get_distance(self):\n \n\n # Specify -1 to retrieve the absolute position.\n return self.vr.simxReadProximitySensor(self.handle, vrep.simx_opmode_buffer)",
"def read_battery_voltage():\n\n # There is 100k resistor connecting A0 to battery and Wemos D1 mini has 100k + 220k resistors\n # The ADC has 1024 levels\n voltage = ADC(0).read() / 1024.0 * 4.2\n\n # voltage should be in range of 4.2V (fully charged or charging) and ~2.5V (discharge protection should kick in)\n return (voltage - 2.5) / (4.2 - 2.5) * 100",
"def ultrasonic(self):\n return self.ultrasonic_get()",
"def get_measurement(self):\n self._co2 = None\n\n if self.interface == 'UART':\n self.ser.flushInput()\n time.sleep(1)\n self.ser.write(\"\\xff\\x01\\x86\\x00\\x00\\x00\\x00\\x00\\x79\")\n time.sleep(.01)\n resp = self.ser.read(9)\n if len(resp) != 0:\n high_level = struct.unpack('B', resp[2])[0]\n low_level = struct.unpack('B', resp[3])[0]\n co2 = high_level * 256 + low_level\n return co2\n\n elif self.interface == 'I2C':\n self.write_register(self.FCR, 0x07)\n self.send(self.cmd_measure)\n try:\n co2 = self.parse(self.receive())\n except Exception:\n co2 = None\n return co2\n\n return None",
"def getResistance(self, drug):\n return self.resistances.get(drug, False)",
"def temperature_adc():\n v = device_read('/sys/bus/spi/devices/spi0.0/temp1_input')\n return float(v) / 1000.0",
"def GetMotor1(self):\n try:\n i2cRecv = self.RawRead(COMMAND_GET_B, I2C_MAX_LEN)\n except KeyboardInterrupt:\n raise\n except:\n self.Print('Failed reading motor 1 drive level!')\n return\n\n power = float(i2cRecv[2]) / float(PWM_MAX)\n\n if i2cRecv[1] == COMMAND_VALUE_FWD:\n return power\n elif i2cRecv[1] == COMMAND_VALUE_REV:\n return -power\n else:\n return"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the value of a key from the server
|
def get(self, key):
socksend(self.sock, _t1(C.get, key))
socksuccess(self.sock)
return sockstr(self.sock)
|
[
"def get(self, key):\n\n key = str(key)\n database = self._get_database()\n return database.get(key, None)",
"def __getitem__(self, key):\n query = select([self.store.c.value]).where(self.store.c.key == key)\n result = self.conn.execute(query).fetchone()\n if result:\n return result['value']\n raise KeyError",
"def get_value(self, key: str) -> Any:\r\n if self.get_index(key) is None:\r\n return None\r\n return self.hash_table[self.get_index(key)][1]",
"def read_value(self, key):\n self._logger.debug(\"reading value from redis\")\n value = self._breaker.call(self._get_value, key)\n return value.decode() if value else None",
"def get(self, key):\n return self._get(self._config, key.split('.'))",
"async def get(self, key):\n _LOGGER.debug(_(\"Getting %s from sqlite\"), key)\n data = None\n\n cur = await self.client.cursor()\n await cur.execute(\"SELECT data FROM {} WHERE key=?\".format(self.table), (key,))\n row = await cur.fetchone()\n if row:\n data = json.loads(row[0], object_hook=JSONDecoder())\n\n return data",
"async def retrieve_key(request):\n LOG.debug('Retrieve key')\n requested_id = request.match_info['requested_id']\n key_type = request.match_info['key_type'].lower()\n if key_type not in ('public', 'private'):\n return web.HTTPForbidden() # web.HTTPBadRequest()\n key_id = requested_id[-16:].upper()\n key_format = 'armored' if request.content_type == 'text/plain' else None\n LOG.debug(f'Requested {key_type.upper()} key with ID {requested_id}')\n k = _cache.get(key_id, key_type, key_format=key_format)\n if k:\n return web.Response(body=k) # web.Response(text=value.hex())\n else:\n LOG.warn(f\"Requested key {requested_id} not found.\")\n return web.HTTPNotFound()",
"def get_value(self, keypath = ''):\n if not self._valid_key(keypath):\n return None\n elif keypath is '':\n return self._data\n key = self._parse_keypath(keypath)\n data = self.dict_for_key(key[:-1], False)\n if data is None:\n return None\n token = key[-1]\n if token in data:\n return data[token]\n return None",
"def get(self, sat, key):\n with self._lock:\n return self._data[sat][key]",
"async def get_at(self, key, uid):\n log.debug(\"[%r] get_at key=%r uid=%r\", self._uid, key, uid)\n try:\n peer = await self._reach(uid)\n except KeyError as exc:\n raise KeyError(key) from exc\n\n out = await self._protocol.rpc(peer, \"value\", pack(key))\n if out[0] == b\"VALUE\":\n value = out[1]\n if hash(value) == key:\n # store it\n @h.transactional\n def add(tr, key, value):\n tr.add(\"QADOM:MAPPING\", key, \"value\", value)\n\n await self._run(add, self._hoply, key, value)\n # at last!\n return value\n else:\n log.warning(\"[%r] received bad value from %r\", peer)\n await self.blacklist(peer)\n return KeyError(key)\n else:\n raise KeyError(key)",
"def direct_get(self, key):\n return get_store_value(self.store, key)",
"def get_value(key):\n data = cache.get(key)\n if data:\n return pickle.loads(data)",
"def get_value(self, key: str):\n try:\n return self._config_contents[key]\n except KeyError:\n print(f\"Could not find the desired key: {key} in the config file\")",
"def get(self, key: str):\r\n\r\n index = self.hash(key)\r\n\r\n if self.array[index] is None:\r\n return None\r\n else:\r\n # Loop through all the key/value pairs at this index, and find if\r\n # our key exists. If it does, return the value.\r\n\r\n for kvp in self.array[index]:\r\n if kvp[0] == key:\r\n return kvp[1]\r\n\r\n return None",
"def get_info_value(self, key):\n info = self.parse_info(self.get_info())\n if key in info:\n return info[key]\n else:\n return None",
"def get(self, key: str):\n node = self.reach_node(key)\n if node:\n return node.content\n else:\n return None",
"def get(key, force_reload=False):",
"def search_kv_store(self, key):\n data = {\n 'operation': 'RETRIEVE',\n 'key': key\n }\n return self.post_json(self.make_url(\"/useragent-kv\"), data)['value']",
"def get_device_value(self, key: str, subkey: str) -> Any:\n value = None\n if self.coordinator.data is not None and key in self.coordinator.data:\n data = self.coordinator.data[key]\n if subkey in data:\n value = data[subkey]\n return value",
"def _getKey(result, key):\n try:\n if isinstance(result, list):\n return [obj[key] for obj in result]\n else:\n return result[key]\n except KeyError:\n raise ServerResponseError('Missing \"%s\" key' % key, result)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Call func(key, value) with opts. opts is a bitflag that can be RDBXOLCKREC for record locking and/or RDBXOLCKGLB for global locking.
|
def ext(self, func, opts, key, value):
# tcrdbext opts are RDBXOLCKREC, RDBXOLCKGLB
socksend(self.sock, _t3F(C.ext, func, opts, key, value))
socksuccess(self.sock)
return sockstr(self.sock)
|
[
"def _lock_and_transform(func):\n\n @wraps(func)\n def wrapper(self, key):\n with self._lock:\n return func(self, _transform_key(key))\n\n return wrapper",
"def wrapper(*args, **kwargs):\n key = wrapper.__cache_key__(*args, **kwargs)\n result = g.cache.get(key, ENOVAL)\n\n if result is ENOVAL:\n result = func(*args, **kwargs)\n g.cache[key] = result\n\n return result",
"def ctx_set(flags: Flags, fdict: FlagDict, key: str, value: Any):\n key = ctx_fix_key(flags, fdict, key) or key\n fdict[key] = value",
"def search_param_memoize(func):\r\n def wrapper(*args, **kwargs):\r\n key = (args, frozenset(kwargs.items()))\r\n if key in search_param_cache:\r\n return search_param_cache[key]\r\n else:\r\n rv = func(*args, **kwargs)\r\n search_param_cache[key] = rv\r\n return rv\r\n return wrapper",
"def dnskey_set_flags(self, f):\n return _ldns._ldns_rr_dnskey_set_flags(self, f)\n #parameters: ldns_rr *, ldns_rdf *,\n #retvals: bool",
"def synchronized_with_memcache(key=None, timeout=10):\n def decorator(func):\n @wraps(func)\n def wrapped(*arg, **kwargs):\n start = time.time()\n end = start\n\n lock_key = key\n if lock_key is None:\n lock_key = \"%s.%s__\" % (func.__module__, func.__name__)\n \n lock_key = \"__synchronized_with_memcache_\" + lock_key \n \n client = memcache.Client()\n got_lock = False\n try:\n # Make sure the func gets called only one at a time\n while not got_lock and end - start < timeout:\n locked = client.gets(lock_key)\n\n while locked is None:\n # Initialize the lock if necessary\n client.set(lock_key, False)\n locked = client.gets(lock_key)\n\n if not locked:\n # Lock looks available, try to take it with compare \n # and set (expiration of 10 seconds)\n got_lock = client.cas(lock_key, True, time=timeout)\n \n if not got_lock:\n # If we didn't get it, wait a bit and try again\n time.sleep(0.1)\n\n end = time.time()\n\n if not got_lock:\n logging.warning((\"synchronization lock on %s:%s timed out \"\n \"after %f seconds\")\n % (func.__module__, func.__name__,\n end - start))\n elif end - start > timeout * 0.75:\n # its possible that the func didn't finish but the\n # cas timeout was reached, so if we get these\n # warnings we should probably bump the timeout as well\n logging.warning((\"synchronization lock %s:%s almost timed \"\n \"out, but got lock after %f seconds\")\n % (func.__module__, func.__name__,\n end - start))\n \n results = func(*arg, **kwargs)\n\n finally:\n if got_lock:\n # Release the lock\n client.set(lock_key, False)\n\n return results\n return wrapped\n return decorator",
"async def wrapper(*args: Tuple[Any, ...], **kwds: Dict[str, Any]) -> Any:\n key = CacheKey.make(args, kwds)\n value = cache[key]\n # cache miss/expired\n if value is None:\n result = await fn(*args, **kwds)\n cache[key] = CacheValue(expired=time.monotonic() + expire, data=result)\n return result\n return value.data",
"def _attr_pickled(method):\r\n def wrapper(self, *args, **kwargs):\r\n \"wrap all queries searching the db_value field in some way\"\r\n self.__doc__ = method.__doc__\r\n for key in (key for key in kwargs if key.startswith('db_value')):\r\n kwargs[key] = to_pickle(kwargs[key])\r\n return method(self, *args, **kwargs)\r\n return update_wrapper(wrapper, method)",
"def fn_wrapper(func):\r\n @functools.wraps(func)\r\n def wrapped_call(self, new_value): # pylint: disable-msg=C0111\r\n old_value = getattr(self, var_name)\r\n func(self, new_value)\r\n if new_value != old_value:\r\n setattr(self, flag_on_change, True)\r\n return wrapped_call",
"def opts(arg_name: str, **options):\n\n def decorator(func):\n _quick_set(func, 'opts', arg_name, options, {})\n return func\n\n return decorator",
"def memcache_async(key, key_args=None, time=None):\n assert isinstance(key, basestring), key\n key_args = key_args or []\n assert isinstance(key_args, list), key_args\n assert all(isinstance(a, basestring) for a in key_args), key_args\n assert all(key_args), key_args\n\n memcache_set_kwargs = {}\n if time is not None:\n memcache_set_kwargs['time'] = time\n\n def decorator(func):\n unwrapped = func\n while True:\n deeper = getattr(unwrapped, '__wrapped__', None)\n if not deeper:\n break\n unwrapped = deeper\n\n argspec = inspect.getargspec(unwrapped)\n if argspec.varargs:\n raise NotImplementedError(\n 'varargs in memcached functions are not supported')\n if argspec.keywords:\n raise NotImplementedError(\n 'kwargs in memcached functions are not supported')\n\n # List of arg names and indexes. Has same order as |key_args|.\n arg_indexes = []\n for name in key_args:\n try:\n i = argspec.args.index(name)\n except ValueError:\n raise KeyError(\n 'key_format expects \"%s\" parameter, but it was not found among '\n 'function parameters' % name)\n arg_indexes.append((name, i))\n\n @functools.wraps(func)\n @ndb.tasklet\n def decorated(*args, **kwargs):\n arg_values = []\n for name, i in arg_indexes:\n if i < len(args):\n arg_value = args[i]\n elif name in kwargs:\n arg_value = kwargs[name]\n else:\n # argspec.defaults contains _last_ default values, so we need to shift\n # |i| left.\n default_value_index = i - (len(argspec.args) - len(argspec.defaults))\n if default_value_index < 0:\n # Parameter not provided. Call function to cause TypeError\n func(*args, **kwargs)\n assert False, 'Function call did not fail'\n arg_value = argspec.defaults[default_value_index]\n arg_values.append(arg_value)\n\n # Instead of putting a raw value to memcache, put tuple (value,)\n # so we can distinguish a cached None value and absence of the value.\n\n cache_key = 'utils.memcache/%s/%s%s' % (\n get_app_version(), key, repr(arg_values))\n\n ctx = ndb.get_context()\n result = yield ctx.memcache_get(cache_key)\n if isinstance(result, tuple) and len(result) == 1:\n raise ndb.Return(result[0])\n\n result = func(*args, **kwargs)\n if isinstance(result, ndb.Future):\n result = yield result\n yield ctx.memcache_set(cache_key, (result,), **memcache_set_kwargs)\n raise ndb.Return(result)\n\n return decorated\n return decorator",
"def memoize_immutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = (args, frozenset(kwargs.items())) #Must use frozenset because kwargs (= a dictionary) cannot be used as part of dictionary key \n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n else:\n pass\n #print(f'Looked-up \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n return memo[key]\n return wrapper",
"def opts_shared(cmd_func):\n options = [opt_config_file, opt_dry_run,\n opt_hostname, opt_use_regex,\n opt_log, opt_log_level]\n\n return reduce(lambda _f, opt_func: opt_func(_f),\n options,\n cmd_func)",
"def perform_atomic_get(lock, func, *argv):\r\n lock.acquire()\r\n var = func(*argv)\r\n lock.release()\r\n return var",
"def memorize(func):\n # Store results in a dict that maps arguments to results\n cache = {}\n # Define the wrapper function to return.\n @wraps(func)\n def wrapper(*args, **kwargs):\n # If these arguments haven't been seen before,\n if (args, kwargs) not in cache:\n # Call func() and store the result.\n cache[(args, kwargs)] = func(*args, **kwargs)\n return cache[(args, kwargs)]\n return wrapper",
"def _Memoize(func):\n l = threading.Lock()\n cache = {}\n def _Caller(*args, **kwargs):\n with l:\n params = repr((args, kwargs))\n try:\n return cache[params]\n except KeyError:\n result = func(*args, **kwargs)\n cache[params] = result\n return result\n return _Caller",
"def only_one(function=None, key=\"\", timeout=None):\n\n def _dec(run_func):\n \"\"\"Decorator.\"\"\"\n\n def _caller(*args, **kwargs):\n \"\"\"Caller.\"\"\"\n ret_value = None\n have_lock = False\n lock = REDIS_CLIENT.lock(key, timeout=timeout)\n try:\n have_lock = lock.acquire(blocking=False)\n if have_lock:\n ret_value = run_func(*args, **kwargs)\n else:\n logging.warning(\"Lock {} is already in use.\".format(key))\n msg = \"If you believe that this is a stale lock, log in to {}\"\n msg += \" and enter 'redis-cli del {}'\"\n logging.warning(msg.format(get_config()['redis_host'], key))\n finally:\n if have_lock:\n lock.release()\n\n return ret_value\n\n return _caller\n\n return _dec(function) if function is not None else _dec",
"def _cache_and_lock_accounts_keystore(fn):\n cached_accounts = {}\n last_mod = None\n\n def wrap(*args):\n nonlocal last_mod\n _keystore_cache_lock.acquire()\n files_in_dir = str(os.listdir(_account_keystore_path))\n dir_mod_time = str(os.path.getmtime(_account_keystore_path))\n curr_mod = hash(files_in_dir + dir_mod_time + _binary_path)\n if curr_mod != last_mod:\n cached_accounts.clear()\n cached_accounts.update(fn(*args))\n last_mod = curr_mod\n accounts = cached_accounts.copy()\n _keystore_cache_lock.release()\n return accounts\n\n return wrap",
"async def random_memcached_op(self):\n # Sleep random amount of time to avoid having too much\n # contention when getting a free connection\n sleep_time = random.randint(0, 1000) / 1000\n await asyncio.sleep(sleep_time)\n\n op = self.get_op()\n key = self.get_key()\n args = []\n if op == \"set\":\n value = self.get_value()\n args.append(value)\n func = getattr(self.driver, op)\n try:\n await func(key, *args)\n except Exception:\n logger.error(f\"OP failed {op} {key}\", exc_info=True)",
"def memoize_mutable(f):\n memo = {}\n def wrapper(*args, **kwargs):\n key = pickle.dumps(args) + pickle.dumps(kwargs) #To use as hash for mutable objects.\n if not key in memo:\n memo[key] = f(*args, **kwargs)\n #print(f'Calculated \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n else:\n pass\n #print(f'Looked-up \"{f.__name__}\" for args: {str(args)[:100]} and kwargs: {str(kwargs)[:100]}')\n return memo[key]\n return wrapper"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the position of a player against a location. For now we have only a sphere and a cube; next will be rooms, then polygons for more exotic bases. The goal is to use exactly the space one needs instead of arbitrary shapes dictated by my lack of math skills!
|
def player_is_inside_boundary(self, player_object):
    player_is_inside_boundary = False
    if self.shape == "sphere":
        """ we determine the location by the location's radius and the player's distance from its center;
        spheres make this especially easy, so I picked them first ^^
        """
        distance_to_location_center = float(math.sqrt(
            (float(self.pos_x) - float(player_object.pos_x)) ** 2 +
            (float(self.pos_y) - float(player_object.pos_y)) ** 2 +
            (float(self.pos_z) - float(player_object.pos_z)) ** 2))
        player_is_inside_boundary = distance_to_location_center <= float(self.radius)
    if self.shape == "cube":
        """ we determine the area of the location by the location's center and its radius (half a side's length)
        """
        if ((float(self.pos_x) - float(self.radius)) <= float(player_object.pos_x) <= (float(self.pos_x) + float(self.radius)) and
                (float(self.pos_y) - float(self.radius)) <= float(player_object.pos_y) <= (float(self.pos_y) + float(self.radius)) and
                (float(self.pos_z) - float(self.radius)) <= float(player_object.pos_z) <= (float(self.pos_z) + float(self.radius))):
            player_is_inside_boundary = True
    if self.shape == "room":
        """ we determine the area of the location by the location's center, its width, height and length;
        height is measured from ground level (-1) upwards
        """
        if ((float(self.pos_x) - float(self.width) / 2) <= float(player_object.pos_x) <= (float(self.pos_x) + float(self.width) / 2) and
                float(self.pos_y) <= float(player_object.pos_y) + 1 <= (float(self.pos_y) + float(self.height)) and
                (float(self.pos_z) - float(self.length) / 2) <= float(player_object.pos_z) <= (float(self.pos_z) + float(self.length) / 2)):
            player_is_inside_boundary = True
    return player_is_inside_boundary
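For illustration only, here is a self-contained sketch of the sphere branch above; the helper name and coordinates are invented and not part of the original class:

import math

def inside_sphere(center, radius, point):
    # Euclidean distance from the sphere's center to the point
    distance = math.sqrt(sum((c - p) ** 2 for c, p in zip(center, point)))
    return distance <= radius

# A player at (10, 5, 10) against a sphere of radius 8 centered at (12, 4, 9)
print(inside_sphere((12, 4, 9), 8, (10, 5, 10)))  # True (distance is sqrt(6), about 2.45)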
|
[
"def calculate_chemistry_position(player_pos, squad_pos):\n chemistry_position = 0\n pos_points = [1, 2, 3]\n if player_pos == squad_pos:\n chemistry_position = pos_points[2]\n return chemistry_position\n if squad_pos == \"LWB\" and player_pos == \"LB\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"RWB\" and player_pos == \"RB\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CM\" and player_pos == \"CDM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CM\" and player_pos == \"CAM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CDM\" and player_pos == \"CM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CAM\" and player_pos == \"CM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CAM\" and player_pos == \"CF\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CF\" and player_pos == \"ST\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CF\" and player_pos == \"CAM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"ST\" and player_pos == \"CF\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"RF\" and player_pos == \"RW\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"RW\" and player_pos == \"RF\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"RW\" and player_pos == \"RM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"RM\" and player_pos == \"RW\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"LF\" and player_pos == \"LW\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"LW\" and player_pos == \"LF\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"LW\" and player_pos == \"LM\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"LM\" and player_pos == \"LW\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CB\" and player_pos == \"RB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CB\" and player_pos == \"CDM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CB\" and player_pos == \"LB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LWB\" and player_pos == \"LM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LWB\" and player_pos == \"RWB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LWB\" and player_pos == \"LW\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LB\" and player_pos == \"LM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LB\" and player_pos == \"RB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LB\" and player_pos == \"CB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LB\" and player_pos == \"LWB\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"RWB\" and player_pos == \"RM\":\n chemistry_position = 
pos_points[0]\n return chemistry_position\n if squad_pos == \"RWB\" and player_pos == \"RW\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RWB\" and player_pos == \"LWB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RB\" and player_pos == \"RM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RB\" and player_pos == \"LB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RB\" and player_pos == \"CB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RB\" and player_pos == \"RWB\":\n chemistry_position = pos_points[1]\n return chemistry_position\n if squad_pos == \"CM\" and player_pos == \"LM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CM\" and player_pos == \"RM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CDM\" and player_pos == \"CB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CDM\" and player_pos == \"CAM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CAM\" and player_pos == \"CDM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CF\" and player_pos == \"LF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"CF\" and player_pos == \"RF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"ST\" and player_pos == \"LF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"ST\" and player_pos == \"RF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RF\" and player_pos == \"ST\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RF\" and player_pos == \"CF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RF\" and player_pos == \"LF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RF\" and player_pos == \"RM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RW\" and player_pos == \"LW\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RW\" and player_pos == \"RWB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RM\" and player_pos == \"RF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RM\" and player_pos == \"RWB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RM\" and player_pos == \"RB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RM\" and player_pos == \"LM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"RM\" and player_pos == \"CM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LF\" and player_pos == \"ST\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LF\" and player_pos == \"CF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LF\" and player_pos == \"RF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LF\" and player_pos == \"LM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == 
\"LW\" and player_pos == \"RW\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LW\" and player_pos == \"LWB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LM\" and player_pos == \"LF\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LM\" and player_pos == \"LB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LM\" and player_pos == \"LWB\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LM\" and player_pos == \"CM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n if squad_pos == \"LM\" and player_pos == \"RM\":\n chemistry_position = pos_points[0]\n return chemistry_position\n return chemistry_position",
"def room_coordinates(self):\n #Stand Position\n x,y,z = self.stand_coordinates\n ax,ay,az = self.stand.pitch, self.stand.yaw, self.stand.roll\n\n #####\n # X #\n #####\n rm_x = (x*cos(ay)*cos(az)\n + y*(sin(ax)*sin(ay)*cos(az)-sin(az)*cos(ax))\n + z*(sin(ay)*cos(ax)*cos(az)+sin(ax)*sin(az)))\n\n #####\n # Y #\n #####\n rm_y = (x*sin(az)*cos(ay)\n + y*(sin(ax)*sin(ay)*sin(az)+cos(ax)*cos(az))\n + z*(sin(ay)*cos(ax)*sin(az)-sin(ax)*cos(az)))\n\n #####\n # Z #\n #####\n rm_z = (-x*sin(ay)\n + y*sin(ax)*cos(ay)\n + z*cos(ax)*cos(ay))\n\n return Point(rm_x, rm_y, rm_z)",
"def get_object_position_on_grid(self, distance):\n #now we know the spatial resolution per grid spacing\n dx = distance[0] / self._dx\n dy = distance[1] / self._dy\n dz = distance[2] / self._dz\n #get the center loction\n cx = int(self.x_dim / 2)\n cy = int(self.y_dim / 2)\n cz = int(self.z_dim / 2)\n #return the x, y, z indicies to add at\n return cx + dx, cy + dy, cz + dz",
"def calculate_new_player_position(player_choice, player):\n player_newY = player.y # start with current position\n player_newX = player.x # start with current position\n \n # Calculate new position\n if player_choice == 'w':\n player_newY -= 1\n elif player_choice == 's':\n player_newY += 1\n elif player_choice == 'a':\n player_newX -= 1\n elif player_choice == 'd':\n player_newX += 1\n\n return player_newY, player_newX",
"def _get_random_location(self):\n\n width, height = self.world_size\n\n # # Generate a random (x, y) coordinate within the world's borders\n x = random.uniform(self.radius, width - self.radius)\n y = random.uniform(self.radius, height - self.radius)\n\n x -= width // 2\n y -= height // 2\n\n return x, y",
"def obj_coords(self, soma_id, soma_map, soma_config):\n query = { \"map\": soma_map,\n \"config\": soma_config,\n \"id\": soma_id\n } \n\n res = self.find_projection(query, {\"pose\": 1})\n\n if res.count() == 0:\n return None\n return res[0]['pose']['position']['x'], res[0]['pose']['position']['y'], \\\n res[0]['pose']['position']['z']",
"def calc_pos(self, gridpos):\n x,y = gridpos\n x = self.x_offset + self.x_u * x\n y = self.y_offset + self.y_u * y\n return x, y",
"def make_uv_sphere(radius, longitude_resolution, latitude_resolution):\n\n # Create a grid of cells\n columns = np.arange(longitude_resolution, np.pi, longitude_resolution)\n rows = np.arange(0, 2 * np.pi, latitude_resolution)\n\n ####################\n # Helper functions #\n ####################\n\n def back_a_row(vertex_index):\n \"\"\"\n Get the index of the vertex in the same column position, but on the previous row.\n \"\"\"\n back_index = vertex_index - len(columns)\n if back_index < 0:\n # We've gone before the first row\n raise IndexError\n else:\n return back_index\n\n def back_a_column(vertex_index):\n \"\"\"\n Get the index of the vertex in the same row position, but on the previous column.\n \"\"\"\n if vertex_index % len(columns) == 0:\n # We are the first column in a row, so we can't go back\n raise IndexError\n else:\n return vertex_index - 1\n\n def create_cell(lower_right_vertex_index):\n \"\"\"\n Create a rectangular face made of two triangles.\n \"\"\"\n\n # Figure out the IDs of the vertices that will make up the grid cell\n lower_left_vertex_index = back_a_column(vertex_index)\n upper_right_vertex_index = back_a_row(vertex_index)\n upper_left_vertex_index = back_a_column(upper_right_vertex_index)\n\n # Create the two triangular faces of the grid cell\n triangle1 = [upper_left_vertex_index, lower_right_vertex_index, lower_left_vertex_index]\n triangle2 = [lower_right_vertex_index, upper_left_vertex_index, upper_right_vertex_index]\n\n return (triangle1, triangle2)\n\n\n # Go through the grid - in row-major order - and construct vertices\n # While we're at it, construct faces for each grid cell\n # The vertices will along a sphere\n vertices = list()\n faces = list()\n vertex_index = 0 # the index of the current vertex\n for row in rows:\n for col in columns:\n\n # Try to make a cell using the vertices above and to the left of this\n # vertex\n try:\n for face in create_cell(vertex_index):\n faces.append(face)\n except IndexError:\n # These helper functions raise index errors when they go out of bounds.\n # If they went out of bounds, this vertex isn't on the lower right\n # of a cell. 
So just skip the cell creation for this vertex.\n pass\n\n # Store the geometry for this vertex\n vertices.append((radius * np.sin(col) * np.cos(row),\n radius * np.sin(col) * np.sin(row),\n radius * np.cos(col)))\n\n # Increment the vertex index\n vertex_index += 1\n\n # Connect the rows of the grid together\n first_row_vertices = range(len(columns))\n last_row_vertices = range(vertex_index - len(columns), vertex_index)\n for i in range(len(first_row_vertices) - 1):\n\n # Figure out the IDs of the vertices that will make up the grid cell\n lower_left_vertex_index = last_row_vertices[i]\n lower_right_vertex_index = last_row_vertices[i + 1]\n upper_left_vertex_index = first_row_vertices[i]\n upper_right_vertex_index = first_row_vertices[i + 1]\n\n # Create the two triangular faces of the grid cell\n triangle1 = [lower_left_vertex_index, lower_right_vertex_index, upper_right_vertex_index]\n triangle2 = [upper_right_vertex_index, upper_left_vertex_index, lower_left_vertex_index]\n\n faces.append(triangle1)\n faces.append(triangle2)\n\n # Add vertices on the column sides, and close up the sphere\n first_column_vertices = range(0, vertex_index - len(columns) + 1, len(columns))\n last_column_vertices = range(len(columns) - 1, vertex_index, len(columns))\n\n vertices.append((0, 0, radius))\n for v1, v2 in zip(first_column_vertices[:-1], first_column_vertices[1:]):\n faces.append([v1, v2, vertex_index])\n faces.append([first_column_vertices[-1], first_column_vertices[0], vertex_index])\n vertex_index += 1\n\n vertices.append((0, 0, -1 * radius))\n for v1, v2 in zip(last_column_vertices[:-1], last_column_vertices[1:]):\n faces.append([vertex_index, v2, v1])\n faces.append([vertex_index, last_column_vertices[0], last_column_vertices[-1]])\n vertex_index += 1\n\n return (vertices, faces)",
"def generate_random_location(self): \n snake = self.snake.get_locations()\n stones = self.stones.get_locations()\n apples = self.apples.get_locations()\n already_taken_space = snake + stones + apples + self.wall\n xy = (0,0)\n while True:\n xy = (random.randrange(0, screen_width, grid_size.x), random.randrange(0, screen_height, grid_size.y))\n if xy not in already_taken_space:\n break\n return xy",
"def coordinates(x_t, y_t, z_t, r_t,\n tube_function, tube_subdivision, inside,\n time_step):\n location = (x_t(time_step),\n y_t(time_step),\n z_t(time_step))\n\n tube_offset = tube_function(tube_subdivision=tube_subdivision,\n inside=inside,\n rotation=r_t(time_step),\n time_step=time_step)\n\n #print(\"%.4f %d %.4f %.4f %.4f %.4f %.4f %.4f\" %\n # (time_step, tube_subdivision, location[0], location[1], location[2],\n # tube_offset[0], tube_offset[1], tube_offset[2]))\n\n location = (location[0] + tube_offset[0],\n location[1] + tube_offset[1],\n location[2] + tube_offset[2])\n\n return location",
"def get_pos_ring(et, num_pts=100, radius = 122000, name_body='Jupiter', units='radec', wcs=False, \n frame='J2000', abcorr='LT+S', name_observer='New Horizons'):\n \n# Now calculate the ring points...\n \n ring_lon = np.linspace(0, 2. * np.pi, num_pts)\n ra_ring = np.zeros(num_pts)\n dec_ring = np.zeros(num_pts)\n \n rot = sp.pxform('IAU_' + name_body, frame, et) # Get matrix from arg1 to arg2\n \n st,ltime = sp.spkezr(name_body, et, frame, abcorr, name_observer)\n pos = st[0:3]\n# vel = st[3:6] # velocity, km/sec, of jupiter\n \n for j in range(num_pts):\n xyz = np.zeros(3)\n xyz = np.array((radius * np.cos(ring_lon[j]), radius * np.sin(ring_lon[j]), 0.))\n \n d_j2000_xyz = np.dot(rot,xyz) # Manual says that this is indeed matrix multiplication\n j2000_xyz = 0 * d_j2000_xyz\n j2000_xyz[0:3] = d_j2000_xyz[0:3] # Looks like this is just copying it\n\n rho_planet = pos # Position of planet\n rho_ring = rho_planet + j2000_xyz # Vector obs-ring\n# dist_ring = sp.vnorm(rho_ring)*1000 # Convert to km... CHECK UNITS!\n \n range_out, ra, dec = sp.recrad(rho_ring) # 'range' is a protected keyword in python!\n \n ra_ring[j] = ra # save RA, Dec as radians\n dec_ring[j] = dec\n \n if (units == 'pixels'):\n x_ring, y_ring = wcs.wcs_world2pix(ra_ring*r2d, dec_ring*r2d, 0) # Convert to pixels\n return x_ring, y_ring\n \n return ra_ring, dec_ring",
"def __create_unitspherical_coord(self, position, args):\n lon = position[0]\n lat = position[1]\n return SkyCoord((lon+360) % 360, lat, unit=u.deg, frame=self.frame.sys, representation=\"unitspherical\", **args)",
"def pointPosition(object, local=bool, world=bool):\n pass",
"def calculePosition(self):\n #Si latitude + vitesse se trouve entre -90 degre et 90 degre\n if self.latitude + self.vitesse <= 324000 and self.latitude + self.vitesse >= -324000:\n self.latitude = self.latitude + self.vitesse\n self.longitude = self.longitude - 15\n #Si latitude + vitesse se trouve superieur a 90 degre (il vient de depasser le Pole Nord)\n elif self.latitude + self.vitesse > 324000:\n self.latitude = (648000) - (self.latitude + self.vitesse)\n self.longitude = -(648000) + (self.longitude - 15)\n self.vitesse = -self.vitesse\n #Si latitude + vitesse se trouve infenieur a 90 degre (il vient de depasser le Pole Sud)\n else:\n self.latitude = -(648000) - (self.latitude + self.vitesse)\n self.longitude = -(648000) + (self.longitude - 15)\n self.vitesse = -self.vitesse\n #Si longitude depasse -648000\" alors longitude repasse à 647999\"\n if self.longitude < -648000:\n self.longitude = 648000 - (-self.longitude - 648000)\n self.calculPointageCamera()",
"def return_position(self, unit='volts'):\n if unit == 'volts':\n curr_x, curr_y = self.position['x'], self.position['y']\n else:\n curr_x, curr_y = self.volts_to_micron(self.position['x'],'x'), self.volts_to_micron(self.position['y'],'y')\n return curr_x, curr_y",
"def position(lat, lon, utc, method=\"ASHRAE\"):\n\n # Calculate solar coefficients\n sinDec, cosDec, eqnOfTime, solFactor = orbit(utc, method=method)\n\n # Calculate hour angle\n H = hour_angle(lon, utc, eqnOfTime)\n sinH = np.sin(H)\n cosH = np.cos(H)\n\n # Sun position\n sinLat = np.sin(np.radians(lat))\n cosLat = np.cos(np.radians(lat))\n\n return (\n cosDec * sinH,\n sinDec * cosLat - cosDec * sinLat * cosH,\n sinDec * sinLat + cosDec * cosLat * cosH,\n )",
"def pointPotential(x,y,q,posx,posy):\n k = 8.987e9 #N m^2/C^2\n Vxy = (k*q)/(((x-posx)**2 + (y-posy)**2)**(1/2.)) \n return Vxy",
"def get_initial_player_placement(self, team):\n min_x = 1\n max_x = self.BOX_WIDTH - 2\n if team == 1:\n min_y = self.BOX_HEIGHT - 3\n max_y = self.BOX_HEIGHT - 2\n elif team == 2:\n min_y = 1\n max_y = 2\n else:\n print('ERROR placing player of team', team)\n return\n\n # -- choose random box\n box_x = random.randint(min_x, max_x)\n box_y = random.randint(min_y, max_y)\n\n # -- reposition element as long as the selected box is not empty\n while not self.is_empty(box_x, box_y):\n box_x = random.randint(min_x, max_x)\n box_y = random.randint(min_y, max_y)\n\n return box_x, box_y",
"def place(place_pos, place_facing, curr_pos, curr_facing, bounds): \n # If we're trying to place the robot out of bounds, do nothing.\n if out_of_bounds(place_pos, bounds):\n return curr_pos, curr_facing\n \n return place_pos, place_facing"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
(TESTED AND WORKS) Adds a new follower's latest activities to the user's feed
|
def add_new_follower_latest_activities(cls, user_id, to_follow_id):
payload = {'user_id': user_id, 'to_follow_id': to_follow_id}
response = requests.get('http://localhost:6543/add_new_follower_acts', params=payload)
json_response = json.loads(response.text)
activities = json_response['activities']
# then add activities to user's feed
user_feed = UserFeed(user_id)
for activity in activities:
user_feed.add_activities(activity)
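Since the call above assumes the local endpoint always answers successfully, a slightly more defensive variant (illustrative only; same endpoint and payload shape, with invented ids) could guard the request before parsing:

import requests

payload = {'user_id': 1, 'to_follow_id': 2}  # example ids, invented for illustration
response = requests.get('http://localhost:6543/add_new_follower_acts', params=payload)
response.raise_for_status()                   # fail loudly on a non-2xx status
activities = response.json()['activities']    # equivalent to json.loads(response.text)['activities']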
|
[
"def follow_user(user_id, follower_id):\n added = add_follower(user_id, follower_id)\n all_activities_key = \"activities:%s\" % (user_id,)\n\n if not added:\n return\n\n redis_connection = redis.StrictRedis(\n host='localhost', port=6379, db=REDIS_DB, decode_responses=True)\n\n retries = 3\n redis_connection.watch(all_activities_key)\n for x in range(retries):\n try:\n # Go through every activity\n for activity_key in redis_connection.smembers(all_activities_key):\n activity = redis_connection.hgetall(activity_key)\n activity['id'] = activity_key\n # write that activity to the feed at the right spot and cleanup.\n for version in VERSIONS:\n aggr_key = write_aggr(activity, version)\n add_to_feed(follower_id, aggr_key, activity, version)\n removed_keys = trim_activity_feed(follower_id, version)\n\n if removed_keys:\n garbage_collection(removed_keys)\n # Go all the way here.\n break\n except redis.exceptions.WatchError:\n # Retry processing this guy.\n pass",
"def direct_message_new_followers():\n # REMEMBER - takes an api instance\n\n screen_name = 'aderalv2'\n followers = []\n unmessaged_followers = []\n\n print(\"Fetching un-messaged followers .... \")\n for user in tweepy.Cursor(api.followers).items():\n followers.append(user)\n\n api.sent_direct_messages((api.me, 1))\n\n #\n # #create list of unmessaged followers\n #\n # for user in unmessaged_followers:\n # # direct_message(user, api)\n #\n # print(\"Messaged un-messaged followers \\n\")",
"def test_add_followers_for_task(self):\n pass",
"def AddFollowers(self, client, adding_user_id, existing_follower_ids, add_follower_ids, timestamp):\n @gen.coroutine\n def _UpdateFollower(follower_id):\n \"\"\"Create a new follower of this viewpoint in the database.\"\"\"\n follower = Follower(user_id=follower_id, viewpoint_id=self.viewpoint_id)\n follower.timestamp = timestamp\n follower.adding_user_id = adding_user_id\n follower.viewed_seq = 0\n follower.labels = [Follower.CONTRIBUTE]\n\n # Create the follower and corresponding Followed record.\n yield [gen.Task(follower.Update, client),\n gen.Task(Followed.UpdateDateUpdated, client, follower_id, self.viewpoint_id,\n old_timestamp=None, new_timestamp=timestamp)]\n\n raise gen.Return(follower)\n\n # Adding user should be an existing user.\n assert adding_user_id is None or adding_user_id in existing_follower_ids, \\\n (adding_user_id, existing_follower_ids)\n\n # Caller should never pass overlapping existing/add user id sets.\n assert not any(follower_id in existing_follower_ids for follower_id in add_follower_ids), \\\n (existing_follower_ids, add_follower_ids)\n\n # Ensure that friendships are created between the followers to add.\n yield gen.Task(Friend.MakeFriendsWithGroup, client, add_follower_ids)\n\n # Ensure that friendships are created with existing followers.\n yield [gen.Task(Friend.MakeFriends, client, existing_id, add_id)\n for existing_id in existing_follower_ids\n for add_id in add_follower_ids]\n\n # Add new followers to viewpoint with CONTRIBUTE permission.\n add_followers = yield [_UpdateFollower(follower_id) for follower_id in add_follower_ids]\n\n raise gen.Return(add_followers)",
"def follow(self, followerId: int, followeeId: int) -> None:\n if followerId!= followeeId:\n if followerId in self.followers:self.followers[followerId].add(followeeId)\n else:self.followers[followerId] ={followeeId}",
"def insert_activities(self):\n print(\"adding activites\")\n users_ids = self.fs_helper.get_all_ids()\n for user_id in users_ids:\n print(\"\\x1b[2J\\x1b[H INSERTING ACTIVITIES\", round(((int(user_id)+1)/182) * 100, 2), \"%\")\n self.insert_activity_for_user(user_id)",
"def AddFollowersOperation(cls, client, callback, activity, user_id, viewpoint_id, contacts):\n # TODO(Andy): Remove this once the AddFollowersOperation is in production.\n from viewfinder.backend.op.add_followers_op import AddFollowersOperation\n AddFollowersOperation.Execute(client, activity, user_id, viewpoint_id, contacts, callback=callback)",
"def update_post_users(sender, instance, *args, **kwargs):\n followers = Followers.get_active_followers(instance.user).values_list(\"follower_id\", flat=True)\n instance.show_to_users.set(list(followers))",
"def userStartFollowing(userId, idToFollow):\n user = getUser(userId)\n userToFollow = getUser(idToFollow)\n\n user.suscriptions.append(idToFollow)\n userToFollow.followers.append(userId)\n user.put()\n userToFollow.put()",
"def add_to_feed(feed_user_id, aggr_key, activity, version):\n feed_key = \"activity_feed:%s:%s\" % (version, feed_user_id,)\n write_aggregate_to_feed(aggr_key, feed_key, activity['timestamp'])",
"def follow():\n user = mongo.db.Users\n uuid = request.json['uuid']\n foreign_uuid = request.json['foreign_uuid']\n user.update({'uuid': uuid}, {\"$addToSet\": {'follow': foreign_uuid}}, True)\n user.update({'uuid': foreign_uuid}, {\"$addToSet\": {'follower': uuid}}, True)\n\n return jsonify({'result': \"Follow successful!\"})",
"def follow(self, user):\n user.followers += 1\n self.following += 1",
"def follow_account_followers(account_name, api, max_number_to_follow = 75):",
"def update_booking_followups(apps, schema_editor):\n booking_followups = []\n BookingFollowup = Booking.followup.through\n bookings = Booking.objects.all()\n Followup = apps.get_model('core','Followup')\n for b in bookings:\n followups = Followup.objects.filter(booking=b.id)\n for f in followups:\n booking_followups.append(BookingFollowup(booking=b,followup_id=f.id))\n BookingFollowup.objects.bulk_create(booking_followups)",
"def follow_users(self) -> None:\n self.navigate_site()\n followers = [\n name.text[1:]\n for name in self.page_source.findAll(\n \"span\", attrs={\"class\": \"dashboard-username\"}\n )\n ]\n for follower in followers:\n self.driver.get(f\"https://dev.to/{follower}\")\n follow_back_xpath = '//*[@id=\"user-follow-butt\"]'\n status = \"\"\n retries = 5\n for i in range(retries):\n while True:\n try:\n status = WebDriverWait(self.driver, self._timeout).until(\n EC.presence_of_element_located((By.XPATH, follow_back_xpath))\n )\n status = re.sub(r\"[^\\w]\", \"\", status.text)\n assert status\n except BaseException:\n continue\n else:\n break\n\n if status.upper() != \"FOLLOWING\":\n follow_back = self.driver.find_element_by_xpath(follow_back_xpath)\n follow_back.click()\n time.sleep(random.randint(3, 10))\n follow_back = self.driver.find_element_by_xpath(follow_back_xpath)\n follow_back = re.sub(r\"[^\\w]\", \"\", follow_back.text)\n print(f\"{follow_back} -> {follower}\")\n followers.pop()",
"def test_user_list_followers(self):\n pass",
"def test_user_current_put_follow(self):\n pass",
"def saveFollowers(followees,twitter_api,path='data/followers/'):\n from utilities.twhelper import get_followers_ids\n import json\n \n for followee in (followees):\n try:\n followers = get_followers_ids(twitter_api, screen_name=followee)\n with open(path+followee+'.json', 'w') as outfile:\n json.dump(followers, outfile)\n except Exception as e:\n print('[ERROR]: ',followee,e)",
"def follow(user, obj, send_action=True, actor_only=True):\n from actstream.models import Follow, action\n\n check_actionable_model(obj)\n follow, created = Follow.objects.get_or_create(user=user,\n object_id=obj.pk,\n content_type=ContentType.objects.get_for_model(obj),\n actor_only=actor_only)\n if send_action and created:\n action.send(user, verb=_('started following'), target=obj)\n return follow"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pulls out song name from file path, strips disc/track numbers and file extension. IN
|
def song_name_extractor(file_link):
# first pattern takes everything between last / and .ext
p1 = re.compile(r"/([^/]+)\.\w{3}")
# next takes everything after track/disc number and whitespace
p2 = re.compile(r"[\d-]*\s(.+)")
# testing both cases
step1 = p1.search(file_link)
if step1:
sname = step1.group(1)
else:
sname = file_link
step2 = p2.match(sname)
if step2:
sname = step2.group(1)
return sname
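A quick usage example of the extractor above; the path is made up for illustration, and the second pattern expects a track/disc prefix followed by whitespace:

# "/music/Album/03 Clocks.mp3" -> "Clocks"
print(song_name_extractor("/music/Album/03 Clocks.mp3"))  # prints: Clocks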
|
[
"def songCleaner(filename):\r\n \"\"\"NOTE: For now, you need to manually remove lyrics, alternate versions,\r\n \"variations\", and anything else that comes after the body of\r\n the song! \"\"\"\r\n # Be careful that the first song starts at the top of the file!\r\n file1 = open(filename, 'rb')\r\n string = file1.read()\r\n songList = []\r\n songParts = string.split(\"X\")\r\n for i in range(1, len(songParts)):\r\n songList.append(songParts[i].split(\"K:G\")[1])\r\n songString = \"\"\r\n for i in range(len(songList)):\r\n songString+=(songList[i])\r\n noEndings = \"\"\r\n sections = songString.split(\"|1\")\r\n listOfParts = []\r\n listOfParts.append(sections[0])\r\n for i in range(1,len(sections)):\r\n listOfParts.append(sections[i].split(\":|2\")[1])\r\n notesString = \"\" \r\n for i in range(len(listOfParts)):\r\n noEndings += listOfParts[i] \r\n for i in range(len(noEndings) - 1): \r\n #For now, we remove all ornamentation\r\n if noEndings[i] not in [\" \", \"|\", \"\\n\", \":\", \"~\", \"\\r\"]:\r\n notesString += noEndings[i] \r\n return notesString",
"def get_song_path(track_id: int):\n\n return f'song_samples/{track_id:06}.mp3'",
"def songOnDisk(song):\n graph = getGraph()\n root = graph.value(showUri(), L9['musicRoot'])\n if not root:\n raise ValueError(\"%s has no :musicRoot\" % showUri())\n \n name = graph.value(song, L9['songFilename'])\n if not name:\n raise ValueError(\"Song %r has no :songFilename\" % song)\n\n return path.abspath(path.join(root, name))",
"def subsong_splitext(filepath):\n subsong_exts = SUBSONG_FORMATS.values()\n return splitext(filepath, *subsong_exts)",
"def path_from_trackid(msddir,trackid):\n p = os.path.join(msddir,trackid[2])\n p = os.path.join(p,trackid[3])\n p = os.path.join(p,trackid[4])\n p = os.path.join(p,trackid.upper()+'.h5')\n return p",
"def loadSong(filename):\n\t# Split off the file extension from the file name.\n\tname, extension = os.path.splitext(filename)\n\t\n\t# Use the original file name, along with the extension\n\t# to properly load the audio segment.\n\treturn AudioSegment.from_file(filename, extension[1:])",
"def get_file_name_from_path(file_path):\n file_name = file_path\n slash_position = file_name.rfind('/')\n dot_position = file_name.rfind('.')\n if slash_position >= 0:\n file_name = file_name[slash_position + 1:]\n if dot_position >= 0:\n file_name = file_name[:dot_position]\n return file_name",
"def author_title_from_filename(self,filename):\n filename = filename.replace('.mp3','')\n filename = filename.replace('_',' ')\n parts = filename.split(' - ')\n self.author = parts[0]\n self.title = parts[1]",
"def get_album_name(album_data):\n if not type(album_data) is dict:\n raise TypeError(\"Type of album_data is not a dictionary.\")\n\n # Get the title from the album\n title = album_data[\"Albums\"][0][\"Title\"]\n print(title)\n\n # Reserved characters in a folder/file name... Python 2 sucks:\n to_remove = r'[\\/\\*\\?:\"<>]+'\n title = ''.join(re.split(to_remove, title))\n\n print(title)\n return title",
"def _file2name(self, filename):\n rel_filename = re.sub('^{0}/'.format(self._content_root()),\n '', filename)\n fullname = os.path.splitext(rel_filename)[0]\n return fullname",
"def extract_from_song(path, base_duration):\n song = guitarpro.parse(path)\n track = song.tracks[0]\n return extract_data(track, base_duration)",
"def normalize_file_path(path):\n return path.rstrip('/')",
"def path_from_trackid(msddir, trackid):\n p = os.path.join(msddir, trackid[2])\n p = os.path.join(p, trackid[3])\n p = os.path.join(p, trackid[4])\n p = os.path.join(p, trackid.upper() + '.h5')\n return p",
"def filename(self):\n fname = self.raw_filename\n if not isinstance(fname, text_type):\n fname = fname.decode('utf8', 'ignore')\n fname = normalize('NFKD', fname)\n fname = fname.encode('ASCII', 'ignore').decode('ASCII')\n fname = os.path.basename(fname.replace('\\\\', os.path.sep))\n fname = re.sub(r'[^a-zA-Z0-9-_.\\s]', '', fname).strip()\n fname = re.sub(r'[-\\s]+', '-', fname).strip('.-')\n return fname[:255] or 'empty'",
"def StripSuffixes(Filename):\n return Filename.split(\".\")[0].split(\"_\")[0]",
"def _generate_track_filename(self, extention):\n track_filename = ''\n for char in self.title:\n if char in \" -,.;:(){}[]`~'\":\n track_filename += '_'\n else:\n track_filename += char\n\n if extention != '':\n track_filename = f'{track_filename}.{extention}'\n else:\n pass\n\n return track_filename",
"def filename_core (apath):\n if (apath is None): # sanity check\n return ''\n return os.path.basename(os.path.splitext(apath)[0])",
"def nameext_from_path(path: str) -> str:\n nameext = os.path.split(path)[-1]\n return nameext",
"def subsongtype(filepath):\n ext = subsong_splitext(filepath)[1].lower()\n for subsongformat_type, subsongformat_ext in SUBSONG_FORMATS.items():\n if ext == subsongformat_ext.lower():\n return subsongformat_type\n\n exts = \" \".join(repr(x) for x in SUBSONG_FORMATS.values())\n filename = os.path.basename(filepath)\n raise ExtensionError(\n f\"subsong filename {filename!r} should have one of the extensions [{exts}], \"\n f\"not {ext!r}\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Feeds each song in queue directory to the chunk_song() function. IN
|
def chunk_queue(dir_in="../audio/chunk_queue",
dir_out="../audio/wav_chunked",
chunk_len=5,
sr=22050,
log=True
):
for root, dirs, files in os.walk(dir_in):
for fname in files:
if not re.match(r'^\.', fname):
rel_fpath = os.path.join(root, fname)
chunk_song(rel_fpath, chunk_len=chunk_len, sr=sr, log=log)
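chunk_song() is defined elsewhere in this project; purely as a hypothetical sketch of what such a helper could do (this is not the project's actual implementation), one could split a file into fixed-length wav chunks with pydub:

from pydub import AudioSegment
import os

def chunk_song_sketch(fpath, dir_out="../audio/wav_chunked", chunk_len=5, sr=22050, log=True):
    # Hypothetical: resample to sr, downmix to mono, and write chunk_len-second wav chunks
    audio = AudioSegment.from_file(fpath).set_frame_rate(sr).set_channels(1)
    f_id = os.path.splitext(os.path.basename(fpath))[0]
    step = chunk_len * 1000  # pydub slices in milliseconds
    for i, start in enumerate(range(0, len(audio), step)):
        chunk = audio[start:start + step]
        chunk.export(os.path.join(dir_out, "{}_{:03d}.wav".format(f_id, i)), format="wav")
        if log:
            print("wrote chunk {} of {}".format(i, f_id))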
|
[
"def queue_callback(self):\n selected_song, index = self._player.selected_song()\n response = requests.get('http://localhost:5000/song/' + selected_song)\n song_object = response.json()\n media_file = song_object['pathname']\n media_file = media_file.replace('/', '\\\\')\n song_name = song_object['title']\n self.queue_path.append(media_file)\n self.queue_name.append(song_name)\n self._player.list_songs_queue(self.queue_name)",
"def ingest(self, args):\n self.logger.info(\"ingesting...\")\n for dirpath, _, filenames in os.walk(args.dir):\n for f in filenames:\n song_path = os.path.abspath(os.path.join(dirpath, f))\n song = fzsong.SongEntry(song_path, title=f, album=args.dir)\n\n self.databaser.write(song)",
"def process_songlist(songlist):\n\n for song in songlist:\n process(song)\n scrapit.delay()",
"def _send_chunks(self, chunks, path):\n for chunk in chunks:\n self.send_msg(protocols.build_header(protocols.FILE_CHUNK, path), chunk)\n sleep(CHUNK_SEND_WAIT)\n self.send_msg(protocols.build_header(protocols.FILE_END, path), '')",
"def _sync_queue(self):\n resp = self.get('playlist.json').json()\n playlist = resp['children'][0]\n for i, d in enumerate(playlist['children']):\n track = self._tracks[int(d['name'])]\n track.idx = i\n track.plid = int(d['id'])",
"def chunks(self):\n for name in self.chunk_names():\n yield self.storage.open(name).read()",
"async def _queue(self, ctx: commands.Context, *, page: int = 1):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send(\"Empty queue.\")\n\n items_per_page = 10\n pages = math.ceil(len(ctx.voice_state.songs) / items_per_page)\n\n start = (page - 1) * items_per_page\n end = start + items_per_page\n\n queue = \"\"\n for i, song in enumerate(ctx.voice_state.songs[start:end], start=start):\n queue += \"`{0}.` [**{1.source.title}**]({1.source.url})\\n\".format(\n i + 1, song\n )\n\n embed = discord.Embed(\n description=\"**{} tracks:**\\n\\n{}\".format(len(ctx.voice_state.songs), queue)\n ).set_footer(text=\"Viewing page {}/{}\".format(page, pages))\n await ctx.send(embed=embed)",
"async def sxm_songs(self, ctx: Context, search: str) -> None:\n\n await self._search_archive(ctx, search, True)",
"def fetch_songs(self):\n if len(self.songs) == 0:\n for file in self.MUSIC_DIR.joinpath (\"./songs\").iterdir():\n if file.is_file():\n self.songs.append (file)\n return self.songs",
"def queue(position):\n global _playlist\n collection = get_collection()\n _playlist.append(collection[position])\n log.info(\"Adding : %s\" % collection[position])\n start_player()",
"async def preload_song(self, ctx: commands.Context) -> None:\n try:\n if self.guilds[ctx.guild.id].song_queue.qsize() == 0:\n return\n i = 0\n for item in self.guilds[ctx.guild.id].song_queue.queue:\n item: Song\n if item.stream:\n continue\n backup_title: str = str(item.title)\n if item.link is not None:\n try:\n type_of_source = Url.determine_source(item.link)\n if type_of_source == Url.youtube_url:\n youtube_dict = await self.parent.youtube.youtube_url(\n item.link, ctx.guild.id\n )\n elif type_of_source == Url.soundcloud_track:\n youtube_dict = await self.parent.soundcloud.soundcloud_track(\n item.link\n )\n else:\n continue\n except BasicError:\n self.parent.log(\n logging_manager.debug_info(traceback.format_exc())\n )\n continue\n youtube_dict.user = item.user\n else:\n if item.title:\n continue\n try:\n youtube_dict = await self._search_song(ctx, item)\n except BasicError:\n continue\n youtube_dict.user = item.user\n j: int = 0\n\n for _song in self.guilds[ctx.guild.id].song_queue.queue:\n _song: Song\n if _song.title != backup_title:\n j += 1\n continue\n self.guilds[ctx.guild.id].song_queue.queue[\n j\n ] = Song.copy_song(\n youtube_dict,\n self.guilds[ctx.guild.id].song_queue.queue[j],\n )\n break\n break\n i += 1\n except IndexError:\n pass\n except AttributeError:\n traceback.print_exc()",
"def queueSong(self):\n queueWindow = curses.newwin(5, 40, 5, 50)\n queueWindow.border()\n queueWindow.addstr(0,0, \"What is the file path?\", curses.A_REVERSE)\n self.stdscr.refresh()\n curses.echo()\n path = queueWindow.getstr(1,1, 30)\n curses.noecho()\n del queueWindow\n self.stdscr.touchwin()\n self.stdscr.refresh()\n \n try:\n self.library.add_tracks(path.decode(encoding=\"utf-8\"))\n except CLI_Exception.CLI_Audio_File_Exception:\n self.printError('Error queueing file or folder')",
"def queue(name):\n path = _loader.load(name)\n _music.queue(path)",
"def produce(self,chunk=1):",
"def update_downloaded_queue(self):\n if len(self.queue) >= 2:\n song_file = youtube.download_mp3(self.queue[1][1], \"audio_files/\")\n self.downloaded_queue.append(song_file)",
"def manage_queue(url, dir_queue):\r\n while True:\r\n directory = dir_queue.get()\r\n resource = url.strip(\"/\") + \"/\" + directory\r\n make_request(resource)\r\n dir_queue.task_done()",
"def chunk(self, chunks, dim=0): # real signature unknown; restored from __doc__\n return []",
"async def baron_chunk(self, ctx: commands.Context):\n unchunked = [g async for g in AsyncIter(self.bot.guilds, steps=100) if not g.chunked]\n if not unchunked:\n return await ctx.send(\"All servers are chunked.\")\n await self.chunk(ctx, unchunked)",
"def publish_chunks(self):\n try:\n udp_dest = (self.udp_audio_host, self.udp_audio_port)\n\n while not self._exit_requested:\n chunk = self.chunk_queue.get()\n if chunk:\n # MQTT output\n with io.BytesIO() as wav_buffer:\n wav_file: wave.Wave_write = wave.open(wav_buffer, \"wb\")\n with wav_file:\n wav_file.setframerate(self.sample_rate)\n wav_file.setsampwidth(self.sample_width)\n wav_file.setnchannels(self.channels)\n wav_file.writeframes(chunk)\n\n wav_bytes = wav_buffer.getvalue()\n\n if self.udp_output:\n # UDP output\n self.udp_socket.sendto(wav_bytes, udp_dest)\n else:\n # Publish to output site_id\n self.publish(\n AudioFrame(wav_bytes=wav_bytes),\n site_id=self.output_site_id,\n )\n if self._dump_file is not None:\n # print(\"tell is\", self._dump_file.tell(), end=' ') \n # write_wave( self._dump_file, wav_bytes, remove_header=True)\n if USE_SOUNDFILE:\n\t\t\t\t\t\t\t\t# soultion soundfile\n self._dump_file.write(np.frombuffer(wav_bytes[44:], np.int16))\t# removing header!\n else:\n\t\t\t\t\t\t\t\t# Solution wave, this write always the latest buffer and doesnt happend it!\n self._dump_file.writeframesraw(wav_bytes[44:]) # removing header!\t\t\t\t\t\n if self.enable_summary:\n self.summary_frames_left -= 1\n if self.summary_frames_left > 0:\n continue\n\n self.summary_frames_left = self.summary_skip_frames\n if not self.vad:\n # Create voice activity detector\n self.vad = webrtcvad.Vad()\n self.vad.set_mode(self.vad_mode)\n # webrtcvad needs 16-bit 16Khz mono\n # TODO: would be possible to split here if demux is not selected? this would avoid resampling,\n # which is called continuously. (uncomment this code). With the switch --demux a proper channel\n # is produced\n # with io.BytesIO(wav_bytes) as wav_io:\n # with wave.open(wav_io, \"rb\") as wav_file:\n # if (wav_file.getframerate() != 16000) or \\\n # (wav_file.getsampwidth() != 2) or \\\n # (wav_file.getnchannels() != 1):\n # print(\"Need Resample: sr={}, width={}, n_ch={}\".format(wav_file.getframerate(),\n # wav_file.getsampwidth(),\n # wav_file.getnchannels()))\n # else:\n # print(\"No resample\")\n # webrtcvad needs 16-bit 16Khz mono\n self.vad_audio_data += self.maybe_convert_wav(\n wav_bytes, sample_rate=16000, sample_width=2, channels=1\n )\n is_speech = False\n # Process in chunks of 30ms for webrtcvad\n while len(self.vad_audio_data) >= self.vad_chunk_size:\n vad_chunk = self.vad_audio_data[: self.vad_chunk_size]\n self.vad_audio_data = self.vad_audio_data[\n self.vad_chunk_size :\n ]\n # Speech in any chunk counts as speech\n is_speech = is_speech or self.vad.is_speech(\n vad_chunk, 16000\n )\n # Publish audio summary\n self.publish(\n AudioSummary(\n debiased_energy=AudioSummary.get_debiased_energy(chunk),\n is_speech=is_speech,\n ),\n site_id=self.output_site_id,\n )\n\n except Exception as e:\n _LOGGER.exception(\"publish_chunks\")\n self.publish(\n AudioRecordError(\n error=str(e), context=\"publish_chunks\", site_id=self.site_id\n )\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts all .wav files in a directory to .mp3 with bitrate specified. Checks destination directory to see if file has been converted already. IN
|
def wav_to_mp3_batch(dir_in,
                     dir_out="../audio/mp3_chunked",
                     bitrate=96
                     ):
    existing = set()
    bitrate = str(bitrate)
    for mp3_fpath in glob(dir_out + "/*.mp3"):
        f_id = os.path.splitext(os.path.basename(mp3_fpath))[0]
        existing.add(f_id)
    for wav_fpath in glob(dir_in + "/*.wav"):
        f_id = os.path.splitext(os.path.basename(wav_fpath))[0]
        if f_id not in existing:
            command = "lame -b{} {}/{}.wav {}/{}.mp3".format(bitrate,
                                                             dir_in,
                                                             f_id,
                                                             dir_out,
                                                             f_id)
            result = os.system(command)
            if result != 0:
                print("*** ERROR: {} not converted".format(f_id))
|
[
"def wav2mp3(wavfile, mp3file, bitrate=128):\n cmd = \"sox -c 1 %s -C %d %s\" % (wavfile, bitrate, mp3file)\n subprocess.call(cmd.split(\" \"))",
"def mp3_to_wav(self, pathname):\n if self.folder_method == 'folder':\n label_list = open(self.out_folder + '/' + 'labels.txt', 'w')\n my_path_cwd = self.ext_storage\n labels = os.listdir(my_path_cwd)\n pbar = tqdm(range(self.total_conv))\n for upl in pbar:\n rand_lab = random.choice(labels)\n path = my_path_cwd + '/' + rand_lab\n file = random.choice(os.listdir(path))\n filetype = file[-4:]\n filename = file[:-4]\n pbar.set_description(filename)\n if filetype == '.mp3':\n mp3_form = AudioSegment.from_mp3(path + '/' + file)\n full_clip = self.seconds * 1000 # Runs in miliseconds\n if full_clip > len(mp3_form):\n continue\n rand_start = np.random.randint(0, len(mp3_form) - full_clip)\n mp3_wav = mp3_form[rand_start:(rand_start + full_clip)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n mp3_wav.export(destin, format='wav')\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n elif filetype == '.m4a':\n m4a_form = AudioSegment.from_file(path + '/' + file)\n full_clip = self.seconds * 1000 # Runs in miliseconds\n if full_clip > len(m4a_form):\n continue\n rand_start = np.random.randint(0, len(m4a_form) - full_clip)\n m4a_wav = m4a_form[rand_start:(rand_start + full_clip)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n m4a_wav.export(destin, format='wav')\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n elif filetype == '.wav':\n wav_form = AudioSegment.from_wav(path + '/' + file)\n full_clip = self.seconds * 1000 # Runs in miliseconds\n if full_clip > len(wav_form):\n continue\n rand_start = np.random.randint(0, len(wav_form) - full_clip)\n wav_wav = wav_form[rand_start:(rand_start + full_clip)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n wav_wav.export(destin, format='wav')\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n else:\n if file[-5:] == '.flac':\n filetype = file[-5:]\n filename = file[:-5]\n data, samprate = sf.read(path + '/' + file)\n # set value due to different sample sizes\n set_value = self.seconds * samprate\n if set_value > len(data):\n continue\n rand_start = np.random.randint(0, len(data) - set_value)\n new_data = data[rand_start:int(rand_start + set_value)]\n destin = self.out_folder + '/' + filename + '.wav'\n if Path(destin).is_file():\n filename, destin = self.assign_new_name(destin)\n sf.write(destin, new_data, samprate)\n label_list.write('%s : %s\\n' % (filename, rand_lab))\n return",
"def convert_playlist(self):\n # Taking all the current files of specified format inside dir\n for (dir_name, dirs, files) in os.walk('.'):\n for input_file_name in files:\n # ex : if filename ends with \".mp4\"\n if input_file_name.endswith(self.input_media_format):\n # giving a new name to the file, for easy use\n new_input_file_name = input_file_name.replace(\" \", \"_\")\n new_input_file_name = re.sub(\n \"[^a-zA-Z0-9 \\n\\._]\", \"\", new_input_file_name)\n os.rename(input_file_name, new_input_file_name)\n print(\"Renamed : \" + input_file_name + \" with \" + new_input_file_name)\n print(\"Converting \" + input_file_name +\n \"to \" + self.output_media_format + \"format\")\n output_file_name = new_input_file_name[:-4] + self.output_media_format\n print(input_file_name)\n print(output_file_name)\n command = \"ffmpeg -i \" + new_input_file_name + \" \" + output_file_name\n print(command)\n # converted to new file\n os.system(command)",
"def test_mp3_to_wav(src_dest):\n mp3_to_wav(src_dest[0], src_dest[1])\n\n # the following call with raise an exception\n # if the file being read is not encoded as wav\n pydub.AudioSegment.from_wav(src_dest[1])",
"def test_audio_convert_to_mp3(self):\n pass",
"def preprocess_audio(self):\n #remove the data directory if exists\n if os.path.exists(self.data_dir):\n shutil.rmtree(self.data_dir)\n #iterate over speakers\n speakers = sorted(os.listdir(self.conf['inpath']))\n for sp in tqdm(speakers, desc=\"Converting Audio\"):\n speaker_path = os.path.join(self.conf['inpath'], sp)\n wav_filenames = os.listdir(speaker_path)\n for wav in wav_filenames:\n inwav = os.path.join(speaker_path, wav)\n outwav = os.path.join(self.data_dir, wav)\n\n convert_wav(inwav,\n outwav,\n no_channels = self.conf['no_channels'],\n sampling_rate = self.conf['sampling_rate'],\n bit_precision = self.conf['bit_precision'])\n\n \n #remove the enroll directory if exists\n if os.path.exists(self.enroll_dir):\n shutil.rmtree(self.enroll_dir)\n #remove the test directory if exists\n if os.path.exists(self.test_dir):\n shutil.rmtree(self.test_dir)\n \n #create audio/enroll directory\n safe_makedir(self.enroll_dir)\n #create audio/test directory\n safe_makedir(self.test_dir)\n\n #parse num of sessions from configuration\n enroll_sessions = self.conf['enroll_sessions']\n test_sessions = self.conf['test_sessions']\n assert enroll_sessions+test_sessions <= 10,\\\n \"The summation of all sessions must be less than or equal 10!!\"\n #iterate over all preprocessed waves\n wav_filenames = os.listdir(self.data_dir)\n for wav in tqdm(wav_filenames, desc=\"Copying enroll/test waves\"):\n _, sess, _, _ = wav.split(\".\")\n inwav = os.path.join(self.data_dir, wav)\n if int(sess) <= enroll_sessions:\n outwav = os.path.join(self.enroll_dir, wav)\n shutil.copyfile(inwav, outwav)\n elif int(sess) <= enroll_sessions+test_sessions:\n outwav = os.path.join(self.test_dir, wav)\n shutil.copyfile(inwav, outwav)",
"def _encode_mp3(self, wav_filename):\n encode_mp3(\n wav_filename, self.mp3_filename, self.track_metadata,\n stdout_filename=self.stdout_filename)\n\n # check for clipping\n stdout = self.__read_stdout()\n if \"WARNING: clipping occurs at the current gain.\" in stdout:\n clipping_occurs = True\n m = re.search(\n r\"encode\\s+again\\s+using\\s+\\-\\-scale\\s+(\\d+\\.\\d+)\", stdout)\n scale = float(m.group(1)) if m else 0.99\n\n # re-encode, scaling the PCM data, until there is no clipping\n while clipping_occurs:\n self.__log.info(\n \"detected clipping in %s; re-encoding at %.2f scale...\",\n self.mp3_filename, scale)\n status = (\n self.track_index, self.cdda_filename, self.flac_filename,\n self.stdout_filename, TRACK_REENCODING_MP3(scale))\n _ENCODING_QUEUE.put((5, status))\n\n encode_mp3(\n wav_filename, self.mp3_filename, self.track_metadata,\n scale=scale, stdout_filename=self.stdout_filename)\n\n clipping_occurs = (\n \"WARNING: clipping occurs at the current gain.\"\n in self.__read_stdout())\n scale -= 0.01",
"def convert_mp3(self, filename, to_mp3=True):\r\n fs = FluidSynth()\r\n title = filename.split('.')[0]\r\n audio_filename = f'{title}.mp3' if to_mp3 else f'{title}.wav'\r\n # saves file to disk\r\n fs.midi_to_audio(filename, audio_filename)",
"def resample_folder(input_folder, output_folder, fs, regex):\n # filedir = os.path.dirname(os.path.realpath(__file__))\n # octave.addpath(filedir)\n # add the matlab functions to octave dir here\n\n files = glob.glob(os.path.join(input_folder, regex), recursive=True)\n for f in tqdm.tqdm(files):\n\n audio, fs_read = torchaudio.load(f)\n audio = audio[0].numpy()\n audio = signal.resample_poly(audio, fs, fs_read)\n\n # tmp = octave.activlev(audio.tolist(), fs, \"n\")\n # audio, _ = tmp[:-1].squeeze(), tmp[-1]\n\n peak = np.max(np.abs(audio))\n audio = audio / peak\n audio = torch.from_numpy(audio).float()\n\n relative_path = os.path.join(\n Path(f).relative_to(Path(input_folder)).parent,\n Path(f).relative_to(Path(input_folder)).stem\n + \"_peak_{}.wav\".format(peak),\n )\n\n os.makedirs(\n Path(\n os.path.join(\n output_folder, Path(f).relative_to(Path(input_folder))\n )\n ).parent,\n exist_ok=True,\n )\n\n torchaudio.save(\n os.path.join(output_folder, relative_path),\n audio.reshape(1, -1),\n fs,\n )",
"def convert_with_avconv(input_song, output_song, folder, verbose):\n if verbose:\n level = 'debug'\n else:\n level = '0'\n\n command = ['avconv',\n '-loglevel', level,\n '-i', os.path.join(folder, input_song),\n '-ab', '192k',\n os.path.join(folder, output_song)]\n\n return subprocess.call(command)",
"def convert_to_wav(csv_file, target_dir):\n wav_dir = os.path.join(target_dir, 'wav/')\n txt_dir = os.path.join(target_dir, 'txt/')\n os.makedirs(wav_dir, exist_ok=True)\n os.makedirs(txt_dir, exist_ok=True)\n path_to_data = os.path.dirname(csv_file)\n\n def process(x):\n global media_path\n file_path, text = x\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n act_path_to_data = os.path.join(path_to_data, media_path)\n\n text = text.strip().upper()\n with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:\n f.write(text)\n cmd = \"sox {} -r {} -b 16 -c 1 {}\".format(\n os.path.join(act_path_to_data, file_path),\n args.sample_rate,\n os.path.join(wav_dir, file_name + '.wav'))\n subprocess.call([cmd], shell=True)\n\n print('Converting mp3 to wav for {}.'.format(csv_file))\n _, fext = os.path.splitext(csv_file)\n with open(csv_file) as csvfile:\n reader = None\n if fext.endswith('tsv'):\n reader = csv.DictReader(csvfile, delimiter='\\t')\n else:\n reader = csv.DictReader(csvfile)\n #i = reader.fieldnames\n #print(i)\n data = [(row['path'], row['sentence']) for row in reader]\n with ThreadPool(10) as pool:\n pool.map(process, data)",
"def convert_directory(path, compression=None, output_pixel_type=None, verbose=True):\n \n for filename in os.listdir(path):\n if filename[-4:] == 'fits':\n output_filename = path + '/' + filename[:-5] + \".exr\"\n else:\n continue\n \n if verbose:\n print (\"Converting: \" + filename)\n \n convert(path + '/' + filename, output_filename, compression, output_pixel_type, verbose)",
"def upload_directory(directory, ytmusic):\n music_formats = [\".mp3\", \".m4a\", \".flac\", \".wma\", \".ogg\"]\n for root, _, files in os.walk(directory):\n for track in filter(lambda f: f.endswith(tuple(music_formats)), files):\n filepath = os.path.join(root, track)\n print_filesize(track, filepath)\n ytmusic.upload_song(filepath)",
"def process(root, name):\n\n # if the item is an MP3 file change the sample rate\n if name.lower().endswith(OLD_EXT):\n\n # set file names\n oldfile = os.path.join(root, name)\n newfile = os.path.join(root, name[:-len(OLD_EXT)] + NEW_EXT)\n\n # call lame\n try:\n #retcode = 0\n #print COMMAND % (oldfile, newfile)\n retcode = os.system(COMMAND % (oldfile, newfile))\n if retcode != 0:\n print \"Execution was terminated by signal %d\" % -retcode\n else:\n print \"Successfully converted '%s'\" % name\n except OSError, e:\n print \"Execution failed: %s\" % e\n sys.stdout.flush()",
"def _move_all_audio(self, archive_root, dest_root):\n for directory in self.file_path_filter.filter(\n filter(\n lambda file_like: Path.is_dir(file_like),\n archive_root.iterdir(),\n )\n ):\n dest_dir = dest_root / directory.name\n self._move_audio_in_dir(directory, dest_dir)",
"def test_audio_convert_to_wav(self):\n pass",
"def _move_audio_in_dir(self, src, dest):\n if self.user_option.verbose:\n print(\"Move:\", src.name)\n os.makedirs(str(dest), exist_ok=True)\n for wav_file in self.global_config.extensions.itemize_in_directory(\n src\n ):\n dest_path = dest / wav_file.name\n self._move_file(wav_file, dest_path)",
"def transcode_to_mp3(filepath, quality='320k', slice_start=None, slice_duration=None):\r\n\r\n err_output = None\r\n cmd_path = spawn.find_executable('ffmpeg')\r\n if cmd_path is None:\r\n cmd_path = spawn.find_executable('avconv')\r\n if cmd_path is None:\r\n raise IOError('Neither ffmpeg nor avconv was found in your PATH')\r\n cmd = [cmd_path, '-i', filepath]\r\n\r\n if slice_duration is not None:\r\n cmd.extend(['-t', str(slice_duration)])\r\n if slice_start is not None:\r\n cmd.extend(['-ss', str(slice_start)])\r\n\r\n if isinstance(quality, int):\r\n cmd.extend(['-q:a', str(quality)])\r\n elif isinstance(quality, basestring):\r\n cmd.extend(['-b:a', quality])\r\n else:\r\n raise ValueError(\"quality must be int or string, but received %r\" % quality)\r\n\r\n cmd.extend(['-f', 's16le', # don't output id3 headers\r\n '-c', 'libmp3lame',\r\n 'pipe:1'])\r\n\r\n log.debug('running transcode command %r', cmd)\r\n\r\n try:\r\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,\r\n stderr=subprocess.PIPE)\r\n\r\n audio_out, err_output = proc.communicate()\r\n\r\n if proc.returncode != 0:\r\n err_output = (\"(return code: %r)\\n\" % proc.returncode) + err_output\r\n raise IOError # handle errors in except\r\n\r\n except (OSError, IOError) as e:\r\n\r\n err_msg = \"transcoding command (%s) failed: %s. \" % (' '.join(cmd), e)\r\n\r\n if 'No such file or directory' in str(e):\r\n err_msg += '\\navconv must be installed and in the system path.'\r\n\r\n if err_output is not None:\r\n err_msg += \"\\nstderr: '%s'\" % err_output\r\n\r\n log.exception('transcoding failure:\\n%s', err_msg)\r\n\r\n raise IOError(err_msg)\r\n\r\n else:\r\n return audio_out",
"def convertToMP3(fileName,codec='mpga',outputBitRate='192'):\n outputSongName = getOutputName(fileName[:-4], '.mp3')\n shutil.copy2(testPath+'/'+fileName,VLCpath+'/'+fileName)\n t = Template('vlc -I dummy $song \":sout=#transcode{acodec=$codec,ab=$outputBitRate}:std{dst=$outputSongName,access=file}\" vlc://quit')\n command = t.substitute(song='\"'+fileName+'\"', codec=codec, outputBitRate=outputBitRate, outputSongName=outputSongName)\n print (command)\n p = subprocess.Popen(command, cwd=VLCpath, shell=True)\n stdout, stderr = p.communicate()\n #log stderr and stdout\n os.remove(VLCpath+'/'+fileName)\n shutil.move(VLCpath+'/'+outputSongName,testPath+'/'+outputSongName)\n return outputSongName"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints the labeling status of samples in the Mongo DB and adds a status record to a separate status collection.
|
def db_status():
db = kdb.test_songs
# pull last record from status DB for comparison
last = kdb.status.find_one({"last": True})
labels = [
("Total samples\t", 'total'),
("Labeled samples\t", 'labeled'),
("Skipped samples\t", 'skipped'),
("Vocals, foreground", 'vox_fg'),
("Vocals, background", 'vox_bg'),
("Saxophone, foreground", 'sax_fg'),
("Saxophone, background", 'sax_bg'),
("Piano, foreground", 'pno_fg'),
("Piano, background", 'pno_bg')
]
# creating dict of db figures
figs = {}
figs['total'] = db.count()
figs['labeled'] = db.find({"labeled": True}).count()
figs['skipped'] = db.find({"skipped": True}).count()
figs['vox_fg'] = db.find({"vocals": 2}).count()
figs['vox_bg'] = db.find({"vocals": 1}).count()
figs['sax_fg'] = db.find({"sax": 2}).count()
figs['sax_bg'] = db.find({"sax": 1}).count()
figs['pno_fg'] = db.find({"piano": 2}).count()
figs['pno_bg'] = db.find({"piano": 1}).count()
percent = {}
    for k, v in figs.items():
        # guard against division by zero when nothing has been labeled yet
        percent[k] = round(100 * v/figs['labeled'], 1) if figs['labeled'] else 0.0
percent['total'] = 'N/A'
print("\nSAMPLE DATABASE STATUS")
print("Category\t\tCount\tDelta\t% Lab'd")
print("-" * 48)
for pair in labels:
        current_val = figs[pair[1]]
        # if no previous status record exists yet, treat the prior count as zero
        delta = current_val - (last or {}).get(pair[1], 0)
print("{}\t{}\t{}\t{}"
.format(pair[0],
str(current_val).rjust(5),
str(delta).rjust(5),
str(percent[pair[1]]).rjust(5))
)
print("-" * 48, '\n')
# change 'last' field of previous status entry
update_result = kdb.status.update_one({"last": True},
{"$set": {"last": False}}
)
if update_result.modified_count != 1:
print("\n*** Error altering previous status record in DB")
# add 'timestamp', 'last', and 'auto' fields to current record
figs['timestamp'] = datetime.now()
figs['last'] = True
figs['auto'] = False
# and add to DB
add_result = kdb.status.insert_one(figs)
if not add_result:
print("\n*** Error adding current status record to DB")
|
[
"def add_to_mongodb(self, status):\r\n try:\r\n insert = self.mongo_coll_tweets.insert_one(status._json)\r\n insert_it = insert.inserted_id\r\n self.media_download(insert_id)\r\n self.counter += 1\r\n except errors.ServerSelectionTimeoutError:\r\n log.log_add(cfg['log_email_threshold'],\r\n 'MongoDB ServerSelectionTimeoutError')\r\n self.connect_mongodb()\r\n except Exception as e:\r\n log.log_add(cfg['log_email_threshold'],\r\n 'Could not write to MongoDB ({})'.format(e))",
"def _write_status(self):\n\n # write active flag to the db and reset the status da\n if self._db_client.write_points(self._status_data):\n self._status_data = list()",
"def _write_status(self):\n\n # write active flag to the db\n self._dbclient.write_points(self._get_status_for_db())",
"def on_status(self, status): \n tweet = {\"id\": status.id,\n \"text\": status.text,\n \"created_at\": status.created_at,\n \"author\": {\n \"name\": status.author.name,\n \"screen_name\": status.author.screen_name}}\n try:\n self.collection.insert(tweet)\n print \"Successfully saved tweet %d\" % status.id\n except OperationFailure, e:\n sys.stderr.write(\"Failed to save tweet %d: %s\\n\" % (status.id, e))",
"def report_pipeline_status(ctx):\n\n for source in ctx.obj['sources']:\n secho('\\nID: {}'.format(source.id), **SUCCESS)\n secho('Updated :{}'.format(source.updated_on), **SUCCESS)\n for contributor in source.description.get('contributors', []):\n secho('Author: {}'.format(contributor), **SUCCESS)\n\n echo()\n for key in source.db_keys:\n if key not in ('pipeline_id', 'id', 'timestamp'):\n echo('{} = {}'.format(key, getattr(source, key)))",
"def publishSwarmingStatusToDb(connection, areaId, status):\n if status is True:\n inProgress = 1\n else:\n inProgress = 0\n\n cursor = connection.cursor()\n\n log.info(\"Adding swarm record to db for area %s, as %s\" % (areaId, status))\n cursor.execute(predictions_run_sql.insertSwarmingForAreaRecord, (areaId, inProgress))",
"def stats():\n db = ingest.load_database()\n ingest.database_stats(db)",
"def statusFile(self, query, status, delimiter=\"\\t\"):\n data = [query.pop(\"name\")] + query.values() + status.values()\n columns = [\"name\"] + query.keys() + status.keys()\n\n this_status = pd.DataFrame(\n [data], columns=columns, index=[len(self.status)])\n self.status = self.status.append(this_status)\n\n if self.stat_file_path:\n self.status.to_csv(self.stat_file_path, index=False)",
"def test_statusml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # insert ml status\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Processing.')\n self.assertEqual(response.status_code, 200)",
"def _set_status(self, status):\n with open(self.paths['status'], 'w') as fd:\n fd.write(status)\n self.status = status\n # Touch a file with our identifier as the name in the collection's\n # queue directory. When we do project.status(), this resource will first\n # get updated in the collection's leveldb.\n queue_path = os.path.join(self.coll.queue_dir, self.ident)\n _touch(queue_path)",
"def update_test_status(self):\n while True:\n try:\n if not os.path.exists(self.apollo_test_status_path):\n os.mkdir(self.apollo_test_status_path)\n logger.debug('Create ({}) directory under {} path successfully'\n .format(self.apollo_test_status_directory, self.apollo_test_status_path))\n\n # Read the test status information from the apollo_test_status directory\n test_status_list = os.listdir(self.apollo_test_status_path)\n\n if test_status_list:\n logger.debug('Read the file under path {}:\\n{}'.format(self.apollo_test_status_path,\n test_status_list))\n for file in test_status_list:\n if re.match('fx.+?_.+?_.+?.txt', file):\n logger.info('Captured file: {}'.format(file))\n # Format to check\n machine, cell, test_status = file.split('.txt')[0].split('_')\n # Start updating the access data table\n updated_status = self.update_access_table(machine=machine,\n cell=cell,\n test_status=test_status)\n # Delete test status files whose status has been updated\n if updated_status == 'PASS':\n updated_file = os.path.join(self.apollo_test_status_path, file)\n if os.path.exists(updated_file):\n os.remove(updated_file)\n logger.debug('Delete {} successful'.format(updated_file))\n else:\n time.sleep(1)\n except Exception as ex:\n logger.exception(ex)\n time.sleep(1)",
"def write_global_status(status):\n\n # write status to file\n global_status_dir = str(Path.home()) + \"/.config/nuqql\"\n Path(global_status_dir).mkdir(parents=True, exist_ok=True)\n global_status_file = global_status_dir + \"/global_status\"\n line = status + \"\\n\"\n lines = []\n lines.append(line)\n with open(global_status_file, \"w+\") as status_file:\n status_file.writelines(lines)",
"def log_status(self, sessionid, responsetime, httpstatus, contentstatus):\n self.cursor.execute(\"INSERT INTO status (sessionid, responsetime, httpstatus, contentstatus) VALUES (?,?,?,?);\", (sessionid, responsetime, httpstatus, contentstatus))\n self.connection.commit()",
"def test_load():\n client = MongoClient()\n db = client.twitterdb\n \n print(\"Extracting Trump tweet ...\")\n pprint.pprint(db.trump_tweets.find_one())\n print(\"Number of collections: {}\".format(len(db.collection_names())))",
"def loadData(self):\r\n\r\n dbName = self.db_info['name']\r\n hostname = self.db_info['hostname']\r\n user = self.db_info['user']\r\n pwd = self.db_info['pwd']\r\n label_coll_name = self.db_info['label_coll_name']\r\n history_coll_name = self.db_info['history_coll_name']\r\n port = self.db_info['port']\r\n\r\n try:\r\n print(\"Trying connection...\")\r\n client = MongoClient(hostname)\r\n client[dbName].authenticate(user, pwd)\r\n db = client[dbName]\r\n print(\"Connected to mongodb @ {0}:[{1}]\".format(\r\n hostname, port))\r\n except Exception as E:\r\n print(\"Fail to connect mongodb @ {0}:{1}, {2}\".format(\r\n hostname, port, E))\r\n exit()\r\n\r\n # Read label collection\r\n collection = db[label_coll_name]\r\n num_urls = collection.count()\r\n data = {}\r\n if num_urls > 0:\r\n dataDB = collection.find({})\r\n for i in range(num_urls):\r\n wid = dataDB[i]['idna']\r\n data[wid] = dataDB[i]['value']\r\n if 'url' not in data[wid]:\r\n data[wid]['url'] = wid\r\n\r\n # Read history\r\n collection = db[history_coll_name]\r\n num_events = collection.count()\r\n labelhistory = {}\r\n if num_events > 0:\r\n dataDB = collection.find({})\r\n for i in range(num_events):\r\n wid = dataDB[i]['idna']\r\n labelhistory[wid] = dataDB[i]['value']\r\n\r\n df_labels, df_preds = self.get_df(data, labelhistory)\r\n\r\n # In the current version, predictions are not being stored in the\r\n # mongo db. They must be loaded from files.\r\n if os.path.isfile(self.datapreds_file):\r\n # Load prediction dataframes stored in pickle files\r\n df_preds = pd.read_pickle(self.datapreds_file)\r\n\r\n return df_labels, df_preds, labelhistory",
"def print_status(numcodes, totalNum, msg): #progress indicator\n print('Record: {} / {} {:>20}\\r'.format(numcodes, totalNum, msg), end='\\r'),\n sys.stdout.flush()",
"def populate_mongoDB():\n\tglobal file, db\n\tcounter = 0\n\tfor line in file: # Standard iteration through file\n\t\ttweet = json.loads(line) # Recognize it as a JSON object\n\t\ttweet['created_at'] = datetime.strptime(tweet['created_at'].replace('+0000',''),\n\t\t\t'%a %b %d %H:%M:%S %Y')\n\t\tdb.insert(tweet) # Actually insert it into the tweets collection\n\t\tif counter % 10000 == 0: # Shows import update status - can take a while\n\t\t\tprint counter,\n\t\tcounter += 1\n\tprint \"done\"",
"def Status(con, category, message):",
"def add_status(self, project_id, status):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/statuses/'\n data = parser.to_json(status)\n response = zoho_http_client.post(url, self.details, data)\n return parser.get_statuses(response)[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a datagroup in the DB by randomly sampling an equal number of records where the positive label has a value of 2 (foreground) and records where the positive label has a value of 0 (none). IN
|
def create_datagroup_in_db(group_name, pos_label, n_per_label='auto'):
assert_msg = "Invalid input for n_per_label"
assert n_per_label == 'auto' or type(n_per_label) == int, assert_msg
pos_ids = np.array([])
neg_ids = np.array([])
if n_per_label == 'auto':
n_per_label = kdb.test_songs.find({pos_label: 2}).count()
label = 1
for val, arr in zip([2,0], [pos_ids, neg_ids]):
chunks = kdb.test_songs.aggregate([
{"$match": {pos_label: val}},
{"$sample": {"size": n_per_label}}
])
for chunk in chunks:
chunk_id = chunk['chunk_id']
result = kdb.test_songs.update_one(
{"chunk_id": chunk_id},
{"$set": {group_name: label}}
)
if result.modified_count != 1:
print("*** Error on DB insertion, {}".format(chunk_id))
break
label -= 1
for label in [1,0]:
members = kdb.test_songs.find(
{group_name: label}
).count()
print("Label {}: {}".format(label, members))
|
[
"def create(self, **kwargs):\r\n new_id = str(self.group_counter.next())\r\n fsg = FakeScalingGroup(new_id, **kwargs)\r\n self.groups[new_id] = fsg\r\n return fsg",
"def StratifiedSample(data, nperlabel):\n sample = pd.DataFrame()\n datagrp = data.groupby('label')\n sortedgrp = datagrp.size().order(ascending=False)\n for i, l in enumerate(sortedgrp.index):\n if sortedgrp[l] > nperlabel:\n print(\"==> %-50s %6d\" % (l, sortedgrp[l]))\n sample = sample.append(RandomSample(data[data['label'] == l],\n nperlabel))\n else:\n break\n print(\"There are %d labels have more than %d articles\" % (i, nperlabel))\n print(\"Sample size: %s articles\" % (len(sample)))\n return sample",
"def get_random_hitsound_group(hs_avail_flags, hs_data, note_metronome_group, default_mask=2):\n possible_hs_groups = get_hitsound_groups(\n hs_avail_flags, hs_data, note_metronome_group)\n if len(possible_hs_groups) > 0:\n return possible_hs_groups[np.random.randint(0, possible_hs_groups.shape[0])]\n else:\n return np.bitwise_and(np.random.randint(0, 16, size=hs_data.shape[1]), default_mask)",
"def create_dummy_observation_grouping():\n alt_axis = ObservationGroupAxis('ALT', ALT_RANGE, fmt='edges')\n az_axis = ObservationGroupAxis('AZ', AZ_RANGE, fmt='edges')\n obs_groups = ObservationGroups([alt_axis, az_axis])\n obs_groups.obs_groups_table['GROUP_ID'][0] = GROUP_ID\n\n return obs_groups",
"def background_group():\n obs_table = config.obs_table.copy()\n\n # Define observation groups\n # zenith_bins = np.array([0, 20, 30, 40, 50, 90])\n zenith_bins = np.array([0, 49, 90])\n # zenith_bins = np.array([0, 30, 90]) # for testing\n axes = [ObservationGroupAxis('ZEN_PNT', zenith_bins, 'bin_edges')]\n obs_groups = ObservationGroups(axes)\n log.info(obs_groups.info)\n\n # Apply observation grouping\n obs_table = obs_groups.group_observation_table(obs_table)\n\n # Store the results\n filename = config.obs_table_grouped_filename\n log.info('Writing {}'.format(filename))\n obs_table.write(str(filename), format='ascii.ecsv')\n\n filename = config.group_def_filename\n log.info('Writing {}'.format(filename))\n obs_groups.obs_groups_table.write(str(filename), format='ascii.ecsv')",
"def generate_three(num_train_samples=200, num_test_samples=32 * 32, noise=False):\n # Generate the dataset\n # Initialize two 2D fields with num_train_samples and num_test_samples resp.\n train_samples = np.random.uniform(0.0, 128.0, (num_train_samples, 2))\n num_test_samples = int(np.sqrt(num_test_samples))\n test_samples = list(itertools.product(np.linspace(0.5, 127.5, num_test_samples),\n np.linspace(0.5, 127.5, num_test_samples)))\n\n # Compute train and test labels\n labels = [[], []]\n\n for k, samples in enumerate((train_samples, test_samples)):\n for i in range(0, len(samples)):\n x = 8 * np.random.poisson()\n sample = samples[i]\n if sample[1] >= np.cos(sample[0] / 128 * np.pi) * 50 + 78:\n if sample[1] - x < np.cos(sample[0] / 128 * np.pi) * 50 + 78 - 4 and k == 0 and noise:\n if sample[1] >= (sample[0] - 30) * 2:\n if sample[1] - 1.2 * x < (sample[0] - 30) * 2 - 4 and k == 0 and noise:\n labels[k] = np.append(labels[k], [2])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if sample[1] + 1.2 * x >= (sample[0] - 30) * 2 + 4 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [2])\n else:\n labels[k] = np.append(labels[k], [0])\n else:\n if sample[1] + x > np.cos(sample[0] / 128 * np.pi) * 50 + 78 + 4 and k == 0 and noise:\n labels[k] = np.append(labels[k], [0])\n elif sample[1] >= (sample[0] - 30) * 2:\n if sample[1] - 1.2 * x < (sample[0] - 30) * 2 - 4 and k == 0 and noise:\n labels[k] = np.append(labels[k], [2])\n else:\n labels[k] = np.append(labels[k], [1])\n else:\n if sample[1] + 1.2 * x >= (sample[0] - 30) * 2 + 4 and k == 0 and noise:\n labels[k] = np.append(labels[k], [1])\n else:\n labels[k] = np.append(labels[k], [2])\n\n # Convert data type\n train_samples = np.asarray(train_samples, dtype=np.float32)\n train_labels = np.asarray(labels[0], dtype=np.float32)\n test_samples = np.asarray(test_samples, dtype=np.float32)\n test_labels = np.asarray(labels[1], dtype=np.float32)\n\n return (train_samples, train_labels), (test_samples, test_labels)",
"def create_group_for_peak():\n for pk in _selected_peaks():\n set_new_group([pk])",
"def init_groups(size):\n global all_groups\n all_groups = []\n for i in range(size-1):\n group = dist.new_group([0,i+1])\n all_groups.append(group)",
"def generate_data_with_irrelevent_attributes(dataset,num_groups = 0):\n\t#dataset = load_dataset(filename) # \"iris-modified.csv\"\n\n\tglobal num_irrelevant\n\tnum_irrelevant = num_groups * 2\n\n\tfor i in range(num_groups):\n\t\tdataset = add_a_new_dimension(dataset,num_groups)\n\t\tdataset = add_a_new_dimension(dataset,num_groups)\n\treturn dataset",
"def __random_sample_patch(self, img, weight, label):\n data_shape_in = tf.shape(img)\n weight_shape_in = tf.shape(weight)\n label_shape_in = tf.shape(label)\n \n data_shape_out = tf.constant(self.config['data_shape'])\n weight_shape_out= tf.constant(self.config['weight_shape'])\n label_shape_out = tf.constant(self.config['label_shape'])\n label_margin = tf.constant(self.label_margin)\n \n data_shape_sub = tf.subtract(data_shape_in, data_shape_out)\n r = tf.random_uniform([], 0, 1.0)\n img_begin = tf.cast(tf.cast(data_shape_sub, tf.float32) * r, tf.int32)\n img_begin = tf.multiply(img_begin, tf.constant([1, 1, 1, 0]))\n \n lab_begin = img_begin + label_margin\n lab_begin = tf.multiply(lab_begin, tf.constant([1, 1, 1, 0]))\n \n img_slice = tf.slice(img, img_begin, data_shape_out)\n weight_slice = tf.slice(weight, img_begin, weight_shape_out)\n label_slice = tf.slice(label, lab_begin, label_shape_out)\n return [img_slice, weight_slice, label_slice]",
"def test_sample_table(self):\n flist = find_samples(j_doe_00_01)\n samples = sample_table(flist)\n grouped = samples.groupby(\"sample\")\n self.assertEqual(len(grouped.groups[\"P001_101_index3\"]), 2)\n self.assertEqual(len(grouped.groups[\"P001_102_index6\"]), 1)",
"def generate_labeled_testdata(image_path, annotation, nb_false, radius,cond):\n features,labels = [],[]\n im_array = read_image(image_path)\n # True samples\n for obj in annotation:\n obj = [int(x + .5) for x in obj] #Project the floating coordinate values onto integer pixel coordinates.\n # For some reason the order of coordinates is inverted in the annotation files\n if True:#check_coordinate_validity(obj[1],obj[0],im_array.shape[0],im_array.shape[1],radius):\n x1 = int(obj[1]/radius)\n y1 = int(obj[0]/radius)\n #print(obj[1],obj[0])\n xx1 = x1*radius\n yy1 = y1*radius\n features.append(out_extract_neighborhood(obj[1],obj[0],im_array,radius,xx1,yy1))\n labels.append(1)\n #features.append(extract_neighborhood(obj[1],obj[0],im_array,radius))\n #labels.append(1)\n if False:\n krange = [obj[0]-4,obj[0],obj[0]+4]\n lrange = [obj[1]-4,obj[1],obj[1]+4]\n for k in krange:\n for l in lrange:\n if check_coordinate_validity(l,k,im_array.shape[0],im_array.shape[1],radius):\n #if k!=obj[0] or l!=obj[1]:\n randn = random.randint(1,9)\n if randn % 2 == 0:\n features.append(out_extract_neighborhood(l,k,im_array,radius))\n labels.append(1)\n # False samples\n for i in range(nb_false):\n c = random_different_coordinates(annotation,im_array.shape[1],im_array.shape[0],radius,cond)\n x1 = int(c[1]/radius)\n y1 = int(c[0]/radius)\n xx1 = x1*radius\n yy1 = y1*radius\n #print(c[1],c[0])\n features.append(out_extract_neighborhood(c[1],c[0],im_array,radius,xx1,yy1))\n labels.append(0)\n return np.array(labels),np.stack(features,axis=1)",
"def search_rand_blades_1(group1, group2):\n rand1 = random.choice(group1.index)\n group2.loc[:, \"options\"] = group1.loc[rand1].w > group2.w\n rand2 = random.choice((group2.options).index)\n return rand1, rand2",
"def stratified_random_resampling(combined_labels, y_label, sampling_method, seed_val):\r\n \r\n #Applies random sampling\r\n random.seed(seed_val)\r\n\r\n \r\n #Merges y_label into a single list to perform undersampling altogether\r\n \r\n combined_labels = combined_labels + [y_label]\r\n \r\n #Determine the number of y_labels\r\n label_val = np.unique(y_label).tolist()\r\n\r\n #Count the number of data in each label\r\n label_count = list()\r\n for i in range(len(label_val)):\r\n label_count.append((y_label == i).sum()) #numpy way of performing .count() function in list format\r\n \r\n #Determine which label has the least count\r\n #******************************\r\n if sampling_method == 'undersampling':\r\n min_max_label = label_count.index(min(label_count))\r\n elif sampling_method == 'oversampling':\r\n min_max_label = label_count.index(max(label_count))\r\n \r\n \r\n #Reorganize the list without the min label count\r\n label_val.remove(min_max_label)\r\n #label_val[min_label] = None\r\n \r\n #Create lists of lists containing label's original index value and its respective labels\r\n \"\"\"\r\n Ex. Suppose we have a y_label = [0,0,1,2,2] that contains 3 different labels\r\n y_label would then be converted into [[0,0], [1,0], [2,1], [3,2], [4,2]] \r\n where the first index within the list is the original index value and the second index\r\n is the y label. This is done to track random.sample() function on which label is randomly selected\r\n \"\"\"\r\n y_label_index = list()\r\n for i in range(len(y_label)):\r\n y_label_index.append([i, y_label[i]])\r\n \r\n #Now separating each of the label into its own lists\r\n list_output = list() #This specific lists output all the labels that need to be removed with its index value\r\n for i in range(len(label_val)):\r\n current_label_list = list()\r\n current_label = label_val[i]\r\n for j in range(len(y_label_index)):\r\n if y_label_index[j][1] == current_label:\r\n current_label_list.append(y_label_index[j])\r\n \r\n\r\n #Specifies how many of the said label needs to be removed based off the min/max label count\r\n if sampling_method == 'undersampling':\r\n target_label_count = label_count[current_label] - label_count[min_max_label]\r\n \r\n #Random sampling within a label without replacement\r\n randomized_list = random.sample(current_label_list, target_label_count) \r\n \r\n elif sampling_method == 'oversampling':\r\n target_label_count = label_count[min_max_label] - label_count[current_label]\r\n \r\n #Random sampling within a label WITH replacement if with replacement option cannot be done\r\n try: \r\n randomized_list = random.sample(current_label_list, target_label_count) \r\n except ValueError:\r\n print('Selected sample is larger than the population, sampling WITH replacement is used for label: ' + str(current_label_list[0][1]))\r\n randomized_list = random.choices(current_label_list, k=target_label_count)\r\n \r\n list_output.append(randomized_list)\r\n\r\n\r\n #---Take the combined_labels and remove each of them based on its index values---\r\n #Combine entire lists into a single list. 
If it is a binary label, then processed_list = list_output\r\n processed_list = list()\r\n for i in range(len(label_val)):\r\n processed_list.extend(list_output[i])\r\n \r\n #The lists must be sorted in reverse order so that when xlabels are removed, it is not affecting its index value\r\n processed_list.sort(reverse = True)\r\n \r\n #Deleting all the available xlabels and ylabels\r\n final_output = list()\r\n for i in range(len(combined_labels)):\r\n target_label = combined_labels[i]\r\n target_label = target_label.tolist()\r\n \r\n if sampling_method == 'undersampling':\r\n for j in tqdm(range(len(processed_list))):\r\n del target_label[processed_list[j][0]]\r\n final_output.append(target_label)\r\n \r\n elif sampling_method == 'oversampling':\r\n for j in tqdm(range(len(processed_list))):\r\n #Insert(index position, insert value)\r\n target_label.insert(processed_list[j][0], target_label[processed_list[j][0]])\r\n final_output.append(target_label)\r\n\r\n #Ouput Summary\r\n print('\\n\\n* Resampling complete * | Method used: ' + str(sampling_method))\r\n print('Original dataset count: ' + str(Counter(y_label)))\r\n \r\n #final_output's last index is always the y_label\r\n y_train_rs = np.array(final_output[len(final_output)-1])\r\n print('Resampled dataset count: ' + str(Counter(y_train_rs)))\r\n \r\n return final_output, list_output",
"def select_groups_randomly(g, max_leaves):\n # TODO implement\n pass",
"def __random_sample_patch(self, img, weight, label):\n data_shape_out = tf.constant(self.config['data_shape'])\n weight_shape_out= tf.constant(self.config['weight_shape'])\n label_shape_out = tf.constant(self.config['label_shape'])\n \n # if output shape is larger than input shape, padding is needed\n img = self.__pad_tensor_to_desired_shape(img, data_shape_out)\n weight = self.__pad_tensor_to_desired_shape(weight, weight_shape_out)\n label = self.__pad_tensor_to_desired_shape(label, label_shape_out)\n \n data_shape_in = tf.shape(img)\n weight_shape_in = tf.shape(weight)\n label_shape_in = tf.shape(label)\n \n label_margin = tf.constant(self.label_margin)\n data_shape_sub = tf.subtract(data_shape_in, data_shape_out)\n \n r = tf.random_uniform(tf.shape(data_shape_sub), 0, 1.0)\n img_begin = tf.multiply(tf.cast(data_shape_sub, tf.float32), r)\n img_begin = tf.cast(img_begin, tf.int32)\n img_begin = tf.multiply(img_begin, tf.constant([1, 1, 1, 0]))\n \n lab_begin = img_begin + label_margin\n lab_begin = tf.multiply(lab_begin, tf.constant([1, 1, 1, 0]))\n \n # print(img, img_begin, data_shape_out)\n img_slice = tf.slice(img, img_begin, data_shape_out)\n weight_slice = tf.slice(weight, lab_begin, weight_shape_out)\n label_slice = tf.slice(label, lab_begin, label_shape_out)\n \n return [img_slice, weight_slice, label_slice]",
"def generate_data(dataset, num_pairs = 10000):\n im1s, im2s, labels = [], [], []\n for _ in range(num_pairs):\n dp1_idx = np.random.randint(dataset.num_datapoints)\n dp2_idx, label = dp1_idx, 1 # same object\n \n im1_idx = np.random.randint(20)\n im2_idx = np.random.randint(20)\n \n im1s.append(255 * dataset[dp1_idx]['depth_images'][im1_idx])\n\n if np.random.random() < 0.5: # Makes half of the training data to be different objects\n while dp2_idx == dp1_idx:\n dp2_idx = np.random.randint(dataset.num_datapoints)\n label = 0\n\n im2s.append(255 * dataset[dp2_idx]['depth_images'][im2_idx])\n labels.append(label)\n im1s, im2s, labels = np.array(im1s), np.array(im2s), np.array(labels)\n return np.expand_dims(im1s, 1), np.expand_dims(im2s, 1), labels",
"def iid(dataset, num_users, seed):\n\tnp.random.seed(seed)\n\t\n\tnum_items = int(len(dataset) / num_users)\n\trem_items = len(dataset) % num_users\n\tif rem_items == 0:\n\t\tprint(\"Each user will get %d samples from the training set.\"%(num_items))\n\telse:\n\t\tprint(\"Each user will get %d samples from the training set. %d samples are discarded.\"%(num_items, rem_items))\n\n\tuser_groups = {} \n\tall_idxs = list(range(len(dataset)))\n\t\n\tfor i in range(num_users):\n\t\tuser_groups[i] = list(np.random.choice(all_idxs, num_items, replace=False))\n\t\tall_idxs = list(set(all_idxs) - set(user_groups[i]))\n\t\n\treturn user_groups",
"def _generate_labeled_data(self, num_data_points, balanced=True):\r\n\r\n self._labeled_data = []\r\n total_possible = self._num_quants*sum(\r\n quantifiers.Quantifier.num_chars**i\r\n for i in range(1, self._max_len+1))\r\n\r\n # if the total possible data pool is smaller than requested,\r\n # just generate all of it\r\n if total_possible <= num_data_points:\r\n print('generating all')\r\n for tup in self._generate_sequences():\r\n self._labeled_data.append(\r\n self._point_from_tuple(tup))\r\n else:\r\n # otherwise, generate num_data_points randomly\r\n # store which data points have already been generated\r\n # generated_idxs = bitarray(total_possible)\r\n generated_idxs = set()\r\n to_generate = min(total_possible, num_data_points)\r\n # tups: a dictionary, keys: (quant_idx, label) pairs\r\n # values: sequences. Will be used for balancing data\r\n tups = defaultdict(list)\r\n\r\n while to_generate > 0:\r\n # generate random tuple\r\n tup = self._generate_random_tuple()\r\n tup_idx = self._tuple_to_idx(tup)\r\n # have not generated this data point yet, so add it\r\n if tup_idx not in generated_idxs:\r\n generated_idxs.add(tup_idx)\r\n to_generate -= 1\r\n seq, label = self._point_from_tuple(tup)\r\n if balanced:\r\n tups[(tup[1], label)].append(seq)\r\n else:\r\n self._labeled_data.append((seq, label))\r\n\r\n if balanced:\r\n # balance across (Q, T/F), instead of just T/F\r\n num_to_sample = min([len(tups[k]) for k in tups])\r\n for (qidx, label) in tups:\r\n # randomly sample right number of sequences\r\n idxs = np.random.choice(len(tups[(qidx, label)]),\r\n num_to_sample,\r\n replace=False)\r\n # add to data\r\n for idx in np.nditer(idxs):\r\n seq = tups[(qidx, label)][idx]\r\n self._labeled_data.append(\r\n (seq, label))\r\n\r\n np.random.shuffle(self._labeled_data)\r\n return self._labeled_data"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Pulls a datagroup from Mongo DB and returns a list of (chunk_id, label) tuples, or a DataFrame with chunk_id and actual columns when df=True. IN
|
def pull_datagroup_from_db(group_name, df=True):
datagroup = []
for item in kdb.test_songs.find({group_name: {"$exists": True}}):
datagroup.append((item['chunk_id'], item[group_name]))
if df:
dg_trans = list(zip(*datagroup))
datagroup = pd.DataFrame({
'chunk_id': dg_trans[0],
'actual': dg_trans[1]
})
return datagroup.filter(['chunk_id', 'actual'])
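
A short follow-up sketch that reads the same placeholder group back as a pandas DataFrame and checks that the two labels are balanced; it assumes pandas is imported as pd, as in the function above.

dg = pull_datagroup_from_db("sax_round1", df=True)
print(dg.head())
# each label should appear roughly n_per_label times
print(dg['actual'].value_counts())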
|
[
"def create_datagroup_in_db(group_name, pos_label, n_per_label='auto'):\n\n assert_msg = \"Invalid input for n_per_label\"\n assert n_per_label == 'auto' or type(n_per_label) == int, assert_msg\n\n pos_ids = np.array([])\n neg_ids = np.array([])\n\n if n_per_label == 'auto':\n n_per_label = kdb.test_songs.find({pos_label: 2}).count()\n\n label = 1\n for val, arr in zip([2,0], [pos_ids, neg_ids]):\n chunks = kdb.test_songs.aggregate([\n {\"$match\": {pos_label: val}},\n {\"$sample\": {\"size\": n_per_label}}\n ]) \n for chunk in chunks:\n chunk_id = chunk['chunk_id']\n result = kdb.test_songs.update_one(\n {\"chunk_id\": chunk_id},\n {\"$set\": {group_name: label}}\n )\n if result.modified_count != 1:\n print(\"*** Error on DB insertion, {}\".format(chunk_id))\n break\n label -= 1\n\n for label in [1,0]:\n members = kdb.test_songs.find(\n {group_name: label} \n ).count()\n print(\"Label {}: {}\".format(label, members))",
"def xrootdChunks(self, dbName):\n _log.debug('list chunks in xrootd: %s', dbName)\n result = self._requestJSON('xrootd', 'dbs/' + dbName, method='GET')\n return self._getKey(result, 'chunkId')",
"def get_groups():\n try:\n cat_groups = list(\n mongo.db.category_groups.find().sort(\"group_name\", 1)\n )\n except Exception as e:\n flash(\n \"Something went wrong when accessing the database to get\"\n + \"category groups\" + e\n )\n return []\n else:\n category_groups = []\n colours = get_colours()\n length = len(colours)\n\n index = 0\n for group in cat_groups:\n category_groups.append(\n {\n \"group_name\": group['group_name'],\n \"colour\": colours[index % length]\n }\n )\n index += 1\n\n return category_groups",
"def retrieveGroups(self, request):\n B = Branch.objects.get(branchCode=request['branchCode'])\n G = Group.objects.filter(branch=B)\n return G",
"def group_dataset(self, group):\n ds = Dataset()\n ds.update(dict(\n [(tag,data_element) for tag,data_element in self.items() if tag.group==group]\n ))\n return ds",
"def chunks(self, dbName, tableName):\n _log.debug('get chunks, table: %s.%s', dbName, tableName)\n resource = dbName + '/tables/' + tableName + '/chunks'\n result = self._requestJSON('dbs', resource)\n return self._getKey(result, 'chunkId')",
"def get_age_groups():\n age_groups = db_tools.ezfuncs.query(\"\"\"\n SELECT *\n FROM shared.age_group\n \"\"\", conn_def='cod')\n return age_groups",
"def load_data_group(self, group):\n result = [self.load_data(image_index) for image_index in group]\n image_group, annotations_group = zip(*result)\n return image_group, annotations_group",
"def get_groups_from_file(data_file):\n groups = []\n try:\n with open(data_file, 'r') as file:\n jc_config = yaml.safe_load(file)\n groups = jc_config['groups']\n\n except (KeyError) as error:\n pass\n except Exception as error:\n raise error\n\n return groups",
"def read():\n return Group.get()",
"def extract_egroups(json_data):\n return json_data.get(\"groups\")",
"def _read_para_dh_group_list(self, code, cbit, clen, *, desc, length, version): # pylint: disable=unused-argument\n _dhid = list()\n for _ in range(clen):\n _dhid.append(_GROUP_ID.get(self._read_unpack(1)))\n\n dh_group_list = dict(\n type=desc,\n critical=cbit,\n length=clen,\n id=tuple(_dhid),\n )\n\n _plen = length - clen\n if _plen:\n self._read_fileng(_plen)\n\n return dh_group_list",
"def query_release_group_mbids(self):\n \n musicbrainzngs.set_useragent(*settings.MUSICBRAINZ_USERAGENT)\n \n mb_type_id = self.model.get_link_type_id(\"musicbrainz\")\n \n with shelve.open(self.mbid_shelve_name, writeback=True) as db:\n if \"mbids\" not in db:\n db[\"mbids\"] = {}\n \n for release_id, release_mbid in self.model.query(\n \"select id, target from links where type_id=?\",\n mb_type_id\n ):\n if release_id in db[\"mbids\"]:\n continue\n \n try:\n release_group_mbid = musicbrainzngs.get_release_by_id(\n release_mbid, includes=[\"release-groups\"]\n )[\"release\"][\"release-group\"][\"id\"]\n \n except musicbrainzngs.ResponseError:\n release_group_mbid = None\n \n db[\"mbids\"][release_id] = (release_mbid, release_group_mbid)\n \n self.release_mbids = db[\"mbids\"]",
"def _fetch_groups(self, group_ids: List[int]) -> Dict[int, List[Dict[str, Any]]]:\n if not group_ids:\n return {}\n\n response = self.datastore.get_many(\n [\n GetManyRequest(\n \"group\",\n group_ids,\n [\"id\", \"meeting_id\", \"user_ids\"],\n )\n ],\n lock_result=False,\n )\n partitioned_groups: Dict[int, List[Dict[str, Any]]] = defaultdict(list)\n for group in response.get(\"group\", {}).values():\n partitioned_groups[group[\"meeting_id\"]].append(group)\n return partitioned_groups",
"def get_nodegroup_by_id(self, context, cluster_id, nodegroup_id):",
"def get_rack_groups(self, **kwargs):\n return self.netbox_con.get('/dcim/rack-groups/', **kwargs)",
"def process(data, siteid):\n data = convert_type(data)\n\n groupons = {}\n for g in storage.query(storage.GROUPON, siteid=siteid):\n groupons[g.url] = g\n\n for g in data:\n if g.url in groupons:\n db_entity = groupons[g.url]\n props = vars(g)\n for p in props:\n setattr(db_entity, p, props[p])\n else:\n db_entity = storage.Groupon(key_name=g.url, **vars(g))\n groupons[g.url] = db_entity\n\n db.put(groupons.values())\n\n return data",
"def Groups(self) -> GroupCollection:",
"def getGroup(self, search_string): \n \n groups = []\n group = requests.get(self.client.baseurl+'groups',\n auth=self.client.cred,\n params={'searchPattern':search_string,\n 'searchType':4, 'limit':3000})\n if group.json().get('groups'):\n for grp in group.json()['groups']:\n groups.append(grp['displayName'])\n else:\n return \"Group not Found\"\n return groups"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
OBSOLETE, USE create_datagroup_in_db(), pull_datagroup_from_db(), and tts() instead. Creates dataset labels in MongoDB under the provided round name. First pass only deals with two labels; future versions will accommodate more as necessary. IN
|
def tts_full(
round_name,
train_size=0.8,
n_labels=2,
n_per_label='auto',
pos_label='sax'
):
pos_ids = np.array([])
neg_ids = np.array([])
if n_per_label == 'auto':
n_per_label = kdb.test_songs.find({pos_label: 2}).count()
# find cutoff index value that would split arrays into appropriately-sized
# groups for train/test labeling, if this would be faster than generating
# a train/test selection on each insertion
# cutoff_ix = int(train_size * n_per_label)
# pull IDs, label each record accordingly
label = n_labels - 1
for val, arr in zip([2,0], [pos_ids, neg_ids]):
chunks = kdb.test_songs.aggregate([
{"$match": {pos_label: val}},
{"$sample": {"size": n_per_label}}
])
for chunk in chunks:
arr = np.append(arr, chunk["chunk_id"])
# shuffle array if needed
# np.random.shuffle(arr)
# insert field with label number and train/test indicator
for chunk_id in arr:
tt = np.random.choice(
['train','test'],
p=[train_size, 1-train_size]
)
result = kdb.test_songs.update_one(
{"chunk_id": chunk_id},
{"$set": {round_name: (label, tt)}}
)
if result.modified_count != 1:
print("*** error on db insertion, {}".format(chunk_id))
break
label -= 1
# print validation statments
for label in [1,0]:
for group in ['train','test']:
members = kdb.test_songs.find(
{round_name: (label, group)}
).count()
print("Label {}, {}: {}".format(label, group, members))
|
[
"def create_datagroup_in_db(group_name, pos_label, n_per_label='auto'):\n\n assert_msg = \"Invalid input for n_per_label\"\n assert n_per_label == 'auto' or type(n_per_label) == int, assert_msg\n\n pos_ids = np.array([])\n neg_ids = np.array([])\n\n if n_per_label == 'auto':\n n_per_label = kdb.test_songs.find({pos_label: 2}).count()\n\n label = 1\n for val, arr in zip([2,0], [pos_ids, neg_ids]):\n chunks = kdb.test_songs.aggregate([\n {\"$match\": {pos_label: val}},\n {\"$sample\": {\"size\": n_per_label}}\n ]) \n for chunk in chunks:\n chunk_id = chunk['chunk_id']\n result = kdb.test_songs.update_one(\n {\"chunk_id\": chunk_id},\n {\"$set\": {group_name: label}}\n )\n if result.modified_count != 1:\n print(\"*** Error on DB insertion, {}\".format(chunk_id))\n break\n label -= 1\n\n for label in [1,0]:\n members = kdb.test_songs.find(\n {group_name: label} \n ).count()\n print(\"Label {}: {}\".format(label, members))",
"def graph_data_set_labels(data, dbcon):\n for dataset in data['datasets'].values():\n dataset['label'] = db_get_url_label(dbcon, dataset['url_id'])",
"def saveData(self, df_labels, df_preds, labelhistory, dest='mongodb',\r\n save_preds=True):\r\n\r\n if dest == 'file':\r\n # Keep a copy of the original datasets, just in case some\r\n # mistakes are made during labelling\r\n date_str = datetime.now().strftime(\"%Y%m%d%H%M%S%f\")\r\n if os.path.isfile(self.dataset_file):\r\n dest_file = (self.used_path + self.dataset_fname + '_' +\r\n date_str + '.pkl')\r\n shutil.move(self.dataset_file, dest_file)\r\n if os.path.isfile(self.datalabels_file):\r\n dest_file = (self.used_path + self.datalabels_fname + '_' +\r\n date_str + '.pkl')\r\n shutil.move(self.datalabels_file, dest_file)\r\n if os.path.isfile(self.datapreds_file):\r\n dest_file = (self.used_path + self.datapreds_fname + '_' +\r\n date_str + '.pkl')\r\n shutil.move(self.datapreds_file, dest_file)\r\n if os.path.isfile(self.labelhistory_file):\r\n dest_file = (self.used_path + self.labelhistory_fname + '_' +\r\n date_str + '.pkl')\r\n shutil.move(self.labelhistory_file, dest_file)\r\n\r\n # Save label history\r\n with open(self.labelhistory_file, 'wb') as f:\r\n pickle.dump(labelhistory, f)\r\n\r\n # Save dataframes to files\r\n df_labels.to_pickle(self.datalabels_file)\r\n if save_preds:\r\n df_preds.to_pickle(self.datapreds_file)\r\n\r\n else:\r\n\r\n # Start a db connection\r\n dbName = self.db_info['name']\r\n hostname = self.db_info['hostname']\r\n user = self.db_info['user']\r\n pwd = self.db_info['pwd']\r\n label_coll_name = self.db_info['label_coll_name']\r\n mode = self.db_info['mode']\r\n\r\n # history_coll_name = self.db_info['history_coll_name']\r\n port = self.db_info['port']\r\n\r\n try:\r\n print(\"Trying db connection...\")\r\n client = MongoClient(hostname)\r\n client[dbName].authenticate(user, pwd)\r\n db = client[dbName]\r\n # history_collection = db[history_coll_name]\r\n print(\"Connected to mongodb @ {0}:[{1}]\".format(\r\n hostname, port))\r\n except Exception as E:\r\n print(\"Fail to connect mongodb @ {0}:{1}, {2}\".format(\r\n hostname, port, E))\r\n exit()\r\n\r\n start_time = time.time()\r\n print(\"Saving database. This might take a while...\")\r\n if mode == 'rewrite':\r\n # The database is deleted completely and the whole set of\r\n # labels and predictions in data are loaded\r\n label_collection = db[label_coll_name]\r\n label_collection.drop()\r\n\r\n # Open collection, or create it, if it does not exist.\r\n label_collection = db[label_coll_name]\r\n\r\n for i, w in enumerate(df_labels.index):\r\n # For each wid, create the corresponding data dictionary to\r\n # send to the db\r\n dataw = {}\r\n dataw['relabel'] = df_labels.loc[w, ('info', 'relabel')]\r\n dataw['marker'] = df_labels.loc[w, ('info', 'marker')]\r\n dataw['userId'] = df_labels.loc[w, ('info', 'userId')]\r\n dataw['date'] = df_labels.loc[w, ('info', 'date')]\r\n dataw['weight'] = df_labels.loc[w, ('info', 'weight')]\r\n dataw['label'] = {}\r\n for c in self.categories:\r\n dataw['label'][c] = df_labels.loc[w, ('label', c)]\r\n\r\n # Store in db.\r\n if mode == 'rewrite':\r\n # Insert data in the database\r\n label_collection.insert({'idna': w, 'value': dataw})\r\n else: # mode == 'update'\r\n # The database is updated. Only the wids in dataw are\r\n # modified.\r\n label_collection.replace_one(\r\n {'idna': w}, {'idna': w, 'value': dataw}, upsert=True)\r\n\r\n print((\"\\rSaving entry {0} out of {1}. Speed {2} entries\" +\r\n \"/min\").format(i + 1, len(df_labels), 60 * (i+1) /\r\n (time.time() - start_time)), end=\"\")",
"def add_labels(conn: Connection, statements=\"statements\"):\n # Create a tmp labels table\n with conn.begin():\n conn.execute(\"CREATE TABLE tmp_labels(term TEXT PRIMARY KEY, label TEXT)\")\n if str(conn.engine.url).startswith(\"sqlite\"):\n # Add all terms with label\n conn.execute(\n f\"\"\"INSERT OR IGNORE INTO tmp_labels SELECT subject, value\n FROM {statements} WHERE predicate = 'rdfs:label'\"\"\"\n )\n # Update remaining with their ID as their label\n conn.execute(\n f\"\"\"INSERT OR IGNORE INTO tmp_labels\n SELECT DISTINCT subject, subject FROM {statements}\"\"\"\n )\n conn.execute(\n f\"\"\"INSERT OR IGNORE INTO tmp_labels\n SELECT DISTINCT predicate, predicate FROM {statements}\"\"\"\n )\n else:\n # Do the same for a psycopg2 Cursor\n conn.execute(\n f\"\"\"INSERT INTO tmp_labels\n SELECT subject, value FROM {statements} WHERE predicate = 'rdfs:label'\n ON CONFLICT (term) DO NOTHING\"\"\"\n )\n conn.execute(\n f\"\"\"INSERT INTO tmp_labels\n SELECT DISTINCT subject, subject FROM {statements}\n ON CONFLICT (term) DO NOTHING\"\"\"\n )\n conn.execute(\n f\"\"\"INSERT INTO tmp_labels\n SELECT DISTINCT predicate, predicate FROM {statements}\n ON CONFLICT (term) DO NOTHING\"\"\"\n )",
"def load_dataset_multi_label(dataset_name):\n in_name = f\"./data/kaggle/{dataset_name}.pkl\"\n data = joblib.load(in_name)\n return (data[\"data\"], data[\"multi_aspects\"])",
"def loadData(self):\r\n\r\n dbName = self.db_info['name']\r\n hostname = self.db_info['hostname']\r\n user = self.db_info['user']\r\n pwd = self.db_info['pwd']\r\n label_coll_name = self.db_info['label_coll_name']\r\n history_coll_name = self.db_info['history_coll_name']\r\n port = self.db_info['port']\r\n\r\n try:\r\n print(\"Trying connection...\")\r\n client = MongoClient(hostname)\r\n client[dbName].authenticate(user, pwd)\r\n db = client[dbName]\r\n print(\"Connected to mongodb @ {0}:[{1}]\".format(\r\n hostname, port))\r\n except Exception as E:\r\n print(\"Fail to connect mongodb @ {0}:{1}, {2}\".format(\r\n hostname, port, E))\r\n exit()\r\n\r\n # Read label collection\r\n collection = db[label_coll_name]\r\n num_urls = collection.count()\r\n data = {}\r\n if num_urls > 0:\r\n dataDB = collection.find({})\r\n for i in range(num_urls):\r\n wid = dataDB[i]['idna']\r\n data[wid] = dataDB[i]['value']\r\n if 'url' not in data[wid]:\r\n data[wid]['url'] = wid\r\n\r\n # Read history\r\n collection = db[history_coll_name]\r\n num_events = collection.count()\r\n labelhistory = {}\r\n if num_events > 0:\r\n dataDB = collection.find({})\r\n for i in range(num_events):\r\n wid = dataDB[i]['idna']\r\n labelhistory[wid] = dataDB[i]['value']\r\n\r\n df_labels, df_preds = self.get_df(data, labelhistory)\r\n\r\n # In the current version, predictions are not being stored in the\r\n # mongo db. They must be loaded from files.\r\n if os.path.isfile(self.datapreds_file):\r\n # Load prediction dataframes stored in pickle files\r\n df_preds = pd.read_pickle(self.datapreds_file)\r\n\r\n return df_labels, df_preds, labelhistory",
"def pull_datagroup_from_db(group_name, df=True):\n\n datagroup = []\n \n for item in kdb.test_songs.find({group_name: {\"$exists\": True}}):\n datagroup.append((item['chunk_id'], item[group_name]))\n\n if df:\n dg_trans = list(zip(*datagroup))\n datagroup = pd.DataFrame({\n 'chunk_id': dg_trans[0],\n 'actual': dg_trans[1]\n })\n\n return datagroup.filter(['chunk_id', 'actual'])",
"def createDataset(filename, group, dataset, data):\n\n deleteDataset(filename, group, dataset)\n\n FILE = h5py.File(filename, \"r+\")\n\n GROUP = FILE[group]\n\n GROUP.create_dataset(dataset, data = data)\n\n print(\"[CREATE]: <{:s}> dataset in <{:s}> group created.\".format(dataset, group))\n\n FILE.close()",
"def test_create_from_dataframe_run_general(self, runs_gridsearch: RunList):\n\n df = runs_gridsearch.extract(\n r\"(?P<algo>[\\w]+)-(?P<env_id>[\\w]+)-seed(?P<seed>[\\d]+)\")\n assert 'run' in df.columns and 'hypothesis' not in df.columns\n\n # Default: group by ['algo', 'env_id']\n ex = V(Experiment.from_dataframe(df, by=[\"algo\", \"env_id\"], name=\"A\"))\n assert len(ex.hypotheses) == 6\n assert isinstance(ex.hypotheses[0].name, str)\n assert ex.hypotheses[0].name == repr({\n 'algo': 'ppo',\n 'env_id': 'halfcheetah'\n })\n\n # Group by ['env_id', 'algo'] and use a custom namer\n namer = lambda t, _: f\"{t['env_id']}-{t['algo']}\"\n ex = V(Experiment.from_dataframe(\n df, by=[\"env_id\", \"algo\"], hypothesis_namer=namer, name=\"B\")) # yapf: disable\n assert ex.hypotheses[0].name == 'halfcheetah-ppo'\n assert len(ex.hypotheses) == 6",
"def label_predictions(data):\n\n # if isinstance(data, str):\n # data = eval(data)\n # if isinstance(data, dict) and len(data) == 1:\n # for key, value in data.items():\n # data = value\n # if isinstance(data, str):\n # data = eval(data)\n # if isinstance(data, dict):\n # data = [data]\n\n data = extract_json_from_request(data)\n\n if not isinstance(data, list) or not all([isinstance(i, dict) for i in data]):\n return bad_request(f'Supplied data was not in the correct format. data must be a list of dictionaries.')\n\n if not all(['_id' in record.keys() for record in data]) and all(['label' in record.keys() for record in data]):\n return bad_request(f'All records must contain an \"_id\" and a \"label\".')\n\n ids = [record['_id'] for record in data]\n labels = [record['label'] for record in data]\n if not all([isinstance(i, str) for i in ids]) and not all([isinstance(label, str) for label in labels]):\n return bad_request(f'All \"_id\" and \"label\" values must be strings.')\n if not all([label.lower() in ['setosa', 'versicolor', 'virginica'] for label in labels]):\n return bad_request(f'Only \"setosa\", \"versicolor\", or \"virginica\" are allowed as label values.')\n\n updated_ids = []\n invalid_ids = []\n col = get_mongo_collection(database=app.config.get('DATABASE'), collection=app.config.get('PREDICTION_COLLECTION'))\n for record in data:\n try:\n result = col.update_one(\n # filter parameter\n {'_id': ObjectId(record['_id'])},\n # set the label\n {'$set': {'label': record['label'].lower()}},\n upsert=False\n )\n if result.matched_count != 1:\n invalid_ids.append(record['_id'])\n else:\n updated_ids.append(record['_id'])\n except InvalidId:\n invalid_ids.append(record['_id'])\n\n if invalid_ids:\n message = f'only {len(updated_ids)} records labeled. invalid ids supplied: {[i for i in invalid_ids]}'\n else:\n message = f'{len(updated_ids)} records labeled'\n\n label_result = {'label_result': message}\n\n return label_result",
"def divide_labels():\n random.seed(10)\n\n if config.enable_modelarts:\n import moxing as mox\n mox.file.shift('os', 'mox')\n pic_names = os.listdir(config.data_path)\n dic = []\n with open(config.label_path) as f:\n for line in f:\n name = line.split()[1]+'.jpg'\n lst = map(int, line.split()[2:12])\n lst = list(lst)\n score = round(sum([(i+1)*j for i, j in enumerate(lst)])/sum(lst), 7)\n dic.append([name]+line.split()[2:12]+[score])\n df = pd.DataFrame(dic)\n df_new = df[df[0].isin(pic_names)].copy()\n df_new.reset_index(drop=True, inplace=True)\n test_img = random.sample(pic_names, 25597)\n\n test_label = df_new[df_new[0].isin(test_img)].copy()\n train_label = df_new[~df_new[0].isin(test_img)].copy()\n test_label.to_csv(config.val_label_path, header=0)\n train_label.to_csv(config.train_label_path, header=0)",
"def create_dataset_specification_and_records(self):\n\n # We chose the 5 smallest alphabets (i.e. those with the least characters)\n # out of the 'background' set of alphabets that are intended for train/val\n # We keep the 'evaluation' set of alphabets for testing exclusively\n # The chosen alphabets have 14, 14, 16, 17, and 20 characters, respectively.\n validation_alphabets = [\n 'Blackfoot_(Canadian_Aboriginal_Syllabics)',\n 'Ojibwe_(Canadian_Aboriginal_Syllabics)',\n 'Inuktitut_(Canadian_Aboriginal_Syllabics)', 'Tagalog',\n 'Alphabet_of_the_Magi'\n ]\n\n training_alphabets = []\n data_path_trainval = os.path.join(self.data_root, 'images_background')\n for alphabet_name in sorted(tf.io.gfile.listdir(data_path_trainval)):\n if alphabet_name not in validation_alphabets:\n training_alphabets.append(alphabet_name)\n assert len(training_alphabets) + len(validation_alphabets) == 30\n\n data_path_test = os.path.join(self.data_root, 'images_evaluation')\n test_alphabets = sorted(tf.io.gfile.listdir(data_path_test))\n assert len(test_alphabets) == 20\n\n self.parse_split_data(learning_spec.Split.TRAIN, training_alphabets,\n data_path_trainval)\n self.parse_split_data(learning_spec.Split.VALID, validation_alphabets,\n data_path_trainval)\n self.parse_split_data(learning_spec.Split.TEST, test_alphabets,\n data_path_test)",
"def create_dataset(body: Dataset):\n # TODO: Create dataset entry into database\n return dict(**body.dict(), id=1)",
"def store_dataset(group, name, obj):\n dset = group.create_dataset(name, **obj.kwds)\n update_attrs(dset, obj.attrs)",
"def createDetailGroup(groups_nickname: list):\n with SqlManager(__dbaddress__) as s:\n for i in groups_nickname:\n try:\n s.execute('''CREATE TABLE {}(\n id INTEGER primary key autoincrement,\n nickname TEXT,\n displayname TEXT,\n enterTime DATE,\n outTime DATE DEFAULT Null\n );'''.format(i))\n except sqlite3.OperationalError:\n pass",
"def setup_dataset(\n dataset_id: str,\n gpf_instance: GPFInstance,\n *studies: GenotypeData,\n dataset_config_udate: str = \"\") -> GenotypeData:\n # pylint: disable=import-outside-toplevel\n from box import Box\n from dae.studies.study import GenotypeDataGroup\n\n dataset_config = {\n \"id\": dataset_id\n }\n if dataset_config_udate:\n config_update = yaml.safe_load(dataset_config_udate)\n dataset_config.update(config_update)\n\n dataset = GenotypeDataGroup(\n Box(dataset_config, default_box=True), studies)\n # pylint: disable=protected-access\n gpf_instance._variants_db.register_genotype_data(dataset)\n\n return dataset",
"def create_labels(testing_labels_data):\r\n labels = []\r\n for e in testing_labels_data:\r\n labels.append[e['hotel_id']]\r\n return np.array(labels)",
"def prepare_dataset(filename, group_name):\n with PoseDatasetIO(dataset=filename, columns=COLUMNS, mode='r') as dataset:\n dataset = {node._v_name: dataset.store.select(node._v_pathname).\n groupby('pose').mean().\n rename(_rm_stand_pref)\n for node in dataset.store.get_node(group_name)}\n return pd.concat(dataset)",
"def label_adgroups(path_credentials, is_debug):\n adwords_service = freedan.AdWordsService(path_credentials)\n for account in adwords_service.accounts():\n print(account)\n\n ag_label = Label(LABEL_TEXT, label_id=None)\n\n # in case 1: provide label_id in initiation and skip Label.update_id call\n # in other cases: adapt 'action_if_not_found' parameter to your needs\n\n ag_label.update_id(adwords_service, is_debug=is_debug, action_if_not_found=\"create\")\n operations = [ag_label.apply_on_adgroup_operation(adgroup_id=ADGROUP_ID)]\n\n # upload will display an error if debug mode and label isn't existing yet\n adwords_service.upload(operations, is_debug=is_debug, method=\"standard\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Make spectrograms out of all audio files in a given directory for which spectrograms do not already exist in the output directory. IN
|
def batch_spectros(
dir_in="../audio/wav_chunked",
dir_out="../specs/mel",
files='labeled',
sample_rate=22050,
hl=256,
n_fft=1024,
n_mels=512,
normalize=False
):
assert_msg = "Error: files arg must be either 'all' or 'labeled'"
assert files == 'all' or files == 'labeled', assert_msg
existing = set()
for spec_fpath in glob(dir_out + "/*.npy"):
chunk_id = os.path.splitext(os.path.basename(spec_fpath))[0]
existing.add(chunk_id)
chunk_queue = set()
if files == 'all':
for wav_fpath in glob(dir_in + "/*.wav"):
chunk_id = os.path.splitext(os.path.basename(wav_fpath))[0]
chunk_queue.add(chunk_id)
    elif files == 'labeled':
labeled_ids = kdb.test_songs.find(
{"labeled": True}
)
for doc in labeled_ids:
chunk_queue.add(doc['chunk_id'])
else:
pass
# expand here to accept a custom search term for MongoDB
# remove chunk IDs with existing spectros from the queue
chunk_queue -= existing
try:
new_specs = 0
for chunk_id in chunk_queue:
y, _ = audio_loader(
chunk_id,
dir_in=dir_in,
sample_rate=sample_rate,
duration=5.0
)
spectro = make_spectro(
y,
sample_rate=sample_rate,
hl=hl,
n_fft=n_fft,
n_mels=n_mels,
normalize=normalize
)
spec_path_out = os.path.join(dir_out, chunk_id)
np.save(spec_path_out, spectro)
new_specs += 1
print("{} spectrograms created".format(new_specs))
    except Exception as e:
        print("Something bad has happened: {}".format(e))
|
[
"def analyse_multiple_audio_files(context, source_path, dest_path):\n context.obj[\"dest_path\"] = dest_path\n for file in os.listdir(source_path):\n file_path = os.path.join(file)\n context.invoke(\n generate_spectrograms,\n source_path=os.path.join(source_path, file_path),\n dest_path=dest_path,\n )",
"def audio_folder_gen():\n for folder in os.scandir(inp_folder_path):\n if folder.name in skip_folders or not folder.is_dir():\n continue\n yield folder",
"def wav_to_PCA(infolder='../speech_corpora/', outfile='../Data/processedspeech12.npy', \n pcafilename = '../Data/spectropca12.pickle', testfile = 'test12.npy', ncomponents = 200, whiten = True, maxspectros=100000):\n infilelist = []\n for pth, subd, files in os.walk(infolder):\n for fname in files:\n fstring = os.path.join(pth,fname)\n if fstring.lower().endswith('.wav'):\n infilelist.append(fstring)\n # infilelist = listdir(infolder)\n \n allspectros = [] # don't know length in advance, use list for flexible append. there's probably a faster way\n for infilename in infilelist:\n logflogpsd = wav_to_logPSD(infilename)\n \n nchunks = int((logflogpsd.shape[0] - ntimepoints)*(stride/logflogpsd.shape[0]))\n for chunk in range(nchunks):\n # convert each chunk to a vector and store. throw out any chunk with average power below cutoff\n start = chunk*stride #ntimepoints*chunk\n finish = chunk*stride + ntimepoints#ntimepoints*(chunk+1)\n temp = logflogpsd[start:finish,:]\n if np.mean(10**temp) > cutoff/nfreqs:\n allspectros.append(temp.flatten())\n if len(allspectros) > maxspectros:\n break\n allspectros = np.array(allspectros)\n \n # regularize, normalize spectrograms\n allspectros = np.nan_to_num(allspectros)\n allspectros = np.clip(allspectros,-1000,1000)\n# datamean = np.mean(allspectros, axis=0)\n# allspectros = allspectros - datamean\n# datastd = np.std(allspectros, axis=0)\n# allspectros = allspectros/datastd\n allspectros = allspectros - allspectros.mean(axis=1)[:,np.newaxis]\n #this is just for compatibility with other code\n datamean = 0\n datastd = 1\n\n # do PCA\n pca = PCA(dim=ncomponents, whiten=whiten)\n print (\"Fitting the PCA...\")\n pca.fit(allspectros)\n print (\"Done. Transforming and saving vectors...\")\n reduced = pca.transform(allspectros)\n \n np.save(outfile, reduced) \n with open(pcafilename, 'wb') as f:\n pickle.dump([pca, (ntimepoints, nfreqs), datamean, datastd], f) \n print (\"Done.\")\n\n # save a file with 9 example spectrograms and their reconstructions\n comparison = allspectros[:9,:]\n recons = pca.inverse_transform(reduced[:9,:])\n np.save(testfile, np.concatenate((comparison, recons),axis=0))\n \n return reduced, pca, (ntimepoints, nfreqs), datamean, datastd",
"def create_spectrogram(self, audio_path):\n audio_name = audio_path.split(\"/\")[-1].replace(\".wav\", \"\")\n fs, w = wavfile.read(audio_path)\n if len(w.shape) == 2:\n w = w[:, 0]\n dur = len(w) / fs\n\n cmap = plt.cm.get_cmap('Greys')\n cmap.set_under('w')\n f, t, sxx = scipy.signal.spectrogram(w, fs=fs, window='hann', nperseg=int(fs / 12.32),\n noverlap=int(self.overlap * (fs / 12.32)), mode='psd', nfft=16000)\n sxx_db = 10 * np.log10(abs(sxx[:1500, :]) / 2 * 10e-5)\n\n dpi = 50\n fig = plt.figure(figsize=(dur * self.sec_size // dpi, self.sec_size * 2 // dpi), dpi=dpi, frameon=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n extent = (0, dur * self.sec_size // dpi, 0, self.sec_size * 2 // dpi)\n plt.imshow(sxx_db[::-1, :], cmap=cmap, extent=extent, norm=mpl.colors.Normalize(vmin=-50, vmax=0, clip=False))\n plt.savefig(osp.join(self.out_path, '%s.jpeg' % audio_name), dpi=dpi, frameon=False)\n\n # Resize saved image in case of bad matplotlib result\n img = imread(osp.join(self.out_path, '%s.jpeg' % audio_name))\n img = resize(img, (dur * self.sec_size, self.sec_size * 2)[::-1])\n imsave(osp.join(self.out_path, '%s.jpeg' % audio_name), img)",
"def resample_folder(input_folder, output_folder, fs, regex):\n # filedir = os.path.dirname(os.path.realpath(__file__))\n # octave.addpath(filedir)\n # add the matlab functions to octave dir here\n\n files = glob.glob(os.path.join(input_folder, regex), recursive=True)\n for f in tqdm.tqdm(files):\n\n audio, fs_read = torchaudio.load(f)\n audio = audio[0].numpy()\n audio = signal.resample_poly(audio, fs, fs_read)\n\n # tmp = octave.activlev(audio.tolist(), fs, \"n\")\n # audio, _ = tmp[:-1].squeeze(), tmp[-1]\n\n peak = np.max(np.abs(audio))\n audio = audio / peak\n audio = torch.from_numpy(audio).float()\n\n relative_path = os.path.join(\n Path(f).relative_to(Path(input_folder)).parent,\n Path(f).relative_to(Path(input_folder)).stem\n + \"_peak_{}.wav\".format(peak),\n )\n\n os.makedirs(\n Path(\n os.path.join(\n output_folder, Path(f).relative_to(Path(input_folder))\n )\n ).parent,\n exist_ok=True,\n )\n\n torchaudio.save(\n os.path.join(output_folder, relative_path),\n audio.reshape(1, -1),\n fs,\n )",
"def wav_to_mp3_batch(dir_in,\n dir_out=\"../audio/mp3_chunked\",\n bitrate=96\n ):\n\n existing = set()\n bitrate = str(bitrate)\n \n for mp3_fpath in glob(dir_out + \"/*.mp3\"):\n f_id = os.path.splitext(os.path.basename(mp3_fpath))[0]\n existing.add(f_id)\n \n for wav_fpath in glob(dir_in + \"/*.wav\"):\n f_id = os.path.splitext(os.path.basename(wav_fpath))[0]\n if f_id not in existing:\n command = \"lame -b{} {}/{}.wav {}/{}.mp3\".format(bitrate, \n dir_in, \n f_id, \n dir_out, \n f_id)\n result = os.system(command) \n if result != 0:\n print(\"*** ERROR: {} not converted\".format(fb_id))",
"def preprocess_audio(self):\n #remove the data directory if exists\n if os.path.exists(self.data_dir):\n shutil.rmtree(self.data_dir)\n #iterate over speakers\n speakers = sorted(os.listdir(self.conf['inpath']))\n for sp in tqdm(speakers, desc=\"Converting Audio\"):\n speaker_path = os.path.join(self.conf['inpath'], sp)\n wav_filenames = os.listdir(speaker_path)\n for wav in wav_filenames:\n inwav = os.path.join(speaker_path, wav)\n outwav = os.path.join(self.data_dir, wav)\n\n convert_wav(inwav,\n outwav,\n no_channels = self.conf['no_channels'],\n sampling_rate = self.conf['sampling_rate'],\n bit_precision = self.conf['bit_precision'])\n\n \n #remove the enroll directory if exists\n if os.path.exists(self.enroll_dir):\n shutil.rmtree(self.enroll_dir)\n #remove the test directory if exists\n if os.path.exists(self.test_dir):\n shutil.rmtree(self.test_dir)\n \n #create audio/enroll directory\n safe_makedir(self.enroll_dir)\n #create audio/test directory\n safe_makedir(self.test_dir)\n\n #parse num of sessions from configuration\n enroll_sessions = self.conf['enroll_sessions']\n test_sessions = self.conf['test_sessions']\n assert enroll_sessions+test_sessions <= 10,\\\n \"The summation of all sessions must be less than or equal 10!!\"\n #iterate over all preprocessed waves\n wav_filenames = os.listdir(self.data_dir)\n for wav in tqdm(wav_filenames, desc=\"Copying enroll/test waves\"):\n _, sess, _, _ = wav.split(\".\")\n inwav = os.path.join(self.data_dir, wav)\n if int(sess) <= enroll_sessions:\n outwav = os.path.join(self.enroll_dir, wav)\n shutil.copyfile(inwav, outwav)\n elif int(sess) <= enroll_sessions+test_sessions:\n outwav = os.path.join(self.test_dir, wav)\n shutil.copyfile(inwav, outwav)",
"def FetchAudios(self, input_dir, output_dir):\n print(\"Start Fetch Audios...\")\n video_pathes = sorted(glob(os.path.join(working_dir, input_dir, '*/*.mp4')))\n for video_path in tqdm(video_pathes):\n output_path = video_path.replace(input_dir, output_dir).replace('.mp4', '.wav')\n if not os.path.exists(os.path.dirname(output_path)):\n os.makedirs(os.path.dirname(output_path))\n # 调用ffmpeg执行音频提取功能\n cmd = 'ffmpeg -i ' + video_path + ' -f wav -vn ' + \\\n output_path + ' -loglevel quiet'\n os.system(cmd)",
"def handle_create_spectrograms(state):\n states = []\n\n if state == 'ALL':\n states = ['FOCUSED', 'UNFOCUSED', 'DROWSY']\n else:\n states = [state]\n\n # need to check if state-data directory exists in path\n if not os.path.isdir(STATE_DATA_OUTPUT):\n print('Error: Directory \\'{0}\\' with raw input data doesnt exists!'.format(STATE_DATA_OUTPUT))\n exit(1)\n\n # iterate through states that we need to generate spectrogram images for\n for curr_state in states:\n output_root = os.path.join(CWD, curr_state)\n\n create_output_directory(output_root)\n\n path_to_search = os.path.join(STATE_DATA_OUTPUT, '**', curr_state)\n state_data_files = glob.glob(path_to_search, recursive=True)\n\n for filename in state_data_files:\n output_subpath = filename.replace(STATE_DATA_OUTPUT, '')\n output_subpath = output_subpath.replace(curr_state, '')\n output_filepath = '{0}{1}'.format(output_root, output_subpath)\n\n os.makedirs(output_filepath)\n\n # need to get data from file\n data = load_raw_state_data(filename)\n\n output_image = os.path.join(output_filepath, curr_state)\n\n # 128, 256, 10mins, ./FOCUSED/eeg_record7/10/FOCUSED\n interate_data(FREQUENCY, M, data, output_image)",
"def process_song(source_dir, songname, target_dir, hop_length=512, n_fft=1024, context_size=25):\n \n # combine all the drumless tracks into one\n melo, sr = librosa.load(source_dir + songname + \"/vocals.wav\")\n melo += librosa.load(source_dir + songname + \"/other.wav\")[0]\n melo += librosa.load(source_dir + songname + \"/bass.wav\")[0]\n \n # drum track\n drum, sr = librosa.load(source_dir + songname + \"/drums.wav\")\n \n # mixture track\n mix, sr = librosa.load(source_dir + songname + \"/mixture.wav\")\n \n # take spectrograms of the 3 tracks\n melo_spec = np.abs(librosa.stft(melo, hop_length=hop_length, n_fft=n_fft))\n drum_spec = np.abs(librosa.stft(drum, hop_length=hop_length, n_fft=n_fft))\n mix_spec = np.abs(librosa.stft(mix, hop_length=hop_length, n_fft=n_fft))\n \n n_bins, n_frames = melo_spec.shape\n \n # container for frame names and associated labels\n fnames = []\n \n # \n for i in range(n_frames):\n # container for one image of size n_bins, context_size\n x = np.zeros(shape=(n_bins, context_size))\n \n # frame each STFT time step with context_size//2 before and after (pad with 0s at the edges)\n for j in range(context_size):\n curr_idx = i - context_size//2 + j\n \n # if current index out of range, leave 0s as padding\n if curr_idx < 0:\n continue\n elif curr_idx >= n_frames:\n break\n \n else:\n x[:, j] = mix_spec[:, curr_idx]\n \n # save the current x frame\n xfname = target_dir + \"x/%s_%d.npy\" % (songname, i)\n np.save(xfname, x)\n \n # calculate the IBM for the current x frame\n y = drum_spec[:, i] - melo_spec[:, i]\n y = np.where(y > 0, 1, 0)\n \n # save the IBM\n yfname = target_dir + \"y/%s_%d.npy\" % (songname, i)\n np.save(yfname, y)\n \n fnames.append((xfname, yfname))\n \n # save the array of x-y filename associations as a ndarray \n fnames = np.asarray(fnames)\n np.save(target_dir + \"%s_fnames\" % songname, fnames)",
"def make_elans(input_dir: str, output_dir: str, copy_wavs: bool):\n # Process each file\n files = glob.glob(f'{input_dir}/**/*.txt', recursive=True)\n print(files)\n\n for filename in files:\n\n filepath, ext = os.path.splitext(filename)\n basename = os.path.splitext(os.path.basename(filepath))[0]\n subdirname = os.path.basename(os.path.dirname(filepath))\n\n sex = subdirname[0]\n participant = subdirname[1:]\n\n # SEX :== m | f\n # SPEAKER_ID :== <INITIALS><DIGIT>\n # INITIALS :== speaker initials, 3 letters\n # DIGIT :== number 0-9 to differentiate speakers with identical initials\n\n # print(filename) # input/dr1/fmem0/sa2.txt\n # print(filepath) # input/dr1/fmem0/sa2\n # print(subdirname) # fmem0\n # print(basename) # sa2\n # print(ext) # txt\n\n # Get audio file duration - use this as the EAF annotation's end timeslot\n # duration = int(librosa.get_duration(filename=os.path.join(input_dir, filename))*1000)\n\n # Get annotation from the text file matching on file basename\n with open(filename, 'r', encoding='utf-8') as text_file:\n annotation = text_file.read()\n annotation_split = annotation.split()\n start = int(annotation_split[0])\n duration = int(annotation_split[1])\n # convert audio samples to seconds to ms\n duration = int(duration/16000*1000)\n annotation_text = \" \".join(annotation_split[2:])\n\n # Add any annotation cleaning here\n # annotation = re.sub(r\"(\\d+)\", lambda x: num2words.num2words(int(x.group(0))), annotation)\n\n print(start, duration, annotation_text)\n\n # Make EAF file\n output_eaf = Eaf()\n output_eaf.add_tier('default', part=participant)\n output_eaf.add_annotation('default', start, duration, annotation_text)\n output_eaf.add_linked_file(os.path.join(output_dir, f'{subdirname}-{basename}.wav'))\n output_eaf.to_file(os.path.join(output_dir, f'{subdirname}-{basename}.eaf'))\n\n # Copy WAV?\n # if copy_wavs:\n shutil.copyfile(f'{filepath}.wav', os.path.join(output_dir, f'{subdirname}-{basename}.wav'))\n\n print('>>> Done')",
"def create_silence():\r\n for file in os.listdir('D:/s/Tensorflowspeechrecognition/train/train/_background_noise_/'):\r\n if 'wav' in file:\r\n sig, rate = librosa.load('D:/s/Tensorflowspeechrecognition/train/train/_background_noise_/' + file, sr = 16000)\r\n sig = shifting(sig, rate, 0.5, 'both')\r\n sig = change_pitch(sig,rate,np.random.randint(-10,10))\r\n noising(sig,np.random.randint(10))\r\n sig_arr = split_arr(sig)\r\n\r\n\r\n if not os.path.exists(train_dir+'silence5/'):\r\n os.makedirs(train_dir+'silence5/')\r\n for ind, arr in enumerate(sig_arr):\r\n filename = 'frag%d' %(ind+384) + '_%s' %file # example: frag0_running_tap.wav\r\n sf.write(train_dir+'silence5/'+filename, arr, 16000)\r\n\r\n\r\n\r\n # librosa.output.write_wav(train_dir+'silence/'+filename, arr, 16000)\r",
"def spectrogram(files, adv_ms, len_ms, specfmt=\"dB\", mel_filters_N=12):\n\n # If not a list, make it so number one...\n if not isinstance(files, list):\n files = [files]\n\n # Set up frame stream and pass to DFT streamer\n framestream = MultiFileAudioFrames(files, adv_ms, len_ms)\n dftstream = DFTStream(framestream, specfmt=specfmt, mels_N=mel_filters_N)\n\n # Grab the spectra\n spectra = []\n for s in dftstream:\n spectra.append(s)\n\n # Convert to matrix\n spectra = np.asarray(spectra)\n\n # Time axis in s\n adv_s = framestream.get_frameadv_ms() / 1000\n t = [s * adv_s for s in range(spectra.shape[0])]\n\n return [spectra, t, dftstream.get_Hz()]",
"def save_spectrogram_tdsv():\n print(\"start text dependent utterance selection\")\n os.makedirs(config.train_path, exist_ok=True) # make folder to save train file\n os.makedirs(config.test_path, exist_ok=True) # make folder to save test file\n\n utterances_spec = []\n for folder in os.listdir(audio_path):\n utter_path= os.path.join(audio_path, folder, os.listdir(os.path.join(audio_path, folder))[0])\n if os.path.splitext(os.path.basename(utter_path))[0][-3:] != '001': # if the text utterance doesn't exist pass\n print(os.path.basename(utter_path)[:4], \"001 file doesn't exist\")\n continue\n\n utter, sr = librosa.core.load(utter_path, config.sr) # load the utterance audio\n utter_trim, index = librosa.effects.trim(utter, top_db=14) # trim the beginning and end blank\n if utter_trim.shape[0]/sr <= config.hop*(config.tdsv_frame+2): # if trimmed file is too short, then pass\n print(os.path.basename(utter_path), \"voice trim fail\")\n continue\n\n S = librosa.core.stft(y=utter_trim, n_fft=config.nfft,\n win_length=int(config.window * sr), hop_length=int(config.hop * sr)) # perform STFT\n S = keyword_spot(S) # keyword spot (for now, just slice last 80 frames which contains \"Call Stella\")\n utterances_spec.append(S) # make spectrograms list\n\n utterances_spec = np.array(utterances_spec) # list to numpy array\n np.random.shuffle(utterances_spec) # shuffle spectrogram (by person)\n total_num = utterances_spec.shape[0]\n train_num = (total_num//10)*9 # split total data 90% train and 10% test\n print(\"selection is end\")\n print(\"total utterances number : %d\"%total_num, \", shape : \", utterances_spec.shape)\n print(\"train : %d, test : %d\"%(train_num, total_num- train_num))\n np.save(os.path.join(config.train_path, \"train.npy\"), utterances_spec[:train_num]) # save spectrogram as numpy file\n np.save(os.path.join(config.test_path, \"test.npy\"), utterances_spec[train_num:])",
"def chunk_queue(dir_in=\"../audio/chunk_queue\",\n dir_out=\"../audio/wav_chunked\",\n chunk_len=5,\n sr=22050,\n log=True\n ):\n \n for root, dirs, files in os.walk(dir_in):\n for fname in files:\n if not re.match(r'^\\.', fname):\n rel_fpath = os.path.join(root, fname)\n chunk_song(rel_fpath, chunk_len=chunk_len, sr=sr, log=log)",
"def find_amp_analysis_results( directory ):\n for outer_entry in os.listdir( directory ):\n entry_path = os.path.join( directory, outer_entry )\n if os.path.isdir( entry_path ):\n for inner_entry in os.listdir( entry_path ):\n if inner_entry == 'amplicon_analysis.fastq':\n yield os.path.join( entry_path, inner_entry )",
"def save_spectrogram_tisv():\n print(\"start text independent utterance feature extraction\")\n os.makedirs(hp.data.train_path, exist_ok=True) # make folder to save train file\n os.makedirs(hp.data.test_path, exist_ok=True) # make folder to save test file\n\n utter_min_len = (hp.data.tisv_frame * hp.data.hop + hp.data.window) * hp.data.sr # lower bound of utterance length\n total_speaker_num = len(audio_path)\n train_speaker_num= (total_speaker_num//10)*9 # split total data 90% train and 10% test\n print(\"total speaker number : %d\"%total_speaker_num)\n print(\"train : %d, test : %d\"%(train_speaker_num, total_speaker_num-train_speaker_num))\n for i, folder in enumerate(audio_path):\n print(\"%dth speaker processing...\"%i)\n utterances_spec = []\n for utter_name in os.listdir(folder):\n if utter_name[-4:] == '.wav':\n utter_path = os.path.join(folder, utter_name) # path of each utterance\n times, segs = VAD_chunk(2, utter_path)\n #print(\"+++++++++++++++++++++++++++++\", len(segs))\n for i, seg in enumerate(segs):\n if (times[i][1]-times[i][0]) > 0.2: # If partial utterance is sufficient long,\n #utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=seg, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * hp.data.sr), hop_length=int(hp.data.hop * hp.data.sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n #samples = random.sample(S, 3 * hp.data.tisv_frame)\n #first = samples[]\n print(\"************************\", S.shape)\n #if(len(S) < 360):\n # print(\"less than 360\", len(S))\n # continue\n for i in range(0, S.shape[1] - hp.data.tisv_frame, hp.data.tisv_frame):\n #print(\"Appending of shape\", S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame].shape)\n utterances_spec.append(S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame ])\n #utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n #utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n #print(\"Shape of S\", S[-2].shape, S[-1].shape)\n #concat_seg, concat_times = concat_segs(times, segs)\n #STFT_frames, STFT_times = get_STFTs(concat_seg, concat_times)\n #STFT_frames = np.stack(STFT_frames, axis=2)\n #STFT_frames = np.transpose(STFT_frames, axes=(2,1,0))\n\n #utter, sr = librosa.core.load(utter_path, hp.data.sr) # load utterance audio\n #intervals = librosa.effects.split(utter, top_db=30) # voice activity detection \n # this works fine for timit but if you get array of shape 0 for any other audio change value of top_db\n # for vctk dataset use top_db=100\n \"\"\"for interval in intervals:\n if (interval[1]-interval[0]) > utter_min_len: # If partial utterance is sufficient long,\n utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=utter_part, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * sr), hop_length=int(hp.data.hop * sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n \"\"\"\n utterances_spec = np.array(utterances_spec)\n 
print(\"utterances_spec\", utterances_spec.shape)\n if(utterances_spec.shape[0] == 0):\n continue\n #print(utterances_spec.shape)\n if i<train_speaker_num: # save spectrogram as numpy file\n np.save(os.path.join(hp.data.train_path, \"speaker%d.npy\"%i), utterances_spec)\n else:\n np.save(os.path.join(hp.data.test_path, \"speaker%d.npy\"%(i-train_speaker_num)), utterances_spec)",
"def prepareOutput():\r\n\r\n os.removedirs(\"output\")\r\n os.mkdir(\"output\")",
"def files_to_process(files_list: list, output_dir: str,\n contains_extension=True, is_base_name=True) -> list:\n # Make a copy of the list\n remaining_files = files_list.copy()\n print('Checking files in', output_dir)\n counter = 0\n # List the output files in the directory\n output_files = os.listdir(output_dir)\n\n # Iterate over file names and eliminate from this_files if the respective\n # spectrogram already exists.\n for el in files_list:\n if not is_base_name:\n file_name = os.path.basename(el)\n else:\n file_name = el\n if contains_extension:\n ext = file_name.split('.')[-1]\n file_name = str(file_name[:-len(ext)-1])\n if file_name + '.npy' in output_files:\n remaining_files.remove(el)\n counter += 1\n print('There are {} files remaining to process, '\n 'and {} mfcc data in {}.'.format(len(files_list) - counter, counter,\n output_dir))\n return remaining_files"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Prints basic stats for any np array. IN
|
def arr_stats(ndarray):
print("Min:", np.min(ndarray))
print("Max:", np.max(ndarray))
print("Mean:", np.mean(ndarray))
print("Std:", np.std(ndarray))
print("Shape:", np.shape(ndarray))
|
[
"def print_stats( slopes, cr_total):\n\n wh_slope_0 = np.where( slopes == 0.) # insuff data or no signal\n for ii in range( cr_total.max()+1 ): \n cr_ii = np.where( cr_total == ii )\n print 'The number of pixels in 2d array having ' , ii,\\\n ' crs : ' , len(cr_ii[0])\n\n print 'The number of pixels having insufficient data'\n print ' due to excessive CRs :', len(wh_slope_0[0]) \n print 'Count rates - min, mean, max, std:'\\\n ,slopes.min(), slopes.mean(), slopes.max()\\\n ,slopes.std() \n print 'Cosmic rays - min, mean, max, std :',\\\n cr_total.min(), cr_total.mean(), cr_total.max(), cr_total.std()",
"def print_statistics(self):\n pass",
"def fullprint(*args, **kwargs): # From http://stackoverflow.com/questions/1987694/print-the-full-numpy-array\n \n from pprint import pprint\n import numpy\n opt = numpy.get_printoptions()\n numpy.set_printoptions(threshold='nan')\n pprint(*args, **kwargs)\n numpy.set_printoptions(**opt)",
"def print_stats(dataset):\n print(\"\\n================================================\\nStats\\n================================================\")\n num_images = len(dataset[\"images\"])\n num_anns_total = len(dataset[\"annotations\"])\n print(\"Number of images: {}\\nNumber of annotations: {}\\n\".format(num_images, num_anns_total))\n\n anns_per_class = dict()\n for ann in dataset[\"annotations\"]:\n cl = ann[\"category_id\"]\n if cl in anns_per_class:\n anns_per_class[cl] += 1\n else:\n anns_per_class[cl] = 1\n print(anns_per_class)",
"def print_stats(self):\n print '{} columns and {} rows'.format(self._shape[1], self._shape[0])\n print self._df['Class'].value_counts()",
"def print_shape(self,array):\n print 'shape = {} '.format(self.to_array(array).shape)",
"def print_raw_county_data(county_data):\n for i in county_data:\n a = np.asarray(i.data)\n\n x, y = a.T \n print(i)\n plt.plot(x,y)\n plt.show()",
"def print_sum_stats(self, stat_list):\n pass",
"def grid_statistics(self):\n #set x equal to a run of array_stats on data attribute\n x = self.array_statistics(self.data)\n #print(x)",
"def _print_popstat_info(tfpopstats, nppopstats):\n mean_errors = []\n stdev_errors = []\n for j, (tfpopstat, nppopstat) in enumerate(zip(tfpopstats, nppopstats)):\n moving_average = tfpopstat.eval()\n if j % 2 == 0:\n mean_errors.append(abs(moving_average - nppopstat))\n else:\n stdev_errors.append(abs(np.sqrt(moving_average) - np.sqrt(nppopstat)))\n\n def flatmean(xs):\n return np.mean(np.concatenate([x.flatten() for x in xs]))\n\n print('average of pop mean/stdev errors: %g %g' % (flatmean(mean_errors),\n flatmean(stdev_errors)))\n print('average of batch mean/stdev: %g %g' %\n (flatmean(nppopstats[0::2]),\n flatmean([np.sqrt(ugh) for ugh in nppopstats[1::2]])))",
"def print_array(a):\n print('[')\n for i in range(a.shape[0]):\n print('\\t[%s],' %','.join(str(el) for el in a[i]))\n print(']')",
"def print_stats(self):\n self._logger.info(\"{0:20} {1:5} {2:10} {3:10} {4:10}\".format(\n \"Function Tag\", \"Hits\", \"Max\", \"Min\", \"Average\"))\n\n for fname, data in list(self._stats.items()):\n max_time, min_time, avg_time = self._calculate_metrics(data[1])\n self._logger.info(\n \"{0:20} {1:5} {2:10} {3:10} {4:10}\".format(\n fname,\n data[0],\n self._pretty_time(max_time),\n self._pretty_time(min_time),\n self._pretty_time(avg_time)))",
"def test_describe(self):\n array = np.arange(2 * 3 * 4).reshape(2, 3, 4)\n tensor = Tensor(array=array)\n captured_output = io.StringIO() # Create StringIO object\n sys.stdout = captured_output # and redirect stdout.\n tensor.describe()\n assert captured_output.getvalue() != '' # to check that something was actually printed",
"def globalStats(arr):\n arrMedian = np.median(arr)\n arrMean = arr.mean()\n nPosCount = arr[arr > arrMean].size\n nNegCount = arr[arr < arrMean].size #useful as some RFI have a lot of values below the 'baseline'\n nPosPct = nPosCount / float(arr.size)\n nNegPct = nNegCount / float(arr.size)\n std = arr.std()\n\n \n if np.isclose(arrMedian, 0.): meanMedianRatio = 0.\n else: meanMedianRatio = np.abs(arrMean / arrMedian)\n #return a dictionary full of statistics\n return { 'mean': arrMean, 'median': arrMedian, 'std': std, 'min': arr.min(), 'max': arr.max(),\n 'meanMedianRatio': meanMedianRatio, 'maxMinRatio': np.abs(arr.max() / arr.min()),\n 'posCount': nPosCount, 'negCount': nNegCount, 'posPct': nPosPct, 'negPct': nNegPct}",
"def print_stats(generation, population):\n def ave(values):\n \"\"\"Return the average of the values \"\"\"\n return float(sum(values))/len(values)\n def std(values, ave):\n \"\"\"Return the standard deviation of the values and average \"\"\"\n return math.sqrt(float(\n sum((value-ave)**2 for value in values))/len(values))\n def get_ave_and_std(values):\n _ave = ave(values)\n _std = std(values, _ave)\n return _ave, _std\n fitness_vals = [i.fitness for i in population]\n size_vals = [i.genome.node_cnt for i in population]\n depth_vals = [i.genome.calculate_depth() for i in population]\n ave_fit, std_fit = get_ave_and_std(fitness_vals)\n ave_size, std_size = get_ave_and_std(size_vals)\n ave_depth, std_depth = get_ave_and_std(depth_vals)\n print(\"Gen:%d evals:%d fit_ave:%.2f+-%.3f size_ave:%.2f+-%.3f depth_ave:%.2f+-%.3f %s\" %\n (generation, (POPULATION_SIZE * generation),\n ave_fit, std_fit,\n ave_size, std_size,\n ave_depth, std_depth,\n population[0]))",
"def globalStats(arr):\n arrMedian = np.median(arr)\n arrMean = arr.mean()\n nPosCount = arr[arr > arrMean].size\n nNegCount = arr[arr < arrMean].size #useful as some RFI have a lot of values below the 'baseline'\n nPosPct = nPosCount / float(arr.size)\n nNegPct = nNegCount / float(arr.size)\n std = arr.std()\n\n \n if np.isclose(arrMedian, 0.): meanMedianRatio = 0.\n else: meanMedianRatio = np.abs(arrMean / arrMedian)\n #return a dictionary full of statistics\n return { 'mean': arrMean, 'median': arrMedian, 'std': std, 'min': arr.min(), 'max': arr.max(),\n 'meanMedianRatio': meanMedianRatio, 'maxMinRatio': np.abs(arr.max() / arr.min()),\n 'posCount': nPosCount, 'negCount': nNegCount, 'posPct': nPosPct, 'negPct': nNegPct}",
"def print_stats():\r\n\tprint()\r\n\r\n\tall_fn_names = [k for k in _total_times.keys() if k not in _disabled_names]\r\n\r\n\tmax_name_width = max([len(k) for k in all_fn_names] + [4])\r\n\tif max_name_width % 2 == 1: max_name_width += 1\r\n\tformat_str = ' {:>%d} | {:>10.4f} ' % max_name_width\r\n\r\n\theader = (' {:^%d} | {:^10} ' % max_name_width).format('Name', 'Time (ms)')\r\n\tprint(header)\r\n\r\n\tsep_idx = header.find('|')\r\n\tsep_text = ('-' * sep_idx) + '+' + '-' * (len(header)-sep_idx-1)\r\n\tprint(sep_text)\r\n\r\n\tfor name in all_fn_names:\r\n\t\tprint(format_str.format(name, _total_times[name]*1000))\r\n\t\r\n\tprint(sep_text)\r\n\tprint(format_str.format('Total', total_time()*1000))\r\n\tprint()",
"def metrics(self):\n print(Fore.CYAN + '[ Raw Metrics ]' + Fore.RESET)\n print(subprocess.check_output(['radon', 'raw', self.inputfile], universal_newlines=True))",
"def observe_data_structure(data): \n print(data.info())\n print('\\n Summary Statistics \\n')\n print(data.describe())"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The beginnings of a grand function for making and storing a spectrogram for each file using librosa.
|
def make_spectro_old(
fname,
sample_rate=22050,
n_fft=1024,
hl=256,
n_mels=512,
cmap='magma',
show=True,
save=False
):
# update this with os.path.join()
fpath = "../audio/" + fname + ".wav"
y, sr = librosa.load(fpath,
sr=sample_rate,
duration=5.0,
)
# make the spectrogram matrix on mel scale
M = librosa.feature.melspectrogram(y=y,
sr=sample_rate,
hop_length=hl,
n_mels=n_mels
)
# creates figure of same aspect ratio as original
w, h = figaspect(M)
fig = plt.figure(figsize=(w,h), dpi=108)
# these next two create a subplot with no margins
ax = plt.subplot(111)
plt.subplots_adjust(left=0, bottom=0, right=1, top=1,
wspace=0, hspace=0
)
# creates visuals for display or saving
if show or save:
librosa.display.specshow(librosa.power_to_db(M, ref=np.max),
sr=sample_rate,
hop_length=hl,
y_axis='mel', # mel, log, fft
x_axis='time', # time
cmap=cmap
)
if show:
plt.show()
if save:
img_fpath = "../specs/" + fname + ".png"
plt.savefig(img_fpath, dpi=fig.dpi)
plt.close(fig)
return M
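
As the TODO at the top of the body notes, the hard-coded string concatenation for paths could be replaced with os.path.join. A minimal sketch of that change, with the directory layout assumed from the literals in make_spectro_old (helper names here are hypothetical):

import os

AUDIO_DIR = "../audio"  # assumed location of the source .wav files
SPEC_DIR = "../specs"   # assumed location for the rendered .png spectrograms


def wav_path(fname):
    # Replacement for the inline fpath = "../audio/" + fname + ".wav"
    return os.path.join(AUDIO_DIR, fname + ".wav")


def spec_path(fname):
    # Replacement for the inline img_fpath = "../specs/" + fname + ".png"
    return os.path.join(SPEC_DIR, fname + ".png")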
|
[
"def save_spectrogram_tisv():\n print(\"start text independent utterance feature extraction\")\n os.makedirs(hp.data.train_path, exist_ok=True) # make folder to save train file\n os.makedirs(hp.data.test_path, exist_ok=True) # make folder to save test file\n\n utter_min_len = (hp.data.tisv_frame * hp.data.hop + hp.data.window) * hp.data.sr # lower bound of utterance length\n total_speaker_num = len(audio_path)\n train_speaker_num= (total_speaker_num//10)*9 # split total data 90% train and 10% test\n print(\"total speaker number : %d\"%total_speaker_num)\n print(\"train : %d, test : %d\"%(train_speaker_num, total_speaker_num-train_speaker_num))\n for i, folder in enumerate(audio_path):\n print(\"%dth speaker processing...\"%i)\n utterances_spec = []\n for utter_name in os.listdir(folder):\n if utter_name[-4:] == '.wav':\n utter_path = os.path.join(folder, utter_name) # path of each utterance\n times, segs = VAD_chunk(2, utter_path)\n #print(\"+++++++++++++++++++++++++++++\", len(segs))\n for i, seg in enumerate(segs):\n if (times[i][1]-times[i][0]) > 0.2: # If partial utterance is sufficient long,\n #utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=seg, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * hp.data.sr), hop_length=int(hp.data.hop * hp.data.sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n #samples = random.sample(S, 3 * hp.data.tisv_frame)\n #first = samples[]\n print(\"************************\", S.shape)\n #if(len(S) < 360):\n # print(\"less than 360\", len(S))\n # continue\n for i in range(0, S.shape[1] - hp.data.tisv_frame, hp.data.tisv_frame):\n #print(\"Appending of shape\", S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame].shape)\n utterances_spec.append(S[:, i * hp.data.tisv_frame: (i + 1) * hp.data.tisv_frame ])\n #utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n #utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n #print(\"Shape of S\", S[-2].shape, S[-1].shape)\n #concat_seg, concat_times = concat_segs(times, segs)\n #STFT_frames, STFT_times = get_STFTs(concat_seg, concat_times)\n #STFT_frames = np.stack(STFT_frames, axis=2)\n #STFT_frames = np.transpose(STFT_frames, axes=(2,1,0))\n\n #utter, sr = librosa.core.load(utter_path, hp.data.sr) # load utterance audio\n #intervals = librosa.effects.split(utter, top_db=30) # voice activity detection \n # this works fine for timit but if you get array of shape 0 for any other audio change value of top_db\n # for vctk dataset use top_db=100\n \"\"\"for interval in intervals:\n if (interval[1]-interval[0]) > utter_min_len: # If partial utterance is sufficient long,\n utter_part = utter[interval[0]:interval[1]] # save first and last 180 frames of spectrogram.\n S = librosa.core.stft(y=utter_part, n_fft=hp.data.nfft,\n win_length=int(hp.data.window * sr), hop_length=int(hp.data.hop * sr))\n S = np.abs(S) ** 2\n mel_basis = librosa.filters.mel(sr=hp.data.sr, n_fft=hp.data.nfft, n_mels=hp.data.nmels)\n S = np.log10(np.dot(mel_basis, S) + 1e-6) # log mel spectrogram of utterances\n utterances_spec.append(S[:, :hp.data.tisv_frame]) # first 180 frames of partial utterance\n utterances_spec.append(S[:, -hp.data.tisv_frame:]) # last 180 frames of partial utterance\n \"\"\"\n utterances_spec = np.array(utterances_spec)\n 
print(\"utterances_spec\", utterances_spec.shape)\n if(utterances_spec.shape[0] == 0):\n continue\n #print(utterances_spec.shape)\n if i<train_speaker_num: # save spectrogram as numpy file\n np.save(os.path.join(hp.data.train_path, \"speaker%d.npy\"%i), utterances_spec)\n else:\n np.save(os.path.join(hp.data.test_path, \"speaker%d.npy\"%(i-train_speaker_num)), utterances_spec)",
"def save_spectrogram_tisv():\n print(\"开始文本不独立语言特征提取...\")\n ind = 0\n if not os.path.isdir(config.train_path):\n os.makedirs(config.train_path, exist_ok=True) # make folder to save train file\n else:\n ind += len(os.listdir(config.train_path))\n if not os.path.isdir(config.test_path):\n os.makedirs(config.test_path, exist_ok=True) # make folder to save test file\n else:\n ind += len(os.listdir(config.test_path))\n\n utter_min_len = (config.tisv_frame * config.hop + config.window) * config.sr # lower bound of utterance length\n total_speaker_num = len(os.listdir(audio_path))\n train_speaker_num= (total_speaker_num//10)*9 # split total data 90% train and 10% test\n print(\"总的 speaker 数为 : %d\"%total_speaker_num)\n print(\"训练 : %d, 测试 : %d\"%(train_speaker_num, total_speaker_num-train_speaker_num))\n print(os.listdir(audio_path))\n\n for i, folder in enumerate(os.listdir(audio_path)):\n # 从原来断开处继续处理\n if i < ind:\n continue\n print(\"从断开处继续处理...\")\n speaker_path = os.path.join(audio_path, folder) # path of each speaker\n print(\"第 %d 个 speaker 处理...\" % i)\n utterances_spec = []\n k=0\n for utter_name in os.listdir(speaker_path):\n utter_path = os.path.join(speaker_path, utter_name) # path of each utterance\n utter, sr = librosa.core.load(utter_path, config.sr) # load utterance audio\n utter_trim, index = librosa.effects.trim(utter, top_db=20) # voice activity detection, only trim\n\n cur_slide = 0\n mfcc_win_sample = int(config.sr*config.hop*config.tisv_frame)\n while(True):\n if(cur_slide + mfcc_win_sample > utter_trim.shape[0]):\n break\n slide_win = utter_trim[cur_slide : cur_slide+mfcc_win_sample]\n\n S = librosa.feature.mfcc(y=slide_win, sr=config.sr, n_mfcc=40)\n utterances_spec.append(S)\n\n cur_slide += int(mfcc_win_sample/2)\n\n utterances_spec = np.array(utterances_spec)\n print('utterances_spec.shape = {}'.format(utterances_spec.shape))\n\n if i<train_speaker_num: # save spectrogram as numpy file\n np.save(os.path.join(config.train_path, \"speaker%d.npy\"%i), utterances_spec)\n else:\n np.save(os.path.join(config.test_path, \"speaker%d.npy\"%(i-train_speaker_num)), utterances_spec)",
"def create_spectrogram(voice_sample):\n\n in_fpath = Path(voice_sample.replace('\"', \"\").replace(\"'\", \"\"))\n original_wav, sampling_rate = librosa.load(str(in_fpath))\n\n # Plot the signal read from wav file\n fig = plt.figure()\n #plt.subplot(111)\n plt.title(f\"Spectrogram of file {voice_sample}\")\n\n plt.plot(original_wav)\n plt.xlabel(\"Sample\")\n plt.ylabel(\"Amplitude\")\n\n # plt.subplot(212)\n # plt.specgram(original_wav, Fs=sampling_rate)\n # plt.xlabel(\"Time\")\n # plt.ylabel(\"Frequency\")\n # # plt.savefig(voice_sample.split(\".\")[0] + \"_spectogram.png\")\n return fig",
"def create_spectrogram(self, audio_path):\n audio_name = audio_path.split(\"/\")[-1].replace(\".wav\", \"\")\n fs, w = wavfile.read(audio_path)\n if len(w.shape) == 2:\n w = w[:, 0]\n dur = len(w) / fs\n\n cmap = plt.cm.get_cmap('Greys')\n cmap.set_under('w')\n f, t, sxx = scipy.signal.spectrogram(w, fs=fs, window='hann', nperseg=int(fs / 12.32),\n noverlap=int(self.overlap * (fs / 12.32)), mode='psd', nfft=16000)\n sxx_db = 10 * np.log10(abs(sxx[:1500, :]) / 2 * 10e-5)\n\n dpi = 50\n fig = plt.figure(figsize=(dur * self.sec_size // dpi, self.sec_size * 2 // dpi), dpi=dpi, frameon=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n\n extent = (0, dur * self.sec_size // dpi, 0, self.sec_size * 2 // dpi)\n plt.imshow(sxx_db[::-1, :], cmap=cmap, extent=extent, norm=mpl.colors.Normalize(vmin=-50, vmax=0, clip=False))\n plt.savefig(osp.join(self.out_path, '%s.jpeg' % audio_name), dpi=dpi, frameon=False)\n\n # Resize saved image in case of bad matplotlib result\n img = imread(osp.join(self.out_path, '%s.jpeg' % audio_name))\n img = resize(img, (dur * self.sec_size, self.sec_size * 2)[::-1])\n imsave(osp.join(self.out_path, '%s.jpeg' % audio_name), img)",
"def save_spectrogram_tdsv():\n print(\"start text dependent utterance selection\")\n os.makedirs(config.train_path, exist_ok=True) # make folder to save train file\n os.makedirs(config.test_path, exist_ok=True) # make folder to save test file\n\n utterances_spec = []\n for folder in os.listdir(audio_path):\n utter_path= os.path.join(audio_path, folder, os.listdir(os.path.join(audio_path, folder))[0])\n if os.path.splitext(os.path.basename(utter_path))[0][-3:] != '001': # if the text utterance doesn't exist pass\n print(os.path.basename(utter_path)[:4], \"001 file doesn't exist\")\n continue\n\n utter, sr = librosa.core.load(utter_path, config.sr) # load the utterance audio\n utter_trim, index = librosa.effects.trim(utter, top_db=14) # trim the beginning and end blank\n if utter_trim.shape[0]/sr <= config.hop*(config.tdsv_frame+2): # if trimmed file is too short, then pass\n print(os.path.basename(utter_path), \"voice trim fail\")\n continue\n\n S = librosa.core.stft(y=utter_trim, n_fft=config.nfft,\n win_length=int(config.window * sr), hop_length=int(config.hop * sr)) # perform STFT\n S = keyword_spot(S) # keyword spot (for now, just slice last 80 frames which contains \"Call Stella\")\n utterances_spec.append(S) # make spectrograms list\n\n utterances_spec = np.array(utterances_spec) # list to numpy array\n np.random.shuffle(utterances_spec) # shuffle spectrogram (by person)\n total_num = utterances_spec.shape[0]\n train_num = (total_num//10)*9 # split total data 90% train and 10% test\n print(\"selection is end\")\n print(\"total utterances number : %d\"%total_num, \", shape : \", utterances_spec.shape)\n print(\"train : %d, test : %d\"%(train_num, total_num- train_num))\n np.save(os.path.join(config.train_path, \"train.npy\"), utterances_spec[:train_num]) # save spectrogram as numpy file\n np.save(os.path.join(config.test_path, \"test.npy\"), utterances_spec[train_num:])",
"def create_silence():\r\n for file in os.listdir('D:/s/Tensorflowspeechrecognition/train/train/_background_noise_/'):\r\n if 'wav' in file:\r\n sig, rate = librosa.load('D:/s/Tensorflowspeechrecognition/train/train/_background_noise_/' + file, sr = 16000)\r\n sig = shifting(sig, rate, 0.5, 'both')\r\n sig = change_pitch(sig,rate,np.random.randint(-10,10))\r\n noising(sig,np.random.randint(10))\r\n sig_arr = split_arr(sig)\r\n\r\n\r\n if not os.path.exists(train_dir+'silence5/'):\r\n os.makedirs(train_dir+'silence5/')\r\n for ind, arr in enumerate(sig_arr):\r\n filename = 'frag%d' %(ind+384) + '_%s' %file # example: frag0_running_tap.wav\r\n sf.write(train_dir+'silence5/'+filename, arr, 16000)\r\n\r\n\r\n\r\n # librosa.output.write_wav(train_dir+'silence/'+filename, arr, 16000)\r",
"def process_song(source_dir, songname, target_dir, hop_length=512, n_fft=1024, context_size=25):\n \n # combine all the drumless tracks into one\n melo, sr = librosa.load(source_dir + songname + \"/vocals.wav\")\n melo += librosa.load(source_dir + songname + \"/other.wav\")[0]\n melo += librosa.load(source_dir + songname + \"/bass.wav\")[0]\n \n # drum track\n drum, sr = librosa.load(source_dir + songname + \"/drums.wav\")\n \n # mixture track\n mix, sr = librosa.load(source_dir + songname + \"/mixture.wav\")\n \n # take spectrograms of the 3 tracks\n melo_spec = np.abs(librosa.stft(melo, hop_length=hop_length, n_fft=n_fft))\n drum_spec = np.abs(librosa.stft(drum, hop_length=hop_length, n_fft=n_fft))\n mix_spec = np.abs(librosa.stft(mix, hop_length=hop_length, n_fft=n_fft))\n \n n_bins, n_frames = melo_spec.shape\n \n # container for frame names and associated labels\n fnames = []\n \n # \n for i in range(n_frames):\n # container for one image of size n_bins, context_size\n x = np.zeros(shape=(n_bins, context_size))\n \n # frame each STFT time step with context_size//2 before and after (pad with 0s at the edges)\n for j in range(context_size):\n curr_idx = i - context_size//2 + j\n \n # if current index out of range, leave 0s as padding\n if curr_idx < 0:\n continue\n elif curr_idx >= n_frames:\n break\n \n else:\n x[:, j] = mix_spec[:, curr_idx]\n \n # save the current x frame\n xfname = target_dir + \"x/%s_%d.npy\" % (songname, i)\n np.save(xfname, x)\n \n # calculate the IBM for the current x frame\n y = drum_spec[:, i] - melo_spec[:, i]\n y = np.where(y > 0, 1, 0)\n \n # save the IBM\n yfname = target_dir + \"y/%s_%d.npy\" % (songname, i)\n np.save(yfname, y)\n \n fnames.append((xfname, yfname))\n \n # save the array of x-y filename associations as a ndarray \n fnames = np.asarray(fnames)\n np.save(target_dir + \"%s_fnames\" % songname, fnames)",
"def batch_spectros(\n dir_in=\"../audio/wav_chunked\",\n dir_out=\"../specs/mel\",\n files='labeled',\n sample_rate=22050,\n hl=256,\n n_fft=1024,\n n_mels=512,\n normalize=False\n ):\n\n assert_msg = \"Error: files arg must be either 'all' or 'labeled'\"\n assert files == 'all' or files == 'labeled', assert_msg\n\n existing = set()\n \n for spec_fpath in glob(dir_out + \"/*.npy\"):\n chunk_id = os.path.splitext(os.path.basename(spec_fpath))[0]\n existing.add(chunk_id)\n\n chunk_queue = set()\n \n if files == 'all':\n for wav_fpath in glob(dir_in + \"/*.wav\"):\n chunk_id = os.path.splitext(os.path.basename(wav_fpath))[0]\n chunk_queue.add(chunk_id)\n if files == 'labeled':\n labeled_ids = kdb.test_songs.find(\n {\"labeled\": True}\n )\n for doc in labeled_ids:\n chunk_queue.add(doc['chunk_id'])\n else:\n pass\n # expand here to accept a custom search term for MongoDB\n\n # remove chunk IDs with existing spectros from the queue\n chunk_queue -= existing\n\n try:\n new_specs = 0\n for chunk_id in chunk_queue:\n y, _ = audio_loader(\n chunk_id,\n dir_in=dir_in,\n sample_rate=sample_rate,\n duration=5.0\n )\n spectro = make_spectro(\n y,\n sample_rate=sample_rate,\n hl=hl,\n n_fft=n_fft,\n n_mels=n_mels,\n normalize=normalize\n )\n spec_path_out = os.path.join(dir_out, chunk_id)\n np.save(spec_path_out, spectro)\n new_specs += 1\n print(\"{} spectrograms created\".format(new_specs))\n except:\n print(\"Something bad has happened!\")",
"def reference_spectrogram(path, augmentations: audaugio.ChainBase):\n try:\n y, sr = librosa.load(path, sr=44100)\n except audioop.error as e:\n logger = logging.getLogger('logger')\n logger.warning(\"Could not load {0}\\n{1}\".format(path, e))\n return None\n\n augmented_audio = augmentations(y, sr)\n\n spectrograms = []\n for audio in augmented_audio:\n if audio.shape[0] < 4 * sr:\n pad = np.zeros((4 * sr - audio.shape[0]))\n y_fix = np.append(audio, pad)\n else:\n y_fix = audio[0:int(4 * sr)]\n s = librosa.feature.melspectrogram(y=y_fix, sr=sr, n_fft=1024, hop_length=1024, power=2)\n s = librosa.power_to_db(s, ref=np.max)\n s = s[:, 0:128]\n spectrograms.append(s)\n return spectrograms",
"def get_spectrogram_data(frame_rate, np_frames):\n # Set format details for plot.\n #fig = plt.figure(num=None, figsize=(12, 7.5), dpi=300)\n #ax = fig.add_subplot(111)\n #ax.xaxis.set_major_locator(ticker.MultipleLocator(1))\n #ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.1))\n #ax.yaxis.set_major_locator(ticker.MultipleLocator(2000))\n #ax.yaxis.set_minor_locator(ticker.MultipleLocator(500))\n #ax.tick_params(axis='both', direction='inout')\n #plt.title(f\"Spectrogram of:\\n{input_file}\")\n plt.title(f\"Spectrogram\")\n plt.xlabel('Time (seconds)')\n plt.ylabel('Frequency (Hz)')\n\n # If NFFT is too high, then there the horizontal (frequency) resolution is\n # too fine, and there are multiple bands for each formant. However, if\n # NFFT is too low, then the whole image is rather blurry and even the\n # formants are not well differentiated (i.e. at the default vaules for NFFT\n # and noverlap). noverlap that is half of NFFT seems to minimize background\n # noise, as well.\n noverlap = 128 # default: 128; other: 256\n NFFT = 256 # default: 256; other: 512\n\n # Create the plot.\n spectrum, frequencies, times, img = plt.specgram(\n np_frames,\n Fs=frame_rate,\n cmap='gnuplot',\n noverlap=noverlap,\n NFFT=NFFT,\n )\n return spectrum, frequencies, times, img",
"def analyse_multiple_audio_files(context, source_path, dest_path):\n context.obj[\"dest_path\"] = dest_path\n for file in os.listdir(source_path):\n file_path = os.path.join(file)\n context.invoke(\n generate_spectrograms,\n source_path=os.path.join(source_path, file_path),\n dest_path=dest_path,\n )",
"def wav_to_PCA(infolder='../speech_corpora/', outfile='../Data/processedspeech12.npy', \n pcafilename = '../Data/spectropca12.pickle', testfile = 'test12.npy', ncomponents = 200, whiten = True, maxspectros=100000):\n infilelist = []\n for pth, subd, files in os.walk(infolder):\n for fname in files:\n fstring = os.path.join(pth,fname)\n if fstring.lower().endswith('.wav'):\n infilelist.append(fstring)\n # infilelist = listdir(infolder)\n \n allspectros = [] # don't know length in advance, use list for flexible append. there's probably a faster way\n for infilename in infilelist:\n logflogpsd = wav_to_logPSD(infilename)\n \n nchunks = int((logflogpsd.shape[0] - ntimepoints)*(stride/logflogpsd.shape[0]))\n for chunk in range(nchunks):\n # convert each chunk to a vector and store. throw out any chunk with average power below cutoff\n start = chunk*stride #ntimepoints*chunk\n finish = chunk*stride + ntimepoints#ntimepoints*(chunk+1)\n temp = logflogpsd[start:finish,:]\n if np.mean(10**temp) > cutoff/nfreqs:\n allspectros.append(temp.flatten())\n if len(allspectros) > maxspectros:\n break\n allspectros = np.array(allspectros)\n \n # regularize, normalize spectrograms\n allspectros = np.nan_to_num(allspectros)\n allspectros = np.clip(allspectros,-1000,1000)\n# datamean = np.mean(allspectros, axis=0)\n# allspectros = allspectros - datamean\n# datastd = np.std(allspectros, axis=0)\n# allspectros = allspectros/datastd\n allspectros = allspectros - allspectros.mean(axis=1)[:,np.newaxis]\n #this is just for compatibility with other code\n datamean = 0\n datastd = 1\n\n # do PCA\n pca = PCA(dim=ncomponents, whiten=whiten)\n print (\"Fitting the PCA...\")\n pca.fit(allspectros)\n print (\"Done. Transforming and saving vectors...\")\n reduced = pca.transform(allspectros)\n \n np.save(outfile, reduced) \n with open(pcafilename, 'wb') as f:\n pickle.dump([pca, (ntimepoints, nfreqs), datamean, datastd], f) \n print (\"Done.\")\n\n # save a file with 9 example spectrograms and their reconstructions\n comparison = allspectros[:9,:]\n recons = pca.inverse_transform(reduced[:9,:])\n np.save(testfile, np.concatenate((comparison, recons),axis=0))\n \n return reduced, pca, (ntimepoints, nfreqs), datamean, datastd",
"def process_and_serialize(data_type):\n stride = 0.5\n\n if data_type == 'train':\n clean_folder = clean_train_folder\n noisy_folder = noisy_train_folder\n clean_save_folder = signal_train_clean_folder\n noisy_save_folder = signal_train_noisy_folder\n\n else:\n clean_folder = clean_test_folder\n noisy_folder = noisy_test_folder\n clean_save_folder = signal_test_clean_folder\n noisy_save_folder = signal_test_noisy_folder\n\n # walk through the path, slice the audio file, and save the serialized result\n for root, dirs, files in os.walk(clean_folder):\n if len(files) == 0:\n continue\n for filename in tqdm(files, desc='Serialize and down-sample {} audios'.format(data_type)):\n clean_file = os.path.join(clean_folder, filename)\n noisy_file = os.path.join(noisy_folder, filename)\n # slice both clean signal and noisy signal\n clean_sliced = slice_signal(clean_file, window_size, stride, sample_rate)\n noisy_sliced = slice_signal(noisy_file, window_size, stride, sample_rate)\n # serialize - file format goes [original_file]_[slice_number].npy\n # ex) p293_154.wav_5.npy denotes 5th slice of p293_154.wav file\n for idx, slice_tuple in enumerate(zip(clean_sliced, noisy_sliced)):\n clean_empha = emphasis(slice_tuple[0], pre=True)\n noisy_empha = emphasis(slice_tuple[1], pre=True)\n # save the signal\n librosa.output.write_wav(os.path.join(clean_save_folder, '{}_{}.wav'.format(filename.replace('.wav', ''), idx)), clean_empha, sr=16000)\n librosa.output.write_wav(os.path.join(noisy_save_folder, '{}_{}.wav'.format(filename.replace('.wav', ''), idx)), noisy_empha, sr=16000)",
"def spectrogram(samples):\n S, freqs, times = mlab.specgram(samples, NFFT=4096, Fs=44100,\n window=mlab.window_hanning,\n noverlap=(4096 // 2))\n return S, freqs, times",
"def imitation_spectrogram(path, augmentations: audaugio.ChainBase):\n try:\n y, sr = librosa.load(path, sr=16000)\n except audioop.error as e:\n logger = logging.getLogger('logger')\n logger.warning(\"Could not load {0}\\n{1}\".format(path, e))\n return None\n\n augmented_audio = augmentations(y, sr)\n\n spectrograms = []\n for audio in augmented_audio:\n # zero-padding\n if audio.shape[0] < 4 * sr:\n pad = np.zeros((4 * sr - audio.shape[0]))\n y_fix = np.append(audio, pad)\n else:\n y_fix = audio[0:int(4 * sr)]\n s = librosa.feature.melspectrogram(y=y_fix, sr=sr, n_fft=133,\n hop_length=133, power=2, n_mels=39,\n fmin=0.0, fmax=5000)\n s = s[:, :482]\n s = librosa.power_to_db(s, ref=np.max)\n spectrograms.append(s)\n return spectrograms",
"def save_spectrogram_tisv():\n\n print(\"start text independent utterance feature extraction\")\n assert hp.data.nmels == hp.data.nmfccs, 'nmels must be equal to nmfccs'\n\n # make folder to save train and test files\n if os.path.exists(hp.data.train_path):\n print('Folder [%s] exists, delete it ...' % hp.data.train_path)\n shutil.rmtree(hp.data.train_path)\n os.makedirs(hp.data.train_path)\n if os.path.exists(hp.data.test_path):\n print('Folder [%s] exists, delete it ...' % hp.data.test_path)\n shutil.rmtree(hp.data.test_path)\n os.makedirs(hp.data.test_path)\n\n # downloaded dataset path\n audio_path = glob.glob(os.path.dirname(hp.unprocessed_data))\n total_speaker_num = len(audio_path)\n train_speaker_num= (total_speaker_num//10)*9 # split total data 90% train and 10% test\n print(\"total speaker number : %d, train speaker number: %d, test speaker number: %d\" \n % (total_speaker_num, train_speaker_num, total_speaker_num-train_speaker_num))\n\n # lower bound of utterance length\n sliding_window_length = int((hp.data.tisv_frame-1) * hp.data.hop * hp.data.sr)\n sliding_stride = int(sliding_window_length//2)\n win_length=int(hp.data.window * hp.data.sr)\n hop_length=int(hp.data.hop * hp.data.sr)\n print(\"utterrance sliding window length: %d, sliding stride: %d, mel window length: %d, mel hop length: %d\" \n % (sliding_window_length, sliding_stride, win_length, hop_length))\n\n min_train_utterances_num, min_test_utterances_num = 999, 999\n for i, folder in enumerate(audio_path):\n print(\"%d_th speaker under path [%s] processing ...\" % (i, folder))\n utterances_spec = []\n for utter_name in os.listdir(folder):\n if utter_name[-4:] == '.WAV':\n # path of each utterance\n utter_path = os.path.join(folder, utter_name)\n # load utterance audio\n utter, sr = librosa.core.load(utter_path, hp.data.sr)\n # trim silent edges\n utter, _ = librosa.effects.trim(utter, top_db=30)\n\n # padding iff the length is not enough but longer than sliding_window_length * 2 / 3\n if len(utter) < sliding_window_length:\n if int(2*sliding_window_length//3) < len(utter):\n print('\\tutterance [%s], length: %d, does not have enough non-slience audio. Padding it ...' \n % (utter_name, len(utter)))\n utter = librosa.util.pad_center(utter, sliding_window_length)\n else:\n print('\\tutterance [%s], length: %d, is too short. skip it ...' 
\n % (utter_name, len(utter)))\n continue\n\n stop = False\n for start in range(0, len(utter), sliding_stride):\n end = start+sliding_window_length\n if end >= len(utter):\n start = len(utter)-sliding_window_length\n end = len(utter)\n stop = True\n\n # cal mel spec and mfcc features\n utter_part = utter[start:start+sliding_window_length]\n mel_spec, mfccs = cal_melspectrogram_mfcc(utter_part)\n \n # normalize features\n # norm_mel_spec = mel_spec/np.linalg.norm(mel_spec)\n # norm_mfccs = mfccs/np.linalg.norm(mfccs)\n\n if hp.model.input_size == 2:\n features = np.array([mel_spec, mfccs])\n else:\n features = np.array([mel_spec])\n\n utterances_spec.append(features)\n\n if stop:\n break\n\n utterances_spec = np.array(utterances_spec)\n\n if utterances_spec.shape[0] < hp.train.M:\n print('There are only %d utterances under path: %s, skip it' % (utterances_spec.shape[0], folder))\n continue\n\n # save spectrogram as numpy file \n if i<train_speaker_num:\n min_train_utterances_num = min_train_utterances_num if utterances_spec.shape[0] > min_train_utterances_num else utterances_spec.shape[0]\n np.save(os.path.join(hp.data.train_path, \"speaker%03d.npy\"%i), utterances_spec)\n else:\n min_test_utterances_num = min_test_utterances_num if utterances_spec.shape[0] > min_test_utterances_num else utterances_spec.shape[0]\n np.save(os.path.join(hp.data.test_path, \"speaker%03d.npy\"%(i-train_speaker_num)), utterances_spec)\n\n print('\\t%d partial utterances were extracted from original files ...' % utterances_spec.shape[0])\n\n print('min_train_utterances_num:', min_train_utterances_num)\n print('min_test_utterances_num:', min_test_utterances_num)",
"def get_spectral_values(saveFileName=csv_save, audioDirectory=data_directory):\r\n us8k = 'air_conditioner,car_horn,children_playing,dog_bark,drilling,' \\\r\n 'engine_idling,gun_shot,jackhammer,siren,street_music'.split(sep=\",\")\r\n\r\n # Create a header for the CSV file\r\n header = 'filename chroma_stft rmse spectral_centroid spectral_bandwidth rolloff zero_crossing_rate'\r\n for i in range(1, 21):\r\n header += f' mfcc{i}'\r\n header += ' label'\r\n header = header.split()\r\n print(header)\r\n\r\n # Save Spectral feature values to a CSV file\r\n on_file = 0\r\n file = open(saveFileName, 'w', newline='')\r\n with file:\r\n writer = csv.writer(file)\r\n writer.writerow(header)\r\n for i in range(1, 11):\r\n for filename in os.listdir(f'{audioDirectory}/fold{i}'):\r\n clip = f'{audioDirectory}/fold{i}/{filename}'\r\n if clip[-3:] == \"wav\":\r\n on_file = on_file + 1\r\n print(f'On File: {on_file}')\r\n y, sr = librosa.load(clip, mono=True)\r\n rms = librosa.feature.rms(y=y)\r\n chroma_stft = librosa.feature.chroma_stft(y=y, sr=sr)\r\n spec_cent = librosa.feature.spectral_centroid(y=y, sr=sr)\r\n spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=sr)\r\n rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr)\r\n zcr = librosa.feature.zero_crossing_rate(y)\r\n mfcc = librosa.feature.mfcc(y=y, sr=sr)\r\n to_append = f'{filename} {np.mean(chroma_stft)} {np.mean(rms)} {np.mean(spec_cent)} {np.mean(spec_bw)} {np.mean(rolloff)} {np.mean(zcr)}'\r\n for e in mfcc:\r\n to_append += f' {np.mean(e)}'\r\n to_append += f' {us8k[int(filename.split(sep=\"-\")[1])]}'\r\n file = open(saveFileName, 'a', newline='')\r\n with file:\r\n writer = csv.writer(file)\r\n writer.writerow(to_append.split())",
"def mp3_to_spectrogram(file):\r\n y, sr = librosa.load(file, mono=False)\r\n mspec = librosa.feature.melspectrogram(y=y, sr=sr)\r\n temp_n_mels, temp_t_frames = mspec.shape\r\n\r\n if temp_n_mels > get_n_mels():\r\n set_n_mels(temp_n_mels)\r\n\r\n if temp_t_frames > get_t_frames():\r\n set_t_frames(temp_t_frames)\r\n\r\n return mspec\r\n # return mspec[0]\r",
"def create_spectogram1(track_id):\n filename = get_path(track_id)\n y, sr = librosa.load(filename)\n spectrogram = librosa.feature.melspectrogram(y = y, sr = sr, n_fft = 2048, hop_length = 1024)\n spectrogram = librosa.power_to_db(spectrogram, ref = np.max)\n return spectrogram[:, 473:601]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a dictionary to compare against a funding source.
|
def get_funding_source_verify(self, funding_source):
verify = funding_source.__dict__['json_response']
del verify['created_time']
del verify['last_modified_time']
del verify['date_sent_for_verification']
return verify
|
[
"def prevalence_G_dict(self):\n ret = {}\n for finding in self.findings:\n if(self.isCountry(finding[0])):\n ret[finding[0]] = finding[1]\n return ret",
"def get_base_sourcedict(payload, sample, name):\n sourcedict = {}\n sourcedict[\"_event\"] = 1 if is_event_sample(sample) else 0\n sourcedict[\"_compound\"] = 1 if is_compound_sample(sample) else 0\n sourcedict[\"project_id\"] = sample[\"project_id\"]\n sourcedict[\"resource_id\"] = sample[\"resource_id\"]\n sourcedict[\"metric_name\"] = name\n sourcedict[\"metric_unit\"] = sample[\"unit\"]\n sourcedict[\"metric_type\"] = sample[\"type\"]\n sourcedict[\"display_name\"] = sample[\"resource_metadata\"].get(\"display_name\", \"\")\n\n\n if sample[\"type\"] == \"cumulative\":\n sourcedict[\"_counter\"] = 1\n\n if type(payload) == float:\n sourcedict[\"_float\"] = 1\n\n for k, v in sourcedict.items():\n sourcedict[k] = sanitize(str(v))\n\n return sourcedict",
"def funding_source(self, funding_source):\n\n self._funding_source = funding_source",
"def findForKeys(template, source):\n global foreign_key1, foreign_key2, pointless_fields\n if foreign_key1 is not None and foreign_key2 is not None:\n return foreign_key1[0], foreign_key2[0]\n foreign_key1 = []\n foreign_key2 = []\n for k1 in template.keys():\n for k2 in source.keys():\n if k1.lower() == k2.lower():\n foreign_key1.append(k1)\n foreign_key2.append(k2)\n if len(foreign_key1) == 0:\n print ('\\nNo perfect match in keys. Try to get partly match...')\n for k1 in template.keys():\n try:\n iso8601.parse_date(template[k1])\n pointless_fields.append(k1)\n except BaseException:\n pass\n try:\n float(template[k1])\n pointless_fields.append(k1)\n except BaseException:\n pass\t\t\t\t\n for k2 in source.keys():\n try:\n iso8601.parse_date(source[k2])\n pointless_fields.append(k2)\n except BaseException:\n pass\n try:\n float(source[k2])\n pointless_fields.append(k2)\n except BaseException:\n pass\n if (k1.lower() in k2.lower()) or (k2.lower() in k1.lower()):\n foreign_key1.append(k1)\n foreign_key2.append(k2)\n if len(foreign_key1) == 0:\n print ('\\nNo matches found. Key reference has to be manually handeled')\n else:\n print ('\\ntemplate key(s): {}\\nsource key(s): {}\\nfound as matching and set as foreign key'\n .format(foreign_key1[0], foreign_key2[0]))\n foreign_key = foreign_key1\n return (foreign_key1[0], foreign_key2[0])",
"def get_sources(snapcraft_file, sources=None):\n if sources == None:\n sources = {}\n\n for line in snapcraft_file.split(\"\\n\"):\n line = line.strip()\n if line.startswith(\"source:\"):\n src_type = _get_source_type_from_uri(line[7:])\n\n if src_type in sources:\n sources[src_type] += 1\n else:\n sources[src_type] = 1\n return sources",
"def download_fixity_checker(resource_dict):\n fixity_obj = {\n 'hash_algorithm': None,\n 'source_hash': None,\n 'presqt_hash': None,\n 'fixity': None,\n 'fixity_details': None,\n 'title': resource_dict['title'],\n 'path': resource_dict['path']\n }\n fixity_match = True\n\n for hash_algorithm, hash_value in resource_dict['hashes'].items():\n # If the current hash_value is not None and the hash algorithm is supported by hashlib\n # then this is the hash we will run our fixity checker against.\n if hash_value and hash_algorithm in hashlib.algorithms_available:\n # Run the file through the hash algorithm\n hash_hex = hash_generator(resource_dict['file'], hash_algorithm)\n\n fixity_obj['hash_algorithm'] = hash_algorithm\n fixity_obj['presqt_hash'] = hash_hex\n fixity_obj['source_hash'] = hash_value\n\n # Compare the given hash with the calculated hash.\n if hash_hex == hash_value:\n fixity_obj['fixity'] = True\n fixity_obj['fixity_details'] = 'Source Hash and PresQT Calculated hash matched.'\n else:\n fixity_obj['fixity'] = False\n fixity_obj['fixity_details'] = (\n 'Source Hash and PresQT Calculated hash do not match.')\n fixity_match = False\n break\n else:\n # If either there is no matching algorithms in hashlib or the provided hashes\n # don't have values then we assume fixity has remained and we calculate a new hash\n # using md5 to give to the user.\n h = hashlib.md5(resource_dict['file'])\n hash_hex = h.hexdigest()\n fixity_obj['hash_algorithm'] = 'md5'\n fixity_obj['presqt_hash'] = hash_hex\n fixity_obj['fixity_details'] = (\n 'Either a Source Hash was not provided or the source hash algorithm is not supported.')\n fixity_match = False\n\n return fixity_obj, fixity_match",
"def get_source_api(self):\n file_path = DIR_TOKENS + FILE_SOURCE_API\n try:\n lines = open(file_path).readlines()\n self.log.debug('[get_source_api]')\n self.log.debug('Path: ' + file_path)\n self.log.debug('Source API: ' + lines[0].strip())\n\n return {'source_api' : lines[0].strip()}\n except: \n self.log.info(\"Source API NOT found, path: %s\" % file_path)\n return {}",
"def get_producer_price_dict(country, from_year, to_year, item_name):\n r = {}\n\n with open('data/faostat_prices_2020_6_7.csv', 'r', encoding='utf-8') as f:\n for item in csv.DictReader(f):\n if item['Area'].lower() != country.lower():\n continue\n elif not (from_year <= int(item['Year']) <= to_year):\n continue\n elif item['Item'].lower() != item_name.lower():\n continue\n\n r[int(item['Year'])] = float(item['Value'])\n\n return r",
"def source_state(self):\n\n if self._source_state is None or self.expired:\n print \"refreshing source state of %s\" % self.project\n states = {}\n for package in self._obs.getPackageList(self.project):\n if package == '_pattern':\n continue\n\n states[package] = False\n try:\n filelist = self._obs.getPackageFileList(self.project, package)\n print filelist\n if \"_service\" in filelist:\n x = self._obs.getServiceState(self.project, package)\n print x\n if x == \"succeeded\":\n states[package] = True\n else:\n states[package] = True\n except Exception, exc:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exc(file=sys.stdout)\n print exc\n if \"failed\" in str(exc):\n states[package] = True\n\n self._source_state = states\n\n return self._source_state",
"def is_relevant_sourcekey(self, sourcekey: str) -> bool:\n ...",
"def gen_deb(deblist):\n sourcelist = {}\n\n for i in deblist:\n checkdeb = Package(i)\n if checkdeb.source in sourcelist.keys():\n # if the source changelogpath is not set and this deb has a\n # changelog, try to set it\n if sourcelist[checkdeb.source].changelogpath == '':\n sourcelist[checkdeb.source]._update_details(checkdeb)\n\n if checkdeb.name in sourcelist[checkdeb.source].debs.keys():\n # this is an update version of source\n if compare_version(checkdeb.version, sourcelist[checkdeb.source].version):\n sourcelist[checkdeb.source].oldversion = sourcelist[\n checkdeb.source].version\n sourcelist[checkdeb.source].version = checkdeb.version\n\n sourcelist[checkdeb.source].debpath = checkdeb.path\n else:\n # the same deb name of different arch(such as amd64 and\n # i386)\n if (checkdeb.version == sourcelist[checkdeb.source].version) and (sourcelist[checkdeb.source].debs[checkdeb.name].find(checkdeb.arch) == -1):\n sourcelist[checkdeb.source].debs[\n checkdeb.name] += \" \" + checkdeb.arch\n # this is an old version of this source\n else:\n sourcelist[\n checkdeb.source].oldversion = checkdeb.version\n\n else:\n # this is a new deb of this source ,add it to deb list\n sourcelist[checkdeb.source].debs[checkdeb.name] = checkdeb.arch\n\n # try to extract the smallest deb of every source\n # package.installsize and source.size are string, not number\n if (int(checkdeb.installsize) < int(sourcelist[checkdeb.source].size)):\n sourcelist[checkdeb.source]._update_details(checkdeb)\n\n else:\n # add a new source\n newsource = Source(\n checkdeb.source,\n checkdeb.name,\n checkdeb.arch,\n checkdeb.version, )\n newsource._update_details(checkdeb)\n\n sourcelist[newsource.name] = newsource\n\n return sourcelist",
"def get_source_codes():\n source_codes = dataCache.get(\"source_codes\")\n\n if source_codes == None:\n source_codes = {}\n for source in Source.objects.all():\n source_codes[source.code.upper()] = source.id\n dataCache.set(\"source_codes\", source_codes)\n\n return source_codes",
"def get_source():",
"def build_dict(cls, source, keys):\n return {attr: source.get(attr, None) for attr in keys}",
"def get_object_if_value_eq_on_key(source: list,\n key: str,\n value: Any) -> dict:\n\n for item in source:\n if source[item][key] == value:\n return source[item]\n\n return None",
"def funding(self):\n funds = self._json['item'].get('xocs:meta', {}).get('xocs:funding-list', {}).get('xocs:funding', [])\n if len(funds) == 0:\n return None\n if not isinstance(funds, list):\n funds = [funds]\n out = []\n fund = namedtuple('Funding', 'agency string id acronym country')\n for item in funds:\n new = fund(agency=item.get('xocs:funding-agency'),\n string=item.get('xocs:funding-agency-matched-string'),\n id=item.get('xocs:funding-agency-id'),\n acronym=item.get('xocs:funding-agency-acronym'),\n country=item.get('xocs:funding-agency-country'))\n out.append(new)\n return out",
"def match(cls, source: Source) -> Any:\n raise NotImplementedError()",
"def to_dict(self):\n dict_out = self.__dict__\n dict_out['kind'] = 'WLSource'\n return dict_out",
"def build_destination_dictionary(self):\n\n destinations = {}\n\n raw_destinations = [\n (\"Western Governors University\", \"4001 South 700 East\", \"87104\",\n (0.0, 7.2, 3.8, 11.0, 2.2, 3.5, 10.9, 8.6, 7.6, 2.8, 6.4, 3.2, 7.6,\n 5.2, 4.4, 3.7, 7.6, 2.0, 3.6, 6.5, 1.9, 3.4, 2.4, 6.4, 2.4, 5.0, 3.6)\n ),\n (\"International Peace Gardens\", \"1060 Dalton Ave S\", \"84106\",\n (7.2, 0.0, 7.1, 6.4, 6.0, 4.8, 1.6, 2.8, 4.8, 1.6, 2.8, 4.8, 6.3, 7.3, 5.3,\n 4.8, 3.0, 4.6, 4.5, 7.4, 6.0, 5.0, 4.8, 9.5, 10.9, 8.3, 6.9, 10.0, 4.4, 13.0)\n ),\n (\"Sugar House Park\", \"1330 2100 S\", \"84123\",\n (3.8, 7.1, 0.0, 9.2, 4.4, 2.8, 8.6, 6.3, 5.3, 1.6, 10.4, 3.0, 5.3, 6.5,\n 5.6, 5.8, 5.7, 4.1, 3.6, 4.3, 3.3, 5.0, 6.1, 9.7, 6.1, 2.8, 7.4)\n ),\n (\"Taylorsville-Bennion Heritage City Gov Off\", \"1488 4800 S\", \"84123\",\n (11.0, 6.4, 9.2, 0.0, 5.6, 6.9, 8.6, 4.0, 11.1, 7.3, 1.0, 6.4, 11.1, 3.9,\n 4.3, 4.4, 7.2, 5.3, 6.0, 10.6, 5.9, 7.4, 4.7, 0.6, 6.4, 10.1, 10.1)\n ),\n (\"Salt Lake City Division of Health\", \"177 W Price Ave\", \"84115\",\n (2.2, 6.0, 4.4, 5.6, 0.0, 1.9, 7.9, 5.1, 7.5, 2.6, 6.5, 1.5, 7.5, 3.2,\n 2.4, 2.7, 1.4, 0.5, 1.7, 6.5, 3.2, 5.2, 2.5, 6.0, 4.2, 5.4, 5.5)\n ),\n (\"South Salt Lake Public Works\", \"195 W Oakland Ave\", \"84115\",\n (3.5, 4.8, 2.8, 6.9, 1.9, 0.0, 6.3, 4.3, 4.5, 1.5, 8.7, 0.8, 4.5, 3.9,\n 3.0, 3.8, 5.7, 1.9, 1.1, 3.5, 4.9, 6.9, 4.2, 9.0, 5.9, 3.5, 7.2)\n ),\n (\"Salt Lake City Streets and Sanitation\", \"2010 W 500 S\", \"84104\",\n (10.9, 1.6, 8.6, 8.6, 7.9, 6.3, 0.0, 4.0, 4.2, 8.0, 8.6, 6.9, 4.2, 4.2,\n 8.0, 5.8, 7.2, 7.7, 6.6, 3.2, 11.2, 12.7, 10.0, 8.2, 11.7, 5.1, 14.2)\n ),\n (\"Deker Lake\", \"2300 Parkway Blvd\", \"84119\",\n (8.6, 2.8, 6.3, 4.0, 5.1, 4.3, 4.0, 0.0, 7.7, 9.3, 4.6, 4.8, 7.7, 1.6,\n 3.3, 3.4, 3.1, 5.1, 4.6, 6.7, 8.1, 10.4, 7.8, 4.2, 9.5, 6.2, 10.7)\n ),\n (\"Salt Lake City Ottinger Hall\", \"233 Canyon Rd\", \"84103\",\n (7.6, 4.8, 5.3, 11.1, 7.5, 4.5, 4.2, 7.7, 0.0, 4.8, 11.9, 4.7, 0.6, 7.6,\n 7.8, 6.6, 7.2, 5.9, 5.4, 1.0, 8.5, 10.3, 7.8, 11.5, 9.5, 2.8, 14.1)\n ),\n (\"Columbus Library\", \"2530 S 500 E\", \"84106\",\n (2.8, 6.3, 1.6, 7.3, 2.6, 1.5, 8.0, 9.3, 4.8, 0.0, 9.4, 1.1, 5.1, 4.6,\n 3.7, 4.0, 6.7, 2.3, 1.8, 4.1, 3.8, 5.8, 4.3, 7.8, 4.8, 3.2, 6.0)\n ),\n (\"Taylorsville City Hall\", \"2600 Taylorsville Blvd\", \"84118\",\n (6.4, 7.3, 10.4, 1.0, 6.5, 8.7, 8.6, 4.6, 11.9, 9.4, 0.0, 7.3, 12.0, 4.9,\n 5.2, 5.4, 8.1, 6.2, 6.9, 11.5, 6.9, 8.3, 4.1, 0.4, 4.9, 11.0, 6.8)\n ),\n (\"South Salt Lake Police\", \"2835 Main St\", \"84115\",\n (3.2, 5.3, 3.0, 6.4, 1.5, 0.8, 6.9, 4.8, 4.7, 1.1, 7.3, 0.0, 4.7, 3.5,\n 2.6, 2.9, 6.3, 1.2, 1.0, 3.7, 4.1, 6.2, 3.4, 6.9, 5.2, 3.7, 6.4)\n ),\n (\"Council Hall\", \"300 State St\", \"84103\",\n (7.6, 4.8, 5.3, 11.1, 7.5, 4.5, 4.2, 7.7, 0.6, 5.1, 12.0, 4.7, 0.0, 7.3,\n 7.8, 6.6, 7.2, 5.9, 5.4, 1.0, 8.5, 10.3, 7.8, 11.5, 9.5, 2.8, 14.1)\n ),\n (\"Redwood Park\", \"3060 Lester St\", \"84119\",\n (5.2, 3.0, 6.5, 3.9, 3.2, 3.9, 4.2, 1.6, 7.6, 4.6, 4.9, 3.5, 7.3, 0.0,\n 1.3, 1.5, 4.0, 3.2, 3.0, 6.9, 6.2, 8.2, 5.5, 4.4, 7.2, 6.4, 10.5)\n ),\n (\"Salt Lake County Mental Health\", \"3148 S 1100 W\", \"84119\",\n (4.4, 4.6, 5.6, 4.3, 2.4, 3.0, 8.0, 3.3, 7.8, 3.7, 5.2, 2.6, 7.8, 1.3,\n 0.0, 0.6, 6.4, 2.4, 2.2, 6.8, 5.3, 7.4, 4.6, 4.8, 6.3, 6.5, 8.8)\n ),\n (\"Salt Lake County/United Police Dept\", \"3365 S 900 W\", \"84119\",\n (3.7, 4.5, 5.8, 4.4, 2.7, 3.8, 5.8, 3.4, 6.6, 4.0, 5.4, 2.9, 6.6, 1.5,\n 0.6, 0.0, 5.6, 1.6, 1.7, 6.4, 4.9, 6.9, 4.2, 5.6, 5.9, 5.7, 8.4)\n ),\n (\"West Valley Prosecutor\", \"3575 W Valley 
Central Station bus Loop\", \"84119\",\n (7.6, 7.4, 5.7, 7.2, 1.4, 5.7, 7.2, 3.1, 7.2, 6.7, 8.1, 6.3, 7.2, 4.0,\n 6.4, 5.6, 0.0, 7.1, 6.1, 7.2, 10.6, 12.0, 9.4, 7.5, 11.1, 6.2, 13.6)\n ),\n (\"Housing Auth. of Salt Lake County\", \"3595 Main St\", \"84115\",\n (2.0, 6.0, 4.1, 5.3, 0.5, 1.9, 7.7, 5.1, 5.9, 2.3, 6.2, 1.2, 5.9, 3.2,\n 2.4, 1.6, 7.1, 0.0, 1.6, 4.9, 3.0, 5.0, 2.3, 5.5, 4.0, 5.1, 5.2)\n ),\n (\"Utah DMV Administrative Office\", \"380 W 2880 S\", \"84115\",\n (3.6, 5.0, 3.6, 6.0, 1.7, 1.1, 6.6, 4.6, 5.4, 1.8, 6.9, 1.0, 5.4, 3.0,\n 2.2, 1.7, 6.1, 1.6, 0.0, 4.4, 4.6, 6.6, 3.9, 6.5, 5.6, 4.3, 6.9)\n ),\n (\"Third District Juvenile Court\", \"410 S State St\", \"84111\",\n (6.5, 4.8, 4.3, 10.6, 6.5, 3.5, 3.2, 6.7, 1.0, 4.1, 11.5, 3.7, 1.0, 6.9,\n 6.8, 6.4, 7.2, 4.9, 4.4, 0.0, 7.5, 9.3, 6.8, 11.4, 8.5, 1.8, 13.1)\n ),\n (\"Cottonwood Regional Softball Complex\", \"4300 S 1300 E\", \"84117\",\n (1.9, 9.5, 3.3, 5.9, 3.2, 4.9, 11.2, 8.1, 8.5, 3.8, 6.9, 4.1, 8.5, 6.2,\n 5.3, 4.9, 10.6, 3.0, 4.6, 7.5, 0.0, 2.0, 2.9, 6.4, 2.8, 6.0, 4.1)\n ),\n (\"Holiday City Office\", \"4580 S 2300 E\", \"84117\",\n (3.4, 10.9, 5.0, 7.4, 5.2, 6.9, 12.7, 10.4, 10.3, 5.8, 8.3, 6.2, 10.3,\n 8.2, 7.4, 6.9, 12.0, 5.0, 6.6, 9.3, 2.0, 0.0, 4.4, 7.9, 3.4, 7.9, 4.7)\n ),\n (\"Murray City Museum\", \"5025 State St\", \"84107\",\n (2.4, 8.3, 6.1, 4.7, 2.5, 4.2, 10.0, 7.8, 7.8, 4.3, 4.1, 3.4, 7.8, 5.5,\n 4.6, 4.2, 9.4, 2.3, 3.9, 6.8, 2.9, 4.4, 0.0, 4.5, 1.7, 6.8, 3.1)\n ),\n (\"Valley Regional Softball Complex\", \"5100 South 2700 West\", \"84118\",\n (6.4, 6.9, 9.7, 0.6, 6.0, 9.0, 8.2, 4.2, 11.5, 7.8, 0.4, 6.9, 11.5, 4.4,\n 4.8, 5.6, 7.5, 5.5, 6.5, 11.4, 6.4, 7.9, 4.5, 0.0, 5.4, 10.6, 7.8)\n ),\n (\"City Center of Rock Springs\", \"5383 South 900 East #104\", \"84117\",\n (2.4, 10.0, 6.1, 6.4, 4.2, 5.9, 11.7, 9.5, 9.5, 4.8, 4.9, 5.2, 9.5, 7.2,\n 6.3, 5.9, 11.1, 4.0, 5.6, 8.5, 2.8, 3.4, 1.7, 5.4, 0.0, 7.0, 1.3)\n ),\n (\"Rice Terrace Pavilion Park\", \"600 E 900 South\", \"84105\",\n (5.0, 4.4, 2.8, 10.1, 5.4, 3.5, 5.1, 6.2, 2.8, 3.2, 11.0, 3.7, 2.8, 6.4,\n 6.5, 5.7, 6.2, 5.1, 4.3, 1.8, 6.0, 7.9, 6.8, 10.6, 7.0, 0.0, 8.3)\n ),\n (\"Wheeler Historic Farm\", \"6351 South 900 East\", \"84121\",\n (3.6, 13.0, 7.4, 10.1, 5.5, 7.2, 14.2, 10.7, 14.1, 6.0, 6.8, 6.4, 14.1,\n 10.5, 8.8, 8.4, 13.6, 5.2, 6.9, 13.1, 4.1, 4.7, 3.1, 7.8, 1.3, 8.3, 0.0)\n )\n ]\n\n\n \"\"\"\n Take the list of tuples and turn them into a dictionary. \n The 'id_num' variable is the key for easier searching and sorting.\n \"\"\"\n id_num = 0\n for destination in raw_destinations:\n name, address, zipcode, dist_tup = destination\n d = Destination(name, address, zipcode, dist_tup)\n destinations[id_num] = d\n id_num += 1\n\n\n \"\"\"\n Add distance between current and each other destination.\n The integer 'i' corresponds to the position of the distance in each tuple \n and the \"ID\" number of each destination. \n \"\"\"\n for d in destinations.values():\n i = 0\n for k,v in destinations.items():\n d.add_distance(v.name, d.dist_tup[i])\n i += 1\n\n \"\"\"\n Sort the dictionary of distances. The name of the destination is the key for easier searching later\n \"\"\"\n for k,v in destinations.items():\n v.sorted_distances = {k: v for k, v in sorted(v.dist_dict.items(), key=lambda item: item[1])}\n\n self.destinations = destinations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies the ACH account with the correct verification amounts.
|
def test_ach_save_success(self):
funding_source = FundingSources.get_user_ach_funding_source()
amounts = self.client.funding_sources.ach(
funding_source.token).verification_amounts()
ach_verification = {
"verify_amount1": amounts.verify_amount1,
"verify_amount2": amounts.verify_amount2
}
result = self.client.funding_sources.ach.save(
funding_source.token, ach_verification)
verify = self.get_funding_source_verify(funding_source)
verify['verification_status'] = 'ACH_VERIFIED'
verify['active'] = True
verify_ach_response_model(self, result, verify)
|
[
"def test_account_verified(self):\n user = User.objects.get()\n token, uid = RegistrationAPIView.send_account_activation_email(user=user, send_email=False)\n response = self.verify_account(token, uid)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n user = User.objects.get()\n self.assertTrue(user.is_verified)",
"def verify(self, sender_key):\n\n try:\n contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)\n del contract_dict[\"buyer_order\"]\n contract_hash = digest(json.dumps(contract_dict, indent=4))\n\n ref_hash = unhexlify(self.contract[\"buyer_order\"][\"order\"][\"ref_hash\"])\n\n # verify that the reference hash matches the contract and that the contract actually exists\n if contract_hash != ref_hash or not self.db.HashMap().get_file(ref_hash):\n raise Exception(\"Order for contract that doesn't exist\")\n\n # verify the signature on the order\n verify_key = nacl.signing.VerifyKey(sender_key)\n verify_key.verify(json.dumps(self.contract[\"buyer_order\"][\"order\"], indent=4),\n unhexlify(self.contract[\"buyer_order\"][\"signature\"]))\n\n # verify buyer included the correct bitcoin amount for payment\n price_json = self.contract[\"vendor_offer\"][\"listing\"][\"item\"][\"price_per_unit\"]\n if \"bitcoin\" in price_json:\n asking_price = price_json[\"bitcoin\"]\n else:\n currency_code = price_json[\"fiat\"][\"currency_code\"]\n fiat_price = price_json[\"fiat\"][\"price\"]\n request = Request('https://api.bitcoinaverage.com/ticker/' + currency_code.upper() + '/last')\n response = urlopen(request)\n conversion_rate = response.read()\n asking_price = float(\"{0:.8f}\".format(float(fiat_price) / float(conversion_rate)))\n if asking_price > self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"amount\"]:\n raise Exception(\"Insuffient Payment\")\n\n # verify a valid moderator was selected\n # TODO: handle direct payments\n valid_mod = False\n for mod in self.contract[\"vendor_offer\"][\"listing\"][\"moderators\"]:\n if mod[\"guid\"] == self.contract[\"buyer_order\"][\"order\"][\"moderator\"]:\n valid_mod = True\n if not valid_mod:\n raise Exception(\"Invalid moderator\")\n\n # verify all the shipping fields exist\n if self.contract[\"vendor_offer\"][\"listing\"][\"metadata\"][\"category\"] == \"physical good\":\n shipping = self.contract[\"buyer_order\"][\"order\"][\"shipping\"]\n keys = [\"ship_to\", \"address\", \"postal_code\", \"city\", \"state\", \"country\"]\n for value in map(shipping.get, keys):\n if value is None:\n raise Exception(\"Missing shipping field\")\n\n # verify buyer ID\n pubkeys = self.contract[\"buyer_order\"][\"order\"][\"id\"][\"pubkeys\"]\n keys = [\"guid\", \"bitcoin\", \"encryption\"]\n for value in map(pubkeys.get, keys):\n if value is None:\n raise Exception(\"Missing pubkey field\")\n\n # verify redeem script\n chaincode = self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"chaincode\"]\n for mod in self.contract[\"vendor_offer\"][\"listing\"][\"moderators\"]:\n if mod[\"guid\"] == self.contract[\"buyer_order\"][\"order\"][\"moderator\"]:\n masterkey_m = mod[\"pubkeys\"][\"bitcoin\"][\"key\"]\n\n masterkey_v = bitcoin.bip32_extract_key(self.keychain.bitcoin_master_pubkey)\n masterkey_b = self.contract[\"buyer_order\"][\"order\"][\"id\"][\"pubkeys\"][\"bitcoin\"]\n buyer_key = derive_childkey(masterkey_b, chaincode)\n vendor_key = derive_childkey(masterkey_v, chaincode)\n moderator_key = derive_childkey(masterkey_m, chaincode)\n\n redeem_script = '75' + bitcoin.mk_multisig_script([buyer_key, vendor_key, moderator_key], 2)\n if redeem_script != self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"redeem_script\"]:\n raise Exception(\"Invalid redeem script\")\n\n # verify the payment address\n if self.testnet:\n payment_address = bitcoin.p2sh_scriptaddr(redeem_script, 196)\n else:\n payment_address = 
bitcoin.p2sh_scriptaddr(redeem_script)\n if payment_address != self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"address\"]:\n raise Exception(\"Incorrect payment address\")\n\n return True\n\n except Exception:\n return False",
"def test_ach_save_fail(self):\n\n funding_source = FundingSources.get_user_ach_funding_source()\n\n amounts = self.client.funding_sources.ach(\n funding_source.token).verification_amounts()\n\n ach_verification = {\n \"verify_amount1\": amounts.verify_amount1 + 0.01,\n \"verify_amount2\": amounts.verify_amount2 + 0.01\n }\n\n with self.assertRaises(MarqetaError):\n self.client.funding_sources.ach.save(\n funding_source.token, ach_verification)",
"def test_credit_valid(self):\n assert 0 <= self.user.credit <= 1024**2 * 210",
"def verify_transaction(self, transaction):\n\t\tsender = Bee(transaction.sender, 0)\n\t\tsender.calculate_balance(self.chain, self.last_block().index + 1)\n\n\t\treturn sender.honeycomb >= int(transaction.amount)",
"def verify_problem_answer(self, answer: models.ProblemAnswer):",
"def verify(self, plain_text):",
"def test_account_active(self, my_charge_call, my_invoice_send):\n\t\t# make account active\n\t\tself.account.status = Account.ACTIVE_STATUS\n\t\tself.account.save()\n\t\tself.assertEqual(True, self.account.access_granted())\n\t\tself.account.__unicode__()\n\n\t\t# create invoice for 201206\n\t\tself.account.createInvoice(201206, 201207)\n\t\t# issue/2030 Django w/MySQL does not store milliseonds in datetime fields, \n\t\t# ensure transaction dates different than 2nd invoice by backing up 1 second\n\t\tfor at in AccountTransaction.objects.all():\n\t\t\tat.created_on -= datetime.timedelta(seconds=1)\n\t\t\tat.save()\n\t\t# check if charges for this month and invoice are created properly:\n\t\tself.assertTrue(AccountTransaction.objects.all().count() == 3) # two charges and one invoice\n\n\t\tinvoice_tx = AccountTransaction.objects.get(account=self.account, tx_type=\"0\", \n\t\t\tperiod_start=\"201206\", period_end=\"201207\")\n\t\tinvoice_tx.__unicode__()\n\t\tinvoice = Invoice.objects.get()\n\t\tinvoice.__unicode__()\n\t\t# invoice for 06-01 total\n\t\tself.assertEqual(Decimal(\"117.50\"), invoice_tx.amount)\n\t\tself.assertTrue(invoice.paid == False)\n\n\t\t# call create invoice again, it should NOT do it!\n\t\tself.account.createInvoice(201206, 201207)\n\n\t\t# check if charges for this month and invoice are created properly:\n\t\tself.assertTrue(AccountTransaction.objects.all().count() == 3) # two charges and one invoice\n\n\t\tinvoice_tx = AccountTransaction.objects.get(account=self.account, tx_type=\"0\", \n\t\t\tperiod_start=\"201206\", period_end=\"201207\")\n\t\tinvoice = Invoice.objects.get(accounttransaction=invoice_tx.id)\n\t\t# invoice for 06-01 total\n\t\tself.assertEqual(Decimal(\"117.50\"), invoice_tx.amount)\n\t\tself.assertTrue(invoice.paid == False)\n\n\t\t# now we going to mock actual charge to brain tree but test all db updates as \n\t\t# if charge went thru ok.\n\t\t# create local payment log\n\t\tpayment_log = PaymentLog.objects.create(user=self.officestaff.user, \n\t\t\tamount=Decimal(\"117.50\"), transaction_id=\"unittest\")\n\n\t\t# mock charge, its object returned by brain tree, point to payment log, has status\n\t\tcharge = Charge()\n\t\tcharge.amount = Decimal(\"117.50\")\n\t\tcharge.is_success = True\n\t\tcharge.payment_log = payment_log\n\n\t\t# mock messaging to user notifying of payment processed\n\t\tmy_invoice_send.return_value = True\n\t\tmy_charge_call.return_value = charge\n\n\t\t# actual charge of invoice (BT call mocked)\n\t\tself.account.chargeInvoice(201206)\n\n\t\t# check results\n\t\tself.assertTrue(AccountTransaction.objects.all().count() == 4) # two charges + invoice + payment\n\t\tpayment_tx = AccountTransaction.objects.get(account=self.account, \n\t\t\ttx_type=\"1\", period_start=\"201206\", period_end=\"201207\")\n\t\tself.assertEqual(Decimal(\"117.50\"), payment_tx.amount)\n\t\tinvoice = Invoice.objects.get(accounttransaction=invoice_tx.id)\n\t\tself.assertTrue(invoice.paid == True)\n\n\t\t# call charge invoice again, but since it is PAID, should not do anything.\n\t\tself.account.chargeInvoice(201206)\n\n\t\t# check results\n\t\tself.assertTrue(AccountTransaction.objects.all().count() == 4) # two charges + invoice + payment\n\t\t# second invoice cycle, just invoice, not need to mock payment again.\n\n\t\tself.account.createInvoice(201207, 201208)\n\n\t\t# check if charges for this month and invoice are created properly:\n\t\tself.assertTrue(AccountTransaction.objects.all().count() == 8)\n\t\t# 3 charges, 1 invoice, 4 tx for prev 
period\n\n\t\tinvoice_tx = AccountTransaction.objects.get(account=self.account, tx_type=\"0\", \n\t\t\tperiod_start=\"201207\", period_end=\"201208\")\n\t\tinvoice = Invoice.objects.get(accounttransaction=invoice_tx.id)\n\t\t# invoice for 07-01 total\n\t\tself.assertEqual(Decimal(\"143.70\"), invoice_tx.amount)\n\t\tself.assertTrue(invoice.paid == False)",
"def validate(self):\n total = sum([entry.amount for entry in self.entries.all()])\n if total != Decimal(0):\n raise TransactionBalanceException(\n \"Credits do not equal debits. Mis-match of %s.\" % total)\n return True",
"def credit(self, account):\n #stefan\n if self.account >= \"500\": # initialize self.account\n return True\n else:\n return False",
"def verify (cls, clear_password, hashed_password) :",
"def http_verify (ctx, uid, token) :\n\n username, success_url = yield db.handle_verify(user_id, token)\n \n success_url = utils.build_url(success_url,\n uid = user_id,\n username = username,\n )\n\n ctx.redirectTo(success_url)\n\n returnValue( (\"Account '%s' successfully activated\" % username) )",
"def verify_balance(self):\n total_created = 0\n total_consumed = 0\n\n for consumed_coin in self.consumed_coins:\n total_consumed += consumed_coin.value\n for created_coin in self.created_coins:\n total_created += created_coin.value\n\n return total_consumed == total_created",
"def test_check_balance():\n print('\\n', \"Checking wif balance\")\n call.nspv_login(wif_real)\n res = call.type_convert(call.nspv_listunspent())\n amount = res.get(\"balance\")\n if amount > 0.1:\n pass\n else:\n pytest.exit(\"Not enough balance, please use another wif\")",
"def verify_challenge(self,crypt):\r\n\t\ttry:\r\n\t\t\tself.server_cert.public_key().verify(\r\n\t\t\t\tcrypt,\r\n\t\t\t\tself.challenge_nonce,\r\n\t\t\t\tpd.PSS(\r\n\t\t\t\tmgf=pd.MGF1(hashes.SHA256()),\r\n\t\t\t\tsalt_length=pd.PSS.MAX_LENGTH),\r\n\t\t\t\thashes.SHA256()\r\n\t\t\t)\r\n\t\t\tlogger.info(\"Challenge OK\")\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\tlogger.error(\"Challenge wrong. Comms Compromised\")\r\n\t\t\treturn False",
"def account_verification(request):\n if request.method == 'POST':\n serializer = AccountVerificationSerializer(data=request)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = {'message': 'Account verification success'}\n return Response(data, status=status.HTTP_200_OK)",
"def verify_receipt_signature(self, receipt_update_retrieve_res):\n pass",
"def spv_main_verify_tx(spv):\n transactions = spv.transactions\n if transactions:\n i = random.randint(0, len(transactions) - 1)\n tx_hash = algo.hash1(transactions[i])\n tx_in_bc = spv.verify_transaction_proof(tx_hash)\n print(f\"SPV {spv.name} check {tx_hash} in blockchain: {tx_in_bc}\")",
"def test_ach_xor_check_number(self):\n main_transaction = Transaction.objects.create(account=self.account,\n balance_delta=25)\n entry = BankSpendingEntry(\n check_number=\"23\", ach_payment=True, memo='check AND ach',\n main_transaction=main_transaction, date=datetime.date.today())\n self.assertRaises(ValidationError, entry.save)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tries to verify the ACH account with incorrect verification amounts.
|
def test_ach_save_fail(self):
funding_source = FundingSources.get_user_ach_funding_source()
amounts = self.client.funding_sources.ach(
funding_source.token).verification_amounts()
ach_verification = {
"verify_amount1": amounts.verify_amount1 + 0.01,
"verify_amount2": amounts.verify_amount2 + 0.01
}
with self.assertRaises(MarqetaError):
self.client.funding_sources.ach.save(
funding_source.token, ach_verification)
|
[
"def test_ach_save_success(self):\n\n funding_source = FundingSources.get_user_ach_funding_source()\n\n amounts = self.client.funding_sources.ach(\n funding_source.token).verification_amounts()\n\n ach_verification = {\n \"verify_amount1\": amounts.verify_amount1,\n \"verify_amount2\": amounts.verify_amount2\n }\n\n result = self.client.funding_sources.ach.save(\n funding_source.token, ach_verification)\n\n verify = self.get_funding_source_verify(funding_source)\n\n verify['verification_status'] = 'ACH_VERIFIED'\n verify['active'] = True\n\n verify_ach_response_model(self, result, verify)",
"def test_06_allow_forbid_negative_recharges(self):\n\tprint \"...starting test 2.09\"\n\tself.testHandler.handle_maxwell_request(\"voltage:0\")\n\tself.testHandler.handle_maxwell_request(\"phase_load:10\")\n\ttime.sleep(5)\n\tpower=float((self.testHandler.handle_network_request(\"get_active_power\", validity_level=\"medium\")).split(\" \")[0])\n print \"active power = \",power,\" watts\"\n\tself.testHandler.handle_network_request(\"set_credit_limit:-100\")\n\t#test for allow or forbid negative recharges\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,0.0)\n\tprint\"allowing negative credits\"\n\tself.testHandler.handle_network_request(\"allow_negative_credits\")\n\tself.testHandler.handle_network_request(\"recharge:100\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"recharge:-120\")\n\ttime.sleep(2)\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,-20)\n\tprint\"test for forbid negative recharge strarted\"\n\tprint\"clearing account\"\n\tself.testHandler.handle_network_request(\"clear_account\", validity_level=\"high\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert_equal(a,0.0)\n\tprint\"forbiding negative credits and recharging with 100\"\n\tself.testHandler.handle_network_request(\"forbid_negative_credits\")\n\tself.testHandler.handle_network_request(\"recharge:100\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tself.testHandler.handle_network_request(\"recharge:-120\")\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a\n\tassert(a!=-20.0)\n\tassert(a==100.0)\n\tprint'it should not accept the recharge and then print credits'\n\tacc_crdts=self.testHandler.handle_network_request(\"get_accurate_credits\", validity_level=\"medium\")\n\tacc_crdts= (acc_crdts.split(\"\\n\"))[3].split(\" \")[2]\n\ta=float(acc_crdts)\n\tprint \"accurate credits = \",a",
"def test_credit_valid(self):\n assert 0 <= self.user.credit <= 1024**2 * 210",
"def test_invalid_verification_link(self):\n user = User.objects.get()\n token, uid = RegistrationAPIView.send_account_activation_email(user=user, send_email=False)\n\n # create the uid from a different username\n uid = urlsafe_base64_encode(force_bytes(\"invalid_username\")).decode(\"utf-8\")\n\n response = self.verify_account(token, uid)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n user = User.objects.get()\n # Ensure the user is not verified\n self.assertFalse(user.is_verified)",
"def test_deposit_invalid_account(\n mock_check, client, acc1_usd_deposit_transaction_factory\n):\n del mock_check\n acc1_usd_deposit_transaction_factory()\n response = client.get(\n f\"/deposit?asset_code=USD&account=GBSH7WNSDU5FEIED2JQZIOQPZXREO3YNH2M5DIBE8L2X5OOAGZ7N2QI6\",\n follow=True,\n )\n content = json.loads(response.content)\n\n assert response.status_code == 400\n assert content == {\"error\": \"invalid 'account'\", \"status_code\": 400}",
"def test_deposit_confirm_incorrect_amount(client, acc1_usd_deposit_transaction_factory):\n deposit = acc1_usd_deposit_transaction_factory()\n incorrect_amount = deposit.amount_in + 1\n response = client.get(\n f\"/deposit/confirm_transaction?transaction_id={deposit.id}&amount={incorrect_amount}\",\n follow=True,\n )\n content = json.loads(response.content)\n assert response.status_code == 400\n assert content == {\n \"error\": \"incorrect 'amount' value for transaction with given 'transaction_id'\",\n \"status_code\": 400\n }",
"def test_check_balance():\n print('\\n', \"Checking wif balance\")\n call.nspv_login(wif_real)\n res = call.type_convert(call.nspv_listunspent())\n amount = res.get(\"balance\")\n if amount > 0.1:\n pass\n else:\n pytest.exit(\"Not enough balance, please use another wif\")",
"def test_ach_xor_check_number(self):\n main_transaction = Transaction.objects.create(account=self.account,\n balance_delta=25)\n entry = BankSpendingEntry(\n check_number=\"23\", ach_payment=True, memo='check AND ach',\n main_transaction=main_transaction, date=datetime.date.today())\n self.assertRaises(ValidationError, entry.save)",
"def validate(self):\n total = sum([entry.amount for entry in self.entries.all()])\n if total != Decimal(0):\n raise TransactionBalanceException(\n \"Credits do not equal debits. Mis-match of %s.\" % total)\n return True",
"def verify_problem_answer(self, answer: models.ProblemAnswer):",
"def fix_account(self, account):\n try:\n acc = self._get_account(account)\n except Exception:\n return False\n\n acc.value = 0",
"def test_validate_entered_amount_valid(self):\n # given a positive number\n assert self.user.validate_entered_amount(\"1\") == 1.00\n # given a positive number with decimals\n assert self.user.validate_entered_amount(\"10.10\") == 10.10\n # given a number with 14 digits\n assert self.user.validate_entered_amount(\"1000000000.00\") == 1000000000.00",
"def test_ach_or_check_number_required(self):\n main_transaction = Transaction.objects.create(account=self.account,\n balance_delta=25)\n entry = BankSpendingEntry(\n check_number=None, ach_payment=None, memo='no check or ach',\n main_transaction=main_transaction, date=datetime.date.today())\n self.assertRaises(ValidationError, entry.save)",
"async def _verify_successful_trade(self, trade: TradeResponse) -> None:\n\n transactions = await self._client.get(\"users/accounts/transactions\")\n\n if not transactions:\n raise RuntimeError(\n \"The trade did not succeed (Reason: no transaction found).\"\n )\n\n # wait for the transaction to be available\n for transaction in transactions[\"transactions\"]:\n if transaction[\"orderId\"] == trade.dw_order_id:\n if re.search(failed_transaction_regex, transaction[\"updatedReason\"]):\n raise RuntimeError(\n \"The trade did not succeed \"\n f\"(Reason: {transaction['updatedReason']}\"\n )\n else:\n return\n\n raise RuntimeError(\"Could not find a matching transaction.\")",
"def verify(self, sender_key):\n\n try:\n contract_dict = json.loads(json.dumps(self.contract, indent=4), object_pairs_hook=OrderedDict)\n del contract_dict[\"buyer_order\"]\n contract_hash = digest(json.dumps(contract_dict, indent=4))\n\n ref_hash = unhexlify(self.contract[\"buyer_order\"][\"order\"][\"ref_hash\"])\n\n # verify that the reference hash matches the contract and that the contract actually exists\n if contract_hash != ref_hash or not self.db.HashMap().get_file(ref_hash):\n raise Exception(\"Order for contract that doesn't exist\")\n\n # verify the signature on the order\n verify_key = nacl.signing.VerifyKey(sender_key)\n verify_key.verify(json.dumps(self.contract[\"buyer_order\"][\"order\"], indent=4),\n unhexlify(self.contract[\"buyer_order\"][\"signature\"]))\n\n # verify buyer included the correct bitcoin amount for payment\n price_json = self.contract[\"vendor_offer\"][\"listing\"][\"item\"][\"price_per_unit\"]\n if \"bitcoin\" in price_json:\n asking_price = price_json[\"bitcoin\"]\n else:\n currency_code = price_json[\"fiat\"][\"currency_code\"]\n fiat_price = price_json[\"fiat\"][\"price\"]\n request = Request('https://api.bitcoinaverage.com/ticker/' + currency_code.upper() + '/last')\n response = urlopen(request)\n conversion_rate = response.read()\n asking_price = float(\"{0:.8f}\".format(float(fiat_price) / float(conversion_rate)))\n if asking_price > self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"amount\"]:\n raise Exception(\"Insuffient Payment\")\n\n # verify a valid moderator was selected\n # TODO: handle direct payments\n valid_mod = False\n for mod in self.contract[\"vendor_offer\"][\"listing\"][\"moderators\"]:\n if mod[\"guid\"] == self.contract[\"buyer_order\"][\"order\"][\"moderator\"]:\n valid_mod = True\n if not valid_mod:\n raise Exception(\"Invalid moderator\")\n\n # verify all the shipping fields exist\n if self.contract[\"vendor_offer\"][\"listing\"][\"metadata\"][\"category\"] == \"physical good\":\n shipping = self.contract[\"buyer_order\"][\"order\"][\"shipping\"]\n keys = [\"ship_to\", \"address\", \"postal_code\", \"city\", \"state\", \"country\"]\n for value in map(shipping.get, keys):\n if value is None:\n raise Exception(\"Missing shipping field\")\n\n # verify buyer ID\n pubkeys = self.contract[\"buyer_order\"][\"order\"][\"id\"][\"pubkeys\"]\n keys = [\"guid\", \"bitcoin\", \"encryption\"]\n for value in map(pubkeys.get, keys):\n if value is None:\n raise Exception(\"Missing pubkey field\")\n\n # verify redeem script\n chaincode = self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"chaincode\"]\n for mod in self.contract[\"vendor_offer\"][\"listing\"][\"moderators\"]:\n if mod[\"guid\"] == self.contract[\"buyer_order\"][\"order\"][\"moderator\"]:\n masterkey_m = mod[\"pubkeys\"][\"bitcoin\"][\"key\"]\n\n masterkey_v = bitcoin.bip32_extract_key(self.keychain.bitcoin_master_pubkey)\n masterkey_b = self.contract[\"buyer_order\"][\"order\"][\"id\"][\"pubkeys\"][\"bitcoin\"]\n buyer_key = derive_childkey(masterkey_b, chaincode)\n vendor_key = derive_childkey(masterkey_v, chaincode)\n moderator_key = derive_childkey(masterkey_m, chaincode)\n\n redeem_script = '75' + bitcoin.mk_multisig_script([buyer_key, vendor_key, moderator_key], 2)\n if redeem_script != self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"redeem_script\"]:\n raise Exception(\"Invalid redeem script\")\n\n # verify the payment address\n if self.testnet:\n payment_address = bitcoin.p2sh_scriptaddr(redeem_script, 196)\n else:\n payment_address = 
bitcoin.p2sh_scriptaddr(redeem_script)\n if payment_address != self.contract[\"buyer_order\"][\"order\"][\"payment\"][\"address\"]:\n raise Exception(\"Incorrect payment address\")\n\n return True\n\n except Exception:\n return False",
"def test_account_credits_excluded(self):\n self.nve_test(\n \"input_1000085283202600721.json\", \"expected_1000085283202600721.json\"\n )",
"def test_not_compromised(self):\n suffix = self.sample_password_suffix.replace(\"A\", \"3\")\n validator = PwnedPasswordsValidator(\n api_client=api.PwnedPasswords(client=self.http_client(suffix=suffix))\n )\n validator.validate(self.sample_password)",
"def test_account_verified(self):\n user = User.objects.get()\n token, uid = RegistrationAPIView.send_account_activation_email(user=user, send_email=False)\n response = self.verify_account(token, uid)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n user = User.objects.get()\n self.assertTrue(user.is_verified)",
"def validate_gift_card_amount(self, available_amount):\n if self.amount < 0:\n # TODO:\n # Put this bit in payment_gateway.\n self.raise_user_error(\"negative_amount\")\n if available_amount < self.amount:\n self.raise_user_error(\"insufficient_amount\", self.gift_card.number)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verifies behavior when the funding source cannot be found.
|
def test_ach_save_unknown_source(self):
ach_verification = {
"verify_amount1": 0.01,
"verify_amount2": 0.01
}
with self.assertRaises(MarqetaError):
self.client.funding_sources.ach.save(
'Not a funding source token', ach_verification)
|
[
"def test_download_with_unreachable_source(self, gcp_provider):\n gcp_provider.return_value.cost_usage_source_is_reachable.side_effect = ValidationError\n billing_source = {\"table_id\": FAKE.slug(), \"dataset\": FAKE.slug()}\n credentials = {\"project_id\": FAKE.slug()}\n with self.assertRaises(GCPReportDownloaderError):\n GCPReportDownloader(FAKE.name(), billing_source, credentials=credentials)",
"def test_svl_missing_file_error(svl_source):\n with pytest.raises(SvlMissingFileError, match=\"File\"):\n svl(svl_source, datasets=[\"ufos={}/test_datasets/ufo_sightings.csv\"])",
"def verify_source_exists(origin_file):\n\n return os.path.exists(origin_file)",
"def verify_fail(self):\n raise MissingDependencyError(self, self.installed_version)",
"def test_source_path_no_matching_files(self: TestBackupFile) -> None:\n\n backup_file = BackupFile('foo/bar.baz', 99)\n\n file_not_found_error: Optional[FileNotFoundError] = None\n try:\n backup_file.source_path\n except FileNotFoundError as err:\n file_not_found_error = err\n finally:\n self.assertRegex(str(file_not_found_error),\n 'Matching file not found')",
"def _check_source_dir(self):\n if not os.path.isdir(self.source_dir):\n raise ValueError('source directory not found: ' + self.source_dir)",
"def _check_source(cls, source_file_hash, source):\n embedding_name = cls.__name__.lower()\n if source not in source_file_hash:\n raise KeyError('Cannot find pre-trained source {} for token embedding {}. '\n 'Valid pre-trained file names for embedding {}: {}'.format(\n source, embedding_name, embedding_name,\n ', '.join(source_file_hash.keys())))",
"def test_bad_share(self):\n share = Share.load(self.tempdir)\n\n self.assertTrue(share is None)",
"def test_get_non_existent_license_fails(self):\n response = self.client.get(self.non_existent_url)\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], LICENSE_NOT_FOUND_MESSAGE)\n self.assertEqual(error_details[\"sub_code\"], LICENSE_NOT_FOUND_SUB_CODE)",
"def test_invalid(sourcextractor):\n run = sourcextractor('--this-is-not-a-valid-flag')\n assert run.exit_code > 0\n assert 'unrecognised' in run.stderr",
"def test_missing_files_attribute(self):\n assert license_check(os.path.join(STUBS_PATH, \"scancode_test_1.json\")) == -1",
"def test_API3_invalid_source(self):\n responses.add(\n responses.POST, \n constants.API_URL + constants.API9_URL,\n json={'query_result': [username_1]},\n status=200)\n\n response = self.app.post(\n constants.API3_URL,\n data = json.dumps(ride_invalid_src),\n mimetype = 'application/json')\n\n self.assertEqual(response.status_code, 400)",
"def test_invalid_sweep_source(self):\r\n with self.assertRaises(fygen.InvalidSweepSourceError):\r\n self.fy.set_sweep(source=2)",
"def test_earth_relief_invalid_data_source():\n with pytest.raises(GMTInvalidInput):\n load_earth_relief(\n resolution=\"01d\", registration=\"gridline\", data_source=\"invalid_source\"\n )",
"def test_sources_parse_method_invalid_url(source: BaseSource):\n\n with pytest.raises(ParserException) as exception:\n assert source.parse('https://invalid.url/') is None\n assert 'Invalid URL has been provided' in str(exception)",
"def __validate_source(self) -> None:\n source = self.search_source\n self.review_manager.logger.debug(f\"Validate SearchSource {source.filename}\")\n\n # if \"query\" not in source.search_parameters:\n # Note : for md-sources, there is no query parameter.\n # raise colrev_exceptions.InvalidQueryException(\n # f\"Source missing query search_parameter ({source.filename})\"\n # )\n\n if \"query\" in source.search_parameters:\n pass\n # if \"simple_query_string\" in source.search_parameters[\"query\"]:\n # if \"query\" in source.search_parameters[\"query\"][\"simple_query_string\"]:\n # pass\n # else:\n # raise colrev_exceptions.InvalidQueryException(\n # \"Source missing query/simple_query_string/query \"\n # f\"search_parameter ({source.filename})\"\n # )\n\n # elif \"url\" in source.search_parameters[\"query\"]:\n # pass\n # # else:\n # raise colrev_exceptions.InvalidQueryException(\n # f\"Source missing query/query search_parameter ({source.filename})\"\n # )\n\n self.review_manager.logger.debug(f\"SearchSource {source.filename} validated\")",
"def test_no_file_error(self):\n root_dir = os.getcwd()\n filename = 'this-file-should-not-exist'\n with self.assertRaises(RuntimeError):\n read_externals_description_file(root_dir, filename)",
"def test_excel_unknown_source_type(minimal_excel_sample: dict):\n # GIVEN some sample with a known source type\n source_type = \"flagalella\"\n minimal_excel_sample[\"UDF/Source\"] = source_type\n\n # WHEN creating a excel sample\n with pytest.raises(ValidationError):\n # THEN assert that a validation error is raised since source does not exist\n excel_sample: ExcelSample = ExcelSample(**minimal_excel_sample)",
"def _check_sources(self):\n for source_name, source in self.sources.items():\n if \"data\" not in source or \"ref_column\" not in source:\n raise ValueError(\n \"Each source needs to have a `data` and a `ref_column` property\"\n )\n if not isinstance(source[\"data\"], pd.DataFrame):\n raise ValueError(\n \"The `data` property of each source must contain a DatFrame\"\n )\n if not isinstance(source[\"data\"].index, pd.DatetimeIndex):\n raise ValueError(\n \"The `data` DataFrame must have a pd.DatetimeIndex for each source\"\n )\n if source[\"data\"].index.duplicated().any():\n raise ValueError(\n \"The input dataframe must not have duplicate index values, \"\n \"convert the data into a normalized wide format\"\n )\n if (\n not isinstance(source[\"ref_column\"], str)\n or source[\"ref_column\"] not in source[\"data\"].columns\n ):\n raise ValueError(\n \"Each source must have a string specifying the reference column, and the reference\"\n \"column must be available in the source's DataFrame\"\n )\n if self.ref_source_name not in self.sources.keys():\n raise ValueError(\n \"The reference source name must be available in the source dict\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets tweets from the Twitter live stream API.
|
def get_tweets(auth):
url = 'https://stream.twitter.com/1.1/statuses/filter.json'
query_data = [('language', 'en'), ('locations', '-130,-20,100,50'), ('track', '#')]
query_url = url + '?' + '&'.join([str(t[0]) + '='+str(t[1]) for t in query_data])
res = requests.get(query_url, auth=auth, stream=True)
print(query_url, res)
return res
|
[
"def get_tweets(self):\n try:\n self.response = get(self.query_url, auth=AUTH, stream=True)\n print(self.query_url, self.response)\n return self.response\n except exceptions.HTTPError as e:\n print(\"Response error:\", e)\n exit(1)",
"def do_tweets(self):\n http = httplib2.Http()\n if self.callsign:\n url = \"http://search.twitter.com/search.json?q=%s+from:%s\" % (urllib.quote('#' + self.callsign), urllib.quote(self.master))\n else:\n url = \"http://search.twitter.com/search.json?q=from:%s\" % (urllib.quote(self.master))\n resp, content = http.request(url, \"GET\")\n d = json.loads(content)\n for j in d['results']:\n if j['id_str'] == self.lasttweeted:\n return\n else:\n self.tweet_out(j['id_str'])",
"def get_user_tweets(self):\n tweets = []\n for status in tweepy.Cursor(self.api.user_timeline).items():\n tweets.append(status)\n return tweets",
"def get_tweets(self):\n\t\treturn self.tweets",
"def get_tweets_from_timeline(self):\n tweets = []\n for status in tweepy.Cursor(self.api.home_timeline).items(200):\n tweets.append(status)\n return tweets",
"def get_all_tweets(screen_name,keys=keys,filter=True):\n\t\n\tconsumer_key,consumer_secret,access_key,access_secret = keys\n\n\t#re\n\trt = r'^RT'\n\tlink = r'https?:\\/\\/([\\w\\.-]+)\\/([\\w\\.-]+)'\n\tmention = r'^\\@'\n\n\t#authorize twitter, initialize tweepy\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n\tauth.set_access_token(access_key, access_secret)\n\tapi = tweepy.API(auth)\n\t\n\t#initialize a list to hold all the tweepy Tweets\n\talltweets = []\t\n\t\n\t#make initial request for most recent tweets (200 is the maximum allowed count)\n\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,tweet_mode='extended')\n\t\n\t#save most recent tweets\n\talltweets.extend(new_tweets)\n\t\n\t#save the id of the oldest tweet less one\n\toldest = alltweets[-1].id - 1\n\t\n\t#keep grabbing tweets until there are no tweets left to grab\n\twhile len(new_tweets) > 0:\n\t\tprint(\"getting tweets before {}\".format(oldest))\n\t\t\n\t\t#all subsiquent requests use the max_id param to prevent duplicates\n\t\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest,tweet_mode='extended')\n\t\t\n\t\t#save most recent tweets\n\t\talltweets.extend(new_tweets)\n\t\t\n\t\t#update the id of the oldest tweet less one\n\t\toldest = alltweets[-1].id - 1\n\t\t\n\t\tprint(\"...{} tweets downloaded so far\".format(len(alltweets)))\n\t\n\t#transform the tweepy tweets into a 2D array that will populate the csv\t\n\tif filter: \n\t\touttweets = [tweet.full_text for tweet in alltweets if not re.match(rt, tweet.full_text) and not re.match(mention, tweet.full_text)]\n\t\tpreproc = [re.sub(link, \"\", tweet)+\"\\n\" for tweet in outtweets][::-1]\n\telse: \n\t\touttweets = [tweet.full_text for tweet in alltweets]\n\t\n\t#write the csv\t\n\twith open('tweets/{}_tweets.txt'.format(screen_name), 'w', encoding='utf-8') as f:\n\t\tf.writelines(preproc)\n\t\tprint('tweets/{}_tweets.txt was successfully created.'.format(screen_name))\n\tpass",
"def fetchTweets(queryTopic,twitter):\r\n \r\n raw_data = twitter.search(q=str(queryTopic), count= 10, lang='en')\r\n\r\n tweets = []\r\n\r\n #search through JSON data and extract the tweets only.\r\n for tweet in raw_data['statuses']:\r\n tweets.append((tweet['text']).encode('ascii', 'ignore'))\r\n \r\n \r\n for i in range(0,len(tweets)):\r\n #removing all links, because really its just gonna mess up topic modeling\r\n tweets[i] =re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', tweets[i])\r\n #removing #'s, '\\n''s, and 'RT'\r\n tweets[i] = tweets[i].replace(\"#\",\"\")\r\n tweets[i] = tweets[i].replace(\"\\n\",\"\")\r\n if tweets[i][:2] == \"RT\":\r\n while(tweets[i][:2] != ': '):\r\n tweets[i] = tweets[i][1:]\r\n tweets[i] = tweets[i][2:]\r\n \r\n \r\n tweets = filter(lambda x: len(x) > 3, tweets)\r\n \r\n return tweets",
"def userTweets(username):\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n user_tweet = api.GetUserTimeline(screen_name=username)\n for tweet in user_tweet:\n util.safe_print(tweet.GetText())",
"def fetch_tweets(self):\n for tweet in tweepy.Cursor(\n self.twitter_api.search_full_archive,\n environment_name=self.environment_name,\n query=self.hashtag,\n fromDate=self.start_date,\n toDate=self.end_date\n ).items(self.number_of_tweets_to_pull):\n self.tweets_list.append(\n [tweet.created_at,\n tweet.text.encode(\"utf-8\"),\n self.today_datetime]\n )",
"def get_all_tweets(screen_name):\n #Twitter only allows access to a users most recent 3240 tweets with this method\n \n #authorize twitter, initialize tweepy\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n api = tweepy.API(auth)\n \n #initialize a list to hold all the tweepy Tweets\n alltweets = [] \n \n #make initial request for most recent tweets (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts = True)\n \n #only do this for users that have actually tweeted\n if len(new_tweets) > 0:\n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #save the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n #keep grabbing tweets until there are no tweets left to grab\n while len(new_tweets) > 0:\n \n #all subsiquent requests use the max_id param to prevent duplicates\n new_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest, include_rts = True)\n \n #save most recent tweets\n alltweets.extend(new_tweets)\n \n #update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n \n print \"...%s tweets downloaded so far\" % (len(alltweets))\n \n # Save tweets for user in a json file\n fname = \"tweets/\"+str(screen_name)+\".jsonl\"\n with open(fname, 'w') as f:\n for status in alltweets:\n f.write(json.dumps(status._json)+\"\\n\")\n \n #close the file\n print \"Done with \" + str(screen_name)\n time.sleep(60)\n print \"Sleeping for one minute\"",
"def get_tweets(self, account, number=MAX_TWEETS, since_id=None, max_id=None):\n import twitter\n\n all_tweets = []\n while number > 0:\n try:\n tweets = self.api.GetUserTimeline(\n screen_name=account,\n include_rts=False,\n exclude_replies=True,\n count=min(number, CHUNK_SIZE),\n since_id=since_id,\n max_id=max_id,\n )\n except twitter.TwitterError as e:\n raise plugin.PluginError(f'Unable to fetch timeline {account} for {e}')\n\n if not tweets:\n break\n\n all_tweets += tweets\n number -= len(tweets)\n max_id = tweets[-1].id - 1\n\n return all_tweets",
"def fetchTweets(self, user):\n extended_query = \"from:{}\".format(user)\n params = {'query':extended_query,\n 'maxResults': 500,\n 'fromDate' : self.fromDate,\n # 'toDate' : self.toDate\n }\n response = requests.get(self.url, params=params, \\\n auth=(api_user, api_passwd))\n\n for r in response.json()['results']:\n r['topic']= query\n self.queueKafka( json.dumps(r).encode('utf-8'))\n\n #Scrolling through until next runs out or maxResults is exceeded\n while 'next' in response.json().keys():\n params= {'query':extended_query, \"next\": response.json()['next']}\n response = requests.get(self.url, params=params, \\\n auth=(api_user, api_passwd))\n\n for r in response.json()['results']:\n r['topic']= query\n self.queueKafka( json.dumps(r).encode('utf-8'))\n #self.queueKafka(json.dumps(response.json()['results']).encode('utf-8'))",
"def trendingTweets():\n api = twitter.Api(consumer_key=key,consumer_secret=secret,access_token_key=access_key,access_token_secret=access_secret)\n trending_topics = api.GetTrendsWoeid(BOSTON_WOEID)\n for tweet in trending_topics:\n util.safe_print(tweet.GetText())",
"def tweets(self):\n tweet=[] # creating a list to add all of the tweets text to\n for json_file in self.data:\n tweet.append(json_file[\"text\"])# adding the text of the tweets to the list\n return tweet # returning the list of tweets so that I can use this function tweets and apply it",
"def getNewerTweets():\n recent = True\n Searcher.set_twitter_connection(login, TWITTER_CREDENTIALS)\n Searcher.run(search_terms, limit, recent, REST)",
"def get_tweets_for_user(self, user_id):\n tweets = [tweet for tweet in self.tweets if tweet.user.id == user_id]\n # print(tweets)\n return tweets",
"def get_latest_tweets():\n tweet = twitter.Api(consumer_key=config.twitter_consumer_key, \n consumer_secret = config.twitter_consumer_secret, \n access_token_key = config.twitter_access_key, access_token_secret = config.twitter_access_secret)\n red = redis.Redis(host = 'localhost', db = config.subfeed_db)\n unique_new_list = []\n liverpool_tweet_list = tweet.GetUserTimeline(screen_name = config.twitter_screen_name, count = config.twitter_limit)\n twitter_key = \"lfc_twitter\"\n for lfctweet in liverpool_tweet_list:\n current_time = int(time.time()) \n present_in_db = red.zadd(twitter_key, lfctweet.id, current_time)\n if present_in_db == 1:\n twitter_url = \"https://www.twitter.com/\" + config.twitter_screen_name + \"/status/\" + str(lfctweet.id)\n unique_new_list.append(twitter_url)\n return unique_new_list",
"def run(self):\n twitter_userstream = twitter.TwitterStream(\n auth=self.oauth,\n domain='userstream.twitter.com')\n for msg in twitter_userstream.user():\n if not self.run:\n break\n print ('Incoming Twitter stream message:')\n print ('-' * 72)\n pprint.pprint(msg)\n print ('-' * 72)\n if 'text' not in msg:\n # Not a status update, so skip this...\n continue\n self.send_message(u'_Received tweet from @%s:_\\n%s' % (\n msg['user']['screen_name'],\n msg['text']),\n parse_mode='Markdown')",
"def get_n_tweets(self, username, last_n_tweets=1):\n req = requests.get(url=\"https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=%s&count=%d\" % (username, last_n_tweets), auth=self.twitter_oauth)\n return [tweet['text'] for tweet in req.json()]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sending the Twitter stream response to a PySpark stream.
|
def send_tweets_to_spark(http_resp, tcp_connection, t_in_sec):
    end_time = time.time() + t_in_sec
for line in http_resp.iter_lines():
if time.time() >= end_time:
break
try:
full_tweet = json.loads(line)
data_dict = {
'created_at': full_tweet['created_at'],
'tweet_id': full_tweet['id_str'],
'place_id': full_tweet['place']['id'],
'place': full_tweet['place']['name'],
'hash_tags': full_tweet['entities']['hashtags']
}
print("---------------get tweet--------------------")
print(full_tweet)
tcp_connection.send(bytes((json.dumps(data_dict)+"\n"), 'utf-8'))
except Exception as e:
print("Error", e)
|
[
"def send_tweets_to_spark(self, client_sock):\n num_tweets = 0\n for line in self.response.iter_lines():\n if not line.decode('utf-8'):\n continue\n try:\n full_tweet = json.loads(line.decode('utf-8'))\n if 'text' in full_tweet:\n tweet_text = full_tweet['text']\n num_tweets += 1\n print(\"successful tweets:\", num_tweets)\n print(\"Tweet Text: \" + tweet_text, '\\n', '-' * 20)\n client_sock.sendall(tweet_text.encode('utf-8'))\n except error:\n e = exc_info()\n print(\"Error sending:\", e)\n exit(1)\n except ConnectionError:\n e = exc_info()\n print(\"Connection error:\", e)\n exit(1)\n client_sock.close()",
"def run(self):\n twitter_userstream = twitter.TwitterStream(\n auth=self.oauth,\n domain='userstream.twitter.com')\n for msg in twitter_userstream.user():\n if not self.run:\n break\n print ('Incoming Twitter stream message:')\n print ('-' * 72)\n pprint.pprint(msg)\n print ('-' * 72)\n if 'text' not in msg:\n # Not a status update, so skip this...\n continue\n self.send_message(u'_Received tweet from @%s:_\\n%s' % (\n msg['user']['screen_name'],\n msg['text']),\n parse_mode='Markdown')",
"def _create_twitter_track_stream(self):\n self.logger.info('Creating birdy track stream')\n self.birdy_stream = self.birdy_conn.stream.statuses.filter.post(\n track=self.twitter_rules,\n stall_warnings='true'\n )",
"def tweet_out(self, tweet):\n for k in query_db('SELECT * ' \\\n 'FROM accounts ' \\\n 'INNER JOIN tweetswarmaccount '\\\n 'ON account.access_token=tweetswarmaccount.account_id '\\\n 'WHERE tweetswarmaccount.tweetswarm=?', ([self.id])\n ):\n s = Account()\n s.access_key = k['access_token']\n s.access_secret = k['access_secret']\n self.slaves.append(s)\n for slave in self.slaves:\n slave.tweet(tweet)\n\n query_db('UPDATE tweetswarms' \\\n 'SET lasttweeted=?' \\\n 'WHERE id=?' ([tweet, self.id])\n )\n g.db.commit()\n return True",
"def process_rdd_queue(twitter_stream, nb_tweets = 5):\n rddQueue = []\n for i in range(nb_tweets):\n json_twt = get_next_tweet(twitter_stream, i )\n dist_twt = ssc.sparkContext.parallelize([json_twt], 5)\n rddQueue += [dist_twt]\n lines = ssc.queueStream(rddQueue, oneAtATime=False)\n lines.pprint()",
"def ft_post_twt(status):\r\n\ttry:\r\n\t\ttwitter.update_status(status=status)\r\n\t\tsleep(1)\r\n\t\tprint(\"{} publish :\\n{}\".format(strftime(\"%d/%m/%y %H:%M:%S\"),status))\r\n\texcept Exception as e:\r\n\t\tprint(e)",
"def process(tweet):\n url = \"https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/d1dbaa08-93ca-4f29-81e4-8cc98f250ba7/v1/analyze?version=2019-07-12\"\n headers = {\"Content-Type\": \"application/json\"}\n data = get_json(tweet)\n auth = HTTPBasicAuth('apikey', '2YWxkOQMdI-7s7tvHJeGoXd_IsLK01G2OLbeBWDnW87n')\n res = requests.post(url, headers = headers, data=data, auth=auth)\n res = res.json()\n return res",
"def _create_twitter_locations_stream(self):\n self.logger.info('Creating birdy locations stream')\n self.birdy_stream = self.birdy_conn.stream.statuses.filter.post(\n locations=self.twitter_rules,\n stall_warnings='true'\n )",
"def _create_twitter_follow_stream(self):\n self.logger.info('Creating birdy follow stream')\n self.birdy_stream = self.birdy_conn.stream.statuses.filter.post(\n follow=self.twitter_rules,\n stall_warnings='true'\n )",
"def put_tweets(self):\n\t\tscreen_name = self.screen_name\n\t\tself.get_user_retweets()\n\t\tself.retweet_df[\"date\"] = pd.to_datetime(self.retweet_df['created_at']).dt.date\n\t\tself.retweet_df = self.retweet_df[self.retweet_df[\"date\"] >= self.__START_DATE]\n\t\tself.retweet_df = self.retweet_df.drop(\"date\",axis=1)\n\t\twrite_to_file(self.file_path,self.retweet_df,self.screen_name)\n\t\tprint(\"--- done for {} ---\".format(screen_name))",
"def get_tweets(self):\n try:\n self.response = get(self.query_url, auth=AUTH, stream=True)\n print(self.query_url, self.response)\n return self.response\n except exceptions.HTTPError as e:\n print(\"Response error:\", e)\n exit(1)",
"def _stream_responder_callback(stream, data_func, _, key, value, __):\n # Generate random data.\n data = data_func()\n print(\"{!r} updated: {!r}...Responding with {!r}\".format(key, value, data))\n stream.write(data)",
"def stream_handler(cls, client, data):\n # Do processing here\n pass",
"def _set_twitter_stream_api(self):\n auth = self._set_oauth()\n stream = Stream(auth, self)\n return stream",
"def _main_loop(self):\n self.logger.info(\"Starting tweet processing\")\n # Iterate through the twitter results\n for item in self.birdy_stream.stream():\n if item:\n try:\n tweet = json.loads(item)\n except Exception as e:\n theLogMsg = \"Caught exception while json loading the Twitter message\"\n self.logger.error(theLogMsg, extra=logExtra(e))\n dd_monitoring.increment('traptor_error_occurred',\n tags=['error_type:json_loads_error'])\n else:\n theLogMsg = \"Enriching Tweet\"\n self.logger.debug(theLogMsg, extra=logExtra({\n 'tweet_id': tweet.get('id_str', None)\n }))\n enriched_data = self._enrich_tweet(tweet)\n\n if not self._is_filtered(enriched_data):\n # #4204 - since 1.4.13\n theLogMsg = settings.DWC_SEND_TO_KAFKA_ENRICHED\n self.logger.info(theLogMsg, extra=logExtra())\n if self.kafka_enabled:\n try:\n self._send_enriched_data_to_kafka(tweet, enriched_data)\n except Exception as e:\n theLogMsg = settings.DWC_ERROR_SEND_TO_KAFKA\n self.logger.error(theLogMsg, extra=logExtra(e))\n dd_monitoring.increment('tweet_to_kafka_failure',\n tags=['error_type:kafka'])\n else:\n self.logger.debug(json.dumps(enriched_data, indent=2))\n else:\n self.logger.debug(\"Tweet Rate Filtered\", extra=logExtra({\n 'value_str': json.dumps(enriched_data, indent=2)\n }))\n\n else:\n self.logger.info(\"Stream keep-alive received\", extra=logExtra())\n\n t_now = time.time()\n\n if t_now > self._last_filter_maintenance + self.rate_limiting_reporting_interval_sec:\n self._log_rates(t_now, min(t_now - self._last_filter_maintenance,\n 2 * self.rate_limiting_reporting_interval_sec))\n self._filter_maintenance(t_now, self.rate_limiting_reporting_interval_sec)\n self._last_filter_maintenance = t_now\n\n if self.exit:\n break\n\n # Stop processing if we were told to restart\n if self._getRestartSearchFlag():\n self.logger.info(\"Restart flag is true; restarting myself\", extra=logExtra())\n break\n\n self.logger.info(\"Stream iterator has exited.\", extra=logExtra())",
"def send_tweet(status):\n client = create_client()\n url = API_URL + 'statuses/update.json?' + urlencode({'status': status})\n res, data = client.request(\n url,\n method=\"POST\", headers={\n \"content-type\": \"application/json\"\n })\n assert res['status'] == '200'\n return res",
"def stream():\n server_id = request.args.get('server_id')\n stream_url = request.args.get('stream_url')\n return render_template('/stream.html', title='Stream {}'.format(server_id), id_=server_id, stream_url=stream_url)",
"def do_tweets(self):\n http = httplib2.Http()\n if self.callsign:\n url = \"http://search.twitter.com/search.json?q=%s+from:%s\" % (urllib.quote('#' + self.callsign), urllib.quote(self.master))\n else:\n url = \"http://search.twitter.com/search.json?q=from:%s\" % (urllib.quote(self.master))\n resp, content = http.request(url, \"GET\")\n d = json.loads(content)\n for j in d['results']:\n if j['id_str'] == self.lasttweeted:\n return\n else:\n self.tweet_out(j['id_str'])",
"def follow_rtrs(tweet_id, api):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Configuring a Spark stream using a TCP socket.
|
def create_socket():
tcp_ip = SparkStream.TCP_IP.value
tcp_port = SparkStream.TCP_PORT.value
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((tcp_ip, tcp_port))
s.listen(1)
print("Waiting for tcp connection... ")
conn, address = s.accept()
print("current address is", address)
print("Connected ... Starting getting tweets.")
return conn
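
create_socket reads its address from a SparkStream enum that is defined elsewhere in the project. A plausible (assumed) definition is sketched below, together with the typical wiring with send_tweets_to_spark above; on the Spark side the same host and port would typically be consumed with ssc.socketTextStream("localhost", 9009).

import socket
from enum import Enum

class SparkStream(Enum):
    # Assumed values; the real configuration lives wherever SparkStream is actually defined.
    TCP_IP = "localhost"
    TCP_PORT = 9009

# Typical wiring (sketch): accept the Spark connection, then forward tweets for 60 seconds.
# conn = create_socket()
# send_tweets_to_spark(http_resp, conn, t_in_sec=60)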
|
[
"def create_socket(self):\n super(TCPSocket, self).create_socket()\n self.adjust_buffers()",
"def connect_stream(stream):\n return factory.connect_stream(stream, SlaveService)",
"def __init__(self, address=DEFAULT_CLIENT_ADDRESS, port=DEFAULT_TCP_PORT):\n binding = 'tcp://*:{}'.format(port)\n super(TCPServer, self).__init__(zmq.Context(), binding)",
"def __connect_NN_socket(self):\n if self.mode == \"one2many\":\n # This allows only use one publisher connected at the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"*\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.bind(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" bind\")\n \n elif self.mode == \"many2one\":\n # This allows two use more that one publisher ate the same endpoint\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")\n \n elif self.mode == \"many2many\":\n if self.ip == '127.0.0.1':\n ip = \"localhost\"\n endpoint = \"tcp://\" + ip + \":\" + str(self.port)\n self.sock.connect(endpoint)\n #if self.debug or self.network == \"direct\":\n #print (\"publisher \" + endpoint + \" connect\")",
"def __init__(self):\n # Create a TCP/IP socket\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM);",
"def _setup_socketio(self) -> None:",
"def __init__(self, address=DEFAULT_CLIENT_ADDRESS, port=DEFAULT_TCP_PORT):\n connection = 'tcp://{}:{}'.format(address, port)\n super(TCPClient, self).__init__(zmq.Context(), connection)",
"def serverConnect(self):\n\t\tself.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.rtspSocket.connect((self.serverAddr, self.serverPort))",
"def configure_spark_server(cx):\n print 'called configure_spark_server'\n spark_feature = add_feature(cx, 'spark')\n\n server_name = cx.state['nodes'][spark_feature['node']]['private_dns_name']\n spark_feature['master'] = server_name\n spark_feature['master_port'] = 7077\n spark_feature['user_dir'] = '/user'\n\n master_url = \"spark://{}:{}\".format(\\\n spark_feature['master'], spark_feature['master_port'])\n\n spark_home = '/usr/local/spark-2.1.0-bin-hadoop2.4'\n start_master = spark_home + \"/sbin/start-master.sh -h {} -p {}\".format(\n spark_feature['master'],\n spark_feature['master_port'])\n remote_commands(cx, [\n r'sudo apt-get install scala',\n r'echo \"deb https://dl.bintray.com/sbt/debian /\" | sudo tee -a /etc/apt/sources.list.d/sbt.list',\n r'sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2EE0EA64E40A89B84B2DF73499E82A75642AC823',\n r'sudo apt-get update',\n r'sudo apt-get install sbt',\n r'sudo bash -c \"echo \\\"{}\\\" > /usr/local/etc/master\"'.format(master_url),\n # NOTE: This depends on the instance type chosen.\n r'sudo bash -c \"echo spark.executor.memory 25g > {}/conf/spark-defaults.conf\"'.format(spark_home),\n r'sudo {}'.format(start_master)\n ])",
"def connect(self):\n\t\tself.stream = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.stream.connect((self.host, self.port))\n\t\t# timeout after 5 seconds\n\t\tself.stream.settimeout(5)",
"def __init__(self, socketExecuter, host, port):\n self.__host = host\n self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.__socket.bind((self.__host, port))\n self.__socket.listen(500)\n self.__port = self.__socket.getsockname()[1]\n self.__SUPER = super(TCPServer, self)\n self.__SUPER.__init__(socketExecuter, self.__socket, \"TCP\")\n self.__ssl_info = None\n self.__logString = \"root.litesockets.TCPServer:{}\".format(self)\n self.__log = logging.getLogger(self.__logString)\n self.__log.info(\"New Server Created\")",
"def connect(self, secureMode=SECURE_DISABLE, useResolver=True):\n\t\tsock = transports.TCPSocket(useResolver)\n\t\tconnectType = sock.plugIn(self)\n\t\tif not connectType: \n\t\t\tsock.plugOut()\n\t\t\treturn None\n\t\tself.connectType = C_TCP\n\t\tisSSLPort = self.port in (5223, 443)\n\t\tif (secureMode == SECURE_AUTO and isSSLPort) or secureMode == SECURE_FORCE:\n\t\t\t# FIXME. This should be done in transports.py\n\t\t\ttry:\n\t\t\t\ttransports.TLS().plugIn(self, forceSSL=True)\n\t\t\t\tself.connectType = C_SSL\n\t\t\texcept socket.sslerror:\n\t\t\t\tself.TLS.PlugOut()\n\t\t\t\treturn None\n\t\tdispatcher.Dispatcher().plugIn(self)\n\t\twhile self.Dispatcher.stream._document_attrs is None:\n\t\t\tif not self.process(1):\n\t\t\t\treturn None\n\t\t# If we get version 1.0 stream the features tag MUST BE presented\n\t\tif self.Dispatcher.stream._document_attrs.get(\"version\") == \"1.0\":\n\t\t\twhile not self.Dispatcher.features and self.process(1):\n\t\t\t\tpass\n\t\tif secureMode == SECURE_AUTO and not isSSLPort:\n\t\t\t# If we get version 1.0 stream the features tag MUST BE presented\n\t\t\tif self.Dispatcher.stream._document_attrs.get(\"version\") == \"1.0\":\n\t\t\t\ttransports.TLS().plugIn(self)\n\t\t\t\tif transports.TLS_UNSUPPORTED == self.TLS.state:\n\t\t\t\t\tself.TLS.PlugOut()\n\t\t\t\t\treturn self.connectType\n\t\t\t\twhile not self.TLS.state and self.process(1):\n\t\t\t\t\tpass\n\t\t\t\tif self.TLS.state != transports.TLS_SUCCESS:\n\t\t\t\t\tself.TLS.plugOut()\n\t\t\t\t\treturn None\n\t\t\t\tself.connectType = C_TLS\n\t\treturn self.connectType",
"def __init__(self, stream):\n self.send = stream.send",
"def __init__(self, transport, stream_id=\"default\"):\n self.transport = transport\n self.stream_id = None\n self.stream_id_buff = None\n self.stream_id_length = 0\n self.set_stream_id(stream_id)\n self.transport.connect()",
"def start(self, *args, **kwargs):\n self.socket = Socket(*args, **kwargs)\n self.socket.bind(self.address)\n self.socket.listen(1)",
"def __init__(self, host, port):\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((\"%s\" % host, port))\n self.client_socket = client_socket",
"def __init__(self, host, port):\r\n self.host = host = str(host)\r\n self.port = port = int(port)\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect((host, port))\r\n io = inputoutput.SocketIO(sock)\r\n super(SocketGateway, self).__init__(io=io)\r\n self.remoteaddress = '%s:%d' % (self.host, self.port)",
"def initStream(self):\n self.out.write(\"<stream>\")\n self.stream_initiated = True",
"def start_streaming(self):\n if (not self.is_connected()):\n self.message_string = 'Board is not connected.'\n return\n\n if (not (self.is_streaming)):\n self.message_string = 'Started streaming'\n self.port.reset_input_buffer()\n self.port.write(START_STREAMING_CMD.encode('utf-8'))\n self.is_streaming = True\n self.read_state = 0\n self.skipped_bytes = 0\n read_thread = threading.Thread(target=self.collect_data)\n read_thread.daemon = True\n read_thread.start()\n self.samples_counter = 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates a cycle of `num_chunks` chunks from `array`. If `repeat` is False, generates one cycle only.
|
def chunk_generator(array, num_chunks, repeat=True):
chunk_len = int(np.ceil(len(array) / num_chunks))
array_iter = iter(array)
while True:
subset = tuple(itertools.islice(array_iter, chunk_len))
if len(subset) > 0:
yield subset
elif repeat:
array_iter = iter(array)
else:
return
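
A short usage example, assuming itertools and numpy are imported as the function requires: with seven items split into three chunks the chunk length is ceil(7/3) = 3, and the generator wraps around once the input is exhausted because repeat defaults to True.

import itertools
import numpy as np

gen = chunk_generator(list(range(7)), num_chunks=3)
print(next(gen))  # (0, 1, 2)
print(next(gen))  # (3, 4, 5)
print(next(gen))  # (6,)
print(next(gen))  # (0, 1, 2) -- cycles back to the start because repeat=True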
|
[
"def repeat_or_chunk(data, chunk_size):\n if len(data) < chunk_size:\n repeats = chunk_size // len(data)\n if (repeats * len(data)) != chunk_size:\n logging.info('skipping something that does not divide four bars')\n data = []\n else:\n data = list(data) * repeats\n return [data]\n return chunk_iterator(data, chunk_size)",
"def gen_cycles(num_cycles, batch_size, cycle_length=2):\n sorted_idxes = tf.tile(tf.expand_dims(tf.range(batch_size), 0),\n [num_cycles, 1])\n sorted_idxes = tf.reshape(sorted_idxes, [batch_size, num_cycles])\n cycles = tf.reshape(tf.random.shuffle(sorted_idxes),\n [num_cycles, batch_size])\n cycles = cycles[:, :cycle_length]\n # Append the first index at the end to create cycle.\n cycles = tf.concat([cycles, cycles[:, 0:1]], axis=1)\n return cycles",
"def chunks_by_element(arr, n):\n return [arr[i:i+n] for i in range(0, len(arr), n)]",
"def _worker_batches_in_numpy_array(numpy_inputs, batch_size, shift_ratio):\n numpy_inputs = cnn_util.roll_numpy_batches(numpy_inputs, batch_size,\n shift_ratio)\n i = 0\n total_batches = numpy_inputs.shape[0]\n assert total_batches % batch_size == 0\n while True:\n yield numpy_inputs[i:i + batch_size, ...]\n i = (i + batch_size) % total_batches",
"def generate_slices(n, slice_size, allow_smaller_final_batch=True):\n start, end = 0, 0\n for pack_num in range(int(n / slice_size)):\n end = start + slice_size\n yield slice(start, end, None)\n start = end\n # last slice might not be a full batch\n if allow_smaller_final_batch:\n if end < n:\n yield slice(end, n, None)",
"def array_chunk(array, size):\n counter = 0\n outer_list = []\n inner_list = []\n\n for item in array:\n if counter > 0 and not counter % size:\n outer_list.append(inner_list)\n inner_list = [item]\n else:\n inner_list.append(item)\n\n counter += 1\n \n outer_list.append(inner_list)\n \n return outer_list",
"def IterChunks(iterable, chunk_size, fill=None):\n for _, group in itertools.groupby(\n enumerate(iterable), lambda pair: pair[0] // chunk_size\n ):\n items = list(pair[1] for pair in group)\n while len(items) < chunk_size:\n items.append(fill)\n yield tuple(items)",
"def _chunk_numpy_array(data, chunk_size):\n\n chunk_y, chunk_x = chunk_size\n ny, nx = data.shape[0:2]\n chunk_y, chunk_x = min(chunk_y, ny), min(chunk_x, nx)\n\n def _get_slice(n1, n2):\n data_slice = data[\n slice(n1 * chunk_y, min(n1 * chunk_y + chunk_y, ny)),\n slice(n2 * chunk_x, min(n2 * chunk_x + chunk_x, nx)),\n ]\n # Wrap the slice into a list wiht appropriate dimensions\n for _ in range(2, data.ndim):\n data_slice = [data_slice]\n return data_slice\n\n # Chunk the numpy array and assemble it as a dask array\n data_dask = da.block(\n [\n [_get_slice(_1, _2) for _2 in range(int(math.ceil(nx / chunk_x)))]\n for _1 in range(int(math.ceil(ny / chunk_y)))\n ]\n )\n\n return data_dask",
"def gen_chunks(start, end, stride):\n for i, num in enumerate(xrange(start, end, stride)):\n yield num, num + stride",
"def iterchunks(data, chunksize):\n offt = 0\n while offt < len(data):\n yield data[offt:offt+chunksize]\n offt += chunksize",
"def chunk_generator( sequence, kmer_len ):\n\tchunks = []\n\tfor i in range( len( sequence ) / kmer_len ):\n\t\tchunks.append( sequence[ i*kmer_len : (i+1) * kmer_len ] )\n\treturn chunks",
"def block_shuffle(arr, block_size, inside=False):\n\n if isinstance(arr, int):\n n = arr\n arr = np.arange(n)\n else:\n n = arr.shape[0]\n\n if block_size == 1:\n np.random.shuffle(arr)\n return arr\n\n assert block_size > 0\n assert isinstance(block_size, int)\n assert n % block_size == 0\n n_blocks = n // block_size\n\n if inside:\n idx = np.arange(n)\n for i in range(0, n, block_size):\n np.random.shuffle(idx[i:i+block_size])\n return arr[idx]\n\n else:\n idx_block = np.arange(n_blocks)\n np.random.shuffle(idx_block)\n idx_ele = expand_block_indices(idx_block=idx_block, block_size=block_size, squeeze=True)\n return arr[idx_ele]",
"def chunks_by_piece(arr, m):\n n = int(math.ceil(len(arr) / float(m)))\n return [arr[i:i + n] for i in range(0, len(arr), n)]",
"def split_arr(arr, stride=10, kernel=128):\n\n num_frames = arr.shape[0]\n\n if num_frames < (kernel + stride):\n split_arr = np.zeros((1, 50, kernel))\n split_arr[0, 0:25, :] = arr[0:kernel, :, 0].T\n split_arr[0, 25:, :] = arr[0:kernel, :, 1].T\n else:\n n_copies = int((num_frames - kernel) / stride)\n split_arr = np.zeros((n_copies, 50, kernel))\n for i in range(n_copies):\n start = i * stride\n end = kernel + i * stride\n split_arr[i, 0:25, :] = arr[start:end, :, 0].T\n split_arr[i, 25:, :] = arr[start:end, :, 1].T\n\n return split_arr",
"def repeat(a, repeats, axis):\n return cpp.repeat(a, repeats, axis)",
"def cycle(num_vertices):\n\n # Instantiate a Graph\n pattern = Graph()\n # Populate it\n for u in range(num_vertices):\n pattern.add_edge(u, (u + 1) % num_vertices)\n # Return the cycle\n return pattern",
"def chunks(seq, num):\n\n avg = len(seq) / float(num)\n out = []\n last = 0.0\n\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n\n return out",
"def _chunking(ds, dim=\"time\", number_chunks=False, chunk_length=False):\n if number_chunks and not chunk_length:\n chunk_length = np.floor(ds[dim].size / number_chunks)\n cmin = int(ds[dim].min())\n elif not number_chunks and chunk_length:\n cmin = int(ds[dim].min())\n number_chunks = int(np.floor(ds[dim].size / chunk_length))\n else:\n raise KeyError(\"set number_chunks or chunk_length to True\")\n c = ds.sel({dim: slice(cmin, cmin + chunk_length - 1)})\n c = c.expand_dims(\"c\")\n c[\"c\"] = [0]\n for i in range(1, number_chunks):\n c2 = ds.sel(\n {dim: slice(cmin + chunk_length * i, cmin + (i + 1) * chunk_length - 1)}\n )\n c2 = c2.expand_dims(\"c\")\n c2[\"c\"] = [i]\n c2[dim] = c[dim]\n c = xr.concat([c, c2], \"c\")\n return c",
"def ncycles(iterable, n):\n return chain.from_iterable((repeat(tuple(iterable), n)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the next round of testing subjects.
|
def next_round(self):
testing_round = []
if self.schedule[self.day]:
testing_round = next(self._tested_chunks)
self.day = (self.day + 1) % len(self.schedule)
return testing_round
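
next_round expects an object carrying a schedule, a day counter, and a _tested_chunks generator, plausibly produced by chunk_generator above. A minimal illustrative container is sketched below; every name except next_round itself is an assumption.

class TestingSchedule:
    def __init__(self, subjects, schedule, num_chunks):
        self.schedule = schedule                    # e.g. [True, False]: test every other day
        self.day = 0
        self._tested_chunks = chunk_generator(subjects, num_chunks)

    next_round = next_round                         # reuse the module-level function above

# ts = TestingSchedule(subjects=list(range(20)), schedule=[True, False], num_chunks=5)
# ts.next_round()  # -> first chunk of subjects on a testing day, an empty list otherwise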
|
[
"def next_trial(self):\n next_trial = None\n if self._challenger_list:\n next_trial = self._challenger_list.pop()\n return next_trial",
"def next_trial(self):\n try:\n trial = next(self.trial_iter)\n except StopIteration:\n return None\n\n return trial",
"def test_next_round_complete(self):\n lesson = Lesson(self.student)\n lesson.level = 3\n lesson.round_num = len(levels[3].rounds)\n lesson.next_round()\n # assert lesson.level == 4\n assert lesson.complete",
"def get_next_assessment_taken(self):\n return # osid.assessment.AssessmentTaken",
"def test_get_next_rule_name():\n obj = k_Sequitur(2)\n string = [0, 1, 2, 3, 2, 3, 0, 1, \"/\"]\n assert obj.next_rule_name_ix == 0\n\n obj.generate_1_layer_of_rules(string)\n assert obj.next_rule_name_ix == 2\n\n obj.generate_1_layer_of_rules(string)\n assert obj.next_rule_name_ix == 4\n\n for ix in range(80):\n obj.generate_1_layer_of_rules(string)\n assert obj.next_rule_name_ix == 4 + (1+ix)*2",
"def test_choose_next_trial_from_queue(\n self,\n default_asha_state_and_search_method: Tuple[searcher.SearcherState, ASHADSATSearchMethod],\n ) -> None:\n searcher_state, search_method = default_asha_state_and_search_method\n search_method.trial_tracker.queue.clear()\n hparams, search_data = search_method.get_random_hparams_and_search_data(1)\n # Create an arbitrary counter to differentiate hparams and avoid the duplicate check in\n # `queue_and_register_trial`.\n arbitrary = 0\n\n # Create a curr_rung = 0 lineage\n trial = None\n hparams = copy.deepcopy(hparams)\n hparams[\"_arbitrary\"] = arbitrary\n arbitrary += 1\n trial = search_method.trial_tracker.create_trial(\n hparams=hparams, search_data=copy.deepcopy(search_data), parent_trial=trial\n )\n search_method.trial_tracker.queue_and_register_trial(trial)\n assert trial.searcher_metric_name\n search_method.trial_tracker.update_trial_metric(trial, {trial.searcher_metric_name: 0.0})\n\n # Create several curr_rung = 1 lineages of varying lengths\n for num_in_lineage in range(1, 3):\n trial = None\n for _ in range(num_in_lineage):\n hparams = copy.deepcopy(hparams)\n hparams[\"_arbitrary\"] = arbitrary\n arbitrary += 1\n search_data = copy.deepcopy(search_data)\n search_data.curr_rung = 1\n trial = search_method.trial_tracker.create_trial(\n hparams=hparams, search_data=search_data, parent_trial=trial\n )\n search_method.trial_tracker.queue_and_register_trial(trial)\n assert trial.searcher_metric_name\n search_method.trial_tracker.update_trial_metric(\n trial, {trial.searcher_metric_name: 0.0}\n )\n\n # Get the next trial:\n next_trial = search_method.choose_next_trial_from_queue()\n assert next_trial.search_data\n assert isinstance(next_trial.search_data, ASHADSATSearchData)\n assert next_trial.search_data.curr_rung == 1\n assert next_trial.num_completed_trials_in_lineage == num_in_lineage",
"def get_next_assessment(self):\n return # osid.assessment.Assessment",
"def getNextRepeat(self):\n return self.next_repeat",
"def test_get_subject(self):\n pass",
"def test_next_msg1(self):\n msgid=\"\"\n for i in range(1,50):\n msgid = self.mq.put(\"testQ4\", i)\n\n for i in range(50,100):\n self.mq.put(\"testQ4\", i)\n\n self.mq.get(\"testQ4\",msgid)\n self.assertEquals(50, self.mq.nextmsg(\"testQ4\"))",
"def get_next_assessments_taken(self, n):\n return # osid.assessment.AssessmentTaken",
"def NextTest(self):\n if len(self.videoList) > 0:\n nextVideo = self.videoList[0]\n self.videoList.pop(0)\n return Test(self.user, self.testId, nextVideo)\n else:\n return None",
"def test_next_msg2(self):\n msgid=\"\"\n for i in range(1,50):\n msgid = self.mq.put(\"testQ5\", i)\n self.mq.get(\"testQ5\", msgid)\n\n for i in range(50,100):\n self.mq.put(\"testQ5\", i)\n\n self.assertEquals(50, self.mq.nextmsg(\"testQ5\"))",
"def next_round(self):\n level_rules = levels[self.level]\n # If the student has for some reason completed more rounds\n # than exist for the level, increment the level\n if self.round_num >= len(level_rules.rounds):\n # We expect that the highest level number will match the count of levels\n if self.level >= len(levels):\n self.complete = True\n return\n\n self.level += 1\n self.round_num = 1\n\n else:\n self.round_num += 1\n\n self.round_rules = levels[self.level].rounds[self.round_num - 1]",
"def get_next_match():\n pass",
"def get_next_assessments(self, n):\n return # osid.assessment.Assessment",
"def evaluate_subject(subject, TETR, FS):\r\n trainCorpusFolds, testCorpusFolds, testEpochFolds = extract_folds(subject, 'Hurst')\r\n OnsetTPRs = 0;\r\n \r\n for k in range(5):\r\n OnsetTPRs = train_classifier(OnsetTPRs, trainCorpusFolds, testCorpusFolds, testEpochFolds, k)\r\n \r\n OnsetTPR = OnsetTPRs/5;\r\n \r\n return OnsetTPR",
"def _get_next(self):\n if self.next:\n subject = self._get_subject_enc(self.next)\n next = ('<LINK REL=\"Next\" HREF=\"%s\">'\n % (url_quote(self.next.filename)))\n next_wsubj = ('<LI>' + _('Next message (by thread):') +\n ' <A HREF=\"%s\">%s\\n</A></li>'\n % (url_quote(self.next.filename),\n self.quote(subject)))\n else:\n next = next_wsubj = \"\"\n return next, next_wsubj",
"def get_next_unit(self):\n\n available_controllers = self.get_available_controllers()\n free_units = dict(filter(\n lambda x: x[0].assign_next_at == self.time_slot, available_controllers.items()))\n # print(type(free_units))\n sorted_units = sorted(free_units,\n key=lambda x: len(free_units[x]))\n # print(\"Sorted Units: \", sorted_units)\n return sorted_units[0]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests a node's state given the test's sensitivity and specificity.
|
def test_state(node_state, sensitivity=1., specificity=1.):
if node_state == "I":
# Patient is positive, true positive rate is sensitivity
positive = 1 == np.random.binomial(1, sensitivity)
else:
# Patient is negative, false positive rate is 1 - specificity
positive = 1 == np.random.binomial(1, 1 - specificity)
return positive
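
A quick sanity check of this test model with illustrative values: infected nodes should test positive at roughly the sensitivity, while non-infected nodes should test positive at roughly the false-positive rate 1 - specificity.

import numpy as np

np.random.seed(0)
infected = [test_state("I", sensitivity=0.9, specificity=0.95) for _ in range(10_000)]
healthy = [test_state("S", sensitivity=0.9, specificity=0.95) for _ in range(10_000)]
print(np.mean(infected))  # ~0.90 (true-positive rate)
print(np.mean(healthy))   # ~0.05 (false-positive rate)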
|
[
"def visitCase(self, testCase):",
"def test_binary_decision_function(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def test_run_feature_selection(self):",
"def test_002_ironic_node_actions(self):\n # Step 1\n fail_msg = \"Error creating node.\"\n self.node = self.verify(20, self.node_create, 1, fail_msg,\n 'Node creation', driver='fake',\n extra={'NodeTest': ''})\n LOG.debug(self.node)\n # Step 2\n prop = rand_name(\"ostf-prop\")\n value_prop = rand_name(\"prop-value\")\n fail_msg = \"Can't update node with properties.\"\n self.node = self.verify(20, self.node_update, 2, fail_msg,\n 'Updating node', self.node, prop, value_prop)\n LOG.debug(self.node)\n # Step 3\n fail_msg = \"Can't show node properties.\"\n self.node = self.verify(20, self.node_show, 3, fail_msg,\n 'Showing node', self.node)\n LOG.debug(self.node)\n for p, v in self.node.properties.items():\n self.verify(5, self.assertTrue, 3, \"Can't check node property.\",\n 'Checking node property', prop in p)\n self.verify(5, self.assertTrue, 3, \"Can't check property value.\",\n 'Checking property value', value_prop in v)\n # Step 4\n fail_msg = \"Can't delete node.\"\n self.verify(20, self.node_delete, 4, fail_msg, 'Deleting node',\n self.node)",
"def TestSplit(self,node):\n if self.config.stop==\"CaseInNode\":\n sprime=self.FindBestSplit(node)\n if sprime is None:\n node.set_pred(self.target.loc[node.index].mean())\n return False\n else: \n node.set_info(**sprime)\n return True",
"def test_evaluate(self):\n\t\tpass",
"def fidelity_test(self, *args):\n raise NotImplementedError('Subclasses must override fidelity_test().')",
"def run_test(test, host, force):\n LOG.info(\"\")\n LOG.info(\"{0:-^87s}\".format(\" \" + test['name'] + \" \"))\n if test['protocol'].lower() == 'tcp':\n tcp_test(host, test['port'])\n elif test['protocol'].lower() == 'http':\n http_test(test, host, force)\n elif test['protocol'].lower() == 'https':\n http_test(test, host, force)\n elif test['protocol'].lower() == 'noop':\n noop_test()",
"def test_manually_select_nodes_to_constrain(): # ***Incomplete test\n ##########################\n # Arrange.\n input_tree_one = \"input_tree_one\"\n name_replace_table = \"name_replace_table\"\n\n ##########################\n # Act.\n #x = manually_select_nodes_to_constrain(input_tree_one,\n #\t\tname_replace_table)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_action_network(self):\n raise NotImplementedError",
"def testSingleClass(self, level=1):\n params = svm.svm_parameter(kernel_type = svm.RBF, C = 10)\n node = BinarySVMNode(2,1,params)\n node.train(self.sc_samples, self.sc_labels)\n node.stop_training()\n \n testresult = node(self.sc_samples)\n \n # rescale from SVM output [-1,1] to [0,1]\n testresult = (testresult+1) / 2.\n \n # test if labels are the same as the test output\n assert_array_almost_equal(self.sc_labels, testresult, 2)",
"def testMultiClass(self, level=1):\n params = svm.svm_parameter(kernel_type = svm.RBF, C = 10)\n node = BinarySVMNode(2,4,params)\n node.train(self.mc_samples, self.mc_labels)\n node.stop_training()\n \n testresult = node(self.mc_samples)\n \n # test if labels are the same as the test output\n assert_array_almost_equal(self.mc_labels, testresult, 2)",
"def test_get_decision_tree_using_get(self):\n pass",
"def is_applicable(node):\n\n pass",
"def test_predict(self):\n assert 2 == 2",
"def generate_test_cases(ukernel, init_fn, mr, nr, k_block, is_pipelined, isa):\n _, test_name = ukernel.split(\"_\", 1)\n _, datatype, ukernel_type, _ = ukernel.split(\"_\", 3)\n test_args = [ukernel, init_fn]\n return xngen.preprocess(TEST_TEMPLATE, {\n \"TEST_NAME\": test_name.upper().replace(\"UKERNEL_\", \"\"),\n \"TEST_ARGS\": test_args,\n \"UKERNEL_TYPE\": ukernel_type.upper(),\n \"DATATYPE\": datatype,\n \"MR\": mr,\n \"NR\": nr,\n \"KBLOCK\": k_block,\n \"ADJKBLOCK\": 2 * k_block if is_pipelined else k_block,\n \"IS_PIPELINED\": is_pipelined,\n \"ISA_CHECK\": xnncommon.generate_isa_check_macro(isa),\n \"next_prime\": next_prime,\n })",
"def test(self, test_data):\n self.model.eval()\n if self.is_bert:\n return self._test_bert(test_data)\n else:\n return self._test_linear_layer_crf(test_data)",
"def test_right_feature_and_wrong_story():\n pass",
"def test_decision_tree(trainx, trainy, testx, testy, max_depth=6):\n\tclf = tree.DecisionTreeClassifier(max_depth=max_depth)\n\tdt = clf.fit(trainx, trainy)\n\tprint(\"max_depth = {}\".format(max_depth))\n\tprint(\"train accuracy: {}\".format(dt.score(trainx, trainy)))\n\tprint(\"test accuracy: {}\".format(dt.score(testx, testy)))\n\treturn dt"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create a simple plot of the SIR curves.
|
def plot_SIR(S, I, R):
plt.figure()
plt.plot(S, label="S")
plt.plot(I, label="I")
plt.plot(R, label="R")
plt.legend()
plt.show()
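
For a quick check, the function can be fed a toy deterministic SIR trajectory; matplotlib.pyplot must be imported as plt for the function itself, and the parameter values below are illustrative only.

import matplotlib.pyplot as plt

N, beta, gamma = 1000.0, 0.3, 0.1
S, I, R = [N - 1.0], [1.0], [0.0]
for _ in range(160):
    new_infections = beta * S[-1] * I[-1] / N
    new_recoveries = gamma * I[-1]
    S.append(S[-1] - new_infections)
    I.append(I[-1] + new_infections - new_recoveries)
    R.append(R[-1] + new_recoveries)
plot_SIR(S, I, R)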
|
[
"def signal_plot(t, y, **kwargs):\n\n\n fun = kwargs['vin']\n\n plt.figure(figsize=kwargs['figsize'])\n (plt.plot(t, fun(t), 'r', linewidth = 2, label = 'Input'),\n plt.plot(t, y[1].T, 'b', linewidth = 2, label = \"Out \"),\n plt.plot(t, y[0].T*0.2, 'orange', linewidth = 2, label = 'Change in S (Scaled 1 to 0.2)'),\n plt.xlabel('Time [s]'), plt.ylabel('Out [Adm]'),\n plt.title('Dynamic System Evolution'),\n plt.grid(), plt.legend(), plt.axis([0,np.max(t)*1.10, np.min(y*0.2)*1.1, np.max(y*0.2)*1.1]),\n plt.show())",
"def sn_plot(self):\n import matplotlib.pyplot as plt\n\n # Plot of the basic SN curve according to GL2010\n sigma_1 = self.Rp * (1 - self.R) / self.gamma_M\n # Number of load cycles at upper fatigue limit\n N_1 = self.N_D * (2 * self.sigma_D / sigma_1) ** self.m1\n N_e = 10 ** 9\n sigma_e = (self.N_D / N_e) ** (1 / self.m2) * self.sigma_D\n x = [0, N_1, self.N_D, N_e]\n y = [sigma_1, sigma_1, self.sigma_D, sigma_e]\n plt.loglog(x, y, lw=2, marker=\"*\")\n plt.xlabel(\"Cycle Numbers\")\n plt.ylabel(\"Stress Amplitude/MPa\")\n plt.xlim(10, 10 ** 9)\n plt.yticks([10, 100, 1000])\n plt.annotate(s=\"(%.2e,%.2f)\" % (N_1, sigma_1), xy=(N_1, sigma_1))\n plt.annotate(\n s=\"(%.2e,%.2f)\" % (self.N_D, self.sigma_D), xy=(self.N_D, self.sigma_D)\n )\n plt.annotate(s=\"m1=%.2f\" % self.m1, xy=(10 ** 3, 142))\n plt.annotate(s=\"m2=%.2f\" % self.m2, xy=(10 ** 7, 40))\n plt.show()\n return",
"def plotS4noi(self):\n\n close(1)\n figure(1, figsize=(5,5))\n \n SPT = np.loadtxt('SPT_Nl_irreg.csv', delimiter=',').T\n\n cl = self.d.S4noisecl\n l = np.arange(cl[0].size)\n ll = np.arange(10000)\n\n loglog(SPT[0], SPT[1], '.k', label='SPTpol 500 deg$^2$')\n fun = lambda l, sigmap, lknee, lexp: np.log(4*np.pi / (41253.*60**2) * (1+(l/lknee)**lexp) * sigmap**2)\n #popt, pcov = curve_fit(fun, SPT[0], np.log(SPT[1]))\n #loglog(ll, np.exp(fun(ll,*popt)), 'k', label=r'SPT model (${:0.2f}\\ \\mu K\\ arcmin, \\ell_{{knee}}={:0.1f}, \\expon.={:0.2f}$)'.format(*popt))\n sigmap = 9.4; lknee = 250.0; lexp = -1.8\n loglog(ll, np.exp(fun(ll,sigmap,lknee,lexp)),'k', label=r'SPT best fit ($9.4 \\mu K\\ arcmin$)')\n\n loglog(l,cl[0], label='S4 sim (rlz 1)', color='gray')\n sigmap = 1.2; lknee = 250.0; lexp = -1.8\n loglog(ll, np.exp(fun(ll,sigmap,lknee,lexp)),'k', label=r'S4 best fit ($1.2 \\mu K\\ arcmin$)')\n \n xlim(20,6000)\n ylim(1e-7,1e-3)\n grid('on')\n\n legend(loc='upper right')\n\n xlabel(r'Multipole $\\ell$')\n ylabel(r'$C_{\\ell}^{EE,noise} [\\mu K^2]$')\n \n tight_layout()\n savefig('figs/S4Nl.pdf', bbox_inches='tight', pad_inches=0)",
"def plot_SI(quark_eff, gluon_eff, color = 'blue', label = '',\n xlim = [.1, 1], ylim = [0, 3], show = True):\n\n plt.plot(*SI(quark_eff, gluon_eff), color = color, label = label)\n plt.xticks(np.linspace(0,1,11))\n plt.xlim(*xlim)\n plt.ylim(*ylim)\n plt.xlabel('Quark Signal Efficiency')\n plt.ylabel('Significance Improvement')\n if show:\n plt.show()",
"def u_sines():\n import matplotlib.pyplot as plt\n x = np.linspace(0, 4, 1001)\n psi0 = np.sin(2*np.pi/4*x)\n psi1 = np.sin(2*np.pi*x)\n psi2 = np.sin(2*np.pi*4*x)\n #u = 4*psi0 - 0.5*psi1 - 0*psi2\n u = 4*psi0 - 0.5*psi1\n plt.plot(x, psi0, 'r-', label=r\"$\\psi_0$\")\n plt.plot(x, psi1, 'g-', label=r\"$\\psi_1$\")\n #plt.plot(x, psi2, label=r\"$\\psi_2$\")\n plt.plot(x, u, 'b-', label=r\"$u=4\\psi_0 - \\frac{1}{2}\\psi_1$\")\n plt.legend()\n plt.savefig('u_example_sin.pdf')\n plt.savefig('u_example_sin.png')\n plt.show()",
"def plotSpikes(self):\n self.getCompleteSpikeTimes()\n b=np.ones_like(self.completeSpikeTimes)\n matplotlib.pyplot.plot(b)\n matplotlib.pyplot.eventplot(self.spikeTimes)\n matplotlib.pyplot.xlabel(\"time\") \n matplotlib.pyplot.title(\"single neuron raster plot of Neuron \"+self.name)\n matplotlib.pyplot.show()",
"def plot_averaged_SIRs(SIRs,\r\n\t\t\t\t\t max_t=None,\r\n\t\t\t\t\t lines_to_plot=\"IR\",\r\n\t\t\t\t\t means_to_plot=\"SIR\",\r\n\t\t\t\t\t figname=\"SIRs.png\",\r\n\t\t\t\t\t figtitle=None,\r\n\t\t\t\t\t show_plot=False,\r\n\t\t\t\t\t save_data=False):\r\n\r\n\tcompartments = (\"S\", \"I\", \"R\")\r\n\tcolors = {\"S\": u'#1f77b4', \"I\": u'#ff7f0e', \"R\": u'#2ca02c'}\r\n\r\n\tif max_t is None:\r\n\t\tmax_t = max(len(line) for SIR in SIRs for line in SIR)\r\n\r\n\tlines_shape = (len(SIRs), max_t+1)\r\n\tS_lines = np.zeros(lines_shape) + np.nan\r\n\tI_lines = np.zeros(lines_shape) + np.nan\r\n\tR_lines = np.zeros(lines_shape) + np.nan\r\n\r\n\t# Create multi-array of all SIR curves up to max_t\r\n\tfor i, SIR in enumerate(SIRs):\r\n\t\tS, I, R = SIR\r\n\t\tS, I, R = np.array(S), np.array(I), np.array(R)\r\n\t\tS_lines[i, :S.shape[0]] = S[:max_t+1]\r\n\t\tI_lines[i, :I.shape[0]] = I[:max_t+1]\r\n\t\tR_lines[i, :R.shape[0]] = R[:max_t+1]\r\n\r\n\t# Forward fill final values from simulation\r\n\tS_lines = ffill(S_lines)\r\n\tI_lines = ffill(I_lines)\r\n\tR_lines = ffill(R_lines)\r\n\r\n\t# Pack lines in a dict\r\n\tSIR_lines = {\"S\": S_lines, \"I\": I_lines, \"R\": R_lines}\r\n\r\n\t# Plot the averages of S, I, and R curves\r\n\tfig = plt.figure(figsize=(13, 8))\r\n\tfor comp in compartments:\r\n\t\tif comp in means_to_plot:\r\n\t\t\tplt.plot(SIR_lines[comp].mean(0),\r\n\t\t\t\t\t label=comp, color=colors[comp], linewidth=3)\r\n\r\n\t# Plot all I curves to visualize simulation runs\r\n\tfor comp in compartments:\r\n\t\tif comp in lines_to_plot:\r\n\t\t\tfor comp_line in SIR_lines[comp]:\r\n\t\t\t\tplt.plot(comp_line, color=colors[comp], linewidth=0.5)\r\n\r\n\t# Record peak of I's average and average of I's peaks\r\n\tpeak_I_avg = SIR_lines[\"I\"].mean(0).max()\r\n\tavg_I_peak = SIR_lines[\"I\"].max(1).mean()\r\n\t# Mark max avg. I and avg. max I\r\n\tplt.axhline(y=peak_I_avg, color=colors[\"I\"], linestyle=\":\",\r\n\t\tlabel=f\"peak of I avg. = {peak_I_avg:.1f}\")\r\n\tplt.axhline(y=avg_I_peak, color=\"r\", linestyle=\"--\",\r\n\t\tlabel=f\"Avg. of I peaks = {avg_I_peak:.1f}\")\r\n\r\n\t# Configure plot, show, and save\r\n\tplt.legend()\r\n\tplt.grid(which=\"major\")\r\n\tif figtitle is None:\r\n\t\tplt.title(f\"SIR Curves of {len(SIRs)} Simulations\")\r\n\telse:\r\n\t\tplt.title(figtitle)\r\n\r\n\t# Show plot\r\n\tif show_plot:\r\n\t\tplt.show()\r\n\tif figname is not None:\r\n\t\tfig.savefig(figname)\r\n\r\n\t# Save data\r\n\tif save_data:\r\n\t\t# Choose appropriate name, matching with figname if possible\r\n\t\tif figname is None:\r\n\t\t\tfname = \"SIR_data.pkl\"\r\n\t\telse:\r\n\t\t\tbasename = figname.split(\".\")[0] or \"SIR_data\"\r\n\t\t\tfname = basename + \".pkl\"\r\n\t\t# Pickle data\r\n\t\twith open(fname, \"wb\") as f:\r\n\t\t\tpickle.dump(SIR_lines, f)",
"def plot_sparsity(self, plot_dir=\".\"):\n # this is the call to matplotlib that allows dynamic plotting\n plt.ion()\n # create a variable for the line so we can later update it\n plt.plot([x[0] for x in self._sparse_vec], [100 - x[1] for x in self._sparse_vec], '-o', alpha=0.8)\n # update plot label/title\n plt.ylim([-1, 110])\n plt.xlim([-1, self._training_steps + 1])\n plt.ylabel('NNZ')\n plt.xlabel('Training Step')\n plt.title('Non Zero Values')\n plt.savefig(os.path.join(plot_dir, \"sparsity_graph.png\"))\n plt.show()\n plt.pause(0.05)",
"def show(self, fig=100):\n plt.figure(fig)\n plt.clf()\n ysnew = self.forward_curve(self.xsr)\n plt.plot(self.xsr, ysnew, '-r', linewidth=4, label='spline')\n plt.plot(self.xsr, self.forward(self.xsr), ':k', label='linear fit')\n plt.legend(numpoints=1)",
"def PlotTransmissioncurve(self):\n from matplotlib import pyplot as plt\n t = self.Transmissioncurve()\n fig, ax = plt.subplots()\n ax.set_xlabel(r\"$\\lambda\\quad [{\\rm \\AA}]$\")\n ax.set_ylabel(r\"$T_{\\lambda}$\")\n plt.plot(t.array['Wavelength'], t.array['Transmission'])",
"def show_sensfunc(self):\n if self.sens_dict is None:\n msgs.warn(\"You need to generate the sensfunc first!\")\n return None\n plt.rcdefaults()\n plt.rcParams[\"xtick.top\"] = True\n plt.rcParams[\"ytick.right\"] = True\n plt.rcParams[\"xtick.minor.visible\"] = True\n plt.rcParams[\"ytick.minor.visible\"] = True\n plt.rcParams[\"ytick.direction\"] = 'in'\n plt.rcParams[\"xtick.direction\"] = 'in'\n plt.rcParams[\"xtick.labelsize\"] = 13\n plt.rcParams[\"ytick.labelsize\"] = 13\n plt.rcParams['font.family'] = 'times new roman'\n norder = self.sens_dict['norder']\n for iord in range(norder):\n sens_dict_iord = self.sens_dict[str(iord)]\n plt.plot(sens_dict_iord['wave'], sens_dict_iord['sensfunc'])\n plt.xlabel('Wavelength [ang]', fontsize=14)\n plt.ylabel('Sensfunc', fontsize=14)\n plt.ylim([0., 100.0])\n plt.show()",
"def scree_plot(svals, **kws):\n index = np.arange(1, len(svals) + 1)\n plt.plot(index, svals, marker='.', **kws)\n plt.xlabel(\"Component\")\n # plt.ylabel(\"Singular_value\")\n plt.xlim(0)\n # plt.ylim(0)",
"def show_sin_cos_graph():\n # X-Axis\n # Generate 10'000 numbers between 0 and 50\n # Documentation: https://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.linspace.html\n x = np.linspace(start=0,stop=50,num=10000) # 10'000 linearly spaced numbers\n\n # Y-Axis\n # y stores the results of the sinus function when adding the x-values\n # Documentation: https://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.sin.html\n y_sin = np.sin(x)\n\n # y_cos stores the results of the consinus function when adding the x-values\n y_cos = np.cos(x)\n\n # Create the figure\n fig = plt.figure()\n\n # Set a title\n fig.suptitle('Sinus and Cosinus curve')\n\n # Describe the axis\n plt.xlabel('Time [ms]')\n plt.ylabel('Voltage [V]')\n\n # Draw the functions\n plt.plot(x,y_sin, label=\"Sinus\") # Sinus curve\n plt.plot(x,y_cos, label=\"Cosinus\") # Cosinus curve\n\n # Show the legend\n legend = plt.legend(loc=1, shadow=True)\n\n # Show the image\n plt.show()",
"def plot(self):\n\n plt.plot(self.su[:, 0], self.su[:, 1], 'b', label=\"cubic spline\") # spline\n plt.plot(self.control_points[:, 0], self.control_points[:, 1], '-.r', label=\"control polygon\") # control polygon\n plt.scatter(self.control_points[:, 0], self.control_points[:, 1], color='red') # de Boor points\n\n plt.title(\"Cubic Spline\")\n plt.legend()\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.grid()\n plt.show()",
"def plot(self):\n import matplotlib.pyplot as plt\n plt.plot(self.lambdas, self.result['beta'] )\n plt.ylabel('Coefficient')\n plt.xlabel('Regularization Parameter')\n plt.suptitle('Regularization Path')\n plt.show()",
"def _plot_curve(x, y, title, x_lab, y_lab, save_path=False, show=False):\n plt.title(title)\n plt.plot(x, y, 'k')\n plt.plot([(0, 0), (1, 1)], 'r--')\n plt.xlim([-0.1, 1.1])\n plt.ylim([-0.1, 1.1])\n plt.ylabel(x_lab)\n plt.xlabel(y_lab)\n if save_path is not False:\n plt.savefig(save_path)\n if show:\n plt.show()",
"def plot_res(n):\n x = np.array([i for i in range(n)])\n y = gen_array_2(n)\n plt.plot(x, y, 'o')\n plt.show()",
"def plot_curve(self):\r\n plt.plot(self.k_range, self.k_error)\r\n plt.title(\"Error under different choice of K\")\r\n plt.xlabel(\"Value of K for KNN\")\r\n plt.ylabel(\"Error\")\r\n plt.show()",
"def splot(y, y0, yd, title=\"Denoising\"):\n fig = plt.figure(figsize=(20, 12))\n _y0 = y0[:2000]\n _y = y[:2000]\n _yd = yd[:2000]\n plt.subplot(221)\n plt.plot(_y0)\n plt.title('Raw signal :')\n plt.subplot(222)\n plt.plot(_y)\n plt.title('Noised signal')\n# plt.plot(utils.gaussian_filter(y, mu))\n# plt.title('Result for the gaussian filter - SNR :' + str(utils.snr(y0, utils.gaussian_filter(y, mu))))\n plt.subplot(223)\n plt.plot(_yd, \"r\")\n plt.plot(_y0, linewidth=2.5, alpha=0.3)\n plt.title('Denoised signal - SNR : %0.2f dB' % utils.snr(y0, yd))\n plt.subplot(224)\n plt.plot(_y0 - _yd)\n plt.title('Differences between raw and denoised signal :')\n fig.suptitle(title, fontsize=30, fontweight=\"bold\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Plot SIR curves and their averages, and show each individual infection curve on the plot too.
|
def plot_averaged_SIRs(SIRs,
max_t=None,
lines_to_plot="IR",
means_to_plot="SIR",
figname="SIRs.png",
figtitle=None,
show_plot=False,
save_data=False):
compartments = ("S", "I", "R")
colors = {"S": u'#1f77b4', "I": u'#ff7f0e', "R": u'#2ca02c'}
if max_t is None:
max_t = max(len(line) for SIR in SIRs for line in SIR)
lines_shape = (len(SIRs), max_t+1)
S_lines = np.zeros(lines_shape) + np.nan
I_lines = np.zeros(lines_shape) + np.nan
R_lines = np.zeros(lines_shape) + np.nan
# Create multi-array of all SIR curves up to max_t
for i, SIR in enumerate(SIRs):
S, I, R = SIR
S, I, R = np.array(S), np.array(I), np.array(R)
S_lines[i, :S.shape[0]] = S[:max_t+1]
I_lines[i, :I.shape[0]] = I[:max_t+1]
R_lines[i, :R.shape[0]] = R[:max_t+1]
# Forward fill final values from simulation
S_lines = ffill(S_lines)
I_lines = ffill(I_lines)
R_lines = ffill(R_lines)
# Pack lines in a dict
SIR_lines = {"S": S_lines, "I": I_lines, "R": R_lines}
# Plot the averages of S, I, and R curves
fig = plt.figure(figsize=(13, 8))
for comp in compartments:
if comp in means_to_plot:
plt.plot(SIR_lines[comp].mean(0),
label=comp, color=colors[comp], linewidth=3)
# Plot all I curves to visualize simulation runs
for comp in compartments:
if comp in lines_to_plot:
for comp_line in SIR_lines[comp]:
plt.plot(comp_line, color=colors[comp], linewidth=0.5)
# Record peak of I's average and average of I's peaks
peak_I_avg = SIR_lines["I"].mean(0).max()
avg_I_peak = SIR_lines["I"].max(1).mean()
# Mark max avg. I and avg. max I
plt.axhline(y=peak_I_avg, color=colors["I"], linestyle=":",
label=f"peak of I avg. = {peak_I_avg:.1f}")
plt.axhline(y=avg_I_peak, color="r", linestyle="--",
label=f"Avg. of I peaks = {avg_I_peak:.1f}")
# Configure plot, show, and save
plt.legend()
plt.grid(which="major")
if figtitle is None:
plt.title(f"SIR Curves of {len(SIRs)} Simulations")
else:
plt.title(figtitle)
# Show plot
if show_plot:
plt.show()
if figname is not None:
fig.savefig(figname)
# Save data
if save_data:
# Choose appropriate name, matching with figname if possible
if figname is None:
fname = "SIR_data.pkl"
else:
basename = figname.split(".")[0] or "SIR_data"
fname = basename + ".pkl"
# Pickle data
with open(fname, "wb") as f:
pickle.dump(SIR_lines, f)
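
This function depends on numpy, matplotlib.pyplot (as plt), pickle, and an ffill helper that is not shown here. A plausible implementation of that helper, assuming it forward-fills NaNs along each row so that shorter simulations are padded out with their final value, is:

import numpy as np

def ffill(arr):
    # Forward-fill NaNs along each row of a 2D array (assumed behavior of the helper above).
    mask = np.isnan(arr)
    idx = np.where(~mask, np.arange(arr.shape[1]), 0)
    np.maximum.accumulate(idx, axis=1, out=idx)
    return arr[np.arange(arr.shape[0])[:, None], idx]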
|
[
"def plot_SIR(S, I, R):\r\n\tplt.figure()\r\n\tplt.plot(S, label=\"S\")\r\n\tplt.plot(I, label=\"I\")\r\n\tplt.plot(R, label=\"R\")\r\n\tplt.legend()\r\n\tplt.show()",
"def Emergent_IntensityPlot():\n\tI_comp , mean = Emergent_Intensity()\n\n\tplt.title(\"Observed and computed continuum intensity\")\n\tplt.plot(wav, I_comp*1e-14, color = \"royalblue\", label = \"FALC\")\n\tplt.plot(wav,I, color = \"crimson\", label = \"Observed\")\n\tplt.legend()\n\tplt.grid(linestyle = \"--\")\n\tplt.xlabel(r\"Wavelength $\\lambda$ [$\\mu$ m]\")\n\tplt.ylabel(r\"Intensity [$10^{14}$ erg s$^{-1}$ cm$^{-2}$ ster$^{-1}$ $\\mu$m$^{-1}$]\")\n\tplt.legend()\n\tplt.subplots_adjust(bottom = 0.12)\n\t# plt.savefig(savepath3 + \"Observed_computed.pdf\")\n\tplt.show()\n\n\t# Printing info regarding the comparison\n\ti_lambda = np.argwhere(wav == 0.5)[0][0]\n\tprint (\"FALC: \", I_comp[i_lambda]*1e-14)\n\tprint (\"OBSERVED: \", I[i_lambda])\n\tprint (\"DEVIATION: \", 100*(I_comp[i_lambda]*1e-14-I[i_lambda])/(I[i_lambda]))\n\n\t# Pinting info for specific wavelengths\n\twl_list = [0.5, 1, 1.6, 5]\n\tfor wl in wl_list:\n\t\tprint(I_comp[(np.abs(wl-wav)).argmin()]*1e-14)",
"def plotSpikes(self):\n self.getCompleteSpikeTimes()\n b=np.ones_like(self.completeSpikeTimes)\n matplotlib.pyplot.plot(b)\n matplotlib.pyplot.eventplot(self.spikeTimes)\n matplotlib.pyplot.xlabel(\"time\") \n matplotlib.pyplot.title(\"single neuron raster plot of Neuron \"+self.name)\n matplotlib.pyplot.show()",
"def show(self, fig=100):\n plt.figure(fig)\n plt.clf()\n ysnew = self.forward_curve(self.xsr)\n plt.plot(self.xsr, ysnew, '-r', linewidth=4, label='spline')\n plt.plot(self.xsr, self.forward(self.xsr), ':k', label='linear fit')\n plt.legend(numpoints=1)",
"def signal_plot(t, y, **kwargs):\n\n\n fun = kwargs['vin']\n\n plt.figure(figsize=kwargs['figsize'])\n (plt.plot(t, fun(t), 'r', linewidth = 2, label = 'Input'),\n plt.plot(t, y[1].T, 'b', linewidth = 2, label = \"Out \"),\n plt.plot(t, y[0].T*0.2, 'orange', linewidth = 2, label = 'Change in S (Scaled 1 to 0.2)'),\n plt.xlabel('Time [s]'), plt.ylabel('Out [Adm]'),\n plt.title('Dynamic System Evolution'),\n plt.grid(), plt.legend(), plt.axis([0,np.max(t)*1.10, np.min(y*0.2)*1.1, np.max(y*0.2)*1.1]),\n plt.show())",
"def innovation_plot(self):\n\n mean_inn_Wsb = np.mean(self.est.inn_Wsb, axis=1)\n mean_inn_Tsb = np.mean(self.est.inn_Tsb, axis=1)\n mean_inn_Vsb = np.mean(self.est.inn_Vsb, axis=1)\n\n var_inn_Wsb = np.var(self.est.inn_Wsb, axis=1)\n var_inn_Tsb = np.var(self.est.inn_Tsb, axis=1)\n var_inn_Vsb = np.var(self.est.inn_Vsb, axis=1)\n\n plot_titles_Wsb = [\n \"mean/var: {0:10.3g}, {1:10.3g}\".format(mean_inn_Wsb[i], var_inn_Wsb[i])\n for i in range(3)\n ]\n plot_titles_Tsb = [\n \"mean/var: {0:10.3g}, {1:10.3g}\".format(mean_inn_Tsb[i], var_inn_Tsb[i])\n for i in range(3)\n ]\n plot_titles_Vsb = [\n \"mean/var: {0:10.3g}, {1:10.3g}\".format(mean_inn_Vsb[i], var_inn_Vsb[i])\n for i in range(3)\n ]\n time_three_plots(self.time_axis, self.est.inn_Tsb, r\"$T_{sb}$ innovation\",\n titles=plot_titles_Tsb)\n time_three_plots(self.time_axis, self.est.inn_Wsb, r\"$W_{sb}$ innovation\",\n titles=plot_titles_Wsb)\n time_three_plots(self.time_axis, self.est.inn_Vsb, r\"$V_{wb}$ innovation\",\n titles=plot_titles_Vsb)",
"def show_sensfunc(self):\n if self.sens_dict is None:\n msgs.warn(\"You need to generate the sensfunc first!\")\n return None\n plt.rcdefaults()\n plt.rcParams[\"xtick.top\"] = True\n plt.rcParams[\"ytick.right\"] = True\n plt.rcParams[\"xtick.minor.visible\"] = True\n plt.rcParams[\"ytick.minor.visible\"] = True\n plt.rcParams[\"ytick.direction\"] = 'in'\n plt.rcParams[\"xtick.direction\"] = 'in'\n plt.rcParams[\"xtick.labelsize\"] = 13\n plt.rcParams[\"ytick.labelsize\"] = 13\n plt.rcParams['font.family'] = 'times new roman'\n norder = self.sens_dict['norder']\n for iord in range(norder):\n sens_dict_iord = self.sens_dict[str(iord)]\n plt.plot(sens_dict_iord['wave'], sens_dict_iord['sensfunc'])\n plt.xlabel('Wavelength [ang]', fontsize=14)\n plt.ylabel('Sensfunc', fontsize=14)\n plt.ylim([0., 100.0])\n plt.show()",
"def sn_plot(self):\n import matplotlib.pyplot as plt\n\n # Plot of the basic SN curve according to GL2010\n sigma_1 = self.Rp * (1 - self.R) / self.gamma_M\n # Number of load cycles at upper fatigue limit\n N_1 = self.N_D * (2 * self.sigma_D / sigma_1) ** self.m1\n N_e = 10 ** 9\n sigma_e = (self.N_D / N_e) ** (1 / self.m2) * self.sigma_D\n x = [0, N_1, self.N_D, N_e]\n y = [sigma_1, sigma_1, self.sigma_D, sigma_e]\n plt.loglog(x, y, lw=2, marker=\"*\")\n plt.xlabel(\"Cycle Numbers\")\n plt.ylabel(\"Stress Amplitude/MPa\")\n plt.xlim(10, 10 ** 9)\n plt.yticks([10, 100, 1000])\n plt.annotate(s=\"(%.2e,%.2f)\" % (N_1, sigma_1), xy=(N_1, sigma_1))\n plt.annotate(\n s=\"(%.2e,%.2f)\" % (self.N_D, self.sigma_D), xy=(self.N_D, self.sigma_D)\n )\n plt.annotate(s=\"m1=%.2f\" % self.m1, xy=(10 ** 3, 142))\n plt.annotate(s=\"m2=%.2f\" % self.m2, xy=(10 ** 7, 40))\n plt.show()\n return",
"def plotS4noi(self):\n\n close(1)\n figure(1, figsize=(5,5))\n \n SPT = np.loadtxt('SPT_Nl_irreg.csv', delimiter=',').T\n\n cl = self.d.S4noisecl\n l = np.arange(cl[0].size)\n ll = np.arange(10000)\n\n loglog(SPT[0], SPT[1], '.k', label='SPTpol 500 deg$^2$')\n fun = lambda l, sigmap, lknee, lexp: np.log(4*np.pi / (41253.*60**2) * (1+(l/lknee)**lexp) * sigmap**2)\n #popt, pcov = curve_fit(fun, SPT[0], np.log(SPT[1]))\n #loglog(ll, np.exp(fun(ll,*popt)), 'k', label=r'SPT model (${:0.2f}\\ \\mu K\\ arcmin, \\ell_{{knee}}={:0.1f}, \\expon.={:0.2f}$)'.format(*popt))\n sigmap = 9.4; lknee = 250.0; lexp = -1.8\n loglog(ll, np.exp(fun(ll,sigmap,lknee,lexp)),'k', label=r'SPT best fit ($9.4 \\mu K\\ arcmin$)')\n\n loglog(l,cl[0], label='S4 sim (rlz 1)', color='gray')\n sigmap = 1.2; lknee = 250.0; lexp = -1.8\n loglog(ll, np.exp(fun(ll,sigmap,lknee,lexp)),'k', label=r'S4 best fit ($1.2 \\mu K\\ arcmin$)')\n \n xlim(20,6000)\n ylim(1e-7,1e-3)\n grid('on')\n\n legend(loc='upper right')\n\n xlabel(r'Multipole $\\ell$')\n ylabel(r'$C_{\\ell}^{EE,noise} [\\mu K^2]$')\n \n tight_layout()\n savefig('figs/S4Nl.pdf', bbox_inches='tight', pad_inches=0)",
"def u_sines():\n import matplotlib.pyplot as plt\n x = np.linspace(0, 4, 1001)\n psi0 = np.sin(2*np.pi/4*x)\n psi1 = np.sin(2*np.pi*x)\n psi2 = np.sin(2*np.pi*4*x)\n #u = 4*psi0 - 0.5*psi1 - 0*psi2\n u = 4*psi0 - 0.5*psi1\n plt.plot(x, psi0, 'r-', label=r\"$\\psi_0$\")\n plt.plot(x, psi1, 'g-', label=r\"$\\psi_1$\")\n #plt.plot(x, psi2, label=r\"$\\psi_2$\")\n plt.plot(x, u, 'b-', label=r\"$u=4\\psi_0 - \\frac{1}{2}\\psi_1$\")\n plt.legend()\n plt.savefig('u_example_sin.pdf')\n plt.savefig('u_example_sin.png')\n plt.show()",
"def plot_interpolated(self, inter_control, x, y):",
"def error_plot():\n\n global b,V,I,w,dt,f,t\n n=100\n b = 2.2\n V = 2\n I = 1\n w = 2.*np.pi\n dt_array = np.linspace(0.0005,0.3,n) # store dt values\n eps_array = np.zeros(n) #store deviation\n num_periods = 5\n P = 2.*np.pi/w # one period\n T = P*num_periods\n\n f = ode_source_term(f_numerical(b, V, I, t)) \n f_ = sym.lambdify(t,f)\n\n for i in range(0,n):\n u_num, t_num = solver(I=I, w=w, dt=dt_array[i], T=T, V=V, f=f_)\n\n u_analytic = f_numerical(b, V, I, t_num)\n eps_array[i] = np.abs(u_num - u_analytic(t_num)).max()\n\n plt.plot(dt_array,eps_array)\n plt.xlabel('dt')\n plt.ylabel('deviation')\n plt.title('deviation between numerical and analytical')\n umin = 1.2*eps_array.min(); umax = 1.2*eps_array.max()\n plt.axis([dt_array[0], dt_array[-1], umin, umax])\n plt.show()",
"def updateIntensity(self):\n\n self.MinusDiodeCurve.setData(self.MinusDiode_Average)\n self.PlusDiodeCurve.setData(self.PlusDiode_Average)\n self.AverageDiodeCurve.setData((self.MinusDiode_Average +\n self.PlusDiode_Average)/2)",
"def plot_SI(quark_eff, gluon_eff, color = 'blue', label = '',\n xlim = [.1, 1], ylim = [0, 3], show = True):\n\n plt.plot(*SI(quark_eff, gluon_eff), color = color, label = label)\n plt.xticks(np.linspace(0,1,11))\n plt.xlim(*xlim)\n plt.ylim(*ylim)\n plt.xlabel('Quark Signal Efficiency')\n plt.ylabel('Significance Improvement')\n if show:\n plt.show()",
"def plot_sincs(wave):\n t0 = wave.ts[0]\n for i in range(0, len(wave), factor):\n sinc = make_sinc(t0, i, wave.ys[i])\n seg = sinc.segment(start, duration)\n seg.plot(color='green', linewidth=0.5, alpha=0.3)\n if i == 0:\n total = sinc\n else:\n total += sinc\n \n seg = total.segment(start, duration) \n seg.plot(color='blue', alpha=0.5)",
"def old_run_plots(self,params):\n lw = 2\n \n \n # Plot voltage at soma and dendrites (apical proximal and distal)\n pylab.figure(1)\n pylab.plot(h.tvec,h.vsoma,lw=lw,c='k',label='v_soma')\n #pylab.plot(h.tvec,h.vdend,lw=lw,c='r',label='v_dend')\n #pylab.plot(h.tvec,h.vdend2,lw=lw,c='b',label='v_dend2')\n pylab.xlim(h.tstart-20,h.tstop+20)\n pylab.ylim(-120,40)\n # If optogenetics were included, draw blocks for times that illumination occurred in appropriate colours \n if params.has_key('opdict'):\n for (opsin,opexpressions) in params['opdict'].iteritems():\n for opexp in opexpressions:\n if opexp[0] is None or opexp[0].lower() == 'none':\n continue\n for pulsenum in range(opexp[1][6]): \n pulse_start = opexp[1][2]+pulsenum*(opexp[1][3]+opexp[1][4])\n self.plot_optogenetic(opsin,pulse_start,pulse_start+opexp[1][3],yoffset=40)\n # once we've plotted an activation for one area, that should be sufficient i.e. we don't need to plot apical *and* soma, only the first \n # TODO: think how to extend this to allow for different areas to be indicated i.e. ChR in soma vs ChR in apical dendritic arbor\n break\n pylab.title('V')\n ax = pylab.gca()\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n spine.set_position(('outward',5))\n ax.tick_params(direction='out')\n elif loc in ['right','top']:\n spine.set_color('none') \n pylab.legend()\n pylab.xlabel('time (ms)')\n pylab.ylabel('V (mV)')\n \n \"\"\"\n # Plot currents at soma and i_syn\n pylab.figure(2)\n pylab.plot(h.tvec,h.isyn,lw=lw,c='g',label='i_syn')\n pylab.plot(h.tvec,h.isoma,lw=lw,c='k',label='i_soma')\n if params.has_key('opdict'):\n for (opsin,opexpressions) in params['opdict'].iteritems():\n for opexp in opexpressions:\n if opexp[0] is None or opexp[0].lower() == 'none':\n continue\n h('objref list_i_opsin')\n h('list_i_opsin = new List()')\n h('list_i_opsin.append(i_%s)'%opsin)\n pylab.plot(h.tvec,h.list_i_opsin.object(0),color=opsin_dict[opsin]['color'],label='i_%s'%opsin)\n break\n pylab.xlim(h.tstart-20,h.tstop+20)\n #pylab.ylim(-3,6)\n pylab.title('I')\n ax = pylab.gca()\n for loc, spine in ax.spines.iteritems():\n if loc in ['left','bottom']:\n spine.set_position(('outward',5))\n ax.tick_params(direction='out')\n elif loc in ['right','top']:\n spine.set_color('none') \n pylab.legend()\n pylab.xlabel('time (ms)')\n pylab.ylabel('I (nA)')\n \"\"\"\n \n if params['expname'] is not None:\n savename = params['expname']\n pylab.figure(1)\n pylab.savefig(savename+'_voltage.png')\n #pylab.figure(2)\n #pylab.savefig(savename+'_current.png')\n print \"Saved figures under %s*.png\"%savename\n pylab.close('all')\n else:\n pylab.show()",
"def function_graph(self):\n x = value_range()\n y = self.a * x ** 2 + self.b * x + self.c\n plt.plot(x, y)\n plt.show()",
"def plot(self, n=2**5, show=True, out=None):\n if self.leveltype == 'fixed-multi':\n raise ParameterError('Cannot plot fixed-multilevel Asian option.')\n tvw0 = hstack((0,self.measure.time_vector)) # time vector including 0\n x = self.distribution.gen_samples(n)\n y = self.f(x)\n sw0 = hstack((self.start_price*ones((n,1)),self.s_fine)) # x including 0 and time 0\n from matplotlib import pyplot\n pyplot.rc('font', size=16)\n pyplot.rc('legend', fontsize=16)\n pyplot.rc('figure', titlesize=16)\n pyplot.rc('axes', titlesize=16, labelsize=16)\n pyplot.rc('xtick', labelsize=16)\n pyplot.rc('ytick', labelsize=16)\n fig,ax = pyplot.subplots()\n for i in range(n):\n ax.plot(tvw0,sw0[i])\n ax.axhline(y=self.strike_price, color='k', linestyle='--', label='Strike Price')\n ax.set_xlim([0,1])\n ax.set_xticks([0,1])\n ax.set_xlabel('Time')\n ax.set_ylabel('Option Price')\n ax.legend(loc='upper left')\n s = '$2^{%d}$'%log2(n) if log2(n)%1==0 else '%d'%n \n ax.set_title(s+' Asset Price Paths')\n fig.tight_layout()\n if out: pyplot.savefig(out,dpi=250)\n if show: pyplot.show()\n return fig,ax",
"def plot_average_ecc_func_time(time_values_to_use,ecc_values_to_use,num_body_func_time,second_time=None,ce_to_plot=None):\n\n avg_ecc = []\n max_ecc = []\n for i in range(len(ecc_values_to_use)):\n max_ecc.append(max(ecc_values_to_use[i]) )\n total = np.sum(ecc_values_to_use[i])\n avg_ecc.append(total/float(len(ecc_values_to_use[i])) )\n\n\n fig = pp.figure()\n\n ax = fig.add_subplot(111)\n ax2 = ax.twinx()\n\n line1 = ax.plot(time_values_to_use,avg_ecc,label='<e>',lw=0.5,color='red')\n line2 = ax.plot(time_values_to_use,max_ecc,label='max e',lw=0.5,color='cyan')\n\n if second_time == None:\n second_time = time_values_to_use\n line3 = ax2.step(second_time,num_body_func_time,where='post',lw=2.2,label='numbodies',color='blue')\n ax.set_xscale('log')\n ax.set_xlabel('Time (years)')\n ax.set_ylabel('ecc. of final planets')\n ax2.set_ylabel('Num bodies outside Roche')\n\n lines_for_legend = line1 + line2 + line3\n fig.legend(lines_for_legend, [l.get_label() for l in lines_for_legend], loc='best')\n\n if ce_to_plot != None:\n ax.scatter( ce_to_plot, [0.002]*len(ce_to_plot), marker=markers.CARETUP,color='red')\n\n #ax.plot((1.5e4,1.5e4),(0,0.05))\n ax.set_ylim(bottom=0)\n return fig"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Repeats an outbreak simulation given its config.
|
def repeat_simulation(sim_config={},
num_sim=100,
parallel=None):
# Define simulation task based on given config.
# This will return a function `sim(sim_id)` that takes an identifier.
sim = functools.partial(outbreak_simulation, **sim_config)
sim_ids = list(range(num_sim))
# Run simulations in parallel
if parallel is None or num_sim < 5:
results = [sim(sim_id) for sim_id in sim_ids]
else:
processes = parallel if parallel > 0 else None
with Pool(processes=processes) as pool:
results = pool.map(sim, sim_ids)
SIRs, I0s, I0_rounds, I0_infections, infections = zip(*results)
#inspect_results(sim_config["G"], results, x_axis="max_I")
return SIRs
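
A minimal usage sketch (illustrative only): `repeat_simulation` already defaults `sim_config` to an empty dict, so the calls below only assume that `outbreak_simulation`, `functools`, and `multiprocessing.Pool` are importable in the surrounding module, as the function body requires.

# Serial run: parallel=None (or num_sim < 5) takes the list-comprehension branch.
SIRs_small = repeat_simulation(sim_config={}, num_sim=3)

# Parallel run on 4 worker processes via multiprocessing.Pool.
SIRs_large = repeat_simulation(sim_config={}, num_sim=200, parallel=4)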
|
[
"def cycle(self, outlet, expectation=True):\n outlet_state = self.state\n result = ReturnCode(True)\n outlet = str(outlet)\n logger.info(\"cycle %s\" % outlet)\n self.send(\"1\")\n self.expect(\"Control Sub Menu\")\n self.send(\"1\")\n self.expect(\"Outlet State Sub Menu\")\n if int(outlet) in range(1, 9):\n self.send(\"1\")\n elif int(outlet) in range(9, 17):\n self.send(\"2\")\n elif int(outlet) in range(17, 25):\n self.send(\"3\")\n self.expect(\"Outlet Control Sub Menu\")\n self.send(outlet)\n self.expect(\"%s Command Choices\" % outlet_state[outlet]['name'])\n self.send(\"3\")\n self.expect(\"%s Requested Command is Reboot\" % outlet_state[outlet]['name'])\n self.send(\"\\r\")\n self.expect(\"Outlet State Sub Menu\")\n if not expectation:\n return result\n elif re.search(\"error|fail\", self.before, re.MULTILINE | re.I):\n raise ApplianceError(\"pdu cycle failed\")\n # get back to main menu\n for i in range(0, 2):\n self.send('\\x1b')\n self.expect(\"Select Item Number\")",
"def run(config_file, report_n, verbose, outpath=\"\"):\n s = Simulation(config_file, report_n, verbose, outpath)\n s.execute()\n s.finalise()",
"def continue_sim(self):\n self._tell_sim('put', 'continue')",
"def loop():\n subseq = api.seqstart(\"turn:\" + str(loop.counter), above=loop.mainseq)\n\n loop.world.turn()\n graph.update(end if test() else loop)\n\n api.seqend(subseq)",
"def main(args):\n config_dict = read_configfile(args.configfile, args.simulation,\n args.verbose)\n for i, s in enumerate(config_dict['simulation']):\n simulation_config_dict = config_dict['simulation'][s]\n user_config_dict = config_dict['user_input']\n catalog_name = os.path.join(\n user_config_dict['data_dir'],\n simulation_config_dict['catalog'])\n # Set parameter values in param\n param = get_config_class(simulation_config_dict,\n catalog_name, args.verbose)\n # Set seed\n np.random.seed(int(param.seed))\n # Generate images of blends in all the observing bands\n draw_blend_generator = make_draw_generator(\n param, user_config_dict, simulation_config_dict)\n # Create generator for measurement algorithm outputs\n measure_generator = make_measure_generator(param, user_config_dict,\n draw_blend_generator)\n # get metrics class that can generate metrics\n metrics_class = get_metrics_class(user_config_dict,\n param.verbose)\n test_size = int(simulation_config_dict['test_size'])\n metrics_param = metrics_class(measure_generator, param)\n ouput_path = get_ouput_path(user_config_dict, param.verbose)\n output_name = os.path.join(ouput_path, s + '_metrics_results.dill')\n results = btk.compute_metrics.run(metrics_param, test_size=test_size)\n with open(output_name, 'wb') as handle:\n dill.dump(results, handle)\n print(\"BTK outputs saved at \", output_name)\n save_config_file(param, user_config_dict, simulation_config_dict,\n s, ouput_path)",
"def at_repeat(self):\r\n pass",
"def MindtPy_iteration_loop(self, config):\n while self.mip_iter < config.iteration_limit:\n # solve MILP main problem\n with time_code(self.timing, 'main'):\n main_mip, main_mip_results = self.solve_main(config)\n if self.handle_main_mip_termination(main_mip, main_mip_results):\n break\n # Call the MILP post-solve callback\n with time_code(self.timing, 'Call after main solve'):\n config.call_after_main_solve(main_mip)\n\n # Regularization is activated after the first feasible solution is found.\n if config.add_regularization is not None:\n self.add_regularization(main_mip)\n\n if self.algorithm_should_terminate(config, check_cycling=True):\n self.last_iter_cuts = False\n break\n\n if not config.single_tree: # if we don't use lazy callback, i.e. LP_NLP\n # Solve NLP subproblem\n # The constraint linearization happens in the handlers\n if not config.solution_pool:\n fixed_nlp, fixed_nlp_result = self.solve_subproblem(config)\n self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, config)\n\n # Call the NLP post-solve callback\n with time_code(self.timing, 'Call after subproblem solve'):\n config.call_after_subproblem_solve(fixed_nlp)\n\n if self.algorithm_should_terminate(config, check_cycling=False):\n self.last_iter_cuts = True\n break\n else:\n solution_name_obj = self.get_solution_name_obj(main_mip_results)\n for index, (name, _) in enumerate(solution_name_obj):\n # the optimal solution of the main problem has been added to integer_list above\n # so we should skip checking cycling for the first solution in the solution pool\n if index > 0:\n copy_var_list_values_from_solution_pool(\n self.mip.MindtPy_utils.variable_list,\n self.fixed_nlp.MindtPy_utils.variable_list,\n config,\n solver_model=main_mip_results._solver_model,\n var_map=main_mip_results._pyomo_var_to_solver_var_map,\n solution_name=name,\n )\n self.curr_int_sol = get_integer_solution(self.fixed_nlp)\n if self.curr_int_sol in set(self.integer_list):\n config.logger.info(\n 'The same combination has been explored and will be skipped here.'\n )\n continue\n else:\n self.integer_list.append(self.curr_int_sol)\n fixed_nlp, fixed_nlp_result = self.solve_subproblem(config)\n self.handle_nlp_subproblem_tc(\n fixed_nlp, fixed_nlp_result, config\n )\n\n # Call the NLP post-solve callback\n with time_code(self.timing, 'Call after subproblem solve'):\n config.call_after_subproblem_solve(fixed_nlp)\n\n if self.algorithm_should_terminate(config, check_cycling=False):\n self.last_iter_cuts = True\n break # TODO: break two loops.\n\n # if add_no_good_cuts is True, the bound obtained in the last iteration is no reliable.\n # we correct it after the iteration.\n if (\n (config.add_no_good_cuts or config.use_tabu_list)\n and not self.should_terminate\n and config.add_regularization is None\n ):\n self.fix_dual_bound(config, self.last_iter_cuts)\n config.logger.info(\n ' ==============================================================================================='\n )",
"def simulate( self, finishTime ):\n ...",
"def run_simulations():\r\n plot_type = LOGLOG # we choose the logarithmic plot\r\n days = 70\r\n inc_0 = greedy_boss(days, 0, plot_type)\r\n inc_500 = greedy_boss(days, 500, plot_type)\r\n inc_1000 = greedy_boss(days, 1000, plot_type)\r\n inc_2000 = greedy_boss(days, 2000, plot_type)\r\n simpleplot.plot_lines(\"Greedy boss\", 600, 600, \"days\", \"total earnings\", [inc_0, inc_500, inc_1000, inc_2000], False, [\"Bribe increment = 0\", \"Bribe increment = 500\", \"Bribe increment = 1000\", \"Bribe increment = 2000\"])\r\n simpleplot._block()",
"def test_MultiRun2():\n\n # Keep testing configurations for this many minutes.\n max_minutes = 10\n\n # Start a timer.\n timer = Timer()\n\n # For the desired number of minutes we select random configurations to test.\n while timer.less_than(minutes=max_minutes):\n # Select an optimizer at random.\n optimizer = random.choice([PSO, MOL, DE, LUS, PS])\n\n # Search-space dimensionality.\n dim = np.random.randint(1, 1000)\n\n # Display intervals.\n display_interval = np.random.randint(0, 250)\n\n # Max fitness evaluations.\n max_evaluations = np.random.randint(1, 2000)\n\n # Number of optimization runs.\n num_runs = np.random.randint(1, 10)\n\n # Fitness-trace-length.\n trace_len = np.random.randint(0, 1000)\n\n # Take a benchmark problem at random.\n problem_class = random.choice(Problem.all_benchmark_problems)\n problem = problem_class(dim=dim)\n\n # Either parallel or not.\n parallel = random.choice([True, False])\n\n # Run the test using this configuration.\n yield _do_test_MultiRun, optimizer, problem, dim, max_evaluations, display_interval, trace_len, parallel, num_runs",
"def at_repeat(self):\r\n self.obj.blink()",
"def do_next_cycle(game):\n # game.simulation.now += 1\n # game.runner._update_patient(game.simulation.now)\n # game.runner._update_network(game.simulation.now)\n # game.runner._update_agents(game.simulation.now)\n # game.runner._exogenous_event(game.simulation.now)\n\n game.runner._make_decision(game.simulation.now)\n remove_human_controlled_agents_decisions(game)\n game.parse_decisions()\n game.runner._apply_decision(game.simulation.now)\n\n # try:\n # draw_figures(game)\n # except Exception as e:\n # print e\n\n game.simulation.now += 1\n game.runner._update_patient(game.simulation.now)\n game.runner._update_network(game.simulation.now)\n game.runner._update_agents(game.simulation.now)\n game.runner._exogenous_event(game.simulation.now)",
"def end():\n graph.end()\n\n api.info(\"------------------- End of simulation -------------------\")\n api.seqend(loop.mainseq)\n\n api.newline()\n api.info(\"Seed: \" + str(api.settings('randomSeed')))\n api.info(\"(you will need this seed to replay the exact same game.)\")",
"def go_to(self, args):\n return Simulations(args).simulation()",
"def output(self, config_list):\r\n # new\r\n all_result = []\r\n for i in range(len(config_list) - 1):\r\n result_list = self.collision_free_steps_generator(config_list[i], config_list[i + 1])\r\n all_result.extend(result_list)\r\n print(i, \":\", len(all_result))\r\n for j in range(len(all_result)):\r\n print(all_result[j], \"\\n\")\r\n return all_result",
"def different_ee_goal_generator(self, based_config, point):\r\n # test: initial_config = self.generate_random_RobotConfig()\r\n # point = (0.6516529983330152, 0.25761455641847525)\r\n # step1: generate goal config\r\n is_valid_sample = False\r\n while not is_valid_sample:\r\n sample = self.generate_random_RobotConfig(based_config)\r\n # the config's ee_point\r\n ee_point = self.get_config_non_grappled_ee_point(sample)\r\n # the config's ee_point connected arm\r\n ee_arm_length = self.get_config_non_grappled_ee_arm_length(sample)\r\n if ee_point[0] >= point[0] - ee_arm_length and ee_point[0] <= point[0] + ee_arm_length and ee_point[1] >= point[1] - ee_arm_length and ee_point[1] <= point[1] + ee_arm_length:\r\n is_valid_sample = True\r\n\r\n # print(\"length:\",sample.lengths)\r\n point0 = sample.points[-4]\r\n # print(\"point0:\",point0)\r\n point1 = sample.points[-3] # the inverse 3rd point\r\n # print(\"point1:\",point1)\r\n # point2 = self.goal.points[-1] # the inverse 1st point\r\n point2 = point\r\n # print(\"point2:\",point2)\r\n r1 = sample.lengths[-2] # the inverse 3rd length\r\n r2 = sample.lengths[-1] # the inverse 1st length\r\n print(\"r1:\",r1)\r\n print(\"r2:\",r2)\r\n\r\n if r1 + r2 > self.points_distance(point1,point2):\r\n print(\"ok\")\r\n c1, c2 = self.circle_insec(point1,r1,point2,r2)\r\n # generate config for c1\r\n v1 = np.array(point1) - np.array(point0)\r\n v2 = np.array(c1) - np.array(point1)\r\n v3 = np.array(point2) - np.array(c1)\r\n\r\n angle1_c1 = math.acos(np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2)))\r\n angle2_c1 = math.acos(np.dot(v2,v3)/(np.linalg.norm(v2)*np.linalg.norm(v3)))\r\n\r\n if based_config.ee1_grappled:\r\n angles = sample.ee1_angles[:-2]\r\n angles1 = -Angle(radians=angle1_c1)\r\n angles2 = -Angle(radians=angle2_c1)\r\n angles.extend([angles1,angles2])\r\n goal_config_1 = make_robot_config_from_ee1(based_config.get_ee1()[0], based_config.get_ee1()[1], angles, sample.lengths, ee1_grappled=True)\r\n else:\r\n angles = sample.ee2_angles[:-2]\r\n angles1 = -Angle(radians=angle1_c1)\r\n angles2 = -Angle(radians=angle2_c1)\r\n angles.extend([angles1,angles2])\r\n goal_config_1 = make_robot_config_from_ee2(based_config.get_ee1()[0], based_config.get_ee1()[1], angles, sample.lengths, ee2_grappled=True)\r\n\r\n # generate config for c2\r\n v1 = np.array(point1) - np.array(point0)\r\n v2 = np.array(c2) - np.array(point1)\r\n v3 = np.array(point2) - np.array(c2)\r\n\r\n angle1_c2 = math.acos(np.dot(v1,v2)/(np.linalg.norm(v1)*np.linalg.norm(v2)))\r\n angle2_c2 = math.acos(np.dot(v2,v3)/(np.linalg.norm(v2)*np.linalg.norm(v3)))\r\n if based_config.ee1_grappled:\r\n angles = sample.ee1_angles[:-2]\r\n angles1 = -Angle(radians=angle1_c2)\r\n angles2 = -Angle(radians=angle2_c2)\r\n angles.extend([angles1,angles2])\r\n goal_config_2 = make_robot_config_from_ee1(based_config.get_ee1()[0], based_config.get_ee1()[1], angles, sample.lengths, ee1_grappled=True)\r\n else:\r\n angles = sample.ee2_angles[:-2]\r\n angles1 = -Angle(radians=angle1_c2)\r\n angles2 = -Angle(radians=angle2_c2)\r\n angles.extend([angles1,angles2])\r\n goal_config_2 = make_robot_config_from_ee2(based_config.get_ee1()[0], based_config.get_ee1()[1], angles, sample.lengths, ee2_grappled=True)\r\n\r\n if self.valid_config(goal_config_1):\r\n # print(\"goal_config_1_points:\", goal_config_1.points)\r\n # print(\"c1:\",c1)\r\n return goal_config_1\r\n if self.valid_config(goal_config_2):\r\n # print(\"goal_config_2_points:\", goal_config_2.points)\r\n # print(\"c2:\",c2)\r\n return 
goal_config_2\r\n else:\r\n return False\r\n else:\r\n return False",
"def pause_sim(self):\n self._tell_sim('put', 'pause')",
"def run_simulation():\n # setup the environment\n env = gym.make('CartPole-v0')\n env._max_episode_steps = max_steps * 100\n env.reset()\n\n # initial dataset to train the neural net on\n initial_data = get_random_moves(env)\n\n input_list, output_list = get_inputs(initial_data)\n\n # get the trained instance of the neural network back\n curr_nn = train(input_list, output_list)\n\n # play game using the trained curr_nn\n play_game(env, curr_nn)",
"def run(self, steps_per_update=1):\n def loop(sim):\n sim.run(steps_per_update)\n self.loop(loop)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Deletes all flashcards of the given Note object and then deletes the given Note object itself.
|
def delete_note(self, note:Note):
if note:
# Delete card(s) of given note
for card in note.flashcards:
db.session.delete(card)
db.session.commit()
# Delete note
if self.user_id == current_user.id:
db.session.delete(note)
db.session.commit()
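
For context, a hedged sketch of how such a method might be invoked from a Flask view; the route, model names, and the assumption that the method lives on a User-like model owning `user_id` are illustrative, not taken from this row.

# Hypothetical Flask route (names are placeholders).
@app.route("/notes/<int:note_id>/delete", methods=["POST"])
@login_required
def delete_note_view(note_id):
    note = Note.query.get_or_404(note_id)
    current_user.delete_note(note)   # calls the method above; only the owner's note is removed
    return redirect(url_for("index"))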
|
[
"def delete_cards(self):\n self._stage = []\n self._hand = []",
"def delete_all_objects_and_materials():\n # select all objects and delete them\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete(use_global=False, confirm=False)\n # delete all physics bakes\n bpy.ops.ptcache.free_bake_all()\n # delete all materials\n for material in D.materials:\n material.user_clear()\n D.materials.remove(material)",
"def delete_all(self, obj):\n ctype = ContentType.objects.get_for_model(obj)\n self.filter(content_type=ctype, object_id=obj.pk).delete()",
"def deleteQCs(self, ctx):\n for qc in self.ObjectQualityCharacteristics:\n qc.DeleteQC()",
"def deleteCabinet(deleteObj):\n\n cabinets = models.Cabinet.objects.get(pk=deleteObj.id)\n cabinets.delete()",
"def del_note(self):\n try:\n # First delete the note\n note_id = self.note_id\n __notes__.del_note(note_id)\n # Then delete the reference to the note\n del self.note_id \n except AttributeError:\n # Note does not exist, but don't print nothing becase there are a lot of models to delete\n pass",
"def removeStripes(self):\n for i in range(len(self.verticalStripes)):\n self.verticalStripes[i].removeStripe(self)\n\n for i in range(len(self.horizontalStripes)):\n self.horizontalStripes[i].removeStripe(self)",
"def delete_unfingerprinted_audios(self) -> None:\n with self.cursor() as cur:\n cur.execute(self.DELETE_UNFINGERPRINTED)",
"def delete(self, *devices):\n for d in devices:\n d.delete()",
"def clean_up(keep_materials=None, keep_objects=None):\n print(\"Now in clean_up()...\", keep_materials, keep_objects)\n\n if keep_materials is None:\n keep_materials = []\n if keep_objects is None:\n keep_objects = []\n\n # Delete materials not in list\n for mat in bpy.data.materials:\n print(mat.name)\n if mat.name not in keep_materials:\n bpy.data.materials.remove(mat)\n\n # print()\n # print(bpy.data.scenes[0].view_layers)\n # print(bpy.data.scenes[0].view_layers[0])\n # print()\n\n # Delete objects not in list\n for obj in bpy.data.objects:\n print(obj.name, obj)\n if obj.name not in keep_objects:\n # pass\n obj.select_set(\n True\n ) # , view_layer=bpy.data.scenes[0].view_layers[0]) # https://developer.blender.org/T66725\n bpy.ops.object.delete(use_global=True) # False)",
"def destroy(self, request, pk):\n queryset = request.user.wantToWatchMediaItem.all()\n mediaItem = queryset.filter(mdbID=pk)\n obj = get_object_or_404(mediaItem)\n self.perform_destroy(obj)\n return Response(status=status.HTTP_204_NO_CONTENT)",
"def clear (self):\n for object in self._objects[:]:\n object.destroy ()\n self._objects = []",
"def _delete(self, params):\n project = params[\"project\"]\n qs = get_media_queryset(project, params)\n media_ids = list(qs.values_list('pk', flat=True).distinct())\n count = qs.count()\n if count > 0:\n # Get info to populate ChangeLog entry\n first_obj = qs.first()\n project = first_obj.project\n ref_table = ContentType.objects.get_for_model(first_obj)\n delete_dicts = []\n ref_ids = []\n for obj in qs:\n delete_dicts.append(obj.delete_dict)\n ref_ids.append(obj.id)\n\n # Mark media for deletion.\n qs.update(deleted=True,\n modified_datetime=datetime.datetime.now(datetime.timezone.utc),\n modified_by=self.request.user)\n\n # Any states that are only associated to deleted media should also be marked \n # for deletion.\n not_deleted = State.objects.filter(project=project, media__deleted=False)\\\n .values_list('id', flat=True)\n deleted = State.objects.filter(project=project, media__deleted=True)\\\n .values_list('id', flat=True)\n all_deleted = set(deleted) - set(not_deleted)\n state_qs = State.objects.filter(pk__in=all_deleted)\n state_qs.update(deleted=True,\n modified_datetime=datetime.datetime.now(datetime.timezone.utc),\n modified_by=self.request.user)\n\n # Delete any localizations associated to this media\n loc_qs = Localization.objects.filter(project=project, media__in=media_ids)\n loc_qs.update(deleted=True,\n modified_datetime=datetime.datetime.now(datetime.timezone.utc),\n modified_by=self.request.user)\n\n # Clear elasticsearch entries for both media and its children.\n # Note that clearing children cannot be done using has_parent because it does\n # not accept queries with size, and has_parent also does not accept ids queries.\n query = get_media_es_query(self.kwargs['project'], params)\n TatorSearch().delete(self.kwargs['project'], query)\n loc_ids = [f'box_{id_}' for id_ in loc_qs.iterator()] \\\n + [f'line_{id_}' for id_ in loc_qs.iterator()] \\\n + [f'dot_{id_}' for id_ in loc_qs.iterator()]\n TatorSearch().delete(self.kwargs['project'], {'query': {'ids': {'values': loc_ids}}})\n state_ids = [f'state_{id_}' for id_ in state_qs.iterator()]\n TatorSearch().delete(self.kwargs['project'], {'query': {'ids': {'values': state_ids}}})\n\n # Create ChangeLogs\n objs = (\n ChangeLog(project=project, user=self.request.user, description_of_change=dd)\n for dd in delete_dicts\n )\n change_logs = bulk_create_from_generator(objs, ChangeLog)\n\n # Associate ChangeLogs with deleted objects\n objs = (\n ChangeToObject(ref_table=ref_table, ref_id=ref_id, change_id=cl)\n for ref_id, cl in zip(ref_ids, change_logs)\n )\n bulk_create_from_generator(objs, ChangeToObject)\n return {'message': f'Successfully deleted {count} medias!'}",
"def delete_all_blobs(self):\n\t\tblobs = self.get_all_blobs()\n\t\tfor blob in blobs :\n\t\t\tblob.delete()",
"def delete(self):\n # TODO should probably warn user relation may be broken\n\n for prod_model in self.model.producers:\n prod_model.consumerRemove(self)\n\n for cons_model in self.model.consumers:\n cons_model.producerRemove(self)\n\n for service in self.children:\n service.delete()\n\n self.model.delete()\n j.sal.fs.removeDirTree(self.path)",
"def delete_note(self, note_id):\n return self.__delete_object('notes', note_id)",
"def delete_all_recalls():\n for memo in current_user.memos:\n for recall in memo.recalls:\n if recall.user_id == current_user.id:\n db.session.delete(recall)\n flash(f'Deleted recalls', 'danger')\n db.session.commit()\n return redirect(url_for('user', username=current_user.username))",
"def remove_destructed_objects():\n for ob in simulate.obj_list_destruct:\n simulate.destruct2(ob)\n simulate.obj_list_destruct.remove(ob)",
"def delete_wish(wish):\n\n deleted_wish = Answer.query.filter_by(wish=wish).delete()\n\n db.session.commit()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filter the provided entries.
|
def _filter_entries(self,
entries: List[VICEmergencyIncidentsFeedEntry]) \
-> List[VICEmergencyIncidentsFeedEntry]:
filtered_entries = super()._filter_entries(entries)
if self._filter_inc_categories:
filtered_entries = list(filter(lambda entry:
entry.category1 in self._filter_inc_categories,
filtered_entries))
if self._filter_exc_categories:
filtered_entries = list(filter(lambda entry:
entry.category1 not in self._filter_exc_categories,
filtered_entries))
if not self._filter_statewide:
filtered_entries = list(filter(lambda entry:
entry.statewide not in ['Y'],
filtered_entries))
return filtered_entries
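
As an illustration of the same three-stage filter outside the feed class, a self-contained sketch follows; `_Entry` is a stand-in for `VICEmergencyIncidentsFeedEntry` and only assumes the `category1` and `statewide` fields used above.

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class _Entry:                         # stand-in for VICEmergencyIncidentsFeedEntry
    category1: str
    statewide: str                    # 'Y' when the incident is statewide

def filter_incidents(entries: List[_Entry],
                     include: Optional[List[str]] = None,
                     exclude: Optional[List[str]] = None,
                     keep_statewide: bool = False) -> List[_Entry]:
    # Mirrors _filter_entries: optional include list, optional exclude list,
    # then drop statewide entries unless explicitly kept.
    if include:
        entries = [e for e in entries if e.category1 in include]
    if exclude:
        entries = [e for e in entries if e.category1 not in exclude]
    if not keep_statewide:
        entries = [e for e in entries if e.statewide != 'Y']
    return entries

print(filter_incidents([_Entry("Fire", "N"), _Entry("Flood", "Y")]))  # keeps only the Fire entry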
|
[
"def _filter_entries(self, entries: List[FeedEntry]) -> List[FeedEntry]:\n filtered_entries = entries\n if self._apply_filters:\n # Always remove entries without coordinates.\n filtered_entries = list(\n filter(\n lambda entry: (entry.coordinates is not None)\n and (entry.coordinates != (None, None)),\n filtered_entries,\n )\n )\n # Always remove entries on the ground (altitude: 0).\n filtered_entries = list(\n filter(lambda entry: entry.altitude > 0, filtered_entries)\n )\n # Filter by distance.\n if self._filter_radius:\n filtered_entries = list(\n filter(\n lambda entry: entry.distance_to_home <= self._filter_radius,\n filtered_entries,\n )\n )\n return filtered_entries",
"def filter_entry(node):\n return node.type == \"Entry\"",
"def filter(self, *args, **kwargs):\n return self.list().filter(*args, **kwargs)",
"def filter(self, filter_params):\n pass",
"def filterResults(self):\n\t\tif self.filter_predicate:\n\t\t\tif self.verbose:\n\t\t\t\tprint \"filtering from %d records\" % len(self)\n\t\t\tfn = self.filter_predicate\n\t\t\tself.data = filter (lambda rslt:fn(rslt), self.data)",
"def _filter(self, starred):\n endpoint = \"starred_entries\" if starred else \"unread_entries\"\n # TODO: split entries into groups of <= 1000\n r = self.session.delete(self.API_URL.format(endpoint),\n data=demjson.encode({endpoint: self.to_be_filtered}),\n headers={\"Content-Type\": \"application/json; charset=utf-8\"})\n if not r.ok:\n r.raise_for_status()",
"def filter(self, items):\n if self.filters:\n result = deepcopy(items)\n for f in self.filters:\n LOG.debug('applying filter \"%s\"' % f.__class__.__name__)\n result = f.filter(result)\n else:\n result = items\n return result",
"def filter(self, **kwargs):\n return self",
"def _apply_filter(self, feed, patterns):\n\n entries = [entry for entry in self.entries if entry[u\"feed_id\"] == feed[u\"feed_id\"]]\n if not entries:\n # no unread entries\n return None\n\n print u\"Searching \\\"{}\\\" for matching items...\".format(feed[u\"title\"]),\n sys.stdout.flush()\n\n count = len(self.to_be_filtered)\n for pattern in patterns:\n regex = re.compile(pattern)\n for entry in entries:\n if not entry[u\"title\"]:\n # Untitled entries are both valid and extant\n continue\n if regex.search(entry[u\"title\"]):\n # TODO: remove entry from entries\n self.to_be_filtered.append(entry[u\"id\"])\n\n return len(self.to_be_filtered) - count",
"def test_filter(self):\n\n self.assertEqual(fnmatch.filter(['name', 'test'], '*', exclude='test'), ['name'])",
"def match_filters(self, sub_entry: dict) -> bool:\n for attribute, keep in self.map_filter.items():\n if attribute in sub_entry.keys():\n if not keep(sub_entry[attribute]):\n return False\n return True",
"def _filter_items(items):\n names_to_filter = []\n for name, item in items.items():\n if not (item['attrib'] or item['lore'] and\n ('greevil' not in item['dname'].lower())):\n names_to_filter.append(name)\n\n for name in names_to_filter:\n del items[name]\n\n return items",
"def _filterOutput(self, pipelines, filter_dict, bIn):\n filtered = []\n for line in pipelines[:]:\n check = False # \"check\" means \"match\"\n # This inner for loop is deceiving: the filter_dict usually has a\n # single key:value and then the break/else is pure confusion.\n for key, value in filter_dict.items():\n if 'any' in value or value == ['']:\n check = True if key in line.keys() else False\n else:\n # Use full match for numerical values, and use substring\n # match for string values\n if str(line[key]).isdigit():\n check = str(line[key]) in value\n else:\n check = any([em in str(line[key]) for em in value])\n if check is bIn:\n break\n else:\n # No 'break': include this pipeline\n filtered.append(line)\n return filtered",
"def filter(self, **kwargs):\n\n for filter_name, filter_value in kwargs.iteritems():\n self._filters[filter_name] = filter_value\n return self",
"def get_matching_entries(self, entry):\n return [e for e in self.entries if e.matches(entry)]",
"def filter_events( events):\n\t# By default, this method is empty\n\treturn events",
"def test_filter_rows_list_input():\n ls = [\n {'s': 'a', 'i': 1, 'f': 1.0},\n {'s': 'b', 'i': 2, 'f': 2.0},\n {'s': 'c', 'i': 3, 'f': 3.0},\n ]\n filtered = query_csv.filter_rows(ls, {'s': 'a'})\n assert list(filtered) == [\n {'s': 'a', 'i': 1, 'f': 1.0},\n ]",
"def filter(self, **kwargs):\r\n preds = []\r\n for k, v in kwargs.iteritems():\r\n def pred(field, value, item):\r\n for suffix, p in _BUILTIN_PREDS.iteritems():\r\n if field.endswith(suffix):\r\n f = field[:field.index(suffix)]\r\n if not hasattr(item, f) or getattr(item, f) is None:\r\n return False\r\n return p(getattr(item, f), value)\r\n if not hasattr(item, field) or getattr(item, field) is None:\r\n return False\r\n if isinstance(value, type(lambda x: x)):\r\n return value(getattr(item, field))\r\n return getattr(item, field) == value\r\n preds.append(functools.partial(pred, k, v))\r\n\r\n gen = itertools.ifilter(lambda item: all([f(item) for f in preds]),\r\n self)\r\n return self.__class__(gen)",
"def _filter_observations(self, observations):\n filter_out = set(observations.keys()).difference(\n self._observations_allowlist\n )\n # Remove unwanted keys from the observation list.\n for filter_key in filter_out:\n del observations[filter_key]\n return observations"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract global metadata from feed.
|
def _extract_from_feed(self, feed: FeatureCollection) -> Optional[Dict]:
return None
|
[
"def iter_feed_metadata(\n self, feed: Union[str, Feed]\n ) -> Iterable[Tuple[str, JSONType]]:\n feed_url = feed_argument(feed)\n return self._storage.iter_feed_metadata(feed_url)",
"def get_feed_metadata(\n self,\n feed: FeedInput,\n key: Optional[str] = None,\n ) -> Iterable[Tuple[str, JSONType]]:\n\n # get_feed_metadata(feed, *, key=None) -> (key, value), ...\n feed_url = _feed_argument(feed)\n return self._storage.iter_metadata((feed_url,), key)",
"def get_metadata(self):\n\n tree = lxml.etree.parse(self.manifest)\n\n self.get_remotes(tree)\n self.get_projects(tree)",
"def _scrape_metadata(self):\n return",
"def parse(feed):\n rss = fp.parse(feed)\n return (rss, rss.feed.title, rss.feed.subtitle)",
"def entries(feed):\n for entry in feed.entries:\n details = []\n for item in ['title', 'summary', 'link']:\n details.append(entry.get(item, ''))\n yield details",
"def metadata(self):\n return metadata_for_forecasts()",
"def get( self ):\n #using urlgrabber so it doesn't matter whether feed is a file or a url\n logger.debug(\"Opening feed: \" + self.feed)\n fd = urlopen( self.feed )\n feed = {}\n #is this an OPML file?\n try:\n outlines = OPML.parse( fd ).outlines\n logger.debug(\"Feed is OPML\")\n for opmlfeed in outlines:\n feed = {}\n feed[\"title\"] = opmlfeed[\"title\"]\n feed[\"url\"] = opmlfeed[\"xmlUrl\"]\n self.feedlist.append( feed )\n logger.debug(\"Feed has been imported: %s - %s\" % (feed[\"title\"], feed[\"url\"]))\n except Exception, e:\n feed = {}\n try:\n if self.title:\n feed[\"title\"] = self.title\n else:\n outlines = feedparser.parse( self.feed )[\"feed\"]\n feed[\"title\"] = outlines.title\n feed[\"url\"] = self.feed\n self.feedlist.append(feed)\n logger.debug(\"Feed has been imported: %s - %s\" % (feed[\"title\"], feed[\"url\"]))\n except Exception, e:\n print \"Feedparser exception:\", e\n sys.exit(-1)\n self.toXML()",
"def _parse_entry(self,entry):\n item_meta={'title':entry.title,\n 'description':entry.description,\n 'category':entry.category,\n 'tags':entry.tags,\n 'page_url':entry.url,\n 'lq_url':None,\n 'hq_url':None,\n 'hd_url':None,\n 'search-id':self.search_id,\n 'source':'5',}\n self._logger.debug('Video Metadata: %s',item_meta)\n return item_meta",
"def get_metadata(self):\n return meta.get_metadata(self.ast)",
"def get_all_metadata(self):\n metadata = {}\n for key in self.METADATA_KEYS:\n try:\n val = self.get_metadata(key)\n except MissingMetadataError:\n pass\n else:\n metadata[key] = val\n\n return metadata",
"def extract_dc_metadata(soup):\n metatags = [] \n\n for m in soup.find_all('meta'):\n \n # Skip over fields we don't like explicitly\n if m.has_attr('name') is False: continue\n if 'width' in m['content']: continue\n if m['name'] == 'GENERATOR': continue\n\n meta = {}\n meta[m['name']] = m['content']\n metatags.append(meta)\n\n return metatags",
"def metadata(self) -> dict[str, Any]:",
"def extract_metadata(url: str) -> dict:\n\n r = requests.get(url)\n base_url = get_base_url(r.text, r.url)\n return extruct.extract(r.text, base_url=base_url)",
"def _FetchCommonMetadata(self, callback):\n paths = [ \"meta-data/hostname\", \"meta-data/instance-id\", \"user-data/passphrase\" ]\n self.FetchMetadata(paths, callback)",
"def get_feed_dict(self):\n return {}",
"def _get_site_meta(self, article):\n\n source_url = article.source_url\n proto, url = [p.strip('/') for p in urlparse.splittype(source_url)]\n name = article.meta_site_name\n if not name:\n for xpath in META_SITE_NAME_EX:\n name = article.extractor.get_meta_content(article.clean_doc,\n xpath)\n if name:\n break\n else:\n self.log.warning(f'{article.url} did not have a meta_site_name')\n name = tldextract.extract(source_url).domain.capitalize()\n\n favicon = article.meta_favicon\n if favicon:\n if favicon[:2] == '//':\n # protocol-relative URL\n favicon = f'{proto}:{favicon}'\n elif favicon[0] == '/':\n # relative URL to site base\n favicon = source_url + favicon\n\n return {'url': url, 'name': name, 'icon_url': favicon}",
"def metadata(self):\r\n metadataurlpath = 'content/items/' + self.itemid + '/info/metadata/metadata.xml'\r\n try:\r\n return self._portal.con.get(metadataurlpath, try_json=False)\r\n\r\n # If the get operation returns a 400 HTTP Error then the metadata simply\r\n # doesn't exist, let's just return None in this case\r\n except HTTPError as e:\r\n if e.code == 400 or e.code == 500:\r\n return None\r\n else:\r\n raise e",
"def readExistingMetaData(self: object) -> dict[str, list[str]]:\n\t\twith exiv.Image(f\"{self.rootPath}/{self.fileName}\") as f:\n\t\t\tdata = f.read_xmp()\n\t\treturn data",
"def __getitem__(self, feed):\n return self._feed_config[feed]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parses an nhentai URL to its numeric ID.
|
def parse_to_n_digit(url: str) -> Optional[str]:
n_digit_match = re.search('([1-9][0-9]*)', url)
return n_digit_match.group(1) if n_digit_match is not None else None
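
Two hedged usage examples (the URLs are made up for illustration; the regex returns the first run of digits that does not start with a zero, or None when the URL contains no such digits).

print(parse_to_n_digit("https://nhentai.net/g/123456/"))  # -> '123456'
print(parse_to_n_digit("https://nhentai.net/"))           # -> None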
|
[
"def parseURI(self,url):\n addr = \"\"\n parts = []\n ip = False\n parts = url.split('/')\n #extract ip address with port\n if(len(parts)>2):\n addr = parts[2] #this contains X.X.X.X:PORT\n else:\n addr = parts[0] #it is possible the mtURL is \"X.X.X.X:PORT/\" (no http), then parts[0] will still be X.X.X.X:PORT\n # extract the ip address \n addr = addr.split(':')\n if(len(addr)>1):\n ip = addr[0]\n port = addr[1]\n else:\n ip = False\n port = False\n return ip, port",
"def parseURI(url):\n\thostport = url.split(':')\n\thost = hostport[0] if hostport[0] != 'localhost' else socket.gethostname()\n\treturn host, hostport[1] if len(hostport) > 1 else '80'",
"def get_seamus_id_from_url(url):\n if url.startswith('http://www.npr.org') or url.startswith('http://npr.org'):\n url_parts = url.split('/')\n id = url_parts[-2]\n if id.isdigit():\n return id\n\n return None",
"def parseID(self,url):\n\tif validateUrl(url):\n\t splitURL = (url).split(\"/\")\n\t itemID = \"BHL-\" + splitURL[4].split('#')[0]\n\telse:\n\t return \"URL not valid\"\n\treturn itemID",
"def href_to_number(href):\n base = os.path.basename(href)\n if \".\" in base:\n base = \".\".join(base.split(\".\")[:-1])\n numbers = re.findall(r\"\\d+\", base)\n if len(numbers) > 0:\n base = str(numbers[0]).zfill(6)\n return base",
"def parse_unique_id_from_url(page_url):\n\ttokens = page_url.split(\"/\")\n\tlast_token = tokens[len(tokens)-1]\n\tbook_id = \"\"\n\tfor char in last_token:\n\t\tif char.isdigit():\n\t\t\tbook_id += char\n\t\telse:\n\t\t\tbreak\n\treturn int(book_id)",
"def _get_port(url):\n\n if url.find('http://') == 0:\n url = url.replace('http://', '')\n port = 80\n if url.find('https://') == 0:\n url = url.replace('https://', '')\n port = 443\n\n url_parts = url.split(':')\n\n if len(url_parts) == 1:\n return port\n else:\n port_part = url_parts[1]\n port_section = port_part.split('/')[0]\n try:\n int(port_section)\n except:\n return port\n return int(port_section)\n\n return port",
"def parse_pr_url(url: str) -> Tuple[str, int]:\n arr = url.split('/')\n full_repo_name = '{}/{}'.format(arr[-4], arr[-3])\n pr_num = int(arr[-1])\n\n return full_repo_name, pr_num",
"def extract_user_id(url):\n REGEX = re.compile(r'https?://.*.bilibili.com/(\\d+)')\n match = REGEX.match(url)\n return match.group(1)",
"def company_id(url):\n p = re.compile('-\\d+')\n aa = re.search(p, url).group()[1:]\n return aa",
"def url_parser(url):\r\n if url.startswith(URL_SCHEMES):\r\n return url\r\n else:\r\n return 'https://' + url",
"def splitUrl(url, n):\n\n return url.split('/')[-n:]",
"def parse_url(self, url):\n parsed = urlparse(url)\n return parsed",
"def get_review_page_number_from_url(url : str) -> int:\n return int(\n url[url.find(\n REVIEW_PAGE_NO_URL_IDENTIFIER[1]\n ) + len(REVIEW_PAGE_NO_URL_IDENTIFIER[1]):]\n )",
"def normalize_url(self, url):\n match = self.url_matcher.match(url)\n url = match.group(0)\n url = self.url_matcher.sub(\"https://arxiv.org/abs/\\\\3\", url)\n return url",
"def get_tweet_id(tweet_url):\n tweet_id = re.search(r'\\d+$', tweet_url)\n return tweet_id.group(0)",
"def parse_reddit_url(url):\n segments = url.split(\"/\")\n if len(segments) is not 7:\n logging.error(\"Invalid sub-reddit url: {}\".format(url))\n return None\n return {\n \"id\": segments[4],\n \"sub-reddit\": segments[2],\n \"safe_title\": segments[5]\n }",
"def _extract_uuid(url):\n segments = url.split('/')\n for idx, segment in enumerate(segments):\n dash_count = 0\n for char in segment:\n if char == '-':\n dash_count += 1\n if dash_count == 4:\n return segments[idx]\n raise Exception('Url does not contain a valid uuid4')",
"def parseUrl(self, url):\n\n url = self._chopProtocol(url)\n\n if not \"/\" in url: raise KopyException(\"Bad URL.\")\n\n chunk = url.split(\"/\")[-1]\n if \"#\" in chunk:\n # FIXME support # in passphrases? urlencode them?\n documentId, passphrase = chunk.split(\"#\")\n if not documentId: raise KopyException(\"Bad URL; no document ID.\")\n if not passphrase: passphrase = None\n else:\n documentId = chunk\n passphrase = None\n if not documentId: raise KopyException(\"No document ID.\")\n\n return documentId, passphrase"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
True when at top of game board.
|
def at_top(self) -> bool:
return self.ycor() >= self.max_top
|
[
"def is_top(self) -> bool:\n return self.timestamp.is_top",
"def is_top_block(self):\n return self._parent_block is None",
"def is_at_home(self):\n return self.position == self.home_position",
"def _on_board(self, point):\n return self.board[point]!= BORDER",
"def make_top(self):\n self._top = True",
"def outside_window(self):\n if self.ball.y >= self.window.height:\n return True",
"def __check_above(self):\n possible = [True, False]\n if self.above not in possible:\n self.above = DEF_ABOVE",
"def isWin(self):\n return self.pos in self.data['win_states']",
"def CanTopDeck(self, deck):\n num_cards = len(self._game_state.development_cards[deck])\n num_cards_revealed = len(self.revealed_cards[deck])\n num_cards_left = num_cards - num_cards_revealed\n return num_cards_left > 0",
"def isFull(board):\n pass",
"def game_draw(self):\n\t\tfor num in np.ravel(self.boardStatus):\n\t\t\tif num == self.type[\"blank\"]:\n\t\t\t\treturn False\n\t\tif self.game_won() != self.type[\"blank\"]:\n\t\t\treturn False\n\t\treturn True",
"def on_board(self, square):\n x, y = square\n return x >= 0 and y >= 0 and x < self.x_dim and y < self.y_dim and self.rows[y][x] != 'O'",
"def off_screen(self):\n # Note: this will be used for testing, but not used in the final version of the code for the sake of simplicity.\n # TODO 13: Return True if the y position of this Raindrop is greater than 800.\n pass",
"def is_open(self, square):\n return self.board[square] == ''",
"def _is_player_off_screen_bottom(current_game: Game, player_height: int=None):\n player_y = current_game.player.y\n if player_height is None:\n player_height = current_game.player.down.height\n\n return (player_y > (current_game.player.y_bottom_barrier + player_height))",
"def in_window(self):\n if self.actions == -1:\n return True\n else:\n return False",
"def _is_visible(self, obj):\n # TODO: FINISH THIS\n window_w = SCREEN_W\n window_h = SCREEN_H\n return obj.right >= 0 and obj.left <= window_w and obj.top >= 0 and obj.bottom <= window_h",
"def can_move_top(self, x, y):\n cell_value = self.grid[x][y-1]\n dir = 0\n size = 1\n \n if y-2 >= 0 and self.grid[x][y-2] == cell_value:\n size = 2\n \n if x+1 < CONST_WIDTH and self.grid[x+1][y-1] == cell_value: # continues right\n if self.grid[x+1][y] != \"0\":\n return [False]\n return [True, 1, size]\n \n if x > 0 and self.grid[x-1][y-1] == cell_value: # continues left\n if self.grid[x-1][y] != \"0\":\n return [False]\n return [True, -1, size]\n \n if not (x+1 < CONST_WIDTH and self.grid[x+1][y-1] == cell_value) and not (x > 0 and self.grid[x-1][y-1] == cell_value): # 1-size cell\n return [True, 0, size]\n \n return [False]",
"def item_focus (self, obj):\n if isinstance(obj, Process):\n self.at_top = self.index(obj) is 0\n return\n key = obj\n if key is 'up':\n self.at_top = self.focus is 0\n elif key is 'down':\n self.at_top = False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return True if the ball and paddle are close enough on the game board for us to say they have collided.
|
def collides(self, paddle: Paddle) -> bool:
x_ball = self.xcor()
if abs(x_ball - paddle.xcor()) < 12:
y_ball = self.ycor()
if y_ball < paddle.top and y_ball > paddle.bottom:
if x_ball < 0 and x_ball >= paddle.xcor():
return True
elif x_ball > 0 and x_ball <= paddle.xcor():
return True
return False
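
A concrete worked example of the same test with plain numbers (a right-hand paddle at x = 350 spanning y in (-50, 50); the 12-pixel threshold and the asymmetric x check match the method above).

paddle_x, paddle_top, paddle_bottom = 350, 50, -50
ball_x, ball_y = 345, 0
hit = False
if abs(ball_x - paddle_x) < 12 and paddle_bottom < ball_y < paddle_top:
    if ball_x < 0 and ball_x >= paddle_x:    # left paddle: ball not yet past the paddle face
        hit = True
    elif ball_x > 0 and ball_x <= paddle_x:  # right paddle: ball not yet past the paddle face
        hit = True
print(hit)  # True: within 12 px of the paddle and inside its vertical span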
|
[
"def collide_paddle(self):\n # just check the bottom side of the ball\n if self.obj3() == self.paddle or self.obj4() == self.paddle:\n return True",
"def has_ball_moved(self, ball_1, ball_2):\r\n dist = dist_between_two_balls(ball_1, ball_2)\r\n if not self.white_is_moving:\r\n if dist > 0.1:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False",
"def has_ball_stopped(self, ball_1, ball_2):\r\n dist = dist_between_two_balls(ball_1, ball_2)\r\n if self.white_is_moving:\r\n if dist <= 0.1:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False",
"def ball_collisions(self):\n up_l_corner = self.window.get_object_at(self.ball.x, self.ball.y)\n up_r_corner = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y)\n down_l_corner = self.window.get_object_at(self.ball.x, self.ball.y + self.ball.height)\n down_r_corner = self.window.get_object_at(self.ball.x + self.ball.width, self.ball.y + self.ball.height)\n\n # The situation that the ball hits the paddle.\n if down_l_corner == self.paddle:\n self.__dy = self.reverse_dy\n elif down_r_corner == self.paddle:\n self.__dy = self.reverse_dy\n\n # The situation that the ball hits bricks and remove them.\n if up_l_corner is not None and up_l_corner is not self.paddle and up_l_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(up_l_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif up_r_corner is not None and up_r_corner is not self.paddle and up_r_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(up_r_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif down_l_corner is not None and down_l_corner is not self.paddle and down_l_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(down_l_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)\n elif down_r_corner is not None and down_r_corner is not self.paddle and down_r_corner is not self.__board:\n self.__dy = -self.__dy\n self.window.remove(down_r_corner)\n self.__count -= 1\n self.__score += 1\n self.__board.text = 'Score: ' + str(self.__score)",
"def is_ball_hits_board(self, ball_coord, delta_x, delta_y):\n ball_x = delta_x + ball_coord[0]\n ball_y = delta_y + ball_coord[1]\n ball_r = ball_coord[2]\n\n x1 = self.board.get_rect().left - ball_x\n x2 = self.board.get_rect().right - ball_x\n\n y1 = self.board.get_rect().top - ball_y\n y2 = self.board.get_rect().top - ball_y\n\n dx = float(x2 - x1)\n dy = float(y2 - y1)\n dr = math.sqrt(dx ** 2 + dy ** 2)\n D = float(x1 * y2 - x2 * y1)\n\n discriminant = (ball_r ** 2) * (dr ** 2) - D ** 2\n\n if discriminant < 0:\n return False\n x_intersect_1 = (((D * dy) - dx * sgn(dy) * math.sqrt(discriminant))\n / dr ** 2)\n x_intersect_2 = (((D * dy) + dx * sgn(dy) * math.sqrt(discriminant))\n / dr ** 2)\n\n if ((x1 <= x_intersect_1 and x_intersect_1 <= x2)\n or (x1 <= x_intersect_2 and x_intersect_2 <= x2)):\n return True\n else:\n return False",
"def _check_collision(self, state):\n point = state[0:2]\n\n if len(self._obstacles) == 0:\n return False\n\n for i in range(len(self._obstacles)):\n obstacle = self._obstacles[i]\n center = obstacle[0:2]\n radius = obstacle[2]\n if np.linalg.norm(center-point) < radius:\n return True\n\n return False",
"def paddle_interact(self):\n\n min_x, paddle_top, max_x, _ = self._model.get_paddle_box()\n\n ball_xs, ball_ys = self._model.get_ball_speed()\n ball_x, ball_y = self._model.get_ball_position()\n\n x1, y1 = ball_x + ball_xs, ball_y + ball_ys\n\n if y1 + self._radius <= paddle_top: # still in play above paddle\n return False\n\n if x1 + self._radius < min_x or x1 - self._radius > max_x: # sewer ball\n self._model.exit_ball()\n return False\n\n # ball still in play above paddle\n # will the ball also hit the wall at the same time?\n xs_sign, ys_sign = sign(ball_xs), sign(ball_ys)\n\n # the cell containing the ball centre\n r, c = self._grid_info.pos2rc(ball_x, ball_y)\n\n # If block exists in the adjacent column, ball will collide with wall\n if self._model.is_block_at((r, c + xs_sign)):\n p_x = ball_x + xs_sign * self._radius\n p_y = ball_y - ys_sign * self._radius\n p_xt, _ = self.times_to_cell_boundary(p_x, p_y, ball_xs, ball_ys,\n self._grid_info.rc2rect(r, c))\n\n if p_xt <= 1: # next to wall so bounce off wall and paddle\n ty = (paddle_top - (ball_y + self._radius)) / ball_ys\n self.do_reflect(p_xt, -1, ty, -1)\n\n return True\n\n # at this point the ball bounces off paddle and paddle not near wall\n self.do_paddle_reflect()\n\n return True",
"def is_ball_close(self):\n self.fetch_world_state()\n\n # check if the balls is in close enough to the robot to be grabbed\n ball_kicker_vector = self.vector_from_kicker_to_ball()\n ball_close_x = abs(ball_kicker_vector.x) < self.grab_threshold_x\n ball_close_y = abs(ball_kicker_vector.y) < self.grab_threshold_y\n return ball_close_x and ball_close_y",
"def do_they_collide(ball1, ball2):\n\tif point_distance(ball1._x, ball2._x, ball1._y, ball2._y) < (ball1._radius + ball2._radius):\n\t\treturn True\n\telse:\n\t\treturn False",
"def ball_is_further_in(self):\n return ((self.ball_pos.y >= 0) and (self.pos.y > self.ball_pos.y)\n or (self.ball_pos.y < 0 and self.pos.y < self.ball_pos.y))",
"def has_collided(self):\n return any(self._joint_collision) or any(self._cartesian_collision)",
"def detect_collision(self):\n\n has_collided, obstacle = check_collision(\n self.model.ship, self.model.current_obstacles)\n\n if has_collided:\n self.model.ship.lives -= 1\n self.model.current_obstacles.remove(obstacle)\n\n if not self.model.ship.lives:\n self.model.current_screen = \"Game Over\"",
"def opposing_pieces_check(self, row, col):\n # Check for 'w' in 'BLACK' footprint.\n if self.get_player_turn() == \"BLACK\":\n for val in self.foot_values(row, col):\n if val == \"w\":\n return False\n return True\n # Check for 'b' in 'WHITE' footprint.\n if self.get_player_turn() == \"WHITE\":\n for val in self.foot_values(row, col):\n if val == \"b\":\n return False\n return True",
"def has_move(self, x, y):\n origin = x, y\n return any(self.get_color(x, y) == EMPTY for x, y in self.edge_neighbours(origin))",
"def check_crash(self) -> bool:\n # if player crashes into ground\n if self.player_y + PLAYER_HEIGHT >= self.base_y - 1:\n return True\n else:\n player_rect = pygame.Rect(self.player_x, self.player_y,\n PLAYER_WIDTH, PLAYER_HEIGHT)\n\n for up_pipe, low_pipe in zip(self.upper_pipes, self.lower_pipes):\n # upper and lower pipe rects\n up_pipe_rect = pygame.Rect(up_pipe['x'], up_pipe['y'],\n PIPE_WIDTH, PIPE_HEIGHT)\n low_pipe_rect = pygame.Rect(low_pipe['x'], low_pipe['y'],\n PIPE_WIDTH, PIPE_HEIGHT)\n\n # check collision\n up_collide = player_rect.colliderect(up_pipe_rect)\n low_collide = player_rect.colliderect(low_pipe_rect)\n\n if up_collide or low_collide:\n return True\n\n return False",
"def collision(self):\n t = self.currtetri\n a = t.angle / 90\n for i in range(5):\n for j in range(5):\n if t.matrix[a][i][j] and self.grid[i + t.row][j + t.col]:\n return True\n return False",
"def was_winning_move(self):\n game_is_won = False\n\n action_row = self._lowest_free_row_per_column[self._last_action] - 1\n action_col = self._last_action\n winning_sequence = np.full(\n shape=self._num_connect, fill_value=self.active_player\n )\n\n # Calculate candidate vectors\n row_candidates = self.grid[\n action_row,\n max(0, action_col - self._num_connect + 1) : min(\n self._num_cols, action_col + self._num_connect\n ),\n ]\n if utils.search_sequence_numpy(row_candidates, winning_sequence):\n game_is_won = True\n else:\n col_candidates = self.grid[\n max(0, action_row - self._num_connect + 1) : min(\n self._num_rows, action_row + self._num_connect\n ),\n action_col,\n ]\n if utils.search_sequence_numpy(col_candidates, winning_sequence):\n game_is_won = True\n else:\n diag_index_up = action_col - action_row\n diag_up_candidates = np.diagonal(self.grid, diag_index_up)\n if utils.search_sequence_numpy(diag_up_candidates, winning_sequence):\n game_is_won = True\n else:\n diag_index_down = action_row + action_col - (self._num_rows - 1)\n diag_down_candidates = np.diagonal(self.grid[::-1], diag_index_down)\n if utils.search_sequence_numpy(\n diag_down_candidates, winning_sequence\n ):\n game_is_won = True\n\n if self._verbose and game_is_won:\n print(\"Player '\", self.active_player, \"' has won the game!\")\n return game_is_won",
"def checkForEndOfGame(self):\n # Find list of items on canvas that overlap with region of square\n (x1, y1, x2, y2) = self.wallCanvas.coords(self.mySquare)\n onItems = self.wallCanvas.find_overlapping(x1, y1, x2, y2)\n # If more than one overlaps, then the square is touching a wall or the goal\n if len(onItems) > 1:\n for item in onItems:\n if item in self.wallIDList:\n self.gameOver = \"loss\"\n self.wallCanvas.addtag_withtag()\n break\n elif item == self.goal:\n self.gameOver = \"win\"\n break\n # Display win/loss message if game is over\n if self.gameOver == 'win':\n self.wallCanvas.create_oval(50, 50, 350, 350, fill=\"yellow\")\n self.wallCanvas.create_text(200, 200, text=\"You've won!\")\n elif self.gameOver == 'loss':\n self.wallCanvas.create_oval(50, 50, 350, 350, fill=\"saddle brown\")\n self.wallCanvas.create_text(200, 200, text=\"You've lost!\")",
"def is_win(self):\n for wp in TicTacToe.WINNING_POSITIONS:\n if self.board[wp[0]] == self.board[wp[1]] == self.board[wp[2]] is not None:\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function finds the lowest score for two shredded images. It computes the score with the images aligned one way and then with the order reversed, and returns the lower of the two as a Score object carrying the score, the orientation, and the image.
|
def findCurrScore(image1, image2):
scoreleft = Score(calculateScore(image1, image2), True, image2)
scoreright = Score(calculateScore(image2, image1), False, image2)
currminscore = None
if (scoreleft.score < scoreright.score):
currminscore = scoreleft
else:
currminscore = scoreright
return currminscore
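Note: the Score container used above is not defined anywhere in this record; a minimal stand-in, assuming it only needs to carry the three values shown, could look like the following sketch (hypothetical, for illustration only):

from dataclasses import dataclass
from typing import Any

@dataclass
class Score:
    score: float   # alignment cost returned by calculateScore
    onleft: bool   # True if image1 was placed to the left of image2
    image: Any     # the candidate image that was scored

With such a stand-in, findCurrScore(image1, image2).score gives the cheaper of the two seam orientations.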
|
[
"def calculateScore(image1, image2):\n\timage1col = image1[-1]\n\timage2col = image2[0]\n\n\ttuples = zip(image1col, image2col)\n\n\tscore = 0\n\tfor pixel1, pixel2 in tuples:\n\t\tscore += comparePixels(pixel1, pixel2)\n\n\treturn score",
"def my_best_align(s1, s2):\n s1, s2, l1, l2 = set_variables(s1, s2) #calls for set_variables function\n # now try to find the best match (highest score) for the two sequences\n best_align = None\n best_score = -1\n for i in range(l1): # Note that you just take the last alignment with the highest score\n z = calculate_score(s1, s2, l1, l2, i) #calls calculate_score function\n if z > best_score:\n best_align = \".\" * i + s2 # adding \".\" to show where alignment is starting\n best_score = z\n print(best_align)\n print(s1)\n print(\"Best score:\", best_score)\n return best_align, s1, best_score",
"def Compare(input_avg, avgs):\n \n # input image average \n avg = input_avg \n \n # get the closest RGB value to input, based on x/y/z distance \n index = 0\n min_index = 0\n min_dist = float(\"inf\") \n for val in avgs: \n dist = ((val[0] - avg[0])*(val[0] - avg[0]) +\n (val[1] - avg[1])*(val[1] - avg[1]) +\n (val[2] - avg[2])*(val[2] - avg[2])) \n if dist < min_dist: \n min_dist = dist \n min_index = index \n index += 1\n \n return min_index",
"def compareImages(path1,path2):\n img1 = cv2.imread(path1)\n img2 = cv2.imread(path2)\n\n score = structural_similarity(img1, img2, multichannel=True)\n\n print(f\"SSIM: {score}\")\n return score",
"def main():\n img_haystack = skiutil.img_as_float(data.camera()) # the image in which to search\n img_needle = img_haystack[140:190, 220:270] # the template to search for\n img_sad = np.zeros(img_haystack.shape) # score image\n\n height_h, width_h = img_haystack.shape\n height_n, width_n = img_needle.shape\n\n # calculate score for each pixel\n # stop iterating over pixels when the whole template cannot any more (i.e. stop\n # at bottom and right border)\n for y in range(height_h - height_n):\n for x in range(width_h - width_n):\n patch = img_haystack[y:y+height_n, x:x+width_n]\n img_sad[y, x] = sad(img_needle, patch)\n img_sad = img_sad / np.max(img_sad)\n\n # add highest score to bottom and right borders\n img_sad[height_h-height_n:, :] = np.max(img_sad[0:height_h, 0:width_h])\n img_sad[:, width_h-width_n:] = np.max(img_sad[0:height_h, 0:width_h])\n\n # plot results\n util.plot_images_grayscale(\n [img_haystack, img_needle, img_sad],\n [\"Image\", \"Image (Search Template)\", \"Matching (darkest = best match)\"]\n )",
"def score_slide(slide_1, slide_2):\n number_common = len(list(set(slide_1).intersection(slide_2))) # Number of common elements between both slides\n number_diff_left = len(list(set(slide_1) - set(slide_2)))\n number_diff_right = len(list(set(slide_2) - set(slide_1)))\n min_score = min(number_common, min(number_diff_right, number_diff_left))\n return min_score",
"def align_match(img1, img2, ROI=False):\n alignedImg2 = speckle.conditioning.align_frames(img2, align_to=img1, region=ROI)\n return speckle.conditioning.match_counts(img1, alignedImg2, region=ROI)",
"def find_stitch_index(img1, img2, orientation):\n if orientation == 0:\n for i in range(len(img1)):\n for j in range(len(img2)):\n # calculate the co-variance\n cur = cal_correlation_coefficient(img1[i], img2[j])\n if cur > 0.99:\n nex = cal_correlation_coefficient(img1[i], img2[j + 1])\n if nex > cur:\n continue\n else:\n return j - i\n\n elif orientation == 1:\n for x in range(len(img1[0])):\n for y in range(len(img2[0])):\n # if find the same column\n cur = cal_correlation_coefficient(img1[:, x], img2[:, y])\n if cur > 0.99:\n nex = cal_correlation_coefficient(img1[:, x], img2[:, y + 1])\n if nex > cur:\n continue\n else:\n return y - x",
"def cal_ssim(img1, img2):\n img1 = img1.transpose((1, 2, 0))\n img2 = img2.transpose((1, 2, 0))\n return compare_ssim(img1, img2, multichannel=True)",
"def stitch_images(im1: Image, im2: Image):\n im1_gray, im2_gray = np.array(im1.convert('L')), np.array(im2.convert('L'))\n hips1 = find_harris_interest_points(im1_gray)\n hips2 = find_harris_interest_points(im2_gray)\n possible_translations = calculate_image_translations(im1_gray, hips1, im2_gray, hips2)\n assert len(possible_translations), \"No matches found for Harris interest points\"\n dy, dx = exhaustive_ransac(possible_translations) # Returns (row, column), need to change to (x, y)\n size, im1_offset, im2_offset = {\n (True, True): ((max(im2.size[0]+dx, im1.size[0]), max(im2.size[1]+dy, im1.size[1])), (0, 0), (dx, dy)), # +dx, +dy\n (True, False): ((max(im2.size[0]+dx, im1.size[0]), max(im1.size[1]-dy, im2.size[1])), (0, -dy), (dx, 0)), # +dx, -dy\n (False, True): ((max(im1.size[0]-dx, im2.size[0]), max(im2.size[1]+dy, im1.size[1])), (-dx, 0), (0, dy)), # -dx, +dy\n (False, False): ((max(im1.size[0]-dx, im2.size[0]), max(im1.size[1]-dy, im2.size[1])), (-dx, -dy), (0, 0)) # -dx, -dy\n }[(dx > 0, dy > 0)]\n combined_images = Image.new(im1.mode, size)\n combined_images.paste(im1, im1_offset)\n combined_images.paste(im2, im2_offset)\n return combined_images",
"def dice_score(image_0, image_1):\n if image_0.shape != image_1.shape:\n raise Exceptions.ShapeMismatch()\n\n numerator = 2 * np.sum(image_0 * image_1)\n denominator = np.sum(image_0 > 0) + np.sum(image_1 > 0)\n\n return numerator / float(denominator)",
"def compareImageAgainstAnotherImageGetScore_Features(img1, img2, flag_debug):\n\n # parameters\n filterMatchRatio = 0.75\n\n\n # create a detector and matcher object\n detector, matcher = createDetectorMatcher()\n\n # error if no descriptors were created for either image\n features1, descriptors1 = (detector.detectAndCompute(img1, None))\n if descriptors1 is None or not len(descriptors1):\n print \"No features in img1: %d\" % len(features1)\n return 0.0\n features2, descriptors2 = (detector.detectAndCompute(img2, None))\n if descriptors2 is None or not len(descriptors2):\n print \"No features in img2: %d.\" % len(features2)\n return 0.0\n\n # calc matches between features\n raw_matches = matcher.knnMatch(descriptors1, trainDescriptors=descriptors2, k=2)\n p1, p2, matching_feature_pairs = filterMatches(features1, features2, raw_matches, filterMatchRatio)\n\n # now that we have features lined up, we want to see if there is actually a nice homography transform (rotation, scale) that is consistent with bringing features into alignment.\n\n # numpy arrays and constants used below\n origin = numpy.array([0,0,1])\n dx = numpy.array([1,0,1])\n dy = numpy.array([0,1,1])\n\n # default returns\n match_count = 0\n scale_amount = float('Inf')\n \n # We need at least 4 points to align.\n if len(p1)>=4:\n homography_mat, inlier_pt_mask = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n if homography_mat is not None:\n match_count = numpy.sum(inlier_pt_mask)\n # Sometimes matching faces are visible but the die is rotated. That is,\n # this die has 5 on top but 19 visible to the side, and the other die\n # has 19 on top but 5 visible. OpenCV may find a match, but the match\n # will not be pure translation/rotation, and will distort scale.\n h = homography_mat\n scale_amount = sum([abs(1.0 - numpy.linalg.norm(h.dot(dv) - h.dot(origin))) for dv in (dx, dy)])\n if scale_amount < 1.0:\n scale_amount = (1.0 / scale_amount if scale_amount > 0 else float('Inf'))\n\n # we may want to test scale_amount and disallow the matches if holography alignment scale is too far from 1.0\n\n return match_count",
"def mjpeg_info_cmp(x,y):\n name_x = x[0]\n name_y = y[0]\n value_x = int(name_x.replace('camera_', ''))\n value_y = int(name_y.replace('camera_', ''))\n if value_x > value_y:\n return 1\n elif value_y > value_x:\n return -1\n else:\n return 0",
"def _sort_by_score(im_inds, scores):\n num_im = im_inds[-1] + 1\n rois_per_image = scores.new(num_im.item())\n lengths = []\n for i, s, e in enumerate_by_image(im_inds):\n rois_per_image[i] = 2 * (s - e) * num_im + i\n lengths.append(e - s)\n lengths = sorted(lengths, reverse=True)\n inds, ls_transposed = transpose_packed_sequence_inds(lengths) # move it to TxB form\n inds = torch.LongTensor(inds).cuda(im_inds.get_device())\n\n # ~~~~~~~~~~~~~~~~\n # HACKY CODE ALERT!!!\n # we're sorting by confidence which is in the range (0,1), but more importantly by longest\n # img....\n # ~~~~~~~~~~~~~~~~\n roi_order = scores - 2 * rois_per_image[im_inds]\n _, perm = torch.sort(roi_order, 0, descending=True)\n perm = perm[inds]\n _, inv_perm = torch.sort(perm)\n\n return perm, inv_perm, ls_transposed",
"def findMatchesBetweenImages(image_1, image_2, num_matches):\n # matches - type: list of cv2.DMath\n matches = None\n # image_1_kp - type: list of cv2.KeyPoint items.\n image_1_kp = None\n # image_1_desc - type: numpy.ndarray of numpy.uint8 values.\n image_1_desc = None\n # image_2_kp - type: list of cv2.KeyPoint items.\n image_2_kp = None\n # image_2_desc - type: numpy.ndarray of numpy.uint8 values.\n image_2_desc = None\n\n # COPY YOUR CODE FROM A7 HERE.\n\n # sift = SIFT()\n # image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)\n # image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)\n # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matches = bf.match(image_1_desc,image_2_desc)\n # matches = sorted(matches, key = lambda x:x.distance)\n # matches = matches[:num_matches]\n\n alg = cv2.ORB()\n # alg = cv2.SIFT()\n\n # 1. Compute SIFT keypoints and descriptors for both images\n image_1_kp, image_1_desc = alg.detectAndCompute(image_1,None)\n image_2_kp, image_2_desc = alg.detectAndCompute(image_2,None)\n\n # 2. Create a Brute Force Matcher, using the hamming distance (and set crossCheck to true).\n bf_matcher = cv2.BFMatcher(normType=cv2.NORM_HAMMING,crossCheck=True)\n\n # 3. Compute the matches between both images.\n matches = bf_matcher.match(image_1_desc,image_2_desc)\n\n # 4. Sort the matches based on distance so you get the best matches.\n # 5. ...the top 10 matches in a list.\n matches = sorted(matches, key = lambda x:x.distance)[:num_matches]\n\n\n return image_1_kp, image_2_kp, matches",
"def get_score(img):\n target = img.copy()\n\n target = get_drawing(target)\n\n pos = numpy.where(numpy.logical_and(target == 0, POS_SCORE == 0), 0, 255)\n neg = numpy.where(numpy.logical_and(target == 0, NEG_SCORE == 0), 0, 255)\n \n pos_count = numpy.count_nonzero(pos == 0)\n neg_count = numpy.count_nonzero(neg == 0)\n \n # negative threshold\n neg_count = 0 if neg_count < TOTAL_NEG/10 else neg_count - TOTAL_NEG/10\n \n # subsract negative pixels and scale with maximum scaler\n score = (pos_count - neg_count)/TOTAL_POS\n score = 0 if score < 0 else score\n \n return pos, neg, score",
"def compare_on_image(self, image_id: int, synset_1: str, synset_2: str) -> List[float]:\n img = self.get_image(image_id)\n max_sizes_1: List[int] = list()\n max_sizes_2: List[int] = list()\n # TODO might be better to first index all synsets for each img to avoid this loop\n for object in img['objects']:\n # TODO think about area vs max(width, height). The second seems to be more consistent with the linguistic bootstrapping\n if synset_1 in object['synsets']:\n max_sizes_1.append(max(object['w'], object['h']))\n if synset_2 in object['synsets']:\n max_sizes_2.append(max(object['w'], object['h']))\n relative_sizes: List[float] = list()\n for size_1 in max_sizes_1:\n for size_2 in max_sizes_2:\n try:\n relative_sizes.append(size_1 / size_2)\n except ZeroDivisionError:\n continue\n return relative_sizes",
"def most_similar_image():\n most_similar_index = -1\n return most_similar_index",
"def alignImages(im1, im2):\n #adjust max_features and good_match_percents\n #print('Max features', MAX_FEATURES)\n #print('Using % good', GOOD_MATCH_PERCENT)\n #GOOD_MATCH_PERCENT = 0.15\n #print('Saved Homography:', H_SHORT)\n \n # Detect ORB features and compute descriptors.\n orb = cv2.ORB_create(MAX_FEATURES)\n keypoints1, descriptors1 = orb.detectAndCompute(im1, None)\n keypoints2, descriptors2 = orb.detectAndCompute(im2, None)\n\n # Match features.\n matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)\n try:\n matches = matcher.match(descriptors1, descriptors2, None)\n except:\n matches = None\n #print('could not match features')\n \n if matches is not None:\n # Sort matches by likelihood of being a match.\n matches.sort(key=lambda x: x.distance, reverse=False)\n\n # Remove not so good matches.\n numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)\n matches = matches[:numGoodMatches]\n\n # Draw top matches.\n imMatches = cv2.drawMatches(im1, keypoints1, im2, keypoints2, matches, None)\n #cv2.imshow('Matches', imMatches)\n #cv2.waitKey(2000)\n cv2.imwrite(\"/home/pi/PlantHealth/SavedImages/matches.jpg\", imMatches)\n \n # Extract location of good matches.\n points1 = np.zeros((len(matches), 2), dtype=np.float32)\n points2 = np.zeros((len(matches), 2), dtype=np.float32)\n\n for i, match in enumerate(matches):\n points1[i, :] = keypoints1[match.queryIdx].pt\n points2[i, :] = keypoints2[match.trainIdx].pt\n\n # Find homography or use a stored version in case of emergency\n # or if the homography seems inaccurate.\n try:\n h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)\n except:\n h = H_SHORT #using stored homography\n #print('Using saved homography h:', h)\n finally:\n # Use homography.\n if h is None or not (np.allclose(h, H_SHORT, rtol=0, atol=0.5) \n or np.allclose(h, H, rtol=0, atol=0.5)):\n h = H_SHORT\n #print('Using saved homography h:', h)\n #print('Using homography h:', repr(h))\n height, width, channels = im2.shape\n im1Reg = cv2.warpPerspective(im1, h, (width, height))\n else:\n im1Reg = im1 #could not register the image\n return im1Reg"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function calculates the score of placing image1 to the left of image2. It does so by walking pixel by pixel down the seam, comparing the last column of image1 with the first column of image2, and summing the per-pixel differences. It returns the total score.
|
def calculateScore(image1, image2):
image1col = image1[-1]
image2col = image2[0]
tuples = zip(image1col, image2col)
score = 0
for pixel1, pixel2 in tuples:
score += comparePixels(pixel1, pixel2)
return score
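A quick sketch of the column-major layout this function assumes (an image is a list of columns, each column a list of pixels from top to bottom); the placeholder strings below stand in for real pixel objects:

image1 = [["a0", "a1"], ["b0", "b1"]]   # two columns of two "pixels" each
image2 = [["c0", "c1"], ["d0", "d1"]]

# The seam scored is image1's last column against image2's first column:
print(list(zip(image1[-1], image2[0])))  # [('b0', 'c0'), ('b1', 'c1')]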
|
[
"def findCurrScore(image1, image2):\n\tscoreleft = Score(calculateScore(image1, image2), True, image2)\n\tscoreright = Score(calculateScore(image2, image1), False, image2)\n\n\tcurrminscore = None\n\tif (scoreleft.score < scoreright.score):\n\t\tcurrminscore = scoreleft\n\telse:\n\t\tcurrminscore = scoreright\n\n\treturn currminscore",
"def merge_left(self):\n\n for i in range(4):\n for j in range(3):\n if(self.grid[i][j] == self.grid[i][j+1]):\n self.grid[i][j] = self.grid[i][j] * 2\n self.grid[i][j + 1] = 0\n self.score += self.grid[i][j]\n return self.grid, self.score",
"def dice_score(image_0, image_1):\n if image_0.shape != image_1.shape:\n raise Exceptions.ShapeMismatch()\n\n numerator = 2 * np.sum(image_0 * image_1)\n denominator = np.sum(image_0 > 0) + np.sum(image_1 > 0)\n\n return numerator / float(denominator)",
"def get_score(img):\n target = img.copy()\n\n target = get_drawing(target)\n\n pos = numpy.where(numpy.logical_and(target == 0, POS_SCORE == 0), 0, 255)\n neg = numpy.where(numpy.logical_and(target == 0, NEG_SCORE == 0), 0, 255)\n \n pos_count = numpy.count_nonzero(pos == 0)\n neg_count = numpy.count_nonzero(neg == 0)\n \n # negative threshold\n neg_count = 0 if neg_count < TOTAL_NEG/10 else neg_count - TOTAL_NEG/10\n \n # subsract negative pixels and scale with maximum scaler\n score = (pos_count - neg_count)/TOTAL_POS\n score = 0 if score < 0 else score\n \n return pos, neg, score",
"def __compare_columns(self, col1, col2, diff=0, y=0):\n pixel1 = self.__get_pixel_value(col1 * self.__shredSize, y)\n pixel2 = self.__get_pixel_value(col2 * self.__shredSize + self.__shredSize - 1, y)\n diff += self.__calculate_pixel_difference(pixel1, pixel2)\n if y < self.__imageSize[1] - 1:\n y += 1\n return self.__compare_columns(col1, col2, diff, y)\n else:\n return diff / self.__imageSize[1]",
"def comparePixels(pixel1, pixel2):\n\ttotal = 0\n\ttotal += (pixel1.red - pixel2.red)**2\n\ttotal += (pixel1.green - pixel2.green)**2\n\ttotal += (pixel1.blue - pixel2.blue)**2\n\ttotal += (pixel1.alpha - pixel2.alpha)**2\n\treturn total",
"def _image_difference(image_1_path, image_2_path):\n\n image_1 = Image.open(image_1_path)\n image_2 = Image.open(image_2_path)\n\n if image_1.mode != image_2.mode:\n # Different kinds of images.\n return 100\n\n if image_1.size != image_2.size:\n # Different sizes\n return 100\n\n pairs = zip(image_1.getdata(), image_2.getdata())\n if len(image_1.getbands()) == 1:\n # for gray-scale JPEGS\n dif = sum(abs(p1 - p2) for p1, p2 in pairs)\n else:\n dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))\n\n n_components = image_1.size[0] * image_1.size[1] * 3\n return (dif / 255.0 * 100) / n_components",
"def _average_pixel_distance(pic1, pic2):\r\n \r\n resized = pic1.resize(pic2.size)\r\n size = pic2.size[0] * pic2.size[1]\r\n # Create a new image using difference function\r\n pixel_difference = difference(resized, pic2).getdata()\r\n pixel_distance = sum([(((data[0] ** 2) + (data[1] ** 2) + \r\n (data[2] ** 2)) ** 0.5) \r\n for data in pixel_difference]) / size\r\n return [pixel_distance, resized]",
"def compare(t1, t2):\n score = 0\n\n for i in range(len(t1)):\n for j in range(len(t1[0])):\n a = t1[i][j]\n b = t2[i][j]\n cell_score = abs(a - b)\n score += cell_score\n return score",
"def compareImageAgainstAnotherImageGetScore_Features(img1, img2, flag_debug):\n\n # parameters\n filterMatchRatio = 0.75\n\n\n # create a detector and matcher object\n detector, matcher = createDetectorMatcher()\n\n # error if no descriptors were created for either image\n features1, descriptors1 = (detector.detectAndCompute(img1, None))\n if descriptors1 is None or not len(descriptors1):\n print \"No features in img1: %d\" % len(features1)\n return 0.0\n features2, descriptors2 = (detector.detectAndCompute(img2, None))\n if descriptors2 is None or not len(descriptors2):\n print \"No features in img2: %d.\" % len(features2)\n return 0.0\n\n # calc matches between features\n raw_matches = matcher.knnMatch(descriptors1, trainDescriptors=descriptors2, k=2)\n p1, p2, matching_feature_pairs = filterMatches(features1, features2, raw_matches, filterMatchRatio)\n\n # now that we have features lined up, we want to see if there is actually a nice homography transform (rotation, scale) that is consistent with bringing features into alignment.\n\n # numpy arrays and constants used below\n origin = numpy.array([0,0,1])\n dx = numpy.array([1,0,1])\n dy = numpy.array([0,1,1])\n\n # default returns\n match_count = 0\n scale_amount = float('Inf')\n \n # We need at least 4 points to align.\n if len(p1)>=4:\n homography_mat, inlier_pt_mask = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n if homography_mat is not None:\n match_count = numpy.sum(inlier_pt_mask)\n # Sometimes matching faces are visible but the die is rotated. That is,\n # this die has 5 on top but 19 visible to the side, and the other die\n # has 19 on top but 5 visible. OpenCV may find a match, but the match\n # will not be pure translation/rotation, and will distort scale.\n h = homography_mat\n scale_amount = sum([abs(1.0 - numpy.linalg.norm(h.dot(dv) - h.dot(origin))) for dv in (dx, dy)])\n if scale_amount < 1.0:\n scale_amount = (1.0 / scale_amount if scale_amount > 0 else float('Inf'))\n\n # we may want to test scale_amount and disallow the matches if holography alignment scale is too far from 1.0\n\n return match_count",
"def compareImages(path1,path2):\n img1 = cv2.imread(path1)\n img2 = cv2.imread(path2)\n\n score = structural_similarity(img1, img2, multichannel=True)\n\n print(f\"SSIM: {score}\")\n return score",
"def scoreDiff(ups, downs):\n return ups - downs",
"def image_diff_score(screenshot: Image, reference: Image, binary_diff=True) -> float:\n img_rgb = _read_cv_image(screenshot)\n ref_rgb = _read_cv_image(reference)\n if img_rgb.shape != ref_rgb.shape:\n raise ValueError(\n f'Images have different shapes: {img_rgb.shape}, {ref_rgb.shape}'\n )\n if binary_diff:\n diff = img_rgb != ref_rgb\n pixel_diff = np.max(diff, -1)\n return np.sum(pixel_diff) / np.prod(pixel_diff.shape)\n else:\n # note: numpy difference won't work because they are uint8\n diff = cv.absdiff(img_rgb, ref_rgb)\n return np.sum(diff) / np.prod(diff.shape) / 255",
"def recall(im1, im2):\n tp = np.count_nonzero((im2 + im1) == 2)\n allr = np.count_nonzero(im1 == 1)\n return tp * 1.0 / allr",
"def distance(image1, image2):\n m1 = 1\n m2 = 2\n assert image1.size() == image2.size()\n flat_1 = [col for dim in image1.pixels for row in dim for col in row]\n flat_2 = [col for dim in image2.pixels for row in dim for col in row]\n dist = [((flat_1[i] - flat_2[i]) ** m2) for i in range(len(flat_1))]\n return sum(dist) ** (m1/m2)",
"def img_diff(img1, img2, caption):\n # Take the absolute difference of the images\n res = cv2.absdiff(img1, img2)\n\n # Convert the result to integer type\n res = res.astype(np.uint8)\n\n # Find percentage difference based on number of pixels that are not zero\n percentage = (np.count_nonzero(res) * 100) / res.size\n\n output = f\"{caption} Percentage: {percentage}\"\n print(output)",
"def calculateScore(board,gameState):\n pass",
"def increment_diff(self, image1, image2, image_show) -> int:\n img1 = self.get_image(image1)\n img2 = self.get_image(image2)\n score_list = HashSimilar.get_attention(img1, img2)\n img1_feature, img2_feature = self.get_image_feature(img1, img2)\n line1, line2 = self.get_line_list(m_diff(img1_feature, img2_feature, equal_obj=LineFeatureEqual()))\n line = line1 + line2\n line = self.line_filter(line)\n img_show = img2.copy() if img2.shape[0] > img1.shape[0] else img1.copy()\n (h, w) = img_show.shape\n img_show = cv2.cvtColor(img_show, cv2.COLOR_GRAY2BGR)\n points = []\n line_attention = []\n for l in line:\n i = int((len(score_list) * (l - 1) / h))\n i = 0 if i < 0 else i\n if score_list[i] < 0.98:\n line_attention.append(l)\n line = line_attention\n for y in range(int(h*0.95)):\n if y > int(w * self.head_scale):\n if y in line:\n for x in range(w-self.padding):\n p1 = int(self.get_pixel(img1, x, y))\n p2 = int(self.get_pixel(img2, x, y))\n if abs(p1 - p2) < self.pixel_value:\n pass\n else:\n points.append([x, y])\n for point in points:\n cv2.circle(img_show, (point[0], point[1]), 1, (0, 0, 255), -1)\n cv2.imwrite(image_show, img_show)\n return len(points)",
"def cal_ssim(img1, img2):\n img1 = img1.transpose((1, 2, 0))\n img2 = img2.transpose((1, 2, 0))\n return compare_ssim(img1, img2, multichannel=True)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function calculates the difference between two pixels by summing the squared differences of their R, G, B, and A components. It returns the total difference.
|
def comparePixels(pixel1, pixel2):
total = 0
total += (pixel1.red - pixel2.red)**2
total += (pixel1.green - pixel2.green)**2
total += (pixel1.blue - pixel2.blue)**2
total += (pixel1.alpha - pixel2.alpha)**2
return total
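A self-contained usage sketch; the pixel type is hypothetical (the record never defines one), so a simple namedtuple with red/green/blue/alpha fields is assumed, and the metric above is repeated verbatim so the snippet runs on its own:

from collections import namedtuple

Pixel = namedtuple("Pixel", ["red", "green", "blue", "alpha"])

def comparePixels(pixel1, pixel2):
    # same squared per-channel difference as in the record above
    return ((pixel1.red - pixel2.red) ** 2
            + (pixel1.green - pixel2.green) ** 2
            + (pixel1.blue - pixel2.blue) ** 2
            + (pixel1.alpha - pixel2.alpha) ** 2)

print(comparePixels(Pixel(10, 20, 30, 255), Pixel(12, 18, 33, 255)))  # 4 + 4 + 9 + 0 = 17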
|
[
"def __calculate_pixel_difference(self, pixel1, pixel2):\n return sum( [ (math.log(color1 / 255.0 + 1.0 / 255) - \n math.log(color2 / 255.0 + 1.0 / 255)) ** 2 \n for color1, color2 in zip(pixel1, pixel2) ])\n # This algorithm is not working as properly.\n # return sum( [ abs(color1 - color2) for color1, color2 in zip(pixel1, pixel2) ] ) ",
"def color_diff(a, b):\n \n arr_ = (c_double * len(self.a))(*self.a)\n \n rgb2srgb(arr_)\n \n srgb2linear(arr_)\n \n linear2xyz(arr_)\n \n xyz2Lab(arr_)\n \n arr_2 = (c_double * len(self.b))(*self.b)\n \n rgb2srgb(arr_2)\n \n srgb2linear(arr_2)\n \n linear2xyz(arr_2)\n \n xyz2Lab(arr_2)\n \n delta = delta_cie_2000_(arr_, arr_2)\n \n return delta",
"def _image_difference(image_1_path, image_2_path):\n\n image_1 = Image.open(image_1_path)\n image_2 = Image.open(image_2_path)\n\n if image_1.mode != image_2.mode:\n # Different kinds of images.\n return 100\n\n if image_1.size != image_2.size:\n # Different sizes\n return 100\n\n pairs = zip(image_1.getdata(), image_2.getdata())\n if len(image_1.getbands()) == 1:\n # for gray-scale JPEGS\n dif = sum(abs(p1 - p2) for p1, p2 in pairs)\n else:\n dif = sum(abs(c1 - c2) for p1, p2 in pairs for c1, c2 in zip(p1, p2))\n\n n_components = image_1.size[0] * image_1.size[1] * 3\n return (dif / 255.0 * 100) / n_components",
"def color_diff(rgb1, rgb2):\n diff = math.sqrt((rgb1[0]-rgb2[0])**2 + (rgb1[1]-rgb2[1])**2 + (rgb1[2]-rgb2[2])**2)\n return diff",
"def rgb_distance(rgb1: RGB, rgb2: RGB) -> int:\n return sum(map(lambda c: (c[0] - c[1]) ** 2,\n zip(rgb1, rgb2)))",
"def image_diff_score(screenshot: Image, reference: Image, binary_diff=True) -> float:\n img_rgb = _read_cv_image(screenshot)\n ref_rgb = _read_cv_image(reference)\n if img_rgb.shape != ref_rgb.shape:\n raise ValueError(\n f'Images have different shapes: {img_rgb.shape}, {ref_rgb.shape}'\n )\n if binary_diff:\n diff = img_rgb != ref_rgb\n pixel_diff = np.max(diff, -1)\n return np.sum(pixel_diff) / np.prod(pixel_diff.shape)\n else:\n # note: numpy difference won't work because they are uint8\n diff = cv.absdiff(img_rgb, ref_rgb)\n return np.sum(diff) / np.prod(diff.shape) / 255",
"def rgb_distance(rgb1, rgb2):\n return math.sqrt(np.sum((np.array(rgb1, np.float32) - np.array(rgb2, np.float32))**2))",
"def sad(img1, img2):\n return np.sum(np.abs(img1 - img2))",
"def img_diff(img1, img2, caption):\n # Take the absolute difference of the images\n res = cv2.absdiff(img1, img2)\n\n # Convert the result to integer type\n res = res.astype(np.uint8)\n\n # Find percentage difference based on number of pixels that are not zero\n percentage = (np.count_nonzero(res) * 100) / res.size\n\n output = f\"{caption} Percentage: {percentage}\"\n print(output)",
"def _average_pixel_distance(pic1, pic2):\r\n \r\n resized = pic1.resize(pic2.size)\r\n size = pic2.size[0] * pic2.size[1]\r\n # Create a new image using difference function\r\n pixel_difference = difference(resized, pic2).getdata()\r\n pixel_distance = sum([(((data[0] ** 2) + (data[1] ** 2) + \r\n (data[2] ** 2)) ** 0.5) \r\n for data in pixel_difference]) / size\r\n return [pixel_distance, resized]",
"def calculateError(A, B):\n errorMatrix = A - B\n total = 0\n for row in errorMatrix.Rowsp:\n for entry in row:\n total += (entry * entry)\n return math.sqrt(total)",
"def color_dist( c1, c2):\n return sum( (a-b)**2 for a,b in zip(to_ycc(c1),to_ycc(c2)) )",
"def difference(self, array1, array2):\n difference = np.sum(array2.flatten() - array1.flatten()) / np.sum(array2.flatten())\n return difference",
"def squared_difference(A,B):\n diff = A-B\n sq_diff = np.einsum('ijk,ijk->',diff,diff)\n return sq_diff",
"def ImageDelta (image1, image2, mask = False):\n img1_factor = np.mean(image1)\n img2_factor = np.mean(image2)\n\n img1 = np.clip(image1/(img1_factor/10000),0,64000)\n img2 = np.clip(image2/(img2_factor/10000),0,64000)\n\n contrast_image = np.absolute(img1 - img2)\n raw_contrast_image = np.absolute(image1 - image2)\n\n if np.any(mask) == False:\n RMS_norm = math.sqrt(np.square(contrast_image).mean())\n RMS_raw = math.sqrt(np.square(raw_contrast_image).mean())\n else:\n RMS_norm = math.sqrt(np.square(contrast_image[~mask]).mean())\n RMS_raw = math.sqrt(np.square(raw_contrast_image[~mask]).mean())\n\n return RMS_norm, RMS_raw, contrast_image",
"def color_pair_distance(color_pair_1, color_pair_2):\n lux1 = np.average(color_pair_1)\n lux2 = np.average(color_pair_2)\n dux1 = (np.array(color_pair_1) / max(8, lux1)).tolist()\n dux2 = (np.array(color_pair_2) / max(8, lux2)).tolist()\n ds = list([rgb_distance(c1, c2) for c1 in dux1 for c2 in dux2])\n return min(ds[0] + ds[3], ds[1] + ds[2])",
"def _tile_tile_distance(self, tile1, tile2):\n return average(norm(tile1.astype(float) - tile2.astype(float), axis=2)) / math.sqrt(255.0**2 + 255.0**2 + 255.0**2)",
"def img_diff(im1, im2):\n im1 = Image.open(io.BytesIO(im1))\n im2 = Image.open(io.BytesIO(im2))\n\n # Ensure we have the same color channels (RGBA vs RGB)\n if im1.mode != im2.mode:\n raise ValueError(\n (\"Differing color modes:\\n {}\\n {}\\n\"\n \"Ensure image color modes are the same.\").format(im1.mode, im2.mode))\n\n # Coerce 2nd dimensions to same as 1st\n im2 = im2.resize((im1.width, im1.height))\n\n # Generate diff image in memory.\n diff_img = ImageChops.difference(im1, im2)\n\n r, g, b, _ = diff_img.split()\n rgb_image = Image.merge('RGB', (r, g, b))\n\n num_diff_pixel = sum(\n rgb_image.point(lambda x: 255\n if x else 0).convert(\"L\").point(bool).getdata())\n diff_ratio = num_diff_pixel / rgb_image.width / rgb_image.height\n return ImageOps.invert(rgb_image), diff_ratio",
"def pixel_distance(pixel1, pixel2):\n return np.power(np.power(pixel1[0] - pixel2[0], 2) + np.power(pixel1[1] - pixel2[1], 2), 0.5)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function takes two images and an orientation flag and returns the two images joined together. It does so by appending each column of the right image onto the left image's data. It returns the final merged image.
|
def merge(image1, image2, onleft):
if not onleft:
return merge(image2, image1, True)
finalimage = image1
for col in image2:
finalimage.append(col)
return finalimage
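One caveat worth noting: finalimage = image1 only aliases the first image, so image1's column list is extended in place. A non-mutating variant (a sketch, not part of the original record) could copy the columns instead:

def merge_non_mutating(image1, image2, onleft):
    # Same column-appending idea as merge(), but without modifying image1.
    left, right = (image1, image2) if onleft else (image2, image1)
    return list(left) + list(right)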
|
[
"def merge_images_side_by_side(image1, image2):\n (width1, height1) = image1.size\n (width2, height2) = image2.size\n\n result_width = width1 + width2\n result_height = max(height1, height2)\n\n result = Image.new('RGB', (result_width, result_height))\n result.paste(im=image1, box=(0, 0))\n result.paste(im=image2, box=(width1, 0))\n return result",
"def mergeImageHorizontally(image1,image2): \r\n (width1, height1) = image1.size\r\n (width2, height2) = image2.size \r\n\r\n if(width2 == 1):\r\n result_width = width1 + 1\r\n else:\r\n result_width = width1 + resize_width\r\n result_height = max(height1, height2)\r\n\r\n result = Image.new('RGBA', (result_width,result_height))\r\n \r\n result.paste(im=image1, box=(0, 0), mask=0)\r\n result.paste(im=image2, box=(width1, 0), mask=0)\r\n \r\n return result",
"def merge_images(file1, file2):\n image1 = file1\n image2 = file2\n\n (width1, height1) = image1.size\n (width2, height2) = image2.size\n\n result_width = width1 + width2\n result_height = max(height1, height2)\n\n result = Image.new('RGB', (result_width, result_height))\n result.paste(im=image1, box=(0, 0))\n result.paste(im=image2, box=(width1, 0))\n return result",
"def merge_right(img1, img2, h, matches_lst, x_shift_cum=0, y_shift_cum=0):\r\n h_inv = np.reshape(np.linalg.inv(np.reshape(h, (3, 3))), (9,))\r\n h_inv = h_inv / np.sum(h_inv)\r\n img2, img2_x_shift, img2_y_shift = homography_img(img2, h_inv, output_shape=(2000, 800), align_left=False)\r\n p1, p2 = matches_lst[0]\r\n h_x, h_y = homography_pnt((p2[0] + x_shift_cum, p2[1] + y_shift_cum, 1), h_inv)\r\n h_x += img2_x_shift\r\n h_y += img2_y_shift\r\n img1_x_shift = int(h_x - p1[0])\r\n img1_y_shift = int(h_y - p1[1])\r\n\r\n img2[img1_y_shift:img1_y_shift + img1.shape[0], img1_x_shift:img1_x_shift + img1.shape[1], :] = img1\r\n return img2, img1_x_shift, img1_y_shift",
"def stitch_images(im1: Image, im2: Image):\n im1_gray, im2_gray = np.array(im1.convert('L')), np.array(im2.convert('L'))\n hips1 = find_harris_interest_points(im1_gray)\n hips2 = find_harris_interest_points(im2_gray)\n possible_translations = calculate_image_translations(im1_gray, hips1, im2_gray, hips2)\n assert len(possible_translations), \"No matches found for Harris interest points\"\n dy, dx = exhaustive_ransac(possible_translations) # Returns (row, column), need to change to (x, y)\n size, im1_offset, im2_offset = {\n (True, True): ((max(im2.size[0]+dx, im1.size[0]), max(im2.size[1]+dy, im1.size[1])), (0, 0), (dx, dy)), # +dx, +dy\n (True, False): ((max(im2.size[0]+dx, im1.size[0]), max(im1.size[1]-dy, im2.size[1])), (0, -dy), (dx, 0)), # +dx, -dy\n (False, True): ((max(im1.size[0]-dx, im2.size[0]), max(im2.size[1]+dy, im1.size[1])), (-dx, 0), (0, dy)), # -dx, +dy\n (False, False): ((max(im1.size[0]-dx, im2.size[0]), max(im1.size[1]-dy, im2.size[1])), (-dx, -dy), (0, 0)) # -dx, -dy\n }[(dx > 0, dy > 0)]\n combined_images = Image.new(im1.mode, size)\n combined_images.paste(im1, im1_offset)\n combined_images.paste(im2, im2_offset)\n return combined_images",
"def hybrid_image(self):\n\n src1 = np.copy(self.imagesData[\"2_1\"])\n src2 = np.copy(self.imagesData[\"2_2\"])\n\n # Minimum required shape\n min_shape = (min(src1.shape[0], src2.shape[0]),\n min(src1.shape[1], src2.shape[1]))\n\n # resize images to ensure both have same shapes\n src1_resized = cv2.resize(src1, min_shape, interpolation=cv2.INTER_AREA)\n src2_resized = cv2.resize(src2, min_shape, interpolation=cv2.INTER_AREA)\n\n # Apply filters\n image1_dft = FrequencyFilters.high_pass_filter(source=src1_resized, size=20)\n image2_dft = FrequencyFilters.low_pass_filter(source=src2_resized, size=15)\n\n # Mix 2 images\n hybrid_image = image1_dft + image2_dft\n\n self.display_image(source=hybrid_image, widget=self.img2_output)",
"def merge_left(img1, img2, h, matches_lst, x_shift_cum=0, y_shift_cum=0):\r\n img1, img1_x_shift, img1_y_shift = homography_img(img1, h, (2000, 1200))\r\n p1, p2 = matches_lst[0]\r\n h_x, h_y = homography_pnt((p1[0] + x_shift_cum, p1[1] + y_shift_cum, 1), h)\r\n h_x += img1_x_shift\r\n h_y += img1_y_shift\r\n img2_x_shift = int(h_x - p2[0])\r\n img2_y_shift = int(h_y - p2[1])\r\n\r\n img1_slice = img1[img2_y_shift:img2_y_shift + img2.shape[0], img2_x_shift:img2_x_shift + img2.shape[1], :]\r\n img1_slice[img1_slice < 3] = \\\r\n img2[0:img1_slice.shape[0], 0:img1_slice.shape[1], 0:img1_slice.shape[2]][img1_slice < 3]\r\n\r\n # Show img1 for debugging purposes\r\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 11))\r\n ax.imshow(img1)\r\n plt.show()\r\n\r\n return img1, img2_x_shift, img2_y_shift",
"def concatenate_images(filename1, filename2):\n images = [Image.open(i) for i in [filename1, filename2]]\n\n # resize larger image to size of the smaller one\n min_shape = sorted([(np.sum(i.size), i.size) for i in images])[0][1]\n imgs_comb = np.hstack((np.asarray(i.resize(min_shape)) for i in images))\n\n new_filename = filename1.split(\".\")[0] + filename2.split(\"/\")[-1]\n\n # save that beautiful picture\n imgs_comb = Image.fromarray(imgs_comb)\n imgs_comb.save(new_filename)\n\n return new_filename",
"def mix_images(image1, image2, channel1, channel2):\n\n out = None\n ### YOUR CODE HERE\n h = image1.shape[0]\n w = image1.shape[1]\n r1, g1, b1 = image1[:,:int(w/2),0], image1[:,:int(w/2),1], image1[:,:int(w/2),2]\n r2, g2, b2 = image2[:,int(w/2):,0], image2[:,int(w/2):,1], image2[:,int(w/2):,2]\n if channel1 == 'R':\n r1 = np.zeros((h, int(w/2)))\n elif channel1 == 'G':\n g1 = np.zeros((h, int(w/2)))\n elif channel1 == 'B':\n b1 = np.zeros((h, int(w/2)))\n else:\n print('Input channel1 is not RGB!')\n \n if channel2 == 'R':\n r2 = np.zeros((h, int(w/2)))\n elif channel2 == 'G':\n g2 = np.zeros((h, int(w/2)))\n elif channel2 == 'B':\n b2 = np.zeros((h, int(w/2)))\n else:\n print('Input channel2 is not RGB!') \n \n out = np.concatenate((np.stack([r1, g1, b1], axis=2), np.stack([r2, g2, b2], axis=2)), axis=1)\n ### END YOUR CODE\n\n return out",
"def combineImages(center, left, right, measurement, correction):\r\n imagePaths = []\r\n imagePaths.extend(center)\r\n imagePaths.extend(left)\r\n imagePaths.extend(right)\r\n measurements = []\r\n measurements.extend(measurement)\r\n measurements.extend([x + correction for x in measurement])\r\n measurements.extend([x - correction for x in measurement])\r\n return (imagePaths, measurements)",
"def concatenate(\n left,\n right,\n vertical=False,\n center=True,\n pad_color=(0., 0., 0.)\n):\n if vertical:\n if left.shape[1] > right.shape[1]:\n right = pad_crop(\n right,\n left.shape[1],\n None,\n center=center,\n pad_color=pad_color\n )\n elif left.shape[1] < right.shape[1]:\n left = pad_crop(\n left,\n right.shape[1],\n None,\n center=center,\n pad_color=pad_color\n )\n else:\n if left.shape[0] > right.shape[0]:\n right = pad_crop(\n right,\n None,\n left.shape[0],\n center=center,\n pad_color=pad_color\n )\n elif left.shape[0] < right.shape[0]:\n left = pad_crop(\n left,\n None,\n right.shape[0],\n center=center,\n pad_color=pad_color\n )\n\n return np.concatenate((left, right), axis=1 - int(vertical))",
"def create_hybrid_image(image1, image2, filter):\n\n assert image1.shape[0] == image2.shape[0]\n assert image1.shape[1] == image2.shape[1]\n assert image1.shape[2] == image2.shape[2]\n\n ############################\n ### TODO: YOUR CODE HERE ###\n\n low_frequencies = my_imfilter(image1, filter)\n high_frequencies = image2 - my_imfilter(image2, filter)\n\n hybrid_image = low_frequencies + (high_frequencies)\n np.clip(hybrid_image, 0, 1, out=hybrid_image)\n\n ### END OF STUDENT CODE ####\n ############################\n\n return low_frequencies, high_frequencies, hybrid_image",
"def blend_images(img1, img2, alpha):\n\n assert img1.shape == img2.shape\n assert np.isscalar(alpha) or alpha.shape == img1.shape[0:2]\n\n img = np.zeros(img1.shape)\n for i in range(3):\n img[:, :, i] = img1[:, :, i] * (1 - alpha) + img2[:, :, i] * alpha\n\n return np.cast[np.uint8](img)",
"def overlay_two_images(img1: Image, img2: Image):\n img1 = img1.convert(\"RGBA\")\n img2 = img2.convert(\"RGBA\")\n\n (width1, height1) = img1.size\n (width2, height2) = img2.size\n assert width1 == width2 and height1 == height2\n\n background = Image.new(\"RGBA\", (width1, height1), \"BLACK\")\n background.paste(img1, (0, 0), img1)\n background.paste(img2, (0, 0), img2)\n\n return background",
"def combine_images_vertically(images):\n widths, heights = zip(*(i.size for i in images))\n total_height = sum(heights)\n total_width = max(widths)\n\n new_im = Image.new(\"RGB\", (total_width, total_height))\n\n y_offset = 0\n for im in images:\n # center the x difference if an image is slightly smaller width\n x_offset = int((total_width - im.size[0]) / 2)\n new_im.paste(im, (x_offset, y_offset))\n y_offset += im.size[1]\n return new_im",
"def imcalc(image1, image2, out_im, op='-'):\n min_ext = 2\n\n pf_1 = pyfits.open(image1)\n pf_2 = pyfits.open(image2)\n\n next_1 = len(pf_1)\n next_2 = len(pf_2)\n\n # Inputs must have at least 1 primary header and 1 data ext\n if next_1 < min_ext:\n pf_1.close()\n pf_2.close()\n raise ValueError('image1 has {} ext but expect >={}.'.format(\n next_1, min_ext))\n\n # Inputs must have same number of extensions\n if next_1 != next_2:\n pf_1.close()\n pf_2.close()\n raise ValueError('image1 has {} ext but image2 has {}.'.format(\n next_1, next_2))\n\n out_phdr = pyfits.PrimaryHDU()\n out_phdr.header.add_history('IMAGE1 {}'.format(os.path.basename(image1)))\n out_phdr.header.add_history('IMAGE2 {}'.format(os.path.basename(image2)))\n out_phdr.header.add_history('IMAGE1 {} IMAGE2'.format(op))\n\n out_hdu = pyfits.HDUList([out_phdr])\n\n for i in xrange(1, next_1):\n data_1 = pf_1[i].data\n data_2 = pf_2[i].data\n\n if data_1 is None or data_2 is None:\n module_logger.warn('input(s) has NoneType data.')\n hdu = pyfits.ImageHDU()\n\n else:\n if data_1.dtype != data_2.dtype:\n module_logger.warn(\n 'In ext {}, image1 is {} but image2 is {}'.format(\n i, data_1.dtype, data_2.dtype))\n\n if op == '/':\n out_data = data_1 / data_2\n else:\n out_data = data_1 - data_2\n\n hdu = pyfits.ImageHDU(out_data)\n\n # Inherit EXTNAME and EXTVER from image1\n hdu.update_ext_name(pf_1[i].name)\n hdu.update_ext_version(pf_1[i]._extver)\n\n out_hdu.append(hdu)\n\n out_hdu.writeto(out_im, clobber=True)\n\n pf_1.close()\n pf_2.close()",
"def create_diffimg(fits1,ext1,fits2,ext2,outfile,overwrite=True,header=False,verbose=True):\n img1 = afits.open(fits1)[ext1].data\n img2 = afits.open(fits2)[ext2].data\n\n if header:\n hdr = afits.open(fits1)[ext1].header\n else:\n hdr = None\n\n diffimage = img1-img2\n\n hdu = afits.PrimaryHDU(diffimage, header=hdr)\n hdu.writeto(outfile, overwrite=overwrite)",
"def prepare_input_data(self, img1, img2, data_format):\n # scale images if necessary\n if img1.size[0] != 256 or img1.size[1] != 192:\n img1 = img1.resize((256,192))\n if img2.size[0] != 256 or img2.size[1] != 192:\n img2 = img2.resize((256,192))\n img2_2 = img2.resize((64,48))\n \n # transform range from [0,255] to [-0.5,0.5]\n img1_arr = np.array(img1).astype(np.float32)/255 -0.5\n img2_arr = np.array(img2).astype(np.float32)/255 -0.5\n img2_2_arr = np.array(img2_2).astype(np.float32)/255 -0.5\n \n if data_format == 'channels_first':\n img1_arr = img1_arr.transpose([2,0,1])\n img2_arr = img2_arr.transpose([2,0,1])\n img2_2_arr = img2_2_arr.transpose([2,0,1])\n image_pair = np.concatenate((img1_arr,img2_arr), axis=0)\n else:\n image_pair = np.concatenate((img1_arr,img2_arr),axis=-1)\n \n result = {\n 'image_pair': image_pair[np.newaxis,:],\n 'image1': img1_arr[np.newaxis,:], # first image\n 'image2_2': img2_2_arr[np.newaxis,:], # second image with (w=64,h=48)\n }\n return result",
"def merge_with_rotation_data(images, labels, num_rot_examples):\n img_to_rot = images[-num_rot_examples:]\n img_rotated = rotate_images(img_to_rot, rot90_scalars=(1, 2, 3))\n img_rotated_labels = labels[-num_rot_examples:].repeat(3)\n all_img = torch.cat([images, img_rotated], 0)\n all_labels = torch.cat([labels, img_rotated_labels], 0)\n labels_rotated = tile(torch.tensor([1, 2, 3], dtype=labels.dtype), 0, num_rot_examples)\n all_labels_rotated = torch.cat([torch.zeros_like(labels), labels_rotated], 0)\n return all_img, all_labels, all_labels_rotated"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Build a Beautiful Soup object from markup.
|
def soup(self, markup, **kwargs):
...
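The body is elided in this record. For context only, the core call such a helper wraps is the standard BeautifulSoup constructor (assumes the beautifulsoup4 package; the parser choice and any suite-specific defaults are up to the test class):

from bs4 import BeautifulSoup

markup = "<p class='greeting'>hello <b>world</b></p>"
doc = BeautifulSoup(markup, "html.parser")   # stdlib parser, no extra dependency
print(doc.p["class"], doc.b.get_text())      # ['greeting'] world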
|
[
"def build_soup(url):\n # query the website and return the html to the variable 'page'\n page = requests.get(url)\n # parse the html using beautiful soup and store in variable 'soup'\n return BeautifulSoup(page.content, \"html.parser\")",
"def parse(self, beautiful_html):\n return beautiful_html",
"def _soupify(self, response):\n\n content_type = response.headers.get('content-type', 'unknown')\n\n def get_soup(parser):\n \"\"\"\"Instantiate BeautifulSoup object\"\"\"\n return BeautifulSoup(response.text, parser)\n\n # XML markup.\n if 'xml' in content_type:\n return get_soup('lxml')\n\n # HTML markup.\n return get_soup('html.parser')",
"def make_soup(self):\n self.soup = BeautifulSoup(self.xml_fp, 'lxml-xml')\n self.xml_fp.close()",
"def build(element_unparsed, owner):\n\n short_tags = ['area', 'base', 'basefont', 'br', 'embed', 'hr', 'input',\n 'img', 'link', 'param', 'meta']\n\n required = {\n 'a': {'href': ''},\n 'base': {'href': ''},\n 'abbr': {'title': ''},\n 'acronym':{'title': ''},\n 'bdo': {'dir': ''},\n 'link': {'rel': 'stylesheet', 'href': ''},\n 'style': {'type': 'text/css'},\n 'script': {'type': 'text/javascript'},\n 'img': {'src':'', 'alt':''},\n 'iframe': {'src': '', 'frameborder': '0'},\n 'embed': {'src': '', 'type': ''},\n 'object': {'data': '', 'type': ''},\n 'param': {'name': '', 'value': ''},\n 'form': {'action': '', 'method': 'post'},\n 'table': {'cellspacing': '0'},\n 'input': {'type': '', 'name': '', 'value': ''},\n 'area': {'shape': '', 'coords': '', 'href': '', 'alt': ''},\n 'select': {'name': ''},\n 'option': {'value': ''},\n 'textarea':{'name': ''},\n 'meta': {'content': ''},\n }\n\n result = re.match(r'\\s*(\\w*)' # Name\n r'\\s*(?:#(\\w+))?' # ID\n r'((?:\\s*\\.\\w+)*)' # Classes\n r'((?:\\s*\\[[^\\]]+\\])*)' # Properties\n r'(?:\\s*\\{(\\s*[^\\}]+)\\s*\\})?' # Content\n r'(?:\\s*\\*\\s*([0-9]+))?', # Multiplier\n element_unparsed)\n\n name, id_, classes, properties, content, multiplier = result.groups()\n\n element = owner.createElement('div' if not name else name)\n\n if id_ is not None:\n element.setAttribute(\"id\", id_)\n\n if classes is not None and classes != '':\n element.setAttribute(\"class\",\n ' '.join(re.split(r'\\s*\\.', classes)[1:]))\n\n if multiplier is not None:\n element.setAttribute(\"_embiggen_multiplier\", multiplier)\n\n\n if required.has_key(name):\n for property_name, property_value in required[name].iteritems():\n element.setAttribute(property_name, property_value)\n\n if properties is not None:\n props = re.split(r'[\\[\\],]\\s*', properties)\n for prop in props:\n if prop.strip() == '':\n continue\n\n if '=' in prop:\n prop_name, prop_value = prop.split('=')\n else:\n prop_name, prop_value = prop, ''\n\n element.setAttribute(prop_name.strip(), prop_value.strip())\n\n if content is None and name not in short_tags:\n content = ''\n\n if content is not None:\n text = owner.createTextNode(content.strip())\n element.appendChild(text)\n\n return element",
"def __init__(self, page_content):\n self.soup = BeautifulSoup(page_content, \"html.parser\")",
"def get_soup_from_content(content):\n return BeautifulSoup(content, \"lxml\")",
"def get_soup(self):\n if self._soup is None:\n self._soup = BeautifulSoup(self.get_data(), \"lxml\", from_encoding='utf8')\n return self._soup",
"def pre_parse(old_bs: BeautifulSoup, keep_poems: bool = False) -> BeautifulSoup:\n def add_text(old_tag: Tag, new_tag: Tag, new_parent: Tag):\n text = ' '.join(old_tag.get_text().split())\n if text:\n new_tag.append(text)\n new_parent.append(new_tag)\n\n old_body = old_bs.find('body')\n tmp_bs = BeautifulSoup(html_template)\n new_body = tmp_bs.html.body\n for tag in old_body.find_all(re.compile('^(?:p|ul|ol|h[1-5]|div)$')):\n if tag.name != 'div':\n new_tag = tmp_bs.new_tag(tag.name)\n if tag.name == 'p':\n add_text(tag, new_tag, new_body)\n elif headerp.match(tag.name):\n add_text(tag, new_tag, new_body)\n elif listp.match(tag.name):\n for li in tag.children:\n if isinstance(li, Tag) and li.name == 'li':\n new_li = tmp_bs.new_tag('li')\n add_text(li, new_li, new_tag)\n if new_tag.contents:\n new_body.append(new_tag)\n else: # div\n if 'poem' in tag.get('class', []) and keep_poems:\n if tag.find('div', {'class': 'stanza'}):\n # One p per stanza\n for stanza in tag.find_all('div', {'class': 'stanza'}):\n new_p = tmp_bs.new_tag('p')\n for line in stanza.find_all('span'):\n new_p.append(line.get_text().strip() + '\\n')\n if new_p.contents:\n new_body.append(new_p)\n else:\n # Unstructured poem with lines as paragraphs\n new_p = tmp_bs.new_tag('p')\n for line in tag.find_all('p'):\n new_p.append(line.get_text().strip() + '\\n')\n if new_p.contents:\n new_body.append(new_p)\n return tmp_bs",
"def _soup(self) -> object:\n driver_html = self.driver.find_element_by_class_name(\"stretch\")\n\n soup = BeautifulSoup(\n driver_html.get_attribute(\"innerHTML\"),\n \"html.parser\")\n\n return soup",
"def makesoup(url):\n html = requests.get(url)\n soup = bs(html.content)\n return soup",
"def parse_html(html_string):\n return BeautifulSoup(html_string, \"html.parser\")",
"def customized_html_parse(html_txt: str, tag_configs: Iterable[\"general_cfg.TagConfig\"]) -> str:\n soup = BeautifulSoup(html_txt, \"html.parser\")\n for tag_config in tag_configs:\n # find tags\n tags = soup.find_all(tag_config.tagName)\n for tag in tags:\n for key, val in tag_config.propKV.items():\n tag[key] = val\n return soup.__str__()",
"def parse_page(self, soup):\n posts = self.get_posts(soup)\n out = \"\"\n for post in posts:\n out += self.format_post(post)\n return out",
"def get_soup(response):\n return BeautifulSoup(response.content, features=\"html.parser\")",
"def generate_soup(url):\n \n socket = urllib.request.urlopen(url)\n html_page = socket.read()\n return BeautifulSoup(html_page)",
"def test_beautiful_soup_can_parse_html_from_returned_content(self):\n soup = self.soupify(self.response)\n self.assertIsNotNone(soup)",
"def __parse_html(self, html):\n soup = bs4.BeautifulSoup(html, 'lxml')\n return soup.text",
"def inline_markup_to_html(astr):\n\n markup_to_elem = [(r'\\*', '<b>', '</b>'),\n (r'\\/', '<i>', '</i>'),\n (r'`', '<code>', '</code>')]\n\n def replace(matched):\n \"\"\" Take matched, add opening & closing tags, cgi escape if code \"\"\"\n\n matched_str = matched.groups()[0]\n if match == '`':\n matched_str = cgi.escape(matched_str)\n return opener + matched_str + closer\n\n for match, opener, closer in markup_to_elem:\n astr = wrap_match(match).sub(replace, astr)\n\n return fu.pipe(astr, [convert_markup_links, convert_raw_links])",
"def html2etree(tag_soup):\n return lxml.html.fromstring(tag_soup)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify that all HTML4 and HTML5 empty element (aka void element) tags are handled correctly.
|
def test_empty_element_tags(self):
...
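The test body is elided; the behaviour under test can be illustrated with a short, hypothetical check that void elements such as <br> and <img> never swallow the text that follows them:

from bs4 import BeautifulSoup

doc = BeautifulSoup("<p>a<br>b<img src='x.png'>c</p>", "html.parser")
print([tag.name for tag in doc.p.children if tag.name])  # ['br', 'img']
print(doc.br.contents, doc.img.contents)                 # [] [] -- no children captured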
|
[
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def test_simple_complete_html_start_tag_with_no_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \">\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 1",
"def test_complete_html_start_tag_with_single_no_value_attributes_and_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 7",
"def test_complete_html_start_tag_with_single_no_value_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 6",
"def test_complete_html_start_tag_with_invalidly_named_no_value_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" sh*ow>\"\n parse_index = 0\n expected_is_valid = False\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 1",
"def test_simple_complete_html_start_tag_with_no_attributes_and_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 2",
"def strip_empty_tags(self):\n tag = self.root\n while True:\n next_tag = tag.findNext(True)\n if not next_tag: break\n if next_tag.contents or next_tag.attrs:\n tag = next_tag\n continue\n next_tag.extract()",
"def test_simple_complete_html_start_tag_with_bad_tag_name():\n\n # Arrange\n input_tag_name = \"a*b\"\n string_to_parse = \">\"\n parse_index = 0\n expected_is_valid = False\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 1",
"def test_unclosed_tags_get_closed(self):\n ...",
"def get_unnecessary_elements(tag, clear_elem):\n tag_list = list(filter(lambda e: 'none' not in e, tag))\n\n garbage_full = list()\n\n for each_tag in tag_list:\n split_tag = each_tag.split('\"')\n try:\n clear_tag = split_tag[1]\n if clear_tag in clear_elem or 'inline' in clear_tag or re.search(r'^\\d+$', clear_tag):\n pass\n else:\n garbage_full.append(each_tag)\n except IndexError:\n garbage_full.append(each_tag)\n return garbage_full",
"def test_complete_html_start_tag_with_single_attribute_with_bad_value():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show=>\"\n parse_index = 0\n expected_is_valid = False\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 1",
"def testBasicTagPresence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' hello')",
"def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('ExpectedTemplateSyntaxError')\r\n except TemplateSyntaxError as e:\r\n self.assertEqual('test.html', e.filename)\r\n if sys.version_info[:2] > (2,4):\r\n self.assertEqual(2, e.lineno)",
"def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('ExpectedTemplateSyntaxError')\r\n except TemplateSyntaxError, e:\r\n self.assertEqual('test.html', e.filename)\r\n if sys.version_info[:2] > (2,4):\r\n self.assertEqual(2, e.lineno)",
"def test_self_closing_attrs():\n page = Html()\n page.append(\"some plain text.\")\n page.append(\"some 2 plain text.\")\n page_body = Body()\n page_body.append(P(\"A simple paragraph of text\", \\\n style=\"text-align: center; font-style: oblique;\"))\n page_body.append(P(\"Another simple paragraph of text\"))\n page_hr = Hr()\n page_body.append(page_hr)\n page.append(page_body)\n page.append(\"Some more plain text.\")\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A simple paragraph of text\" in file_contents\n assert \"Some more plain text.\" in file_contents\n assert \"some plain text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n assert '<p style=\"text-align: center; font-style: oblique;\">' in file_contents\n assert \"</p>\" in file_contents\n assert \"<hr />\" in file_contents\n #assert False",
"def test_empty(test_empty_tree):\n assert find(test_empty_tree) == False",
"def testMultiTagPresence(self):\n template = '{{ ifpresent [one] [two] }} good {{ endif }}'\n self.assertEqual(self.parse(template, one=1, two=2), ' good')\n self.assertFalse(self.parse(template, one=1))\n self.assertFalse(self.parse(template, two=2))",
"def Xtest_strip_doctypehtml(self):\n inp = '''Something\n <body>\n Result\n </body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'\n \n # a failing piece of HTML\n inp = '''Something\n <body>\n Result\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip().split() == ['Result','Else'], res.strip().split()\n \n # With a more complicated piece of HTML\n inp = '''Something\n <BODY \n onLoad=\"foo()\"\n >\n Result\n </Body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'",
"def test_starttag_bad_closing():\n inst = _encoder.TextEncoder('utf-8')\n with raises(RuntimeError):\n inst.starttag(b'x', [], _test.badbool)",
"def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n foo\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Assert that a given doctype string is handled correctly.
|
def assertDoctypeHandled(self, doctype_fragment):
...
|
[
"def doctype(self, irc, msg, args, url):\n size = conf.supybot.protocols.http.peekSize()\n s = utils.web.getUrl(url, size=size)\n m = self._doctypeRe.search(s)\n if m:\n s = utils.str.normalizeWhitespace(m.group(0))\n irc.reply(s)\n else:\n irc.reply('That URL has no specified doctype.')",
"def parse_doctype(self):\n if self.seen_doctype == 1:\n xmlproc.XMLProcessor.parse_doctype(self)\n else:\n arizonareport.send_out(4, str(\"Ignoring DOCTYPE (%s,%d)\" % (self.get_current_sysid(), self.get_line())) )\n self.scan_to(\"]>\")\n self.seen_doctype = 1",
"def return_doctype(self, document_id):\n if not isinstance(document_id, str):\n raise Exception(f\"document_id not a string\")\n for doctype in self.doctypes.values():\n if doctype.is_valid(document_id):\n return doctype\n raise Exception(\"No associated doctype\")",
"def verify_with_schema(xml_str, schema_file):\n\n xmlschema_doc = etree.parse(open( schema_file, \"r\", encoding=\"utf-8\"))\n xmlschema = etree.XMLSchema(xmlschema_doc)\n xmlschema.assertValid(etree.XML(xml_str))",
"def test_type_nonascii(self):\n self.assert_input(\n 'Failed to type Spanish string',\n u'Teclado (informática)')\n self.assert_input(\n 'Failed to type Russian string',\n u'Компьютерная клавиатура')",
"def test_should_raise_error_if_type_is_invalid(self):\n with self.assertRaises(ValueError):\n self.spec_parser.parse_statement({'type': 'sugar'})",
"def test_string_direct(self):\n for source in (\"direct\", \"default\"):\n self.assertEqual(self.setting.detect_type(u\"Hello\", source), \"unicode\")\n self.assertEqual(self.setting.detect_type(\"Hello\", source), \"unicode\")",
"def test_determine_function_returns_a_string(self):\n typ = determine_content_type(\"me.pdf\")\n\n assert isinstance(typ, str)",
"def test_type_ascii(self):\n self.assert_input(\n 'Failed to type ascii string',\n u'abc123, \"quoted!\"')",
"def guess_type(cls, string):\n if string.strip():\n for name, datatype in cls.datatypes.items():\n if name.startswith('observable'):\n if datatype.validate_string(string):\n return datatype\n return False",
"def _basic_type_check_on_function(self, fname, o):\n valid_html_types = (html.Div, html.Span, html.Table)\n if fname.endswith(\"_html\"):\n self.assertTrue(isinstance(o, valid_html_types))\n elif fname.endswith(\"_style\"):\n self.assertTrue(isinstance(o, str))\n else:\n self.assertFalse(isinstance(o, valid_html_types))",
"def test_type_latin(self):\n self.assert_input(\n 'Failed to type latin string',\n u'Hello World')",
"def IsValidDataType(str_val, data_type, charset='B', icvn='00401'):\n if not data_type:\n return True\n if not isinstance(str_val, str):\n return False\n\n try:\n if data_type[0] == 'N':\n if not match_re('N', str_val):\n raise IsValidError # not a number\n elif data_type == 'R':\n if not match_re('R', str_val):\n raise IsValidError # not a number\n elif data_type in ('ID', 'AN'):\n if not_match_re('ID', str_val, charset, icvn):\n raise IsValidError\n elif data_type == 'RD8':\n if '-' in str_val:\n (start, end) = str_val.split('-')\n return IsValidDataType(start, 'D8', charset) and IsValidDataType(end, 'D8', charset)\n else:\n return False\n elif data_type in ('DT', 'D8', 'D6'):\n if not is_valid_date(data_type, str_val):\n raise IsValidError\n elif data_type == 'TM':\n if not is_valid_time(str_val):\n raise IsValidError\n elif data_type == 'B':\n pass\n else:\n raise IsValidError('Unknown data type %s' % data_type)\n except IsValidError:\n return False\n return True",
"def verify(typestring, typeTable):\n\tif typestring == inspect.Parameter.empty:\n\t\treturn True\n\n\tif isinstance(typestring, str):\n\t\tpass\n\telif isinstance(typestring, tuple) and len(typestring) == 2 and isinstance(typestring[0], str) and callable(typestring[1]):\n\t\ttypestring = typestring[0]\n\telse:\n\t\traise ValueError(\"Invalid typestring `%s': not a string or string/predicate\")\n\n\tif typestring.strip() == '':\n\t\treturn True\n\tdescribeTypestring(typestring, typeTable) # Will throw ValueError if bad\n\treturn True",
"def test_dtypes(self):\n np = self.compile_test('dtypes.sv')\n self.assertTrue(np.get_dtype_width('logic') == 1)\n self.assertTrue(np.get_vertex_dtype_width('dtypes.logic_bit') == 1)\n self.assertTrue(np.get_vertex_dtype_str('dtypes.logic_bit') == 'logic')\n self.assertTrue(np.get_dtype_width('packed_struct_nested3_t') == 3+4+3)\n self.assertTrue(np.get_vertex_dtype_width('dtypes.packstruct_nested3') == 3+4+3)\n self.assertTrue(np.get_vertex_dtype_str('dtypes.packstruct_nested3') == 'packed struct')\n # Check that exceptions are raised\n self.assertRaises(RuntimeError, np.get_dtype_width, 'foo')\n self.assertRaises(RuntimeError, np.get_vertex_dtype_str, 'foo')\n self.assertRaises(RuntimeError, np.get_vertex_dtype_width, 'foo')",
"def test_invalid_string():\n with pytest.raises(ValueError):\n assert validate_datetime(\"January 1, blah blah blah\")",
"def test_validator_valid_stdtype_values_should_not_raise_exception(self):\n try:\n self.dummy.stdtype_bool = True\n self.dummy.stdtype_bytearray = bytearray(b'bytearray')\n self.dummy.stdtype_bytes = b'bytes'\n self.dummy.stdtype_complex = 1j\n self.dummy.stdtype_dict = {'Dictionary': True}\n self.dummy.stdtype_float = 1.1\n self.dummy.stdtype_frozenset = frozenset({1, 2, 3})\n self.dummy.stdtype_int = 666\n self.dummy.stdtype_list = ['List']\n self.dummy.stdtype_memoryview = memoryview(b'')\n self.dummy.stdtype_range = range(1, 10)\n self.dummy.stdtype_set = {1, 2, 3}\n self.dummy.stdtype_str = 'String'\n self.dummy.stdtype_tuple = ('Tuple',)\n self.dummy.stdtype_type = type\n except Exception as e:\n self.fail(e)",
"def test_validate_schema_of_strs(self):\n schema = [(\"C0\", str), (\"C1\", str), (\"C2\", str)]\n # should not throw an exception\n # if the datatype can be cast to the schema-specified\n # datatype validate schema should just cast it\n # since ints and floats can be cast to string\n # it should not error but should cast all of the data to strings\n frame = self.context.frame.create(self.dataset, schema=schema, validate_schema=True)\n for row in frame.take(frame.count()):\n # the data should all be cast to str by validate_schema=True\n for item in row:\n self.assertEqual(type(item), str)",
"def check_rec_scada_data_type(rec):\n err = False\n dtyp = rec['SCADA_Data_Type'].capitalize()\n if dtyp not in cfg.SCADA_valid_types:\n err = True\n print(\"Error - SCADA Data Type provided is invalid for tag %s\" % (rec['Tag_name']))\n return (err,dtyp)",
"def test_bad_message_type(type_str):\n with pytest.raises(InvalidType):\n MsgType(type_str)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A real XHTML document should come out more or less the same as it went in.
|
def test_real_xhtml_document(self):
...
|
[
"def Xtest_strip_doctypehtml(self):\n inp = '''Something\n <body>\n Result\n </body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'\n \n # a failing piece of HTML\n inp = '''Something\n <body>\n Result\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip().split() == ['Result','Else'], res.strip().split()\n \n # With a more complicated piece of HTML\n inp = '''Something\n <BODY \n onLoad=\"foo()\"\n >\n Result\n </Body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'",
"def test_self_closing_attrs():\n page = Html()\n page.append(\"some plain text.\")\n page.append(\"some 2 plain text.\")\n page_body = Body()\n page_body.append(P(\"A simple paragraph of text\", \\\n style=\"text-align: center; font-style: oblique;\"))\n page_body.append(P(\"Another simple paragraph of text\"))\n page_hr = Hr()\n page_body.append(page_hr)\n page.append(page_body)\n page.append(\"Some more plain text.\")\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A simple paragraph of text\" in file_contents\n assert \"Some more plain text.\" in file_contents\n assert \"some plain text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n assert '<p style=\"text-align: center; font-style: oblique;\">' in file_contents\n assert \"</p>\" in file_contents\n assert \"<hr />\" in file_contents\n #assert False",
"def test_is_html_tag_properly(self):\r\n file=\"HTMLDOC.txt\"\r\n html_doc=p.read_file(file)\r\n result=p.is_html_tag_properly(html_doc)\r\n self.assertTrue(result,True)",
"def testParseContent(self):\n # XXX not sure it is good to store parsed document everytime\n self.assertTrue(isinstance(self.oodocument.parsed_content, etree._Element))\n self.assertTrue(self.oodocument.parsed_content.tag.endswith(\n 'document-content'))",
"def testSAX2DOM(self):\n sax2dom = pulldom.SAX2DOM()\n sax2dom.startDocument()\n sax2dom.startElement(\"doc\", {})\n sax2dom.characters(\"text\")\n sax2dom.startElement(\"subelm\", {})\n sax2dom.characters(\"text\")\n sax2dom.endElement(\"subelm\")\n sax2dom.characters(\"text\")\n sax2dom.endElement(\"doc\")\n sax2dom.endDocument()\n\n doc = sax2dom.document\n root = doc.documentElement\n (text1, elm1, text2) = root.childNodes\n text3 = elm1.childNodes[0]\n\n self.assertIsNone(text1.previousSibling)\n self.assertIs(text1.nextSibling, elm1)\n self.assertIs(elm1.previousSibling, text1)\n self.assertIs(elm1.nextSibling, text2)\n self.assertIs(text2.previousSibling, elm1)\n self.assertIsNone(text2.nextSibling)\n self.assertIsNone(text3.previousSibling)\n self.assertIsNone(text3.nextSibling)\n\n self.assertIs(root.parentNode, doc)\n self.assertIs(text1.parentNode, root)\n self.assertIs(elm1.parentNode, root)\n self.assertIs(text2.parentNode, root)\n self.assertIs(text3.parentNode, elm1)\n doc.unlink()",
"def test_thorough_sax2dom(self):\n pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)\n self._test_thorough(pd, False)",
"def test_html_is_not_valid(self):\n url = \"\"\n single_date = date(2019, 3, 4)\n\n coins = {}\n with patch.object(\n BCRASMLScraper,\n 'fetch_content',\n return_value=' '\n ):\n scraper = BCRASMLScraper(url, coins, intermediate_panel_path=None, use_intermediate_panel=False)\n content = scraper.fetch_content(single_date)\n\n soup = BeautifulSoup(content, \"html.parser\")\n\n table = soup.find('table')\n head = table.find('thead') if table else None\n body = table.find('tbody') if table else None\n\n assert table is None\n assert head is None\n assert body is None",
"def check_doc1(html, has_base_url=True):\r\n assert html.root_element.tag == 'html'\r\n assert [child.tag for child in html.root_element] == ['head', 'body']\r\n _head, body = html.root_element\r\n assert [child.tag for child in body] == ['h1', 'p', 'ul']\r\n h1 = body[0]\r\n assert h1.text == 'WeasyPrint test document (with Ünicōde)'\r\n if has_base_url:\r\n url = urljoin(html.base_url, 'pattern.png')\r\n assert url.startswith('file:')\r\n assert url.endswith('weasyprint/tests/resources/pattern.png')\r\n else:\r\n assert html.base_url is None",
"def render_main_document_as_one(self, document, filepath, measures_annex=True):\n with DocumentRenderingContext(filepath) as ctx:\n doc, tag, text = ctx.doc_tag_text\n\n self.labeler = self.create_labeler(doc)\n self.doc = doc\n\n with tag('html'):\n doc.head(document.title)\n\n with tag('body'):\n doc.p('gegenereerd op ', datetime.datetime.now().isoformat(), style=\"font-size:11px\")\n\n with tag('div', klass='container'):\n self._render_fragment(document, self.doc.h1, self._render_chapter)\n\n with tag('div', klass='container'):\n self.render_verifier_annex(document)\n\n if measures_annex:\n with tag('div', klass='container'):\n self.render_measures_annex()\n\n self.doc = None\n self.labeler = None",
"def create_test_html():\n return lxml.html.fromstring(\"\"\"<html>\n <head>\n </head>\n <body>\n <div class=\"test\">Some <em>text</em></div>\n <img src=\"some_location\" alt=\"Alt text\" width=540>\n More <b>text</b>\n </body>\n </html>\"\"\")",
"def setup_empty_pagecontent_file(self):\n basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)\n filename = \"{wiki}-{date}-pages-articles.xml.bz2\".format(\n wiki=self.en['wiki'].db_name, date=self.today)\n path = os.path.join(basedir, filename)\n with open(path, \"w\") as output:\n output.write(\"fake\\n\")",
"def test_document_write(self):\n\n self.set_html('foo')\n self.assert_warnings({\n 'id': ('js', 'document.write', 'evil'),\n 'message': Matches('document\\.write.*strongly discouraged'),\n 'description': Matches('should not be used')})",
"def generate_document(self):\n\n resp = requests.get(self.link)\n return BeautifulSoup(resp.text, 'xml')",
"def test_content_exists(self):\n content = self.app.get(\"/\")\n assert content.data[0:9] == \"<!DOCTYPE\"",
"def test_beautiful_soup_can_parse_html_from_returned_content(self):\n soup = self.soupify(self.response)\n self.assertIsNotNone(soup)",
"def test_direct_html(self):\n direct_html = '<p>Hello, world.</p>'\n params = {\n 'workspace_name': self.getWsName(),\n 'direct_html': direct_html\n }\n result = self.getImpl().create_extended_report(self.getContext(), params)\n obj = self.dfu.get_objects({'object_refs': [result[0]['ref']]})\n self.assertEqual(obj['data'][0]['data']['direct_html'], direct_html)",
"def inHTML(text, index, body):\n # if there is a < then lxml will interpret that as a tag, so only search for the stuff before it\n text = text.split(b\"<\")[0]\n paths = pathsToText([(fromstring(body), \"\")], text.decode(\"utf-8\"), found=[])\n try:\n path = paths[index]\n return \"script\" not in path\n except IndexError:\n return False",
"def make_soup(self):\n self.soup = BeautifulSoup(self.xml_fp, 'lxml-xml')\n self.xml_fp.close()",
"def test_starttag_bad_closing():\n inst = _encoder.TextEncoder('utf-8')\n with raises(RuntimeError):\n inst.starttag(b'x', [], _test.badbool)",
"def KeepEncoding(self):\n self.chunk = XMLEntity.ChunkSize"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
When a namespaced XML document is parsed as HTML it should be treated as HTML with weird tag names.
|
def test_namespaced_html(self):
...
|
[
"def has_html_ns(el: bs4.Tag) -> bool:\n\n ns = getattr(el, 'namespace') if el else None\n return bool(ns and ns == NS_XHTML)",
"def supports_namespaces(self) -> bool:\n\n return self.is_xml or self.has_html_namespace",
"def xmls_to_etree(xml_input):\n return etree.HTML(xml_input)",
"def test_namespace_on_removed_elem(self):\r\n tmpl = MarkupTemplate(\"\"\"<?xml version=\"1.0\"?>\r\n <Test xmlns:py=\"http://genshi.edgewall.org/\">\r\n <Size py:if=\"0\" xmlns:t=\"test\">Size</Size>\r\n <Item/>\r\n </Test>\"\"\")\r\n self.assertEqual(\"\"\"<?xml version=\"1.0\"?>\\n<Test>\r\n \r\n <Item/>\r\n </Test>\"\"\", str(tmpl.generate()))",
"def test_ns_tag():\r\n namespaces = ['http://purl.org/dc/elements/1.1/',\r\n 'urn:schemas-upnp-org:metadata-1-0/upnp/',\r\n 'urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/']\r\n for ns_in, namespace in zip(['dc', 'upnp', ''], namespaces):\r\n res = data_structures.ns_tag(ns_in, 'testtag')\r\n correct = '{{{}}}{}'.format(namespace, 'testtag')\r\n assert res == correct",
"def remove_namespace(doc, namespace):\n ns = u'{%s}' % namespace\n nsl = len(ns)\n for elem in doc.getiterator():\n if elem.tag.startswith(ns):\n elem.tag = elem.tag[nsl:]",
"def test_is_html_tag_properly(self):\r\n file=\"HTMLDOC.txt\"\r\n html_doc=p.read_file(file)\r\n result=p.is_html_tag_properly(html_doc)\r\n self.assertTrue(result,True)",
"def testMultiTopLevel():\n parsed = parseHTML('<h1>Hello!</h1><h1>Goodbye!</h1>')\n assert parsed.flattenXML() == (\n '<h1 xmlns=\"http://www.w3.org/1999/xhtml\">Hello!</h1>'\n '<h1 xmlns=\"http://www.w3.org/1999/xhtml\">Goodbye!</h1>'\n )",
"def render_html_tree(tree):\n # Restore any tag names that were changed in get_html_tree()\n for el in tree.iter():\n if \"__tag_name\" in el.attrib:\n actual_tag_name = el.attrib.pop(\"__tag_name\")\n el.tag = actual_tag_name\n\n html = lxml.html.tostring(tree, encoding=\"utf8\").decode(\"utf8\")\n\n return strip_wrapping(html)",
"def __strip_ns__(self, tree):\n\t\tfor node in tree.iter():\n\t\t\ttry:\n\t\t\t\thas_namespace = node.tag.startswith('{')\n\t\t\texcept AttributeError:\n\t\t\t\tcontinue\n\t\t\tif has_namespace:\n\t\t\t\tnode.tag = node.tag.split('}', 1)[1]",
"def html2etree(tag_soup):\n return lxml.html.fromstring(tag_soup)",
"def Xtest_strip_doctypehtml(self):\n inp = '''Something\n <body>\n Result\n </body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'\n \n # a failing piece of HTML\n inp = '''Something\n <body>\n Result\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip().split() == ['Result','Else'], res.strip().split()\n \n # With a more complicated piece of HTML\n inp = '''Something\n <BODY \n onLoad=\"foo()\"\n >\n Result\n </Body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'",
"def fixSelfClosingTags(self, stringifiedSoup):\n return self.selfClosingTagRegex.sub('', stringifiedSoup)",
"def _text_category(self, node_tag):\n #the pdf xhtml nodes are always \"words\"\n return 'w'",
"def fix_tags(input, removeEmptyTags = False, changeTagsNameCase = 0,\n unNestTags = None, check = False, verbose = False):\n\n if verbose:\n def assume(cond, msg):\n if not cond: print('tagsoupfixer: Parser bug:', msg)\n else:\n def assume(cond, msg): pass\n\n # Tags name comparator\n if changeTagsNameCase == 0: tagNameEqual = lambda a, b: a.lower() == b.lower()\n else: tagNameEqual = lambda a, b: a == b\n # Normalize tags to unNest\n if unNestTags:\n if changeTagsNameCase > 0: unNestTags = map(str.upper, unNestTags)\n else: unNestTags = map(str.lower, unNestTags)\n unNestTags = set(unNestTags)\n\n # Tokenize input\n tokens = _reTag.split(input)\n\n # Debugging\n #~ f = open('pat.txt', mode='w'); f.write(_patTag); f.close()\n #~ print(str(tokens).encode('cp1252'))\n\n # Initialize parser state\n # -- text output\n output = ''\n # -- tags stack; format: [(name, textBefore, markup)*]\n # example: [('div', '... blah <b>di dum</b> ...', '<div class=\"main\">'), ...]\n stack = []\n TAG_NAME = 0; TEXT_BEFORE = 1; MARKUP = 2; ATTRIBUTES = 3\n # -- contextual boolean states\n markupComplete = inTag = endTag = emptyElementTag = False\n # -- buffers for tag name and attributes\n curTagName = curTagAttributes = ''\n\n # http://www.w3.org/TR/2008/REC-xml-20081126/#sec-starttags\n for tok in tokens:\n\n # Simplistic XML parser (don't parse attributes)\n # Open StartTag / EmptyElementTag\n if tok == '<':\n assume(not inTag, 'Unexpected \"<\" inside markup.')\n inTag = True\n # Open EndTag\n elif tok == '</':\n assume(not inTag, 'Unexpected \"</\" inside markup.')\n inTag = endTag = True\n # Close StartTag / EndTag\n elif tok == '>':\n assume(inTag, 'Unexpected \">\" outside markup.')\n markupComplete = True\n # Close EmptyElementTag\n elif tok == '/>':\n assume(inTag, 'Unexpected \"/>\" outside markup.')\n markupComplete = emptyElementTag = True\n # Continue *Tag\n elif inTag:\n # Tag name\n if not curTagName:\n if changeTagsNameCase > 0: curTagName = tok.upper()\n elif changeTagsNameCase < 0: curTagName = tok.lower()\n else: curTagName = tok\n # Tag attributes\n else: curTagAttributes = tok\n # Text\n else:\n output += tok\n\n # We parsed a complete tag (StartTag, EndTag or EmptyElementTag)\n if markupComplete:\n # Quick'n'dirty hack to deal with BRs\n if tagNameEqual(curTagName, 'br'):\n emptyElementTag = True\n # Produce current tag\n curTag = \"<{}{}{}{}>\".format(\n '/' if endTag else '',\n curTagName,\n curTagAttributes,\n '/' if emptyElementTag else ''\n )\n # Process current tag\n # -- EmptyElementTag\n if emptyElementTag:\n # No text to process, output the markup\n output += curTag\n # -- StartTag\n elif not endTag:\n # Push current tag on the stack with current output as textBefore\n # and reset output.\n if unNestTags and curTagName in unNestTags:\n attrs = parse_attributes(curTagAttributes)\n # 20/01/2011: we HAVE to merge the parent's attributes if any\n if len(stack) and stack[-1][TAG_NAME] == curTagName and stack[-1][ATTRIBUTES] and attrs:\n tmp = stack[-1][ATTRIBUTES].copy()\n tmp.update(attrs)\n attrs = tmp\n tag = [curTagName, output, curTag, attrs]\n else: tag = [curTagName, output, curTag]\n output = ''\n stack.append(tag)\n # -- EndTag, try to match a StartTag\n else:\n if len(stack) == 0:\n # Drop this tag\n if verbose: print('tagsoupfixer: '+curTag+': End tag with no match, tag dropped.')\n elif tagNameEqual(stack[-1][TAG_NAME], curTagName):\n # Unnest of the poor (with the parent)\n if unNestTags and len(stack) > 1 and curTagName in unNestTags and stack[-2][TAG_NAME] == 
curTagName:\n attrs = stack[-1][ATTRIBUTES]\n # 20/01/2011: already done at StartTag\n #attrs.update(stack[-2][ATTRIBUTES])\n attrs = build_attributes(attrs)\n stack[-1][MARKUP] = '</' + curTagName + '>' + '<' + curTagName + attrs + '>'\n #if verbose: print('tagsoupfixer: '+curTag+': rewrote parent: '+stack[-1][MARKUP])\n curTag += stack[-2][MARKUP]\n # Properly nested tags\n if not removeEmptyTags or len(output.strip()) > 0:\n # Tag is not empty / We don't have to strip empty tags\n output = stack[-1][TEXT_BEFORE] + stack[-1][MARKUP] + output + curTag\n else:\n # Tag is empty and we have to strip its nasty markup\n output = stack[-1][TEXT_BEFORE] + output\n if verbose: print('tagsoupfixer: '+curTag+': Removed empty tag.')\n stack.pop()\n elif len(stack) > 1:\n # Detect improperly nested tags\n overlap = None\n for i in reversed(range(len(stack)-1)):\n # Overlapping tags !!\n if tagNameEqual(stack[i][TAG_NAME], curTagName):\n overlap = i; break\n if overlap is not None:\n if verbose:\n print('tagsoupfixer: ['+curTagName+','+stack[overlap-1][TAG_NAME]+']: Overlapping tags.')\n # Fix overlapping by properly closing the tag\n tag = stack[overlap]\n for i in range(overlap+1, len(stack)):\n stack[i][MARKUP] = '</'+tag[TAG_NAME]+'>'+stack[i][MARKUP]+tag[MARKUP]\n output += curTag\n stack[overlap+1][TEXT_BEFORE] = tag[TEXT_BEFORE] + tag[MARKUP] + stack[overlap+1][TEXT_BEFORE]\n stack.pop(overlap)\n # Reset tag parser state\n markupComplete = inTag = endTag = emptyElementTag = False\n curTagName = curTagAttributes = ''\n\n # Output remaining elements on the stack\n for i in reversed(range(len(stack))):\n output = stack[i][TEXT_BEFORE] + stack[i][MARKUP] + output\n\n # Cludgy hack to fix empty tags when unnesting\n if unNestTags and removeEmptyTags:\n output = fix_tags(output, removeEmptyTags=True)\n\n if check:\n oh = strip_tags(input)\n my = strip_tags(output)\n if oh != my:\n print('tagsoupfixer: Sorry, I stripped out some text, aaaaaaargh.\\n', oh, '\\n', my)\n\n return output",
"def test_html_entities():\r\n for quote in ['\"', '"', '"', '"']:\r\n assert_tree(parse('<p>{0}abc{1}'.format(quote, quote)), [\r\n ('p', 'Block', [\r\n ('p', 'Text', '\"abc\"')])])",
"def xml_to_etree(xml_input):\n f = open(xml_input, 'r')\n xml = f.read()\n f.close()\n return etree.HTML(xml)",
"def test_self_closing_attrs():\n page = Html()\n page.append(\"some plain text.\")\n page.append(\"some 2 plain text.\")\n page_body = Body()\n page_body.append(P(\"A simple paragraph of text\", \\\n style=\"text-align: center; font-style: oblique;\"))\n page_body.append(P(\"Another simple paragraph of text\"))\n page_hr = Hr()\n page_body.append(page_hr)\n page.append(page_body)\n page.append(\"Some more plain text.\")\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A simple paragraph of text\" in file_contents\n assert \"Some more plain text.\" in file_contents\n assert \"some plain text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n assert '<p style=\"text-align: center; font-style: oblique;\">' in file_contents\n assert \"</p>\" in file_contents\n assert \"<hr />\" in file_contents\n #assert False",
"def stripXmlTags(inData, isNxmlFormat=False, isElsevier=False):\n\n # do not process PMC files without a body tag\n if isNxmlFormat and not nxmlHasBody(inData):\n return None\n\n try:\n root = etreeFromXml(inData)\n if isElsevier:\n asciiData = treeToAscii_Elsevier(root)\n #elif isNxmlFormat:\n #pmcTags = set([\"title\",\"sec\",\"p\",\"section\",\"caption\",\"label\",\"table\"])\n #asciiData = treeToAsciiText(root, addNewlineTags=pmcTags)\n # it doesn't hurt to always try the PMC tags -> \\n replacement\n else:\n pmcTags = set([\"title\",\"sec\",\"p\",\"section\",\"caption\",\"label\",\"table\"])\n asciiData = treeToAsciiText(root, addNewlineTags=pmcTags)\n return asciiData\n\n except SyntaxError:\n logging.error(\"Error while converting xml to text\")\n exObj, exMsg, exTrace = sys.exc_info()\n logging.error(\"Exception %s, traceback: %s\" % (exMsg, traceback.format_exc(exTrace)))\n return None",
"def ns(tag):\n return '{http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15}'+tag"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A tag that's not closed by the end of the document should be closed. This applies to all tags except empty-element tags.
|
def test_unclosed_tags_get_closed(self):
...
|
[
"def close(self, tag):\n return \"</{}>\".format(self.tags[tag].split(\" \", 1)[0])",
"def handle_endtag(self, tag) -> None:\n if tag in self.keeptags:\n self.textdata += f'</{tag}>'",
"def __create_closing_html_tag(self, tag):\n\n tag = tag.replace('<', '</')\n if tag.count('<') > 1:\n tag = tag.split('>')\n tag = tag[1] + '>' + tag[0] + '>'\n return tag",
"def _close_tag(self, tag):\n\n if tag in [\"B\", \"I\", \"U\"]:\n self._set_style(tag, False)\n\n if tag == \"BLOCKQUOTE\":\n self._set_ident_minus()\n\n if tag == \"A\":\n self._href = \"\"\n\n if tag == \"S\":\n if self._last_font is not None:\n self.set_font(\n self._last_font[\"font_family\"],\n size=self._last_font[\"size\"],\n style=self._last_font[\"style\"],\n )\n self._last_font = None",
"def close_tags_to(self, elem_cp):\n while len(self._stack) and self._stack[-1] != elem_cp:\n self.end_tag()\n if len(self._stack):\n self.end_tag()",
"def __enclose_in_html_tag(self, elem, tag):\n\n return tag + elem.strip() + self.__create_closing_html_tag(tag)",
"def test_starttag_bad_closing():\n inst = _encoder.TextEncoder('utf-8')\n with raises(RuntimeError):\n inst.starttag(b'x', [], _test.badbool)",
"def strip_empty_tags(self):\n tag = self.root\n while True:\n next_tag = tag.findNext(True)\n if not next_tag: break\n if next_tag.contents or next_tag.attrs:\n tag = next_tag\n continue\n next_tag.extract()",
"def endTagToString(self):\n\n return \"</\" + self.nodeType + \">\\n\"",
"def close_tags(html):\n parser = OpenTagsParser()\n parser.feed(html)\n open_tags = parser.get_result()\n return html + ''.join('</{0}>'.format(tag) for tag in open_tags)",
"def fixSelfClosingTags(self, stringifiedSoup):\n return self.selfClosingTagRegex.sub('', stringifiedSoup)",
"def test_end_document(self):\n items = pulldom.parseString(SMALL_SAMPLE)\n # Read all of the nodes up to and including </html>:\n for evt, node in items:\n if evt == pulldom.END_ELEMENT and node.tagName == \"html\":\n break\n try:\n # Assert that the next node is END_DOCUMENT:\n evt, node = next(items)\n self.assertEqual(pulldom.END_DOCUMENT, evt)\n except StopIteration:\n self.fail(\n \"Ran out of events, but should have received END_DOCUMENT\")",
"def test_starttag_closing():\n inst = _encoder.TextEncoder('foo')\n\n result = inst.starttag(b'xx', iter([]), True)\n assert result == b'[[xx]]'\n\n result = inst.starttag(b'yy', iter([(b'aa', None), (b'bb', b'cc')]), True)\n assert result == b'[[yy aa bb=cc]]'",
"def test_section__end_tag_with_no_start_tag(self):\n template = '{{/section}}'\n try:\n self._assert_render(None, template)\n except ParsingError, err:\n self.assertEqual(str(err), \"Section end tag mismatch: section != None\")",
"def finish_parsing(self):\n if self.current_element:\n self.diagnostics.append(Diagnostic(\n Severity.ERROR,\n self.current_element.code_range,\n f\"Unclosed element <{self.current_element.tagname}>\\n\"\n f\"Did you mean <{self.current_element.tagname} \"\n f\"...attributes... /> ?\"\n ))",
"def test_endtag_badtype():\n inst = _encoder.TextEncoder('utf-8')\n with raises(TypeError):\n inst.endtag(u's')",
"def popTag(self, tag, omitTagFlag=0):\n while self.tagStack:\n oldTag, tagProperties, useMacroLocation = self.tagStack.pop()\n endTagSymbol = tagProperties.get('endTagSymbol', None)\n popCommandList = tagProperties.get('popFunctionList', [])\n singletonTag = tagProperties.get('singletonTag', 0)\n for func in popCommandList:\n func()\n self.log.debug(\"Popped tag %s off stack\" % oldTag[0])\n if (oldTag[0] == tag[0]):\n # We've found the right tag, now check to see if we have any\n # TAL commands on it\n if (endTagSymbol is not None):\n # We have a command (it's a TAL tag)\n # Note where the end tag symbol should point (i.e. the next\n # command)\n self.symbolLocationTable[endTagSymbol] = len(\n self.commandList)\n\n # We need a \"close scope and tag\" command\n self.addCommand((TAL_ENDTAG_ENDSCOPE, (tag[0], omitTagFlag,\n singletonTag)))\n return\n elif (omitTagFlag == 0 and singletonTag == 0):\n # We are popping off an un-interesting tag, just add the\n # close as text\n self.addCommand((TAL_OUTPUT, '</' + tag[0] + '>'))\n return\n else:\n # We are suppressing the output of this tag, so just return\n return\n else:\n # We have a different tag, which means something like <br> which never closes is in\n # between us and the real tag.\n\n # If the tag that we did pop off has a command though it means\n # un-balanced TAL tags!\n if (endTagSymbol is not None):\n # ERROR\n msg = (\n \"TAL/METAL Elements must be balanced - found close tag %s expecting %s\"\n % (tag[0], oldTag[0]))\n self.log.error(msg)\n raise TemplateParseException(self.tagAsText(oldTag), msg)\n self.log.error(\n \"Close tag %s found with no corresponding open tag.\" % tag[0])\n raise TemplateParseException(\n \"</%s>\" % tag[0],\n \"Close tag encountered with no corresponding open tag.\")",
"def _open_tag(self):\n open_tag = [\"<{}\".format(self.tag)]\n for key, value in self.attributes.items():\n open_tag.append(' {}=\"{}\"'.format(key, value))\n open_tag.append(\">\")\n return ''.join(open_tag)",
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Inline elements can be nested indefinitely.
|
def test_nested_inline_elements(self):
...
|
[
"def test_block_in_inline():\r\n box = parse('''\r\n<style>\r\n p { display: inline-block; }\r\n span, i { display: block; }\r\n</style>\r\n<p>Lorem <em>ipsum <strong>dolor <span>sit</span>\r\n <span>amet,</span></strong><span><em>conse<i></i></em></span></em></p>''')\r\n box = build.inline_in_block(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n # No whitespace processing here.\r\n ('strong', 'Text', '\\n '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])])]),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse'),\r\n ('i', 'Block', [])])])])])])])])])\r\n\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [\r\n # Whitespace processing not done yet.\r\n ('strong', 'Text', '\\n ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])]),\r\n\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [])])])]),\r\n ('span', 'Block', [\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse')])])]),\r\n ('i', 'Block', []),\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [])])])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [])])])])])])",
"def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])",
"def is_nested(self, ):\n\t\tpass",
"def IsInline(self):\n return self.InlineChildren()",
"def inline_in_block(box):\r\n if not isinstance(box, boxes.ParentBox):\r\n return box\r\n\r\n children = [inline_in_block(child) for child in box.children\r\n # Remove empty text boxes.\r\n # (They may have been emptied by process_whitespace().)\r\n if not (isinstance(child, boxes.TextBox) and not child.text)]\r\n\r\n if not isinstance(box, boxes.BlockContainerBox):\r\n return box.copy_with_children(children)\r\n\r\n new_line_children = []\r\n new_children = []\r\n for child_box in children:\r\n assert not isinstance(child_box, boxes.LineBox)\r\n if new_line_children and child_box.is_absolutely_positioned():\r\n new_line_children.append(child_box)\r\n elif isinstance(child_box, boxes.InlineLevelBox) or (\r\n new_line_children and child_box.is_floated()):\r\n # Do not append white space at the start of a line:\r\n # It would be removed during layout.\r\n if new_line_children or not (\r\n isinstance(child_box, boxes.TextBox) and\r\n # Sequence of white-space was collapsed to a single\r\n # space by process_whitespace().\r\n child_box.text == ' ' and\r\n child_box.style.white_space in (\r\n 'normal', 'nowrap', 'pre-line')):\r\n new_line_children.append(child_box)\r\n else:\r\n if new_line_children:\r\n # Inlines are consecutive no more: add this line box\r\n # and create a new one.\r\n line_box = boxes.LineBox.anonymous_from(box, new_line_children)\r\n anonymous = boxes.BlockBox.anonymous_from(box, [line_box])\r\n new_children.append(anonymous)\r\n new_line_children = []\r\n new_children.append(child_box)\r\n if new_line_children:\r\n # There were inlines at the end\r\n line_box = boxes.LineBox.anonymous_from(box, new_line_children)\r\n if new_children:\r\n anonymous = boxes.BlockBox.anonymous_from(box, [line_box])\r\n new_children.append(anonymous)\r\n else:\r\n # Only inline-level children: one line box\r\n new_children.append(line_box)\r\n\r\n return box.copy_with_children(new_children)",
"def _do_one_inner_iteration(self):\n self._logger.warning(self.indent+\"One Inner Iteration: Implement me\")",
"def _inner_block_in_inline(box, skip_stack=None):\r\n new_children = []\r\n block_level_box = None\r\n resume_at = None\r\n changed = False\r\n\r\n is_start = skip_stack is None\r\n if is_start:\r\n skip = 0\r\n else:\r\n skip, skip_stack = skip_stack\r\n\r\n for index, child in box.enumerate_skip(skip):\r\n if isinstance(child, boxes.BlockLevelBox) and \\\r\n child.is_in_normal_flow():\r\n assert skip_stack is None # Should not skip here\r\n block_level_box = child\r\n index += 1 # Resume *after* the block\r\n else:\r\n if isinstance(child, boxes.InlineBox):\r\n recursion = _inner_block_in_inline(child, skip_stack)\r\n skip_stack = None\r\n new_child, block_level_box, resume_at = recursion\r\n else:\r\n assert skip_stack is None # Should not skip here\r\n new_child = block_in_inline(child)\r\n # block_level_box is still None.\r\n if new_child is not child:\r\n changed = True\r\n new_children.append(new_child)\r\n if block_level_box is not None:\r\n resume_at = (index, resume_at)\r\n box = box.copy_with_children(\r\n new_children, is_start=is_start, is_end=False)\r\n break\r\n else:\r\n if changed or skip:\r\n box = box.copy_with_children(\r\n new_children, is_start=is_start, is_end=True)\r\n\r\n return box, block_level_box, resume_at",
"def parseinline(registry:Registry,\n element:Union[Element,str], text:str, parent=None):\n if text == '': return ['']\n\n block = registry[element] if isinstance(element, str) else element\n subinline = list(registry.inline_subscriptions(block.subinline, parent))\n\n # a map of regexes to parsing function\n inlines = [(x.regex, (x.parser, x)) for x in subinline]\n\n # combine all escaped characters from all subscribed inline objects.\n escapes = ''.join(t.reduce(set.union,\n (x.escape for x in subinline), set())).replace('[', '\\\\[').replace(']', '\\\\]')\n # function that will unescape body code so eg `\\\\\\*` -> `\\*`\n unescape = ((lambda t: re.compile('\\\\\\\\(['+re.escape(escapes)+'])').sub(r'\\1', t))\n if len(escapes) > 0\n else t.identity)\n\n # if there are no inline styles declared in the registry, then we need\n # to handle that as a special case before all the regex stuff.\n if len(inlines) == 0:\n return [text]\n \n # combine all inline patterns into one regex.\n # might not be efficient for very complex parsers....\n patt = re.compile('|'.join(t.map(lambda x: '(?:'+(\n x[0] if isinstance(x[0], str) else x[0].pattern)+')', inlines)), re.V1 | re.S | re.M)\n\n # how many groups are in each regex, in order, so we can assign the final\n # match to the right parser function.\n grouplengths = list(\n t.cons(0, t.accumulate(op.add, t.map(lambda x: num_groups(x[0]), inlines))))\n\n ind = 0\n l = []\n while ind < len(text):\n m = patt.search(text, ind)\n if m is None:\n l.append(unescape(text[ind:]))\n break\n\n # untouched text should be made into its own child\n if m.span()[0] > ind:\n l.append(unescape(text[ind:m.span()[0]]))\n \n # figure out which parser the match is corresponding to.\n # first not-None group index.\n groupind = indexby(lambda x: x is not None, m.groups())\n # the index of the regex in `inlines` that the groupind corresponds to\n matchind = indexby(lambda x: x >= groupind, grouplengths)\n parser, elem = inlines[matchind][1]\n # stripping all the groups corresponding to the matched sub-regex\n groups = m.groups()[grouplengths[matchind]:\n grouplengths[min(m.re.groups, matchind+1)]]\n\n # doing the parsing based on nesting type\n if elem.nest == Nesting.FRAME:\n # frames are simple, by default they have inherit behavior\n # and deal with one group\n l.append((elem, list(splicehtmlmap(lambda t: parseinline(\n registry, block, t, parent), parser(groups[0]) )) ) )\n elif elem.nest == Nesting.NONE:\n l.append((elem, parser(groups)))\n elif elem.nest == Nesting.POST:\n # post requires a tree-traversal to reparse all the body elements.\n # the only difference is that we have to take into account the inheritance\n # rules.\n l.append((elem, list(\n splicehtmlmap(\n lambda t: parseinline(\n registry,\n block if elem.subinline == ['inherit'] else elem,\n t,\n parent if elem.subinline == ['inherit'] else block),\n parser(groups)))))\n\n ind = m.span()[1]\n\n return l",
"def deal_with_inline_expression(self, node):\n self.result += (node.getText())",
"def is_inline_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"inline\"",
"def test_nested_three_block_max_block_max_block_max_empty():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >\\n > > > ]\",\n \"[BLANK(1,15):]\",\n \"[para(2,16):]\",\n \"[text(2,16):list:]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def _do_one_outer_iteration(self):\n self._logger.info(self.indent+\"One Outer Iteration: Basic version\")\n indent=self.indent\n self.indent=self.indent + ' '\n self._do_inner_iteration_stage()\n self.indent=indent\n print self.indent, \"-\"*39",
"def remove_noop_inline_elements(context, content):\n for node in content.findall('.//span'):\n if node.attrib:\n continue\n drop_node(node, add_padding=False, keep_content=True)",
"def wrap_nested(self):\n for i in range(self.n_blocks):\n block = self.GetBlock(i)\n if not is_pyvista_dataset(block):\n self.SetBlock(i, wrap(block))",
"def test_inlinebox_spliting():\r\n for width in [10000, 100, 10, 0]:\r\n page, = parse('''\r\n <style>p { font-family:%(fonts)s; width: %(width)spx; }</style>\r\n <p><strong>WeasyPrint is a free software visual rendering engine\r\n for HTML and CSS.</strong></p>\r\n ''' % {'fonts': FONTS, 'width': width})\r\n html, = page.children\r\n body, = html.children\r\n paragraph, = body.children\r\n lines = paragraph.children\r\n if width == 10000:\r\n assert len(lines) == 1\r\n else:\r\n assert len(lines) > 1\r\n text_parts = []\r\n for line in lines:\r\n strong, = line.children\r\n text, = strong.children\r\n text_parts.append(text.text)\r\n assert ' '.join(text_parts) == ('WeasyPrint is a free software visual '\r\n 'rendering engine for HTML and CSS.')",
"def IsNestedFamANDAssem(self) -> bool:",
"def test_nested_three_block_max_block_max_block_max_empty_no_bq2():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > \\n > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >]\",\n \"[BLANK(1,15):]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[icode-block(2,10): :]\",\n \"[text(2,10):\\a>\\a>\\a list: ]\",\n \"[end-icode-block:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n</blockquote>\n</blockquote>\n<pre><code> > list\n</code></pre>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def inline(expression):\n return expression",
"def IsNestedFamORAssem(self) -> bool:",
"def code_comment_documentation_block_inner():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Block elements can be nested.
|
def test_nested_block_level_elements(self):
...
|
[
"def is_nested(self, ):\n\t\tpass",
"def wrap_nested(self):\n for i in range(self.n_blocks):\n block = self.GetBlock(i)\n if not is_pyvista_dataset(block):\n self.SetBlock(i, wrap(block))",
"def parseBlock(self, block):\n\t\tcontainer = Container()\n\t\tif container.set(self.matcher.matchHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = HeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 1)\n\n\t\telif container.set(self.matcher.matchSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, 2) \n\n\t\telif container.set(self.matcher.matchSubSubHeading(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = SubSubHeadingMatch(match)\n\t\t\tsubelement = self.parseText(em.text())\n\t\t\telement = HeadingElement(subelement, em.level()) \n\n\t\telif container.set(self.matcher.matchTable(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = TableMatch(match)\n\t\t\ttableHeaders = map(self.parseBlock, em.tableHeaders())\n\t\t\ttableItems = map(lambda row: map(self.parseBlock, row), em.tableItems())\n\t\t\telement = TableElement(tableHeaders, tableItems)\n\n\t\telif container.set(self.matcher.matchOrderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = OrderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = OrderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchUnorderedList(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = UnorderedListMatch(match)\n\t\t\tlistItems = map(self.parseText, em.listItems())\n\t\t\telement = UnorderedListElement(listItems)\n\n\t\telif container.set(self.matcher.matchBlockEquation(block)):\n\t\t\tmatch = container.get()\n\t\t\tem = BlockEquationMatch(match)\n\t\t\tequationStr = em.equation()\n\t\t\tequation = self.equationParser.parseEquation(equationStr)\n\t\t\telement = BlockEquationElement(equation)\n\n\t\telse:\n\t\t\telement = ParagraphElement(self.parseText(block))\n\n\t\treturn element",
"def test_block_in_inline():\r\n box = parse('''\r\n<style>\r\n p { display: inline-block; }\r\n span, i { display: block; }\r\n</style>\r\n<p>Lorem <em>ipsum <strong>dolor <span>sit</span>\r\n <span>amet,</span></strong><span><em>conse<i></i></em></span></em></p>''')\r\n box = build.inline_in_block(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n # No whitespace processing here.\r\n ('strong', 'Text', '\\n '),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])])]),\r\n ('span', 'Block', [ # This block is \"pulled up\"\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse'),\r\n ('i', 'Block', [])])])])])])])])])\r\n\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('body', 'Line', [\r\n ('p', 'InlineBlock', [\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lorem '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'ipsum '),\r\n ('strong', 'Inline', [\r\n ('strong', 'Text', 'dolor ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'sit')])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [\r\n # Whitespace processing not done yet.\r\n ('strong', 'Text', '\\n ')])])])]),\r\n ('span', 'Block', [\r\n ('span', 'Line', [\r\n ('span', 'Text', 'amet,')])]),\r\n\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [\r\n ('strong', 'Inline', [])])])]),\r\n ('span', 'Block', [\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'conse')])])]),\r\n ('i', 'Block', []),\r\n ('span', 'AnonBlock', [\r\n ('span', 'Line', [\r\n ('em', 'Inline', [])])])]),\r\n ('p', 'AnonBlock', [\r\n ('p', 'Line', [\r\n ('em', 'Inline', [])])])])])])",
"def IsBlock(self) -> bool:",
"def test_nested_three_block_block_block():\n\n # Arrange\n source_markdown = \"\"\"> > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::]\",\n \"[block-quote(1,3)::]\",\n \"[block-quote(1,5)::> > > \\n> > > ]\",\n \"[para(1,7):\\n]\",\n \"[text(1,7):list\\nitem::\\n]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list\nitem</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def test_body(self):\n child_block = self.block.child_blocks[\"body\"]\n\n self.assertIsInstance(child_block, RichTextBlock)\n self.assertFalse(child_block.required)",
"def test_inline_in_block():\r\n source = '<div>Hello, <em>World</em>!\\n<p>Lipsum.</p></div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])]),\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n source = '<div><p>Lipsum.</p>Hello, <em>World</em>!\\n</div>'\r\n expected = [\r\n ('div', 'Block', [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Lipsum.')])]),\r\n ('div', 'AnonBlock', [\r\n ('div', 'Line', [\r\n ('div', 'Text', 'Hello, '),\r\n ('em', 'Inline', [\r\n ('em', 'Text', 'World')]),\r\n ('div', 'Text', '!\\n')])])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n\r\n # Absolutes are left in the lines to get their static position later.\r\n source = '''<p>Hello <em style=\"position:absolute;\r\n display: block\">World</em>!</p>'''\r\n expected = [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])]\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n assert_tree(box, expected)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, expected)\r\n\r\n # Floats are pull to the top of their containing blocks\r\n source = '<p>Hello <em style=\"float: left\">World</em>!</p>'\r\n box = parse(source)\r\n box = build.inline_in_block(box)\r\n box = build.block_in_inline(box)\r\n assert_tree(box, [\r\n ('p', 'Block', [\r\n ('p', 'Line', [\r\n ('p', 'Text', 'Hello '),\r\n ('em', 'Block', [\r\n ('em', 'Line', [\r\n ('em', 'Text', 'World')])]),\r\n ('p', 'Text', '!')])])])",
"def is_block(self):\n if self.get_level() == 1:\n return True\n else:\n return False",
"def collect(cls, block_structure):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_nested_three_block_max_block_max_block_max_empty():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >\\n > > > ]\",\n \"[BLANK(1,15):]\",\n \"[para(2,16):]\",\n \"[text(2,16):list:]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n<p>list</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def is_block_tag(tag):\n return getattr(tag, \"tag_display\", None) == \"block\"",
"def buildBlock(self, b):\n \"\"\"\n s = self.style\n colClass = self.getColClass(s.colWidth)\n b.block(self)\n b.div(class_=colClass, marginright=s.columnMarginRight, width=s.colWidth,\n marginleft=s.columnMarginLeft, margintop=s.columnMarginTop,\n paddingleft=s.columnPaddingLeft, float=s.columnFloat,\n display=s.columnDisplay,\n media=(\n \tMedia(width=s.columnWidthMobile,\n\t\t\t\tdisplay=s.columnDisplayMobile,\n float=s.columnFloatMobile,\n marginleft=s.columnMarginLeftMobile,\n marginright=s.columnMarginRightMobile,\n paddingleft=s.columnPaddingLeftMobile,\n paddingright=s.columnPaddingRightMobile,),\n ))\n \"\"\"\n self.buildColumn(b)\n \"\"\"\n b._div(comment=colClass)\n b._block(self)\n \"\"\"",
"def test_nested_three_block_nl_block_nl_block_no_bq1():\n\n # Arrange\n source_markdown = \"\"\">\n> >\n > > list\n> > > item\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::>]\",\n \"[BLANK(1,2):]\",\n \"[block-quote(2,1)::> >\\n > > ]\",\n \"[BLANK(2,4):]\",\n \"[para(3,7):]\",\n \"[text(3,7):list:]\",\n \"[end-para:::True]\",\n \"[block-quote(4,1)::> > > ]\",\n \"[para(4,7):]\",\n \"[text(4,7):item:]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<p>list</p>\n<blockquote>\n<p>item</p>\n</blockquote>\n</blockquote>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def test_nested_three_block_max_block_max_block_max_empty_no_bq2():\n\n # Arrange\n source_markdown = \"\"\" > > >\n > > list\"\"\"\n expected_tokens = [\n \"[block-quote(1,4): : > \\n > ]\",\n \"[block-quote(1,9):: > > ]\",\n \"[block-quote(1,14):: > > >]\",\n \"[BLANK(1,15):]\",\n \"[end-block-quote:::True]\",\n \"[end-block-quote:::True]\",\n \"[icode-block(2,10): :]\",\n \"[text(2,10):\\a>\\a>\\a list: ]\",\n \"[end-icode-block:::True]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<blockquote>\n<blockquote>\n</blockquote>\n</blockquote>\n<pre><code> > list\n</code></pre>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)",
"def Block(self):\n self._tab_level += 1\n yield\n self._tab_level -= 1",
"def __init__(self, root_block):\n self.root_block = root_block\n self.blocks = {'@': root_block}\n self.block_names = {\"default\":[]}\n #registering blocks by id\n self.register_blocks(root_block.ch_blocks)\n self.register_block_names()",
"def div(self, elem, theme, width):\n block, indent = [], ansi.length(theme.margin)\n\n for child in self.children(elem, self.BLOCK):\n if child.tag not in [u('ul'), u('ol'), u('br')] and len(block) != 0:\n block += ['']\n\n block += [line for line in self.call(child, width - indent)]\n\n if len(self.links) > 0:\n block += ['']\n\n for t, link in enumerate(self.links):\n block += [theme.links.format(index=t + 1, href=link)]\n\n return [theme.margin + line for line in block]",
"def IsNestedFamANDAssem(self) -> bool:",
"def get_right_sub_block(self):\n return self._right_sub_block"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
One table can go inside another one.
|
def test_correctly_nested_tables(self):
...
|
[
"def table_mother_not_concordant():\n pass",
"def table_father_not_concordant():\n pass",
"def visit_table(self, table):\n pass",
"def changeSubTable(self, subtable):\n\t\tself.nt = NetworkTables.getTable(\"SmartDashboard/\" + subtable)",
"def create_as_another_table(self, node=None):\n user_name = f\"user_{getuid()}\"\n table_name = f\"table_{getuid()}\"\n source_table_name = f\"source_table_{getuid()}\"\n exitcode, message = errors.not_enough_privileges(name=f\"{user_name}\")\n\n if node is None:\n node = self.context.node\n\n with table(node, f\"{source_table_name}\"):\n with user(node, f\"{user_name}\"):\n\n try:\n with When(\"I grant CREATE TABLE privilege to a user\"):\n node.query(f\"GRANT CREATE TABLE ON {table_name} TO {user_name}\")\n\n with Then(\"I try to create a table as another table\"):\n node.query(f\"CREATE TABLE {table_name} AS {source_table_name}\", settings = [(\"user\", f\"{user_name}\")])\n\n finally:\n with Finally(\"I drop the tables\"):\n node.query(f\"DROP TABLE IF EXISTS {table_name}\")",
"def _tableSubHeaderTag( self ):",
"def test_get_table_list_3(self):\n table_list = querying.get_table_list(self.PhageID)\n\n self.assertTrue(self.phage in table_list)\n self.assertFalse(self.gene in table_list)",
"def test_get_table_list_4(self):\n columns = [self.PhageID, self.GeneID]\n table_list = querying.get_table_list(columns)\n\n self.assertTrue(self.phage in table_list)\n self.assertTrue(self.gene in table_list)",
"def test_get_table_2(self):\n table_obj = querying.get_table(self.metadata, \"phage\")\n\n self.assertEqual(table_obj, self.phage)",
"def get_wrapped_table(self):\r\n if self.is_table_wrapper:\r\n for child in self.children:\r\n if isinstance(child, TableBox):\r\n return child\r\n else: # pragma: no cover\r\n raise ValueError('Table wrapper without a table')",
"def test_if_two_tables(small_table, large_table):\n assert left_join(small_table, large_table) == [['yellow', 'blue', 'green'], ['gray', 'brown', 'pink'], ['black', 'red', 'orange'], ['cyan', 'puce', 'white']]",
"def add_table(self, name):\n self.results = True\n table_names = self.tables_struct.keys()\n if self.subtest != 0:\n with self.subTest('Test {}'.format(str(self.subtest))):\n self.assertIn(name, table_names)\n self.current_table = name\n else:\n self.assertIn(name, self.table_names)",
"def showTable(self, table):\n layoutManager = slicer.app.layoutManager()\n currentLayout = layoutManager.layout\n layoutWithTable = slicer.modules.tables.logic().GetLayoutWithTable(currentLayout)\n layoutManager.setLayout(layoutWithTable)\n appLogic = slicer.app.applicationLogic()\n appLogic.GetSelectionNode().SetActiveTableID(table.GetID())\n appLogic.PropagateTableSelection()",
"def __create_tableone(self):\n table = [self._n_row] + self._cont_table + self._cat_table\n\n return table",
"def compare_tables(test_table: Table, expected_table: Table):\n assert test_table.name == expected_table.name\n # pylint: disable=unidiomatic-typecheck\n assert type(test_table) == type(expected_table)\n assert test_table.fields == expected_table.fields",
"def parent_tablename(self) -> str:\n return self._parent.tablename",
"def _has_arrow_table(self):\n return self._partitions is not None and isinstance(\n self._partitions[0][0].get(), pyarrow.Table\n )",
"def setUp(self):\n # Level 0 0 table. I.e., first table on level 0\n self.category0 = DynamicTable(name='level0_0', description=\"level0_0 DynamicTable\")\n self.category0.add_row(id=10)\n self.category0.add_row(id=11)\n self.category0.add_row(id=12)\n self.category0.add_row(id=13)\n self.category0.add_column(data=['tag1', 'tag2', 'tag2', 'tag1', 'tag3', 'tag4', 'tag5'],\n name='tags',\n description='custom tags',\n index=[1, 2, 4, 7])\n self.category0.add_column(data=np.arange(4),\n name='myid',\n description='custom ids',\n index=False)\n\n # Aligned table\n self.aligned_table = AlignedDynamicTable(name='aligned_table',\n description='parent_table',\n columns=[VectorData(name='a1', description='a1', data=np.arange(4)), ],\n colnames=['a1', ],\n category_tables=[self.category0, ])\n\n # Parent table\n self.parent_table = DynamicTable(name='parent_table',\n description='parent_table',\n columns=[VectorData(name='p1', description='p1', data=np.arange(4)),\n DynamicTableRegion(name='l1', description='l1',\n data=np.arange(4), table=self.aligned_table)])\n # Super-parent table\n dtr_sp = DynamicTableRegion(name='sl1', description='sl1', data=np.arange(4), table=self.parent_table)\n vi_dtr_sp = VectorIndex(name='sl1_index', data=[1, 2, 3], target=dtr_sp)\n self.super_parent_table = DynamicTable(name='super_parent_table',\n description='super_parent_table',\n columns=[VectorData(name='sp1', description='sp1', data=np.arange(3)),\n dtr_sp, vi_dtr_sp])",
"def _add_table_object(self, table: Union[TABLEH1, TABLEHT, TABLES1, TABLEST]) -> None:\n key = table.tid\n if key in self.tables:\n if not table == self.tables[key]:\n assert key not in self.tables, '\\ntable=\\n%s old_table=\\n%s' % (\n table, self.tables[key])\n assert key > 0\n self.tables[key] = table\n self._type_to_id_map[table.type].append(key)",
"def vrijednostTable (cls):\n\n return 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify consistent handling of empty-element tags, no matter how they come in through the markup.
|
def test_empty_element_tags(self):
...
|
[
"def testBasicTagAbsence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertFalse(self.parse(template))",
"def strip_empty_tags(self):\n tag = self.root\n while True:\n next_tag = tag.findNext(True)\n if not next_tag: break\n if next_tag.contents or next_tag.attrs:\n tag = next_tag\n continue\n next_tag.extract()",
"def test_complete_html_start_tag_with_single_no_value_attributes_and_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 7",
"def IsEmpty(self):\n if hasattr(self.__class__, 'XMLCONTENT'):\n return self.__class__.XMLCONTENT == XMLEmpty\n else:\n return False",
"def test_complete_html_start_tag_with_single_no_value_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" show>\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 6",
"def test_empty(test_empty_tree):\n assert find(test_empty_tree) == False",
"def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('ExpectedTemplateSyntaxError')\r\n except TemplateSyntaxError as e:\r\n self.assertEqual('test.html', e.filename)\r\n if sys.version_info[:2] > (2,4):\r\n self.assertEqual(2, e.lineno)",
"def test_for_with_empty_value(self):\r\n try:\r\n MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <py:for each=\"\">\r\n empty\r\n </py:for>\r\n </doc>\"\"\", filename='test.html').generate()\r\n self.fail('ExpectedTemplateSyntaxError')\r\n except TemplateSyntaxError, e:\r\n self.assertEqual('test.html', e.filename)\r\n if sys.version_info[:2] > (2,4):\r\n self.assertEqual(2, e.lineno)",
"def test_complete_html_start_tag_with_invalidly_named_no_value_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" sh*ow>\"\n parse_index = 0\n expected_is_valid = False\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 1",
"def test_simple_complete_html_start_tag_with_no_attributes():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \">\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 1",
"def test_simple_complete_html_start_tag_with_no_attributes_and_whitespace():\n\n # Arrange\n input_tag_name = \"a\"\n string_to_parse = \" >\"\n parse_index = 0\n expected_is_valid = True\n\n # Act\n actual_is_valid, parse_index = HtmlHelper.is_complete_html_start_tag(\n input_tag_name, string_to_parse, parse_index\n )\n\n # Assert\n assert expected_is_valid == actual_is_valid\n assert parse_index == 2",
"def testBasicTagPresence(self):\n template = '{{ ifpresent [tag] }} hello {{ endif }}'\n self.assertEqual(self.parse(template, tag='spam'), ' hello')",
"def test_otherwise_without_test(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <py:otherwise>foo</py:otherwise>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n foo\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def get_unnecessary_elements(tag, clear_elem):\n tag_list = list(filter(lambda e: 'none' not in e, tag))\n\n garbage_full = list()\n\n for each_tag in tag_list:\n split_tag = each_tag.split('\"')\n try:\n clear_tag = split_tag[1]\n if clear_tag in clear_elem or 'inline' in clear_tag or re.search(r'^\\d+$', clear_tag):\n pass\n else:\n garbage_full.append(each_tag)\n except IndexError:\n garbage_full.append(each_tag)\n return garbage_full",
"def testMultiTagPresence(self):\n template = '{{ ifpresent [one] [two] }} good {{ endif }}'\n self.assertEqual(self.parse(template, one=1, two=2), ' good')\n self.assertFalse(self.parse(template, one=1))\n self.assertFalse(self.parse(template, two=2))",
"def test_when_with_strip(self):\r\n tmpl = MarkupTemplate(\"\"\"<doc xmlns:py=\"http://genshi.edgewall.org/\">\r\n <div py:choose=\"\" py:strip=\"\">\r\n <span py:otherwise=\"\">foo</span>\r\n </div>\r\n </doc>\"\"\")\r\n self.assertEqual(\"\"\"<doc>\r\n <span>foo</span>\r\n </doc>\"\"\", tmpl.generate().render(encoding=None))",
"def test_non_valid_elementtree_input(self):\n\n validation_result = validify.validate(input_elementtree=\"1234\", validation_rules=compile_test_rules(), log_to_console=False)\n\n assert len(validation_result) == 1\n if len(validation_result) == 1:\n assert validation_result[0][\"message_id\"] == \"e0002\"",
"def test_section__end_tag_with_no_start_tag(self):\n template = '{{/section}}'\n try:\n self._assert_render(None, template)\n except ParsingError, err:\n self.assertEqual(str(err), \"Section end tag mismatch: section != None\")",
"def test_xform_empty_question_label_patch_content_add(self):\n xml_template = self.xml_template\n expected = \" \"\n xml_input = xml_template.format(\"\")\n parsed, _ = xform_patch._xform_empty_question_label_patch_content(\n xml_input)\n itext = parsed[\"h:html\"][\"h:head\"][\"model\"][\"itext\"]\n observed = itext[\"translation\"][0][\"text\"][0][\"value\"]\n self.assertIn(expected, observed)",
"def IsEmptyDoc(doc):\n if re.search(r'^\\s*$', doc):\n return True\n if re.search(r'^\\s*<para>\\s*(FIXME)?\\s*<\\/para>\\s*$', doc):\n return True\n return False"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A real XHTML document should come out exactly the same as it went in.
|
def test_real_xhtml_document(self):
...
|
[
"def Xtest_strip_doctypehtml(self):\n inp = '''Something\n <body>\n Result\n </body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'\n \n # a failing piece of HTML\n inp = '''Something\n <body>\n Result\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip().split() == ['Result','Else'], res.strip().split()\n \n # With a more complicated piece of HTML\n inp = '''Something\n <BODY \n onLoad=\"foo()\"\n >\n Result\n </Body>\n Else\n '''\n doc = self.folder.index_html\n res = doc._strip_doctypehtml(inp)\n assert res.strip() == 'Result'",
"def test_self_closing_attrs():\n page = Html()\n page.append(\"some plain text.\")\n page.append(\"some 2 plain text.\")\n page_body = Body()\n page_body.append(P(\"A simple paragraph of text\", \\\n style=\"text-align: center; font-style: oblique;\"))\n page_body.append(P(\"Another simple paragraph of text\"))\n page_hr = Hr()\n page_body.append(page_hr)\n page.append(page_body)\n page.append(\"Some more plain text.\")\n file_contents = render_result(page)\n print(file_contents) # so we can see it if the test fails\n\n # note: The previous tests should make sure that the tags are getting\n # properly rendered, so we don't need to test that here.\n assert \"some plain text\" in file_contents\n assert \"A simple paragraph of text\" in file_contents\n assert \"Some more plain text.\" in file_contents\n assert \"some plain text\" in file_contents\n # but make sure the embedded element's tags get rendered!\n assert '<p style=\"text-align: center; font-style: oblique;\">' in file_contents\n assert \"</p>\" in file_contents\n assert \"<hr />\" in file_contents\n #assert False",
"def testSAX2DOM(self):\n sax2dom = pulldom.SAX2DOM()\n sax2dom.startDocument()\n sax2dom.startElement(\"doc\", {})\n sax2dom.characters(\"text\")\n sax2dom.startElement(\"subelm\", {})\n sax2dom.characters(\"text\")\n sax2dom.endElement(\"subelm\")\n sax2dom.characters(\"text\")\n sax2dom.endElement(\"doc\")\n sax2dom.endDocument()\n\n doc = sax2dom.document\n root = doc.documentElement\n (text1, elm1, text2) = root.childNodes\n text3 = elm1.childNodes[0]\n\n self.assertIsNone(text1.previousSibling)\n self.assertIs(text1.nextSibling, elm1)\n self.assertIs(elm1.previousSibling, text1)\n self.assertIs(elm1.nextSibling, text2)\n self.assertIs(text2.previousSibling, elm1)\n self.assertIsNone(text2.nextSibling)\n self.assertIsNone(text3.previousSibling)\n self.assertIsNone(text3.nextSibling)\n\n self.assertIs(root.parentNode, doc)\n self.assertIs(text1.parentNode, root)\n self.assertIs(elm1.parentNode, root)\n self.assertIs(text2.parentNode, root)\n self.assertIs(text3.parentNode, elm1)\n doc.unlink()",
"def test_is_html_tag_properly(self):\r\n file=\"HTMLDOC.txt\"\r\n html_doc=p.read_file(file)\r\n result=p.is_html_tag_properly(html_doc)\r\n self.assertTrue(result,True)",
"def create_test_html():\n return lxml.html.fromstring(\"\"\"<html>\n <head>\n </head>\n <body>\n <div class=\"test\">Some <em>text</em></div>\n <img src=\"some_location\" alt=\"Alt text\" width=540>\n More <b>text</b>\n </body>\n </html>\"\"\")",
"def testParseContent(self):\n # XXX not sure it is good to store parsed document everytime\n self.assertTrue(isinstance(self.oodocument.parsed_content, etree._Element))\n self.assertTrue(self.oodocument.parsed_content.tag.endswith(\n 'document-content'))",
"def render_main_document_as_one(self, document, filepath, measures_annex=True):\n with DocumentRenderingContext(filepath) as ctx:\n doc, tag, text = ctx.doc_tag_text\n\n self.labeler = self.create_labeler(doc)\n self.doc = doc\n\n with tag('html'):\n doc.head(document.title)\n\n with tag('body'):\n doc.p('gegenereerd op ', datetime.datetime.now().isoformat(), style=\"font-size:11px\")\n\n with tag('div', klass='container'):\n self._render_fragment(document, self.doc.h1, self._render_chapter)\n\n with tag('div', klass='container'):\n self.render_verifier_annex(document)\n\n if measures_annex:\n with tag('div', klass='container'):\n self.render_measures_annex()\n\n self.doc = None\n self.labeler = None",
"def test_html_is_not_valid(self):\n url = \"\"\n single_date = date(2019, 3, 4)\n\n coins = {}\n with patch.object(\n BCRASMLScraper,\n 'fetch_content',\n return_value=' '\n ):\n scraper = BCRASMLScraper(url, coins, intermediate_panel_path=None, use_intermediate_panel=False)\n content = scraper.fetch_content(single_date)\n\n soup = BeautifulSoup(content, \"html.parser\")\n\n table = soup.find('table')\n head = table.find('thead') if table else None\n body = table.find('tbody') if table else None\n\n assert table is None\n assert head is None\n assert body is None",
"def check_doc1(html, has_base_url=True):\r\n assert html.root_element.tag == 'html'\r\n assert [child.tag for child in html.root_element] == ['head', 'body']\r\n _head, body = html.root_element\r\n assert [child.tag for child in body] == ['h1', 'p', 'ul']\r\n h1 = body[0]\r\n assert h1.text == 'WeasyPrint test document (with Ünicōde)'\r\n if has_base_url:\r\n url = urljoin(html.base_url, 'pattern.png')\r\n assert url.startswith('file:')\r\n assert url.endswith('weasyprint/tests/resources/pattern.png')\r\n else:\r\n assert html.base_url is None",
"def test_direct_html(self):\n direct_html = '<p>Hello, world.</p>'\n params = {\n 'workspace_name': self.getWsName(),\n 'direct_html': direct_html\n }\n result = self.getImpl().create_extended_report(self.getContext(), params)\n obj = self.dfu.get_objects({'object_refs': [result[0]['ref']]})\n self.assertEqual(obj['data'][0]['data']['direct_html'], direct_html)",
"def setup_empty_pagecontent_file(self):\n basedir = os.path.join(TestXmlDump.PUBLICDIR, 'enwiki', self.today)\n filename = \"{wiki}-{date}-pages-articles.xml.bz2\".format(\n wiki=self.en['wiki'].db_name, date=self.today)\n path = os.path.join(basedir, filename)\n with open(path, \"w\") as output:\n output.write(\"fake\\n\")",
"def test_thorough_sax2dom(self):\n pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)\n self._test_thorough(pd, False)",
"def generate_document(self):\n\n resp = requests.get(self.link)\n return BeautifulSoup(resp.text, 'xml')",
"def render_xml(self, d):\n\t\tself.set_flag(\"render\", False)\n\t\tself.response.headers[\"Content-Type\"] = \"application/xml\"\n\t\txml_txt = xml.dicttoxml(d)\n\t\tself.response.out.write(xml_txt)",
"def pp_html(self, filename=None, filehandle=None, standalone=False):\n fh = open(filename, 'w') if filename else filehandle\n if standalone:\n html_graph_prefix(fh)\n fh.write(u\"<table cellpadding=0 cellspacing=0 border=0>\\n\")\n fh.write(u\"<tr><td>\\n\")\n nodes = list(self.nodes.keys())\n # removed compare_id comparison function for python 3 compatibility\n nodes.sort()\n self._html_nodes_table(fh, nodes)\n fh.write(u\"</td>\\n\\n\")\n fh.write(u\"<td valign=top>\\n\")\n self._html_added_table(fh)\n fh.write(u\"</td></tr>\\n\\n\")\n fh.write(u\"</table>\\n\\n\")\n if standalone:\n fh.write(u\"</body>\\n</html>\\n\\n\")",
"def test_dtml_document(self):\n # Add the rendering script\n factory = self.root.source.manage_addProduct['OFS']\n factory.manage_addDTMLDocument('cool.css', 'Cool CSS')\n css = self.root.source._getOb('cool.css')\n css.munge(TEST_DTML)\n\n # Locations doesn't match, so the export fails.\n installable = CodeSourceInstallable('other:', '/')\n with self.assertRaises(InstallationError):\n installable.export(self.root.source)\n\n # Nothing got exported.\n self.assertItemsEqual(os.listdir(self.directory), [])\n\n # With an explicit location it will work, and not touch the installable.\n installable.export(self.root.source, directory=self.directory)\n\n self.assertItemsEqual(\n os.listdir(self.directory),\n ['parameters.xml', 'cool.css.dtml', 'source.ini'])\n self.assertIsFile('cool.css.dtml')\n self.assertIsFile('source.ini')\n self.assertIsFile('parameters.xml')\n with open(self.get_path('cool.css.dtml'), 'rb') as script:\n self.assertEqual(script.read(), TEST_DTML)\n with open(self.get_path('source.ini'), 'rb') as script:\n self.assertEqual(script.read(), TEST_SOURCE)",
"def test_document_write(self):\n\n self.set_html('foo')\n self.assert_warnings({\n 'id': ('js', 'document.write', 'evil'),\n 'message': Matches('document\\.write.*strongly discouraged'),\n 'description': Matches('should not be used')})",
"def make_soup(self):\n self.soup = BeautifulSoup(self.xml_fp, 'lxml-xml')\n self.xml_fp.close()",
"def render_xml(xml):\n response = make_response(xml, 200)\n response.headers['Content-Type'] = 'application/xml'\n return response",
"def test_beautiful_soup_can_parse_html_from_returned_content(self):\n soup = self.soupify(self.response)\n self.assertIsNotNone(soup)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A large XML document should come out the same as it went in.
|
def test_large_xml_document(self):
...
|
[
"def has_30k_or_fewer_records(medline_xml, parser=None, tree=None):",
"def testParseContent(self):\n # XXX not sure it is good to store parsed document everytime\n self.assertTrue(isinstance(self.oodocument.parsed_content, etree._Element))\n self.assertTrue(self.oodocument.parsed_content.tag.endswith(\n 'document-content'))",
"def KeepEncoding(self):\n self.chunk = XMLEntity.ChunkSize",
"def test_export_xml_to_file(self):\n pass",
"def _parse_document(self, filename):\n\n print 'Parsing %s ' % filename\n self.__current_file = filename\n\n root_element = self._get_root_element_from_xml(filename)\n # Parse the metadata element block and store in new document\n document = self._process_metadata_and_create_document(root_element)\n if document is not None:\n if self.verbose:\n self._print_metadata(document)\n\n # Parse and store the location elements\n locations = self._process_locations(root_element, document)\n from django.db.models import Count\n if self.verbose:\n print '\\tLocations mentions'.upper()\n for location in LocationMention.objects.filter(document=document).values('text').annotate(total=Count('text')) :\n print '\\t- {0} {1}'.format(location['text'], location['total'])\n print '\\tLocations ignored'.upper()\n print '\\t- ',self.__ignored_locations\n print ''\n return",
"def testGetContentXml(self):\n content_xml = self.oodocument.getContentXml()\n self.assertTrue('The content of this file is just' in content_xml)",
"def test_xmlparser_serialiser(self):\n\n sampletext = \"\"\"<root>\n <doc><name>\"Guido Rossum\"</name><address> \"88 Palo Alto\"</address><phone> \"776985411\"</phone></doc>\n <doc><name>\"John Smith\"</name><address> \"38 Driver Avenue\"</address><phone> \"091234567\"</phone></doc>\n <doc><name>\"Jane Doe\"</name><address> \"17 Waine Street\"</address><phone> \"0494512390\"</phone></doc>\n</root>\n\"\"\" \n self.assertEqual(self.xmlo.serialise(\"./test_input.txt\"), sampletext)\n\n return",
"def test_export_xml(self):\n pass",
"def ScXMLDocument_readXMLData(xmldoc: 'cc_xml_doc *') -> \"ScXMLDocument *\":\n return _coin.ScXMLDocument_readXMLData(xmldoc)",
"def test_read_xml_correction(self):\n gfile = GarminParse(filename=GMNFILE, corr_list={'2011-05-07T15:43:08Z': {0: [1.1, 300]}})\n gfile.read_file()\n tmp = '%s' % gfile\n test0 = 'GarminFile<filename=test.gmn, filetype=gmn, ' + \\\n 'begin_datetime=2011-05-07 15:43:08, sport=biking, ' + \\\n 'total_calories=61, total_distance=1770.2784, ' + \\\n 'total_duration=300, total_hr_dur=0, total_hr_dis=0>'\n test1 = test0.replace('total_distance=1770.2784', 'total_distance=1770.2784000000001')\n self.assertTrue(gfile.filetype == 'gmn')\n self.assertEqual(gfile.begin_datetime.date(), datetime.date(year=2011, month=5, day=7))\n self.assertIn(tmp, [test0, test1])\n gsum = GarminSummary(filename=GMNFILE, corr_list={'2011-05-07T15:43:08Z': {0: [1.1, 300]}})\n gsum.read_file()\n tmp = '%s' % gsum\n test0 = 'GarminSummary<filename=test.gmn, begin_datetime=' \\\n '2011-05-07 10:43:08-05:00, sport=biking, ' \\\n 'total_calories=61, total_distance=1770.2784, ' \\\n 'total_duration=300, total_hr_dur=0, total_hr_dis=0, ' \\\n 'number_of_items=1, md5sum=af6f79ef18f4ec5526d3f987b6f00f9b>'\n test1 = test0.replace('total_distance=1770.2784', 'total_distance=1770.2784000000001')\n test2 = test0.replace('10:43:08-05:00', '11:43:08-04:00')\n test3 = test1.replace('10:43:08-05:00', '11:43:08-04:00')\n print(tmp)\n print(test0)\n print(test1)\n print(test2)\n self.assertIn(tmp, [test0, test1, test2, test3])",
"def test_thorough_sax2dom(self):\n pd = SAX2DOMTestHelper(None, SAX2DOMExerciser(), 12)\n self._test_thorough(pd, False)",
"def load_xml(self):\n try:\n self.root = XMLReader(self.path).root\n\n #for sign in self.root.findall('./signs/sign'):\n # self.load_sign_xml(sign)\n\n for block in self.root.findall('./blocks/block'):\n self.load_block_xml(block)\n\n # load replacments etc...\n except Exception, e:\n log.exception('error loading buildfile')",
"def log_xml(self):\n\n lFH = self.logger.getLogHandle();\n # xml_print( self.puke_dom, lFH )\n # lFH.write( MyXML.getRootDocumentXML(self) )\n lFH.write(self.getRootDocumentXML())",
"def escribir(self):\n tree.write('metadata1.xml')\n bs = BeautifulSoup(open('metadata1.xml'), 'xml')\n archivo1 = open('metadata1.xml', \"w+\")\n archivo1.write(bs.prettify())",
"def testXMLWithUknownData(self):\n self.XMLSchemaService.loadSchema('http://queue.amazonaws.com/doc/2008-01-01/QueueService.xsd', self)\n self.runLoop.run()\n assert(self.schema)\n parser = self.schema.newParser()\n parser.feed(message_response_with_uknown_elements)\n result = parser.finish()\n self.assertEqual('8f2770293f9b94ad705d5fd742f5f885', result.ReceiveMessageResult.Message[0].MD5OfBody)",
"def _readMoreXML(self,xmlNode):\n self.outputDeck = -1 # default is the last deck!\n for child in xmlNode:\n if child.tag == 'outputDeckNumber':\n try : self.outputDeck = int(child.text)\n except ValueError: raise ValueError(\"can not convert outputDeckNumber to integer!!!! Got \"+ child.text)",
"def test_repval_large_elem(self):\n elem = DataElement(0x00820003, 'UT', 'a' * 1000)\n assert len(elem.repval) < 100",
"def generate_document(self):\n\n resp = requests.get(self.link)\n return BeautifulSoup(resp.text, 'xml')",
"def test_large(test_large_tree):\n assert find(test_large_tree) == 12334",
"def storePageXmlSetofFiles(self,lListOfDocs):\n for i,(doc,img) in enumerate(lListOfDocs):\n if img is None: img='fakeimage.jpg' # generated\n if self.storagePath == \"\":\n if os.path.dirname(self.inputFileName) =='':\n self.outputFileName = img[:-3]+\"_%.4d\"%(i+1) + \".xml\"\n else:\n self.outputFileName = os.path.dirname(self.inputFileName)+os.sep+img[:-3]+\"_%.4d\"%(i+1) + \".xml\"\n else:\n self.outputFileName = self.storagePath + os.sep+img[:-4]+\"_%.4d\"%(i+1) + \".xml\"\n print(\"output: %s\" % self.outputFileName)\n try:self.writeDom(doc, bIndent=True)\n except IOError as e:\n print(e)\n return -1 \n return 0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Worker function for doing FVA with multiprocessing. For use as a target of multiprocessing.Process. Each entry in job_queue should be a string giving a variable in the model (or None, as a termination signal). The corresponding entry placed on result_queue is a dict mapping that variable to its FVA result, or to the string 'failure' if the optimization fails.
|
def _fva_worker(model, job_queue, result_queue, guess):
done = 0
while True:
try:
key = job_queue.get(timeout=3600)
except Empty:
print 'FVA worker finishing anomalously after completing %d tasks' % done
return
if key is None:
print 'FVA worker finishing normally after completing %d tasks' % done
return
try:
result = single_fva(model, key, guess)
result_queue.put({key: result})
except nlcm.OptimizationFailure:
result_queue.put({key: 'failure'})
done += 1
|
[
"def do_fva(model, variables=None, guess=None,\n n_procs=default_n_parallel_procs, cache={},\n check_failures=True, log_interval=100, log_filename=None):\n if log_filename:\n logger = logging.getLogger(log_filename)\n logger.setLevel(logging.INFO)\n fh = logging.FileHandler(filename=log_filename + '.txt')\n logger.addHandler(fh)\n fh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))\n \n if variables is None:\n variables = model.variables\n\n new_variables = []\n results = {}\n for v in variables:\n if v in cache:\n results[v] = cache[v]\n else:\n new_variables.append(v)\n\n logging.info('Total FVA results requested: %d' % len(variables))\n logging.info('Found results for old variables: %d' % len(results))\n logging.info('Analyzing new variables: %d' % len(new_variables))\n if not new_variables:\n return results\n\n original_objective = model.objective_function\n try:\n if n_procs > 1:\n # I think that manually setting up a bunch of worker \n # processes with information about the model may be faster\n # than using a Pool and providing the model as an argument \n # each time, though there may be a cleaner way to do this\n # using the tools in the multiprocessing module.\n argument_queue = mp.Queue()\n result_queue = mp.Queue()\n processes = [mp.Process(target=_fva_worker, \n args=(model,\n argument_queue, result_queue,\n guess)) for i in xrange(n_procs)]\n for v in new_variables:\n argument_queue.put(v)\n # Add termination signals\n for p in processes:\n argument_queue.put(None)\n for p in processes:\n p.start()\n results = {}\n # We won't get them back in order, but we know how many\n # there will be:\n counter = 0 \n counter_max = len(new_variables)\n temp_results = {}\n for v in new_variables:\n result = result_queue.get()\n result_key = result.keys()[0]\n results.update(result)\n if log_filename:\n temp_results.update(result)\n if (counter+1) % log_interval == 0:\n temp_filename = (log_filename +\n '_%d.pickle' % counter)\n with open(temp_filename,'w') as f:\n pickle.dump(temp_results, f)\n logger.info('(%d/%d) ' % (counter+1, counter_max) + \n ', '.join(temp_results.keys())) \n temp_results = {}\n counter += 1 \n for p in processes:\n p.join()\n failed_variables = [v for v, result in results.iteritems()\n if result == 'failure']\n if failed_variables and check_failures:\n raise nlcm.OptimizationFailure(\n 'FVA encountered %d optimization failures (%s, ...)' %\n (len(failed_variables), failed_variables[0])\n )\n\n else:\n for var in new_variables:\n try:\n extrema = single_fva(model, var, guess)\n results[var] = tuple(extrema)\n except nlcm.OptimizationFailure:\n if check_failures:\n raise nlcm.OptimizationFailure('FVA failed checking %s' % var)\n else:\n results[var] = 'failure'\n\n finally:\n model.objective_function = original_objective\n model.compile()\n return results",
"def worker(log_dir: str, python_file_name: str, gpu: int,\n job_queue: multiprocessing.Queue, done_queue: multiprocessing.Queue):\n while not job_queue.empty():\n params = job_queue.get()\n if params is None:\n return\n done_queue.put(launch_experiment(log_dir, python_file_name, gpu, params))",
"def _process_worker(call_queue, result_queue):\r\n while True:\r\n call_item = call_queue.get(block=True)\r\n if call_item is None:\r\n # Wake up queue management thread\r\n result_queue.put(None)\r\n return\r\n try:\r\n r = call_item.fn(*call_item.args, **call_item.kwargs)\r\n except BaseException:\r\n e = sys.exc_info()[1]\r\n result_queue.put(_ResultItem(call_item.work_id,\r\n exception=e))\r\n else:\r\n result_queue.put(_ResultItem(call_item.work_id,\r\n result=r))",
"def run_job():",
"def scene_worker() :\n while True : \n f = work_mgr.work.get()\n if f is None : \n break \n\n idx = f[0]\n args = f[1:]\n scene_pts = GOESVector.from_scene(*args)\n work_mgr.product.put( (idx, scene_pts) )",
"def runit(func_args_queue):\n while True:\n try:\n f, args = func_args_queue.get(block=False)\n f(*args)\n except Empty:\n break",
"def process_queue(self):\n try:\n if not self.searching:\n self.end_search()\n else:\n task_result = self.queue.get(0)\n if task_result != 'end_of_list':\n link = network.get_filename_from_url(self.pdfs_links[self.pdf_index])\n self.append_to_text_area(\"(%d/%d) Verificando '%s'.\\n\" % (self.pdf_index + 1, len(self.pdfs_links), link), log=True)\n if task_result[0]:\n self.append_to_text_area(\">>> '\" + ', '.join(task_result[0]) + \"' encontrado em '\" + str(task_result[1]) + \"'\\n\", tag='success', log=True)\n self.pdf_index += 1\n\n PdfStringSearcherTask(self.queue, self.pdf_index, self.pdfs_links, self.expressions).start()\n self.master.after(100, self.process_queue)\n else:\n self.end_search()\n except Queue.Empty:\n self.master.after(100, self.process_queue)",
"def lsf(self, *args, **kwargs):\n # Optional name #\n name = kwargs.get('name')\n # Get extra optional keyword parameters #\n queue = kwargs.pop('queue') if 'queue' in kwargs else None\n # Call the user defined function #\n cmd_dict = self.function(*args, **kwargs)\n cmd_dict['arguments'] = [str(a) for a in cmd_dict['arguments']]\n # Compose the command #\n bsub_cmd = [\"bsub\", \"-o\", \"/dev/null\", \"-e\", \"/dev/null\", \"-K\", \"-r\"]\n if queue: bsub_cmd += ['-q', queue]\n cmd_dict[\"arguments\"] = bsub_cmd + cmd_dict[\"arguments\"]\n # Start a process #\n proc = start_process(cmd_dict['arguments'])\n # Write the standard in #\n if 'stdin' in cmd_dict:\n proc.stdin.write(cmd_dict[\"stdin\"])\n proc.stdin.close()\n # The FutureLSF object takes it from here #\n future = Future(proc, cmd_dict, name)\n # Let's keep a reference of it #\n PARRALEL_JOBS.append(future)\n # Hand it back to the user #\n return future",
"def _queue_analysis(self):",
"def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M apirani@med.umich.edu\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job_joyce.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M apirani@med.umich.edu\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job_joyce.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)",
"def __init__(self, inp_queue, out_queue, error_queue):\n super(QLearningRecorrerWorker, self).__init__()\n self._inp_queue = inp_queue\n self._out_queue = out_queue\n self._error_queue = error_queue\n self._stoprequest = multiprocessing.Event()\n self._pauserequest = multiprocessing.Event()\n self.name = \"QLearningRecorrerWorker\"\n self.input_data = None",
"def shipper_process(config, my_name, my_data, full_address, which_untrusted):\n\n which_machine = full_address\n my_capabilities = my_data['capabilities']\n my_folder = worker_folder(my_name)\n\n # ignore keyboard interrupts in the shipper processes\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\n counter = 0\n while True:\n try:\n my_job = get_job(config, my_name, which_machine, my_capabilities, which_untrusted)\n if not my_job == \"\":\n counter = 0\n grade_queue_file(\n config, my_name, which_machine, which_untrusted, os.path.join(my_folder, my_job)\n )\n continue\n else:\n if counter == 0 or counter >= 10:\n # do not log this message, only print it to console when manual testing &\n # debugging\n print(\"{0} {1}: no available job\".format(my_name, which_untrusted))\n counter = 0\n counter += 1\n time.sleep(1)\n\n except Exception as e:\n config.logger.log_stack_trace(traceback.format_exc())\n my_message = (\n f\"ERROR in get_job {which_machine} {which_untrusted} {str(e)}. \"\n \"For more details, see traces entry\"\n )\n config.logger.log_message(my_message)\n time.sleep(1)",
"async def prediction_worker(self):\n q = self.prediction_queue\n margin = 10 # avoid finishing before other searches starting.\n while self.running_simulation_num > 0 or margin > 0:\n if q.empty():\n if margin > 0:\n margin -= 1\n await asyncio.sleep(self.prediction_worker_sleep_sec)\n continue\n item_list = [q.get_nowait() for _ in range(q.qsize())]\n # logger.debug(f\"predicting {len(item_list)} items\")\n data = np.array([x.state for x in item_list])\n policy_ary, value_ary = self.model.predict(data)\n for p, v, item in zip(policy_ary, value_ary, item_list):\n item.future.set_result((p, v))",
"def processSubmission(faFname, genome, pam, bedFname, batchBase, batchId, queue):\n if doEffScoring and not cpf1Mode:\n queue.startStep(batchId, \"effScores\", \"Calculating guide efficiency scores\")\n createBatchEffScoreTable(batchId)\n\n if genome==\"noGenome\":\n # skip off-target search\n if cpf1Mode:\n errAbort(\"Sorry, no efficiency score has been published yet for Cpf1.\")\n open(bedFname, \"w\") # create a 0-byte file to signal job completion\n queue.startStep(batchId, \"done\", \"Job completed\")\n return\n\n if useBowtie:\n findOfftargetsBowtie(queue, batchId, batchBase, faFname, genome, pam, bedFname)\n else:\n findOfftargetsBwa(queue, batchId, batchBase, faFname, genome, pam, bedFname)\n\n return bedFname",
"def worker (func, inque, outque):\n for index, item in iter(inque.get, '__STOP__'):\n # inque.get is called until it returns the sentinel\n output = func(item)\n outque.put((index, output))\n inque.task_done()\n #print 'died'",
"def process(f,d_args,params=[],nproc=2):\n\t\n\tt=time()\n\tmn=mp.Manager()\n\tsys_q=mn.Queue()\n\terr_q=mn.Queue()\n\targ_q=mn.Queue()\n\tlock=mn.Lock()\n\tprint('[ %.4f ] init' %(time()-t))\n\n\tfor i,part in enumerate(splitter(d_args,mparts=nproc)):\n\t\targ_q.put(tuple([i,t,part+params]))\n\tprint('[ %.4f ] arg_q formed' %(time()-t))\n\n\tjobs=[mp.Process( target=wrapper, args=(f,arg_q,sys_q,err_q,lock))\\\n\t\tfor i in range(nproc)]\n\n\tprint('[ %.4f ] submiting jobs' %(time()-t))\n\tr=submit(jobs,sys_q,err_q,nproc,t)\n\n\tprint('[ %.4f ] completed' %(time()-t))\n\treturn r",
"def predict_from_queue(self):\n# features = self.get_input_features([\"据此,订约方同意终止认购协议,而公司及认购方概无责任根据认购协议分別发行及认购股可换股债券。\"]*2)\n for i in self.estimator.predict(input_fn=self.queued_predict_input_fn,\n checkpoint_path=self.config[\"init_checkpoint\"]):\n# if self.verbose:\n# print('Putting in output queue')\n print(i)\n print('Putting in output queue')\n print(\"===========\")\n self.output_queue.put(i)",
"def compute_ref_mp(self, outdir, inrefparam=CURefPy.InputRefparam(), savescaled=True, savemoveout=True, \\\n verbose=False, subsize=1000, deleteref=True, deletepost=True, nprocess=None):\n print '================================== Receiver Function Analysis ======================================'\n print 'Preparing data for multiprocessing'\n refLst=[]\n for staid in self.waveforms.list():\n netcode, stacode=staid.split('.')\n print 'Station: '+staid\n stla, elev, stlo=self.waveforms[staid].coordinates.values()\n evnumb=0\n outsta=outdir+'/'+staid\n if not os.path.isdir(outsta): os.makedirs(outsta)\n for event in self.events:\n evnumb+=1\n evid='E%05d' %evnumb\n tag='body_ev_%05d' %evnumb\n try: st=self.waveforms[staid][tag]\n except KeyError: continue\n phase=st[0].stats.asdf.labels[0]\n if inrefparam.phase != '' and inrefparam.phase != phase: continue\n evlo=event.origins[0].longitude; evla=event.origins[0].latitude; evdp=event.origins[0].depth\n otime=event.origins[0].time\n for tr in st:\n tr.stats.sac=obspy.core.util.attribdict.AttribDict()\n tr.stats.sac['evlo']=evlo; tr.stats.sac['evla']=evla; tr.stats.sac['evdp']=evdp\n tr.stats.sac['stlo']=stlo; tr.stats.sac['stla']=stla; tr.stats.sac['kuser0']=evid; tr.stats.sac['kuser1']=phase\n if verbose:\n magnitude=event.magnitudes[0].mag; Mtype=event.magnitudes[0].magnitude_type\n event_descrip=event.event_descriptions[0].text+', '+event.event_descriptions[0].type\n print 'Event ' + str(evnumb)+' : '+event_descrip+', '+Mtype+' = '+str(magnitude) \n refTr=CURefPy.RFTrace()\n refTr.get_data(Ztr=st.select(component='Z')[0], RTtr=st.select(component=inrefparam.reftype)[0],\n tbeg=inrefparam.tbeg, tend=inrefparam.tend)\n refLst.append( refTr )\n print 'Start multiprocessing receiver function analysis !'\n if len(refLst) > subsize:\n Nsub = int(len(refLst)/subsize)\n for isub in xrange(Nsub):\n print 'Subset:', isub,'in',Nsub,'sets'\n cstream=refLst[isub*subsize:(isub+1)*subsize]\n REF = partial(ref4mp, outdir=outsta, inrefparam=inrefparam)\n pool = multiprocessing.Pool(processes=nprocess)\n pool.map(AFTAN, cstream) #make our results with a map call\n pool.close() #we are not adding any more processes\n pool.join() #tell it to wait until all threads are done before going on\n cstream=refLst[(isub+1)*subsize:]\n REF = partial(ref4mp, outdir=outsta, inrefparam=inrefparam)\n pool = multiprocessing.Pool(processes=nprocess)\n pool.map(REF, cstream) #make our results with a map call\n pool.close() #we are not adding any more processes\n pool.join() #tell it to wait until all threads are done before going on\n else:\n REF = partial(ref4mp, outdir=outsta, inrefparam=inrefparam)\n pool = multiprocessing.Pool(processes=nprocess)\n pool.map(REF, refLst) #make our results with a map call\n pool.close() #we are not adding any more processes\n pool.join() #tell it to wait until all threads are done before going on\n print 'End of multiprocessing receiver function analysis !'\n print 'Start reading receiver function data !'\n for staid in self.waveforms.list():\n netcode, stacode=staid.split('.')\n print 'Station: '+staid\n stla, elev, stlo=self.waveforms[staid].coordinates.values()\n outsta=outdir+'/'+staid\n evnumb=0\n for event in self.events:\n evnumb+=1\n evid='E%05d' %evnumb\n sacfname=outsta+'/'+evid+'.sac'; postfname = outsta+'/'+evid+'.post.npz'\n if not os.path.isfile(sacfname): continue\n evlo=event.origins[0].longitude; evla=event.origins[0].latitude; evdp=event.origins[0].depth\n otime=event.origins[0].time\n refTr=obspy.read(sacfname)[0]\n 
ref_header = ref_header_default.copy()\n ref_header['otime'] = str(otime)\n ref_header['network'] = netcode\n ref_header['station'] = stacode\n ref_header['stla'] = stla\n ref_header['stlo'] = stlo\n ref_header['evla'] = evla\n ref_header['evlo'] = evlo\n ref_header['evdp'] = evdp\n ref_header['dist'] = refTr.stats.sac['dist']\n ref_header['az'] = refTr.stats.sac['az']\n ref_header['baz'] = refTr.stats.sac['baz']\n ref_header['delta'] = refTr.stats.delta\n ref_header['npts'] = refTr.stats.npts\n ref_header['b'] = refTr.stats.sac['b']\n ref_header['e'] = refTr.stats.sac['e']\n ref_header['arrival'] = refTr.stats.sac['user5']\n ref_header['phase'] = refTr.stats.sac['kuser1']\n ref_header['tbeg'] = inrefparam.tbeg\n ref_header['tend'] = inrefparam.tend\n ref_header['hslowness'] = refTr.stats.sac['user4']\n ref_header['ghw'] = inrefparam.f0\n ref_header['VR'] = refTr.stats.sac['user2']\n staid_aux=netcode+'_'+stacode+'_'+phase+'/'+evid\n self.add_auxiliary_data(data=refTr.data, data_type='Ref'+inrefparam.reftype, path=staid_aux, parameters=ref_header)\n if deleteref: os.remove(sacfname)\n if not os.path.isfile(postfname): continue\n ref_header['moveout'] = 1\n postArr = np.load(postfname)\n ampC=postArr['arr_0']; ampTC=postArr['arr_1']; strback=postArr['arr_2']\n if deletepost: os.remove(postfname)\n if savescaled: self.add_auxiliary_data(data=ampC, data_type='Ref'+inrefparam.reftype+'scaled', path=staid_aux, parameters=ref_header)\n if savemoveout: self.add_auxiliary_data(data=ampTC, data_type='Ref'+inrefparam.reftype+'moveout', path=staid_aux, parameters=ref_header)\n self.add_auxiliary_data(data=strback, data_type='Ref'+inrefparam.reftype+'streback', path=staid_aux, parameters=ref_header)\n if deleteref*deletepost: shutil.rmtree(outsta)\n print 'End reading receiver function data !' \n return",
"def predict(self, job, current_time, list_running_jobs):\n\t\tprint(\"Do it\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
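The _fva_worker record above pairs the queue-protocol docstring with a Python 2 implementation. As a reading aid only, here is a minimal Python 3 sketch of the same sentinel-terminated worker pattern; the solver call is replaced by a hypothetical stand-in (fake_single_fva), and the variable names, timeout, and process count are illustrative assumptions, not part of the original package.

import multiprocessing as mp
from queue import Empty


def fake_single_fva(key):
    # Hypothetical stand-in for the real per-variable optimization.
    return (0.0, 1.0)


def worker(job_queue, result_queue):
    # Pull variable names until a None sentinel (or a long silence) arrives.
    while True:
        try:
            key = job_queue.get(timeout=5)
        except Empty:
            return  # anomalous exit: no work arrived within the timeout
        if key is None:
            return  # normal exit: termination signal received
        result_queue.put({key: fake_single_fva(key)})


if __name__ == "__main__":
    jobs, results = mp.Queue(), mp.Queue()
    variables = ["v1", "v2", "v3"]
    procs = [mp.Process(target=worker, args=(jobs, results)) for _ in range(2)]
    for v in variables:
        jobs.put(v)
    for _ in procs:  # one sentinel per worker
        jobs.put(None)
    for p in procs:
        p.start()
    collected = {}
    for _ in variables:  # we know exactly how many results to expect
        collected.update(results.get())
    for p in procs:
        p.join()
    print(collected)

In this pattern one None sentinel is enqueued per worker, so every process sees exactly one termination signal, and the parent drains result_queue a known number of times before joining the workers.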
Minimize/maximize (serially) variables in the model. The model's existing upper/lower bounds are preserved, and the existing objective function is restored after FVA completes. If variables is None, all variables in the model are used. If cache is given, tuples of extrema are taken from the cache instead of being recalculated, wherever possible. If n_procs is >1, multiple processes are spawned to parallelize the FVA; this may not be faster if the number of calculations per process is low.
|
def do_fva(model, variables=None, guess=None,
n_procs=default_n_parallel_procs, cache={},
check_failures=True, log_interval=100, log_filename=None):
if log_filename:
logger = logging.getLogger(log_filename)
logger.setLevel(logging.INFO)
fh = logging.FileHandler(filename=log_filename + '.txt')
logger.addHandler(fh)
fh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
if variables is None:
variables = model.variables
new_variables = []
results = {}
for v in variables:
if v in cache:
results[v] = cache[v]
else:
new_variables.append(v)
logging.info('Total FVA results requested: %d' % len(variables))
logging.info('Found results for old variables: %d' % len(results))
logging.info('Analyzing new variables: %d' % len(new_variables))
if not new_variables:
return results
original_objective = model.objective_function
try:
if n_procs > 1:
# I think that manually setting up a bunch of worker
# processes with information about the model may be faster
# than using a Pool and providing the model as an argument
# each time, though there may be a cleaner way to do this
# using the tools in the multiprocessing module.
argument_queue = mp.Queue()
result_queue = mp.Queue()
processes = [mp.Process(target=_fva_worker,
args=(model,
argument_queue, result_queue,
guess)) for i in xrange(n_procs)]
for v in new_variables:
argument_queue.put(v)
# Add termination signals
for p in processes:
argument_queue.put(None)
for p in processes:
p.start()
results = {}
# We won't get them back in order, but we know how many
# there will be:
counter = 0
counter_max = len(new_variables)
temp_results = {}
for v in new_variables:
result = result_queue.get()
result_key = result.keys()[0]
results.update(result)
if log_filename:
temp_results.update(result)
if (counter+1) % log_interval == 0:
temp_filename = (log_filename +
'_%d.pickle' % counter)
with open(temp_filename,'w') as f:
pickle.dump(temp_results, f)
logger.info('(%d/%d) ' % (counter+1, counter_max) +
', '.join(temp_results.keys()))
temp_results = {}
counter += 1
for p in processes:
p.join()
failed_variables = [v for v, result in results.iteritems()
if result == 'failure']
if failed_variables and check_failures:
raise nlcm.OptimizationFailure(
'FVA encountered %d optimization failures (%s, ...)' %
(len(failed_variables), failed_variables[0])
)
else:
for var in new_variables:
try:
extrema = single_fva(model, var, guess)
results[var] = tuple(extrema)
except nlcm.OptimizationFailure:
if check_failures:
raise nlcm.OptimizationFailure('FVA failed checking %s' % var)
else:
results[var] = 'failure'
finally:
model.objective_function = original_objective
model.compile()
return results
|
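For orientation, a rough usage sketch of the do_fva entry above follows. The model object (and the single_fva backend it relies on) comes from the surrounding package and is not shown here, and the variable names are invented for illustration; treat this as an assumption-laden example of the call pattern rather than documented usage.

cache = {}

# First pass: serial FVA over two hypothetical named variables.
first = do_fva(model, variables=["v_biomass", "v_atp"], n_procs=1, cache=cache)

# Second pass: reuse the earlier results through the cache and fan the
# remaining variables out over four worker processes.
cache.update(first)
all_results = do_fva(model, n_procs=4, cache=cache, check_failures=False)

# Each value in the returned dict is a pair of extrema (minimum, maximum);
# with check_failures=False, variables whose optimization failed map to the
# string 'failure' instead.

Note that do_fva only reads from cache; the caller is responsible for updating it between passes, as done above.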
[
"def local_search(self, max_variables):\n assignments = self.assignments.copy()\n\n best_var = None\n best_improvement = 0\n\n for _ in range (0, max_variables):\n for var in range(0, self.cnf.num_variables):\n self.assignments[:,var] = 1-self.assignments[:,var]\n score, _, __, ___ = self.cnf.evaluate(assignments)\n improvement = score - self.get_score()\n if improvement > 0 and improvement > best_improvement:\n best_improvement = improvement\n best_var = var\n\n self.assignments[:,var] = 1-self.assignments[:,var]\n\n if best_improvement > 0:\n self.assignments[:,best_var] = 1-self.assignments[:,best_var]\n\n self.assignments = assignments",
"def compute_variables(self, experiments):\n for experiment in experiments:\n # Convert all lists to strings\n DictUtils.lists_to_strings(experiment)\n # Build initial version of a forward index (variables -> their dependencies)\n self.fwd_index = {}\n for variable in experiment:\n self.update_index(experiment, variable)\n # iteratively compute variables\n while len(self.fwd_index) > 0:\n computable_vars = self.get_computable_variables()\n # print(\"Computable vars: %s\" % (str(computable_vars)))\n if len(computable_vars) == 0:\n self.report_unsatisfied_deps(experiment)\n exit(1)\n # Compute variables. We are either done with a variable or\n # this variable has nested references and we need to continue\n # computing it.\n computed, partially_computed = self.compute_current_variables(experiment, computable_vars)\n # print(\"Computed vars: %s\" % (str(computed)))\n # print(\"Partially computed vars: %s\" % (str(partially_computed)))\n # Remove computed vars from index and update dependencies of\n # remaining variables\n for computed_var in computed:\n self.fwd_index.pop(computed_var)\n for var in self.fwd_index:\n self.fwd_index[var]['udeps'].difference_update(set(computed))\n # Update partially computed variables - these are variables\n # that have nested references.\n for var in partially_computed:\n self.update_index(experiment, var)\n deps = self.fwd_index[var]['udeps'].copy()\n for dep in deps:\n if dep not in self.fwd_index:\n self.fwd_index[var]['udeps'].remove(dep)\n # exit(0)\n\n # We need to remove all internal temp variables\n # We need to remove all internal temp variables.\n # In P2, keys() makes a copy. In P3 it returns an iterator -> this\n # 'dictionary changed size during iteration' error. So, making copy\n for name in list(experiment.keys()):\n if name.startswith('__dlbs_'):\n experiment.pop(name)",
"def evaluateModel(model, variables):\r\n return eval(model.replace('variable_', ''), globals(), variables)",
"def fit(model, variables=None, evidence=None, verbose=3):\n if verbose>=3: print('[BNLEARN][inference] Variable Elimination..')\n if isinstance(model, dict):\n model = model['model']\n\n model_infer = VariableElimination(model)\n # Computing the probability of Wet Grass given Rain.\n q = model_infer.query(variables=variables, evidence=evidence)\n print(q)\n # for varname in variables: print(q[varname])\n return(q)",
"def optimize_core(\n self,\n z0: torch.Tensor,\n ado_ids: typing.List[str],\n tag: str = mantrap.constants.TAG_OPTIMIZATION,\n max_cpu_time: float = mantrap.constants.IPOPT_MAX_CPU_TIME_DEFAULT,\n approx_jacobian: bool = False,\n **solver_kwargs\n ) -> typing.Tuple[torch.Tensor, typing.Dict[str, torch.Tensor]]:\n # Clean up & detaching graph for deleting previous gradients.\n self._env.detach()\n\n # Build constraint boundary values (optimisation variables + constraints). The number of constraints\n # depends on the filter, that was selected, as it (might) result in a few number of other agents in\n # the \"optimization scene\", especially it might lead to zero agents (so an interactively) unconstrained\n # optimization.\n lb, ub = self.optimization_variable_bounds()\n cl, cu = list(), list()\n for name, module in self.module_dict.items():\n lower, upper = module.constraint_boundaries(ado_ids=ado_ids)\n cl += list(lower)\n cu += list(upper)\n\n # Formulate optimization problem as in standardized IPOPT format.\n z0_flat = z0.flatten().numpy().tolist()\n\n # Create ipopt problem with specific tag.\n problem = IPOPTProblem(self, ado_ids=ado_ids, tag=tag)\n\n # Use definition above to create IPOPT problem.\n nlp = ipopt.problem(n=len(z0_flat), m=len(cl), problem_obj=problem, lb=lb, ub=ub, cl=cl, cu=cu)\n nlp.addOption(\"max_cpu_time\", max_cpu_time)\n nlp.addOption(\"tol\", mantrap.constants.IPOPT_OPTIMALITY_TOLERANCE) # tolerance for optimality error\n # nlp.addOption(\"acceptable_tol\", mantrap.constants.IPOPT_OPTIMALITY_TOLERANCE)\n\n # An adaptive strategy might increase IPOPT internal computational effort but will decrease the number of\n # function evaluations which clearly is the bottleneck of the algorithm (see IPOPT documentation).\n nlp.addOption(\"mu_strategy\", \"adaptive\")\n\n # According to the documentation the `mehrotra-algorithm` improves performance in case of strictly\n # convex problem formulation. Although the prediction model generally is not convex, it turned out\n # that it can approximated as convex (with the interactive cost being the only non-convex module).\n nlp.addOption(\"mehrotra_algorithm\", \"yes\")\n\n if approx_jacobian:\n nlp.addOption(\"jacobian_approximation\", mantrap.constants.IPOPT_AUTOMATIC_JACOBIAN)\n # Due to the generalized automatic differentiation through large graphs the computational bottleneck\n # of the underlying approach clearly is computing computing derivatives. While calculating the Hessian\n # theoretically would be possible, it would introduce the need of a huge amount of additional computational\n # effort (squared size of gradient !), therefore it will be approximated automatically when needed.\n nlp.addOption(\"hessian_approximation\", mantrap.constants.IPOPT_AUTOMATIC_HESSIAN)\n\n # The larger the `print_level` value, the more print output IPOPT will provide.\n nlp.addOption(\"print_level\", 5 if self.logger.is_logging else 0)\n if self.logger.is_logging:\n nlp.addOption(\"print_timing_statistics\", \"yes\")\n # nlp.addOption(\"derivative_test\", \"first-order\")\n # nlp.addOption(\"derivative_test_tol\", 1e-4)\n\n # Solve optimization problem for \"optimal\" ego trajectory `x_optimized`.\n z_opt, info = nlp.solve(z0_flat)\n nlp.close()\n\n # Return solution as torch tensor.\n z2_opt = torch.from_numpy(z_opt).view(-1, 2)\n return z2_opt, self.logger.log",
"def run_optimizer():\n\n # Build the model\n prob = om.Problem()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n prob.model.add_subsystem('myfunc', objective_function())\n\n # Optimizer\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'COBYLA'#'SLSQP'\n\n # Variables\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n\n # Output, Connections and Design variables\n indeps.add_output(key, listval[0])\n prob.model.connect('indeps.'+key, 'myfunc.'+key)\n prob.model.add_design_var('indeps.'+key, lower=minval, upper=maxval)\n\n\n # Objective function\n prob.model.add_objective('myfunc.f_xy')\n\n #passnb = 440\n # define the component whose output will be constrained\n prob.model.add_subsystem('const', constraint())\n prob.model.add_constraint('const.passengers', upper=450, lower=440)\n\n # Run\n prob.setup()\n prob.run_driver()\n\n\n # Results (TODO: improve)\n log.info('=========================================')\n log.info('min = ' + str(prob['myfunc.f_xy']))\n \n iterations = arange(0,follower[\"Counter\"])\n\n plot(iterations, follower[\"optimVar\"])\n show()\n\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' = ' + str(prob['indeps.'+key]))\n\n log.info('Variable history')\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' => ' + str(listval))\n\n log.info('=========================================')",
"def map_query(\n self,\n variables=None,\n evidence=None,\n virtual_evidence=None,\n elimination_order=\"MinFill\",\n show_progress=True,\n ):\n variables = [] if variables is None else variables\n evidence = evidence if evidence is not None else dict()\n common_vars = set(evidence if evidence is not None else []).intersection(\n variables\n )\n if common_vars:\n raise ValueError(\n f\"Can't have the same variables in both `variables` and `evidence`. Found in both: {common_vars}\"\n )\n\n if isinstance(self.model, BayesianNetwork) and (virtual_evidence is not None):\n self._virtual_evidence(virtual_evidence)\n virt_evidence = {\"__\" + cpd.variables[0]: 0 for cpd in virtual_evidence}\n return self.map_query(\n variables=variables,\n evidence={**evidence, **virt_evidence},\n virtual_evidence=None,\n elimination_order=elimination_order,\n show_progress=show_progress,\n )\n\n if isinstance(self.model, BayesianNetwork):\n model_reduced, evidence = self._prune_bayesian_model(variables, evidence)\n else:\n model_reduced = self.model\n\n reduced_ve = VariableElimination(model_reduced)\n reduced_ve._initialize_structures()\n\n final_distribution = reduced_ve._variable_elimination(\n variables=variables,\n operation=\"marginalize\",\n evidence=evidence,\n elimination_order=elimination_order,\n joint=True,\n show_progress=show_progress,\n )\n argmax = compat_fns.argmax(final_distribution.values)\n assignment = final_distribution.assignment([argmax])[0]\n\n map_query_results = {}\n for var_assignment in assignment:\n var, value = var_assignment\n map_query_results[var] = value\n\n return map_query_results",
"def make_feasible(self, model):\n\n for idx, (name, param) in enumerate(model.named_parameters()):\n param.copy_(self.prox(param))",
"def pso(func, bounds, swarm_size=10, inertia=0.5, pa=0.8, ga=0.9, \n max_vnorm=10, num_iters=100, verbose=False, func_name=None):\n bounds = np.array(bounds)\n assert np.all(bounds[:,0] < bounds[:,1]) # each boundaries have to satisfy this condition\n dim = len(bounds)\n X = np.random.rand(swarm_size, dim) # range:0~1, domain:(swarm_size,dim)\n print('## Optimize:',func_name)\n\n def clip_by_norm(x, max_norm):\n norm = np.linalg.norm(x)\n return x if norm <=max_norm else x * max_norm / norm\n\n # --- step 1 : Initialize all particle randomly in the search-space\n particles = X * (bounds[:,1]-bounds[:,0]) + bounds[:,0]\n velocities = X * (bounds[:,1]-bounds[:,0]) + bounds[:,0]\n personal_bests = np.copy(particles)\n personal_best_fitness = [np.inf for p in particles] # np.inf\n # global_best_idx = -1 # np.inf\n # global_best = [np.inf, np.inf] # np.inf or particles[global_best_idx]\n # global_best_fitness = np.inf # func(global_best)\n global_best_idx = np.argmin(personal_best_fitness)\n global_best = personal_bests[global_best_idx]\n global_best_fitness = func(global_best)\n history = {'particles':[], \n 'global_best_fitness':[], \n 'global_best':[[np.inf, np.inf] for i in range(num_iters)],\n 'obj_func': func_name,}\n\n # --- step 2 : Iteration starts\n for i in range(num_iters):\n history['particles'].append(particles)\n history['global_best_fitness'].append(global_best_fitness)\n # history['global_best'].append(global_best) # seems not working\n history['global_best'][i][0] = global_best[0]\n history['global_best'][i][1] = global_best[1]\n\n if verbose: print('iter# {}:'.format(i), end='')\n # --- step 3 : Evaluate current swarm\n # personal best\n for p_i in range(swarm_size):\n fitness = func(particles[p_i])\n if fitness < personal_best_fitness[p_i]:\n personal_bests[p_i] = particles[p_i] # particle\n personal_best_fitness[p_i] = fitness # its fitness\n \n # global best\n if np.min(personal_best_fitness) < global_best_fitness:\n global_best_idx = np.argmin(personal_best_fitness)\n global_best = personal_bests[global_best_idx]\n global_best_fitness = func(global_best)\n\n # --- step 4 : Calculate the acceleration and momentum\n m = inertia * velocities\n acc_local = pa * np.random.rand() * (personal_bests - particles)\n acc_global = ga * np.random.rand() * (global_best - particles)\n\n # --- step 5 : Update the velocities\n velocities = m + acc_local + acc_global\n velocities = clip_by_norm(velocities, max_vnorm)\n\n # --- step 6 : Update the position of particles\n particles = particles + velocities\n\n # logging\n if verbose:\n print(' Fitness:{:.5f}, Position:{}, Velocity:{}'.format(global_best_fitness, global_best, np.linalg.norm(velocities)))\n\n return history",
"def run(self):\n import scipy.optimize\n\n logger = logging.getLogger(\"optimize\")\n\n self.timing[\"start\"] = time.time()\n logger.info(\"{0}: Starting optimization jobs...\".format(self.job))\n\n # optimization methods work best with number around 1, here we\n # normalize the optimization variables and save the multiplier to be\n # used when the function gets called by the optimizer.\n xfac = []\n for ival in self.idata:\n mag = eval(\"1.e\" + \"{0:12.6E}\".format(ival).split(\"E\")[1])\n xfac.append(mag)\n continue\n xfac = np.array(xfac)\n x0 = self.idata / xfac\n\n if self.bounds is not None:\n # user has specified bounds on the parameters to be optimized. Here,\n # we convert the bounds to inequality constraints (for cobyla) and\n # normalized bounds (for brute).\n lcons, ucons = [], []\n normalized_bounds = []\n for ibnd, bound in enumerate(self.bounds):\n lbnd, ubnd = bound\n lcons.append(lambda z, idx=ibnd, bnd=lbnd: z[idx] - bnd / xfac[idx])\n ucons.append(lambda z, idx=ibnd, bnd=ubnd: bnd / xfac[idx] - z[idx])\n normalized_bounds.append((lbnd / xfac[ibnd], ubnd / xfac[ibnd]))\n continue\n cons = lcons + ucons\n\n args = (\n self.func,\n self.funcargs,\n self.rootd,\n self.halt_on_err,\n self.job,\n self.names,\n self.descriptors,\n self.tabular,\n xfac,\n )\n\n if self.dryrun:\n # do a dry run of the function\n err = run_job(x0, *args)\n if err == np.nan:\n s = \"Optimization dry run failed\"\n logger.error(s)\n else:\n s = \"Optimization dry run successful\"\n logger.info(s)\n if environ.notebook:\n print(s)\n self.dryrun_error = err\n return\n\n if self.method == \"simplex\":\n xopt = scipy.optimize.fmin(\n run_job,\n x0,\n xtol=self.tolerance,\n ftol=self.tolerance,\n maxiter=self.maxiter,\n args=args,\n disp=0,\n )\n\n elif self.method == \"powell\":\n xopt = scipy.optimize.fmin_powell(\n run_job,\n x0,\n xtol=self.tolerance,\n ftol=self.tolerance,\n maxiter=self.maxiter,\n args=args,\n disp=0,\n )\n\n elif self.method == \"cobyla\":\n xopt = scipy.optimize.fmin_cobyla(\n run_job, x0, cons, consargs=(), args=args, disp=0\n )\n\n elif self.method == \"brute\":\n xopt = scipy.optimize.brute(\n run_job, normalized_bounds, args=args, Ns=self.Ns, disp=0, finish=None\n )\n\n self.xopt = xopt * xfac\n\n self.timing[\"end\"] = time.time()\n\n logger.info(\"\\nOptimization jobs complete\")\n\n self.finish()\n\n return",
"def optimize(self, enc):\n\n # a dummy model (everything is deselected)\n model = [v for v in range(enc.nv)]\n all_vars = set()\n\n # MaxSAT formula to work with\n formula = WCNF()\n\n # hard clauses\n for cl in enc.clauses:\n formula.append(cl)\n\n # we have to introduce selector variables (because of hitman)\n top_id = enc.nv\n\n # soft clauses (unweighted) comprise p and n literals\n for j in range(1, self.nof_terms + 1):\n for r in range(1, self.nof_feats + 1):\n formula.append([self.pvar(j, r)], 1)\n formula.append([self.nvar(j, r)], 1)\n all_vars.add(self.pvar(j, r))\n all_vars.add(self.nvar(j, r))\n\n if self.options.approx:\n hitman = LBX(formula, use_cld=self.options.use_cld,\n solver_name=self.options.solver)\n\n hses = []\n for i, hs in enumerate(hitman.enumerate()):\n hitman.block(hs)\n hses.append(hs)\n\n if i + 1 == self.options.approx:\n break\n\n hs = list(map(lambda v: -formula.soft[v - 1][0], min(hses, key=lambda x: len(x))))\n hitman.delete()\n else:\n hitman = RC2(formula, solver=self.options.solver, adapt=True,\n exhaust=True, incr=False, minz=False, trim=self.options.trim)\n\n hs = list(filter(lambda v: v < 0 and -v in all_vars, hitman.compute()))\n hitman.delete()\n\n # filling the model with the right values\n for e in hs:\n model[-e - 1] = -1\n\n return model",
"def generate_solvers(constraints, variables='x', nvars=None, locals=None):\n _constraints = constraints_parser(constraints, \\\n variables=variables, nvars=nvars)\n\n # default is globals with numpy and math imported\n globals = {}\n code = \"\"\"from math import *; from numpy import *;\"\"\"\n code += \"\"\"from numpy import mean as average;\"\"\" # use np.mean not average\n code += \"\"\"from mystic.math.measures import spread, variance, mean;\"\"\"\n code += \"\"\"from mystic.math.measures import impose_spread, impose_mean;\"\"\"\n code += \"\"\"from mystic.math.measures import impose_sum, impose_product;\"\"\"\n code += \"\"\"from mystic.math.measures import impose_variance;\"\"\"\n code = compile(code, '<string>', 'exec')\n exec code in globals\n if locals is None: locals = {}\n globals.update(locals) #XXX: allow this?\n \n # build an empty local scope to exec the code and build the functions\n results = {'solver':[]}\n for func in _constraints:\n fid = str(id(func))\n fdict = {'name':fid, 'equation':func, 'container':'solver'}\n # build the condition function\n code = \"\"\"\ndef %(container)s_%(name)s(x):\n '''%(equation)s'''\n exec('%(equation)s')\n return x\n%(container)s_%(name)s.__name__ = '%(container)s'\n\"\"\" % fdict #XXX: better, check if constraint satisfied... if not, then solve\n #XXX: should locals just be the above dict of functions, or should we...\n # add the condition to container then delete the condition\n code += \"\"\"\n%(container)s.append(%(container)s_%(name)s)\ndel %(container)s_%(name)s\"\"\" % fdict\n code = compile(code, '<string>', 'exec')\n exec code in globals, results\n\n #XXX: what's best form to return? will couple these with ctypes ?\n return tuple(results['solver'])",
"def make_feasible(model, proxes):\n for param, prox in zip(model.parameters(), proxes):\n if prox is not None:\n param.copy_(prox(param.unsqueeze(0)).squeeze(0))",
"def vmodel(\n nlag,\n step,\n azi,\n nug,\n nst,\n tstr1,\n c1,\n azi1,\n rmaj1,\n rmin1,\n tstr2=1,\n c2=0,\n azi2=0,\n rmaj2=0,\n rmin2=0,\n):\n lag = []\n gamma = []\n\n with open(\"vmodel.par\", \"w\") as f:\n f.write(\" \\n\")\n f.write(\" Parameters for VMODEL \\n\")\n f.write(\" ********************* \\n\")\n f.write(\" \\n\")\n f.write(\"START OF PARAMETERS: \\n\")\n f.write(\"vmodel.var -file for variogram output \\n\")\n f.write(\"1 \" + str(nlag) + \" -number of directions and lags \\n\")\n f.write(str(azi) + \" 0.0 \" + str(step) + \" -azm, dip, lag distance \\n\")\n f.write(str(nst) + \" \" + str(nug) + \" -nst, nugget effect \\n\")\n f.write(str(tstr1) + \" \" + str(c1) + \" \" + str(azi1) + \" 0.0 0.0 0.0 -it,cc,ang1,ang2,ang3 \\n\")\n f.write(str(rmaj1) + \" \" + str(rmin1) + \" 0.0 -a_hmax, a_hmin, a_vert \\n\")\n f.write(str(tstr2) + \" \" + str(c2) + \" \" + str(azi2) + \" 0.0 0.0 0.0 -it,cc,ang1,ang2,ang3 \\n\")\n f.write(str(rmaj2) + \" \" + str(rmin2) + \" 0.0 -a_hmax, a_hmin, a_vert \\n\")\n\n os.system(\"vmodel.exe vmodel.par\")\n\n with open(\"vmodel.var\") as f:\n next(f) # skip the first line\n\n for line in f:\n _, l, g, *_ = line.split()\n lag.append(float(l))\n gamma.append(float(g))\n\n return lag, gamma",
"def prepare_grid(evolution_model='mist',\n variables=['log_L', 'log_Teff', 'log_g', 'M_H'],\n parameters=['mass_init', 'M_H_init', 'phase'],\n set_default=True, \n return_all_variables=False,\n **kwargs):\n \n files, fehs = get_files(evolution_model)\n \n grid_pars = []\n grid_vars = []\n \n fehlim = kwargs.pop('M_H_lim', (-np.inf, np.inf))\n \n #-- get list of all availabel variables but remove the parameters\n # and make sure that the variables are the first in the list\n all_variables = fits.getdata(files[0]).dtype.names\n remove = np.hstack([parameters, variables])\n all_variables = np.delete(all_variables, np.where(np.in1d(all_variables, remove)))\n \n if return_all_variables:\n variables = np.hstack([variables, all_variables])\n \n \n for filename, z in zip(files, fehs):\n \n #-- skip the file if it is out of metalicity range\n if z < fehlim[0] or z > fehlim[1]: continue\n \n data = fits.getdata(filename)\n \n keep = np.ones(len(data),bool)\n \n #-- run over all provided kwargs to check if any limitations on the grid\n # are requested, and apply them. Limits can be given for any parameter\n # in the grid.\n for key in kwargs:\n if not '_lim' in key: continue\n \n low, high = kwargs[key][0], kwargs[key][1]\n key = key.replace('_lim', '')\n in_range = (low<=data[key]) & (data[key]<=high)\n \n keep = keep & in_range\n data = data[keep]\n \n #-- only keep the parameters and variables that are needed to reduce memory\n pars_ = np.vstack([data[name] for name in parameters])\n vars_ = np.vstack([data[name] for name in variables])\n \n if sum(keep):\n grid_pars.append(pars_)\n grid_vars.append(vars_)\n \n grid_pars = np.hstack(grid_pars)\n grid_vars = np.hstack(grid_vars)\n \n axis_values, pixelgrid = interpol.create_pixeltypegrid(grid_pars, grid_vars)\n \n if set_default:\n #-- store the prepared pixel grid to be used by interpolation functions\n global defaults\n defaults = (axis_values, pixelgrid, variables)\n \n return axis_values, pixelgrid, variables",
"def compute_regularization_energy(self, I0_source, variables_from_forward_model=None,\n variables_from_optimizer=None):\n pass",
"def optimize(self):\r\n self.view2idx()\r\n x0 = np.hstack((self.camera_params.ravel(), self.points_3d.ravel()))\r\n print(len(self.camera_params.ravel()), len(self.points_3d.ravel()))\r\n fun(x0, self.n_cameras, self.n_points, self.camera_indices, self.point_indices, self.points_2d, self.K)\r\n A = bundle_adjustment_sparsity(self.n_cameras, self.n_points, self.camera_indices, self.point_indices)\r\n t0 = time.time()\r\n res = least_squares(fun, x0, jac_sparsity=A, verbose=2, x_scale='jac', ftol=1e-4, method='trf', xtol=1e-12,\r\n args=(self.n_cameras, self.n_points, self.camera_indices, self.point_indices,\r\n self.points_2d, self.K))\r\n t1 = time.time()\r\n logging.info(f\"Optimized {self.n_points} in {t1-t0} seconds.\")\r\n\r\n points_3d = res.x[self.n_cameras * 12:].reshape(self.n_points, 3)\r\n poses = res.x[:self.n_cameras * 12].reshape(self.n_cameras, 12)\r\n\r\n return poses, points_3d",
"def compute_current_variables(self, experiment, computable_variables):\n computed = []\n partially_computed = []\n for var in computable_variables:\n is_str = isinstance(experiment[var], Six.string_types)\n if not is_str:\n computed.append(var)\n continue\n\n if is_str and len(self.fwd_index[var]['deps']) > 0:\n for ref_var in self.fwd_index[var]['deps']:\n replace_pattern = \"${%s}\" % ref_var\n if ref_var in experiment:\n replace_value = ParamUtils.to_string(experiment[ref_var])\n elif ref_var in os.environ:\n replace_value = ParamUtils.to_string(os.environ[ref_var])\n else:\n msg = \"Cannot determine value of the parameter %s = %s because variable `%s` not found. \"\\\n \"Either this variable is not in the list of benchmark parameters, or this may happen \"\\\n \"if variable's name depends on other variable that's empty or set to an incorrect \"\\\n \"value. For instance, the name of ${${exp.framework}.docker.image} variable depends \"\\\n \"on ${exp.framework} value. If it's empty, the variable name becomes '.docker.image' \"\\\n \"what's wrong.\"\n raise LogicError(msg % (var, experiment[var], ref_var))\n experiment[var] = experiment[var].replace(replace_pattern, replace_value)\n\n # Search for computable components\n while True:\n idx = experiment[var].find('$(')\n if idx < 0:\n break\n end_idx = experiment[var].find(')$', idx+2)\n if end_idx < 0:\n raise ConfigurationError(\"Cannot find ')$' in %s. Variable cannot be computed\" % (experiment[var]))\n try:\n eval_res = eval(experiment[var][idx+2:end_idx])\n except NameError as err:\n logging.error(\"Cannot evaluate python expression: %s\", experiment[var][idx+2:end_idx])\n raise err\n logging.debug(\"\\\"%s\\\" -> \\\"%s\\\"\", experiment[var][idx+2:end_idx], str(eval_res))\n experiment[var] = experiment[var][:idx] + str(eval_res) + experiment[var][end_idx+2:]\n\n if self.fwd_index[var]['finalized'] is True:\n computed.append(var)\n self.cast_variable(experiment, var)\n self.check_variable_value(experiment, var)\n else:\n partially_computed.append(var)\n\n return computed, partially_computed",
"def optimize(method, force=False, niter=1, dimTags=[]):\n api_dimTags_, api_dimTags_n_ = _ivectorpair(dimTags)\n ierr = c_int()\n lib.gmshModelMeshOptimize(\n c_char_p(method.encode()),\n c_int(bool(force)),\n c_int(niter),\n api_dimTags_, api_dimTags_n_,\n byref(ierr))\n if ierr.value != 0:\n raise ValueError(\n \"gmshModelMeshOptimize returned non-zero error code: \",\n ierr.value)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A function that initialises a maze with a number of larger rooms, or halls. A number of attempts are made to generate a larger room overlapping several maze rooms. If the larger room is fully inside the maze, and all rooms from which it is created are unflagged, the walls between those rooms, except walls leading out of the larger room, are removed, and the rooms are flagged.
|
def initialize(maze, randomizer, attempts = 20, max_width = None,
max_height = None):
max_width = max_width or maze.width // 3
max_height = max_height or maze.height // 3
def rooms(x, y, width, height):
"""Yields all rooms in the given hall.
"""
for i in range(width):
for j in range(height):
room_pos = (x + i, y + j)
if room_pos in maze:
yield room_pos
def walls(x, y, width, height):
"""Returns all walls surrounding a hall.
"""
def inside(wall):
if wall.room_pos[0] < x or wall.room_pos[0] >= x + width:
return False
if wall.room_pos[1] < y or wall.room_pos[1] >= y + height:
return False
return True
result = []
for i in range(width - 2): # Top
result.extend(wall
for wall in maze.walls((x + 1 + i, y))
if not inside(wall.back))
for i in range(height - 2): # Right
result.extend(wall
for wall in maze.walls((x + width - 1, y + 1 + i))
if not inside(wall.back))
        for i in range(width - 2): # Bottom
            result.extend(wall
                for wall in maze.walls((x + width - 2 - i, y + height - 1))
                if not inside(wall.back))
        for i in range(height - 2): # Left
            result.extend(wall
                for wall in maze.walls((x, y + height - 2 - i))
                if not inside(wall.back))
return result
while attempts:
attempts -= 1
# Randomize the room
        width = randomizer(max_width) + 1
        height = randomizer(max_height) + 1
x = randomizer(maze.width - width)
y = randomizer(maze.height - height)
# If any room inside the large room is not unknown, do nothing; keep a
# one-room margin
if any(not maze[room_pos].unknown
for room_pos in rooms(x - 1, y - 1, width + 2, height + 2)):
continue
# Open all internal walls of the hall
for room_pos in rooms(x, y, width, height):
for w in maze.walls(room_pos):
back_room_pos = w.back.room_pos
if back_room_pos[0] < x or back_room_pos[0] >= x + width:
continue
if back_room_pos[1] < y or back_room_pos[1] >= y + height:
continue
maze.set_door(room_pos, w, True)
maze[room_pos].flagged = True
# Open up some of the external walls of the hall
hall_walls = walls(x, y, width, height)
for wall in hall_walls:
if not wall.back in maze:
continue
if randomizer(len(hall_walls)) < 4:
maze.set_door(wall.room_pos, wall, True)
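
# Hedged usage sketch (not part of the original source). The maze object and
# its API (width, height, walls(), set_door(), room .unknown/.flagged flags,
# `pos in maze`) belong to the surrounding maze library and are only assumed
# here; the Maze(...) constructor below is a hypothetical stand-in. The only
# hard requirement on `randomizer` is randomizer(n) -> int in [0, n), which
# random.randrange satisfies.
if __name__ == "__main__":
    import random

    # Contract check: random.randrange(n) returns an int in [0, n).
    assert 0 <= random.randrange(10) < 10

    # Hypothetical invocation (requires the library's Maze class):
    # maze = Maze(30, 20)
    # initialize(maze, random.randrange, attempts=40, max_width=6, max_height=4)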
|
[
"def generate_rooms():\n total_rooms_cnt = random.randrange(min_rooms_cnt,max_rooms_cnt) # generate max number of rooms randomly\n for i in range(total_rooms_cnt):\n r,c = random.randrange(0,cell_width), random.randrange(0,cell_length)\n width, length = random.randrange(min_room_length,max_room_length), random.randrange(min_room_length,max_room_length)\n room = Room(r,c,width,length)\n # check overlapping between rooms\n if Room.overlap(room, rooms) == False:\n rooms.append(room)\n for room in rooms:\n for r in range(room.r, room.r+room.width):\n for c in range(room.c, room.c+room.length):\n cells[(r,c)] = 1",
"def build_maze(rows, cols, horizontal_walls, vertical_walls):\n\n maze = [ [False] * cols for _ in range(rows) ]\n\n for r, c, len in horizontal_walls:\n for _ in range(len):\n try: \n maze[r][c] = True\n except IndexError:\n print(\"?\", r,c, len)\n c += 1\n\n for r, c, len in vertical_walls:\n for _ in range(len):\n try: \n maze[r][c] = True\n except IndexError:\n print(\"?\", r,c, len)\n r += 1\n\n return maze",
"def maze(width, height, players=1, random_disposition=False, randseed=None):\n \n \"\"\"\n If given, use randseed to initialize random\n \"\"\"\n if randseed:\n seed(randseed)\n\n width = width / 2\n height = height / 2\n cellsize = 1\n cellsize1 = cellsize+1 # cellsize including one wall\n field_width = width*cellsize1+1\n field_height = height*cellsize1+1\n field = [1]*(field_width*field_height)\n stack = [(0, 0, shuffled(DIRECTIONS))]\n while stack:\n x, y, directions = stack[-1]\n dx, dy = directions.pop()\n # no other ways to go from here\n if not directions:\n stack.pop()\n # new cell\n nx = x+dx\n ny = y+dy\n # out of bounds\n if not (0 <= nx < width and 0 <= ny < height):\n continue\n # index of new cell in field\n fx = 1+nx*cellsize1\n fy = 1+ny*cellsize1\n fi = fx+fy*field_width\n # already visited\n if not field[fi]:\n continue\n # tear down walls\n if dx > 0:\n a = -1\n b = field_width\n elif dx < 0:\n a = cellsize\n b = field_width\n elif dy > 0:\n a = -field_width\n b = 1\n else:\n a = cellsize*field_width\n b = 1\n for offset in xrange(cellsize):\n field[fi+a+b*offset] = 0\n # clear cell\n for y in xrange(0, cellsize):\n for x in xrange(0, cellsize):\n field[fi+x+y*field_width] = 0\n # visit cell\n stack.append([nx, ny, shuffled(DIRECTIONS)])\n res = []\n w = (cellsize+1)*width+1\n h = (cellsize+1)*height+1\n for y in xrange(h):\n res.append(field[y*w:y*w+w])\n\n if random_disposition:\n \"\"\"\n Insert the 2 at the next-to bottom rows \n if there is any 0\n \"\"\"\n if 0 in res[-2]:\n random_insert(res, 2, len(res) - 2)\n elif 0 in res[-3]:\n random_insert(res, 2, len(res) - 3)\n else:\n \"\"\"\n No 0's in the bottom rows\n \"\"\"\n random_insert(res, 2)\n \"\"\"\n Insert the players (3, 4, ...)\n \"\"\"\n for p in range(players):\n random_insert(res, p + 3)\n else:\n \"\"\"\n Place the 2 at the bottom-right\n \"\"\"\n res[len(res) - 2][len(res[0]) - 2] = 2\n \"\"\"\n Place first player at top left\n \"\"\"\n res[1][1] = 3\n \"\"\"\n Place second player at top right\n \"\"\"\n if players > 1:\n res[1][len(res[0]) - 2] = 4\n \"\"\"\n Place third player at bottom left \n \"\"\"\n if players > 2:\n res[len(res) - 2][1] = 5\n return res",
"def empty_room(size2d):\n \n sizex, sizey = size2d\n room = []\n\n # top wall section\n room.append('w' * sizex)\n # rows with empty space in between\n room += ['w' + ' ' * (sizex - 2) + 'w' for i in range(sizey - 2)]\n # bottom wall section\n room.append('w' * sizex)\n\n return Place(room, w='wall')",
"def hallways():\n for i in range(len(rooms)-1):\n roomA = rooms[i]\n roomB = rooms[i+1]\n for r in range(roomA.r,roomB.r):\n cells[(r,roomA.c)] = 1\n for c in range(roomA.c, roomB.c):\n cells[(roomA.r,c)] = 1\n for r in range(roomB.r,roomA.r):\n cells[(r,roomA.c)] = 1\n for c in range(roomB.c, roomA.c):\n cells[(roomA.r,c)] = 1",
"def generate_map(self):\n while (self.room_count < self.room_limit):\n self.room_count += 1\n\n if (self.room_count <= self.room_limit/2):\n Dungeon.map.append(Direction.North.value)\n self.branches.append([self.current_room])\n self.current_room.north = Room(self.room_count)\n self.current_room.north.south = self.current_room\n self.current_room = self.current_room.north\n else:\n flag = False\n\n\n while (flag == False):\n self.random_integer = random.randint(1, self.room_limit/2)\n current_branch = self.branches[random_integer-1]\n room_list = getAvailableRooms(self.branches[random_integer-1])\n if(len(room_list)>0):\n \n \n\n \n\n\n # self.random_integer = random.randint(1, 4)\n # if(self.random_integer <= 6):\n # Dungeon.map.append(Direction.North.value)\n # self.current_room.north = Room(self.room_count)\n # self.current_room.north.south = self.current_room\n # self.current_room = self.current_room.north\n # elif(self.random_integer == 7 or self.random_integer == 8):\n # Dungeon.map.append(Direction.West.value)\n # self.current_room.west = Room(self.room_count)\n # self.current_room.west.east = self.current_room\n # self.current_room = self.current_room.west\n # elif(self.random_integer == 9 or self.random_integer == 10):\n # Dungeon.map.append(Direction.East.value)\n # self.current_room.east = Room(self.room_count)\n # self.current_room.east.west = self.current_room\n # self.current_room = self.current_room.east\n\n self.current_room = self.first_room",
"def grow_maze(width, height):\n # Initialise used array. This keeps track of which cells have been used\n row = [0] * width\n used = []\n for i in range(height):\n used.append(row.copy())\n used[0][0] = 1\n # Initialise array wall\n cell = [1, 1]\n row = []\n for i in range(width):\n row.append(cell.copy())\n wall = []\n for i in range(height):\n wall.append(deepcopy(row))\n # active walls will be the list of panels we're considering knocking down.\n active_panels = [[0, 0, 0], [0, 0, 1]]\n while active_panels:\n # knock down a panel\n knock_down = random.choice(active_panels)\n wall[knock_down[0]][knock_down[1]][knock_down[2]] = 0\n if used[knock_down[0]][knock_down[1]]:\n if knock_down[2] == 0:\n added_cell = [knock_down[0] + 1, knock_down[1]]\n else:\n assert knock_down[2] == 1\n added_cell = [knock_down[0], knock_down[1] + 1]\n else:\n added_cell = [knock_down[0], knock_down[1]]\n used[added_cell[0]][added_cell[1]] = 1\n\n def toggle(panel):\n if panel in active_panels:\n index = active_panels.index(panel)\n active_panels[index: index + 1] = []\n else:\n active_panels.append(panel)\n\n if added_cell[0] > 0:\n toggle([added_cell[0] - 1, added_cell[1], 0])\n if added_cell[1] > 0:\n toggle([added_cell[0], added_cell[1] - 1, 1])\n if added_cell[0] < height - 1:\n toggle([added_cell[0], added_cell[1], 0])\n if added_cell[1] < width - 1:\n toggle([added_cell[0], added_cell[1], 1])\n return wall, (width-1, height-1), (0, 0)",
"def rooms(x, y, width, height):\n for i in range(width):\n for j in range(height):\n room_pos = (x + i, y + j)\n if room_pos in maze:\n yield room_pos",
"def add_rooms(self) -> None:\n for i in range(self.num_room_tries):\n size = random.randint(1, 3 + self.room_extra_size) * 2 + 1\n rectangularity = random.randint(0, int(1 + size / 2)) * 2\n width = size\n height = size\n if random.randint(1, 3) == 1:\n width += rectangularity\n else:\n height += rectangularity\n\n x = random.randint(1, int((self.current_map_width - width - 1) / 2)) * 2 + 1\n y = random.randint(1, int((self.current_map_height - height - 1) / 2)) * 2 + 1\n\n room = pygame.Rect(x, y, width, height)\n\n overlaps = room.collidelist(self.rooms) != -1\n\n if not overlaps:\n self.rooms.append(room)\n self.start_region()\n self.carve(room, self.tile_texture)",
"def generate_dungeon(self):\n rooms = []\n num_rooms = 0\n \n for r in range(self._max_rooms):\n # Random width and height\n w = tcod.random_get_int(0, self._min_room_size, self._max_room_size)\n h = tcod.random_get_int(0, self._min_room_size, self._max_room_size)\n # Random position\n x = tcod.random_get_int(0, 0, self._width - w - 1)\n y = tcod.random_get_int(0, 0, self._height - h - 1)\n \n new_room = Rectangle(x, y, w, h)\n\n failed = False\n for other_room in rooms:\n if new_room.is_intersecting(other_room):\n failed = True\n break\n if not failed:\n # room is valid\n self.create_room(new_room)\n new_x, new_y = new_room.center()\n \n if num_rooms > 0:\n prev_x, prev_y = rooms[num_rooms - 1].center()\n\n if tcod.random_get_int(0, 0, 1) == 1:\n self.create_h_tunnel(prev_x, new_x, prev_y)\n self.create_v_tunnel(prev_y, new_y, new_x)\n else:\n self.create_v_tunnel(prev_y, new_y, prev_x)\n self.create_h_tunnel(prev_x, new_x, new_y)\n rooms.append(new_room)\n num_rooms += 1\n\n\n for y in range(self._height):\n for x in range(self._width):\n tcod.map_set_properties(self._fov_map, x, y, \\\n not self._map[x][y].block_sight, not self._map[x][y].blocking)\n return rooms[0].center()",
"def make_raw_maze(length, breadth, depth, start=(0.5,0.5,0.5)):\n #make maze full of walls and unvisited cells (this works, don't touch)\n maze = [[[1+((i%2)*(j%2)*(k%2)) for i in range(2*length+1)] for j in range(2*breadth+1)] for k in range(2*depth+1)]\n\n #make top and bottom borders\n for i in range(len(maze[0])):\n for j in range(len(maze[0][0])):\n maze[0][i][j]=3\n maze[-1][i][j]=3\n #make front and back borders\n for i in range(len(maze)):\n for j in range(len(maze[0][0])):\n maze[i][0][j]=3\n maze[i][-1][j]=3\n #make right and left borders\n for i in range(len(maze)):\n for j in range(len(maze[0])):\n maze[i][j][0]=3\n maze[i][j][-1]=3\n\n wall_list=[]\n finished=0\n \n #mark start cell as visited\n start_depth=2*int(start[2]*(depth-1))+1\n start_breadth=2*int(start[1]*(breadth-1))+1\n start_length=2*int(start[0]*(length-1))+1\n maze[start_depth][start_breadth][start_length] = 0\n\n while finished==0:\n #append the six walls of start cell to wall list\n #top\n if maze[start_depth+1][start_breadth][start_length]==1:\n wall_list.append((start_depth+1,start_breadth,start_length))\n #bottom\n if maze[start_depth-1][start_breadth][start_length]==1:\n wall_list.append((start_depth-1,start_breadth,start_length))\n #front\n if maze[start_depth][start_breadth+1][start_length]==1:\n wall_list.append((start_depth,start_breadth+1,start_length))\n #back\n if maze[start_depth][start_breadth-1][start_length]==1:\n wall_list.append((start_depth,start_breadth-1,start_length))\n #right\n if maze[start_depth][start_breadth][start_length+1]==1:\n wall_list.append((start_depth,start_breadth,start_length+1))\n #left\n if maze[start_depth][start_breadth][start_length-1]==1:\n wall_list.append((start_depth,start_breadth,start_length-1))\n\n #randomly select wall to check\n active_wall = random.choice(wall_list)\n wall_list.remove(active_wall)\n\n #look for adjacent, unvisited cells\n adjacent = (-1,-1,-1)\n if maze[active_wall[0]+1][active_wall[1]][active_wall[2]]==2:\n adjacent = (active_wall[0]+1,active_wall[1],active_wall[2])\n if maze[active_wall[0]-1][active_wall[1]][active_wall[2]]==2:\n adjacent = (active_wall[0]-1,active_wall[1],active_wall[2])\n if maze[active_wall[0]][active_wall[1]+1][active_wall[2]]==2:\n adjacent = (active_wall[0],active_wall[1]+1,active_wall[2])\n if maze[active_wall[0]][active_wall[1]-1][active_wall[2]]==2:\n adjacent = (active_wall[0],active_wall[1]-1,active_wall[2])\n if maze[active_wall[0]][active_wall[1]][active_wall[2]+1]==2:\n adjacent = (active_wall[0],active_wall[1],active_wall[2]+1)\n if maze[active_wall[0]][active_wall[1]][active_wall[2]-1]==2:\n adjacent = (active_wall[0],active_wall[1],active_wall[2]-1)\n\n #if there are any adjacent unvisited cells, knock the wall down and treat the unvisited cell as a new cell\n if adjacent != (-1,-1,-1):\n maze[active_wall[0]][active_wall[1]][active_wall[2]]=0\n start_depth=adjacent[0]\n start_breadth=adjacent[1]\n start_length=adjacent[2]\n maze[start_depth][start_breadth][start_length]=0\n\n #check if finished by looking for unvisited cells\n finished=1\n for i in range(len(maze)):\n for j in range(len(maze[0])):\n if 2 in maze[i][j]:\n finished=0\n\n #rewrite top and bottom borders\n for i in range(len(maze[0])):\n for j in range(len(maze[0][0])):\n maze[0][i][j]=1\n maze[-1][i][j]=1\n #rewrite front and back borders\n for i in range(len(maze)):\n for j in range(len(maze[0][0])):\n maze[i][0][j]=1\n maze[i][-1][j]=1\n #rewrite right and left borders\n for i in range(len(maze)):\n for j in range(len(maze[0])):\n 
maze[i][j][0]=1\n maze[i][j][-1]=1\n\n # for floor in maze:\n # for row in floor:\n # print row\n # print\n print(\"Maze generated.\")\n return maze",
"def base_builder (min_rooms=0, top_left=None, top_right=None, bottom_left=None, bottom_right=None, tl_corr=False, tr_corr=False, bl_corr=False, br_corr=False,top_height=None, bottom_height=None):\n if top_left == None:\n top_left = random.choice(ROOM_WIDTH_LIST)\n if top_right == None:\n top_right = random.choice(ROOM_WIDTH_LIST)\n if bottom_left == None:\n bottom_left = random.choice(ROOM_WIDTH_LIST)\n if bottom_right == None:\n bottom_right = random.choice(ROOM_WIDTH_LIST)\n\n # tl_corr = True\n # tr_corr = True\n # bl_corr = True\n # br_corr = True\n print \"tl: %s, tr: %s, bl: %s, br: %s\" % (top_left, top_right, bottom_left, bottom_right)\n print \"tl: %s, tr: %s, bl: %s, br: %s\" % (tl_corr, tr_corr, bl_corr, br_corr)\n # Top row of rooms\n row1 = []\n # Corridor, then bottom row of rooms\n row2 = []\n\n max_length = 6*12 # currently unused\n # manor_width = random.randint(max_length/2, max_length)\n\n # Decide the row heights.\n if top_height == None:\n top_height = random_room_height()\n if bottom_height == None:\n bottom_height = random_room_height()\n\n print \"top_height: %s, bottom_height: %s\" % (top_height, bottom_height)\n\n # first rooms on either row\n height1 = top_height\n height2 = bottom_height\n check_overlap = False\n if top_left < bottom_left or top_left == bottom_left and coinflip():\n height1 += 2\n else:\n height2 += 2\n check_overlap = True\n\n first = room.Room(width=top_left, height=height1)\n row1.append(first)\n first = room.Room(width=bottom_left, height=height2)\n row2.append(first)\n # print \"first rooms: height1=%s, height2=%s\" % (height1, height2)\n\n length1 = top_left + top_right - 2\n if tl_corr:\n length1 += 2\n if tr_corr:\n length1 += 2\n length2 = bottom_left + bottom_right - 2\n if bl_corr:\n length2 += 2\n if br_corr:\n length2 += 2\n print \"Row 1:\"\n print \"room 1: w=%s, length1: %s\" % (top_left, length1)\n while len(row1) <= 5:\n # If we have four rooms, one in three chance of not adding any more\n # rooms.\n if len(row1) > 3 and one_chance_in(3):\n break\n\n new_room = room.Room(width=random.choice(ROOM_WIDTH_LIST), height=top_height)\n row1.append(new_room)\n length1 += new_room.width - 1\n print \"room %s: w=%s, length1: %s\" % (len(row1), new_room.width, length1)\n print \"room %s: w=%s\" % (len(row1)+1, top_right)\n\n manor_width = length1\n\n print \"\\nRow 2:\"\n print \"room 1: w=%s, length2: %s\" % (bottom_left, length2)\n while length2 < manor_width:\n dist_left = manor_width - length2 + 1\n if dist_left < 14:\n new_width = dist_left\n else:\n new_width = random.choice(ROOM_WIDTH_LIST)\n next_width = dist_left - new_width\n if next_width < 7:\n new_width = random.choice((6,7,8))\n new_room = room.Room(width=new_width, height=bottom_height)\n row2.append(new_room)\n length2 += new_width - 1\n print \"room %s: w=%s, length2: %s\" % (len(row2), new_width, length2)\n print \"room %s: w=%s\" % (len(row2)+1, bottom_right)\n\n # last rooms on either row\n height1 = top_height\n height2 = bottom_height\n if top_right < bottom_right or top_right == bottom_right and coinflip():\n height1 += 2\n check_overlap = False\n else:\n height2 += 2\n # check_overlap = True\n # print \"last rooms: height1=%s, height2=%s\" % (height1, height2)\n\n last = room.Room(width=top_right, height=height1)\n row1.append(last)\n last = room.Room(width=bottom_right, height=height2)\n row2.append(last)\n print \"\\nrow1: %s rooms, row2: %s rooms, manor width: %s\" % (len(row1), len(row2), manor_width)\n\n # Try to get the minimum number of rooms.\n if 
len(row1) + len(row2) < min_rooms:\n return base_builder(min_rooms - 1)\n\n # Now, start drawing it! YAY!\n\n # First row\n row1_collection = join_row_rooms(row1, tl_corr, tr_corr)\n\n # second row\n row2_collection = join_row_rooms(row2, bl_corr, br_corr, True)\n\n # Finally, make a corridor!\n overlap = 3\n if check_overlap:\n overlap = 1\n my_collection = shape.underneath(row1_collection, row2_collection, overlap=overlap, collect=True)\n m = BuilderCollection(my_collection)\n\n noncorr_left = min(top_left, bottom_left)\n noncorr_right = min(top_right, bottom_right)\n corridor_length = my_collection.width() - noncorr_left - noncorr_right\n # print \"noncorr_left: %s, noncorr_right: %s, corridor_length: %s\" % (noncorr_left, noncorr_right, corridor_length)\n corridor = MainCorridor(shape.Row(width=corridor_length, fill=\".\"))\n\n m.append(collection.ShapeCoord(corridor, coord.Coord(noncorr_left, top_height)))\n\n return m",
"def _make_holes(self, depth = None):\n depth = depth or self.z_drilling\n direction = 1\n for row in range(self.s.rows):\n cols = range(self.s.cols)\n if direction==-1:\n cols = list(reversed(cols))\n if not filter(lambda k : self.s.is_hole(k, row), cols):\n continue\n for col in cols:\n if not self.s.is_hole(col, row):\n continue\n self._set_cutting(False)\n xpos, ypos, zpos = self._get_position(col, row)\n self.g.move(x=xpos + self.s.get_cell_width(col)/2.0, y=ypos + self.s.get_cell_height(row)/2.0)\n self._set_cutting(True, depth = depth)\n direction = -direction",
"def make(self):\n if self.method == 'dfs':\n cell_stack = [self.maze_map[self.i0][self.j0]]\n nv = 1\n N = self.p * self.q\n while nv < N:\n neighbours = self.get_neighbours(cell_stack[-1], kind='unvisited')\n if not neighbours:\n cell_stack.pop()\n continue\n cell_stack.append(random.choice(neighbours))\n Cell.break_wall(cell_stack[-2], cell_stack[-1])\n nv += 1\n elif self.method == 'prim':\n current_cell = self.maze_map[self.i0][self.j0]\n current_cell.prim_visited = True\n cell_stack = self.get_neighbours(current_cell)\n next_cell = random.choice(cell_stack)\n Cell.break_wall(current_cell, next_cell)\n next_cell.prim_visited = True\n cell_stack = list(set(cell_stack).union(self.get_neighbours(next_cell, kind='unvisited')))\n cell_stack.remove(next_cell)\n while cell_stack:\n next_cell = random.choice(cell_stack)\n next_cell.prim_visited = True\n valid_neighbours = [c for c in self.get_neighbours(next_cell) if c.prim_visited]\n if valid_neighbours:\n other_cell = random.choice(valid_neighbours)\n Cell.break_wall(next_cell, other_cell)\n cell_stack = list(set(cell_stack).union(self.get_neighbours(next_cell, kind='unvisited')))\n cell_stack.remove(next_cell)\n else:\n raise ValueError('{0} is an unknow/unsupported method for maze generation'.format(self.method))\n self.break_dead_ends()",
"def generate_level(self):\n room_coords = self.recursive_divide(0, 0, GAME_HEIGHT, GAME_WIDTH)\n debug(room_coords)\n\n self.rooms = [Room(y, x, h, w) for y, x, h, w in room_coords]\n self.entrance, self.exit = self.generate_stairs()\n\n for object_ in self.architecture:\n self.add_to_grid(object_)\n\n for room in self.rooms:\n threshold = 0.2 + 4 * (room.w * room.h) / (GAME_WIDTH * GAME_HEIGHT)\n while random.random() < threshold:\n tile_y = random.randrange(room.y, room.y + room.h)\n tile_x = random.randrange(room.x, room.x + room.w)\n monster = self.generate_random_monster(tile_y, tile_x)\n self.creatures += [monster]\n threshold = max(0.1, threshold - 0.1)",
"def generateMapStr(XMAX_in,YMAX_in,minWidth=3,wallWidth=1,start_location = [0,0],TOTAL_CHECKPOINTS=10,START_DIRECTION = \"E\",LEVEL = 2):\n \n # Normalize inputs. I take the required full grid size and make it a proper \n # multiple of the width of the passage and walls. \n final_XMAX = (XMAX_in-1)//(minWidth+wallWidth)*(minWidth+wallWidth)+1\n final_YMAX = (YMAX_in-1)//(minWidth+wallWidth)*(minWidth+wallWidth)+1\n\n # final_grid is what we will return at the end of the day, 1 for wall \n # 0 for empty and 2-9 a-z A-Z or something for checkpoints\n final_grid = [[0 for _ in range(final_YMAX)] for _ in range(final_XMAX)]\n \n # Reduced effective grid by a factor of (minWidth+wallWidth)for throwing a sort of path finding algorithm in \n # and then generatiung the larger grid from that \n XMAX,YMAX = final_XMAX//(minWidth+wallWidth),final_YMAX//(minWidth+wallWidth)\n AREA = XMAX*YMAX\n \n # Will fill with directions [N]orth [S]outh [E]ast [W]est or None if the position has not been assigned yet. \n effective_grid = [[None for _ in range(YMAX)] for _ in range(XMAX)] \n \n # List version of the paths in effective_grid\n relative_path = []\n from_end_path = []\n \n # Decide acceptable threshold\n MAX_UNUSED = 8*XMAX#Maximum numbver of \"dead\" spots in the effective grid\n MAX_ITERATION = 50000# Maximum number of times we will try to make a maze\n \n # some constants and initializations\n current_pos = [s for s in start_location]\n end_pos = [s for s in start_location]\n \n # Loop variables\n done = False # Signifies when the loop is closed, it might have done = True but not be long enough and return to building\n count = 0\n \n # MAIN LOOP\n while done == False:\n \n if done:\n # Here we are next to the start location but we got here too early\n # Go back a random number of times\n togoback = random.randint(int(len(relative_path)/3+2),int(len(relative_path)/1.1+2)) # probably needs tuning\n goBack(relative_path,effective_grid,current_pos,togoback) # Go back specified number of steps\n done = False\n continue\n \n # Normal attempt to exit if we are right beside the start location \n # Add in last path step to complete the circuit, then continue loop \n # to check if we have used enough spaces\n rel_pos = tuple([p-d for p,d in zip(current_pos,end_pos)])\n if abs(rel_pos[0]) + abs(rel_pos[1]) == 1 and count > 1 and AREA-len(relative_path) < MAX_UNUSED:\n done = True\n print(\"attempting to end\")\n temp_letter = REVERSE_DIRECTIONS[rel_pos]\n relative_path.append(temp_letter)\n effective_grid[start_location[0]+rel_pos[0]][start_location[1]+rel_pos[1]] = temp_letter\n current_pos = [s for s in end_pos]\n continue\n \n # Generate dictionary of direction options that are available this time by copying \n # all options and removing any that are blocked by our previous path or boundaries\n temp_directions = copy.copy(directions)\n if(current_pos[0] == 0 or effective_grid[current_pos[0]+directions[\"N\"][0]][current_pos[1]+directions[\"N\"][1]] is not None): del temp_directions[\"N\"]\n if(current_pos[0] == XMAX-1 or effective_grid[current_pos[0]+directions[\"S\"][0]][current_pos[1]+directions[\"S\"][1]] is not None): del temp_directions[\"S\"]\n if(current_pos[1] == YMAX-1 or effective_grid[current_pos[0]+directions[\"E\"][0]][current_pos[1]+directions[\"E\"][1]] is not None): del temp_directions[\"E\"]\n if(current_pos[1] == 0 or effective_grid[current_pos[0]+directions[\"W\"][0]][current_pos[1]+directions[\"W\"][1]] is not None): del temp_directions[\"W\"]\n \n \n # If no options remain, we have hit 
a dead end and must go back some steps and restart loop\n if len(temp_directions) == 0: \n # Go back a random number of times\n togoback = random.randint(int(len(relative_path)/8)+2,int(len(relative_path)/2)+2)\n goBack(relative_path,effective_grid,current_pos,togoback)\n continue # for i in range(1000)\n \n # Choose a direction option and apply it\n if len(relative_path) == 0:\n temp_dir = START_DIRECTION\n else:\n temp_dir = random.choice(list(temp_directions.keys()))\n relative_path.append(temp_dir)\n effective_grid[current_pos[0]][current_pos[1]] = temp_dir\n current_pos = [p+d for p,d in zip(current_pos,temp_directions[temp_dir])]\n\n # Emergency exit, for if we are trying too long on one attempt\n count +=1\n if count > MAX_ITERATION:\n #print(\"Iteration number too high, ABORT\")\n return False\n \n print(\"DONE final grid:::\")\n print2DGrid(effective_grid)\n \n # Time to transfer to 1s and 0s:\n # Basic version of the grid, essentially a lined grid where we will break down the walls\n # Where our path from the previous part of the algorithm takes us.\n # can optimise!!\n for i in range(final_XMAX):\n for j in range(final_YMAX):\n if i==0 or j==0 or i%(minWidth+wallWidth)==0 or j%(minWidth+wallWidth)==0 :\n final_grid[i][j] = 1\n \n \n \n \n # SMASH WALLS\n # Walk through the grid using the path we generated and knock down all the walls to create a closed track\n current_pos = [s*(minWidth+wallWidth)+wallWidth for s in start_location] # top left\n total_path_length = len(relative_path)\n for n,p in enumerate(relative_path):\n deltapos = [s*(minWidth+wallWidth) for s in directions[p]]\n deltazero = [minWidth if s==0 else s*(minWidth+wallWidth) for s in directions[p]]\n for i in range(current_pos[0],current_pos[0]+deltazero[0],deltazero[0]//abs(deltazero[0])):\n for j in range(current_pos[1],current_pos[1]+deltazero[1],deltazero[1]//abs(deltazero[1])):\n final_grid[i][j] = 0\n current_pos = [c+d for c,d in zip(current_pos,deltapos)] \n \n \n # CHECKPOINTS \n # Steps through the grid and every so often drops a checkpoint equally spaced \n current_pos = [s*(minWidth+wallWidth)+wallWidth for s in start_location] # top left \n temp_last_time = 1E99\n for n,p in enumerate(relative_path):\n deltapos = [s*(minWidth+wallWidth) for s in directions[p]]\n deltazero = [minWidth if s==0 else s*(minWidth+wallWidth) for s in directions[p]]\n if (n*TOTAL_CHECKPOINTS)%total_path_length < temp_last_time:\n # We place a checkpoint\n print(current_pos,deltazero,len(final_grid),len(final_grid[0]))\n final_grid[current_pos[0]+deltazero[0]//2][current_pos[1]+deltazero[1]//2] = chr(65+(n*TOTAL_CHECKPOINTS)//total_path_length)\n pass\n current_pos = [c+d for c,d in zip(current_pos,deltapos)]\n temp_last_time = (n*TOTAL_CHECKPOINTS)%total_path_length \n print2DGrid(final_grid)\n \n #SAVE GRID\n write2DGrid(FILE_PREFIX+str(LEVEL)+FILE_SUFFIX)\n \n \n return True",
"def building_ruined_house(w=6, h=6, material=None):\n\n # Initial checks. Don't accept too small/big house.\n if w < 6 or h < 6:\n raise ValueError('Building is too small: w or h < 6')\n elif w > 10 or h > 10:\n raise ValueError('Building is too big: w or h > 10')\n\n # Choose materials\n wall_material = None\n if not material:\n wall_material = random.choice([C.wall_block, C.wall_plank, C.wall_stone, C.wall_brick])\n elif material not in (['block', 'plank', 'stone', 'brick']):\n raise ValueError('Material should be \"block\", \"plank\", \"stone\" or \"brick\"')\n\n if material == 'stone':\n wall_material = C.wall_stone\n elif material == 'block':\n wall_material = C.wall_block\n elif material == 'plank':\n wall_material = C.wall_plank\n elif material == 'brick':\n wall_material = C.wall_brick\n\n M = room_default(w, h, wall_type=wall_material, floor_type=C.floor_rocks)\n\n # Calculate % of replaced walls and added grass. 10% for walls and 20% for grass.\n grass_count = int((w - 2) * (h - 2) * 0.2)\n wall_ruined = int(w * h * 0.1)\n M[w//2, h-1] = C.door_open_dark()\n\n # Place some furniture and animals.\n all_coord = [(w//2, h-1), (w//2, h-2)]\n for item_class in (\n T.furniture_chimney, \n A.animal_bat,\n A.animal_spider,\n T.web,\n T.furniture_barrel\n ):\n while True:\n x = random.randint(1, w-2)\n y = random.randint(1, h-2)\n if (x, y) not in all_coord:\n M[x, y].put(item_class())\n all_coord.append((x, y))\n break\n\n # Place some grass.\n for _ in range(grass_count):\n while True:\n x = random.randint(0, w-1)\n y = random.randint(0, h-1)\n if (x, y) not in all_coord:\n M[x, y] = C.flora_grass()\n all_coord.append((x, y))\n break\n\n # Replace some walls with rocks.\n for _ in range(wall_ruined):\n while True:\n x = random.randint(0, w-1)\n y = random.choice([0, h-1])\n if (x, y) not in all_coord:\n M[x, y] = C.floor_rocks()\n all_coord.append((x, y))\n break\n\n return M",
"def generate_floor_and_ceiling(room):\n if room.is_filled():\n return\n\n floor_max = min(room.height - CEILING_MINIMUM - FLOOR_TO_CEILING_MINIMUM, FLOOR_MAXIMUM)\n ceiling_max = min(room.height - FLOOR_MINIMUM - FLOOR_TO_CEILING_MINIMUM, CEILING_MAXIMUM)\n\n while True:\n floor_height = random.randrange(FLOOR_MINIMUM, floor_max + 1)\n ceiling_height = random.randrange(CEILING_MINIMUM, ceiling_max + 1)\n if room.height - ceiling_height - floor_height >= FLOOR_TO_CEILING_MINIMUM:\n break\n room.floor_height = floor_height\n room.ceiling_height = ceiling_height\n room.floor_subview().fill(TILE_FLOOR)\n room.ceiling_subview().fill(TILE_CEILING)",
"def __init__(self, width=30, height=20, nbombs=99):\n\n assert nbombs <= (width * height), \"You can't have more bombs than cells\"\n\n self.__width = width\n self.__height = height\n self.__nbBombs = nbombs\n self.__nbCellsUnrevealed = height * width\n self.__gameState = GameState.unfinished\n self.__grid = []\n\n #On crée des cellules de classe cell, pour l'instant vides de tout contenu\n\n for i in range(self.__height) :\n line = []\n for j in range(self.__width) :\n line.append(Cell())\n line[j].set_coords(j, i)\n self.__grid.append(line)\n\n #On ajoute ensuite des bombes à des endroits aléatoires\n\n bombsPlaced = 0\n bombList = []\n while bombsPlaced < self.__nbBombs :\n x = random.randint(0, self.__width - 1)\n y = random.randint(0, self.__height - 1)\n randCoords = (x, y)\n if randCoords not in bombList :\n bombList += [randCoords]\n self.get_cell(x, y).set_bomb()\n bombsPlaced += 1\n\n # Il faut ensuite indiquer le nombre de bombes voisines a chaque cellule\n\n for i in range(self.__height) :\n for j in range(self.__width) :\n if (j, i) not in bombList :\n for (u, v) in neighborhood(j, i, self.__width, self.__height) :\n if self.get_cell(u, v).is_bomb() :\n self.get_cell(j, i).incr_number_of_bombs_in_neighborhood()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Yields all rooms in the given hall.
|
def rooms(x, y, width, height):
for i in range(width):
for j in range(height):
room_pos = (x + i, y + j)
if room_pos in maze:
yield room_pos
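
# Hedged note (not part of the original source): in the hall initialiser that
# appears earlier in this dump, rooms() is used to enumerate every maze
# position covered by a hall, so the caller can test the .unknown flag, open
# the internal walls and set .flagged on each room, e.g.:
#
#     for room_pos in rooms(x, y, width, height):
#         for w in maze.walls(room_pos):
#             ...
#         maze[room_pos].flagged = True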
|
[
"def _get_all_rooms(klass, floor):\n unidentified_rooms = floor.get(\"unidentified_rooms\", [])\n unidentified_rooms = (\n (None, room) for room in unidentified_rooms )\n rooms = floor.get(\"rooms\", {})\n room_items = (\n (rid, room) for rid, room in rooms.items() if \"polygon\" in room\n )\n return chain(room_items, unidentified_rooms)",
"def list(self, **kwargs):\n apiparm = ['teamId', 'max,', 'type']\n items = self.api.session.get_items(\n self._API_ENTRY_SUFFIX, apiparm, **kwargs)\n # Yield Room objects created from the returned items JSON objects\n for item in items:\n yield Room(item)",
"def hallways():\n for i in range(len(rooms)-1):\n roomA = rooms[i]\n roomB = rooms[i+1]\n for r in range(roomA.r,roomB.r):\n cells[(r,roomA.c)] = 1\n for c in range(roomA.c, roomB.c):\n cells[(roomA.r,c)] = 1\n for r in range(roomB.r,roomA.r):\n cells[(r,roomA.c)] = 1\n for c in range(roomB.c, roomA.c):\n cells[(roomA.r,c)] = 1",
"def roomates_of(agent, graph, agents_at):\n if agent.room:\n yield from (roomate for roomate in agents_at[agent.room]\n if roomate != agent)",
"def getRoomsByBuildingAndFloor(self, bid, rfloor):\n cursor = self.conn.cursor()\n query = sql.SQL(\"select {fields} from {table1} \"\n \"left outer join {table2} \"\n \"on {table1}.{table1Identifier} = {table2}.{table2Identifier} \"\n \"where {pkey1}= %s and {pkey2}= %s\"\n \"order by {orderkey};\").format(\n fields=sql.SQL(',').join([\n sql.Identifier('rid'),\n sql.Identifier('bid'),\n sql.Identifier('rcode'),\n sql.Identifier('rfloor'),\n sql.Identifier('rdescription'),\n sql.Identifier('roccupancy'),\n sql.Identifier('rdept'),\n sql.Identifier('rcustodian'),\n sql.Identifier('rlongitude'),\n sql.Identifier('rlatitude'),\n sql.Identifier('raltitude'),\n sql.Identifier('photourl')\n ]),\n table1=sql.Identifier('rooms'),\n table2=sql.Identifier('photos'),\n table1Identifier=sql.Identifier('photoid'),\n table2Identifier=sql.Identifier('photoid'),\n pkey1=sql.Identifier('bid'),\n pkey2=sql.Identifier('rfloor'),\n orderkey=sql.Identifier('rcode'))\n cursor.execute(query, (int(bid), int(rfloor)))\n result = []\n for row in cursor:\n result.append(row)\n return result",
"def get_all_rides():",
"def parse_rooms(data):\n room_names = data.find_all('h2')\n rooms = []\n for r_name in room_names[2:8]: \n room_bundles = parse_room_bundles(data, r_name.text)\n rooms.append(CommunityCenterRoom(r_name.text, room_bundles))\n return rooms",
"def all_games(self):\r\n\t\tfor game in self.games.values():\r\n\t\t\tyield game",
"def explore_room(room):\n time_check()\n items = [i[\"name\"] for i in object_relations[room[\"name\"]]]\n print(\"You explore the room. This is \" + room[\"name\"] + \". You find \" + \", \".join(items))",
"def map_the_home(hlst):\n\n hdct = {}\n for e in hlst:\n if e.room in hdct.keys():\n hdct[e.room].append(e)\n else:\n hdct[e.room] = [e]\n return hdct",
"def extract_house_rooms(self, intent_message):\n house_rooms = []\n if intent_message.slots.house_room:\n for room in intent_message.slots.house_room.all():\n type(room.value)\n house_rooms.append(room.value)\n return house_rooms",
"def getrooms(self):\n for room in self.rooms:\n if room[0] != self.spawnHunter or self.spawnWumpus:\n whatItem = randrange(0, 3) # A 1 in 3 chance to get one of the items\n if randrange(0, 101) <= self.chance:\n if whatItem == 0:\n room[1] = \"bat\"\n elif whatItem == 1:\n room[1] = \"gold\"\n else:\n room[1] = \"pit\"\n return self.rooms",
"def test_get_rooms(self):\n self.board.get_rooms",
"async def list_Room(\n fields: str = Query(\n None, description=\"Comma-separated properties to be provided in response\"\n ),\n offset: int = Query(\n 0,\n description=\"Requested index for start of resources to be provided in response\",\n ),\n limit: int = Query(\n 10, description=\"Requested number of resources to be provided in response\"\n ),\n) -> List[Room]:\n\n try:\n return await get_db().find(Room, skip=offset, limit=limit)\n except (HTTPException, Exception) as e:\n # TODO handel 400 401 403 405 409\n raise e",
"def find_room(self, t_date, t_period):\r\n room_id = []\r\n with sqlite3.connect('system.db') as conn:\r\n cursor = conn.cursor()\r\n if self.instrument == 0:\r\n sql = \"SELECT roomID from tblRooms WHERE piano=1\"\r\n elif self.instrument == 1:\r\n sql = \"SELECT roomID from tblRooms WHERE drum=1\"\r\n else:\r\n sql = \"SELECT roomID from tblRooms\"\r\n for result in cursor.execute(sql):\r\n room_id.append(result[0])\r\n\r\n random.shuffle(room_id)\r\n\r\n for i in range(len(room_id)):\r\n room = room_id[i]\r\n new_booking = Booking.Booking(room, t_date, t_period)\r\n if new_booking.check_avail():\r\n record = [room, str(t_date), t_period]\r\n return record\r\n\r\n record = [random.choice(room_id), str(t_date), t_period]\r\n self.wait_list.append(record)\r\n return []",
"def get(self):\n results = db.session.query(RoomModel).all()\n\n if not results:\n abort(404, error_code=404, error_msg='No room exists in the database')\n\n rooms = {}\n for record in results:\n rooms[record._id] = {\n 'name': record.name, \n 'room_admin_name': record.user.name\n }\n\n return [rooms], 200",
"def exits_of_rooms(self, rooms):\n return self.model.objects.filter(Q(location__in=rooms) or Q(destination__in=rooms))",
"def find_room(self, day, start='00:00', end='24:00'):\n rooms, rooms_joined = {}, {}\n dehu_start, dehu_end = dehumanize_time(start), dehumanize_time(end)\n for room, time in self.c.execute('SELECT room, time FROM rooms WHERE day = {} AND '\n 'time >= {} AND time <= {} AND taken = 0 '\n 'ORDER BY room, time'\n .format(DAYS[day], dehu_start, dehu_end)):\n if room not in rooms:\n rooms[room] = [(time, time+25)]\n else:\n rooms[room].append((time, time+25))\n\n for room, times in rooms.items():\n consolidated_times = consolidate_times(times)\n for time_range in consolidated_times:\n if time_range[0] <= dehu_start and time_range[1] >= dehu_end:\n rooms_joined[room] = consolidated_times\n break\n return rooms_joined",
"def allergens(self, *, allergens):\n\n sql = \"\"\"\n WITH food_allergens AS (\n SELECT food.id AS food_id\n FROM recipes_food food\n LEFT JOIN recipes_food_allergens food_allergen\n ON food.id = food_allergen.food_id\n LEFT JOIN recipes_allergen allergen\n ON food_allergen.allergen_id = allergen.id\n WHERE allergen.name IN %s\n ),\n recipes_with_allergens AS (\n SELECT DISTINCT recipe.id\n FROM recipes_recipe recipe\n LEFT JOIN recipes_ingredient ingredient\n ON recipe.id = ingredient.recipe_id\n LEFT JOIN recipes_food food\n ON ingredient.food_id = food.id\n WHERE food.id IN (\n SELECT DISTINCT food_id \n FROM food_allergens\n ) \n )\n SELECT *\n FROM recipes_recipe recipe\n WHERE recipe.id NOT IN (\n SELECT id\n FROM recipes_with_allergens\n );\n \"\"\"\n return self.raw(raw_query=sql, params=[allergens])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns all walls surrounding a hall.
|
def walls(x, y, width, height):
def inside(wall):
if wall.room_pos[0] < x or wall.room_pos[0] >= x + width:
return False
if wall.room_pos[1] < y or wall.room_pos[1] >= y + height:
return False
return True
result = []
for i in range(width - 2): # Top
result.extend(wall
for wall in maze.walls((x + 1 + i, y))
if not inside(wall.back))
for i in range(height - 2): # Right
result.extend(wall
for wall in maze.walls((x + width - 1, y + 1 + i))
if not inside(wall.back))
    for i in range(width - 2): # Bottom
        result.extend(wall
            for wall in maze.walls((x + width - 2 - i, y + height - 1))
            if not inside(wall.back))
    for i in range(height - 2): # Left
        result.extend(wall
            for wall in maze.walls((x, y + height - 2 - i))
            if not inside(wall.back))
return result
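
# Hedged note (not part of the original source): as in the hall initialiser
# that appears earlier in this dump, the returned outward-facing walls are
# typically filtered to those whose back room lies inside the maze, and a
# random subset of them is opened as doors, e.g.:
#
#     hall_walls = walls(x, y, width, height)
#     for wall in hall_walls:
#         if wall.back in maze and randomizer(len(hall_walls)) < 4:
#             maze.set_door(wall.room_pos, wall, True)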
|
[
"def getRoomWalls(self):\n return self.tile_holder[0].getTileWalls()",
"def walls(self):",
"def get_walls_positions(self):\n walls_positions = []\n for y, line in enumerate(self.lines_list):\n for x, char in enumerate(line):\n if char == \"W\":\n walls_positions.append((x, y))\n return walls_positions",
"def get_walls(origin, tiles):\n\n res = []\n for p, tile in tiles.items():\n if tile == Tile.wall:\n # Transform into world coordinates from the room coordinates\n p1 = (p[0] + origin[0], p[1] + origin[1])\n res.append(p1)\n\n return res",
"def get_walls_data(self, level_bin_data):\n\n walls = []\n\n for i in range(_ROWS - 1):\n for j in range(_COLUMNS - 1):\n if level_bin_data[34][i][j] == 1:\n if level_bin_data[34][i + 1][j] == 1:\n walls.append([(i, j), (i + 1, j)])\n\n if level_bin_data[34][i][j + 1] == 1:\n walls.append([(i, j), (i, j + 1)])\n\n return walls",
"def draw_walls(self):\n\t\twall_keys = list(self.gridworld.wall_map.keys())\n\t\tfor i in range(0, len(wall_keys)):\n\t\t\twall_loc = eval(wall_keys[i])\n\t\t\t#top left triangle\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, wall_loc[1]*self.cell_height) #top left of cell\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom left of cell\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, wall_loc[1]*self.cell_height) #top right of cell\n\t\t\t#bottom right triangle\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom right of cell\n\t\t\tpyglet.gl.glVertex2f(wall_loc[0]*self.cell_width, (wall_loc[1]+1)*self.cell_height) #bottom left of cell\n\t\t\tpyglet.gl.glVertex2f((wall_loc[0]+1)*self.cell_width, wall_loc[1]*self.cell_height) #top right of cell",
"def get_neighbourhood(self, winner):\n\t\tnr_rows = self.W.shape[0]\n\t\tnr_cols = self.W.shape[1]\n\n\t\trow_span = np.arange(winner[0] - self.radius, winner[0] + self.radius + 1)\n\t\tcol_span = np.arange(winner[1] - self.radius, winner[1] + self.radius + 1)\n\n\t\tneighbourhood = []\n\t\tfor i in range((2*self.radius) + 1):\n\t\t\tfor j in range((2*self.radius) + 1):\n\t\t\t\tif((row_span[i] > (nr_rows - 1)) or (row_span[i] < 0) \\\n\t\t\t\t\tor (col_span[j] > (nr_cols - 1)) or (col_span[j] < 0)):\n\t\t\t\t\tcontinue\n\t\t\t\telse: \n\t\t\t\t\tneighbourhood.append([row_span[i], col_span[j]])\n\n\t\treturn neighbourhood",
"def singular_ranges(self):\n walls_on_coordinates = {'top': {}, 'bottom': {},\n 'left': {}, 'right': {}}\n for direction, coordinates in self.__walls.items():\n if direction in ('left', 'right'):\n for i in coordinates:\n if i[0] not in walls_on_coordinates[direction]:\n walls_on_coordinates[direction][i[0]] = []\n self.__wall_ranges[direction][i[0]] = []\n walls_on_coordinates[direction][i[0]].append(i[1])\n else:\n for i in coordinates:\n if i[1] not in walls_on_coordinates[direction]:\n walls_on_coordinates[direction][i[1]] = []\n self.__wall_ranges[direction][i[1]] = []\n walls_on_coordinates[direction][i[1]].append(i[0])\n\n for direction, coordinates in walls_on_coordinates.items():\n for cross_coord, coord_list in coordinates.items():\n self.__wall_ranges[direction][cross_coord] = self.get_ranges(\n coord_list)",
"def draw_house_walls(x, y, width, height):\n print('Drawing house walls', x, y, width, height)\n pass",
"def find_corners(self, list_of_walls):\n list_of_corners = CornerList()\n\n\n for first_wall in list_of_walls.wall_list:\n for second_wall in list_of_walls.wall_list:\n if first_wall == second_wall:\n continue\n if first_wall.wall_end == second_wall.wall_start:\n corner_angle = self.angle_between_lines(first_wall, second_wall)\n if 50 < corner_angle < 310:\n self.create_corner(list_of_corners, first_wall, second_wall)\n if first_wall.wall_start_rupture or first_wall.wall_start_break or first_wall.wall_end_rupture or first_wall.wall_end_break:\n # we are not only wanting normal corners but also potential corners\n\n # however we probably will need to refine the selection of potential corners\n # TODO refine the selection of potential corners :)\n self.create_potential_corner(list_of_corners, first_wall)\n\n\n\n\n return list_of_corners",
"def getBorderCells(self, gameState):\n\n # List that will hold the cells that lie on our teams side, but border the other team\n borderCells = []\n\n # Obtain the matrix of walls around the entire map\n wallsMatrix = gameState.data.layout.walls\n wallsList = wallsMatrix.asList()\n\n # Using the width of the map, calculate the Red and Blue teams border cell column value\n # Offset the final values by 1 to ensure our agent stays in friendly territory\n layoutX = wallsMatrix.width\n redX = ((layoutX - 1) / 2) - 1\n blueX = ((int)(math.ceil((float)(layoutX - 1) / 2))) + 1\n\n # Using the height of the map, the number of rows, loop through the number of rows\n # and add the cells to the return list that are not walls\n layoutY = wallsMatrix.height - 1\n if (gameState.isOnRedTeam(self.index)):\n for y in range(1, layoutY - 1):\n if ((redX, y) not in wallsList):\n borderCells.append((redX, y))\n else:\n for y in range(1, layoutY - 1):\n if ((blueX, y) not in wallsList):\n borderCells.append((blueX, y))\n\n return borderCells",
"def getBorderCells(self, gameState):\n\n # List that will hold the cells that lie on our teams side, but border the other team\n borderCells = []\n\n # Obtain the matrix of walls around the entire map\n wallsMatrix = gameState.data.layout.walls\n wallsList = wallsMatrix.asList()\n\n # Using the width of the map, calculate the Red and Blue teams border cell column value\n #Offset the final values by 3 to bring our agents further into friendly territory\n layoutX = wallsMatrix.width\n redX = ((layoutX - 1) / 2) - 3\n blueX = ((int)((math.ceil((float)(layoutX - 1) / 2)))) + 3\n\n # Using the height of the map, the number of rows, loop through the number of rows\n # and add the cells to the return list that are not walls\n layoutY = wallsMatrix.height - 1\n if (gameState.isOnRedTeam(self.index)):\n for y in range(1, layoutY - 1):\n if ((redX, y) not in wallsList):\n borderCells.append((redX, y))\n else:\n for y in range(1, layoutY - 1):\n if ((blueX, y) not in wallsList):\n borderCells.append((blueX, y))\n\n return borderCells",
"def __collectWallTiles(self):\n for tile in self.tiles:\n if not tile.isWalkable:\n self.wallTiles.append(tile)",
"def drawWalls(maze):\n \n for wall in maze.walls:\n coord_0 = (wall.start.x, wall.start.y)\n coord_1 = (wall.end.x, wall.end.y)\n pygame.draw.line(screen, color_BONES, coord_0, coord_1, THICKNESS)",
"def building_roadhouse(w=15, h=15, wall_material=None, floor_material=None):\n # Initial checks. Don't accept too small/big inn\n if w < 15 or h < 15:\n raise ValueError('Building is too small: w or h < 15')\n elif w > 21 or h > 21:\n raise ValueError('Building is too big: w or h > 21')\n # Choose materials\n if not wall_material:\n wall_material = random.choice([C.wall_block, C.wall_plank, C.wall_brick, C.wall_stone])\n elif wall_material not in (['block', 'plank', 'brick', 'stone']):\n raise ValueError('Wall material should be \"block\", \"plank\", \"brick\" or \"stone\"')\n if wall_material == 'block':\n wall_material = C.wall_block\n elif wall_material == 'plank':\n wall_material = C.wall_plank\n elif wall_material == 'brick':\n wall_material = C.wall_brick\n elif wall_material == 'stone':\n wall_material = C.wall_stone\n\n if not floor_material:\n floor_material = random.choice([C.floor_dirt, C.floor_parquet, C.floor_cobblestone])\n elif floor_material not in (['dirt', 'parquet', 'cobblestone']):\n raise ValueError('Floor material should be \"dirt\", \"parquet\" or \"cobblestone\"')\n if floor_material == 'dirt':\n floor_material = C.floor_dirt\n elif floor_material == 'parquet':\n floor_material = C.floor_parquet\n elif floor_material == 'cobblestone':\n floor_material = C.floor_cobblestone\n M = room_default(w, h, wall_type=wall_material, floor_type=floor_material)\n M[13, h-1] = C.door_closed_window()\n kitchen = _room_kitchen(w, 6, wall_material, floor_material)\n M.meld(kitchen, 0, 0)\n living_room = _room_living(9, h-5, wall_material, floor_material)\n M.meld(living_room, 0, 5)\n vending = _interior_vending(w-10, h-7, wall_material, floor_material)\n M.meld(vending, 9, 6)\n\n return M",
"def test_hangingwall_nodes():\n grid = RasterModelGrid((3, 7), xy_spacing=2500.0)\n grid.add_zeros(\"topographic__elevation\", at=\"node\")\n extender = ListricKinematicExtender(grid, fault_location=2500.0)\n\n assert_array_equal(\n extender._hangwall, [2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 16, 17, 18, 19, 20]\n )",
"def get_all_holes(self) -> List[Position]:\n return list(self._get_holes())",
"def walls(self, walls):\n self._walls = walls\n\n if Map.paths is None:\n self._calculate_all_paths()",
"def find_clearing_to_land():\n # Find a place on the lower half of the screen where there is no identifiable objects\n # Move closer... check again... repeat till height is near 0\n # land and power down\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove C-style comments from an input string.
|
def remove_c_comments(string):
string_pattern = r"(?P<string>\".*?\"|\'.*?\')"
comment_pattern = r"(?P<comment>/\*.*?\*/|//[^\r\n]*$)"
pattern = re.compile(string_pattern + r'|' + comment_pattern,
re.MULTILINE | re.DOTALL)
def replacer(match):
if match.lastgroup == 'comment':
return ""
return match.group()
return pattern.sub(replacer, string)
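A small usage sketch; it assumes `import re` and the function above are in scope, and the C snippet is made up for illustration. Comment-like text inside string literals is preserved because the string alternative is matched first.

import re  # needed by remove_c_comments

sample = '''
int x = 1; /* a block
              comment */
const char *url = "http://example.com"; // trailing comment
'''
print(remove_c_comments(sample))
# Both comment styles are stripped; the "http://..." literal is left intact.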
|
[
"def clean_comment(comment):\n return comment.strip(\"# \")",
"def _remove_comments(source):\n comment_re = r'(/[*].*?[*]/)|(//[^\\n]*)'\n return re.sub(comment_re, '', source, flags=re.MULTILINE | re.DOTALL)",
"def remove_comments(source):\n return re.sub(r'#[^#\\n]+', '', source)",
"def remove_comments(source):\n return re.sub(r\";.*\\n\", \"\\n\", source)",
"def remove_comments(text):\n return re.sub(r' //.*\\n', r'', text)",
"def _strip_comments(code):\n return re.sub(r'(?m)^ *#.*\\n?', '', code)",
"def strip_comments(text):\n \n # (m?) enables multiline mode\n return re.sub(r'(?m)^ *#.*\\n?', '', text).strip()",
"def remove_comments(tex):\n return re.sub(r'%(.+)\\n', r'', tex)",
"def cleanup_comment(raw_comment):\n def pop_prepending_empty_lines(lines):\n first_non_empty_line_idx = 0\n for line in lines:\n if line == '':\n first_non_empty_line_idx += 1\n else:\n break\n return lines[first_non_empty_line_idx:]\n\n import string\n lines = raw_comment.split('\\n')\n chars_to_strip = '/' + '*' + '!' + string.whitespace\n lines = [line.lstrip(chars_to_strip) for line in lines]\n lines = pop_prepending_empty_lines(lines)\n clean_lines = []\n is_brief_comment = True\n for line in lines:\n if line == '' and is_brief_comment:\n # Skip lines that belong to brief comment.\n is_brief_comment = False\n continue\n if is_brief_comment:\n continue\n clean_lines.append(line)\n return '\\n'.join(clean_lines)",
"def remove_comment_lines_in_str(text_data):\n try:\n from StringIO import StringIO # python 2\n except ImportError:\n from io import StringIO # python 3\n\n newData = ''\n\n for line in StringIO(text_data).readlines():\n # rstrip() will keep the _indent but remove all white spaces including '\\n'\n stripped_line = line.strip()\n line = line.rstrip()\n # The Shebang line should survive. shouldn't she?\n if stripped_line.startswith(('#!', '# -*-')):\n newData += line + '\\n'\n # user wants to leave a comment\n elif stripped_line.startswith(('##', '!!')):\n newData += line.replace(stripped_line[0:2], stripped_line[:1], 1) + '\\n'\n # Also keep existing empty lines\n elif not stripped_line:\n newData += line + '\\n'\n # But remove lines that only contains comments\n elif stripped_line.startswith(('#', '!', 'REM')):\n pass\n else:\n # the comments after the code will remain.\n newData += line + '\\n'\n\n return newData",
"def comment(s):\n return '\\n'.join('// ' + line if line else '' for line in s.split('\\n'))",
"def remove_comments_and_spaces(segment):\n pattern = re.compile(r\"\\s+\") # remove spaces\n segment = re.sub(pattern, '', segment)\n pattern = re.compile(r\"//.*\") # remove comments\n segment = re.sub(pattern, '', segment)\n return segment",
"def rem_comment(line):\n return line.split(\"#\", 1)[0].rstrip()",
"def remove_commentlines(self):\n\n tmp = self.main.splitlines()\n tmp = list(itertools.filterfalse(re.compile(r\"^\\s*%.*$\").match, tmp))\n self.main = \"\\n\".join(tmp)",
"def strip_comment(line):\n tokens = []\n try:\n for tok in py_tokenize.generate_tokens(StringIO(line).readline):\n token = Token(tok)\n if token.is_comment():\n continue\n tokens.append(token)\n except py_tokenize.TokenError:\n pass\n return untokenize(tokens)",
"def sanitize_source(src):\n src_lines = src.splitlines(True)\n for i, line in enumerate(src_lines[:2]):\n if _CODING_PATTERN.match(line):\n src_lines[i] = re.sub('#.*$', '# (removed coding)', line)\n return ''.join(src_lines)",
"def comment_quote(s):\r\n comment = str(s)\r\n #comment = _bad_chars_re.sub('', comment)\r\n #print 'in ', repr(str(s))\r\n #print 'out', repr(comment)\r\n comment = _comment_quote_re.sub('->', comment)\r\n return comment",
"def to_comment(comment):\n return '#' + re.sub(r'[^\\x00-\\xFF]', _esc,\n re.sub(r'\\n(?![#!])', '\\n#',\n re.sub(r'\\r\\n?', '\\n', comment)))",
"def is_comment(string):\n return string.lstrip()[0] == '#'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Simple preprocessor for C source code. It only processes conditional directives, without expanding them, and yields objects according to the classes passed in; innermost directive pairs are matched first. If a directive pair does not match, CondDirectiveNotMatch is raised. The source code is assumed to contain no comments and to compile.
|
def preprocess_c_source_code(source, *classes):
pattern = re.compile(r"^[ \t]*#[ \t]*" +
r"(?P<directive>(if[ \t]|ifndef[ \t]|ifdef[ \t]|else|endif))" +
r"[ \t]*(?P<param>(.*\\\n)*.*$)",
re.MULTILINE)
stack = []
def _yield_objects(s, d, p, st, end):
"""
Output matched source piece
"""
nonlocal stack
start_line, end_line = '', ''
if stack:
start_line = '#{} {}'.format(d, p)
if d == 'if':
end_line = '#endif /* {} */'.format(p)
elif d == 'ifdef':
end_line = '#endif /* defined({}) */'.format(p)
else:
end_line = '#endif /* !defined({}) */'.format(p)
has_instance = False
for cls in classes:
for instance in cls.extract(s, st, end):
if has_instance is False:
has_instance = True
yield pair_start, start_line
yield instance.span()[0], instance
if has_instance:
yield start, end_line
for match in pattern.finditer(source):
directive = match.groupdict()['directive'].strip()
param = match.groupdict()['param']
start, end = match.span()
if directive in ('if', 'ifndef', 'ifdef'):
stack.append((directive, param, start, end))
continue
if not stack:
raise CondDirectiveNotMatch()
pair_directive, pair_param, pair_start, pair_end = stack.pop()
yield from _yield_objects(source,
pair_directive,
pair_param,
pair_end,
start)
if directive == 'endif':
continue
if pair_directive == 'if':
directive = 'if'
param = "!( {} )".format(pair_param)
elif pair_directive == 'ifdef':
directive = 'ifndef'
param = pair_param
else:
directive = 'ifdef'
param = pair_param
stack.append((directive, param, start, end))
assert not stack, len(stack)
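Each entry in `classes` is expected to provide an `extract(source, start, end)` generator yielding objects with a regex-style `span()` method, and `CondDirectiveNotMatch` must exist. The following sketch of that interface is an assumption for illustration (the `MacroDefinition` extractor and the sample code are invented); it shows how the generator is consumed.

import re

class CondDirectiveNotMatch(Exception):
    """Raised when #if/#ifdef/#endif pairs do not line up."""

class MacroDefinition:
    """Hypothetical extractor yielding re.Match objects for #define lines."""
    _pattern = re.compile(r'^[ \t]*#[ \t]*define[ \t]+\w+.*$', re.MULTILINE)

    @classmethod
    def extract(cls, source, start, end):
        yield from cls._pattern.finditer(source, start, end)

sample = '''
#ifdef OUTER
#ifdef FOO
#define FOO_VALUE 1
#endif
#endif
'''
for offset, obj in preprocess_c_source_code(sample, MacroDefinition):
    print(offset, obj if isinstance(obj, str) else obj.group())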
|
[
"def preprocess( self, source ):\n\n\t\t# open file\n\t\tfiles = []\n\t\tfiles.append( open( source ) )\n\n\t\t# Output\n\t\tlines = []\n\t\t\n\t\t# depth and value of conditional directives\n\t\tskip = [ False ]\n\t\t\n\t\t# whilst there are still files to preprocess\n\t\twhile len( files ) > 0:\n\t\t\t\n\t\t\twhile True:\n\t\t\t\t\n\t\t\t\t# get line from current file\n\t\t\t\tline = files[-1].readline()\n\t\t\t\t\n\t\t\t\tif line != \"\":\n\t\t\t\t\t\n\t\t\t\t\t# pre-processor directive\n\t\t\t\t\tif line.startswith( \"#\" ):\n\n\t\t\t\t\t\t# Include\n\t\t\t\t\t\tmatch = re.match( '#include\\s+\"(.*)\"\\s*$', line )\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tif not skip[ -1 ]:\n\t\t\t\t\t\t\t\tfiles.append( open( match.group( 1 ) ) )\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# Definition\n\t\t\t\t\t\tmatch = re.match( '#define\\s+(\\S+)\\s+(\\S+)\\s*$', line )\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tif not skip[ -1 ]:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# Check if recursive definition\n\t\t\t\t\t\t\t\tvalue = self.definitions.get( match.group( 2 ), match.group( 2 ) )\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# if determined to be an int, stop processing\n\t\t\t\t\t\t\t\tif isinstance( value, int ):\n\t\t\t\t\t\t\t\t\tself.definitions[ match.group( 1 ) ] = value\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# attempt to evaluate complex expression\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\t\tself.definitions[ match.group( 1 ) ] = eval( value, self.definitions )\n\t\t\t\t\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t# could not parse as expression, assume constant\n\t\t\t\t\t\t\t\t\t\tself.definitions[ match.group( 1 ) ] = value\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# if defined \n\t\t\t\t\t\tmatch = re.match( '#ifdef\\s+(\\S+)\\s*$', line )\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tif not skip[ -1 ]:\n\t\t\t\t\t\t\t\tif match.group( 1 ) in self.definitions:\n\t\t\t\t\t\t\t\t\tskip.append( False )\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tskip.append( True )\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# if not defined \n\t\t\t\t\t\tmatch = re.match( '#ifndef\\s+(\\S+)\\s*$', line )\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tif not skip[ -1 ]:\n\t\t\t\t\t\t\t\tif match.group( 1 ) in self.definitions:\n\t\t\t\t\t\t\t\t\tskip.append( True )\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tskip.append( False )\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# end if\n\t\t\t\t\t\tmatch = re.match( '#endif\\s*$', line )\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tif len( skip ) <= 1:\n\t\t\t\t\t\t\t\traise ValueError( \"Unexpected #endif\" )\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tskip.pop( -1 )\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\t# else\n\t\t\t\t\t\tmatch = re.match( '#else\\s*$', line )\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tskip[ -1 ] = not skip[ -1 ]\t\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\traise ValueError( \"Unrecognised preprocessor directive: {0}\".format( line ) )\n\t\t\t\t\t\t\t\n\t\t\t\t\telif not skip[ -1 ]:\n\t\t\t\t\t\tlines.append( line )\n\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tf = files.pop()\n\t\t\t\t\tf.close()\n\t\t\t\t\tbreak\n\n\t\treturn lines",
"def compileClass(self):\n self.current_compile = \"compileClass\"\n self.eat(\"class\")\n self.class_name = self.eatTag(\"identifier\")\n self.eat(\"{\")\n\n while self.currentTokenEquals([\"field\", \"static\"]):\n self.compileClassVarDec()\n\n while self.currentTokenEquals([\"constructor\", \"function\", \"method\"]):\n self.compileSubroutineDec()\n\n self.eat(\"}\")",
"def compile_if(self):\n\n # peeked on if\n # now advanced to if\n current_token = self.tokenizer.get_current_token()[1]\n if current_token != IF:\n self.tokenizer.advance()\n current_token = self.tokenizer.get_current_token()[1]\n\n L1 = self.label_generator()\n L2 = self.label_generator()\n\n # was if now (\n self.tokenizer.advance()\n\n # cond\n # build if expression\n self.compile_expression()\n\n # ~(cond)\n # negate condition\n negate = BINARY_DICT[\"~\"]\n self.VMWriter.write_arithmetic(negate)\n # -------------------- #\n\n # )\n self.tokenizer.advance()\n\n # if-goto L1\n self.VMWriter.write_if(L1)\n # -------------------- #\n\n # {\n self.tokenizer.advance()\n\n # insert whats inside if() { lalla }\n\n # VM code for s1\n self.compile_statements()\n # -------------------- #\n\n # goto L2\n self.VMWriter.write_goto(L2)\n # -------------------- #\n\n # }\n self.tokenizer.advance()\n\n # now we might have else:\n current_token = self.tokenizer.get_current_token()[1]\n current_peek = self.tokenizer.peek_at_next_token()[1]\n\n # label L1\n self.VMWriter.write_label(L1)\n # -------------------- #\n\n # statements 2 is else :\n if (current_peek == ELSE) | (current_token == ELSE):\n if current_peek == ELSE:\n self.tokenizer.advance()\n # now else\n self.tokenizer.advance()\n # {\n self.tokenizer.advance()\n\n self.compile_statements()\n\n # }\n self.tokenizer.advance()\n\n # label L2\n self.VMWriter.write_label(L2)\n # -------------------- #",
"def crit_compile(code: Union[List[str], str]) -> List[str]:\n if isinstance(code, str):\n raw_code = [ln for ln in code.split(\"\\n\") if len(ln) > 0]\n else:\n raw_code = code\n \n do_open = False\n atk_open = False\n crit_open = False\n self_open = False\n hit_open = False\n miss_open = False\n last_crit_idx = -1\n last_atk_idx = -1\n last_do_idx = -1\n last_hit_idx = -1\n last_self_idx = -1\n last_miss_idx = -1\n last_do_line = \"\"\n last_atk_line = \"\"\n\n stripped_code = [\n ln.strip().lower() \n for ln in raw_code \n if not ln.strip().startswith('#')\n if len(ln.strip()) > 0]\n \n for line_no, line in enumerate(stripped_code):\n eff = EFF_PATTERN.match(line)\n if line == \"endcrit\":\n if not crit_open:\n raise EarlyEndCritError(line_no, line)\n else:\n crit_open = False\n elif line == \"endatk\":\n if not atk_open:\n raise EarlyEndAtkError(line_no, line)\n else:\n atk_open = False\n elif line == \"done\":\n if not do_open:\n raise EarlyDoneError(line_no, line)\n else:\n do_open = False\n elif line == \"endmiss\":\n if not miss_open:\n raise EarlyEndMissError(line_no, line)\n else:\n miss_open = False\n elif line == \"endself\":\n if not self_open:\n raise EarlyEndSelfError(line_no, line)\n else:\n self_open = False\n elif line == \"endhit\":\n if not hit_open:\n raise EarlyEndHitError(line_no, line)\n else:\n hit_open = False\n elif line == \"crit\": \n if crit_open:\n raise NestedCritBlockError(line_no, line)\n elif not atk_open:\n raise CritWithoutAtkError(line_no, line)\n else:\n crit_open = True\n last_crit_idx = line_no\n elif line == \"hit\":\n if hit_open:\n raise NestedHitBlockError(line_no, line)\n elif not atk_open:\n raise HitWithoutAtkError(line_no, line)\n else:\n hit_open = True\n last_hit_idx = line_no\n elif line == \"miss\":\n if miss_open:\n raise NestedHitBlockError(line_no, line)\n elif not atk_open:\n raise MissWithoutAtkError(line_no, line)\n else:\n miss_open = True\n last_miss_idx = line_no\n elif line == \"self\":\n if self_open:\n raise NestedSelfBlockError(line_no, line)\n else:\n self_open = True\n last_self_idx = line_no\n elif DO_PATTERN.match(line):\n if do_open:\n raise NestedDoBlockError(line_no, line)\n else:\n do_open = True\n last_do_idx = line_no\n last_do_line = line\n elif ATK_PATTERN.match(line):\n if atk_open:\n raise NestedAtkBlockError(line_no, line)\n else:\n atk_open = True\n last_atk_idx = line_no\n last_atk_line = line\n elif eff:\n if eff.group(\"eff\") not in EFF_NAMES:\n raise BadEffectError(line_no, line)\n elif DMG_PATTERN.match(line):\n pass\n elif line == \"weaponcrit\":\n pass\n else:\n raise UnknownCritSyntaxError(line_no, line)\n \n if crit_open:\n raise NoEndCritError(last_crit_idx, \"crit\")\n \n if do_open:\n raise NoDoneError(last_do_idx, last_do_line)\n \n if atk_open:\n raise NoEndAtkError(last_atk_idx, last_atk_line)\n \n if miss_open:\n raise NoEndMissError(last_miss_idx, \"miss\")\n \n if self_open:\n raise NoEndSelfError(last_self_idx, \"self\")\n \n if hit_open:\n raise NoEndHitError(last_hit_idx, \"hit\")\n \n return stripped_code",
"def compile_class(self):\n self.tokenizer.advance() # class\n self.class_name = self.tokenizer.advance()[TOKEN_NAME]\n self.tokenizer.advance() # {\n # compile the variables declaration part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods o functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine_dec()\n self.tokenizer.advance() # }",
"def _designate_wrapped_lines(lines):\n class Flag(Enum):\n WRAP = 1\n NO_WRAP = -1\n DONT_CARE = 0\n\n # Regexs to match various kinds of code patterns.\n is_include = re.compile(r'^\\s*#\\s*include\\s*[\"<].*$')\n is_preprocessor = re.compile(r'^\\s*#.*$')\n is_blank = re.compile(r'^\\s*$')\n is_blank_cpp_comment = re.compile(r'^\\s*//.*$')\n is_blank_c_comment_begin = re.compile(r'^\\s*/\\*.*$')\n is_c_comment_end = re.compile(r'^.*\\*/\\s*(.*)$')\n\n # Loop over all lines and determine each one's flag.\n flags = [None] * len(lines)\n i = 0\n while i < len(lines):\n line = lines[i]\n # When the prior line has continuation, this line inherits its Flag.\n if i > 0 and lines[i - 1].endswith('\\\\'):\n flags[i] = flags[i - 1]\n i += 1\n continue\n # We must NOT wrap #include statements.\n if is_include.match(line):\n flags[i] = Flag.NO_WRAP\n i += 1\n continue\n # Other preprocessor directives can go either way.\n if is_preprocessor.match(line):\n flags[i] = Flag.DONT_CARE\n i += 1\n continue\n # Blank lines (or lines that are blank other than their comments)\n # can go either way.\n if is_blank.match(line) or is_blank_cpp_comment.match(line):\n flags[i] = Flag.DONT_CARE\n i += 1\n continue\n # For C-style comments, consume the entire comment block immediately.\n if is_blank_c_comment_begin.match(line):\n first_c_comment_line = i\n while True:\n line = lines[i]\n match = is_c_comment_end.match(line)\n flags[i] = Flag.DONT_CARE\n i += 1\n if match:\n break\n # If the close-comment marker had code after it, we need to go back\n # and set the entire C-style comment to WRAP.\n (trailing,) = match.groups()\n if trailing:\n for fixup in range(first_c_comment_line, i):\n flags[fixup] = Flag.WRAP\n continue\n # We MUST wrap all C/C++ code.\n flags[i] = Flag.WRAP\n i += 1\n\n # We want to insert inline namespaces such that:\n #\n # - all WRAP lines are enclosed;\n # - no NO_WRAP lines are enclosed;\n # - the only DONT_CARE lines enclosed are surrouneded by WRAP.\n #\n # We'll do that by growing the NO_WRAP spans as large as possible.\n\n # Grow the start-of-file run of NO_WRAP:\n for i in range(len(flags)):\n if flags[i] == Flag.DONT_CARE:\n flags[i] = Flag.NO_WRAP\n else:\n break\n\n # Grow the end-of-file run of NO_WRAP:\n for i in range(len(flags) - 1, -1, -1):\n if flags[i] == Flag.DONT_CARE:\n flags[i] = Flag.NO_WRAP\n else:\n break\n\n # Grow any interior regions of NO_WRAP:\n for i in range(len(flags)):\n if flags[i] == Flag.NO_WRAP:\n # Change all of the immediately prior and subsequent homogeneous\n # runs of DONT_CARE to NO_WRAP.\n for j in range(i - 1, -1, -1):\n if flags[j] == Flag.DONT_CARE:\n flags[j] = Flag.NO_WRAP\n else:\n break\n for j in range(i + 1, len(flags)):\n if flags[j] == Flag.DONT_CARE:\n flags[j] = Flag.NO_WRAP\n else:\n break\n\n # Anything remaining is DONT_CARE bookended by WRAP, so we'll WRAP it.\n for i in range(len(flags)):\n if flags[i] == Flag.DONT_CARE:\n flags[i] = Flag.WRAP\n\n # Return True only for the wrapped lines.\n return [x == Flag.WRAP for x in flags]",
"def compile_if(self):\n if_end, if_true, if_false = self.create_if_labels()\n self.if_counter += 1\n\n self.advance_tokens(2) # if(\n self.compile_expression()\n self.tokenizer.advance() # )\n\n self.vm_writer.write_if(if_true)\n self.vm_writer.write_go_to(if_false)\n self.vm_writer.write_label(if_true)\n\n self.tokenizer.advance() # {\n self.compile_statements()\n self.tokenizer.advance() # }\n\n if self.tokenizer.peek_next_token()[TOKEN_NAME] == 'else':\n self.vm_writer.write_go_to(if_end)\n self.vm_writer.write_label(if_false)\n self.advance_tokens(2) # else {\n self.compile_statements()\n self.tokenizer.advance() # }\n self.vm_writer.write_label(if_end)\n else:\n self.vm_writer.write_label(if_false)",
"def scanColorDirectives(self,p):\n\n p = p.copy() ; c = self.c\n if c == None: return # self.c may be None for testing.\n\n self.language = language = c.target_language\n self.comment_string = None\n self.rootMode = None # None, \"code\" or \"doc\"\n\n for p in p.self_and_parents_iter():\n # g.trace(p)\n s = p.v.t.bodyString\n theDict = g.get_directives_dict(s)\n #@ << Test for @comment or @language >>\n #@+node:ekr.20060530091119.65:<< Test for @comment or @language >>\n # @comment and @language may coexist in the same node.\n\n if theDict.has_key(\"comment\"):\n k = theDict[\"comment\"]\n self.comment_string = s[k:]\n\n if theDict.has_key(\"language\"):\n i = theDict[\"language\"]\n tag = \"@language\"\n assert(g.match_word(s,i,tag))\n i = g.skip_ws(s,i+len(tag))\n j = g.skip_c_id(s,i)\n self.language = s[i:j].lower()\n\n if theDict.has_key(\"comment\") or theDict.has_key(\"language\"):\n break\n #@nonl\n #@-node:ekr.20060530091119.65:<< Test for @comment or @language >>\n #@nl\n #@ << Test for @root, @root-doc or @root-code >>\n #@+node:ekr.20060530091119.66:<< Test for @root, @root-doc or @root-code >>\n if theDict.has_key(\"root\") and not self.rootMode:\n\n k = theDict[\"root\"]\n if g.match_word(s,k,\"@root-code\"):\n self.rootMode = \"code\"\n elif g.match_word(s,k,\"@root-doc\"):\n self.rootMode = \"doc\"\n else:\n doc = c.config.at_root_bodies_start_in_doc_mode\n self.rootMode = g.choose(doc,\"doc\",\"code\")\n #@nonl\n #@-node:ekr.20060530091119.66:<< Test for @root, @root-doc or @root-code >>\n #@nl\n\n # g.trace(self.language)\n\n return self.language # For use by external routines.",
"def compile_if(self):\n # write <if_statement>\n self.non_terminal_open(XML_IF_STATEMENT)\n # write <keyword> if <keyword>\n self.one_liner(XML_KEY_WORD, self.tokenizer.current_token)\n self.tokenizer.advance()\n # write <symbol> ( <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n self.tokenizer.advance()\n self.compile_expression()\n # write <symbol> ) <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n self.tokenizer.advance()\n # write <symbol> { <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n self.tokenizer.advance()\n self.compile_statements()\n # write <symbol> } <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n self.tokenizer.advance()\n if self.tokenizer.current_token == 'else':\n # write <keyword> else <keyword>\n self.one_liner(XML_KEY_WORD, self.tokenizer.current_token)\n self.tokenizer.advance()\n # write <symbol> { <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n self.tokenizer.advance()\n self.compile_statements()\n # write <symbol> } <symbol>\n self.one_liner(XML_SYMBOL, self.tokenizer.current_token)\n self.tokenizer.advance()\n # write <if_statement>\n self.non_terminal_end(XML_IF_STATEMENT)\n return",
"def compile_tokens(tokens, pc, context):\n\n it = iter(tokens)\n ignore = False\n subtokens = None\n\n for token in it:\n # Handle comments. Whether or not a Forth permits nested comments is\n # pretty up-in-the-air; this Forth does not permit nesting of\n # comments.\n if token == \"(\":\n ignore = True\n continue\n elif token == \")\":\n ignore = False\n continue\n\n if ignore:\n continue\n\n # Look for subroutines.\n if token == \":\":\n subtokens = []\n continue\n elif token == \";\":\n if not subtokens:\n raise Exception(\"Empty word definition!\")\n name = subtokens[0]\n pc = subroutine(name, subtokens[1:], pc, context)\n continue\n elif subtokens is not None:\n subtokens.append(token)\n continue\n\n raise Exception(\"Lone word %r in tokenizer!\" % token)\n\n return pc",
"def walk_c_files(topdir=\"src\"):\n\n for dirpath, dirnames, fnames in os.walk(topdir):\n for fname in fnames:\n if fname_is_c(fname):\n fullpath = os.path.join(dirpath,fname)\n with open(fullpath) as f:\n for err in consider_include_rules(fullpath, f):\n yield err",
"def cli(ctx, sink, opt_includes, opt_excludes):\n \n from vframe.settings.app_cfg import LOG, SKIP_FRAME\n \n \n while True:\n\n M = yield\n\n # skip frame if flagged\n if ctx.obj[SKIP_FRAME]:\n sink.send(M)\n continue\n\n # if exc/inc classes\n valid_inc = M.includes_labels(opt_includes) if opt_includes else True\n valid_exc = M.excludes_labels(opt_excludes) if opt_excludes else True\n\n skip = not (valid_inc or valid_exc)\n ctx.obj[SKIP_FRAME] = skip\n \n sink.send(M)",
"def _compiler_directive(self):\n # compiler directives\n self._compiler_directive = Combine(\"`\" +\n oneOf(\n \"define undef ifndef ifdef else endif default_nettype \"\n \"include resetall timescale unconnected_drive \"\n \"nounconnected_drive celldefine endcelldefine\") +\n restOfLine)\n return self._compiler_directive",
"def compile_class(self):\n self.xml_lines.append(\"<class>\")\n # keyword: class\n # identifier: name of class\n # symbol: {\n self.append_xml_lines(3)\n # compile the variable declarations part of the class if exist\n self.compile_var_dec(True)\n # class can contain constructor and one or more methods or functions (subroutines)\n # here we will compile all of the subroutines\n while self.tokenizer.peek_next_token()[TOKEN_NAME] in keywords_mapping.keys() \\\n and keywords_mapping[self.tokenizer.peek_next_token()[TOKEN_NAME]] == \\\n 'subroutineDec':\n self.compile_subroutine()\n # symbol: }\n self.append_next_xml_line()\n self.xml_lines.append(\"</class>\")",
"def condition(cond, brule):\n def conditioned_brl(expr):\n if cond(expr):\n yield from brule(expr)\n else:\n pass\n return conditioned_brl",
"def preprocess(source, include_dirs=[], skip_includes=[], use_compiler=False):\n compiler_commands = {\n 'gcc': ['gcc', '-E', '-']\n }\n for compiler in compiler_commands:\n if use_compiler and shutil.which(compiler):\n p = subprocess.run(compiler_commands[compiler], input=source,\n encoding='ascii', stdout=subprocess.PIPE)\n source = re.sub(r'^#.*\\n', '', p.stdout, flags=re.MULTILINE)\n break\n else: # naive c preprocessor\n source = _resolve_includes(source, include_dirs=include_dirs,\n skip_includes=skip_includes)\n source = _remove_comments(source)\n source = _resolve_macros(source, identifiers={})\n return source",
"def testMakeMatchSimpleConditionContent2(self):\n data_in = {'indent': 0, 'body': 'if True:2', 'filename': '', 'line': 0}\n token = IfToken.make(data_in)\n self.assertTrue(token)\n _globals = {}\n _locals = {}\n self.assertTrue(token.content)\n self.assertTrue(eval(token.condition, _globals, _locals))",
"def preprocess (self,\r\n source,\r\n output_file=None,\r\n macros=None,\r\n include_dirs=None,\r\n extra_preargs=None,\r\n extra_postargs=None):\r\n pass",
"def compileRegexp(class_):\n if not class_.allowParseDep:\n return\n\n d = dict(flagFormat=class_.flagFormat, depFormat=class_.depFormat,\n WORD=class_.WORD, IDENT=class_.IDENT)\n\n # zero or more space-separated flags\n flagFmt = '(?:\\( *(%(flagFormat)s?(?: +%(flagFormat)s)*) *\\))?'\n # add ^ and $ to ensure we match the entire string passed in\n regexp = ('^ *(%(depFormat)s) *' + flagFmt + ' *$') % d\n # word is a slightly larger group of chars than ident -\n # includes . and +, because those are used in paths and\n # sonames. May need to be larger some day, and probably\n # could be more restrictive for some groups. Should not contain\n # /, as that's used as a special char in many dep classes.\n regexp = regexp.replace('WORD', d['WORD'])\n regexp = regexp.replace('IDENT',d['IDENT'])\n class_.regexpStr = regexp\n class_.regexp = re.compile(regexp)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function performs scaling on the retrieved CSI data to account for automatic gain control and other factors. Code within this section is largely based on the Linux 802.11n CSI Tool's MATLAB implementation (get_scaled_csi.m).
|
def scale_csi_entry(csi: np.array, header: list) -> np.array:
n_rx = header[3]
n_tx = header[4]
rssi_a = header[5]
rssi_b = header[6]
rssi_c = header[7]
noise = header[8]
agc = header[9]
#Calculate the scale factor between normalized CSI and RSSI (mW).
csi_sq = np.multiply(csi, np.conj(csi))
csi_pwr = np.sum(csi_sq)
csi_pwr = np.real(csi_pwr)
rssi_pwr_db = IWLBeamformReader.get_total_rss(rssi_a, rssi_b, rssi_c, agc)
rssi_pwr = dbinv(rssi_pwr_db)
#Scale CSI -> Signal power : rssi_pwr / (mean of csi_pwr)
scale = rssi_pwr / (csi_pwr / 30)
#Thermal noise may be undefined if the trace was captured in monitor mode.
    #If so, set it to -92.
noise_db = noise
if (noise == -127):
noise_db = -92
noise_db = float(noise_db)
thermal_noise_pwr = dbinv(noise_db)
#Quantization error: the coefficients in the matrices are 8-bit signed numbers,
#max 127/-128 to min 0/1. Given that Intel only uses a 6-bit ADC, I expect every
#entry to be off by about +/- 1 (total across real and complex parts) per entry.
#The total power is then 1^2 = 1 per entry, and there are Nrx*Ntx entries per
#carrier. We only want one carrier's worth of error, since we only computed one
#carrier's worth of signal above.
quant_error_pwr = scale * (n_rx * n_tx)
#Noise and error power.
total_noise_pwr = thermal_noise_pwr + quant_error_pwr
# ret now has units of sqrt(SNR) just like H in textbooks.
ret = csi * np.sqrt(scale / total_noise_pwr)
if n_tx == 2:
ret = ret * np.sqrt(2)
elif n_tx == 3:
#Note: this should be sqrt(3)~ 4.77dB. But 4.5dB is how
#Intel and other makers approximate a factor of 3.
#You may need to change this if your card does the right thing.
ret = ret * np.sqrt(dbinv(4.5))
return ret
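The routine depends on two helpers that are not shown here: `dbinv` and `IWLBeamformReader.get_total_rss`. The sketch below is an assumption about what they look like, modelled on the Linux 802.11n CSI Tool's dbinv.m and get_total_rss.m (combine per-antenna RSSI in the linear domain, then subtract a 44 dB constant and the AGC value); it is not necessarily the project's actual implementation.

import math

def dbinv(x):
    """Convert decibels to linear power."""
    return math.pow(10, x / 10)

def db(x):
    """Convert linear power to decibels."""
    return 10 * math.log10(x)

class IWLBeamformReader:
    @staticmethod
    def get_total_rss(rssi_a, rssi_b, rssi_c, agc):
        """Total receive power (dB) across the reported antennas."""
        rssi_mag = 0.0
        for rssi in (rssi_a, rssi_b, rssi_c):
            if rssi != 0:
                rssi_mag += dbinv(rssi)
        return db(rssi_mag) - 44 - agc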
|
[
"def ikHandleDisplayScale():\n pass",
"def scale_data(data,mins,maxs,targ_mins,targ_maxs,scale_prop):\n channels = data[0].shape[1]\n \n # calculate scale and shift\n if scale_prop==\"global\":\n # scales channel to global min/max\n glob_max = maxs.max()\n glob_min = mins.min() \n targ_max = targ_maxs.max()\n targ_min = targ_mins.min()\n \n glob_scale = (targ_max-targ_min) / (glob_max-glob_min)\n glob_shift = (-1)*glob_min * glob_scale + targ_min\n \n scale = np.ones(channels) * glob_scale\n shift = np.ones(channels) * glob_shift\n \n elif scale_prop==\"local\":\n # shift each channel to its individual min/max\n scale = (targ_maxs-targ_mins) / (maxs-mins)\n shift = (-1)*mins * scale + targ_mins\n\n # if infinity (same max as min, constant input)\n # we set the input to zero\n ind1 = np.nonzero(np.isinf(scale)==True)\n ind2 = np.nonzero(np.isinf(shift)==True)\n scale[ind1] = 0.\n shift[ind2] = 0\n \n else:\n # no scaling\n scale = np.ones(channels)\n shift = np.zeros(channels)\n return data,scale,shift\n\n # scale and shift data\n for ex in data:\n for n in range(channels):\n ex[:,n] = ex[:,n]*scale[n] + shift[n]\n \n return data,scale,shift",
"def __scale_cb(self, data):\n self.__scale = data.data",
"def scale(self, sval: complex) -> None:\n\n sval = complex(sval) # type: ignore\n\n for sector in self._civec.values():\n sector.scale(sval)",
"def control_scale(self):\n\t\tfor bac in self.bacteria():\n\t\t\tval = np.mean(self.map[\"control\"][str(bac)].values())\n\t\t\tfor pref in self.map[str(bac)].keys():\n\t\t\t\tself.map[str(bac)][pref].scale(val)",
"def rescaled_image():",
"def _calc_scales():\n raw_h, raw_w = raw_img.shape[0], raw_img.shape[1]\n min_scale = min(np.floor(np.log2(np.max(clusters_w[normal_idx] / raw_w))),\n np.floor(np.log2(np.max(clusters_h[normal_idx] / raw_h))))\n max_scale = min(1.0, -np.log2(max(raw_h, raw_w) / MAX_INPUT_DIM))\n scales_down = pl.frange(min_scale, 0, 1.)\n scales_up = pl.frange(0.5, max_scale, 0.5)\n scales_pow = np.hstack((scales_down, scales_up))\n scales = np.power(2.0, scales_pow)\n return scales",
"def convert_image_scaled(*args, **kwargs): # real signature unknown; restored from __doc__\n pass",
"def scale_raw_data(self, data):\n data -= self.scalar_mean\n data /= self.scalar_std\n return data",
"def update_attributes_with_scales(self):\n # transfer buffer info from v. Useful for writing files.\n\n # purge the frame scales # probably develop a better way to handle this.\n for p in [att for att in self.data[\"attributes\"] if att.find(\"FrameScale\") == 0]:\n self.data[\"attributes\"].pop(p)\n\n for p in [att for att in self.data[\"attributes\"] if att.find(\"_SCALE_\") == 0]:\n self.data[\"attributes\"].pop(p)\n\n for a in self.data[\"buffer\"]:\n if a.find(\"scale\") == 0:\n sc = self.data[\"buffer\"][a]\n\n # parse the info from the buffer in suitable format\n\n # only work for frame0 at present\n\n _key = \"_SCALE_%s\" % a[-1].upper()\n _val = \"{0:0.6f} {1:0.6f}\\n{2}\\n\\n\".format(\n sc[\"factor\"], sc[\"offset\"], sc[\"unit\"].strip(\"\\n\").strip(\"\\x00\")\n )\n self.data[\"attributes\"][_key] = _val\n\n val = \"{0:0.6g}\\n{1:0.6g}\\n{2}\\n\".format(\n sc[\"factor\"], sc[\"offset\"], sc[\"unit\"].strip(\"\\n\").strip(\"\\x00\")\n )\n key = \"FrameScale%s0\" % a[-1].upper()\n self.data[\"attributes\"][key] = val",
"def HR2_RescaleGain(self,gain0,gain1,idif=0,iasic=0):\n\n scale=gain1*1./gain0\n print(\" Rescale factor\",scale)\n\n for a in self.asiclist:\n if (idif != 0 and a[\"dif\"] != idif):\n continue\n if (iasic != 0 and a[\"num\"] != iasic):\n continue\n for ipad in range(0,64):\n a[\"slc\"][\"PAGAIN\"][ipad]=int(scale*a[\"slc\"][\"PAGAIN\"][ipad])\n a[\"_id\"]=None",
"def scale_pixels(data):\n data /= 255",
"def rescale_480band(bio_optical_config, abs_cff, k):\n\n # rescaling variables to BioSNICAR resolution (10nm)\n wvl_rescaled = bio_optical_config.wvl[5::10]\n abs_cff_rescaled = abs_cff[5::10]\n k_rescaled = k[5::10]\n n_rescaled = (\n bio_optical_config.n_algae * np.ones(np.size(bio_optical_config.wvl))\n )[5::10]\n\n return wvl_rescaled, abs_cff_rescaled, k_rescaled, n_rescaled",
"def _scale_obs(obs, old_obs, scale_factor):\n for row, x in enumerate(old_obs):\n for col, y in enumerate(x):\n obs[row*scale_factor:(row+1)*scale_factor,\n col*scale_factor:(col+1)*scale_factor] = y\n return obs",
"def set_scale_values(self):\n\n for param_n in self.log_ids:\n log_data = self.__log_scale(param_n, self.channel)\n self.scale[param_n] = log_data\n\n if self.gain_ids:\n for param_n in self.gain_ids:\n gain_data = self.__gain_scale(param_n, self.channel)\n self.scale[param_n] = gain_data",
"def scale_data(self):\n pass",
"def scaling(sx,sy,mat2):\n\n\tmat1=[[sx,0,0],[0,sy,0],[0,0,1]]\n\tscaled=matrix_multiplication(mat1,mat2)\n\treturn scaled",
"def scaleVoxels(self, scale=1):\n rsl = vtk.vtkImageReslice()\n rsl.SetInputData(self.imagedata())\n rsl.SetScalarScale(scale)\n rsl.Update()\n return self._update(rsl.GetOutput())",
"def _rescale(self, samp, **kwargs):\n \"\"\"\n Here is where the subclass where overwrite rescale method\n \"\"\"\n return samp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the extent (bounding box) of the gateways (parameter gtws).
|
def get_extent(gtws):
minx = float("inf")
miny = float("inf")
maxx = float("-inf")
maxy = float("-inf")
for gtw in gtws:
if gtws[gtw][0] < minx:
minx = gtws[gtw][0]
if gtws[gtw][0] > maxx:
maxx = gtws[gtw][0]
if gtws[gtw][1] < miny:
miny = gtws[gtw][1]
if gtws[gtw][1] > maxy:
maxy = gtws[gtw][1]
# print (minx, miny, maxx, maxy)
return minx, miny, maxx, maxy
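A quick usage sketch with made-up gateway coordinates; `gtws` maps a gateway id to an (x, y) pair, which is all the function reads.

gateways = {
    "gtw-a": (1200.0, 800.0),
    "gtw-b": (1750.0, 950.0),
    "gtw-c": (1425.0, 400.0),
}
minx, miny, maxx, maxy = get_extent(gateways)
print(minx, miny, maxx, maxy)  # 1200.0 400.0 1750.0 950.0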
|
[
"def _get_extent(inp):\n d = inp.dimensions\n return [0, d[0]-1, 0, d[1]-1, 0, d[2]-1]",
"def get_extent(y, x):\n dy = y[1] - y[0]\n dx = x[1] - x[0]\n extent = [x[0] - 0.5 * dx, x[-1] + 0.5 * dx, y[-1] + 0.5 * dy, y[0] - 0.5 * dy]\n return extent",
"def extent(self):\n cos = np.cos(self.position_angle)\n sin = np.sin(self.position_angle)\n x = np.hypot(cos * self.x_fwhm, sin * self.y_fwhm)\n y = np.hypot(cos * self.y_fwhm, sin * self.x_fwhm)\n return Coordinate2D(coordinates=[x, y])",
"def get_model_extent(self, target_epsg_code=\"\", **kwargs):\n bbox = kwargs.get(\"extra_extent\", [])\n if self.is_rpc:\n # Proxy function via RPC\n return self.h5py_file.get_model_extent(\n target_epsg_code=target_epsg_code, bbox=bbox\n )\n\n for k in constants.SUBSET_NAME_H5_ATTR_MAP.keys():\n sub_extent = self.get_extent_subset(\n subset_name=k, target_epsg_code=target_epsg_code\n )\n if sub_extent is None:\n continue\n bbox.append(sub_extent)\n\n x = np.array(bbox)[:, [0, 2]]\n y = np.array(bbox)[:, [1, 3]]\n\n return np.array([np.min(x), np.min(y), np.max(x), np.max(y)])",
"def get_global_extent(self) -> Tuple[float, float, float, float]:\n pass",
"def raster_extent(imagepath: str, epsg = 'EPSG:4326') -> shapely.geometry.Polygon:\n with rasterio.open(imagepath) as dataset:\n _geom = shapely.geometry.mapping(shapely.geometry.box(*dataset.bounds))\n return shapely.geometry.shape(rasterio.warp.transform_geom(dataset.crs, epsg, _geom, precision=6))",
"def get_index_extent(self):\n DBSession.query(func.gstoredata.generate_tileindex_bbox(self.id))",
"def extent(self) -> Sequence[float]:\n half_size_xyz = self._vol_shape_xyz * self.vox_size / 2\n return [hs * sign for hs in half_size_xyz for sign in [-1, +1]]",
"def getHalfExtent(self):\n return _almathswig.Pill_getHalfExtent(self)",
"def _get_grid_extents(self):\n try:\n return self._grid_extents\n except AttributeError:\n self._grid_extents = (self.xmin, self.xmax, self.ymin, self.ymax)\n return self._grid_extents",
"def layer_envelope(self, layer):\n e = layer.envelope()\n return [e.minx, e.miny, e.maxx, e.maxy]",
"def gate_names(self):\n return self.gg",
"def getSymExtent(self, printerScale):\n h = self.attributes['width'] * printerScale * self._pointSize[0]\n w = 5 * h\n return (w, h)",
"def GetWriteExtent(self):\n ...",
"def compute_geo_extent(geo_transform, shape):\n assert isinstance(geo_transform, (tuple, list, np.ndarray)) and len(geo_transform) == 6, \\\n \"Argument geo_transform should be a tuple or list or ndarray and have 6 values\"\n assert isinstance(shape, (list, tuple)) and len(shape) >= 2, \"Argument shape should be (height, width)\"\n return np.array([\n # (P=0, L=0)\n [geo_transform[0], geo_transform[3]],\n # (P=W-1, L=0)\n [geo_transform[0] + (shape[1] - 1) * geo_transform[1], geo_transform[3] + (shape[1] - 1) * geo_transform[4]],\n # (P=W-1, L=H-1)\n [geo_transform[0] + (shape[1] - 1) * geo_transform[1] + (shape[0] - 1) * geo_transform[2],\n geo_transform[3] + (shape[1] - 1) * geo_transform[4] + (shape[0] - 1) * geo_transform[5]],\n # (P=0, L=H-1)\n [geo_transform[0] + (shape[0] - 1) * geo_transform[2], geo_transform[3] + (shape[0] - 1) * geo_transform[5]]\n ])",
"def getRasterExtent(raster_in):\n if not os.path.isfile(raster_in):\n return []\n raster = gdal.Open(raster_in, GA_ReadOnly)\n if raster is None:\n return []\n geotransform = raster.GetGeoTransform()\n originX = geotransform[0]\n originY = geotransform[3]\n spacingX = geotransform[1]\n spacingY = geotransform[5]\n r, c = raster.RasterYSize, raster.RasterXSize\n\n minX = originX\n maxY = originY\n maxX = minX + c * spacingX\n minY = maxY + r * spacingY\n return [minX, maxX, minY, maxY]",
"def extent_2d(self):\n cosine = math.cos(math.radians(self.rot))\n sine = math.sin(math.radians(self.rot))\n width = (self.nx - 1) * self.dx\n height = (self.ny - 1) * self.dy\n xy0 = (self.x0, self.y0)\n xy1 = (self.x0 + width * cosine, self.y0 + width * sine)\n xy2 = (self.x0 + width * cosine - height * sine, self.y0 + width * sine + height * cosine)\n xy3 = (self.x0 - height * sine, self.y0 + height * cosine)\n\n minxy = (min(xy0[0], xy1[0], xy2[0], xy3[0]),\n min(xy0[1], xy1[1], xy2[1], xy3[1]))\n maxxy = (max(xy0[0], xy1[0], xy2[0], xy3[0]),\n max(xy0[1], xy1[1], xy2[1], xy3[1]))\n\n return minxy, maxxy",
"def _get_default_gws():\n\n result = []\n dr_list = _ipr.get_default_routes(family=socket.AF_INET)\n for dr in dr_list:\n ip = dr.get_attr(\"RTA_GATEWAY\")\n oif = dr.get_attr(\"RTA_OIF\")\n met = dr.get_attr(\"RTA_PRIORITY\")\n ifname = _ipr.get_links(oif)[0].get_attr(\"IFLA_IFNAME\")\n result.append((ip, ifname, met))\n return result",
"def get_network_bounds(net_index: int):\n # NOTE currently hardcoded\n return _canvas.realsize"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns the best-fitting pixel (grid-cell centre) according to the gateway timestamps (gtts) and gateway positions (gtws).
|
def get_position(gtws, gtts):
minx, miny, maxx, maxy = get_extent(gtws)
cellsize = 50
rows = int((maxy - miny) / cellsize) + 1 # +1 to cover whole area
cols = int((maxx - minx) / cellsize) + 1 # +1 to cover whole area
difference = float("inf")
bestx = 0
besty = 0
for row in range(rows):
for col in range(cols):
            currentx = minx + (col * cellsize)
            currenty = miny + (row * cellsize)
min_dif = float("inf")
differences = {}
for gtw in gtws:
differences[gtw] = math.hypot(currentx - gtws[gtw][0], currenty - gtws[gtw][1]) / 300
if differences[gtw] < min_dif:
min_dif = differences[gtw]
current_difference = 0
min_dif -= 1
for gtw in gtts:
differences[gtw] = differences[gtw] - min_dif
current_difference += abs(differences[gtw] - gtts[gtw].microseconds)
if (current_difference < difference):
bestx = currentx + (cellsize / 2)
besty = currenty + (cellsize / 2)
difference = current_difference
return bestx, besty
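A usage sketch with invented data: gateway positions (metres in a local grid) and per-gateway arrival-time offsets as datetime.timedelta objects, since the code reads their .microseconds attribute. The division by 300 above converts metres to microseconds at roughly the speed of light (about 300 m per microsecond). Note that get_position also needs `math` and the get_extent function defined earlier.

import math
from datetime import timedelta

gateways = {
    "gtw-a": (0.0, 0.0),
    "gtw-b": (900.0, 0.0),
    "gtw-c": (0.0, 900.0),
}
# Arrival-time offsets relative to the earliest gateway.
arrival_offsets = {
    "gtw-a": timedelta(microseconds=0),
    "gtw-b": timedelta(microseconds=2),
    "gtw-c": timedelta(microseconds=2),
}
x, y = get_position(gateways, arrival_offsets)
print(x, y)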
|
[
"def find_closest_image(self, img, imtype): \n obstime = datetime.datetime.strptime('%s-%s-%s %s' % ('2015', '01', '23', self.obs_dict[img][3]), '%Y-%m-%d %H:%M:%S')\n if obstime.hour < 12: ot = obstime + datetime.timedelta(days=1)\n keys = self.obs_dict.keys()\n keys.sort()\n best_time=1e5\n best_image = None\n for k in keys:\n if self.obs_dict[k][1]==imtype:\n ot = datetime.datetime.strptime('%s-%s-%s %s' % ('2015', '01', '23', self.obs_dict[k][3]), '%Y-%m-%d %H:%M:%S')\n if ot.hour < 12: ot = ot + datetime.timedelta(days=1)\n t = min((obstime-ot).seconds, (ot-obstime).seconds)\n if t < best_time:\n best_time = t\n best_img = k\n return best_img",
"def get_best_pixel(pixels):\n\n pixel_ave = get_average(pixels)\n red = pixel_ave[0]\n green = pixel_ave[1]\n blue = pixel_ave[2]\n\n count = -1\n mini = 0\n pixel_dist_min = get_pixel_dist(pixels[0], red, green, blue)\n\n for each in pixels:\n pixel_dist = get_pixel_dist(each, red, green, blue)\n count += 1\n if pixel_dist < pixel_dist_min:\n mini = count\n pixel_dist_min = pixel_dist\n\n best = pixels[mini]\n return best",
"def _get_completed_image(self, timestamp=0):\n closest_frame, closest_time, time_difference = None, None, sys.maxsize\n for frame, list_data in self._video_data.items():\n for data in list_data:\n if (data['index'] + 1) == data['total_segments']:\n if not closest_frame or abs(timestamp - data['timestamp']) < time_difference:\n closest_frame, closest_time, time_difference = frame, data['timestamp'], abs(timestamp - data['timestamp'])\n return closest_frame, closest_time, time_difference",
"def pixel_blender(sat_set_path, match_df):\r\n #Get the USAID point coordinates:\r\n pxl_points = match_df[['pxlX','pxlY']].values.tolist()\r\n #Get the mean and std one band at a time:\r\n for band in BAND_LIST:\r\n print(band, end='|', flush=True)\r\n #Load up the band:\r\n band_img = get_band(band,sat_set_path)\r\n #If the image is not on the 10980x10980 grid, use bilinear interpolation to blow it up:\r\n if band_img.shape[0]!=SAT_SIZE:\r\n band_img = scipy.misc.imresize(band_img,SAT_SIZE/band_img.shape[0],interp='bilinear',mode='F')\r\n #Crop the image around each USAID point and get distilled values for each:\r\n band_mean = []\r\n band_std = []\r\n for pair in pxl_points:\r\n x0 = int(np.round(pair[0]-VIEW_SIZE))\r\n x1 = int(np.round(pair[0]+VIEW_SIZE))\r\n y0 = int(np.round(pair[1]-VIEW_SIZE))\r\n y1 = int(np.round(pair[1]+VIEW_SIZE))\r\n img_crop = band_img[y0:y1,x0:x1]\r\n #Some points will be too close to the borders of the satellite image, so we can't get the whole crop:\r\n if img_crop.shape!=(2*VIEW_SIZE,2*VIEW_SIZE):\r\n print('ERROR:')\r\n print(img_crop.shape)\r\n band_mean += [np.mean(img_crop)]\r\n band_std += [np.std(img_crop)]\r\n #Check for strange problem with zeros:\r\n if np.mean(img_crop) < .1:\r\n print('Dark image problem!')\r\n #Put new features into dataframe:\r\n match_df[band+'_mean'] = band_mean\r\n match_df[band+'_std'] = band_std\r\n print(' -OK')\r\n return match_df",
"def obtain_tm_results(img, tpl, method) -> Tuple[int, Spot]:\r\n result = cv2.matchTemplate(img, tpl, method)\r\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\r\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\r\n similarity = abs(min_val - 1)\r\n position = min_loc\r\n else:\r\n similarity = max_val\r\n position = max_loc\r\n return similarity, Spot(position, tpl.shape)",
"def get_window(stats,g_speed,params):\n # Properties of trace\n s_0 = int((stats.npts-1)/2)\n dist = stats.sac.dist\n \n Fs = stats.sampling_rate\n n = stats.npts\n\n if g_speed is not None and params['hw'] is not None:\n \n # Find indices for window bounds\n ind_lo = int((dist/g_speed - params['hw'])*Fs) + s_0\n ind_hi = int((dist/g_speed + params['hw'])*Fs) + s_0\n ind_lo_n = ind_hi + int(params['sep_noise']*params['hw']*Fs)\n ind_hi_n = ind_lo_n + int(2*params['hw']*Fs)\n\n else:\n\n ind_lo = s_0\n ind_hi = n - 1\n ind_lo_n = s_0\n ind_hi_n = n-1\n \n \n # Checks..overlap, out of bounds\n scs = window_checks(ind_lo,ind_hi,ind_lo_n,ind_hi_n,n,params['win_overlap'])\n \n if scs:\n \n # Fill signal window\n win_signal = window(params['wtype'],n,ind_lo,ind_hi)\n # Fill noise window\n win_noise = window(params['wtype'],n,ind_lo_n,ind_hi_n)\n \n \n return win_signal, win_noise, scs\n\n else:\n return [],[],scs",
"def find_next_tile(tiles: [], # All the tiles that exist.\n used_tiles: [], # Tile that have already been placed into the image.\n image: dict, # Image that has been made so far.\n\n # Position in the image that we want to put the next tile into.\n x: int, y: int) \\\n -> (Tile, int): # Tile its variant number of a tile that fits into the (x, y).\n\n left_check_needed = (x != 0) # Need to check new tile matches tile on its left (if there is a tile on its left)\n top_check_needed = (y != 0) # Need to check new tile matches tile above (if there is a tile above it).\n\n for nt in tiles:\n if nt not in used_tiles: # Try every tile that hasn't already been used.\n lt_right_edge, tt_bottom_edge = '', ''\n\n if left_check_needed:\n lt, lt_variant = image[(x - 1, y)] # lt = tile to the left.\n lt_right_edge = lt.right[lt_variant]\n if top_check_needed:\n tt, tt_variant = image[(x, y - 1)] # tt = tile to the top.\n tt_bottom_edge = tt.bottom[tt_variant]\n\n if left_check_needed and top_check_needed:\n nt_variant = nt.left_and_top_match(left_pattern=lt_right_edge, top_pattern=tt_bottom_edge)\n\n elif left_check_needed:\n # Try to find a left edge in candidate new tile that matches right edge of tile to the left.\n nt_variant = nt.left_match(pattern=lt_right_edge)\n\n else:\n # Try to find a top edge in candidate new tile that matches bottom edge of tile to the top of it.\n nt_variant = nt.top_match(pattern=tt_bottom_edge)\n\n if nt_variant != -1: # Not -1 means a matching edge was found.\n return nt, nt_variant\n return None, None",
"def meter_to_pixel(src_points, dst_meter, image):\n # Get the image size\n (height, width) = image.shape[: 2]\n\n # --- Get the top down view of the entire pool --- #\n # Get the coordinates in pixel of dst_meter in the entire pull\n dst_pixel_full_pool = np.zeros((4, 2))\n # We take one meter from each side to be sure that we do not lose information\n # Hence : + 1 and 52 meters instead of 50 meters.\n dst_pixel_full_pool[:, 0] = (dst_meter[:, 0] + 1) * width / 52\n dst_pixel_full_pool[:, 1] = dst_meter[:, 1] * height / 25\n\n # Transformation of the original image\n homography = get_homography(src_points, dst_pixel_full_pool)\n top_down_image = get_top_down_image(image, homography)\n\n # --- Get the top down view of the pool that we can see --- #\n # Find the first column that is not black\n index_w = 0\n while index_w < width and np.sum(top_down_image[:, index_w]) == 0:\n index_w += 1\n left_column = index_w\n\n # Find the last column that is not black\n index_w = width - 1\n while index_w >= 0 and np.sum(top_down_image[:, index_w]) == 0:\n index_w -= 1\n right_column = index_w\n\n # Compute the extreme points\n # We add -1 since the top left point of the top down view of the full pool is [-1, 0]\n top_left = [left_column * 52 / width - 1, 0]\n bottom_right = [right_column * 52 / width - 1, 25]\n\n # Get the coordinates in pixel of dst_pixel_full_pool in the top down view of the pool that we can see\n dst_pixel_full_pool[:, 0] = (dst_pixel_full_pool[:, 0] - left_column) / (right_column - left_column) * width\n\n return dst_pixel_full_pool, [top_left, bottom_right]",
"def Screenshot(self, dt:datetime=datetime.now(), within_seconds:int=10) -> ChImage:\n timestamp = int(dt.timestamp() * 1000)\n \n # if less than 10 ms from time it's the latest time\n buffer_to_ts = timestamp\n if abs(int(time.time() * 1000) - timestamp) > 10:\n # otherwise we're searching withing_seconds/2 ahead\n logger.debug(\"searching 1/2 of withing seconds in the future\")\n buffer_to_ts = timestamp + int(within_seconds / 2 * 1000)\n\n min_found = None\n # seeks in the past 5 seconds (second per second)\n \n for stride in range(within_seconds):\n buffer_to_ts = buffer_to_ts - (stride * 1000)\n # reverse 1 second back in time\n buffer_from_ts = buffer_to_ts - (stride * 1000) - 1000\n logger.debug(\"querying stream \" + self.rtmp_video_stream + \"between \" + str(buffer_from_ts) + \" and \" + str(buffer_to_ts) + \", diff[ms]: \" + str(buffer_to_ts-buffer_from_ts))\n buffer = self.redis_conn.xrange(name=self.rtmp_video_stream, min=buffer_from_ts, max=buffer_to_ts, count=60)\n frames = Chunker(self.video_codec).frames(buffer)\n\n # find closest image to given timestamp\n min_diff = None\n for ts, frame in frames.items():\n if frame.pict_type.name == \"I\":\n logger.debug(\"found I frame at \" + str(ts))\n if min_found is None:\n d = frame.to_ndarray(format=\"bgr24\")\n chImage = ChImage(data=d, width=frame.width, height=frame.height, timestamp=ts)\n min_found = chImage\n min_diff = abs(timestamp - ts)\n continue\n\n diff = abs(timestamp - ts)\n if diff < min_diff:\n d = frame.to_ndarray(format=\"bgr24\")\n chImage = ChImage(data=d, width=frame.width, height=frame.height, timestamp=ts)\n min_diff = diff\n min_found = chImage\n if min_found is not None:\n break\n\n return min_found",
"def getNextFrame(self, frame=..., gtMask=...) -> Tuple[frame, gtMask]:\n ...",
"def get_time_station():\n # To transform latitude and longitude into kilometers\n a = 6378.136\n e = 0.006694470\n lat0 = 41.0\n lon0 = -123.0\n dx = (pi / 180.0) * a * cos(lat0 * pi / 180.0) / sqrt(1.0 - e * e * \\\n sin(lat0 * pi / 180.0) * sin(lat0 * pi / 180.0))\n dy = (3.6 * pi / 648.0) * a * (1.0 - e * e) / ((1.0 - e * e * sin(lat0 * \\\n pi / 180.0) * sin(lat0 * pi / 180.0)) ** 1.5)\n\n # Get the locations of the sources of the LFEs\n LFEloc = np.loadtxt('../data/Plourde_2015/templates_list.txt', \\\n dtype={'names': ('name', 'family', 'lat', 'lon', 'depth', 'eH', \\\n 'eZ', 'nb'), \\\n 'formats': ('S13', 'S3', np.float, np.float, np.float, \\\n np.float, np.float, np.int)}, \\\n skiprows=1)\n lats = np.zeros(len(LFEloc))\n lons = np.zeros(len(LFEloc))\n for ie in range(0, len(LFEloc)):\n lats[ie] = LFEloc[ie][2]\n lons[ie] = LFEloc[ie][3]\n xs = dx * (lons - lon0)\n ys = dy * (lats - lat0)\n\n # Get the locations of the stations\n staloc = pd.read_csv('../data/Plourde_2015/station_locations.txt', \\\n sep=r'\\s{1,}', header=None)\n staloc.columns = ['station', 'network', 'channels', 'location', \\\n 'server', 'latitude', 'longitude']\n\n # Get the origin time for each of the templates\n origintime = pickle.load(open('timearrival/origintime.pkl', 'rb'))\n\n slowness = {}\n # Loop on the stations\n for ir in range(0, len(staloc)):\n # Compute source-receiver distances\n distance = []\n maxEWlist = []\n maxNSlist = []\n maxUDlist = []\n timeEWlist = []\n timeNSlist = []\n timeUDlist = []\n # Loop on the templates\n for ie in range(0, len(LFEloc)):\n filename = LFEloc[ie][0].decode('utf-8')\n # Open time arrival files\n data = pickle.load(open('timearrival/' + filename +'.pkl', 'rb'))\n stations = data[0]\n maxEW = data[1]\n maxNS = data[2]\n maxUD = data[3]\n timeEW = data[4]\n timeNS = data[5]\n timeUD = data[6]\n # If the station was used for this template\n for i in range(0, len(stations)):\n if (stations[i] == staloc['station'][ir]):\n latr = staloc['latitude'][ir]\n lonr = staloc['longitude'][ir]\n xr = dx * (lonr - lon0)\n yr = dy * (latr - lat0)\n distance.append(sqrt((xr - xs[ie]) ** 2.0 + \\\n (yr - ys[ie]) ** 2.0))\n maxEWlist.append(maxEW[i])\n maxNSlist.append(maxNS[i])\n maxUDlist.append(maxUD[i])\n timeEWlist.append(timeEW[i] - origintime[filename])\n timeNSlist.append(timeNS[i] - origintime[filename])\n timeUDlist.append(timeUD[i] - origintime[filename])\n # Linear regression\n if (len(distance) > 0):\n x = np.reshape(np.array(distance + distance + distance), \\\n (3 * len(distance), 1))\n y = np.reshape(np.array(timeEWlist + timeNSlist + timeUDlist), \\\n (3 * len(distance), 1))\n w = list(map(lambda x : pow(x, 3.0), maxEWlist)) + \\\n list(map(lambda x : pow(x, 3.0), maxNSlist)) + \\\n list(map(lambda x : pow(x, 3.0), maxUDlist))\n w = np.array(w)\n regr = linear_model.LinearRegression(fit_intercept=False)\n regr.fit(x, y, w)\n y_pred = regr.predict(x)\n R2 = r2_score(y, y_pred)\n s = regr.coef_[0][0]\n # Plot\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, y, 'ko')\n plt.plot(x, y_pred, 'r-')\n plt.xlabel('Distance (km)', fontsize=24)\n plt.ylabel('Travel time (s)', fontsize=24)\n plt.title('{} - R2 = {:4.2f} - slowness = {:4.3f} s/km'.format( \\\n staloc['station'][ir], R2, s), fontsize=24)\n plt.savefig('timearrival/' + staloc['station'][ir] + \\\n '.eps', format='eps')\n plt.close(1)\n slowness[staloc['station'][ir]] = s\n return slowness",
"def getRTTbyTS(self, outtype=DATA_FLOAT, path=PATH_FORWARD):\n\t\t\n\t\tif path == PATH_FORWARD:\n\t\t\tldata = self.forward\n\t\t\tlack = self.backward\n\t\telse:\n\t\t\tlack = self.forward\n\t\t\tldata = self.backward\n\t\t\n\t\trtt = []\n\t\ttimes = []\n\t\t\n\t\tlastsentTS = None # float of TCP ts\n\t\tlastsentTSsent = None #datetime of pcap TS\n\t\ti = 0\n\t\tfor ts, p in lack:\n\t\t\t#Grab the timestamp!\n\t\t\t\n\t\t\t\n\t\t\topts = TCPOptions(p.opts)\n\t\t\tpktts = opts.get(dpkt.tcp.TCP_OPT_TIMESTAMP)\n\t\t\t\n\t\t\tif pktts == None:\n\t\t\t\t#print \"Na1\"\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif lastsentTS == None:\n\t\t\t\tlastsentTS = pktts[1]\n\t\t\t\tlastsentTSsent = ts\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif lastsentTS > pktts[1]:\n\t\t\t\t#print \"hey\"\n\t\t\t\tcontinue\n\t\t\tlastsentTS = pktts[1]\n\t\t\tlastsentTSsent = ts\n\t\t\t\n\t\t\t#for each one find corrisponding one?\n\t\t\t\n\t\t\twhile i < len(ldata):\n\t\t\t\t\n\t\t\t\ttss, ps = ldata[i]\n\t\t\t\t\n\t\t\t\toptss = TCPOptions(ps.opts)\n\t\t\t\tpkttss = optss.get(dpkt.tcp.TCP_OPT_TIMESTAMP)\n\t\t\t\t\n\t\t\t\tif pkttss == None:\n\t\t\t\t\tprint \"Na2\", i\n\t\t\t\t\ti += 1\n\t\t\t\t\tcontinue\n\t\t\t\tif pkttss[0] < pktts[1]:\n\t\t\t\t\ti += 1\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\ttd = ts - tss # Pcap of sent+echo TS dif\n\t\t\t\ttime = ts - self.origin #pcap to origin\n\t\t\t\tif outtype == DATA_FLOAT:\n\t\t\t\t\t\t\n\t\t\t\t\t\ttime = dfToFloat(time)\n\t\t\t\t#print \"Added\", i\n\t\t\t\trtt.append(dfToFloat(td))\n\t\t\t\ttimes.append(time)\n\t\t\t\t\n\t\t\t\tbreak\n\t\t\t\n\t\t\t\n\t\t\tif i >= len(ldata):\n\t\t\t\tprint \"Empty!\"\n\t\t\t\tbreak\n\t\t#print type(times[0]), type(rtt[0])\n\t\treturn times, rtt",
"def _get_tile(self):\r\n\r\n tile_url = \"https://mts1.google.com/vt/\"\r\n # tile_url = \"http://mt1.google.com/vt/\"\r\n params = {\r\n 'lyrs': 'y',\r\n 'x': self.x,\r\n 'y': self.y,\r\n 'z': self.zoom,\r\n 'src': 'app'}\r\n self.img = get_pic(requests.get(tile_url, params=params))\r\n return self.img",
"def find_tfl_lights(c_image: np.ndarray, **kwargs):\n threshold = 652350\n kernel = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, -255, -255, -255, -255, 0, 0, 0, 0],\n [0, 0, 0, -255, 255, 255, 255, 255, -255, 0, 0, 0],\n [0, 0, -255, 255, 255, 255, 255, 255, 255, -255, 0, 0],\n [0, -255, 255, 255, 255, -255, -255, 255, 255, 255, -255, 0],\n [0, -255, 255, 255, 255, -255, -255, -255, 255, 255, -255, 0],\n [0, -255, 255, 255, -255, -255, -255, 255, 255, 255, -255, 0],\n [0, 0, -255, 255, 255, 255, 255, 255, 255, 255, -255, 0],\n [0, 0, -255, 255, 255, 255, 255, 255, 255, -255, 0, 0],\n [0, 0, 0, -255, -255, 255, 255, 255, -255, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, -255, -255, -255, 0, 0, 0]]) # 12x11\n red_img = c_image[:, :, 0]\n red_x, red_y = calc_find_tfl(red_img, kernel, threshold)\n green_img = c_image[:, :, 1]\n threshold = 842350\n green_x, green_y = calc_find_tfl(green_img, kernel, threshold)\n\n return red_x, red_y, green_x, green_y",
"def getStarter():\r\n masker.loadPixels()\r\n while True:\r\n x, y = int(random(width)), int(random(height))\r\n p = masker.pixels[y*width + x]\r\n h, s, b = hue(p), saturation(p), brightness(p)\r\n if b < 0.1 and s < 0.1:\r\n break\r\n return x, y",
"def _find_brightest_star(search):\n \n i=0\n checked = np.zeros(len(search),dtype=bool)\n to_check = np.ones(len(search),dtype=bool)\n min_mags, min_ind = [], []\n while i<len(FILTER_COLUMNS):\n phot_vector = np.array(search[FILTER_COLUMNS[i]].filled(np.nan))\n phot_vector[~to_check] = np.nan\n no_phot = np.isnan(phot_vector)\n n_bad, n_good = np.sum(no_phot), np.sum(~no_phot)\n if n_good>0:\n ind = np.argmin(phot_vector[~no_phot])\n checked[~no_phot] = True\n to_check[~no_phot] = False\n true_ind = np.where(~no_phot)[0][ind]\n to_check[true_ind] = True\n min_mags.append(phot_vector[true_ind])\n min_ind.append(true_ind)\n if np.sum(checked)==len(search): break\n \n i+=1\n \n min_mags, min_ind = np.array(min_mags), np.array(min_ind)\n if len(min_ind)==0: return -1\n elif len(np.unique(min_ind))==1:\n return min_ind[0]\n else:\n i_min = np.argmin(min_mags)\n return min_ind[i_min]",
"def get_extent(gtws):\n\n minx = float(\"inf\")\n miny = float(\"inf\")\n maxx = float(\"-inf\")\n maxy = float(\"-inf\")\n\n for gtw in gtws:\n if gtws[gtw][0] < minx:\n minx = gtws[gtw][0]\n if gtws[gtw][0] > maxx:\n maxx = gtws[gtw][0]\n if gtws[gtw][1] < miny:\n miny = gtws[gtw][1]\n if gtws[gtw][1] > maxy:\n maxy = gtws[gtw][1]\n\n # print (minx, miny, maxx, maxy)\n return minx, miny, maxx, maxy",
"def track_NV_2D(self,template, size_x,size_y,scan_rate=200): \n # pattern match\n\n # Find NV through either LabView or Python\n # Scan image, say 10 x 10 um with the NV at center, 100 x 100 pts\n # store the image\n # use the center 5 x 5 um as a template (50 x 50 pts)\n templateX_pt = template.shape[1] # number of columns\n templateY_pt = template.shape[0] # number of rows\n\n startX_pt = int(np.floor(templateX_pt/4))\n endX_pt = int(np.ceil(templateX_pt*3/4))\n \n startY_pt = int(np.floor(templateY_pt/4))\n endY_pt = int(np.ceil(templateY_pt*3/4))\n\n row_center = startY_pt #(1+template_pt)/2\n col_center = startX_pt #(1+template_pt)/2 \n\n template_center = template[startY_pt:endY_pt, startX_pt:endX_pt]\n\n # use current x ,y\n \n \n# curr_x,curr_y = self.read_position('micron')\n# curr_x = self.volts_to_micron(self.position['x'], 'x')\n# curr_y = self.volts_to_micron(self.position['y'], 'y')\n curr_x, curr_y = self.return_position('um')\n self.go_to_position(curr_x,curr_y)\n xx1,yy1,FSM2D = self.scan_2D(curr_x,curr_y,x_size=size_x,y_size=size_y,mesh_x=templateX_pt,mesh_y=templateY_pt,scan_rate=scan_rate)\n # tracking\n # scan 10 x 10 um\n # use the 5 x 5 um image as a template and find maximum\n # compute the difference\n\n result = match_template(FSM2D, template_center)\n ij = np.unravel_index(np.argmax(result), result.shape)\n col,row = ij[::-1]\n\n dy = size_y/templateY_pt*(row_center-row)\n dx = size_x/templateX_pt*(col-col_center)\n# print(dx,dy)\n if abs(dx)>size_x or abs(dy)>size_y:\n print(\"tracked (dx, yy) = \", dx ,dy)\n print(\"moving back to old (x, y) = \", curr_x ,curr_y)\n # set the new current position\n self.position['x'], self.position['y'] = self.read_position()\n return (curr_x,curr_y) \n if curr_x+dx <-50 or curr_x+dx>50 or curr_y+dx <-50 or curr_y+dy>50:\n print(\"Out of range, tracked (x, y) = \", curr_x+dx ,curr_y+dy)\n print(\"moving back to old (x, y) = \", curr_x ,curr_y)\n # set the new current position\n# self.position['x'], self.position['y'] = self.read_position()\n self.position['x'], self.position['y'] = (self.micron_to_volts(curr_x,'x'),self.micron_to_volts(curr_y,'y'))\n# self.position_um['x'], self.position_um['y'] = (x,y)\n return (curr_x,curr_y)\n else: \n # go to new location\n self.go_to_position(curr_x+dx,curr_y+dy)\n # set the new current position\n# self.position['x'], self.position['y'] = self.read_position()\n self.position['x'], self.position['y'] = (self.micron_to_volts(curr_x+dx,'x'),self.micron_to_volts(curr_y+dy,'y'))\n# self.position_um['x'], self.position_um['y'] = (x,y)\n return (curr_x+dx,curr_y+dy)",
"def find_nearest_mono_idx(t: int, mts: List[List[float]], last_search_idx: int) -> int:\n\n del_t = 0.050 # the match must be within 50ms of t\n # First check if t is outside monocular frames but still within thr close\n # Check if t comes before monocular frames\n if t < mts[last_search_idx]:\n return last_search_idx if mts[last_search_idx]-t < del_t else -1\n # Check if t comes after monocular frames\n if t > mts[-1]:\n return len(mts)-1 if t-mts[-1] < del_t else -1\n # Otherwise search within monocular frames\n for i in range(last_search_idx, len(mts)-1):\n if t > mts[i] and t < mts[i+1]:\n idx = i if (t-mts[i]) < (mts[i+1]-t) else i+1\n idx = idx if abs(mts[idx]-t) < del_t else -1\n return idx\n return -1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function will find a solution for the customer based on a target price and the list of items previously set. It is intended to use self.target_price, but this can be overridden.
|
def make_suggestion(self, price_target=None):
if price_target is None:
price_target = self.target_price
elif _check_money(price_target):
price_target = Decimal(str(price_target))
else:
raise UserWarning("Bad price Target: %s!" % (price_target,))
if price_target == 0:
return []
if len(self.menu_items) == 0:
return []
    # In the rare case when every item price is a whole number (no cents),
    # we don't have to convert prices to integers. We spend time on this
    # check because it can greatly reduce the solution space.
multiply = 100
if(price_target % 1 == 0) and (
0 == len([x for x in self.menu_items if x.price % 1 != 0])):
multiply = 1
price_target *= multiply
    # We solve this like a standard knapsack problem using dynamic
    # programming and a bottom-up traversal of the solution space.
    # Solve time is n*r where r is the price_target.
    #
    # If memory is a concern, or we need every solution saved, the best
    # we can do is probably a backtrace tree, enumerating the multiple
    # item duplicates into individual items to reduce to a 0-1 knapsack.
    # That would be (n * r)(reduction time) -> (n * r) * r, or n*r^2.
    # That approach would often run faster because it does not solve the
    # entire space, as DP does; the worst case of no solution would be
    # much slower, however.
table = dict()
table[0] = 0
TableEntry = collections.namedtuple(
'TableEntry', 'menu_item back_pointer')
for item in self.menu_items:
price = item.price * multiply
if price_target not in table:
            for target in range(price, price_target + 1):
if target not in table and (target-price) in table:
#save the item, and the location of the last
#"optimal" solution
table[target] = TableEntry(item, target - price)
if price_target not in table:
return []
else:
#here we walk back across the table to generate the return
#list. Saving the full list each step above would be faster
#but much more memory intensive
solution_list = []
current_location = price_target
while current_location != 0:
solution_list.append(table[current_location].menu_item)
current_location = table[current_location].back_pointer
return solution_list
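
For illustration only, here is a minimal standalone sketch of the same bottom-up table walk, using plain integer prices (in cents) and invented item names; it omits the Decimal handling and the whole-number shortcut used above:

import collections

MenuItem = collections.namedtuple('MenuItem', 'name price')

def suggest(items, target_cents):
    # table[amount] -> (item that reached this amount, previous amount)
    TableEntry = collections.namedtuple('TableEntry', 'menu_item back_pointer')
    table = {0: 0}
    for item in items:
        for target in range(item.price, target_cents + 1):
            if target not in table and (target - item.price) in table:
                table[target] = TableEntry(item, target - item.price)
    if target_cents not in table:
        return []
    # walk the back pointers from the target down to zero
    solution, location = [], target_cents
    while location != 0:
        solution.append(table[location].menu_item)
        location = table[location].back_pointer
    return solution

# suggest([MenuItem('fruit', 215), MenuItem('fries', 275)], 490)
# -> [MenuItem(name='fries', price=275), MenuItem(name='fruit', price=215)]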
|
[
"def _match_item(self,item):\n\n\t\t#get all items for merchant\n\t\tmerchant_items=yield self._get_merchant_items(item.merchant[\"merchantId\"])\n\t\n\t\t#filter out items that do not have a propoer merchantItemId\n\t\tmerchant_items=[it for it in merchant_items if it.merchantItemId is not None and it.merchantItemId.strip()!=\"\" ]\t\n\n\t\t#set if this is a zero price item or not\n\t\titem_zero_price = float(item.pricePlusTax) == 0 \n\t\titem_nonzero_price = not item_zero_price\n\t\t\n\t\t#set as 'update for price' if the item was already on the system\n\t\titem_update = None\n\t\titem_update_zero_price = None\n\t\tfor it in merchant_items:\n\t\t\t if it.merchantItemId==item.merchantItemId:\n\t\t\t\titem_update=it\n\t\t\t\titem_update_zero_price = it if int(it.status)==CimriStatus.get_status(\"PAUSED_BY_CIMRI_ZERO_PRICE\").get_code() else None\n\n\t\t#set as new, if this is a new item\n\t\titem_new = item if item_update is None else None\n\t\t\n\t\t#update info for item\n\t\tif item_update is not None:\n\t\t\t#update info\n\t\t\titem_update.update(item)\n\n\t\t\t#update status\n\t\t\titem_update.status=CimriStatus(item_update.status).get_active_status().get_code()\n\n\t\t#if this is a new item, try finding the same merchant item by other merchants\n\t\titem_direct_match=None\n\t\tif item_new is not None:\t\t\t\n\t\t\titem_direct_match=yield self._match_direct(item_new, merchant_items)\n\n\t\t#if this is a new item and direct match did not work, try matching against the catalogue\n\t\titem_insert=None\n\t\tif item_direct_match is None and item_new is not None:\n\t\t\titem_insert=yield self._match_merchant_item(item_new) \n\n\t\t#update status for 0 price\n\t\tif item_zero_price:\n\t\t\t#item_update==item_zero_price\n\t\t\tif item_update is not None:\n\t\t\t\titem_update.status=CimriStatus.get_status(\"PAUSED_BY_CIMRI_ZERO_PRICE\").get_code()\n\n\t\t\t#item_direct_match==item_zero_price\n\t\t\tif item_direct_match is not None:\n\t\t\t\titem_direct_match.status=CimriStatus.get_status(\"PAUSED_BY_CIMRI_ZERO_PRICE\").get_code()\n\n\t\t\t#item_insert==item_zero_price\n\t\t\tif item_insert is not None:\n\t\t\t\titem_insert.status=CimriStatus.get_status(\"PAUSED_BY_CIMRI_ZERO_PRICE\").get_code()\n\n\t\t#update status if the price became non-zero\n\t\tif item_nonzero_price:\n\t\t\t#item_nonzero_price==item_update_zero_price\n\t\t\tif item_update_zero_price is not None:\t\t\t\n\t\t\t\t#if matched before, activate\n\t\t\t\tif item_update_zero_price.item is not None:\n\t\t\t\t\titem_update_zero_price.status=CimriStatus.get_status(\"ACTIVE\").get_code()\n\n\t\t\t\t#if not matched before and automatically matched:\n\t\t\t\telif item_update_zero_price.possibleSolrItem is not None:\n\t\t\t\t\titem_update_zero_price.status=CimriStatus.get_status(\"SOLR_MATCHED\").get_code()\n\n\t\t\t\t#otherwise:\n\t\t\t\telse:\n\t\t\t\t\titem_update_zero_price.status=CimriStatus.get_status(\"PAUSED_BY_CIMRI\").get_code()\n\n\t\t#make sure the merchantItemURl and pricePLusTax values are not null for items to be updated/inserted\n\n\t\t#return action and matched item\n\t\titem_matched=None\n\t\taction=None\n\t\tif item_update is not None:\n\t\t\titem_matched=item_update\n\t\t\taction=\"update\"\n\t\telif item_direct_match is not None:\n\t\t\titem_matched=item_direct_match\n\t\t\taction=\"match\"\n\t\telif item_insert is not None:\n\t\t\titem_matched=item_insert\n\t\t\taction=\"insert\"\n\t\t\n\t\tdefer.returnValue({\"meta.action\":action, \"data\":item_matched})",
"def cost(user_requirements, proposed_solution):",
"def solve_ce_prices_lp(V, matching, minimize=True, flag_debug_print=False):\n # Construct the prices LP\n ce_prices_lp = pulp.LpProblem('CEPrices', pulp.LpMinimize if minimize else pulp.LpMaximize)\n # Generate variables\n prices_vars = pulp.LpVariable.dicts(\"price\",\n [j for j in range(0, np.size(V, 1))],\n lowBound=0.0,\n cat='Continuous')\n # Generate objective\n ce_prices_lp += pulp.lpSum([prices_vars[j] for j in range(0, np.size(V, 1))])\n\n # Prices bounded below by 0\n for j in range(0, np.size(V, 1)):\n ce_prices_lp += prices_vars[j] >= 0.0\n\n # Generate market clearance constraints\n allocated_items = matching.values()\n for j in range(0, np.size(V, 1)):\n if j not in allocated_items:\n ce_prices_lp += prices_vars[j] == 0.0\n\n # Generate utility maximization constraints. These do not include I.R., which we do next.\n for i, j in it.product(range(0, np.size(V, 0)), range(0, np.size(V, 1))):\n if i not in matching:\n ce_prices_lp += 0 >= V[i][j] - prices_vars[j]\n elif j != matching[i]:\n ce_prices_lp += V[i][matching[i]] - prices_vars[matching[i]] >= V[i][j] - prices_vars[j]\n\n # Generate I.R. constraints.\n for i in range(0, np.size(V, 0)):\n if i in matching:\n ce_prices_lp += V[i][matching[i]] - prices_vars[matching[i]] >= 0\n\n # Solve LP and find solution\n ce_prices_lp.solve()\n final_prices = [prices_vars[j].varValue for j in range(0, np.size(V, 1))]\n if flag_debug_print:\n print(f'\\n\\nminimize = {minimize}, status = {pulp.LpStatus[ce_prices_lp.status]}')\n # print(ce_prices_lp)\n print(f'final_prices = {final_prices} \\n\\n')\n return final_prices",
"def compute_price(self):\n # Check if it is ok to call.\n if len(self._prices) <= 0:\n raise Exception(\"No buy and sell prices registered.\")\n # This is the current index\n idx = _bisect.bisect_left(self._prices,self._price_ref)\n p = _Inf\n \n # Check if we are lucky and already at a solution.\n # Then pick it.\n if self._balance == 0:\n p = (self._prices[idx-1] + self._prices[idx])/2.0\n else:\n # Otherwise search\n # Towards lower\n p_lo = self._search_price(idx,-1)\n # And towards higher.\n p_hi = self._search_price(idx, 1)\n # Pick the one closest to the current price.\n p = min(p_lo,p_hi, key = lambda p : abs(p - self._price_ref))\n if p == _Inf:\n raise Exception(\"No solution found. All registered prices distinct?\")\n self._price_ref = p\n return p",
"def _search_price(self,start_idx,d):\n if d not in set([-1,1]):\n raise ValueError(\"Search direction must be -1 or 1.\")\n idx = start_idx\n b = self._balance\n p = _Inf\n # If going in the positive direction, back up\n # one step due to the while loop checking\n # before add.\n if d == 1:\n idx -= 1;\n while b != 0 and idx+d >= 0 \\\n and idx+d < len(self._balance_differences):\n idx+=d\n b += d*self._balance_differences[idx]\n if b == 0 and idx+d >= 0 and idx+d < len(self._prices):\n p = (self._prices[idx] + self._prices[idx + d])/2.0\n return p",
"def getSolutionAtNextTime(self):",
"def searchSolutions(self):\n\n # Looking up solutions\n self.solver.NewSearch(self.db)\n\n soln = 0\n\n while self.solver.NextSolution():\n # show solutions on console\n soln = soln + 1\n r= self.showSolutionToScreen(soln, self.cost.Value(), self.shifts)\n if (r == 0):\n break\n if not(self.solver.NextSolution()):\n print(\"No se han encontrado soluciones!\")\n self.solver.EndSearch()",
"def greedy_selection(candidate_compositions):\n\n \"\"\"If only one candidate return that one\"\"\"\n if len(candidate_compositions) == 1:\n print(\"\\tgreedly seelected the only candidate\")\n return candidate_compositions[0]\n\n best_candidates = []\n lowest_cost = float('inf')\n\n print(\"Choosing greedly one composition...\")\n\n for composition in candidate_compositions:\n cost = 0\n for component in composition:\n cost += component.cost()\n \"\"\"Adding a cost for the number of components\"\"\"\n cost += 0.1\n if cost < lowest_cost:\n best_candidates = [composition]\n elif cost == lowest_cost:\n best_candidates.append(composition)\n\n if len(best_candidates) == 1:\n print(\"\\tgreedly seelected the best candidate based on cost\")\n return best_candidates[0]\n\n else:\n \"\"\"Keep score of the candidates\"\"\"\n\n \"\"\"Dict: candidate_id -> points\"\"\"\n candidates_points = {}\n for candidate in best_candidates:\n candidates_points[tuple(candidate)] = 0\n\n print(\"Generating pairs for all \" + str(len(best_candidates)) + \" candidates\")\n candidate_pairs = it.combinations(best_candidates, 2)\n\n n_comparisons = 0\n for candidate_a, candidate_b in candidate_pairs:\n\n contract_a = Contract()\n contract_b = Contract()\n\n for component_a in candidate_a:\n contract_a.add_assumptions(component_a.get_list_assumptions())\n contract_a.add_guarantees(component_a.get_list_guarantees())\n\n for component_b in candidate_b:\n contract_b.add_assumptions(component_b.get_list_assumptions())\n contract_b.add_guarantees(component_b.get_list_guarantees())\n\n try:\n is_refinement_correct(contract_a, contract_b)\n candidates_points[tuple(candidate_a)] += 1\n except:\n candidates_points[tuple(candidate_b)] += 1\n\n n_comparisons += 1\n\n print(str(n_comparisons) + \" comparisons have been made\")\n \"\"\"Extract the candidate with the highest score (the most refined)\"\"\"\n best_candidate = max(candidates_points.items(), key=operator.itemgetter(1))[0]\n\n print(\"\\tgreedly seelected the best candidate based on biggest assumption set\")\n return list(best_candidate)",
"def calculate(self):\n for company in self.active_offers:\n operations = self.active_offers[company]\n sellers = [seller for seller in operations if seller[2] < 0]\n buyers = [buyer for buyer in operations if buyer[2] > 0]\n prices = []\n sellers.sort(key=lambda x: float(x[1]))\n for seller in sellers:\n for buyer in buyers:\n if buyer[1] >= float(seller[1]):\n sell = abs(seller[2])\n buy = buyer[2]\n if sell > buy:\n quant = sell - buy\n else:\n quant = sell\n\n prices.append(seller[1])\n if seller[0] is None:\n if buyer[0].money >= buyer[1] * buyer[2]:\n seller[2] += quant\n buyer[0].money -= quant * float(seller[1])\n buyer[2] -= quant\n buyer[0].stocks[company] += quant\n else:\n if buyer[0].money >= buyer[1] * buyer[2]:\n seller[0].money += quant * float(seller[1])\n seller[2] += quant\n seller[0].stocks[company] -= quant\n buyer[0].money -= quant * float(seller[1])\n buyer[2] -= quant\n buyer[0].stocks[company] += quant\n\n if buyer[2] == 0:\n buyers.remove(buyer)\n\n if seller[2] == 0:\n sellers.remove(seller)\n\n del self.basic[company][0]\n if len(prices) > 0:\n self.basic[company].append(min(prices))\n else:\n self.basic[company].append(self.basic[company][-1])",
"def ideal_use_calculation(self):\n\n\t\tdemands = cvx.Variable(self._num_timesteps)\n\t\tmin_demand = cvx.Parameter()\n\t\tmax_demand = cvx.Parameter()\n\t\ttotal_demand = cvx.Parameter()\n\t\tprices = cvx.Parameter(self._num_timesteps)\n\n\t\tmin_demand = self.min_demand\n\t\tmax_demand = self.max_demand\n\t\ttotal_demand = self.total_demand\n\t\tprices = self.prices\n\t\tconstraints = [cvx.sum(demands, axis=0, keepdims=True) == total_demand]\n\t\t# constraints = [np.ones(self._num_timesteps).T * demands == total_demand]\n\t\tfor i in range(self._num_timesteps):\n\t\t\tconstraints += [demands[i] <= max_demand]\n\t\t\tconstraints += [min_demand <= demands[i]]\n\t\t\t# if i != 0:\n\t\t\t# \tconstraints += [cvx.abs(demands[i] - demands[i-1]) <= 100]\t\n\n\n\t\tobjective = cvx.Minimize(demands.T * prices)\n\t\tproblem = cvx.Problem(objective, constraints)\n\n\t\tproblem.solve(solver = cvx.OSQP, verbose=False)\n\t\treturn np.array(demands.value)",
"def search_price(self, search_term: str) -> int:\n\n def _get_row(\n targets: List[EntityAnnotation],\n search_term: str,\n orientation: str = Orientations.VERTICAL\n ) -> List[str]:\n \"\"\"\n Get row containing search term and corresponding price.\n \"\"\"\n ADJUSTMENT = 0.98\n RESULT_LENGTH_LIMIT = 5\n\n if not targets:\n print(\"No targets\")\n return []\n\n # Gets last coord because more likely to be total\n coords = {\n \"y\": targets[-1].bounding_poly.vertices[0].y,\n \"x\": targets[-1].bounding_poly.vertices[0].x\n }\n\n match orientation:\n case Orientations.VERTICAL:\n height = coords[\"y\"]\n case Orientations.HORIZONTAL:\n height = coords[\"x\"]\n\n adjusted_up = height / ADJUSTMENT\n adjusted_down = height * ADJUSTMENT\n\n results = []\n for line in self._text[1:]:\n description = line.description\n line_height = line.bounding_poly.vertices[0].y\n\n if orientation == Orientations.HORIZONTAL:\n line_height = line.bounding_poly.vertices[0].x\n if (\n line_height > adjusted_down\n and line_height < adjusted_up\n and search_term not in description\n ):\n results.append(description)\n\n if (\n len(results) >= RESULT_LENGTH_LIMIT \n and orientation == Orientations.VERTICAL\n ):\n return _get_row(targets, search_term, orientation=Orientations.HORIZONTAL)\n\n return results\n\n\n def _extract_value(row: List[str]) -> int:\n \"\"\"\n Attempts to extract an integer value from a row.\n \"\"\"\n for element in row:\n try:\n return int(element.replace(\" \", \"\").replace(\",\", \"\"))\n except ValueError:\n continue\n return 0\n\n targets: List[EntityAnnotation] = []\n\n for line in self._text[1:]:\n if search_term in line.description:\n targets.append(line)\n\n if not targets:\n print(f\"{search_term} not found.\")\n print({\"DEBUG\": self._text[0].description})\n\n row = _get_row(targets, search_term)\n total = _extract_value(row)\n return total",
"def search_market(self, bg_set, risk=0):\n if risk:\n # If the player is risk averse, the player wishes to first buy up to the goal amount\n action = 0\n product_targets = [product for product, amount in self.goal.items()\n if self.inventory[product][0] < amount]\n # If this is empty, the player has reached the goal amount, and wishes to sell\n # The player will sell any excess stock\n if not product_targets:\n action = 1\n product_targets = [product for product in self.profit_order\n if self.excess_stock(product)]\n # If this is also empty, the player has only the minimum amount of goods\n # The player must then choose to buy\n if not product_targets:\n action = 0\n product_targets = self.profit_order\n\n # If the player is not risk averse, the player will choose to buy or sell according to\n # profit amounts\n else:\n action = 1\n product_targets = [product for product in self.profit_order\n if self.inventory[product][0] > 0]\n for product, stats in self.price_stats.items():\n if self.gold > stats[2] and self.inventory[product][0] == 0:\n action = 0\n product_targets.append(product)\n break\n\n # The player's target region is still one without black or grey markets, since the purchase\n # function will empty our gold coffers completely, and we must avoid going into the\n # negative.\n target_region = set(self.market_prices.keys()) - bg_set\n possible_targets = set()\n if product_targets:\n for market in target_region:\n for product in product_targets:\n market_price = self.market_prices[market][product][0]\n if action == 0:\n # get the 25th percentile price of this product to buy\n curr_price = self.price_stats[product][2]\n\n # check if market is not blacklisted for this product\n if market not in self.blacklist[product]:\n if market_price < curr_price:\n possible_targets.add(market)\n else:\n # get the 75th percentile price of this product to sell\n curr_price = self.price_stats[product][1]\n if market_price > curr_price:\n possible_targets.add(market)\n\n # calculate the distances to these markets\n dist_to_target = {market: len(self.get_path_to(market, bg_set))\n for market in possible_targets if market}\n if dist_to_target:\n target_market = min(dist_to_target, key=dist_to_target.get)\n else:\n target_market = None\n\n return target_market",
"def solve(self):\n self._cp_solver.SearchForAllSolutions(self._cp_model, self._solution_printer)\n print('Total Solutions: %i' % self._solution_printer.count())",
"def find_matched_clothes(self, source_item, k):\n pass",
"def getBestSolutionValue(self) -> float:",
"def profit_max(self, target_market, buy, sell, prices, bg_set, risk=0):\n # If the player knows the prices of the current market, the player proceeds with deciding what todo\n # If the player doesn't know, the player will research the market.\n if prices:\n # If the statistics of the player demands it, buy will be not empty\n # The player then checks if he can afford to buy anything at this market\n # If he can, he buys for profit.\n if buy and self.afford_anything(prices, buy):\n return self.profit_buy(prices, buy, bg_set)\n\n # Otherwise, the player then checks if he has any excess stock if \n # the current market is statistically relevant to sell.\n # The player then chooses to sell according to the current risk taking\n # behaviour.\n elif sell and self.any_excess(sell):\n return self.profit_sell(prices, sell, bg_set, risk)\n\n # The player then decides if a target market has been acquired as a result\n # of a previous search, and moves to that market to make a purchase decision.\n elif target_market:\n self.target_loc = target_market\n return self.move_to_buy(prices, buy, bg_set)\n\n # Otherwise, the player needs more market information, and must therefore\n # wander the map\n else:\n return self.wander(prices, bg_set)\n\n return Command.RESEARCH, None",
"def incumbent_firm(self, price): \n\n # a. initialize \n VF_old = np.zeros(self.Nz)\n VF = np.zeros(self.Nz)\n \n # b. solve the static firm problem\n firm_profit, firm_output, pol_n = self.static_profit_max(price)\n \n # c. given prices and hiring decision, iterate on incumbent firm vf\n for it in range(self.maxit):\n \n VF = firm_profit + self.beta * np.dot(self.pi, VF_old).clip(min=0)\n \n dist = np.abs(VF_old - VF).max()\n \n if dist < self.tol :\n break\n \n VF_old = np.copy(VF)\n\n # d. enter/stay in the market policy function \n pol_enter = np.ones(self.Nz)*(VF>0)\n \n # e. productivity exit threshold\n idx = np.searchsorted(pol_enter, 1) #index of self.pol_enter closest to one on the left\n exit_cutoff = self.grid_z[idx]\n \n # f. alternative way to do steps d and e\n #avg_VF = np.dot(self.pi, VF)\n #idx = np.searchsorted(avg_VF, 0) #index of avg_VF closest to zero on the left\n \n #exit_cutoff = self.grid_z[idx]\n #pol_exit = np.where(self.grid_z < exit_cutoff, 1, 0)\n #pol_enter = 1 - pol_exit\n\n return VF, firm_profit, firm_output, pol_n, pol_enter, exit_cutoff",
"def optimize_mealplan(meals, dietary_constraints, *, meals_limits=None, params=None):\n\n # =============================================================================\n # PARSE INPUT ARGUMENTS\n # =============================================================================\n\n assert isinstance(meals, (list, tuple))\n assert isinstance(dietary_constraints, (dict,))\n assert isinstance(params, (dict,))\n assert (meals_limits is None) or isinstance(meals_limits, (list, tuple))\n\n meals = meals.copy()\n dietary_constraints = dietary_constraints.copy()\n\n if meals_limits is None:\n meals_limits = [(None, None) for meal in meals]\n\n if params is None:\n params = dict()\n\n # Get parameters\n num_days = params.get(\"num_days\", 1)\n num_meals = params.get(\"num_meals\", 4)\n time_limit_secs = params.get(\"time_limit_secs\", 10)\n\n # A small number such as 0.001. x_ij >= EPSILON <=> z_ij = 1\n EPSILON = params.get(\"epsilon\", 1e-3)\n\n # These weights found to be good by in experiments\n weight_price = params.get(\"weight_price\", 0.1)\n weight_nutrients = params.get(\"weight_nutrients\", 2.0)\n weight_range = params.get(\"weight_range\", 0.75)\n\n # Used for normalization of the cost associated with price\n expected_daily_price = params.get(\"expected_daily_price\", 75)\n\n M1 = params.get(\"M1\", 20) # Upper bound on x_ij\n M2 = params.get(\n \"M2\", 20\n ) # Upper bound on x[i][j] * meal.kcal, i.e. calories in a meal\n\n # A strange bug is that sometime the optimizer will return INFEASIBLE on attempt #1,\n # but calling this function again with the same inputs works. So we allow calling it\n # two times. This is very hacky and not suitable for production, but it works for\n # 'hobby usage'.\n first_call = params.get(\"first_call\", True)\n\n # Create a solver and an objective function\n solver = pywraplp.Solver(\"meals\", pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n solver.set_time_limit(time_limit_secs * 1000)\n objective_function = 0\n INF = solver.infinity()\n\n # =============================================================================\n # ERROR CHECKING AND USER INPUT SANITATION\n # =============================================================================\n\n # Error checking\n meals_total = num_days * num_meals\n maximum_limit = sum(9999 if high is None else high for (low, high) in meals_limits)\n if maximum_limit < meals_total:\n msg = f\"Cannot achieve {meals_total} totals meals with a total of {maximum_limit} meals.\"\n raise RuntimeError(msg)\n\n allowed_macros = (\"kcal\", \"protein\", \"fat\", \"carbs\")\n assert all(key in allowed_macros for key in dietary_constraints.keys())\n\n assert len(meals_limits) == len(meals)\n\n # =============================================================================\n # CREATE VARIABLES\n # =============================================================================\n\n x = [[None for j in range(num_days)] for i in range(len(meals))]\n z = [[None for j in range(num_days)] for i in range(len(meals))]\n\n # Loop over every combination of meal and days, create variables\n for i, meal in enumerate(meals):\n for j in range(num_days):\n z[i][j] = solver.IntVar(0, 1, f\"z_{i}{j}\")\n\n if meal.discrete:\n x[i][j] = solver.IntVar(0, INF, f\"x_{i}{j}\")\n else:\n x[i][j] = solver.NumVar(0, INF, f\"x_{i}{j}\")\n\n # These constraints ensure that z_ij = 1 iff x_ij >= EPSILON\n solver.Add(EPSILON * z[i][j] <= x[i][j])\n eps = EPSILON / 10\n solver.Add(x[i][j] <= (M1 + eps) * z[i][j] + EPSILON - eps)\n\n # 
=============================================================================\n # CREATE CONSTRAINTS / OBJECTIVE FUNCTION TERM\n # =============================================================================\n\n # OBJECTIVE FUNCTION TERM 1: Total price of the meals in the program\n denom = expected_daily_price * num_days\n for j in range(num_days):\n daily_price = sum(x[i][j] * meal.price for i, meal in enumerate(meals))\n objective_function += (weight_price / denom) * daily_price\n\n # OBJECTIVE FUNCTION TERM 2: Deviation from nutrients (on a daily basis)\n for j in range(num_days):\n for macro, (low, high) in dietary_constraints.items():\n\n # No point in adding any constraints if it's None\n if low is None and high is None:\n continue\n\n food_macros = [getattr(meal, macro) for meal in meals]\n\n # Create the sum: sum_i food_i * macro_i\n x_meals = [x[i][j] for i in range(len(meals))]\n total_macro = sum(c * x for x, c in zip(x_meals, food_macros))\n\n # The maximal deviation in a day is approx mean([low, high]) * nutrients\n # The maximal deviation is the above times the number of days\n denom = statistics.mean(\n [value for value in [low, high] if value is not None]\n )\n denom = denom * num_days # * len(dietary_constraints)\n\n # Slack variables related to the lower limit. Only \"undershooting\" is penalized.\n if low is not None:\n low_positive = solver.NumVar(0, INF, \"over_low_lim_\" + macro + str(j))\n low_negative = solver.NumVar(0, INF, \"under_low_lim_\" + macro + str(j))\n solver.Add(total_macro + low_positive - low_negative == low)\n objective_function += (weight_nutrients / denom) * low_positive\n\n # Slack variables related to the upper limit. Only \"overshooting\" is penalized.\n if high is not None:\n high_positive = solver.NumVar(0, INF, \"over_up_lim_\" + macro + str(j))\n high_negative = solver.NumVar(0, INF, \"under_upp_lim_\" + macro + str(j))\n solver.Add(total_macro + high_positive - high_negative == high)\n objective_function += (weight_nutrients / denom) * high_negative\n\n # OBJECTIVE FUNCTION TERM 3: Minimal range on calories (on a daily basis)\n for j in range(num_days):\n lower = solver.NumVar(0, INF, f\"lower_kcal_{j}\")\n upper = solver.NumVar(0, INF, f\"upper_kcal_{j}\")\n\n for i, meal in enumerate(meals):\n\n solver.Add(lower <= x[i][j] * meal.kcal + (1 - z[i][j]) * M2)\n solver.Add(upper >= x[i][j] * meal.kcal)\n\n # The maximal spread per day is approximately mean([kcal_low, kcal_high]) / meals\n # The maximal spread is the above times the number of days. 
Normalize w.r.t this\n denom = statistics.mean(\n [value for value in dietary_constraints[\"kcal\"] if value is not None]\n )\n denom = denom * num_days / num_meals\n objective_function += (weight_range / denom) * (upper - lower)\n\n # HARD CONSTRAINT 1 : Number of meals per day\n for j in range(num_days):\n solver.Add(sum(z[i][j] for i in range(len(meals))) == num_meals)\n\n # HARD CONSTRAINT 2: Number of times a food is used\n for i, (meal, (low, high)) in enumerate(zip(meals, meals_limits)):\n\n times_used = sum(z[i])\n assert len(z[i]) == num_days\n\n # Add lower limit\n if low is not None:\n if low > num_days:\n msg = f\"Lower limit on '{meal.name}' is {low}, but there are {num_days} days.\"\n raise RuntimeError(msg)\n solver.Add(times_used >= low)\n\n # Add upper limit\n if high is not None:\n solver.Add(times_used <= high)\n\n # =============================================================================\n # SOLVE THE OPTIMIZATION PROBLEM\n # =============================================================================\n\n # Minimize the deviation from the goal\n solver.Minimize(objective_function)\n result_status = solver.Solve()\n if result_status == solver.INFEASIBLE:\n\n if first_call:\n params[\"first_call\"] = False\n return optimize_mealplan(\n meals=meals,\n dietary_constraints=dietary_constraints,\n meals_limits=meals_limits,\n params=params,\n )\n\n else:\n raise RuntimeError(\"Infeasible problem.\")\n\n assert solver.VerifySolution(1e-7, True)\n\n # =============================================================================\n # POSTPROCESS THE SOLUTION AND RETURN\n # =============================================================================\n\n # Parse the variables and get the solution values\n for j in range(num_days):\n for i, meal in enumerate(meals):\n x[i][j] = x[i][j].solution_value()\n z[i][j] = z[i][j].solution_value()\n\n # Food is chosen\n if z[i][j] > 0.5:\n # If the food is chosen, x_ij is no smaller than epsilon\n x[i][j] = max(x[i][j], EPSILON)\n else:\n x[i][j] = 0\n\n # Compute the total price\n total_price = 0\n for j in range(num_days):\n daily_price = sum(x[i][j] * meal.price for i, meal in enumerate(meals))\n total_price += daily_price\n\n return (\n x,\n {\n \"obj_func_value\": round(solver.Objective().Value(), 6),\n \"wall_time\": round(solver.wall_time() / 1000, 3),\n \"iterations\": solver.iterations(),\n \"total_price\": round(total_price, 1),\n },\n )",
"def splitting_algorithm(target_trade_size, price_estimator, delta=0.005, exclude_dexs=[], precision=4):\n\n # get a ranking of dexs by lowest cost at target_trade_size\n sorted_dexs = price_estimator.dexs_ranked_by_cost(target_trade_size)\n if exclude_dexs: sorted_dexs = [d for d in sorted_dexs if d not in exclude_dexs]\n\n # start with baseline candidate: 1 DEX with the lowest cost at target_trade_size\n best_new_candidate = {sorted_dexs[0]: target_trade_size}\n\n # loop over remaining DEXs adding some amount of each as long as cost gets lower\n for dex in sorted_dexs[1:]:\n best_candidate_for_dex = None\n # Iterate over all j because the slippage cost function has multiple local minima\n for j in range(1,100):\n frac = j/100\n # create a new candidate with frac allocated to the new dex and (1 - frac) allocated to the the existing ones\n new_candidate = { existing_dex: round(alloc*(1-frac),precision) for existing_dex,alloc in best_new_candidate.items() }\n new_alloc = round(target_trade_size - sum(new_candidate.values()), precision)\n new_candidate[dex] = new_alloc # i.e. frac * target_trade_size\n\n # Find the candidate with the minimum cost (no delta) for this exchange. If it beats\n # the existing best by delta, then we'll add the exchange to the best solution\n if price_estimator.is_better(new_candidate, best_candidate_for_dex):\n best_candidate_for_dex = new_candidate\n\n # If the best candidate for this dex beats the best overall by more than delta then\n # make it the best overall\n if price_estimator.is_better(best_candidate_for_dex, best_new_candidate, delta):\n best_new_candidate = best_candidate_for_dex\n\n return best_new_candidate"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Upgrade a (possibly old) state dict for new versions of fairseq.
|
def upgrade_state_dict(self, state_dict):
return state_dict
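
The base implementation is a passthrough. As a hypothetical sketch (key names invented for illustration), a subclass might override it to migrate renamed parameters from older checkpoints:

def upgrade_state_dict(self, state_dict):
    # Hypothetical migration: move a legacy key to its new name if present.
    if 'decoder.embed_out' in state_dict and 'decoder.output_projection.weight' not in state_dict:
        state_dict['decoder.output_projection.weight'] = state_dict.pop('decoder.embed_out')
    return state_dict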
|
[
"def upgrade_state_dict_named(self, state_dict, name):\n if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):\n weights_key = \"{}.embed_positions.weights\".format(name)\n if weights_key in state_dict:\n del state_dict[weights_key]\n state_dict[\n \"{}.embed_positions._float_tensor\".format(name)\n ] = torch.FloatTensor(1)\n\n if f\"{name}.output_projection.weight\" not in state_dict:\n if self.share_input_output_embed:\n embed_out_key = f\"{name}.embed_tokens.weight\"\n else:\n embed_out_key = f\"{name}.embed_out\"\n if embed_out_key in state_dict:\n state_dict[f\"{name}.output_projection.weight\"] = state_dict[\n embed_out_key\n ]\n if not self.share_input_output_embed:\n del state_dict[embed_out_key]\n\n for i in range(self.num_layers):\n # update layer norms\n layer_norm_map = {\n \"0\": \"self_attn_layer_norm\",\n \"1\": \"encoder_attn_layer_norm\",\n \"2\": \"final_layer_norm\",\n }\n for old, new in layer_norm_map.items():\n for m in (\"weight\", \"bias\"):\n k = \"{}.layers.{}.layer_norms.{}.{}\".format(name, i, old, m)\n if k in state_dict:\n state_dict[\n \"{}.layers.{}.{}.{}\".format(name, i, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n version_key = \"{}.version\".format(name)\n if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:\n # earlier checkpoints did not normalize after the stack of layers\n self.layer_norm = None\n self.normalize = False\n state_dict[version_key] = torch.Tensor([1])\n\n return state_dict",
"def backwards_state_dict(state_dict):\n # List of layer names to change\n changes = (('model.model', 'model'),\n ('pose_network', 'pose_net'),\n ('disp_network', 'depth_net'))\n # Iterate over all keys and values\n updated_state_dict = OrderedDict()\n for key, val in state_dict.items():\n # Ad hoc changes due to version changes\n key = '{}.{}'.format('model', key)\n if 'disp_network' in key:\n key = key.replace('conv3.0.weight', 'conv3.weight')\n key = key.replace('conv3.0.bias', 'conv3.bias')\n # Change layer names\n for change in changes:\n key = key.replace('{}.'.format(change[0]),\n '{}.'.format(change[1]))\n updated_state_dict[key] = val\n # Return updated state dict\n return updated_state_dict",
"def _update_state_key(self, old_state_key, action, elapsed_time):\n pass",
"def update_state_from_model(\n cls, versioned_question_state, current_state_schema_version):\n versioned_question_state['state_schema_version'] = (\n current_state_schema_version + 1)\n\n conversion_fn = getattr(cls, '_convert_state_v%s_dict_to_v%s_dict' % (\n current_state_schema_version, current_state_schema_version + 1))\n\n versioned_question_state['state'] = conversion_fn(\n versioned_question_state['state'])",
"def update_state(self):\n self.state = self.new_state",
"def load_state_dict(self, state_dict: Dict):",
"def update(self, previous_state, sess=None):\n user_da = previous_state['user_da'][-1]\n new_inform_slots = user_da['inform'].keys()\n current_slots_inform = copy.deepcopy(previous_state['current_slots']['inform_slots'])\n # current_slots = copy.deepcopy(previous_state['current_slots'])\n for slot in new_inform_slots:\n current_slots_inform[slot] = new_inform_slots['slot']\n\n new_state = copy.deepcopy(previous_state)\n new_state['belief_state']['inform_slots'] = current_slots_inform\n kb_result_dict = self.kb_query.query(new_state)\n new_state['kb_result_dict'] = kb_result_dict\n return new_state",
"def load_state(self, state):\r\n \r\n self.baselearner.load_state_dict(state)",
"def update_state(self):\n self.oldstate = self.state\n if self.localtime in self.timeline and state[self.timeline[self.localtime]] != self.state:\n self.state = state[self.timeline[self.localtime]]\n self.flags = NEED_RESCHED\n #print \"%s[%d]: Cambio de estado: %s -> %s\" % (self.name, self.localtime, self.oldstate, self.state)\n else:\n self.flags = None",
"def _update_source_model(self, source_name, state_dict):\n logger.info(f\"Updating state dict of the {source_name} model.\")\n self.get_source_model(source_name).load_state_dict(state_dict)",
"def _update_reward(self, old_state_key, action, elapsed_time, new_state_key):\n pass",
"def reorder_decoder_incremental_state(\n self, incremental_state: Dict[int, dict], inds: torch.Tensor\n ) -> Dict[int, dict]:\n incremental_state = fix_incremental_state(\n self.generation_model, incremental_state\n )\n if not incremental_state:\n return incremental_state\n return {\n idx: layer.reorder_incremental_state(incremental_state[idx], inds)\n for idx, layer in enumerate(self.seq2seq_decoder.layers)\n }",
"def createNewState(self,name):\n self.state[\"name\"]=name\n self.state[\"version\"]=1\n self.state[\"asics\"]=[]",
"def _convert_state_v43_dict_to_v44_dict(cls, question_state_dict):\n question_state_dict['card_is_checkpoint'] = False\n return question_state_dict",
"def set_old_state(self):\n self.old_state = self.state",
"def updateStates(self):\n self.Automaton.states = []\n for key in list(self.Automaton.table):\n self.Automaton.states.append(key)",
"def update_state(self, new_state, *args, **kwargs):\n raise NotImplementedError(\"An equipment definition must implement \"\n \"update_state\")",
"def change(self, new_dict):\n self.dict = new_dict",
"def set_state(self, state_dict: Mapping[ModuleID, Any]) -> None:\n for module_id, state in state_dict.items():\n self._rl_modules[module_id].set_state(state)",
"def apply_state(self, state):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generates initials for a person's or organization's name. The name can be a string or a list. If passed as a list, supply the names in the desired order of initials, such as [first, last]. If an element of that list contains multiple names (e.g. a middle name or multiple last names), those names will also be taken into account.
|
def generate_initials(name, max_initials=2):
if not name:
return None
if isinstance(name, str):
name_split = name.split(' ', max(max_initials - 1, -1))
name_split.insert(0, '_recursive')
initials = generate_initials(name_split, max_initials)
elif isinstance(name, list):
if name[0] == '_recursive':
del name[0]
else:
name = ' '.join(name).split(' ', max(max_initials - 1, -1))
if len(name) > 1:
initials = name.pop(0)[0]
s_n = name.pop(-1)[0]
name_count = 2
if initials:
for s in name:
if name_count < max_initials - 1:
initials += s[0]
name_count += 1
else:
break
if name_count <= max_initials:
initials += s_n
else:
initials = name[0][0]
initials = initials.upper()
else:
raise Exception("Name must be a string or a list of strings.")
return initials
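
A few illustrative calls, hand-traced against the function above (the default max_initials of 2 keeps the first and last initials):

assert generate_initials("Jane Doe") == 'JD'
assert generate_initials(["Ada", "Lovelace"]) == 'AL'
assert generate_initials("Cher") == 'C'
assert generate_initials("") is None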
|
[
"def get_initials(fullname):\r\n # TODO your code here\r\n # Make name uppercase\r\n names = fullname.upper()\r\n # Separate into different words\r\n names = names.split()\r\n initials = \"\"\r\n for name in names:\r\n initials += name[0]\r\n return initials",
"def initials(self) -> str:\n # pylint does not recognize a CharField object as being an instance of a string\n initials_list = [\n name_part[0]\n for name_part in self.name.split(\" \") # pylint: disable=no-member\n ]\n initials = \"\".join(initials_list)\n return initials",
"def get_author_name_forms(name):\n\n name = utils.cleanup_string(name)\n splited = name.split(' ')\n\n for item in splited:\n if len(item) == 1: # The name is already formated.\n return [name]\n\n forms = []\n\n # normal form\n forms.append(name)\n\n # surname, given_names not abbreviated\n splited = [i for i in splited if len(i) >= 3]\n\n if len(splited) > 1:\n splited.insert(0, splited.pop())\n forms.append(' '.join(splited))\n\n # surname, given_names not abbreviated\n forms.append(' '.join([splited[0]]+[i[0] for i in splited[1:]]))\n\n return forms",
"def create_name():\n # Creating a list with the symbols of the Latin alphabet.\n com_list = \"\"\n j = 0\n name = \"\"\n for i in range(97, 122, 1):\n com_list += com_list + str(chr(i))\n while j != 1:\n name = input(\"\\nPlease enter you name. You name: \")\n name_1 = name.lower()\n if len(name) not in range(3, 10 + 1) or len(name) == 0:\n print(\"\\nThe name cannot be shorter than 3 characters.\")\n print(\"\\nThe name must not be longer than 10 characters.\")\n else:\n for i in name_1:\n if i not in com_list:\n print(\"\\nThe name can only consist of letters of the Latin alphabet.\")\n break\n else:\n j = 1\n return name",
"def make_name(*words):\n import itertools\n\n words = itertools.chain.from_iterable(w.split() for w in words)\n\n return ''.join(w.lower().capitalize() for w in words)",
"def transform_name(name):\n split_name = name.split(' ', 1)\n firstname = str.strip(str(split_name[0]))\n lastname = str.strip(str(split_name[1]))\n s = '{}, {}'.format(lastname, firstname)\n normalized_name = normalize_name(s)\n # DLF and MFL have different ways of storing name of Odell Beckham Jr\n if normalized_name == 'beckhamjrodell':\n return 'beckhamodell'\n else:\n return normalized_name\n return normalized_name",
"def create_random_surname(self):\n surname = ''\n for _ in range(self.NAME_LENGTH):\n surname += choice(ascii_letters)\n return surname",
"def GetNames(): #input data\n strName = raw_input (\"Enter the first and last name of the employee or press ENTER when finished.\")\n return strName",
"def gen_first_name(ucase=2, lcase=2, gender=False):\n gen_name = {}\n \n if not gender:\n if random.randrange(1,100) > GENDER_BIAS:\n gender = 'f'\n else:\n gender = 'm'\n\n _name = None\n _male_name_seed = random.randrange(1, 90040)\n _female_name_seed = random.randrange(1500, 90024) #1500? Too many Patricia's\n \n if gender == 'f':\n _name = female_name_tuples[bisect.bisect(FEMALE_NAME_KEYS, _female_name_seed)][1]\n else:\n _name = male_name_tuples[bisect.bisect(MALE_NAME_KEYS, _male_name_seed)][1]\n\n\n _random = random.randrange(0, 100)\n if _random < ucase:\n gen_name['given_name'] = _name\n gen_name['case'] = 'u'\n elif _random > 100 - lcase:\n gen_name['given_name'] = _name.swapcase()\n gen_name['case'] = 'l'\n else:\n gen_name['given_name'] = _name.title()\n gen_name['case'] = 'p'\n gen_name['gender'] = gender\n\n return gen_name",
"def parse_name(cls, flat_name):\n\n def unique(alias):\n try_alias = alias\n n = 2\n\n # keep on looping until an alias becomes available.\n # --\n # WARNING: this isn't going to work at high volumes, since the alias\n # that we return might be taken before we have time to do anything\n # with it! This should logic should probably be moved to the\n # initializer, to make the find/grab alias loop atomic\n while cls.objects.filter(alias__iexact=try_alias).count():\n try_alias = \"%s%d\" % (alias, n)\n n += 1\n\n return try_alias\n\n patterns = [\n # try a few common name formats.\n # this is crappy but sufficient\n r\"([a-z]+)\\s+([a-z]+)\", # Evan Wheeler\n r\"([a-z]+)\\s+[a-z]+\\.?\\s+([a-z]+)\",# Mark E. Johnston\n r\"([a-z]+)\\s+([a-z]+\\-[a-z]+)\" # Erica Kochi-Fabian\n ]\n\n # try each pattern, returning as\n # soon as we find something that fits\n for pat in patterns:\n\n m = re.match(pat, flat_name, re.I)\n if m is not None:\n first_name, last_name = m.groups()\n\n # generate an alias from the first letter of the first\n # name, and the letters (no dots or dashes) from the last\n alias = (first_name[0] + re.sub(r\"[^a-zA-Z]\", \"\", last_name)).lower()\n return (unique(alias), first_name.title(), last_name.title())\n\n # flat_name doesn't look like a full name, so generate an alias\n # from the alphanumerics (some aliases are entirely numeric),\n # and a name from just the letters (there might not be any)\n alias = unique(re.sub(r\"[^a-zA-Z0-9]\", \"\", flat_name).lower())\n name = re.sub(r\"[^a-zA-Z]\", \"\", flat_name)\n return (alias, name, \"\")",
"def name_parts():\n # name parts\n letters = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta',\n 'theta', 'iota', 'kappa', 'lambda', 'mu', 'nu', 'xi', 'omicron',\n 'pi', 'rho', 'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi',\n 'omega']\n colours = ['red', 'orange', 'yellow', 'green', 'blue', 'violet']\n\n # randomize order\n random.shuffle(letters)\n random.shuffle(colours)\n\n # yield initial sequence (letter-colour)\n parts = [letters, colours]\n yield parts\n\n # forever generate longer sequences by appending the letter list\n # over and over. Note that this is the *same* letter list, so it will have\n # the exact order.\n while True:\n random.shuffle(letters)\n random.shuffle(colours)\n parts.append(letters)\n yield parts",
"def generate_name(first_letter):\n\n # validate input\n assert(first_letter in string.ascii_lowercase)\n # keep a list of all the generated letters (starting with the given first letter)\n letters = [first_letter]\n\n with torch.no_grad():\n # use the given first letter to start the process\n x = helpers.letter_to_onehot(first_letter)\n # convert to the shape that the LSTM module requires as input\n x = x.view(1, 1, -1)\n # the first hidden input will be zeros since we're starting a new\n # sequence, or new name\n hidden = None\n\n # loop until ternimal character is predicted\n while True:\n y_pred, hidden = model(x, hidden)\n # here we can choose between deterministic or random conversion\n # from prediction logits to letter\n letter = pred_to_letter_rand(y_pred)\n # stop the process when the terminal character is predicted\n if letter == '_':\n break\n # add this predicted letter to the list of letters\n letters.append(letter)\n # convert predicted letter to a onehot so it can be used as\n # input for the next step (with the required shape)\n x = helpers.letter_to_onehot(letter).view(1, 1, -1)\n\n # put all the predicted letters together to get a name\n name = ''.join(letters)\n # validate output type\n assert(type(name) is str)\n # make sure we didn't add in some strange characters by accident\n for letter in name:\n assert(letter in string.ascii_lowercase)\n # names should start with a capital letter\n name = name.capitalize()\n\n return name",
"def acronym(input):\n words = input.split()\n res = ''\n for word in words:\n res = res + word[0].upper()\n return res",
"def setup_initials_generators(self):\n initials_generator: InitialsGenerator = InitialsGenerator()\n initials_generator.first_name_tag = self.initial_name_tags.children[0].children[1].value\n initials_generator.middle_initial_tag = self.initial_name_tags.children[1].children[1].value\n initials_generator.last_name_tag = self.initial_name_tags.children[2].children[1].value\n initials_generator.initials_examples = {\n \"Xiang-Zhen\": get_labelled_input_value(self.initial_examples.children[0]),\n \"Jun Soo\": get_labelled_input_value(self.initial_examples.children[1]),\n \"Baskin-Sommers\": get_labelled_input_value(self.initial_examples.children[2]),\n \"van Rooij\": get_labelled_input_value(self.initial_examples.children[3])\n }\n return initials_generator",
"def create_random_name(self):\n name = ''\n for _ in range(self.NAME_LENGTH):\n name += choice(ascii_letters)\n return name",
"def first_name_list(self, data):\n\n\t\tfirst_name= []\n\t\ttry:\n\t\t\tfor path, _, node in jxmlease.parse(data, generator=\"tir38:CustomerInfo/tir38:PersonName\"):\n\t\t\t\t\n\t\t\t\tfirst_name1 = node['tir38:GivenName']\n\t\t\t\tfirst_name.append(str(first_name1))\n\t\texcept:\n\t\t\tfirst_name = ['N/A']\n\t\treturn first_name",
"def test_invalid_first_name(self):\n\n invalid_name_to_test = [\"!St3v3n\", \" T0m\", \" \", \" N!ck\", \"@rthur\", \"Ry@n\", \"S@! \", \"D@v3\"]\n option = \"first name\"\n\n for name in invalid_name_to_test:\n self.database.first_name = name\n self.assertFalse(self.database.validate_NamePart(option, self.database.first_name))",
"def uncapitalize_name(name):\n buf = []\n for c in name:\n if 'A' <= c <= 'Z' and len(buf):\n buf.append('-')\n buf.append(c)\n return ''.join(buf).lower()",
"def user_name_for(name):\n name = name.replace(\"_\", \" \")\n result = \"\"\n last_lower = False\n\n for c in name:\n if c.isupper() and last_lower:\n result += \" \"\n last_lower = c.islower()\n result += c\n\n return result.capitalize()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate an even shorter ID by slicing a short UUID produced by the shortuuid library.
|
def shorter_uuid(length=7, starter=None, with_original=False):
original_id = str(shortuuid.uuid()) if starter is None else starter
n = len(original_id)
    dx = min(length, len(original_id))  # length of the slice to return
    # If the supplied starter is too short to slice from safely, fall back
    # to a freshly generated shortuuid (n and dx still reflect the starter).
    if starter is not None and len(starter) < dx * 2:
        original_id = str(shortuuid.uuid())
start_point = random.randint(0, n - dx)
shorter_id = original_id[start_point:(start_point + dx)]
return shorter_id if not with_original else [shorter_id, original_id]
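
A brief usage sketch (imports shown for completeness; the starter value is made up and results vary because the slice start is random):

import random
import shortuuid

short_id = shorter_uuid()  # a random 7-character slice of a fresh shortuuid
five_char, full_id = shorter_uuid(length=5, with_original=True)
seeded = shorter_uuid(length=4, starter='K5rpW7zqcuAnm2hjVkLe')  # sliced from the supplied starter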
|
[
"def new_uuid(length=25):\n letters = [random.choice(string.hexdigits) for _ in range(length)]\n return ''.join(letters).lower()",
"def get_uuid():\n return str(UUID(int=random.randint(0, 2**128 - 1))) # nosec",
"def gen_uuid():\n return str(uuid.uuid1().hex)",
"def _gen_uuid(self):\r\n return uuid.uuid4().hex",
"def generate_short_string(string: str) -> str:\n hashed_sting = sha256(string.encode()).hexdigest()\n short_string = ''.join(choices(hashed_sting, k=5))\n\n return short_string",
"def get_uuid(limit=10):\n uuid_sample = str(uuid.uuid4()).replace('-', '')\n if limit and limit <= len(uuid_sample):\n return (uuid_sample[:limit]).upper()\n return uuid_sample.upper()",
"def generate_snapshot_id():\n return shortuuid.uuid()",
"def _generate_shortname(cls):\n return ''.join([cls.letters[random.randrange(0, cls.num_letters)] for idx in range(0, cls.SHORTNAME_LEN)])",
"def generate_uuid():\n return uuid1(node=random.randint(0, 2**31 - 1))",
"def _createIdentifier(bits=160, _urandom=urandom):\n return urandom(bits // 8).encode(\"hex\")",
"def create_unique_id():\n from uuid import uuid4\n\n return str(uuid4())",
"def __generateuuid(self):\n uuid = virtinst.util.uuidToString(virtinst.util.randomUUID())\n\n return uuid",
"def get_unique_id(length=5):\n return str(int(time.time())) + base_token_factory(length)",
"def card_digit_gen ():\n return uuid.uuid4().hex[:8]",
"def new_id():\n bs = uuid4().bytes\n return urlsafe_b64encode(bs).strip().replace('=', '')",
"def uuid36():\r\n return str(uuid.uuid4())",
"def _random_uuid():\n uuid = \"\".join(random.choices(string.hexdigits, k=32))\n return UUID(uuid)",
"def get_zfill_hex_uuid(base_uuid, number):\n\n if len(base_uuid) != 20:\n raise RuntimeError('Wrong base_uuid length')\n\n return '{}{}'.format(base_uuid, str(number).zfill(12))",
"def generate_id():\n length = 6\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Searches the system path looking for pttransport.dll. Returns the name of the first directory containing it. Returns "" if none is found.
|
def is_pttrans_on_path():
    for i in os.environ["PATH"].split(";"):
        # Skip empty entries (e.g. from a trailing ';')
        if not i:
            continue
        # Fix up msys style paths
        if i[0] == "/":
            i = i[1] + ":" + i[2:]
        # Ignore the current directory, if people happen to have that on their path
        if i == ".":
            continue
        # Get the contents of this directory
        result = []
        try:
            result = os.listdir(i)
        except WindowsError:
            pass
        # Search the contents
        for j in result:
            if j.lower() == "pttransport.dll":
                return i
    return ""
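
A generalized, self-contained sketch of the same PATH scan; find_on_path is an illustrative name, and OSError is caught instead of the Windows-only WindowsError so the snippet also runs elsewhere.

import os

def find_on_path(filename, sep=";"):
    # Return the first PATH entry containing filename (case-insensitive), or "".
    for entry in os.environ.get("PATH", "").split(sep):
        if not entry or entry == ".":
            continue
        try:
            names = os.listdir(entry)
        except OSError:
            continue
        if filename.lower() in (name.lower() for name in names):
            return entry
    return ""

print(find_on_path("pttransport.dll", sep=os.pathsep) or "not found")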
|
[
"def find_path():\n if sys.platform == \"linux2\" or sys.platform == \"linux\":\n extension = \".so\"\n elif sys.platform == \"darwin\":\n extension = \".dylib\"\n elif sys.platform == \"win32\":\n extension = \".dll\"\n else:\n print(\"Unknown system type!\")\n return (True,0,0)\n\n path_lgc = imp.find_module('localgraphclustering')[1]\n return path_lgc+\"/src/lib/graph_lib_test/libgraph\"+extension",
"def find_base_path():\n if platform.system() == 'windows':\n base_path = os.path.join('K:', 'ptestbend')\n else:\n base_path = os.path.join('/mnt','K', 'ptestbend')\n return base_path",
"def get_system_path():\n # imported here to avoid circular import\n from ... import compat\n _bpath = []\n sys_dir = compat.win32api.GetSystemDirectory()\n # Ensure C:\\Windows\\system32 and C:\\Windows directories are\n # always present in PATH variable.\n # C:\\Windows\\system32 is valid even for 64bit Windows. Access do DLLs are\n # transparently redirected to C:\\Windows\\syswow64 for 64bit applactions.\n # http://msdn.microsoft.com/en-us/library/aa384187(v=vs.85).aspx\n _bpath = [sys_dir, get_windows_dir()]\n return _bpath",
"def _getDelphiPath(env, version = None): \r\n if not version: version = r'DELPHI7' \r\n if not '\\\\' in version: version = VERSIONS.__dict__.get(version, VERSIONS.DELPHI7)\r\n KEYPATH = r'SOFTWARE\\%s\\RootDir' % version \r\n return env.RegGetValue(KEYPATH) or ''",
"def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'dlr'),\n os.path.join(sys.prefix, 'local', 'dlr'),\n os.path.join(sys.exec_prefix, 'local', 'dlr'),\n os.path.join(os.path.expanduser('~'), '.local', 'dlr')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'dlr.dll') for p in dll_path]\n elif sys.platform.startswith('linux') or sys.platform.startswith('freebsd'):\n dll_path = [os.path.join(p, 'libdlr.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libdlr.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n if not lib_path and not os.environ.get('DLR_BUILD_DOC', False):\n raise DLRLibraryNotFound(\n 'Cannot find DLR Library in the candidate path, ' +\n 'List of candidates:\\n' + ('\\n'.join(dll_path)))\n return lib_path",
"def get_dotnet_dir() -> Optional[str]:\n tmp = \"DOTNETHOME_X{}\".format(\"64\" if dotnet_const.X64 else \"86\")\n if tmp in dotnet_const.ENVIRON:\n tmp = dotnet_const.ENVIRON[tmp]\n if isdir(tmp):\n return tmp\n if \"DOTNETHOME\" in dotnet_const.ENVIRON:\n tmp = dotnet_const.ENVIRON[\"DOTNETHOME\"]\n if isdir(tmp):\n return tmp\n if \"DOTNET_ROOT\" in dotnet_const.ENVIRON:\n tmp = dotnet_const.ENVIRON[\"DOTNET_ROOT\"]\n if isdir(tmp):\n return tmp\n tmp = shutil.which(get_exe_name(\"dotnet\"))\n if tmp:\n try:\n tmp2 = os.readlink(tmp) if dotnet_const.PYTHON_3 else tmp\n tmp = tmp2 if os.path.isabs(tmp2) else abspath(join(dirname(tmp), tmp2))\n except OSError:\n pass\n tmp = dirname(tmp)\n if isdir(tmp):\n return tmp\n return None",
"def ptvsd_folder_name():\n\n try:\n for tag in sys_tags():\n folder_name = f\"ptvsd-{tag.interpreter}-{tag.abi}-{tag.platform}\"\n folder_path = os.path.join(PYTHONFILES, folder_name)\n if os.path.exists(folder_path):\n print(folder_path, end=\"\")\n return\n except:\n # Fallback to use base PTVSD path no matter the exception.\n print(PYTHONFILES, end=\"\")\n return\n\n # Default fallback to use base PTVSD path.\n print(PYTHONFILES, end=\"\")",
"def get_runtime_dll_paths(self) -> List[Tuple[str, str]]:\n return [(dll_name, self.dll_paths.get(dll_name, 'NOT FOUND'))\n for dll_name in self.dlls]",
"def GetRegisteredNamedPath(name):\n keyStr = BuildDefaultPythonKey() + \"\\\\PythonPath\"\n if name:\n keyStr = keyStr + \"\\\\\" + name\n try:\n return win32api.RegQueryValue(GetRootKey(), keyStr)\n except win32api.error as exc:\n import winerror\n\n if exc.winerror != winerror.ERROR_FILE_NOT_FOUND:\n raise\n return None",
"def get_pth_dir(executable):\n output = runner.run([\n executable,\n '-c',\n 'import json, sys; print(json.dumps([sys.prefix, sys.version_info]))'\n ]).std_out\n prefix, version_parts = json.loads(output)\n version = '{0}.{1}'.format(version_parts[0], version_parts[1])\n if os.name == 'nt':\n return '{0}/Lib/site-packages'.format(prefix)\n elif os.name == 'posix':\n return '{0}/lib/python{1}/site-packages'.format(prefix, version)\n else:\n raise NonRecoverableError('Unsupported OS: {0}'.format(os.name))",
"def get_package_dir() -> str:\n ret = subprocess.run(\n [\"rospack\", \"find\", \"kimera_vio\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n if ret.returncode != 0:\n raise RuntimeError(\n \"catkin locate failed: {}\".format(ret.stderr.decode(\"utf-8\"))\n )\n\n return pathlib.Path(ret.stdout.decode(\"utf-8\").strip(\"\\n\"))",
"def get_tree_sitter_so() -> str:\n tree_sitter_dir = get_tree_sitter_dir()\n bin_loc = os.path.join(tree_sitter_dir, \"build\", \"langs.so\")\n return bin_loc",
"def host_python(self):\n\n self._check_python_component()\n\n return os.path.join(self.host_bin_dir, self.host_exe('python'))",
"def find_PROJ_LIB():\n pathnames = []\n roots = site.getsitepackages()\n for root in roots:\n pathnames+=glob.glob(root+\"/osgeo/**/proj.db\",recursive=True)\n if len(pathnames):\n break\n return justpath(pathnames[0]) if len(pathnames) else \"\"",
"def get_pcgr_bin():\n return os.path.dirname(os.path.realpath(sys.executable))",
"def locate(program: str):\n wineprefix = os.getenv(\"WINEPREFIX\") or os.path.join(os.getenv(\"HOME\"), \".wine\")\n ans = []\n for file in Path(wineprefix).rglob(\"**/*.exe\"):\n if program in str(file):\n ans.append(str(file))\n return ans[-1]",
"def get_nuke_path():\n\n return nuke.EXE_PATH",
"def find_swig (self):\r\n\r\n if os.name == \"posix\":\r\n return \"swig\"\r\n elif os.name == \"nt\":\r\n\r\n # Look for SWIG in its standard installation directory on\r\n # Windows (or so I presume!). If we find it there, great;\r\n # if not, act like Unix and assume it's in the PATH.\r\n for vers in (\"1.3\", \"1.2\", \"1.1\"):\r\n fn = os.path.join(\"c:\\\\swig%s\" % vers, \"swig.exe\")\r\n if os.path.isfile(fn):\r\n return fn\r\n else:\r\n return \"swig.exe\"\r\n\r\n elif os.name == \"os2\":\r\n # assume swig available in the PATH.\r\n return \"swig.exe\"\r\n\r\n else:\r\n raise DistutilsPlatformError, \\\r\n (\"I don't know how to find (much less run) SWIG \"\r\n \"on platform '%s'\") % os.name",
"def libpath(self) -> Optional[str]:\n lpp = self.libparts\n if lpp:\n return \"/\".join(lpp)\n else:\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sleeps for the specified amount of time while keeping odometry up to date
|
def sleep(self, time_in_sec):
start = self.time.time()
while True:
state = self.create.update()
if state is not None:
self.odometry.update(state.leftEncoderCounts, state.rightEncoderCounts)
# print("[{},{},{}]".format(self.odometry.x, self.odometry.y, math.degrees(self.odometry.theta)))
t = self.time.time()
if start + time_in_sec <= t:
break
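
The original depends on robot-specific objects (self.create, self.odometry); below is a self-contained sketch of the same busy-wait-while-polling pattern, where sleep_while_polling and the 10 ms back-off are illustrative assumptions not present in the original.

import time

def sleep_while_polling(duration_sec, poll):
    # Busy-wait until the deadline, polling for fresh state on every pass.
    deadline = time.time() + duration_sec
    while time.time() < deadline:
        state = poll()
        if state is not None:
            pass  # update odometry / bookkeeping with the new state here
        time.sleep(0.01)  # small back-off so the loop does not spin at 100% CPU

sleep_while_polling(0.05, lambda: None)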
|
[
"def wait_for_time():\n while rospy.Time().now().to_sec() == 1:\n pass",
"def rand_sleep():\n time.sleep(random.uniform(0.75, 1.5))",
"def delay():\n latency = 0.49\n sleep(latency)",
"def wait():\n t = random.triangular(config.WAIT_MIN, config.WAIT_MAX)\n time.sleep(t)",
"def sleepDelay(ms):\r\n time.sleep(ms/1000.0)",
"def sleep_n_sec(n=20):\n time.sleep(n)",
"def doSleep(self):\n if os.environ.get(\"TRAVIS\"):\n time.sleep(10)\n else:\n time.sleep(20)\n return",
"def sleep(seconds):\n if hasattr(config,'hardwareSpeedup'):\n speed = config.hardwareSpeedup\n if not (speed == None):\n time.sleep(seconds/speed)\n return\n\n time.sleep(seconds)",
"def sleepMicroseconds(us):\r\n time.sleep(us * 1e-6)",
"def delay(ms):\r\n ms = int(ms*1000)\r\n libc.usleep(ms)",
"def sleep_asynchronously():\n time.sleep(20)",
"def _sleep(self):\n self.kill()",
"def sleepTask(self, delay=0):\r\n return self.timeKeeper.sleep(delay)",
"async def _sleep_on_error(self, delay: float = 1.0, deviation: float = 1.0):\n await asyncio.sleep(delay - deviation + 2 * deviation * random.random())",
"def deep_sleep():\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n rtc.alarm(rtc.ALARM0, config.INTERVAL * 1000)\n machine.deepsleep()",
"def set_timeout(self, sleep_ms):\n self.__RXTX_SLEEP_TIME = sleep_ms",
"def _delay(self):\n time.sleep(random.randint(self.min_delay,self.max_delay)/1000.0)",
"def wait(self, millis=0, nanos=0):\n pass",
"def sleep(self):\n # Put the creature to sleep\n self.is_sleeping = True\n self.tiredness -=3\n self.boredom -=2\n print(\"Zzzzzzzzzzzzzz........Zzzzzzzzzzzzzz........Zzzzzzzzzzzzzz\")\n\n # If tiredness and boredome is less than zero then set it to zero.\n\n if self.tiredness <0:\n self.tiredness = 0\n if self.boredom < 0:\n self.boredom = 0",
"def sleep(self, *args):\n if (len(args) == 0):\n interval = self.default_interval\n else:\n interval=args[0]\n \n time.sleep(interval)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Set the parameters of the model. When called, this function sets the model parameters that are used to make predictions. Assumes parameters are stored in self.w and self.b.
|
def set_params(self, w, b):
self.w = w
self.b = b
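
A minimal sketch showing how such a setter is typically paired with a prediction step; the LinearModel class and its predict method are illustrative additions, not part of the original.

import numpy as np

class LinearModel:
    def set_params(self, w, b):
        # Store the weight vector and bias used for predictions.
        self.w = w
        self.b = b

    def predict(self, X):
        return X @ self.w + self.b

model = LinearModel()
model.set_params(np.array([2.0, -1.0]), 0.5)
print(model.predict(np.array([[1.0, 3.0]])))  # [-0.5]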
|
[
"def set_model_params(self, params):",
"def set_model_params(self, new_model_params: ModelParamsType):\n new_prototypes, new_omega = new_model_params\n\n self.set_prototypes(new_prototypes)\n self.set_omega(new_omega)\n\n if self.relevance_normalization:\n LGMLVQ._normalize_omega(self.omega_)",
"def set_model_params(self, new_model_params: Union[tuple, np.ndarray]):\n raise NotImplementedError(\"You should implement this!\")",
"def __init__(self, model_params, training_params):\n # Set model and training parameters as instance variables\n\n\n self._model_params = deepcopy(model_params)\n self._training_params = deepcopy(training_params)",
"def set_parameters(self, params, **kargs):\n self._solver.set_parameters(params, **kargs)",
"def set_parameters(self, params):\r\n #raise NotImplementedError(\"You need to write this part!\")\r\n i = 0\r\n for param in self.network.parameters():\r\n param = prams[i]\r\n i = i+1",
"def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)",
"def set_params(self, **params):\n for key, value in params.items():\n if hasattr(self, key):\n if key == 'layers':\n value = list(value)\n setattr(self, key, value)\n else:\n # accessing deep parameters\n param, sep, param_of_param = key.partition('__')\n if sep != '__':\n raise ValueError(key + ' is an invalid parameter a Theanets estimator')\n if param == 'trainers':\n index, sep, param = param_of_param.partition('_')\n index = int(index)\n if index >= len(self.trainers):\n raise ValueError('{} is an invalid parameter for a Theanets estimator: index '\n 'too big'.format(key))\n if param == '':\n # e.g. trainers__0 = {'optimize': 'sgd', 'learning_rate': 0.3}\n self.trainers[index] = value\n else:\n # e.g. trainers__0_optimize = 'sgd'\n self.trainers[index][param] = value\n elif param == 'layers':\n index = int(param_of_param)\n if index >= len(self.layers):\n raise ValueError('{} is an invalid parameter for a Theanets estimator: index '\n 'too big'.format(key))\n self.layers[index] = value\n elif param == 'scaler':\n try:\n self.scaler.set_params(**{param_of_param: value})\n except Exception:\n raise ValueError('was unable to set parameter {}={} '\n 'to scaler {}'.format(param_of_param, value, self.scaler))\n else:\n raise ValueError(key + ' is an invalid parameter for a Theanets estimator')",
"def set_parameters(self, params: List[Union[torch.Tensor, list]]) -> None:\n self.means = params[0]\n self.precs = params[1]",
"def initialize_parameters(self):\n\n self.n_inputs = len(self.df.columns[:-1])\n self.n_hidden_per_layer = 3\n self.n_hidden = 2\n self.n_outputs = len(self.df.Class.unique()) if self.c_t == \"classification\" else 1\n self.learning_rate = .07\n self.epochs = 3\n self.momentum_factor = .5\n self.performance = 0",
"def set_params(self, **params):\n if not params:\n return self\n valid_params = self.get_params()\n for key, value in params.items():\n if key not in valid_params:\n raise ValueError('Invalid parameter %s for posterior '\n 'approximation %s. '\n 'Check the list of available parameters '\n 'with `PosteriorApproximation.get_params().keys()`.' %\n (key, self.__class__.__name__))\n setattr(self, key, value)\n return self",
"def update_parameters(self, params):\n self.tbf.update_lengthscales(np.exp(params[:self.D])) # update TBF lengthscales\n self.tbf.update_amplitude(np.exp(2*params[self.D])) # update TBF amplitude\n self.var_n = np.exp(2*params[self.D + 1]) # update noise variance\n self.tbf.update_frequencies(params[self.D + 2:]) # update the TBF spectral frequencies",
"def _set_params_toy(self,hmm) :\n\t\thmm.length = 12\n\t\thmm.dims = [(2,3)]*hmm.length # (latent,emit) dimspace\n\t\thmm.emit = [\n\t\t\t[[0.6,0.2,0.2],[0.2,0.6,0.2]]\n\t\t]*hmm.length\n\t\thmm.trans = [\n\t\t\t[[0.7,0.3],[0.3,0.7]]\n\t\t]*hmm.length\n\n\t\thmm.seqmap = [{'a':0,'b':1}]*hmm.length\n\t\thmm.seqmap2 = [{0:'a',1:'b'}]*hmm.length\n\t\thmm.featmap = [{'H':0,'B':1,'L':2}]*hmm.length\n\t\thmm.initprob = [0.5,0.5]\n\t\thmm.trained = True",
"def setModelPars(self, pars):\n pass",
"def set_parameters(self, parameters: DecisionForestParameters):\n self.parameters = tensor_forest.ForestHParams(\n num_classes=parameters.num_classes,\n num_features=parameters.num_features,\n num_trees=parameters.num_trees,\n max_nodes=parameters.max_nodes,\n inference_tree_paths=parameters.inference_tree_paths\n ).fill()\n\n self.batch_size = parameters.batch_size\n self.use_training_loss = parameters.use_training_loss\n self.report_feature_importances = parameters.report_feature_importances\n self.model_dir = parameters.model_dir",
"def _update_trainable_params(self):\n self._trainable_params = set(self._par_info)",
"def _update_trainable_params(self):\n self._trainable_params = list(self._par_info)",
"def InitializeMetaParameters(self):\n\n\n\t\t#To set Meta Parameters, as done in the paper.\n\t\t#Note:- \n\t\t#\tself.MiscParamList == [eta, tau_squared, sigma2, nu_1, nu_2]\n\n\n\t\twith torch.no_grad():\n\n\t\t\t#For MiscParamList\n\t\t\ttrain_pred = self.Model(self.TrainData[:,:self.D_in])\n\t\t\ttrain_truth = self.TrainData[:,self.D_in:]\n\t\t\teta = np.log( np.mean(np.var( np.array(train_pred - train_truth) )) )\n\t\t\ttau_squared = np.exp(eta)\n\t\t\tsigma_squared = 25\n\t\t\tnu_1 = 0\n\t\t\tnu_2 = 0\n\n\t\t\tself.MiscParamList = [eta, tau_squared, sigma_squared, nu_1, nu_2]\n\n\t\t\t#For CurrentPriorProb, Note that we entered the list of current model weights.\n\t\t\tself.CurrentPriorProb, _ = self.PriorLikelihood(self.MiscParamList, list(self.Model.state_dict().values()) )\n\n\t\t\t#For CurrentLikelihoodProb\n\t\t\tself.CurrentLikelihoodProb, _ = self.Likelihood(self.MiscParamList, list(self.Model.state_dict().values()) )",
"def _set_up_model(self) -> None:\n self._add_vars_x_i_in_theta_i()\n self._add_vars_z_i()\n self._add_linear_cons()\n self._add_objective()",
"def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.diagonal_weight_indices = torch.LongTensor([[node for node in range(self.ncount)], [node for node in range(self.ncount)]])\n self.diagonal_weight_indices = self.diagonal_weight_indices\n self.diagonal_weight_filter = torch.nn.Parameter(torch.Tensor(self.ncount, 1))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load the page corresponding to self.external_id and update ExoplanetEu.articles with parsed sources
|
def retrieve(self):
try:
if (response := requests.get("http://exoplanet.eu/catalog/" + self.external_id)).status_code != 200:
self.trace('http response: {}'.format(response.status_code), 40)
return
except requests.exceptions.RequestException as e:
self.trace(e.__str__(), 40)
return
page = BeautifulSoup(response.content, 'html.parser')
for p in page.find_all('p', {'class': 'publication'}):
try:
if p.get('id') not in ExoplanetEu.articles and (ref_id := ExoplanetEu.parse_publication(p)):
ExoplanetEu.articles[p.get('id')] = ref_id
except ValueError as e:
self.trace('Found {} results while looking for source {} by title'.format(e.args[0], p.get('id')))
return page
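
A stripped-down sketch of the fetch-and-parse step with the class bookkeeping removed; the '11_com_b' id in the URL is a placeholder, not a verified catalog entry.

import requests
from bs4 import BeautifulSoup

response = requests.get("http://exoplanet.eu/catalog/11_com_b", timeout=10)
if response.status_code == 200:
    page = BeautifulSoup(response.content, "html.parser")
    # List the ids of the publication paragraphs that retrieve() would parse.
    for p in page.find_all("p", {"class": "publication"}):
        print(p.get("id"))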
|
[
"def load_article(self, title):\n wikipedia.set_lang(self.language)\n self.article_name = title\n page = wikipedia.page(title)\n page = self.store_images(page)\n self.content = self.process_html(page)",
"def load_article(self, title):\n self.articleName = title\n url = \"\"\n title = urllib.quote(title.replace(\" \", \"_\").encode('utf-8'))\n try:\n url = (self.site or self.ownUrl)\n if not url.endswith('/') and title <> '': url += '/'\n if '://' not in url: url = 'http://' + url\n url += title\n net = urllib.urlopen(url)\n page = net.read()\n net.close()\n except IOError, error:\n self.content = _(u\"Unable to download from %s <br/>Please check the spelling and connection and try again.\") % url\n return\n\n page = unicode(page, \"utf8\")\n # FIXME avoid problems with numeric entities in attributes\n page = page.replace(u' ', u' ')\n\n # avoidParserProblems is set to False because BeautifulSoup's\n # cleanup was causing a \"concatenating Null+Str\" error,\n # and Wikipedia's HTML doesn't need cleaning up.\n # BeautifulSoup is faster this way too.\n soup = BeautifulSoup(page, False)\n content = soup.first('div', {'id': \"content\"})\n\n # remove the wiktionary, wikimedia commons, and categories boxes\n # and the protected icon and the needs citations box\n if content:\n infoboxes = content.findAll('div',\n {'class' : 'infobox sisterproject'})\n [infobox.extract() for infobox in infoboxes]\n catboxes = content.findAll('div', {'id' : 'catlinks'})\n [catbox.extract() for catbox in catboxes]\n amboxes = content.findAll('table',\n {'class' : re.compile(r'.*\\bambox\\b.*')})\n [ambox.extract() for ambox in amboxes]\n protecteds = content.findAll('div', {'id' : 'protected-icon'})\n [protected.extract() for protected in protecteds]\n else:\n content = soup.first('body')\n\n if not content:\n self.content = _(u\"Unable to download from %s <br/>Please check the spelling and connection and try again.\") % url\n # set the other elements as well\n return\n \n bits = url.split('/')\n netloc = '%s//%s' % (bits[0], bits[2])\n self.content = self.reformatArticle(netloc, unicode(content))\n # now that these are supporting images, any direct manipulation\n # of the content field must also store this updated information\n # into the other corresponding fields of TextAreaField:\n # (perhaps eventually a property should be made for TextAreaField \n # such that these extra set's are not necessary, but for now, here:)",
"def fetch_articles():\n url_list = [f'http://{PUBLISHER_DOMAIN}/{i}/rss' for i in ['ts', 'hs', 'ks', 'kl', 'ss']]\n for src in url_list:\n feed = feedparser.parse(src)\n try:\n url = feed.feed.link.replace('http://', '')\n except:\n return make_response(f'{feed}\\n{src}')\n author = Publisher.query.filter_by(url=url).first()\n if author:\n for i, entry in enumerate(feed.entries):\n url = entry.link\n if not Article.query.filter_by(url=url).first():\n img = entry.media_content[0]['url']\n if not img:\n img = author.image\n category = Category(entry.category)\n day = date.fromisoformat(entry.published)\n article = Article(name=entry.title, publisher=author, image=img, url=url,\n description=entry.description, date=day, category=category)\n db.session.add(article)\n db.session.commit()\n print(f'fetched {author.url} articles succesfully')\n else:\n print('Couldnt find author with given url')\n print(f'URL:\\n{url}')\n print('\\n'*3)\n publishers = [i.url for i in Publisher.query.all()]\n for i in publishers:\n print(i)\n print('\\n'*3)\n return make_response('ok', 200)",
"def loadArticles():\n #print \"loadArticles()\"\n topDir = \"/home/phil/proj/euroelection\"\n allFns = os.listdir(topDir)\n artFns = [fn\n for fn in allFns\n if fn.endswith(\".art\")]\n #print \"loadArticles() artFns=%r\" % (artFns,)\n for artFn in artFns:\n artPn = topDir+\"/\"+artFn\n #print \"loading articles in <%s>...\" % (artPn,)\n artFStr = butil.readFile(topDir+\"/\"+artFn)\n readArticles(artFStr)\n #//for",
"def create_article_objects(article_urls):\r\n articles = []\r\n for url in article_urls:\r\n a = Article(make_request(url))\r\n a.get_title()\r\n print(a.title) #show progress by printing title\r\n a.get_content()\r\n articles.append(a)\r\n return articles",
"def fetch_all_articles(self):\n \n url_args = {\n 'api-key': self.guardian_api_key,\n 'format': 'json',\n 'show-fields': 'body,byline,headline,publication,shortUrl,standfirst,thumbnail',\n 'show-factboxes': 'all',\n }\n \n for section_index, section in enumerate(self.contents['sections']):\n for link_index, link in enumerate(section['links']):\n article_url = 'http://content.guardianapis.com' + link['path']\n \n self.message('Fetching JSON: ' + article_url)\n\n error_message = ''\n \n try:\n response = requests.get(article_url, params=url_args,\n timeout=10)\n except requests.exceptions.ConnectionError as e:\n error_message = \"Can't connect to domain.\"\n except requests.exceptions.ConnectTimeout as e:\n error_message = \"Connection timed out.\"\n except requests.exceptions.ReadTimeout as e:\n error_message = \"Read timed out.\"\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n error_message = \"HTTP Error: %s\" % response.status_code\n\n if error_message:\n if response.status_code == 403:\n error_message = \"This article can only be read on theGuardian.com due to rights issues.\"\n elif response.status_code == 404:\n error_message = \"This article was missing when we tried to fetch it.\"\n self.message(error_message)\n # Fake a JSON structure, so that we still have a page for this\n # story.\n result = {\n 'response': {\n 'content': {\n 'id': link['path'],\n 'webTitle': link['title'],\n 'webUrl': 'http://www.theguardian.com' + link['path'],\n 'fields': {\n 'headline': link['title'],\n 'body': '<div class=\"error\"><p>'+error_message+'</p><p><a href=\"http://www.theguardian.com'+link['path']+'\">View on theGuardian.com</a></p></div>'\n }\n }\n }\n }\n \n else:\n # We got a pagea successfully.\n result = response.json()\n \n html = self.make_article_html(result['response']['content'])\n \n # Get the last part of the article's URL\n # eg 'trident-savings-nuclear-deterrent' from\n # '/uk/2010/may/19/trident-savings-nuclear-deterrent'\n match_filename = re.compile(r'/([^/]*)$')\n filename = match_filename.search(link['path']).groups()[0] + '.html'\n self.contents['sections'][section_index]['links'][link_index]['file'] = filename\n self.contents['sections'][section_index]['links'][link_index]['id'] = result['response']['content']['id']\n \n if 'body' in result['response']['content']['fields']:\n [words, lines] = self.count_words(result['response']['content']['fields']['body'])\n self.contents['sections'][section_index]['links'][link_index]['words'] = words\n if words > self.contents['meta']['max_words']:\n self.contents['meta']['max_words'] = words\n else:\n self.contents['sections'][section_index]['links'][link_index]['words'] = 0\n \n try:\n article_file = open(self.issue_archive_dir + filename, 'w')\n try:\n article_file.write(html.encode('utf-8'))\n finally:\n article_file.close()\n except IOError:\n raise ScraperError(\"IOError when writing \" + self.issue_archive_dir + filename)\n \n # Pause, be nice.\n time.sleep(0.5)",
"def call_all_urls(self, response):\n parse_all_urls(response, self.articles, self.url_limit)\n\n # retrieve additional data from articles\n for article in self.articles:\n if not article.is_populated():\n yield scrapy.Request(article.link, callback=parse_additional_data, errback=self.errback_httpbin,\n dont_filter=True, meta={'article_object': article, 'articles': self.articles, 'date_limit': self.date_limit})\n\n # retrieve comments from articles\n for article in self.articles:\n yield scrapy.Request(article.comment_link, callback=parse_comments, errback=self.errback_httpbin, dont_filter=True, meta={'article_object': article})",
"def load_news_page(l):\n simulate_loading_news_page(l)\n l.interrupt()",
"def generic_article_scraping(url, source='default', delay=1):\n day = int(datetime.datetime.now().strftime(\"%d\"))\n response = requests.get(url)\n print(url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n # article[\"date\"] = soup.find(\"time\")[\"datetime\"]\n article_date = get_article_date(soup, source=source)\n date = article_date.day\n print(date)\n #Check article is 0 or 1 day old\n # if int(date) - day <= delay:\n article = get_article_content(soup, source=source)\n article[\"url\"] = url\n article[\"date\"] = article_date\n #Load into database\n add_article = Article(url=url, image_url=article[\"image\"],\\\n description=article[\"description\"], source=article[\"source\"],\\\n pub_date = article[\"date\"], title = article[\"title\"])\n add_article.save()\n return article",
"def scrape_article_for_external_links(article_title, file=sys.stdout, header=False): \n\t# the url to be scraped\n\turl = \"https://en.wikipedia.org/w/api.php?action=query&prop=extlinks&format=json&ellimit=5000&titles=%s\"%(article_title.replace(' ', '%20'))\n\n # scrape the url\n\ttry: \n\t\traw_data = read_in_data_from_url(url, headers=False, json=True)\n\texcept: \n\t\treturn SCRAPING_FAILED\n\n\tif header: \n\t\tprint(\"page_id\\tname\\texternal_link\", file=file)\n\n\t# walk through all the external links\n\tfor page_id, page in raw_data['query']['pages'].items(): \n\t\tif 'extlinks' in page: # check whether there are external links\n\t\t\tfor extlink in page['extlinks']: \n\t\t\t\t# print to the output file\n\t\t\t\tprint('%s\\t%s\\t%s'%(page_id, article_title, extlink['*']), file=file)\n\n\treturn SCRAPING_SUCCESSFULL",
"def load_articles(articles_list, location):\n\n # to do: is there a better way to get location_id for location?\n location_obj = Location.query.filter(Location.location_name == location).one()\n location_id = location_obj.location_id\n\n for article in articles_list:\n web_url = article['web_url']\n headline = article['headline']['main']\n pub_date = article['pub_date']\n\n new_article = Article(glocation=location, web_url=web_url, headline=headline, pub_date=pub_date, location_id=location_id)\n\n db.session.add(new_article)\n\n db.session.commit()",
"def scrape_all(self):\n\n items = self.content.find_all(\"item\")\n for item in items:\n details = {}\n details[\"article_url\"] = item.find_all(\"link\")[0].nextSibling\n details[\"article_title\"] = item.find_all(\"title\")[0].string\n naive_date = datetime.datetime.strptime(\n item.find_all(\"pubdate\")[0].string,\n self.date_format)\n details[\"pub_date\"] = pytz.utc.localize(naive_date)\n self.articles.append(details)",
"def fetch_articles(self, n):\n \n article_keywords = create_search_list(n)\n self.articles = create_corpus(article_keywords, self.articles)",
"def HN_frontPage():\n\tbase = \"http://api.ihackernews.com/page\"\n\tr = requests.get(base)\n\tjresp = json.loads(r.content)\n\tarticles = [] # url, source, pub_date, tags, title\n\tsource = \"Hacker News\"\n\tfor link in jresp['items']:\n\t\ttry:\n\t\t\turl = link['url']\n\t\t\ttitle = link['title']\n\t\t\tpub_date = link['postedAgo']\n\t\t\ttags = title.split(' ') # lack of tags :(\n\t\t\ta = createArticle(url, source, pub_date, tags, title)\n\t\t\tarticles.append(a)\n\t\texcept: pass\n\treturn articles",
"def load_first_article():\n\n print \"First Article loading...\"\n article = Article(title=\"Mr. Smith Goes to Washington\", \n url=\"http://www.google.com\",\n img_src=\"http://www.placekitten.com/300/300\",\n user_id=942)\n\n db.session.add(article)\n \n db.session.commit()\n print \"First Article loaded.\"",
"def scan_articles(self):\n content_dir = self.fs.dir(\"content\")\n if not content_dir:\n return\n self.articles = Tree()\n for path_str, _, files in os.walk(content_dir):\n path = Path(path_str)\n for name in files:\n file_path = path / name\n if file_path.suffix not in (\".md\", \".yml\"):\n continue\n relative = file_path.relative_to(content_dir).with_suffix(\"\")\n ref: Ref[Article] = Ref(tuple(Label(p) for p in relative.parts))\n self.articles.create(ref, Article, file_path)\n logging.debug(\"found article at %s\", file_path)",
"def walk(self):\n call_plugins('on_pre_walk')\n for fullname in self._walk():\n article = self.fetch_article(fullname)\n call_plugins('on_visit_article', article)\n call_plugins('on_post_walk')",
"def load_abstract(pmid_list, separate_title=True):\r\n global JSON_DB\r\n require_remote_fetch = list()\r\n # try to see if abstracts are available locally\r\n local_abstracts = dict()\r\n for pmid in pmid_list:\r\n print('## Retrieving PMID ' + str(pmid), file=sys.stderr)\r\n if pmid in JSON_DB:\r\n local_abstracts[pmid] = JSON_DB[pmid]\r\n else:\r\n require_remote_fetch.append(pmid)\r\n # download abstracts that are not available locally\r\n print('# {number} abstracts require remote download.'.format(number=len(require_remote_fetch)), file=sys.stderr)\r\n for batch in pmid_batch(require_remote_fetch):\r\n xml_result = fetch_abstract(batch)\r\n xml_tree = ET.fromstring(xml_result)\r\n for pubmed_article in xml_tree.findall('PubmedArticle'):\r\n pubmed_data = pubmed_article.findall('MedlineCitation')\r\n assert len(pubmed_data) == 1\r\n pmid = pubmed_data[0].findall('PMID')\r\n assert len(pmid) == 1\r\n extracted_pmid = pmid[0].text\r\n print('extracted_pmid', extracted_pmid, file=sys.stderr)\r\n cache_file_path = os.path.join(os.path.abspath(os.path.curdir), CACHE_DIR, str(extracted_pmid) + '.txt')\r\n article = pubmed_data[0].findall('Article')\r\n assert len(article) == 1\r\n article_title = article[0].findall('ArticleTitle')\r\n assert len(article_title) == 1\r\n extracted_article_title = article_title[0].text\r\n abstract = article[0].findall('Abstract')\r\n assert len(abstract) <= 1\r\n content = ''\r\n # output the title\r\n content += extracted_article_title + '\\n'\r\n # print a separator\r\n content += '=====separator line=====\\n'\r\n if len(abstract) == 1:\r\n abstract_text = abstract[0].findall('AbstractText')\r\n assert len(abstract_text) >= 1\r\n extracted_abstract_text = list()\r\n # output the abstract\r\n for paragraph in abstract_text:\r\n if paragraph.text is not None:\r\n content += paragraph.text + '\\n'\r\n if len(abstract) == 0:\r\n content += '[No abstract available.]'\r\n # add the newl downloaded abstract to JSON_DB\r\n print('# Adding', extracted_pmid, content, file=sys.stderr)\r\n JSON_DB[extracted_pmid] = content\r\n # read the newly downloaded abstracts\r\n newly_downloaded_abstracts = dict()\r\n for pmid in require_remote_fetch:\r\n print('# Newly downloaded', pmid, file=sys.stderr)\r\n print(JSON_DB.keys(), file=sys.stderr)\r\n if pmid in JSON_DB:\r\n newly_downloaded_abstracts[pmid] = JSON_DB[pmid]\r\n else:\r\n JSON_DB[pmid] = 'Not a PubMed article.\\n=====separator line=====\\n[Abstract not retrieved.]'\r\n newly_downloaded_abstracts[pmid] = JSON_DB[pmid]\r\n # prepare the output\r\n assert len(newly_downloaded_abstracts) + len(local_abstracts) == len(pmid_list)\r\n output_list = list()\r\n for pmid in pmid_list:\r\n if pmid in local_abstracts:\r\n output_list.append([pmid, local_abstracts[pmid]])\r\n else:\r\n output_list.append([pmid, newly_downloaded_abstracts[pmid]])\r\n # separate the title and abstract\r\n if separate_title:\r\n new_output_list = list()\r\n for item in output_list:\r\n pmid, abstract_and_title = item\r\n title, abstract = abstract_and_title.split('=====separator line=====')\r\n title = title.lstrip().rstrip()\r\n abstract = abstract.lstrip().rstrip()\r\n new_output_list.append([pmid, title, abstract])\r\n return new_output_list\r\n return output_list",
"def subarticles():"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Standardizes pixels; Facenet expects standardized pixels as input.
|
def standardize_pixels(pixels):
pixels = pixels.astype('float32')
mean, std = pixels.mean(), pixels.std()
return (pixels - mean) / std
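
A self-contained usage sketch; the 160x160 input size matches what FaceNet-style models commonly expect, but is an assumption here.

import numpy as np

face = np.random.randint(0, 256, size=(160, 160, 3), dtype=np.uint8)
pixels = face.astype('float32')
standardized = (pixels - pixels.mean()) / pixels.std()
print(standardized.mean(), standardized.std())  # approximately 0.0 and 1.0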
|
[
"def standardize_pixel_values(pixels):\n mean, std = pixels.mean(), pixels.std()\n pixels = (pixels - mean) / std\n pixels = np.clip(pixels, -1.0, 1.0)\n pixels = (pixels + 1.0) / 2.0\n return pixels",
"def normalize(img,max_=255.0):\n img -= img.min()\n img = (img*max_/img.max()).astype('uint8')\n return img",
"def normalizeImage(image):\n image -= np.min(image)\n image *= 1.0 / np.max(image)",
"def scale_pixels(data):\n data /= 255",
"def normalization(image):\r\n image = (image - 128) / 128\r\n return image",
"def grayScaleStandard(image):\n for y in range(image.height):\n for x in range(image.width):\n (r, g, b) = image.getpixel((x, y))\n r = int(r * 0.299)\n g = int(g * 0.587)\n b = int(b * 0.114)\n lum = r + g + b\n image.putpixel((x, y), (lum, lum, lum))",
"def rescaled_image():",
"def restore_normalization(image):\r\n image = image * 128\r\n image = image + 128\r\n return image",
"def _imagenet_standardization_zero_value():\n return (-np.array(data_provider._MEAN_RGB_DICT[\"imagenet\"]) /\n np.array(data_provider._STDDEV_RGB_DICT[\"imagenet\"]))",
"def unnormalize_img(img):\n assert img.shape == (3, 800, 1088)\n img = torchvision.transforms.functional.normalize(img, mean=[0.0, 0.0, 0.0],\n std=[1.0 / 0.229, 1.0 / 0.224, 1.0 / 0.225])\n img = torchvision.transforms.functional.normalize(img, mean=[-0.485, -0.456, -0.406],\n std=[1.0, 1.0, 1.0])\n return img",
"def channel_normalization(image, rgb_mean, rgb_std):\r\n image = (image - rgb_mean) / rgb_std\r\n return image",
"def scale( pix, pixelMax, floatMin, floatMax):\n\n return (pix / pixelMax) * (floatMax - floatMin) + floatMin",
"def normalize(self, max_val=255.0):\n\t\treturn(OCRImage(self.image/float(max_val)))",
"def scale(img, vmax, vmin):\n # img = img.copy()\n max_ = img.max() \n min_ = img.min() \n if max_ != 0:\n img[img > 0] *= (vmax / max_)\n if min_ != 0: \n img[img < 0] *= (vmin / min_)\n return img",
"def normalize(x):\n \n output = np.array([image/255 for image in x])\n return output",
"def preprocess(image):\n\timg = skimage.transform.resize(image, (64, 64, 3))\n\timg = img / 255.\n\toutput = np.zeros((64, 64, 3), dtype = float)\n\tfor rgb in range(3):\n\t\tcolor = img[:,:,rgb]\n\t\tcolor = color - np.mean(color)\n\t\tcolor = color / np.std(color)\n\t\toutput[:,:,rgb] = color\n\treturn output",
"def preprocess_image(image):\n return normalize(rgb_to_gray_image(image))",
"def normalize(img, new_min=0, new_max=255):\n old_min = np.min(img)\n old_max = np.max(img)\n if old_min == old_max:\n return img - old_min # return 0s\n img = (img - old_min) * ((new_max - new_min) / (old_max - old_min)) + new_min\n return img",
"def SetFixedImageStandardDeviation(self, _arg: 'double') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUC2IUC2_SetFixedImageStandardDeviation(self, _arg)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
We override the paint event to allow us to draw with nice rounded edges
|
def paintEvent(self, event):
qp = qute.QPainter()
qp.begin(self)
qp.setRenderHint(
qute.QPainter.Antialiasing,
True,
)
qsize = self.size()
gradient = qute.QLinearGradient(0, 0, 0, qsize.height())
        gradient.setColorAt(0, qute.QColor(100, 20, 0, 175))
        gradient.setColorAt(1, qute.QColor(50, 50, 50, 175))
qp.setPen(self.PEN)
qp.setBrush(gradient) # self.BACKGROUND_COLOR)
qp.drawRoundedRect(
0,
0,
qsize.width(),
qsize.height(),
self.ROUNDING,
self.ROUNDING,
)
qp.end()
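
A minimal sketch of the same rounded, gradient-filled paint routine written against plain PySide2 rather than the qute wrapper used above; the class name, window size, and colours are illustrative.

from PySide2 import QtCore, QtGui, QtWidgets

class RoundedWidget(QtWidgets.QWidget):
    ROUNDING = 10

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
        # Vertical gradient used as the fill brush for the rounded rectangle.
        gradient = QtGui.QLinearGradient(0, 0, 0, self.height())
        gradient.setColorAt(0, QtGui.QColor(100, 20, 0, 175))
        gradient.setColorAt(1, QtGui.QColor(50, 50, 50, 175))
        painter.setPen(QtCore.Qt.NoPen)
        painter.setBrush(QtGui.QBrush(gradient))
        painter.drawRoundedRect(self.rect(), self.ROUNDING, self.ROUNDING)

if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    widget = RoundedWidget()
    widget.resize(200, 120)
    widget.show()
    app.exec_()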
|
[
"def paint(self, painter, option, widget):\n painter.setPen(self.pen)\n\n painter.setBrush(self.brush)\n if self.highlighted:\n painter.setBrush(self.highlightBrush)\n\n painter.drawEllipse(self.boundingRect())",
"def paintEvent(self, event):\n\n painter = QPainter()\n painter.begin(self)\n\n painter.drawPixmap(self.pixmap.rect(), self.pixmap)\n self.draw_progress_bar(painter)\n self.draw_lines(painter)\n self.draw_zooming_rect(painter)\n\n painter.end()",
"def _overpaint(self, event, painter):\n self.drawBubbles(event, painter)\n\n # draw instructions in half transparent rectangle on top of\n # the viewport\n self.drawInstructions(painter)\n\n if self._drawbox:\n # draw selection rectangle\n self.drawBox(painter)",
"def paint(event):\n x = event.x\n y = event.y\n color='black'\n\n x1, y1 = (x-brush_size), (y-brush_size)\n x2, y2 = (x+brush_size), (y+brush_size)\n c.create_rectangle(x1,y1,x2,y2,fill=color,outline=color) # TKinter canvas\n img_draw.rectangle([(x1,y1), (x2,y2)], fill=color, outline=color) # PIL Parallel",
"def draw_zooming_rect(self, painter):\n\n if self.select_zooming and self.zoom_point != QPoint():\n painter.setPen(QPen(Qt.black, 3, Qt.SolidLine))\n painter.drawRect(self.zooming_rect)",
"def paintEvent(self, event: QPaintEvent):\n\n painter = QPainter()\n painter.begin(self)\n painter.drawPixmap(0, 0, self.pixmap)\n painter.end()",
"def draw_naught(self, padding=6, outline='red'):\n width, height = int(self.getWidth()) - padding, int(self.getHeight()) - padding\n self.__shapes.append(self.drawOval(padding, padding, width, height, outline=outline))",
"def OnPaint(self, event):\n\n dc = wx.BufferedPaintDC(self)\n gc = wx.GraphicsContext.Create(dc)\n dc.SetBackground(wx.Brush(self.GetParent().GetBackgroundColour()))\n dc.Clear()\n\n clientRect = self.GetClientRect()\n gradientRect = wx.Rect(*clientRect)\n capture = wx.Window.GetCapture()\n\n x, y, width, height = clientRect\n\n gradientRect.SetHeight(gradientRect.GetHeight()/2 + ((capture==self and [1] or [0])[0]))\n if capture != self:\n if self._mouseAction == HOVER:\n topStart, topEnd = self.LightColour(self._topStartColour, 10), self.LightColour(self._topEndColour, 10)\n else:\n topStart, topEnd = self._topStartColour, self._topEndColour\n\n rc1 = wx.Rect(x, y, width, height/2)\n path1 = self.GetPath(gc, rc1, 8)\n br1 = gc.CreateLinearGradientBrush(x, y, x, y+height/2, topStart, topEnd)\n gc.SetBrush(br1)\n gc.FillPath(path1) #draw main\n\n path4 = gc.CreatePath()\n path4.AddRectangle(x, y+height/2-8, width, 8)\n path4.CloseSubpath()\n gc.SetBrush(br1)\n gc.FillPath(path4)\n\n else:\n\n rc1 = wx.Rect(x, y, width, height)\n path1 = self.GetPath(gc, rc1, 8)\n gc.SetPen(wx.Pen(self._pressedTopColour))\n gc.SetBrush(wx.Brush(self._pressedTopColour))\n gc.FillPath(path1)\n\n gradientRect.Offset((0, gradientRect.GetHeight()))\n\n if capture != self:\n\n if self._mouseAction == HOVER:\n bottomStart, bottomEnd = self.LightColour(self._bottomStartColour, 10), self.LightColour(self._bottomEndColour, 10)\n else:\n bottomStart, bottomEnd = self._bottomStartColour, self._bottomEndColour\n\n rc3 = wx.Rect(x, y+height/2, width, height/2)\n path3 = self.GetPath(gc, rc3, 8)\n br3 = gc.CreateLinearGradientBrush(x, y+height/2, x, y+height, bottomStart, bottomEnd)\n gc.SetBrush(br3)\n gc.FillPath(path3) #draw main\n\n path4 = gc.CreatePath()\n path4.AddRectangle(x, y+height/2, width, 8)\n path4.CloseSubpath()\n gc.SetBrush(br3)\n gc.FillPath(path4)\n\n shadowOffset = 0\n else:\n\n rc2 = wx.Rect(x+1, gradientRect.height/2, gradientRect.width, gradientRect.height)\n path2 = self.GetPath(gc, rc2, 8)\n gc.SetPen(wx.Pen(self._pressedBottomColour))\n gc.SetBrush(wx.Brush(self._pressedBottomColour))\n gc.FillPath(path2)\n shadowOffset = 1\n\n font = gc.CreateFont(self.GetFont(), self.GetForegroundColour())\n gc.SetFont(font)\n\n font = wx.Font(self.settings.FONTSIZE, wx.DEFAULT, wx.NORMAL, wx.BOLD)\n pencolor = (255,255,255)\n gc.SetFont(font, pencolor)\n\n label = self.GetLabel()\n tw, th = gc.GetTextExtent(label)\n\n if self._bitmap:\n bw, bh = self._bitmap.GetWidth(), self._bitmap.GetHeight()\n else:\n bw = bh = 0\n\n pos_x = (width-bw)/2+shadowOffset # adjust for bitmap and text to centre\n if self._bitmap:\n pos_y = (height-bh)/2+shadowOffset-20\n gc.DrawBitmap(self._bitmap, pos_x, pos_y, bw, bh) # draw bitmap if available\n pos_x = pos_x + 2 # extra spacing from bitmap\n\n #gc.DrawText(label, pos_x + bw + shadowOffset, (height-th)/2+shadowOffset)\n\n\n #gc.SetPen(wx.Pen(pencolor, 2))\n #colorbrush = wx.Brush(pencolor)\n #gc.SetBrush(colorbrush)\n\n #gc.SetPen(wx.WHITE_PEN)\n\n if len(label) <= self.settings.LINEBREAK:\n print len(label)\n gc.DrawText(label, (width-tw)/2 + shadowOffset, (height-(2*th))/2+shadowOffset+40)\n else:\n\n temp = label.split()\n holder = ''\n holder2 = ''\n x = 0\n for i in temp:\n holder2 += i + ' '\n tw, th = gc.GetTextExtent(holder2)\n print tw\n if tw > self.settings.BUTTONWIDTH-10:\n break\n else:\n holder = holder2\n x+= 1\n ns = holder\n nse = ' '.join(temp[x:])\n '''\n print len(label)\n temp = label.split()\n ns = ''\n x = 0\n for i in temp:\n ns += i +' '\n x += 1\n if 
len(ns) > 15:\n break\n\n nse = ' '.join(temp[x:])\n '''\n print nse\n tw1, th1 = gc.GetTextExtent(ns)\n tw2, th2 = gc.GetTextExtent(nse)\n gc.DrawText(ns, (width-tw1)/2 + shadowOffset, (height-(2*th))/2+shadowOffset+40)\n gc.DrawText(nse, (width-tw2)/2 + shadowOffset, (height-th + 15)/2+shadowOffset+40)",
"def OnPaint(self, event):\n DC = wx.PaintDC(self)\n self.DoDrawing(DC)",
"def __init__(self, width, height, color, radius, surface_above_border = False):\n super(RoundedBorder, self).__init__(width, height, color)\n self.radius = radius\n self.remove_background_after_draw = surface_above_border\n self.draw_surface_above_border = surface_above_border",
"def roundRect(self, x, y, width, height, radius, stroke=1, fill=0):\n #use a precomputed set of factors for the bezier approximation\n #to a circle. There are six relevant points on the x axis and y axis.\n #sketch them and it should all make sense!\n t = 0.4472 * radius\n\n x0 = x\n x1 = x0 + t\n x2 = x0 + radius\n x3 = x0 + width - radius\n x4 = x0 + width - t\n x5 = x0 + width\n\n y0 = y\n y1 = y0 + t\n y2 = y0 + radius\n y3 = y0 + height - radius\n y4 = y0 + height - t\n y5 = y0 + height\n\n self._code.append('n %0.4f %0.4f m' % (x2, y0))\n self._code.append('%0.4f %0.4f l' % (x3, y0)) # bottom row\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x4, y0, x5, y1, x5, y2)) # bottom right\n\n self._code.append('%0.4f %0.4f l' % (x5, y3)) # right edge\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x5, y4, x4, y5, x3, y5)) # top right\n\n self._code.append('%0.4f %0.4f l' % (x2, y5)) # top row\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x1, y5, x0, y4, x0, y3)) # top left\n\n self._code.append('%0.4f %0.4f l' % (x0, y2)) # left edge\n self._code.append('%0.4f %0.4f %0.4f %0.4f %0.4f %0.4f c' %\n (x0, y1, x1, y0, x2, y0)) # bottom left\n\n self._code.append('h') #close off, although it should be where it started anyway\n\n self._code.append(PATH_OPS[stroke, fill, self._fillMode])\n ##################################################\n #\n # Text methods\n #\n # As with graphics, a separate object ensures that\n # everything is bracketed between text operators.\n # The methods below are a high-level convenience.\n # use PDFTextObject for multi-line text.\n ##################################################",
"def PaintBackground(self, rect, attr):\n\t\tprint \"rorCellEditor: PaintBackground\\n\"",
"def draw_area(self, events):\r\n self.current_points_list.append((events.x, events.y))\r\n\r\n self.create_oval(events.x - 1, events.y - 1, events.x + 1, events.y + 1, fill=\"yellow\", tags='indicator')",
"def paint_pattern(self):\n pass",
"def draw(self, view):\n super().draw()",
"def handle_mouse_press(self, event):\r\n\r\n self._color_index = (self._color_index + 1) % len(self._colors)\r\n self._color = self._colors[self._color_index]\r\n self._circle.set_fill_color(self._color)\r\n self._circle.set_border_color(self._color)",
"def draw_outline(\n self,\n colour='green',\n thickness=2,\n fill=None,\n rect=None):\n raise NotImplementedError()",
"def _draw_simple_background(self):\n # Simple clock background\n self._gc.set_foreground(self._COLOR_WHITE)\n x_delta = self._center_x - self._radius\n y_delta = self._center_y - self._radius\n\n self.window.draw_arc(self._gc, True, x_delta, y_delta,\n 2 * self._radius, 2 * self._radius, 0, 360 * 64)\n self._gc.set_foreground(self.get_style().fg[gtk.STATE_NORMAL])\n self._gc.set_line_attributes(4 * self._line_width,\n gdk.LINE_SOLID, gdk.CAP_ROUND, gdk.JOIN_ROUND)\n self.window.draw_arc(self._gc, False, x_delta, y_delta,\n 2 * self._radius, 2 * self._radius, 0, 360 * 64)\n\n # Clock ticks\n self._gc.set_line_attributes(4 * self._line_width,\n gdk.LINE_SOLID, gdk.CAP_ROUND, gdk.JOIN_ROUND)\n for i in xrange(60):\n if i % 15 == 0:\n inset = 0.175 * self._radius\n elif i % 5 == 0:\n inset = 0.1 * self._radius\n else:\n inset = 0.05 * self._radius\n\n cos = math.cos(i * math.pi / 30.0)\n sin = math.sin(i * math.pi / 30.0)\n self.window.draw_line(self._gc,\n int(self._center_x + (self._radius - inset) * cos),\n int(self._center_y + (self._radius - inset) * sin),\n int(self._center_x + self._radius * cos),\n int(self._center_y + self._radius * sin))",
"def paint_borders(self, color: ColorsType, width: int) -> None:"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print a directory tree. Input is a str or Path-like object. If print_files is True, also print files, limited to num_files per directory.
|
def tree(
path: Union[str, Path] = ".",
ident: int = 0,
print_files: bool = False,
num_files: int = 3,
) -> None:
path = Path(path)
dirs, files = get_dirs_files(path)
print(" " * ident, f"{path.name} - {len(dirs)} dirs {len(files)} files")
for dir_entry in dirs:
tree(Path(dir_entry), ident + 4, print_files, num_files)
if print_files:
len_files = len(files)
for dir_entry in files[:num_files]:
print(" " * (ident + 4), "-", dir_entry.name)
if len_files > num_files and len_files != 0:
print(
" " * (ident + 4),
"--",
f"{len_files - num_files} more files in this dir",
)
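
The helper get_dirs_files is not shown above; below is a plausible, self-contained sketch of it (assuming it simply partitions a directory's entries), plus a quick check.

from pathlib import Path

def get_dirs_files(path):
    # Partition a directory's entries into sub-directories and files.
    entries = list(Path(path).iterdir())
    return [e for e in entries if e.is_dir()], [e for e in entries if e.is_file()]

dirs, files = get_dirs_files(".")
print(f"{len(dirs)} dirs, {len(files)} files at the top level")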
|
[
"def print_tree(self, maxresults=100, maxdepth=None):\n self.ignore_caller()\n for depth, refid, rep in self.walk(maxresults, maxdepth):\n print (\"%9d\" % refid), (\" \" * depth * 2), rep",
"def print_tree_helper(path, sep, depth):\r\n for item in path_iterator(path): \r\n # For every file/dir in the mentioned path\r\n title = os.path.basename(item) # Get the basename of the path\r\n # i.e. the file/dir (foo/bar => bar)\r\n if os.path.isdir(item):\r\n # If the item is a directory, call the print_tree_helper again\r\n # and print the directory title\r\n\r\n print((depth)*sep + title)\r\n\r\n print_tree_helper(item, sep, depth + 1) # Increase depth by 1\r\n elif os.path.isfile(item):\r\n # Item is a file, print its title with the depth*sep \r\n print((depth)*sep + title)",
"def print_tree(path, sep=' '):\r\n print_tree_helper(path, sep, 0)",
"def printTree(self):\n pass",
"def print_tree(self, tabwidth=0):\n\n # if teststr == \"silent\":\n print(tabwidth * \" \", self.ele, '*' if self.mark else '', sep=\"\")\n\n \"\"\" Debugging purposes\n elif teststr == \"loud\":\n print(tabwidth*\" \", end = \" \")\n show((self.ele, id(self)))\n #input()#\n \"\"\"\n for childtree in self.children_generator():\n childtree.print_tree(tabwidth + 1)",
"def main():\n size = checkArgs()\n printTree(size)",
"def printPaths(\n object,\n maxRecursionDepth: SymbolicConstant = None,\n maxElementsInSequence: SymbolicConstant = None,\n pathRoot: str = \"\",\n):\n pass",
"def print_tree(self):\r\n f = open(os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)), 'Tree.txt'), 'w')\r\n node = self.root\r\n self.print_node(f, node, \"\")\r\n f.close()",
"def print_tree(entries, prefix, depth=0, indent=0, pad_char='.', pad_width=4):\n\n # pad string\n if indent == 0 or pad_width == 0:\n pad = ''\n elif indent > 0:\n pad = ' {}'.format(indent * pad_width * pad_char)\n\n # iteration\n done = []\n for size, path in entries:\n\n if not path in done:\n\n # handle level entry\n print('{1:>6}{0} {2}'.format(\n pad, number_iso(size), os.path.sep.join(prefix + path)\n )\n )\n done.append(path)\n \n # handle children\n if len(path) == depth + 1: \n\n # find all children starting with parent path\n children = [(s,p) for s,p in entries if p[:depth+1] == path[:depth+1] and not p in done]\n print_tree(children, prefix, depth+1, indent+1, pad_char, pad_width)\n done.extend([p for s,p in children])",
"def traverse(pathname, d):\n\n for item in os.listdir(pathname):\n next1 = os.path.join(pathname, item)\n\n try:\n print('{}{}'.format(' ' * d, next1))\n traverse(next1, d + 1)\n except:\n pass",
"def PrintTree(self, *args):\n return _itkKdTreePython.itkKdTreeLSVF2_PrintTree(self, *args)",
"def print_tree(self, level=None):\n levels = range(self.tree_levels) if level is None else [level]\n for k in levels:\n for j in range(2 ** k - 1, 2 ** (k + 1) - 1):\n print(self.tree[j], end=' ')\n print()",
"def PrintTree(self, *args):\n return _itkKdTreePython.itkKdTreeLSVF3_PrintTree(self, *args)",
"def print_folder(folder, indent=\"\"):\r\n print(f\"{indent}{folder['name']:<12} {folder['timestamp']}\")\r\n # Add your code below this line.\r\n indent = indent + \"\\t\"\r\n if folder['type'] == 'dir':\r\n for i in range(len(folder['files'])):\r\n if folder['files'][i]['type'] == 'dir':\r\n print_folder(folder['files'][i], indent)\r\n if folder['files'][i]['type'] == 'file':\r\n print_file_listing(folder['files'][i], indent)",
"def _printFile(\n darwinFile: DarwinFile,\n seenFiles: set[DarwinFile],\n level: int,\n noRecurse=False,\n):\n print(\"{}{}\".format(level * \"| \", os.fspath(darwinFile.path)), end=\"\")\n print(\" (already seen)\" if noRecurse else \"\")\n if noRecurse:\n return\n for ref in darwinFile.machOReferenceForTargetPath.values():\n if not ref.is_copied:\n continue\n file = ref.target_file\n _printFile(\n file,\n seenFiles=seenFiles,\n level=level + 1,\n noRecurse=(file in seenFiles),\n )\n seenFiles.add(file)\n return",
"def print_tree(t, indent=0):\n print(' ' * indent + str(entry(t)))\n for subtree in subtrees(t):\n print_tree(subtree, indent + 1)",
"def print_tree(tree, str):\n if type(tree) == dict:\n print(\"%s%s\" % (str, list(tree.keys())[0]))\n for item in list(tree.values())[0].keys():\n print(\"%s\\t%s%s\" % (str, item, \"-\\\\\"))\n print_tree(list(tree.values())[0][item], str + \"\\t\\t\")\n print(\"\")\n else: #printing leaves\n print(\"%s->%s\" % (str, tree))",
"def show_dir_contents(dir_path=Path('')):\n if not isinstance(dir_path, Path):\n dir_path = Path(dir_path)\n\n if not dir_path.exists():\n raise ValueError(\"Directory does not exist.\")\n if not dir_path.is_dir():\n raise ValueError(\"The given path is not a path to a directory.\")\n name_lengths = [len(item.name) for item in dir_path.iterdir()]\n\n if len(name_lengths) == 0:\n return\n\n align_num = max([len(file.name) for file in dir_path.iterdir()]) + 4\n\n print(\"Contents of \\'{}\\':\\n\".format(dir_path))\n print(\"{0:<{align_len}} {1}\".format(\"Name\", \"Length (kB)\", align_len=align_num))\n print(\"{0:<{align_len}} {1}\".format(\"----\", \"-----------\", align_len=align_num))\n\n contents = sorted((item for item in dir_path.iterdir()), key=lambda x: not x.is_dir())\n\n for item in contents:\n if item.is_file():\n print(\"{0:<{align_len}} {1}\".format(item.name, round(item.stat().st_size / 1024),\n align_len=align_num))\n else:\n print(f\"{item.name}\")",
"def printTree(size: int):\n print(\"*\".center(((size * 2) + 1)))\n midSpace = 1\n for sect in reversed(range(size)):\n print(\"/\".rjust(sect + 1), \"\\\\\".rjust(midSpace))\n midSpace += 2\n print(\"-\".center(((size * 2) + 1), \"-\"))\n print(\"#\".center(((size * 2) + 1)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Reset the server to its initial state, clearing out all onboarding-created roles and onboarding-created channels
|
async def reset_server(ctx):
# Delete onboarding-created roles
removed_roles = []
for role in ctx.guild.roles:
if role.name.startswith("o-"):
try:
await role.delete()
removed_roles.append(role.name)
except discord.Forbidden:
await spit_log(f'The role {role.name} cannot be removed!',
status=Status.error)
if len(removed_roles) > 0:
await spit_log(f'Roles {", ".join(removed_roles)} have been swept up!',
status=Status.success)
else:
await spit_log(f'No roles were removed!', status=Status.info)
# Delete onboarding-created channels
removed_channels = []
verification_category = discord.utils.get(ctx.guild.categories,
id=VERIFICATION_CATEGORY_ID)
for channel in verification_category.channels:
await channel.delete()
removed_channels.append(channel.name)
if len(removed_channels) > 0:
await spit_log(
f'Channels {", ".join(removed_channels)} have been swept up!',
status=Status.success)
else:
await spit_log(f'No channels were removed!', status=Status.info)
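
A minimal sketch of how a command like this is typically registered with discord.ext.commands; the prefix, the permission check, and the token placeholder are assumptions, and the original spit_log helper is replaced by a plain ctx.send.

import discord
from discord.ext import commands

intents = discord.Intents.default()
bot = commands.Bot(command_prefix="!", intents=intents)

@bot.command(name="reset_server")
@commands.has_permissions(administrator=True)
async def reset_server_command(ctx):
    # Delete every role whose name marks it as onboarding-created.
    for role in ctx.guild.roles:
        if role.name.startswith("o-"):
            try:
                await role.delete()
            except discord.Forbidden:
                await ctx.send(f"The role {role.name} cannot be removed!")

# bot.run("YOUR_BOT_TOKEN")  # supply a real token to actually run the bot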
|
[
"async def reset(self, ctx):\n # TODO: Add confirmation message\n await sql.deleteserver(ctx.message.guild.id)\n await sql.initserver(ctx.message.guild.id)\n em = discord.Embed(title=\"Reset all data for this server\",\n colour=discord.Colour.dark_green())\n await ctx.send(embed=em)",
"async def reset(self, ctx):\n await self.config.guild(ctx.guild).channel_whitelist.set([\"general\"])\n await self.config.guild(ctx.guild).channel_blacklist.set([])\n await ctx.send(\"Done\")",
"def resetClient(self):\n\n\t\tlogging.info(\"Resetting backend...\")\n\t\tself.__setup()\n\t\tself.lobby.reset()\n\t\tself.__updateClientStatus(ClientStatus.NOGAMERUNNING)",
"def reset_server():\n ServerConfig.objects.conf(\"server_epoch\", time.time())\n from evennia.server.sessionhandler import SESSIONS\n\n logger.log_info(\"Initial setup complete. Restarting Server once.\")\n SESSIONS.portal_reset_server()",
"def clear(self):\n self._connection.clearServer()",
"def reset_all_channels(self):\n for channel in range(0,self.rack_size):\n self.reset_channel(channel)",
"def clear(self) -> None:\n # Because discord.py recreates the HTTPClient session, may as well\n # follow suit and recreate our own stuff here too.\n self._recreate()\n super().clear()",
"def pop_empty_channels(self):\n\n murderlist = []\n\n for chname in self.channels:\n channel = self.channels[chname]\n\n if len(channel.clients) == 0:\n murderlist.append(channel)\n\n for victim in murderlist:\n del self.channels[victim.name]",
"def reset_server() -> None:\n if __designated_connection is not None:\n __designated_connection.reset_server()\n else:\n connect()\n if __designated_connection is not None:\n __designated_connection.reset_server()",
"async def clear_winners(self, interaction: core.InteractionType):\n await interaction.response.defer(thinking=True)\n\n for member in self.winner_role.members:\n await member.remove_roles(self.winner_role)\n\n return await interaction.followup.send(\"Cleared winners.\", ephemeral=False)",
"def _reset_connection(self):\n self.connected = False\n self.gearman_socket = None\n\n self.allowed_connect_time = 0.0\n\n self._is_client_side = None\n self._is_server_side = None\n\n # Reset all our raw data buffers\n self._incoming_buffer = array.array(\"b\")\n self._outgoing_buffer = b\"\"\n\n # Toss all commands we may have sent or received\n self._incoming_commands = collections.deque()\n self._outgoing_commands = collections.deque()",
"def reset(self):\r\n self._gameboard = [[0 for dummy_index in range(self._grid_width)]\r\n for dummy_index2 in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()",
"def ai_reset():\n light_pieces.clear()\n dark_pieces.clear()",
"def reset(self):\n self.membership = None",
"async def cleanup(self, ctx: GuildContext):\n # cleanup roles\n for role in ctx.guild.roles:\n if role >= ctx.me.top_role:\n continue\n \n p = role.permissions.value | ctx.guild.default_role.permissions.value\n if role.permissions.value == p:\n continue\n \n await role.edit(permissions=discord.Permissions(p))\n await ctx.send(f\"Cleaned {role.mention if role.mentionable else role.name}\", allowed_mentions=discord.AllowedMentions.none())\n # cleanup permissions\n for channel in ctx.guild.channels:\n clean = {target:overwrite for target, overwrite in channel.overwrites.items() if not overwrite.is_empty()}\n if clean == channel.overwrites:\n continue # don't make extra requests\n \n await channel.edit(overwrites=clean) # type: ignore - wtf is wrong\n await ctx.send(f\"Cleaned {channel.mention}\")\n await ctx.send(\"Cleanup complete\")",
"def reset(self) -> None:\n self._sensors = []\n self._actuators = []\n self._controller = None\n self._plant_state = None",
"def exposed_reset(self):\n tournament_service.reset()",
"def set_empty_conn(self):\n self.conn = []\n for i in range(self.natoms):\n self.conn.append([])\n return",
"def reset_state(self):\n self._state = self.STATE_INIT\n self._mail_from = None\n self._helo = None\n self._rcpt_to = []"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the current_version string in .bumpversion.cfg
|
def read_current_version():
config = RawConfigParser()
config.add_section('bumpversion')
config.read_file(io.open('.bumpversion.cfg', 'rt', encoding='utf-8'))
items = dict(config.items('bumpversion'))
current_version = items.get('current_version')
return current_version
|
[
"def get_version() -> str:\n config = configparser.ConfigParser()\n path = Path(__file__).parent.parent / \"setup.cfg\"\n config.read(path)\n return str(config[\"metadata\"][\"version\"])",
"def find_current_version():\n with open(VERSION_FILE) as v:\n return v.read()",
"def _get_version() -> str:\n _dirpath = path.split(path.realpath(__file__))[0]\n version = \"UNKNOWN???\"\n for _ in range(3):\n _filepath = path.join(_dirpath, \"pyproject.toml\")\n if path.exists(_filepath):\n with open(_filepath, encoding=\"utf8\") as f:\n version = (\n [ln for ln in f.read().split(\"\\n\") if \"version\" in ln][0]\n .replace(\"version = \", \"\")\n .strip('\"')\n )\n return version\n _dirpath = path.split(_dirpath)[0]\n return version",
"def version(self):\n try:\n print((\"Reading version from\", self.version_file))\n with open(self.version_file) as fh:\n version = fh.read().strip()\n except FileNotFoundError:\n self.log.debug(f\"No version file found at {self.version_file}\")\n return \"0.0.0\"\n return version",
"def get_version() -> str:\n return VERSION",
"def old_version():\n with open(version_file, 'r') as file_:\n for line in file_.readlines():\n if \"__version__\" in line:\n version = line.strip().split('=')[-1].strip(\" '\\\"\")\n break\n else:\n raise ValueError(\"Could not read or generate version\")\n return version",
"def get_version():\n file = open(\"assets/version\", \"r\")\n ret = ''\n for line in file:\n ret += line\n file.close()\n return ret",
"def get_product_build():\n return read_file_value(\"VersionFile.json\", \"version_build\")",
"def getAppVersion():\n return os.environ.get('CURRENT_VERSION_ID')",
"def get_version():\n version_file = repository_root / f\"{package_root}/{package_name}/__init__.py\"\n initfile_lines = version_file.open(\"rt\").readlines()\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n for line in initfile_lines:\n mo = re.search(VSRE, line, re.M)\n if mo:\n return mo.group(1)\n return \"unknown\"",
"def get_version():\n with open(os.path.join(\n os.path.dirname(__file__), MODULE_NAME, '__init__.py')\n ) as init:\n for line in init.readlines():\n res = re.match(r'^__version__ = [\\'\"](.*)[\\'\"]$', line)\n if res:\n return res.group(1)",
"def make_plugin_version_string():\n with open(version_filesystem_location(), encoding='utf-8') as fp:\n version = json.load(fp)\n return '{major}.{minor}.{revision}'.format(**version)",
"def get_base_version():\n version = None\n with open(os.path.join(current_dir, package, \"__init__.py\")) as fd:\n for line in fd.readlines():\n if line.startswith(\"__version__ = \"):\n match = re.match(\"__version__\\s=\\s\\\"(?P<version>.+).__VERSION__\", line)\n if match:\n version = match.groupdict().get(\"version\")\n if version:\n break\n if not version:\n raise ValueError(\"failed to get base version number\")\n return version",
"def get_local_releasever():\n dnf_base = dnf.Base()\n return dnf_base.conf.releasever",
"def get_current_pkg_version():\n current_major_minor = _find_in_file(os.path.join(here, PKG_NAME, '__init__.py'))\n last_jenkins_build_num = get_next_jenkins_build()\n\n full_version = f'{current_major_minor}.{last_jenkins_build_num}'\n\n return full_version",
"def get_version():\n with open(VERSION_FILE) as handle:\n lines = handle.read()\n result = VERSION_REGEX.search(lines)\n if result:\n return result.groupdict()[\"version\"]\n else:\n raise ValueError(\"Unable to determine __version__\")",
"def version(self):\n return self.get(\"active\", \"\", \"rev\")",
"def _get_version_from_image_config(self, conf: ImageConfig) -> str:\r\n env: List[str] = conf.config.get(\"Env\")\r\n for var in env:\r\n if \"\".join(var).split(\"=\")[0] == self.version_var:\r\n version = \"\".join(var).split(\"=\")[1]\r\n return version\r\n return \"\"",
"def version_file(self):\n return os.path.join(self.repo_ctl_dir, \"VERSION\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get usable buffer from datetime
|
def get_timestamp_buffer(dt: datetime) -> bytes:
filetime = filetimes.dt_to_filetime(dt)
return struct.pack("!Q", filetime)
|
[
"def get_timestamp_buffer(self, dt: datetime) -> bytes:\n filetime = filetimes.dt_to_filetime(dt)\n return struct.pack('!Q', filetime)",
"def readDate(self):\n ms = self.stream.read_double() / 1000.0\n tz = self.stream.read_short()\n\n # Timezones are ignored\n d = datetime.datetime.utcfromtimestamp(ms)\n self.context.addObject(d)\n\n return d",
"def getData(self, key, startTime, endTime):",
"def datetime():\n return _get_rtc().datetime()",
"def berv(dp):\n from lib.utils import typetest\n import numpy as np\n import pdb\n from astropy.io import ascii\n from astropy.time import Time\n from astropy import units as u, coordinates as coord\n typetest('dp',dp,str)\n d=ascii.read(dp+'obs_times',comment=\"#\")#,names=['mjd','time','exptime','airmass'])\n #Removed the named columns because I may not know for sure how many columns\n #there are, and read-ascii breaks if only some columns are named.\n #The second column has to be a date array though.\n berv = d['col5']\n return berv.data",
"def read_datetime(stream):\n lowdatetime = read_dword(stream)\n highdatetime = read_dword(stream)\n return DateTime(lowdatetime, highdatetime)",
"def generate_daterange(date: datetime.datetime, width: int, buffer: bool = False) -> typing.Tuple[datetime.datetime]:\n try:\n # Generate the start of the daterange\n start = shift_date(date, -width)\n # Generate the end of the daterange\n end = shift_date(date, width) if buffer else date\n\n # Return the tuple of dates\n return (start, end)\n\n except Exception as e:\n raise RuntimeError(f\"could not generate daterange. error: {e}\")",
"def datagram_python_datetime(datagram):\n return python_datetime(datagram.dgheader.datetime)",
"def _full_times(self, index):\n # Number of points in the buffer arrays\n n_before = int(self._buffers[index][0]/self.dt)\n if self._buffers[index][0]%self.dt:\n n_before += 1\n n_after = int(self._buffers[index][1]/self.dt)\n if self._buffers[index][1]%self.dt:\n n_after += 1\n # Proper starting points of buffer arrays to preserve dt\n t_min = self.times[0] - n_before*self.dt\n t_max = self.times[-1] + n_after*self.dt\n return np.concatenate((\n np.linspace(t_min, self.times[0], n_before, endpoint=False),\n self.times,\n np.linspace(self.times[-1], t_max, n_after+1)[1:]\n ))",
"def getBufferString(self):\n return self.buffer[0:self.usedBufferSize].tostring()",
"def get_obs_datetime_obj(cls):\n obs_time = []\n #SAMPLE: '2014-06-04T06:00:00.000000000'\n year = np.array(cls.obs_data['YEAR'], dtype=int)\n month = np.array(cls.obs_data['MONTH'], dtype=int)\n day = np.array(cls.obs_data['DAY'], dtype=int)\n hour = np.array(cls.obs_data['HOUR'], dtype=int)\n minute = np.array(cls.obs_data['MINUTE'], dtype=int)\n for idx in range(len(year)):\n utc_dt = datetime(year[idx], month[idx], day[idx], hour[idx], minute[idx])\n obs_time.append(utc_dt)\n cls.t_time = obs_time",
"def get_buffer(self, piece):\n curr_buffer = self._original_buffer if piece.buffer_type==self.BUFFER.ORIGINAL else self._added_buffer\n return curr_buffer",
"def filetime(d):\n return d.highdatetime * 4294967296 + d.lowdatetime",
"def get_memento(record, accept_datetime):\n if accept_datetime >= record.updated:\n return record\n for revision in reversed(record.revisions):\n updated = revision.updated.replace(microsecond=0)\n if updated <= accept_datetime:\n return revision\n return revision",
"def getLastReadingStrInBuffer(self):\n\n if len(self.data) > 0: \n outStr = str(conNumToDateTime(self.data[-1][0]))+' - '+self.mesureType + ' : '+str(self.data[-1][1])+' '+ self.mesureUnit\n return outStr\n else:\n return \"No data\"",
"def test_sample_buffer():\n buf = SampleBuffer()\n first = Sample(VALID_INPUT_LINE)\n buf.append(first)\n\n second = Sample(VALID_INPUT_LINE)\n second.time += timedelta(seconds=1)\n buf.append(second)\n assert not buf.has_time_anomaly() # 1 second interval -- less than 1.5s\n\n # Generate a contiguous buffer by adding 5 samples with 1-sec intervals.\n third = Sample(VALID_INPUT_LINE)\n third.time += timedelta(seconds=2)\n buf.append(third)\n fourth = Sample(VALID_INPUT_LINE)\n fourth.time += timedelta(seconds=3)\n buf.append(fourth)\n\n assert not buf.is_contiguous()\n\n fifth = Sample(VALID_INPUT_LINE)\n fifth.time += timedelta(seconds=4)\n buf.append(fifth)\n\n assert buf.is_contiguous()\n # Calculate 5-second averages.\n assert buf.get_avg('kW') == 441.781\n assert buf.get_avg('V') == 477.470\n assert buf.get_avg('I_') == 925.254\n\n # Introduce \"discontinuities\" in time and value.\n buf = SampleBuffer()\n buf.append(first)\n buf.append(second)\n # missing third sample!\n buf.append(fourth)\n buf.append(fifth)\n assert not buf.is_contiguous()\n\n # Try a value discontinuity.\n third.V = 473.15\n assert third.has_V_anomaly()\n buf = SampleBuffer()\n buf.append(first)\n buf.append(second)\n buf.append(third)\n buf.append(fourth)\n buf.append(fifth)\n assert not buf.is_contiguous()",
"def _access(self, at=None, just_value=True):\n if at is None:\n at = datetime.now()\n times = sorted(self._data.keys(), reverse=True)\n if len(times) == 0:\n return self.__value_time(set(), datetime.min, just_value)\n time = times.pop(0)\n value = self._data[time]['value']\n if time <= at:\n return self.__value_time(value, time, just_value)\n for time in times:\n value = ((value | self._data[time]['adds']) -\n self._data[time]['dels'])\n if time <= at:\n return self.__value_time(value, time, just_value)\n return self.__value_time(set(), datetime.min, just_value)",
"def timeBinToDate(x, width):\n return datetime.datetime.fromtimestamp(x * width)",
"def test_datetime_roundtrip():\n obj = datetime.datetime(2000, 1, 1, 0, 0, 0, 1, tzinfo=datetime.timezone.utc)\n deserialized = ormsgpack.unpackb(ormsgpack.packb(obj))\n parsed = pendulum.parse(deserialized)\n for attr in (\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"microsecond\"):\n assert getattr(obj, attr) == getattr(parsed, attr)",
"def _get_message_in_buffer(self):\n if len(self._buffered_messages) > 0:\n return json.loads(self._buffered_messages.pop(0))\n else:\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get big endian uint32 bytes representation from signature version
|
def get_signature_version_buffer(version: int) -> bytes:
return struct.pack("!I", version)
|
[
"def abi_signature(self):\n return big_endian_to_int(sha3(str_to_bytes(self.signature))[:4])",
"def get_v_r_s(sig: HexStr) -> Tuple[int, str, str]:\n return Web3.toInt(sig[-1]) + 27, Web3.toHex(sig[:32]), Web3.toHex(sig[32:64])",
"def bytes(self):\n\n if not self._signature:\n self._signature = self.r.to_bytes(32, 'big') + self.s.to_bytes(32, 'big')\n return self._signature",
"def receive_byte_signed(self):\n return unpack('b', self.read(1))[0]",
"def _read_binary_header(bfile):\n magic, format_version, format_extended, length = struct.unpack('=4sHHi', bfile.read(12))\n # print(magic, format_version, format_extended, length) \n if magic != b\"SMSH\":\n print(\"Fatal error: failed to reproduce magic number.\")\n sys.exit(1)\n smash_version = struct.unpack('%ds' % length, bfile.read(length))\n assert len(smash_version) == 1\n return smash_version[0], format_extended, format_version",
"def test_byte_order(self):\n\n # most significant byte first (default)\n ref = b'\\x02\\x46\\x9a\\xfe\\x00\\x00\\x00'\n packed = pack('>>u19s3f32', 0x1234, -2, -1.0)\n self.assertEqual(packed, ref)\n unpacked = unpack('>>u19s3f32', packed)\n self.assertEqual(unpacked, (0x1234, -2, -1.0))\n\n # least significant byte first\n ref = b'\\x34\\x12\\x18\\x00\\x00\\xe0\\xbc'\n packed = pack('><u19s3f32', 0x1234, -2, -1.0)\n self.assertEqual(packed, ref)\n unpacked = unpack('><u19s3f32', packed)\n self.assertEqual(unpacked, (0x1234, -2, -1.0))\n\n # least significant byte first\n ref = b'\\x34\\x12'\n packed = pack('><u8s8', 0x34, 0x12)\n self.assertEqual(packed, ref)\n unpacked = unpack('><u8s8', packed)\n self.assertEqual(unpacked, (0x34, 0x12))\n\n # least significant byte first\n ref = b'\\x34\\x22'\n packed = pack('><u3u12', 1, 0x234)\n self.assertEqual(packed, ref)\n unpacked = unpack('><u3s12', packed)\n self.assertEqual(unpacked, (1, 0x234))\n\n # least significant byte first\n ref = b'\\x34\\x11\\x00'\n packed = pack('><u3u17', 1, 0x234)\n self.assertEqual(packed, ref)\n unpacked = unpack('><u3s17', packed)\n self.assertEqual(unpacked, (1, 0x234))\n\n # least significant byte first\n ref = b'\\x80'\n packed = pack('><u1', 1)\n self.assertEqual(packed, ref)\n unpacked = unpack('><u1', packed)\n self.assertEqual(unpacked, (1, ))\n\n # least significant byte first\n ref = b'\\x45\\x23\\x25\\x82'\n packed = pack('><u19u5u1u7', 0x12345, 5, 1, 2)\n self.assertEqual(packed, ref)\n unpacked = unpack('><u19u5u1u7', packed)\n self.assertEqual(unpacked, (0x12345, 5, 1, 2))",
"def _version_to_bytes( version : str ) -> bytearray:\n if version is None:\n return None\n version_ = bytearray(version,'utf-8')\n if len(version_) >= SubDir.MAX_VERSION_BINARY_LEN: _log.throw(\"Cannot use version '%s': when translated into a bytearray it exceeds the maximum version lengths of '%ld' (byte string is '%s')\", version, SubDir.MAX_VERSION_BINARY_LEN-1, version_ )\n ver_ = bytearray(SubDir.MAX_VERSION_BINARY_LEN)\n l = len(version_)\n ver_[0] = l\n ver_[1:1+l] = version_\n assert len(ver_) == SubDir.MAX_VERSION_BINARY_LEN, (\"Internal error\", len(ver_), ver_)\n return ver_",
"def to_be_bytes32(self) -> \"Bytes32\":\n return self.to_bytes(32, \"big\")",
"def decode_version_nr(version_nr):\n\n major = version_nr >> 16\n minor = (version_nr >> 8) & 0xff\n patch = version_nr & 0xff\n\n return major, minor, patch",
"def version(self):\n return (self.hdr['type'] >> 13) & 0x7",
"def _read_bytes_as_number(keytab: str, index: int, bytes_to_read: int=1, keytab_format_version: int=1,\n is_signed_int: bool=False):\n # since our string is hex, a byte is represented by 2 characters, so our string offset to read is twice\n # the number of bytes\n offset = bytes_to_read * 2\n end_index = index + offset\n if end_index > len(keytab):\n return 0\n\n hex_string_to_parse = keytab[index:end_index]\n if keytab_format_version == 1:\n converted_from_little_endian = []\n for i in range(0, offset, 2):\n converted_from_little_endian.insert(0, hex_string_to_parse[i:i+2])\n hex_string_to_parse = ''.join(converted_from_little_endian)\n elif keytab_format_version != 2:\n raise KeytabEncodingException('Unrecognized keytab format version {}'.format(keytab_format_version))\n\n unsigned_value = int(hex_string_to_parse, 16)\n if is_signed_int:\n return _twos_complement(unsigned_value, bytes_to_read * 8) # 8 bits per byte\n return unsigned_value",
"def SoHardCopy_getVersion() -> \"char const *\":\n return _coin.SoHardCopy_getVersion()",
"def magic2int(magic):\n return struct.unpack(\"<Hcc\", magic)[0]",
"def decode_signature(cls, signature):\r\n signature = bytes.fromhex(signature)\r\n signature = rlp.decode(signature)\r\n signature = [int.from_bytes(item, 'big') for item in signature]\r\n\r\n return signature",
"def _getOneByteUnsigned( self, r ):\r\n return r",
"def bytes(self):\n \n # Remove all spaces from the contents of the hex view.\n contents = self.hex_view()\n \n # Every two hex digits represents a single byte.\n byte_values = [-1 if contents[i:i+2] == b'??' \n else int(contents[i:i+2], 16) \n for i in range(0, len(contents), 2)]\n\n # The first four bytes of every 20 bytes contains an address, which\n # are not useful for analysis.\n byte_values = [byte_values[i] for i in range(len(byte_values))\n if i % 20 >= 4]\n \n return byte_values",
"def unpack_uuid(data: bytes) -> Tuple[bytes, int]:\n return data[:16], 16",
"def _getTwoBytesUnsigned( self, r1, r2 ):\r\n return r1 << 8 | r2",
"def convertToBinary32x2(data):\r\n binaryData_0 = ''\r\n binaryData_1 = ''\r\n for i in range(0, len(data)/4):\r\n x_0 = struct.pack('>hh', data[4*i], data[4*i+1])\r\n binaryData_0 = binaryData_0 + x_0\r\n x_1 = struct.pack('>hh', data[4*i+2], data[4*i+3])\r\n binaryData_1 = binaryData_1 + x_1\r\n\r\n \r\n \r\n return binaryData_0,binaryData_1",
"def getSignature(firmware_data):\r\n start = firmware_data[:-2].rfind('\\x00') + 1\r\n ret = firmware_data[start:]\r\n if not 'Version' in ret or not 'Date' in ret:\r\n raise Exception(\"Invalid signature\")\r\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Verify signature against digest
|
def verify_digest(
self,
signature: bytes,
digest: bytes,
verifying_key: Optional[VerifyingKey] = None,
) -> bool:
verifier = verifying_key or self.signing_key.verifying_key
return verifier.verify_digest(signature, digest)
|
[
"def verify(hash, signature, key_path=\"~/.ssh/ida_rsa\"):\n key = open(expanduser(key_path), \"r\").read()\n rsakey = RSA.importKey(key) \n pubkey = key.publickey()\n return pubkey.verify(hash, b64decode(signature)) == True",
"def verifies( self, hash, signature ):\n\n # From X9.62 J.3.1.\n\n G = self.generator\n n = G.order()\n r = signature.r\n s = signature.s\n if r < 1 or r > n-1: return False\n if s < 1 or s > n-1: return False\n c = numbertheory.inverse_mod( s, n )\n u1 = ( hash * c ) % n\n u2 = ( r * c ) % n\n xy = u1 * G + u2 * self.point\n v = xy.x() % n\n return v == r",
"def ecdsa_verify_hash(self, message_digest, signature):\n\t\tassert(isinstance(message_digest, bytes))\n\t\tassert(0 < signature.r < self.curve.n)\n\t\tassert(0 < signature.s < self.curve.n)\n\n\t\t# Convert message digest to integer value\n\t\te = Tools.ecdsa_msgdigest_to_int(message_digest, self.curve.n)\n\n\t\t(r, s) = (signature.r, FieldElement(signature.s, self.curve.n))\n\t\tw = s.inverse()\n\t\tu1 = int(e * w)\n\t\tu2 = int(r * w)\n\n\t\tpt = (u1 * self.curve.G) + (u2 * self.point)\n\t\tx1 = int(pt.x) % self.curve.n\n\t\treturn x1 == r",
"def _check_signature(self, signature, data):\n\n hashed = hmac.new(self.secret_token, data, sha1)\n sig_check = f\"sha1={hashed.hexdigest()}\"\n\n return hmac.compare_digest(signature, sig_check)",
"def rsa_check_signature(signature, public_key):\n pass",
"def verify_signature(request_body, signature, hmac_key):\n computed = hmac.new(hmac_key, request_body, hashlib.sha1)\n if not hmac.compare_digest(computed.hexdigest(), signature.encode('ascii', 'ignore')):\n raise SignatureError('Computed signature does not match request signature.')",
"def verify_tx_signature(tx):\n public_key = RSA.importKey(\n binascii.unhexlify(tx.sender)\n )\n\n verifier = PKCS1_v1_5.new(public_key)\n\n data_hash = Hasher.create_data_hash_256(\n tx.sender,\n tx.recipient,\n tx.amount\n )\n\n return verifier.verify(\n data_hash,\n binascii.unhexlify(tx.signature)\n )",
"def verify_data(data, signature, sig_key, hashalg):\n reference = authenticate_data(data, sig_key, hashalg)\n if not compare_constant_time(reference, signature):\n raise CryptoError(\"Invalid Signature\")\n else:\n return True",
"def test_signature_validity(curve, generator, Msg, Qx, Qy, R, S, expectedVerification):\n pubk = Public_key(generator, ellipticcurve.Point(curve, Qx, Qy))\n verificationRes = pubk.verifies(digest_integer(Msg), Signature(R, S))\n assert verificationRes == expectedVerification, \"Signature verification failed\"",
"def verify(self, public_key, message, signature):",
"def ecdsa_verify(self, message, signature):\n\t\tassert(isinstance(message, bytes))\n\t\tdigest_fnc = hashlib.new(signature.hashalg)\n\t\tdigest_fnc.update(message)\n\t\tmessage_digest = digest_fnc.digest()\n\t\treturn self.ecdsa_verify_hash(message_digest, signature)",
"def test_sign_and_verify(self):\n algos = {'sha1':'', \n 'ripemd160':'',\n 'md5':''}\n\n if m2.OPENSSL_VERSION_NUMBER >= 0x90800F:\n algos['sha224'] = ''\n algos['sha256'] = ''\n algos['sha384'] = '' \n algos['sha512'] = '' \n\n message = \"This is the message string\"\n digest = sha.sha(message).digest()\n rsa = RSA.load_key(self.privkey)\n rsa2 = RSA.load_pub_key(self.pubkey)\n for algo in algos.keys():\n signature = rsa.sign(digest, algo)\n #assert signature == algos[algo], 'mismatched signature with algorithm %s: signature=%s' % (algo, signature)\n verify = rsa2.verify(digest, signature, algo) \n assert verify == 1, 'verification failed with algorithm %s' % algo",
"def __verifySignature(self, transaction: Transaction) -> bool:\n senderPublicKey = self.getSenderAccount(transaction.getSender()).get('publicKey')\n publicKey = RSA.importKey(binascii.unhexlify(senderPublicKey))\n verifier = PKCS1_v1_5.new(publicKey)\n txString = str(transaction.getOrderedDict())\n h = TLCUtilities.getDoubleHash256(txString)\n result = verifier.verify(h, binascii.unhexlify(transaction.getSignature()))\n\n if result:\n return True\n else:\n return False",
"async def verify_signature(request: Request):\n verify_key = VerifyKey(bytes.fromhex(config.DISCORD_APPLICATION_PUBLIC_KEY))\n\n try:\n signature = request.headers[\"X-Signature-Ed25519\"]\n timestamp = request.headers[\"X-Signature-Timestamp\"]\n body = (await request.body()).decode(\"utf-8\")\n verify_key.verify(f'{timestamp}{body}'.encode(), bytes.fromhex(signature))\n except (KeyError, BadSignatureError):\n raise HTTPException(401, 'invalid request signature')",
"def verify_signature(self, query_parameters):\n params = CaseInsensitiveDict(query_parameters)\n signature = params.pop(\"signature\", None)\n\n calculated = [\"%s=%s\" % (k, v) for k, v in params.items()]\n calculated.sort()\n calculated = \"\".join(calculated)\n\n calculated = \"{secret}{calculated}\".format(\n secret=self.credentials.secret,\n calculated=calculated\n )\n\n md5 = hashlib.md5()\n md5.update(calculated.encode('utf-8'))\n\n produced = md5.hexdigest()\n\n return produced == signature",
"def verify_receipt_signature(self, receipt_update_retrieve_res):\n pass",
"def verify(self,doc, signature):\n\n\t\tif self.pubKey:\n\t\t\tm = hashlib.sha256()\n\t\t\tm.update(doc.encode())\n\t\t\th = m.digest()\n\n\t\t\treturn self.pubKey.verify(h,signature)\n\n\t\treturn False",
"def test_verify_bad_signature(self):\n rsa = RSA.load_key(self.privkey)\n message = \"This is the message string\"\n digest = sha.sha(message).digest() \n\n otherMessage = \"Abracadabra\"\n otherDigest = sha.sha(otherMessage).digest() \n otherSignature = rsa.sign(otherDigest)\n\n self.assertRaises(RSA.RSAError, rsa.verify, \n digest, otherSignature)",
"def CheckSignature(data_filename, signature_filename):\n print 'Verifying signature of %s using %s...' % (data_filename, signature_filename)\n try:\n subprocess.check_call(['gpg', '--trusted-key=ED97E90E62AA7E34', '--verify',\n signature_filename, data_filename])\n except subprocess.CalledProcessError as err:\n print 'Unable to verify signature'\n print '\\n\\n******'\n print 'If this fails for you, you probably need to import Paul Eggert''s public key:'\n print ' gpg --receive-keys ED97E90E62AA7E34'\n print '******\\n\\n'\n raise"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|