from __future__ import absolute_import, division, print_function
import logging
import docker
from docker import errors
import tempfile
import requests
from urllib3.exceptions import TimeoutError
from requests.exceptions import (RequestException, Timeout)
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
from .decorators import retry
from .container_manager import CONTAINERLESS_MODEL_IMAGE, ClusterAdapter
from .exceptions import ClipperException, UnconnectedException
from .version import __version__, __registry__
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper"  # Used internally for tests; not Windows compatible
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = r"[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"
deployment_regex = re.compile(deploy_regex_str)
def _validate_versioned_model_name(name, version):
if deployment_regex.match(name) is None:
raise ClipperException(
"Invalid value: {name}: a model name must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(name=name, reg=deploy_regex_str))
if deployment_regex.match(version) is None:
raise ClipperException(
"Invalid value: {version}: a model version must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(
version=version, reg=deploy_regex_str))
class ClipperConnection(object):
def __init__(self, container_manager):
"""Create a new ClipperConnection object.
After creating a ``ClipperConnection`` instance, you still need to connect
to a Clipper cluster. You can connect to an existing cluster by calling
:py:meth:`clipper_admin.ClipperConnection.connect` or create a new Clipper cluster
with :py:meth:`clipper_admin.ClipperConnection.start_clipper`, which will automatically
connect to the cluster once Clipper has successfully started.
Parameters
----------
container_manager : ``clipper_admin.container_manager.ContainerManager``
An instance of a concrete subclass of ``ContainerManager``.
"""
self.connected = False
self.cm = container_manager
self.logger = ClusterAdapter(logger, {
'cluster_name': self.cm.cluster_identifier
})
def start_clipper(self,
query_frontend_image='{}/query_frontend:{}'.format(
__registry__, __version__),
mgmt_frontend_image='{}/management_frontend:{}'.format(
__registry__, __version__),
frontend_exporter_image='{}/frontend-exporter:{}'.format(
__registry__, __version__),
cache_size=DEFAULT_PREDICTION_CACHE_SIZE_BYTES,
qf_http_thread_pool_size=1,
qf_http_timeout_request=5,
qf_http_timeout_content=300,
num_frontend_replicas=1):
"""Start a new Clipper cluster and connect to it.
This command will start a new Clipper instance using the container manager provided when
the ``ClipperConnection`` instance was constructed.
Parameters
----------
query_frontend_image : str(optional)
The query frontend docker image to use. You can set this argument to specify
a custom build of the query frontend, but any customization should maintain API
compatibility and preserve the expected behavior of the system.
mgmt_frontend_image : str(optional)
The management frontend docker image to use. You can set this argument to specify
a custom build of the management frontend, but any customization should maintain API
compatibility and preserve the expected behavior of the system.
frontend_exporter_image : str(optional)
The frontend exporter docker image to use. You can set this argument to specify
a custom build of the frontend exporter, but any customization should maintain API
compatibility and preserve the expected behavior of the system.
cache_size : int(optional)
The size of Clipper's prediction cache in bytes. Default cache size is 32 MiB.
qf_http_thread_pool_size : int(optional)
The size of the thread pool created in the query frontend for HTTP serving.
qf_http_timeout_request : int(optional)
The timeout in seconds for request handling in the query frontend's HTTP server.
qf_http_timeout_content : int(optional)
The timeout in seconds for content handling in the query frontend's HTTP server.
num_frontend_replicas : int(optional)
The number of query frontend replicas to deploy for fault tolerance and high availability.
Raises
------
:py:exc:`clipper.ClipperException`
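Examples
--------
A minimal usage sketch. It assumes the Docker-based container manager
(``DockerContainerManager``) is available from the ``clipper_admin`` package;
all other values are left at their defaults::
from clipper_admin import ClipperConnection, DockerContainerManager
clipper_conn = ClipperConnection(DockerContainerManager())
clipper_conn.start_clipper()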
"""
try:
self.cm.start_clipper(query_frontend_image, mgmt_frontend_image,
frontend_exporter_image, cache_size,
qf_http_thread_pool_size, qf_http_timeout_request,
qf_http_timeout_content, num_frontend_replicas)
except ClipperException as e:
self.logger.warning("Error starting Clipper: {}".format(e.msg))
raise e
# Wait for maximum 5 min.
@retry(RequestException, tries=300, delay=1, backoff=1, logger=self.logger)
def _check_clipper_status():
try:
query_frontend_url = "http://{host}/metrics".format(
host=self.cm.get_query_addr())
mgmt_frontend_url = "http://{host}/admin/ping".format(
host=self.cm.get_admin_addr())
for name, url in [('query frontend', query_frontend_url),
('management frontend', mgmt_frontend_url)]:
r = requests.get(url, timeout=5)
if r.status_code != requests.codes.ok:
raise RequestException(
"{name} end point {url} health check failed".format(name=name, url=url))
except RequestException as e:
raise RequestException("Clipper still initializing: \n {}".format(e))
_check_clipper_status()
self.logger.info("Clipper is running")
self.connected = True
def connect(self):
"""Connect to a running Clipper cluster."""
self.cm.connect()
self.connected = True
self.logger.info(
"Successfully connected to Clipper cluster at {}".format(
self.cm.get_query_addr()))
def register_application(self, name, input_type, default_output,
slo_micros):
# TODO(crankshaw): Add links to user guide section on input types once user guide is
# written:
# "See the `User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
# on picking the right input type for your application."
"""Register a new application with Clipper.
An application in Clipper corresponds to a named REST endpoint that can be used to request
predictions. This command will attempt to create a new endpoint with the provided name.
Application names must be unique. This command will fail if an application with the provided
name already exists.
Parameters
----------
name : str
The unique name of the application.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings".
default_output : str
The default output for the application. The default output will be returned whenever
an application is unable to receive a response from a model within the specified
query latency SLO (service level objective). The reason the default output was returned
is always provided as part of the prediction response object.
slo_micros : int
The query latency objective for the application in microseconds.
This is the processing latency between Clipper receiving a request
and sending a response. It does not account for network latencies
before a request is received or after a response is sent.
If Clipper cannot process a query within the latency objective,
the default output is returned. Therefore, it is recommended that
the SLO not be set aggressively low unless absolutely necessary.
100000 (100ms) is a good starting value, but the optimal latency objective
will vary depending on the application.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
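Examples
--------
A minimal sketch; the application name, default output, and SLO shown here are
illustrative placeholders, and ``clipper_conn`` is assumed to be a connected
``ClipperConnection``::
clipper_conn.register_application(
name="hello-world",
input_type="doubles",
default_output="-1.0",
slo_micros=100000)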
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/add_app".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"name": name,
"input_type": input_type,
"default_output": default_output,
"latency_slo_micros": slo_micros
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
else:
self.logger.info(
"Application {app} was successfully registered".format(
app=name))
def delete_application(self, name):
# See: https://github.com/ucbrise/clipper/issues/603
self.logger.warning("[DEPRECATED] Use 'unregister_application' API instead of this.")
self.unregister_application(name)
def unregister_application(self, name):
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/delete_app".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"name": name})
headers = {"Content-type": "application/json"}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
else:
self.logger.info(
"Application {app} was successfully unregistered".format(app=name))
def link_model_to_app(self, app_name, model_name):
"""Routes requests from the specified app to be evaluted by the specified model.
Parameters
----------
app_name : str
The name of the application
model_name : str
The name of the model to link to the application
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
Note
-----
Both the specified model and application must be registered with Clipper, and they
must have the same input type. If the application has previously been linked to a different
model, this command will fail.
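Examples
--------
A minimal sketch; the application and model names are illustrative placeholders and
must already be registered with Clipper::
clipper_conn.link_model_to_app(app_name="hello-world", model_name="hello-world-model")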
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/add_model_links".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"app_name": app_name,
"model_names": [model_name]
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
else:
self.logger.info(
"Model {model} is now linked to application {app}".format(
model=model_name, app=app_name))
def unlink_model_from_app(self, app_name, model_name):
"""
Prevents the model with `model_name` from being used by the app with `app_name`.
The model and app should both be registered with Clipper and a link should
already exist between them.
Parameters
----------
app_name : str
The name of the application
model_name : str
The name of the model to unlink from the application
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/delete_model_links".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"app_name": app_name,
"model_names": [model_name]
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
else:
self.logger.info(
"Model {model} is now unlinked from application {app}".format(
model=model_name, app=app_name))
def build_and_deploy_model(self,
name,
version,
input_type,
model_data_path,
base_image,
labels=None,
container_registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
"""Build a new model container Docker image with the provided data and deploy it as
a model to Clipper.
This method does two things.
1. Builds a new Docker image from the provided base image with the local directory specified
by ``model_data_path`` copied into the image by calling
:py:meth:`clipper_admin.ClipperConnection.build_model`.
2. Registers and deploys a model with the specified metadata using the newly built
image by calling :py:meth:`clipper_admin.ClipperConnection.deploy_model`.
Parameters
----------
name : str
The name of the deployed model
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings". See the
`User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
on picking the right input type for your application.
model_data_path : str
A path to a local directory. The contents of this directory will be recursively copied
into the Docker container.
base_image : str
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
container_registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
to the Kubernetes cluster in order to fetch the container from the registry.
num_replicas : int, optional
The number of replicas of the model to create. The number of replicas
for a model can be changed at any time with
:py:meth:`clipper.ClipperConnection.set_num_replicas`.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for
individual replicas of this model.
pkgs_to_install : list (of strings), optional
A list of the names of packages to install, using pip, in the container.
The names must be strings.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
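Examples
--------
A minimal sketch; the model name, data path, and base image below are illustrative
placeholders (the base image must contain the Clipper model container RPC client)::
clipper_conn.build_and_deploy_model(
name="hello-world-model",
version="1",
input_type="doubles",
model_data_path="/path/to/model_dir",
base_image="my-registry/my-model-base:latest",
num_replicas=1)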
"""
if not self.connected:
raise UnconnectedException()
image = self.build_model(name, version, model_data_path, base_image,
container_registry, pkgs_to_install)
self.deploy_model(name, version, input_type, image, labels,
num_replicas, batch_size)
def build_model(self,
name,
version,
model_data_path,
base_image,
container_registry=None,
pkgs_to_install=None):
"""Build a new model container Docker image with the provided data"
This method builds a new Docker image from the provided base image with the local directory
specified by ``model_data_path`` copied into the image. The Dockerfile that gets generated
to build the image is equivalent to the following::
FROM <base_image>
COPY <model_data_path> /model/
The newly built image is then pushed to the specified container registry. If no container
registry is specified, the image will be pushed to the default DockerHub registry. Clipper
will tag the newly built image with the tag [<registry>/]<cluster-identifier>-<name>:<version>.
This method can be called without being connected to a Clipper cluster.
Parameters
----------
name : str
The name of the deployed model.
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
model_data_path : str
A path to a local directory. The contents of this directory will be recursively copied
into the Docker container.
base_image : str
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
container_registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
to the Kubernetes cluster in order to fetch the container from the registry.
pkgs_to_install : list (of strings), optional
A list of the names of packages to install, using pip, in the container.
The names must be strings.
Returns
-------
str :
The fully specified tag of the newly built image. This will include the
container registry if specified.
Raises
------
:py:exc:`clipper.ClipperException`
Note
----
Both the model name and version must be valid DNS-1123 subdomains. Each must consist of
lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric
character (e.g. 'example.com', regex used for validation is
'[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z'.
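Examples
--------
A minimal sketch; the model name, data path, base image, and registry shown are
illustrative placeholders::
image = clipper_conn.build_model(
name="hello-world-model",
version="1",
model_data_path="/path/to/model_dir",
base_image="my-registry/my-model-base:latest",
container_registry="my-registry",
pkgs_to_install=["numpy"])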
"""
version = str(version)
_validate_versioned_model_name(name, version)
run_cmd = ''
if pkgs_to_install:
run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
' ')
run_cmd = ' '.join(run_as_lst + pkgs_to_install)
with tempfile.NamedTemporaryFile(
mode="w+b", suffix="tar") as context_file:
# Create build context tarfile
with tarfile.TarFile(
fileobj=context_file, mode="w") as context_tar:
context_tar.add(model_data_path)
# From https://stackoverflow.com/a/740854/814642
try:
df_contents = StringIO(
str.encode(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd)))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
except TypeError:
df_contents = StringIO(
"FROM {container_name}\n{run_command}\nCOPY {data_path} /model/\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
# Exit Tarfile context manager to finish the tar file
# Seek back to beginning of file for reading
context_file.seek(0)
image = "{cluster}-{name}:{version}".format(
cluster=self.cm.cluster_identifier, name=name, version=version)
if container_registry is not None:
image = "{reg}/{image}".format(
reg=container_registry, image=image)
docker_client = docker.from_env()
self.logger.info(
"Building model Docker image with model data from {}".format(
model_data_path))
image_result, build_logs = docker_client.images.build(
fileobj=context_file, custom_context=True, tag=image)
for b in build_logs:
if 'stream' in b and b['stream'] != '\n': #log build steps only
self.logger.info(b['stream'].rstrip())
self.logger.info("Pushing model Docker image to {}".format(image))
@retry((docker.errors.APIError, TimeoutError, Timeout),
tries=5, logger=self.logger)
def _push_model():
for line in docker_client.images.push(repository=image, stream=True):
self.logger.debug(line)
_push_model()
return image
def deploy_model(self,
name,
version,
input_type,
image,
labels=None,
num_replicas=1,
batch_size=-1):
"""Deploys the model in the provided Docker image to Clipper.
Deploying a model to Clipper does a few things.
1. It starts a set of Docker model containers running the model packaged
in the ``image`` Docker image. The number of containers it will start is dictated
by the ``num_replicas`` argument, but the way that these containers get started
depends on your choice of ``ContainerManager`` implementation.
2. It registers the model and version with Clipper and sets the current version of the
model to this version by internally calling
:py:meth:`clipper_admin.ClipperConnection.register_model`.
Notes
-----
If you want to deploy a model in some other way (e.g. a model that cannot run in a Docker
container for some reason), you can start the model manually or with an external tool and
call ``register_model`` directly.
Parameters
----------
name : str
The name of the deployed model
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings". See the
`User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
on picking the right input type for your application.
image : str
The fully specified Docker image to deploy. If using a custom
registry, the registry name must be prepended to the image. For example,
if your Docker image is stored in the quay.io registry, you should specify
the image argument as
"quay.io/my_namespace/image_name:tag". The image name and tag are independent of
the ``name`` and ``version`` arguments, and can be set to whatever you want.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
num_replicas : int, optional
The number of replicas of the model to create. The number of replicas
for a model can be changed at any time with
:py:meth:`clipper.ClipperConnection.set_num_replicas`.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for
individual replicas of this model.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
Note
----
Both the model name and version must be valid DNS-1123 subdomains. Each must consist of
lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric
character (e.g. 'example.com', regex used for validation is
'[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z'.
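Examples
--------
A minimal sketch; the image tag below is an illustrative placeholder for an image
built to run the Clipper model container RPC client::
clipper_conn.deploy_model(
name="hello-world-model",
version="1",
input_type="doubles",
image="my-registry/hello-world-model:1",
num_replicas=2)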
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
_validate_versioned_model_name(name, version)
self.cm.deploy_model(
name=name,
version=version,
input_type=input_type,
image=image,
num_replicas=num_replicas)
self.register_model(
name,
version,
input_type,
image=image,
labels=labels,
batch_size=batch_size)
self.logger.info("Done deploying model {name}:{version}.".format(
name=name, version=version))
def register_model(self,
name,
version,
input_type,
image=None,
labels=None,
batch_size=-1):
"""Registers a new model version with Clipper.
This method does not launch any model containers, it only registers the model description
(metadata such as name, version, and input type) with Clipper. A model must be registered
with Clipper before it can be linked to an application.
You should rarely have to use this method directly. Using one of the Clipper deployer
methods in :py:mod:`clipper_admin.deployers` or calling ``build_and_deploy_model`` or
``deploy_model`` will automatically register your model with Clipper.
Parameters
----------
name : str
The name of the deployed model
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings". See the
`User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
on picking the right input type for your application.
image : str, optional
A docker image name. If provided, the image will be recorded as part of the
model description in Clipper when registering the model, but this method will
make no attempt to launch any containers with this image.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for
individual replicas of this model.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
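Examples
--------
A minimal sketch for registering a model whose containers are managed outside of
Clipper; the name and version are illustrative placeholders::
clipper_conn.register_model(
name="externally-managed-model",
version="1",
input_type="doubles")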
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/add_model".format(
host=self.cm.get_admin_addr())
if image is None:
image = CONTAINERLESS_MODEL_IMAGE
if labels is None:
labels = DEFAULT_LABEL
req_json = json.dumps({
"model_name": name,
"model_version": version,
"labels": labels,
"input_type": input_type,
"container_name": image,
"model_data_path": "DEPRECATED",
"batch_size": batch_size
})
headers = {'Content-type': 'application/json'}
self.logger.debug(req_json)
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
else:
self.logger.info(
"Successfully registered model {name}:{version}".format(
name=name, version=version))
def get_current_model_version(self, name):
"""Get the current model version for the specified model.
Parameters
----------
name : str
The name of the model
Returns
-------
str
The current model version
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
version = None
model_info = self.get_all_models(verbose=True)
for m in model_info:
if m["model_name"] == name and m["is_current_version"]:
version = m["model_version"]
break
if version is None:
raise ClipperException(
"No versions of model {} registered with Clipper".format(name))
return version
def get_num_replicas(self, name, version=None):
"""Gets the current number of model container replicas for a model.
Parameters
----------
name : str
The name of the model
version : str, optional
The version of the model. If no version is provided,
the currently deployed version will be used.
Returns
-------
int
The number of active replicas
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
if version is None:
version = self.get_current_model_version(name)
else:
version = str(version)
return self.cm.get_num_replicas(name, version)
def set_num_replicas(self, name, num_replicas, version=None):
"""Sets the total number of active replicas for a model.
If the desired number of replicas is less than the number currently
allocated, replicas will be removed. If it is greater, replicas will
be added.
Parameters
----------
name : str
The name of the model
version : str, optional
The version of the model. If no version is provided,
the currently deployed version will be used.
num_replicas : int, optional
The desired number of replicas.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
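Examples
--------
A minimal sketch; the model name is an illustrative placeholder and the currently
deployed version is used because no version is specified::
clipper_conn.set_num_replicas("hello-world-model", num_replicas=3)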
"""
if not self.connected:
raise UnconnectedException()
if version is None:
version = self.get_current_model_version(name)
else:
version = str(version)
model_data = self.get_model_info(name, version)
if model_data is not None:
input_type = model_data["input_type"]
image = model_data["container_name"]
if image != CONTAINERLESS_MODEL_IMAGE:
self.cm.set_num_replicas(name, version, input_type, image,
num_replicas)
else:
msg = ("Cannot resize the replica set for containerless model "
"{name}:{version}").format(
name=name, version=version)
self.logger.error(msg)
raise ClipperException(msg)
else:
msg = "Cannot add container for non-registered model {name}:{version}".format(
name=name, version=version)
self.logger.error(msg)
raise ClipperException(msg)
def get_all_apps(self, verbose=False):
"""Gets information about all applications registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the apps' names.
If set to True, the list contains application info dictionaries.
These dictionaries have the same attribute name-value pairs that were
provided to :py:meth:`clipper_admin.ClipperConnection.register_application`.
Returns
-------
list
Returns a list of information about all apps registered to Clipper.
If no apps are registered with Clipper, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_all_applications".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_app_info(self, name):
"""Gets detailed information about a registered application.
Parameters
----------
name : str
The name of the application to look up
Returns
-------
dict
Returns a dictionary with the specified application's info. This
will contain the attribute name-value pairs that were provided to
:py:meth:`clipper_admin.ClipperConnection.register_application`.
If no application with name ``name`` is
registered with Clipper, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_application".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"name": name})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
self.logger.warning(
"Application {} is not registered with Clipper".format(
name))
return None
return app_info
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_linked_models(self, app_name):
"""Retrieves the models linked to the specified application.
Parameters
----------
app_name : str
The name of the application
Returns
-------
list
Returns a list of the names of models linked to the app.
If no models are linked to the specified app, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_linked_models".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"app_name": app_name})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_all_models(self, verbose=False):
"""Gets information about all models registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the models' names.
If set to True, the list contains model info dictionaries.
Returns
-------
list
Returns a list of information about all models registered with Clipper.
If no models are registered with Clipper, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_all_models".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_model_info(self, name, version):
"""Gets detailed information about a registered model.
Parameters
----------
name : str
The name of the model to look up
version : str
The version of the model to look up
Returns
-------
dict
Returns a dictionary with the specified model's info.
If no model with name `model_name@model_version` is
registered with Clipper, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/get_model".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"model_name": name, "model_version": version})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
model_info = r.json()
if len(model_info) == 0:
self.logger.warning(
"Model {name}:{version} is not registered with Clipper.".
format(name=name, version=version))
return None
return model_info
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_all_model_replicas(self, verbose=False):
"""Gets information about all model containers registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the containers' names.
If set to True, the list contains container info dictionaries.
Returns
-------
list
Returns a list of information about all model containers known to Clipper.
If no containers are registered with Clipper, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_all_containers".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_model_replica_info(self, name, version, replica_id):
"""Gets detailed information about a registered container.
Parameters
----------
name : str
The name of the container to look up
version : int
The version of the container to look up
replica_id : int
The container replica to look up
Returns
-------
dict
A dictionary with the specified container's info.
If no corresponding container is registered with Clipper, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/get_container".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"model_name": name,
"model_version": version,
"replica_id": replica_id,
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
model_rep_info = r.json()
if len(model_rep_info) == 0:
self.logger.warning(
"No model replica with ID {rep_id} found for model {name}:{version}".
format(rep_id=replica_id, name=name, version=version))
return None
return model_rep_info
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def get_clipper_logs(self, logging_dir="clipper_logs/"):
"""Download the logs from all Clipper docker containers.
Parameters
----------
logging_dir : str, optional
The directory to save the downloaded logs. If the directory does not
exist, it will be created.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_logs(logging_dir)
def inspect_instance(self):
"""Fetches performance metrics from the running Clipper cluster.
Returns
-------
dict
The parsed JSON response containing the current set of metrics
for this instance. On error, a :py:exc:`clipper.ClipperException` is
raised instead.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/metrics".format(host=self.cm.get_query_addr())
r = requests.get(url)
self.logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
def set_model_version(self, name, version, num_replicas=None):
"""Changes the current model version to "model_version".
This method can be used to perform model roll-back and roll-forward. The
version can be set to any previously deployed version of the model.
Parameters
----------
name : str
The name of the model
version : str | obj with __str__ representation
The version of the model. Note that `version`
must be a model version that has already been deployed.
num_replicas : int
The number of new containers to start with the newly
selected model version.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
Note
-----
Model versions automatically get updated when
py:meth:`clipper_admin.ClipperConnection.deploy_model()` is called. There is no need to
manually update the version after deploying a new model.
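Examples
--------
A minimal roll-back sketch; the model name and version are illustrative and version
"1" must have been deployed previously::
clipper_conn.set_model_version("hello-world-model", "1", num_replicas=1)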
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/set_model_version".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"model_name": name, "model_version": version})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
self.logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
self.logger.error(msg)
raise ClipperException(msg)
if num_replicas is not None:
self.set_num_replicas(name, num_replicas, version)
def get_query_addr(self):
"""Get the IP address at which the query frontend can be reached request predictions.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_query_addr()
def get_metric_addr(self):
"""Get the IP address of Prometheus metric server.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_metric_addr()
def _unregister_versioned_models(self, model_versions_dict):
"""Unregister the specified versions of the specified models from Clipper internal.
This function does not be opened to public because it might cause critical operation.
Please use 'stop_models', 'stop_versioned_models', 'stop_inactive_model_versions',
and 'stop_all_model_containers' APIs according to your need.
Parameters
----------
model_versions_dict : dict(str, list(str))
For each entry in the dict, the key is a model name and the value is a list of model
Raises
------
:py:exc:`clipper.UnconnectedException`
versions. All replicas for each version of each model will be stopped.
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/delete_versioned_model".format(
host=self.cm.get_admin_addr())
headers = {"Content-type": "application/json"}
for model_name in model_versions_dict:
for model_version in model_versions_dict[model_name]:
req_json = json.dumps({"model_name": model_name,
"model_version": model_version})
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: " \
"{msg}".format(code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
else:
logger.info(
"Model {name}:{ver} was successfully deleted".format(
name=model_name, ver=model_version))
def stop_models(self, model_names):
"""Stops all versions of the specified models.
This is a convenience method to avoid the need to explicitly list all versions
of a model when calling :py:meth:`clipper_admin.ClipperConnection.stop_versioned_models`.
Parameters
----------
model_names : list(str)
A list of model names. All replicas of all versions of each model specified in the list
will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
model_info = self.get_all_models(verbose=True)
model_dict = {}
for m in model_info:
if m["model_name"] in model_names:
if m["model_name"] in model_dict:
model_dict[m["model_name"]].append(m["model_version"])
else:
model_dict[m["model_name"]] = [m["model_version"]]
self.cm.stop_models(model_dict)
self._unregister_versioned_models(model_dict)
pp = pprint.PrettyPrinter(indent=4)
self.logger.info(
"Stopped all containers for these models and versions:\n{}".format(
pp.pformat(model_dict)))
def stop_versioned_models(self, model_versions_dict):
"""Stops the specified versions of the specified models.
Parameters
----------
model_versions_dict : dict(str, list(str))
For each entry in the dict, the key is a model name and the value is a list of model
versions. All replicas for each version of each model will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
Note
----
This method will stop the currently deployed versions of models if you specify them. You
almost certainly want to use one of the other stop_* methods. Use with caution.
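Examples
--------
A minimal sketch; the model name and versions are illustrative placeholders::
clipper_conn.stop_versioned_models({"hello-world-model": ["1", "2"]})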
"""
if not self.connected:
raise UnconnectedException()
self.cm.stop_models(model_versions_dict)
self._unregister_versioned_models(model_versions_dict)
pp = pprint.PrettyPrinter(indent=4)
self.logger.info(
"Stopped all containers for these models and versions:\n{}".format(
pp.pformat(model_versions_dict)))
def stop_inactive_model_versions(self, model_names):
"""Stops all model containers serving stale versions of the specified models.
For example, if you have deployed versions 1, 2, and 3 of model "music_recommender"
and version 3 is the current version::
clipper_conn.stop_inactive_model_versions(["music_recommender"])
will stop any containers serving versions 1 and 2 but will leave containers serving
version 3 untouched.
Parameters
----------
model_names : list(str)
The names of the models whose old containers you want to stop.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
model_info = self.get_all_models(verbose=True)
model_dict = {}
for m in model_info:
if m["model_name"] in model_names and not m["is_current_version"]:
if m["model_name"] in model_dict:
model_dict[m["model_name"]].append(m["model_version"])
else:
model_dict[m["model_name"]] = [m["model_version"]]
self.cm.stop_models(model_dict)
self._unregister_versioned_models(model_dict)
pp = pprint.PrettyPrinter(indent=4)
self.logger.info(
"Stopped all containers for these models and versions:\n{}".format(
pp.pformat(model_dict)))
def stop_all_model_containers(self):
"""Stops all model containers started via Clipper admin commands.
This method can be used to clean up leftover Clipper model containers. Note that it
queries the management frontend for the set of registered models, so ``connect`` (or
``start_clipper``) must be called first.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
model_info = self.get_all_models(verbose=True)
model_dict = {}
for m in model_info:
if m["model_name"] in model_dict:
model_dict[m["model_name"]].append(m["model_version"])
else:
model_dict[m["model_name"]] = [m["model_version"]]
self.cm.stop_all_model_containers()
self._unregister_versioned_models(model_dict)
pp = pprint.PrettyPrinter(indent=4)
self.logger.info("Stopped all Clipper model containers:\n{}".format(
pp.pformat(model_dict)))
def stop_all(self, graceful=True):
"""Stops all processes that were started via Clipper admin commands.
This includes the query and management frontend Docker containers and all model containers.
If you started Redis independently, this will not affect Redis. It can also be called
without calling ``connect`` first.
If graceful=False, Clipper will issue a Docker kill when running in Docker mode. This
parameter has no effect in Kubernetes.
"""
self.cm.stop_all(graceful=graceful)
self.logger.info(
"Stopped all Clipper cluster and all model containers")
def test_predict_function(self, query, func, input_type):
"""Tests that the user's function has the correct signature and can be properly saved and
loaded.
This method takes a dict request object in the same JSON format that the query frontend
expects, the predict function, and the input type for the model.
For example, the method can be called like::
clipper_conn.test_predict_function({"input": [1.0, 2.0, 3.0]}, predict_func, "doubles")
Parameters
----------
query: JSON or list of dicts
Inputs to test the prediction function on.
func: function
Predict function to test.
input_type: str
The input_type to be associated with the registered app and deployed model.
One of "integers", "floats", "doubles", "bytes", or "strings".
"""
if not self.connected:
self.connect()
query_data = list(query.values())
query_key = list(query.keys())
if query_key[0] == "input_batch":
query_data = query_data[0]
try:
flattened_data = [
item for sublist in query_data for item in sublist
]
except TypeError:
return "Invalid input type or JSON key"
numpy_data = None
if input_type == "bytes":
numpy_data = list(np.int8(x) for x in query_data)
for x in flattened_data:
if type(x) != bytes:
return "Invalid input type"
if input_type == "integers":
numpy_data = list(np.int32(x) for x in query_data)
for x in flattened_data:
if type(x) != int:
return "Invalid input type"
if input_type == "floats" or input_type == "doubles":
if input_type == "floats":
numpy_data = list(np.float32(x) for x in query_data)
else:
numpy_data = list(np.float64(x) for x in query_data)
for x in flattened_data:
if type(x) != float:
return "Invalid input type"
if input_type == "string":
numpy_data = list(np.str_(x) for x in query_data)
for x in flattened_data:
if type(x) != str:
return "Invalid input type"
s = StringIO()
c = CloudPickler(s, 2)
c.dump(func)
serialized_func = s.getvalue()
reloaded_func = pickle.loads(serialized_func)
try:
assert reloaded_func
except AssertionError:
self.logger.error(
"Function does not properly serialize and reload")
return "Function does not properly serialize and reload"
return reloaded_func(numpy_data)
| {
"repo_name": "ucbrise/clipper",
"path": "clipper_admin/clipper_admin/clipper_admin.py",
"copies": "1",
"size": "61713",
"license": "apache-2.0",
"hash": 8835951370822467000,
"line_mean": 40.0598802395,
"line_max": 100,
"alpha_frac": 0.5821949994,
"autogenerated": false,
"ratio": 4.533387203408506,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012877446274631962,
"num_lines": 1503
} |
from __future__ import absolute_import, division, print_function
import logging
import docker
import tempfile
import requests
from requests.exceptions import RequestException
import json
import pprint
import time
import re
import os
import tarfile
import sys
from cloudpickle import CloudPickler
import pickle
import numpy as np
if sys.version_info < (3, 0):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
PY3 = False
else:
from io import BytesIO as StringIO
PY3 = True
from .container_manager import CONTAINERLESS_MODEL_IMAGE
from .exceptions import ClipperException, UnconnectedException
from .version import __version__
DEFAULT_LABEL = []
DEFAULT_PREDICTION_CACHE_SIZE_BYTES = 33554432
CLIPPER_TEMP_DIR = "/tmp/clipper"
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
deploy_regex_str = r"[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z"
deployment_regex = re.compile(deploy_regex_str)
def _validate_versioned_model_name(name, version):
if deployment_regex.match(name) is None:
raise ClipperException(
"Invalid value: {name}: a model name must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(name=name, reg=deploy_regex_str))
if deployment_regex.match(version) is None:
raise ClipperException(
"Invalid value: {version}: a model version must be a valid DNS-1123 "
" subdomain. It must consist of lower case "
"alphanumeric characters, '-' or '.', and must start and end with "
"an alphanumeric character (e.g. 'example.com', regex used for "
"validation is '{reg}'".format(
version=version, reg=deploy_regex_str))
class ClipperConnection(object):
def __init__(self, container_manager):
"""Create a new ClipperConnection object.
After creating a ``ClipperConnection`` instance, you still need to connect
to a Clipper cluster. You can connect to an existing cluster by calling
:py:meth:`clipper_admin.ClipperConnection.connect` or create a new Clipper cluster
with :py:meth:`clipper_admin.ClipperConnection.start_clipper`, which will automatically
connect to the cluster once Clipper has successfully started.
Parameters
----------
container_manager : ``clipper_admin.container_manager.ContainerManager``
An instance of a concrete subclass of ``ContainerManager``.
"""
self.connected = False
self.cm = container_manager
def start_clipper(
self,
query_frontend_image='clipper/query_frontend:{}'.format(
__version__),
mgmt_frontend_image='clipper/management_frontend:{}'.format(
__version__),
cache_size=DEFAULT_PREDICTION_CACHE_SIZE_BYTES):
"""Start a new Clipper cluster and connect to it.
This command will start a new Clipper instance using the container manager provided when
the ``ClipperConnection`` instance was constructed.
Parameters
----------
query_frontend_image : str(optional)
The query frontend docker image to use. You can set this argument to specify
a custom build of the query frontend, but any customization should maintain API
compatibility and preserve the expected behavior of the system.
mgmt_frontend_image : str(optional)
The management frontend docker image to use. You can set this argument to specify
a custom build of the management frontend, but any customization should maintain API
compatibility and preserve the expected behavior of the system.
cache_size : int, optional
The size of Clipper's prediction cache in bytes. Default cache size is 32 MiB.
Raises
------
:py:exc:`clipper.ClipperException`
"""
try:
self.cm.start_clipper(query_frontend_image, mgmt_frontend_image,
cache_size)
while True:
try:
url = "http://{host}/metrics".format(
host=self.cm.get_query_addr())
r = requests.get(url, timeout=5)
if r.status_code != requests.codes.ok:
raise RequestException
break
except RequestException:
logger.info("Clipper still initializing.")
time.sleep(1)
logger.info("Clipper is running")
self.connected = True
except ClipperException as e:
logger.warning("Error starting Clipper: {}".format(e.msg))
raise e
def connect(self):
"""Connect to a running Clipper cluster."""
self.cm.connect()
self.connected = True
logger.info("Successfully connected to Clipper cluster at {}".format(
self.cm.get_query_addr()))
def register_application(self, name, input_type, default_output,
slo_micros):
# TODO(crankshaw): Add links to user guide section on input types once user guide is
# written:
# "See the `User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
# on picking the right input type for your application."
"""Register a new application with Clipper.
An application in Clipper corresponds to a named REST endpoint that can be used to request
predictions. This command will attempt to create a new endpoint with the provided name.
Application names must be unique. This command will fail if an application with the provided
name already exists.
Parameters
----------
name : str
The unique name of the application.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings".
default_output : str
The default output for the application. The default output will be returned whenever
an application is unable to receive a response from a model within the specified
query latency SLO (service level objective). The reason the default output was returned
is always provided as part of the prediction response object.
slo_micros : int
The query latency objective for the application in microseconds.
This is the processing latency between Clipper receiving a request
and sending a response. It does not account for network latencies
before a request is received or after a response is sent.
If Clipper cannot process a query within the latency objective,
the default output is returned. Therefore, it is recommended that
the SLO not be set aggressively low unless absolutely necessary.
100000 (100ms) is a good starting value, but the optimal latency objective
will vary depending on the application.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/add_app".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"name": name,
"input_type": input_type,
"default_output": default_output,
"latency_slo_micros": slo_micros
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
else:
logger.info("Application {app} was successfully registered".format(
app=name))
def delete_application(self, name):
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/delete_app".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"name": name})
headers = {"Content-type": "application/json"}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
else:
logger.info(
"Application {app} was successfully deleted".format(app=name))
def link_model_to_app(self, app_name, model_name):
"""Routes requests from the specified app to be evaluted by the specified model.
Parameters
----------
app_name : str
The name of the application
model_name : str
The name of the model to link to the application
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
Note
-----
Both the specified model and application must be registered with Clipper, and they
must have the same input type. If the application has previously been linked to a different
model, this command will fail.
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/add_model_links".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"app_name": app_name,
"model_names": [model_name]
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
else:
logger.info(
"Model {model} is now linked to application {app}".format(
model=model_name, app=app_name))
def build_and_deploy_model(self,
name,
version,
input_type,
model_data_path,
base_image,
labels=None,
container_registry=None,
num_replicas=1,
batch_size=-1,
pkgs_to_install=None):
"""Build a new model container Docker image with the provided data and deploy it as
a model to Clipper.
This method does two things.
1. Builds a new Docker image from the provided base image with the local directory specified
by ``model_data_path`` copied into the image by calling
:py:meth:`clipper_admin.ClipperConnection.build_model`.
2. Registers and deploys a model with the specified metadata using the newly built
image by calling :py:meth:`clipper_admin.ClipperConnection.deploy_model`.
Parameters
----------
name : str
The name of the deployed model
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings". See the
`User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
on picking the right input type for your application.
model_data_path : str
A path to a local directory. The contents of this directory will be recursively copied
into the Docker container.
base_image : str
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
container_registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
to the Kubernetes cluster in order to fetch the container from the registry.
num_replicas : int, optional
The number of replicas of the model to create. The number of replicas
for a model can be changed at any time with
:py:meth:`clipper.ClipperConnection.set_num_replicas`.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for
individual replicas of this model.
pkgs_to_install : list (of strings), optional
A list of the names of packages to install, using pip, in the container.
The names must be strings.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
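Example
-------
A hedged sketch of a typical call; the model name, paths, base image, and
package list are illustrative placeholders, not values from the source::
clipper_conn.build_and_deploy_model(
name="digits-model",
version="1",
input_type="doubles",
model_data_path="/path/to/model_dir",
base_image="<your-model-base-image>",
num_replicas=2,
pkgs_to_install=["numpy"])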
"""
if not self.connected:
raise UnconnectedException()
image = self.build_model(name, version, model_data_path, base_image,
container_registry, pkgs_to_install)
self.deploy_model(name, version, input_type, image, labels,
num_replicas, batch_size)
def build_model(self,
name,
version,
model_data_path,
base_image,
container_registry=None,
pkgs_to_install=None):
"""Build a new model container Docker image with the provided data"
This method builds a new Docker image from the provided base image with the local directory
specified by ``model_data_path`` copied into the image. The Dockerfile that gets generated
to build the image is equivalent to the following::
FROM <base_image>
COPY <model_data_path> /model/
The newly built image is then pushed to the specified container registry. If no container
registry is specified, the image will be pushed to the default DockerHub registry. Clipper
will tag the newly built image with the tag [<registry>]/<name>:<version>.
This method can be called without being connected to a Clipper cluster.
Parameters
----------
name : str
The name of the deployed model.
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
model_data_path : str
A path to a local directory. The contents of this directory will be recursively copied
into the Docker container.
base_image : str
The base Docker image to build the new model image from. This
image should contain all code necessary to run a Clipper model
container RPC client.
container_registry : str, optional
The Docker container registry to push the freshly built model to. Note
that if you are running Clipper on Kubernetes, this registry must be accessible
to the Kubernetes cluster in order to fetch the container from the registry.
pkgs_to_install : list (of strings), optional
A list of the names of packages to install, using pip, in the container.
The names must be strings.
Returns
-------
str :
The fully specified tag of the newly built image. This will include the
container registry if specified.
Raises
------
:py:exc:`clipper.ClipperException`
Note
----
Both the model name and version must be valid DNS-1123 subdomains. Each must consist of
lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric
character (e.g. 'example.com', regex used for validation is
'[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z'.
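Example
-------
A minimal, hedged sketch; the directory and base image are placeholders::
image = clipper_conn.build_model(
name="digits-model",
version="1",
model_data_path="/path/to/model_dir",
base_image="<your-model-base-image>")
The returned ``image`` tag can then be passed to
:py:meth:`clipper_admin.ClipperConnection.deploy_model`.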
"""
version = str(version)
_validate_versioned_model_name(name, version)
run_cmd = ''
if pkgs_to_install:
run_as_lst = 'RUN apt-get -y install build-essential && pip install'.split(
' ')
run_cmd = ' '.join(run_as_lst + pkgs_to_install)
with tempfile.NamedTemporaryFile(
mode="w+b", suffix="tar") as context_file:
# Create build context tarfile
with tarfile.TarFile(
fileobj=context_file, mode="w") as context_tar:
context_tar.add(model_data_path)
# From https://stackoverflow.com/a/740854/814642
try:
df_contents = StringIO(
str.encode(
"FROM {container_name}\nCOPY {data_path} /model/\n{run_command}\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd)))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
except TypeError:
df_contents = StringIO(
"FROM {container_name}\nCOPY {data_path} /model/\n{run_command}\n".
format(
container_name=base_image,
data_path=model_data_path,
run_command=run_cmd))
df_tarinfo = tarfile.TarInfo('Dockerfile')
df_contents.seek(0, os.SEEK_END)
df_tarinfo.size = df_contents.tell()
df_contents.seek(0)
context_tar.addfile(df_tarinfo, df_contents)
# Exit Tarfile context manager to finish the tar file
# Seek back to beginning of file for reading
context_file.seek(0)
image = "{name}:{version}".format(name=name, version=version)
if container_registry is not None:
image = "{reg}/{image}".format(
reg=container_registry, image=image)
docker_client = docker.from_env()
logger.info(
"Building model Docker image with model data from {}".format(
model_data_path))
image_result, build_logs = docker_client.images.build(
fileobj=context_file, custom_context=True, tag=image)
for b in build_logs:
logger.info(b)
logger.info("Pushing model Docker image to {}".format(image))
for line in docker_client.images.push(repository=image, stream=True):
logger.debug(line)
return image
def deploy_model(self,
name,
version,
input_type,
image,
labels=None,
num_replicas=1,
batch_size=-1):
"""Deploys the model in the provided Docker image to Clipper.
Deploying a model to Clipper does a few things.
1. It starts a set of Docker model containers running the model packaged
in the ``image`` Docker image. The number of containers it will start is dictated
by the ``num_replicas`` argument, but the way that these containers get started
depends on your choice of ``ContainerManager`` implementation.
2. It registers the model and version with Clipper and sets the current version of the
model to this version by internally calling
:py:meth:`clipper_admin.ClipperConnection.register_model`.
Notes
-----
If you want to deploy a model in some other way (e.g. a model that cannot run in a Docker
container for some reason), you can start the model manually or with an external tool and
call ``register_model`` directly.
Parameters
----------
name : str
The name of the deployed model
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings". See the
`User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
on picking the right input type for your application.
image : str
The fully specified Docker image to deploy. If using a custom
registry, the registry name must be prepended to the image. For example,
if your Docker image is stored in the quay.io registry, you should specify
the image argument as
"quay.io/my_namespace/image_name:tag". The image name and tag are independent of
the ``name`` and ``version`` arguments, and can be set to whatever you want.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
num_replicas : int, optional
The number of replicas of the model to create. The number of replicas
for a model can be changed at any time with
:py:meth:`clipper.ClipperConnection.set_num_replicas`.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for
individual replicas of this model.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
Note
----
Both the model name and version must be valid DNS-1123 subdomains. Each must consist of
lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric
character (e.g. 'example.com', regex used for validation is
'[a-z0-9]([-a-z0-9]*[a-z0-9])?\Z'.
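Example
-------
A hedged sketch deploying a prebuilt image; the image tag follows the
custom-registry format described above and is illustrative::
clipper_conn.deploy_model(
name="digits-model",
version="1",
input_type="doubles",
image="quay.io/my_namespace/digits-model:1",
num_replicas=2)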
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
_validate_versioned_model_name(name, version)
self.cm.deploy_model(
name=name,
version=version,
input_type=input_type,
image=image,
num_replicas=num_replicas)
self.register_model(
name,
version,
input_type,
image=image,
labels=labels,
batch_size=batch_size)
logger.info("Done deploying model {name}:{version}.".format(
name=name, version=version))
def register_model(self,
name,
version,
input_type,
image=None,
labels=None,
batch_size=-1):
"""Registers a new model version with Clipper.
This method does not launch any model containers, it only registers the model description
(metadata such as name, version, and input type) with Clipper. A model must be registered
with Clipper before it can be linked to an application.
You should rarely have to use this method directly. Using one of the Clipper deployer
methods in :py:mod:`clipper_admin.deployers` or calling ``build_and_deploy_model`` or
``deploy_model`` will automatically register your model with Clipper.
Parameters
----------
name : str
The name of the deployed model
version : str
The version to assign this model. Versions must be unique on a per-model
basis, but may be re-used across different models.
input_type : str
The type of the request data this endpoint can process. Input type can be
one of "integers", "floats", "doubles", "bytes", or "strings". See the
`User Guide <http://clipper.ai/user_guide/#input-types>`_ for more details
on picking the right input type for your application.
image : str, optional
A docker image name. If provided, the image will be recorded as part of the
model description in Clipper when registering the model, but this method will
make no attempt to launch any containers with this image.
labels : list(str), optional
A list of strings annotating the model. These are ignored by Clipper
and used purely for user annotations.
batch_size : int, optional
The user-defined query batch size for the model. Replicas of the model will attempt
to process at most `batch_size` queries simultaneously. They may process smaller
batches if `batch_size` queries are not immediately available.
If the default value of -1 is used, Clipper will adaptively calculate the batch size for
individual replicas of this model.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
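Example
-------
A minimal, hedged sketch of registering an externally managed (containerless)
model by omitting ``image``; the name and version are illustrative::
clipper_conn.register_model(
name="external-model",
version="1",
input_type="doubles")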
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/add_model".format(
host=self.cm.get_admin_addr())
if image is None:
image = CONTAINERLESS_MODEL_IMAGE
if labels is None:
labels = DEFAULT_LABEL
req_json = json.dumps({
"model_name": name,
"model_version": version,
"labels": labels,
"input_type": input_type,
"container_name": image,
"model_data_path": "DEPRECATED",
"batch_size": batch_size
})
headers = {'Content-type': 'application/json'}
logger.debug(req_json)
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
else:
logger.info(
"Successfully registered model {name}:{version}".format(
name=name, version=version))
def get_current_model_version(self, name):
"""Get the current model version for the specified model.
Parameters
----------
name : str
The name of the model
Returns
-------
str
The current model version
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
version = None
model_info = self.get_all_models(verbose=True)
for m in model_info:
if m["model_name"] == name and m["is_current_version"]:
version = m["model_version"]
break
if version is None:
raise ClipperException(
"No versions of model {} registered with Clipper".format(name))
return version
def get_num_replicas(self, name, version=None):
"""Gets the current number of model container replicas for a model.
Parameters
----------
name : str
The name of the model
version : str, optional
The version of the model. If no version is provided,
the currently deployed version will be used.
Returns
-------
int
The number of active replicas
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
if version is None:
version = self.get_current_model_version(name)
else:
version = str(version)
return self.cm.get_num_replicas(name, version)
def set_num_replicas(self, name, num_replicas, version=None):
"""Sets the total number of active replicas for a model.
If the requested number of replicas is lower than the number currently
allocated, replicas will be removed. If it is higher, replicas will be
added.
Parameters
----------
name : str
The name of the model
version : str, optional
The version of the model. If no version is provided,
the currently deployed version will be used.
num_replicas : int, optional
The desired number of replicas.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
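Example
-------
A minimal sketch scaling the current version of a model to three replicas
(the model name is illustrative)::
clipper_conn.set_num_replicas("digits-model", num_replicas=3)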
"""
if not self.connected:
raise UnconnectedException()
if version is None:
version = self.get_current_model_version(name)
else:
version = str(version)
model_data = self.get_model_info(name, version)
if model_data is not None:
input_type = model_data["input_type"]
image = model_data["container_name"]
if image != CONTAINERLESS_MODEL_IMAGE:
self.cm.set_num_replicas(name, version, input_type, image,
num_replicas)
else:
msg = ("Cannot resize the replica set for containerless model "
"{name}:{version}").format(
name=name, version=version)
logger.error(msg)
raise ClipperException(msg)
else:
msg = "Cannot add container for non-registered model {name}:{version}".format(
name=name, version=version)
logger.error(msg)
raise ClipperException(msg)
def get_all_apps(self, verbose=False):
"""Gets information about all applications registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the apps' names.
If set to True, the list contains application info dictionaries.
These dictionaries have the same attribute name-value pairs that were
provided to :py:meth:`clipper_admin.ClipperConnection.register_application`.
Returns
-------
list
Returns a list of information about all apps registered to Clipper.
If no apps are registered with Clipper, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_all_applications".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_app_info(self, name):
"""Gets detailed information about a registered application.
Parameters
----------
name : str
The name of the application to look up
Returns
-------
dict
Returns a dictionary with the specified application's info. This
will contain the attribute name-value pairs that were provided to
:py:meth:`clipper_admin.ClipperConnection.register_application`.
If no application with name ``name`` is
registered with Clipper, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_application".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"name": name})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
app_info = r.json()
if len(app_info) == 0:
logger.warning(
"Application {} is not registered with Clipper".format(
name))
return None
return app_info
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_linked_models(self, app_name):
"""Retrieves the models linked to the specified application.
Parameters
----------
app_name : str
The name of the application
Returns
-------
list
Returns a list of the names of models linked to the app.
If no models are linked to the specified app, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_linked_models".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"app_name": app_name})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_all_models(self, verbose=False):
"""Gets information about all models registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the models' names.
If set to True, the list contains model info dictionaries.
Returns
-------
list
Returns a list of information about all models registered with Clipper.
If no models are registered with Clipper, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_all_models".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_model_info(self, name, version):
"""Gets detailed information about a registered model.
Parameters
----------
model_name : str
The name of the model to look up
model_version : int
The version of the model to look up
Returns
-------
dict
Returns a dictionary with the specified model's info.
If no model with name `model_name@model_version` is
registered with Clipper, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/get_model".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"model_name": name, "model_version": version})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
model_info = r.json()
if len(model_info) == 0:
logger.warning(
"Model {name}:{version} is not registered with Clipper.".
format(name=name, version=version))
return None
return model_info
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_all_model_replicas(self, verbose=False):
"""Gets information about all model containers registered with Clipper.
Parameters
----------
verbose : bool
If set to False, the returned list contains the containers' names.
If set to True, the list contains container info dictionaries.
Returns
-------
list
Returns a list of information about all model containers known to Clipper.
If no containers are registered with Clipper, an empty list is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/admin/get_all_containers".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"verbose": verbose})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_model_replica_info(self, name, version, replica_id):
"""Gets detailed information about a registered container.
Parameters
----------
name : str
The name of the container to look up
version : int
The version of the container to look up
replica_id : int
The container replica to look up
Returns
-------
dict
A dictionary with the specified container's info.
If no corresponding container is registered with Clipper, None is returned.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/get_container".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({
"model_name": name,
"model_version": version,
"replica_id": replica_id,
})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
model_rep_info = r.json()
if len(model_rep_info) == 0:
logger.warning(
"No model replica with ID {rep_id} found for model {name}:{version}".
format(rep_id=replica_id, name=name, version=version))
return None
return model_rep_info
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def get_clipper_logs(self, logging_dir="clipper_logs/"):
"""Download the logs from all Clipper docker containers.
Parameters
----------
logging_dir : str, optional
The directory to save the downloaded logs. If the directory does not
exist, it will be created.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_logs(logging_dir)
def inspect_instance(self):
"""Fetches performance metrics from the running Clipper cluster.
Returns
-------
str
The JSON string containing the current set of metrics
for this instance. On error, the string will be an error message
(not JSON formatted).
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
"""
if not self.connected:
raise UnconnectedException()
url = "http://{host}/metrics".format(host=self.cm.get_query_addr())
r = requests.get(url)
logger.debug(r.text)
if r.status_code == requests.codes.ok:
return r.json()
else:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
def set_model_version(self, name, version, num_replicas=None):
"""Changes the current model version to "model_version".
This method can be used to perform model roll-back and roll-forward. The
version can be set to any previously deployed version of the model.
Parameters
----------
name : str
The name of the model
version : str | obj with __str__ representation
The version of the model. Note that `version`
must be a model version that has already been deployed.
num_replicas : int
The number of new containers to start with the newly
selected model version.
Raises
------
:py:exc:`clipper.UnconnectedException`
:py:exc:`clipper.ClipperException`
Note
-----
Model versions automatically get updated when
py:meth:`clipper_admin.ClipperConnection.deploy_model()` is called. There is no need to
manually update the version after deploying a new model.
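Example
-------
A hedged sketch of rolling a model back to a previously deployed version
(the name and version are illustrative)::
clipper_conn.set_model_version("digits-model", "2", num_replicas=1)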
"""
if not self.connected:
raise UnconnectedException()
version = str(version)
url = "http://{host}/admin/set_model_version".format(
host=self.cm.get_admin_addr())
req_json = json.dumps({"model_name": name, "model_version": version})
headers = {'Content-type': 'application/json'}
r = requests.post(url, headers=headers, data=req_json)
logger.debug(r.text)
if r.status_code != requests.codes.ok:
msg = "Received error status code: {code} and message: {msg}".format(
code=r.status_code, msg=r.text)
logger.error(msg)
raise ClipperException(msg)
if num_replicas is not None:
self.set_num_replicas(name, num_replicas, version)
def get_query_addr(self):
"""Get the IP address at which the query frontend can be reached request predictions.
Returns
-------
str
The address as an IP address or hostname.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
return self.cm.get_query_addr()
def stop_models(self, model_names):
"""Stops all versions of the specified models.
This is a convenience method to avoid the need to explicitly list all versions
of a model when calling :py:meth:`clipper_admin.ClipperConnection.stop_versioned_models`.
Parameters
----------
model_names : list(str)
A list of model names. All replicas of all versions of each model specified in the list
will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
model_info = self.get_all_models(verbose=True)
model_dict = {}
for m in model_info:
if m["model_name"] in model_names:
if m["model_name"] in model_dict:
model_dict[m["model_name"]].append(m["model_version"])
else:
model_dict[m["model_name"]] = [m["model_version"]]
self.cm.stop_models(model_dict)
pp = pprint.PrettyPrinter(indent=4)
logger.info(
"Stopped all containers for these models and versions:\n{}".format(
pp.pformat(model_dict)))
def stop_versioned_models(self, model_versions_dict):
"""Stops the specified versions of the specified models.
Parameters
----------
model_versions_dict : dict(str, list(str))
For each entry in the dict, the key is a model name and the value is a list of model
versions. All replicas for each version of each model will be stopped.
Raises
------
:py:exc:`clipper.UnconnectedException`
Note
----
This method will stop the currently deployed versions of models if you specify them. You
almost certainly want to use one of the other stop_* methods. Use with caution.
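Example
-------
A minimal sketch stopping selected versions of two models (all names and
versions are illustrative)::
clipper_conn.stop_versioned_models({
"digits-model": ["1", "2"],
"fraud-model": ["3"]})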
"""
if not self.connected:
raise UnconnectedException()
self.cm.stop_models(model_versions_dict)
pp = pprint.PrettyPrinter(indent=4)
logger.info(
"Stopped all containers for these models and versions:\n{}".format(
pp.pformat(model_versions_dict)))
def stop_inactive_model_versions(self, model_names):
"""Stops all model containers serving stale versions of the specified models.
For example, if you have deployed versions 1, 2, and 3 of model "music_recommender"
and version 3 is the current version::
clipper_conn.stop_inactive_model_versions(["music_recommender"])
will stop any containers serving versions 1 and 2 but will leave containers serving
version 3 untouched.
Parameters
----------
model_names : list(str)
The names of the models whose old containers you want to stop.
Raises
------
:py:exc:`clipper.UnconnectedException`
"""
if not self.connected:
raise UnconnectedException()
model_info = self.get_all_models(verbose=True)
model_dict = {}
for m in model_info:
if m["model_name"] in model_names and not m["is_current_version"]:
if m["model_name"] in model_dict:
model_dict[m["model_name"]].append(m["model_version"])
else:
model_dict[m["model_name"]] = [m["model_version"]]
self.cm.stop_models(model_dict)
pp = pprint.PrettyPrinter(indent=4)
logger.info(
"Stopped all containers for these models and versions:\n{}".format(
pp.pformat(model_dict)))
def stop_all_model_containers(self):
"""Stops all model containers started via Clipper admin commands.
This method can be used to clean up leftover Clipper model containers even if the
Clipper management frontend or Redis has crashed. It can also be called without calling
``connect`` first.
"""
self.cm.stop_all_model_containers()
logger.info("Stopped all Clipper model containers")
def stop_all(self):
"""Stops all processes that were started via Clipper admin commands.
This includes the query and management frontend Docker containers and all model containers.
If you started Redis independently, this will not affect Redis. It can also be called
without calling ``connect`` first.
"""
self.cm.stop_all()
logger.info("Stopped all Clipper cluster and all model containers")
def test_predict_function(self, query, func, input_type):
"""Tests that the user's function has the correct signature and can be properly saved and
loaded.
This method takes a dict-formatted request object (in the same JSON format that
the query frontend expects), the predict function to test, and the input type
for the model.
For example, it can be called like::
clipper_conn.test_predict_function({"input": [1.0, 2.0, 3.0]}, predict_func, "doubles")
Parameters
----------
query: JSON or list of dicts
Inputs to test the prediction function on.
func: function
Predict function to test.
input_type: str
The input_type to be associated with the registered app and deployed model.
One of "integers", "floats", "doubles", "bytes", or "strings".
"""
if not self.connected:
self.connect()
query_data = list(x for x in list(query.values()))
query_key = list(query.keys())
if query_key[0] == "input_batch":
query_data = query_data[0]
try:
flattened_data = [
item for sublist in query_data for item in sublist
]
except TypeError:
return "Invalid input type or JSON key"
numpy_data = None
if input_type == "bytes":
numpy_data = list(np.int8(x) for x in query_data)
for x in flattened_data:
if type(x) != bytes:
return "Invalid input type"
if input_type == "integers":
numpy_data = list(np.int32(x) for x in query_data)
for x in flattened_data:
if type(x) != int:
return "Invalid input type"
if input_type == "floats" or input_type == "doubles":
if input_type == "floats":
numpy_data = list(np.float32(x) for x in query_data)
else:
numpy_data = list(np.float64(x) for x in query_data)
for x in flattened_data:
if type(x) != float:
return "Invalid input type"
if input_type == "string":
numpy_data = list(np.str_(x) for x in query_data)
for x in flattened_data:
if type(x) != str:
return "Invalid input type"
s = StringIO()
c = CloudPickler(s, 2)
c.dump(func)
serialized_func = s.getvalue()
reloaded_func = pickle.loads(serialized_func)
try:
assert reloaded_func
except AssertionError:
logger.error("Function does not properly serialize and reload")
return "Function does not properly serialize and reload"
return reloaded_func(numpy_data)
# Source: dcrankshaw/clipper, clipper_admin/clipper_admin/clipper_admin.py (Apache-2.0)
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from scipy.optimize import leastsq
from scipy.stats import norm
log = logging.getLogger(__name__)
class IdealObs(object):
"""Statistical ideal observer.
Converts input values (usually SNRenv) to a percentage.
Parameters
----------
k : float, optional
(Default value = sqrt(1.2))
q : float, optional
(Default value = 0.5)
sigma_s : float, optional
(Default value = 0.6)
m : int, optional
Number of words in the vocabulary. (Default value = 8000)
Notes
-----
Implemented as described in [jorgensen2011]_.
Examples
--------
Converting values to percent correct using the default parameters
of the ideal observer:
>>> from pambox import central
>>> obs = central.IdealObs()
>>> obs.transform((0, 1, 2, 3))
"""
def __init__(self, k=np.sqrt(1.2), q=0.5, sigma_s=0.6, m=8000.):
self.k = k
self.q = q
self.sigma_s = sigma_s
self.m = m
def get_params(self):
"""Returns the parameters of the ideal observer as dict.
Parameters
----------
None
Returns
-------
params : dict
Dictionary of internal parameters of the ideal observer.
"""
return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}
def fit_obs(self, values, pcdata, sigma_s=None, m=None, tries=10):
"""Finds the parameters of the ideal observer.
Finds the parameters ``k``, ``q``, and ``sigma_s`` that minimize the
least-square error between a data set and transformed SNRenv.
By default the ``m`` parameter is fixed and the property ``m`` is used.
It can also be defined as an optional parameter.
It is also possible to fix the `sigma_s` parameter by passing it as
an optional argument. Otherwise, it is optimized with `k` and `q`.
Parameters
----------
values : ndarray
The linear SNRenv values that are to be converted to percent
correct.
pcdata : ndarray
The data, in percentage between 0 and 1, of correctly understood
tokens. Must be the same shape as `values`.
sigma_s : float, optional
(Default value = None)
m : float, optional
(Default value = None)
tries : int, optional
How many attempts to fit the observer if the start values do not
converge. The default is 10 times.
Returns
-------
self
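Examples
--------
A hedged sketch of a typical fit; ``snrenv`` and ``pc_measured`` are
placeholders for the user's SNRenv values and measured proportions correct:
>>> obs = IdealObs()
>>> obs = obs.fit_obs(snrenv, pc_measured)
>>> pc_fitted = obs.transform(snrenv)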
"""
values = np.asarray(values)
pcdata = np.asarray(pcdata)
if m is None:
m = self.m
else:
self.m = m
# Set default values for optimization
p0 = [self.k, self.q, self.sigma_s]
fixed_params = {'m': m}
if sigma_s is not None:
p0 = p0[:2]
fixed_params['sigma_s'] = sigma_s
# Reshape the array to have `N` predictions and define the cost
# function to average over those predictions.
if values.shape != pcdata.shape:
values = values.reshape((-1, len(pcdata)))
def errfc(p, fixed):
return np.mean(self._transform(values, *p, **fixed), axis=0
) - pcdata
# They have the same shape, the array should not be averaged
else:
def errfc(p, fixed):
return self._transform(values, *p, **fixed) - pcdata
for try_id in range(tries):
(x, _, _, errmsg, ier) = leastsq(errfc, p0, args=fixed_params,
maxfev=10000, full_output=True)
if ier in [1, 2, 3, 4]:
break
else:
p0 = 2 * np.random.random_sample(len(p0))
log.error("Optimal parameters not found: " + errmsg)
if sigma_s:
self.k, self.q = x
self.sigma_s = sigma_s
else:
self.k, self.q, self.sigma_s = x
return self
@staticmethod
def _transform(values, k=None, q=None, sigma_s=None, m=None):
"""Converts SNRenv values to percent correct using an ideal observer.
Parameters
----------
values : array_like
linear values of SNRenv
k : float
k parameter (Default value = None)
q : float
q parameter (Default value = None)
sigma_s : float
sigma_s parameter (Default value = None)
m : float
m parameter, number of words in the vocabulary. (Default value =
None)
Returns
-------
pc : ndarray
Array of intelligibility percentage values, of the same shape as
`values`.
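Notes
-----
For clarity, the conversion implemented below can be restated (notation ours)
as :math:`d' = k \cdot \mathrm{SNRenv}^{q}` and
:math:`Pc = 100 \, \Phi\bigl((d' - \mu_N)/\sqrt{\sigma_s^2 + \sigma_N^2}\bigr)`,
where :math:`U = \Phi^{-1}(1 - 1/m)`, :math:`\mu_N = U + 0.577/U`, and
:math:`\sigma_N = 1.28255/U`.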
"""
un = norm.ppf(1.0 - 1.0 / m)
sn = 1.28255 / un
un += 0.577 / un
dp = k * values ** q
return norm.cdf(dp, un, np.sqrt(sigma_s ** 2 + sn ** 2)) * 100
def transform(self, values):
"""Converts inputs values to a percent correct.
Parameters
----------
values : array_like
Linear values to transform.
Returns
-------
pc : ndarray
Array of intelligibility percentage values, of the same shape as
`values`.
"""
values = np.asarray(values)
return self._transform(values, self.k, self.q, self.sigma_s, self.m)
# Source: achabotl/pambox, pambox/central/decision_metrics.py (BSD-3-Clause)
from __future__ import absolute_import, division, print_function
import math
from collections import defaultdict
from unittest import TestCase
from manhattan import util
class TestUtil(TestCase):
def assertRandomish(self, s, bits=4):
# Calculate entropy.
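# Shannon entropy over character frequencies: H = sum_c p_c * log2(1 / p_c),
# which should approach `bits` bits per character for a random-looking string.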
counts = defaultdict(int)
for char in s:
counts[char] += 1
n = float(len(s))
entropy = sum((c / n) * math.log(n / c) / math.log(2)
for c in counts.values())
self.assertLess(bits - entropy, .01)
def assertRoughly(self, a, b, f=0.5):
bot = b - (b * f)
top = b + (b * f)
self.assertLessEqual(bot, a)
self.assertLessEqual(a, top)
def test_nonce(self):
n1 = util.nonce()
n2 = util.nonce()
self.assertNotEqual(n1, n2)
s = b''.join(util.nonce() for i in range(100))
self.assertRandomish(s)
def test_choose_population_bool(self):
a = 0
for ii in range(200):
if util.choose_population(util.nonce()):
a += 1
# Make sure it's relatively uniform...
self.assertRoughly(a, 100)
def test_chose_population_bad_value(self):
with self.assertRaises(ValueError):
util.choose_population(util.nonce(), 123)
def test_choose_population_zero_mass(self):
with self.assertRaises(ValueError):
util.choose_population(util.nonce(), {'foo': 0})
def test_choose_population_list(self):
counts = defaultdict(int)
for ii in range(300):
choice = util.choose_population(util.nonce(),
['foo', 'bar', 'baz'])
counts[choice] += 1
self.assertRoughly(counts['foo'], 100)
self.assertRoughly(counts['bar'], 100)
self.assertRoughly(counts['baz'], 100)
def test_choose_population_weighted(self):
counts = defaultdict(int)
for ii in range(300):
choice = util.choose_population(util.nonce(), {'foo': 0.1,
'quux': 0,
'bar': 0.1,
'baz': 0.8})
counts[choice] += 1
self.assertRoughly(counts['foo'], 30)
self.assertRoughly(counts['bar'], 30)
self.assertRoughly(counts['baz'], 240)
self.assertEqual(counts['quux'], 0)
def test_decode_http_header_none(self):
self.assertEqual(util.decode_http_header(None), u'')
def test_decode_http_header(self):
self.assertEqual(util.decode_http_header('hello \xf6 \xe1 world'),
u'hello \xf6 \xe1 world')
class TestSigner(TestCase):
def setUp(self):
self.sample = util.nonce()
def test_round_trip(self):
signer = util.Signer('s3krit')
signed = signer.sign(self.sample)
b = signer.unsign(signed)
self.assertEqual(self.sample, b)
def test_bad_signature(self):
signer = util.Signer('s3krit')
signed = signer.sign(self.sample)
mangled = signed[:-3]
with self.assertRaises(util.BadSignature) as cm:
signer.unsign(mangled)
self.assertIn(mangled, str(cm.exception))
def test_lowercase(self):
signer = util.Signer('s3krit')
signed = signer.sign(self.sample)
b = signer.unsign(signed.lower())
self.assertEqual(self.sample, b)
def test_uppercase(self):
signer = util.Signer('s3krit')
signed = signer.sign(self.sample)
b = signer.unsign(signed.upper())
self.assertEqual(self.sample, b)
def test_bad_data(self):
signer = util.Signer('s3krit')
signed = signer.sign(self.sample)
mangled = signed.split('.')[0]
with self.assertRaises(util.BadData) as cm:
signer.unsign(mangled)
self.assertIn('No separator', str(cm.exception))
self.assertIn(mangled, str(cm.exception))
# Source: storborg/manhattan, manhattan/tests/test_util.py (MIT)
from __future__ import absolute_import, division, print_function
import math
import numpy as np
import matplotlib
from matplotlib.figure import Figure
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.collections import BrokenBarHCollection
import matplotlib.ticker as mticker
from matplotlib.colors import LogNorm
from enum import Enum
from mpl_toolkits.axes_grid1 import ImageGrid
from atom.api import Atom, Str, observe, Typed, Int, List, Dict, Float, Bool
from skbeam.core.fitting.xrf_model import K_TRANSITIONS, L_TRANSITIONS, M_TRANSITIONS
from skbeam.fluorescence import XrfElement as Element
from ..core.xrf_utils import get_eline_parameters
import logging
logger = logging.getLogger(__name__)
def get_color_name():
# usually line plot will not go beyond 10
first_ten = [
"indigo",
"maroon",
"green",
"darkblue",
"darkgoldenrod",
"blue",
"darkcyan",
"sandybrown",
"black",
"darkolivegreen",
]
# Avoid red colors, as they conflict with the emission lines' colors.
nonred_list = [
v
for v in matplotlib.colors.cnames.keys()
if "pink" not in v and "fire" not in v and "sage" not in v and "tomato" not in v and "red" not in v
]
return first_ten + nonred_list + list(matplotlib.colors.cnames.keys())
class PlotTypes(Enum):
LINLOG = 0
LINEAR = 1
class EnergyRangePresets(Enum):
SELECTED_RANGE = 0
FULL_SPECTRUM = 1
class MapTypes(Enum):
LINEAR = 0
LOG = 1
class MapAxesUnits(Enum):
PIXELS = 0
POSITIONS = 1
class LinePlotModel(Atom):
"""
This class performs all the required line plots.
Attributes
----------
data : array
Experimental data
_fig : class object
Figure object from matplotlib
_ax : class object
Axis object from matplotlib
_canvas : class object
Canvas object from matplotlib
element_id : int
Index of element
parameters : `atom.List`
A list of `Parameter` objects, subclassed from the `Atom` base class.
These `Parameter` objects hold all relevant xrf information
elist : list
Emission energy and intensity for given element
plot_opt : int
Linear or log plot
total_y : dict
Results for k lines
total_l : dict
Results for l and m lines
prefit_x : array
X axis with limited range
plot_title : str
Title for plotting
fit_x : array
x value for fitting
fit_y : array
fitted data
plot_type_names : list
linear or log plot
max_v : float
max value of data array
incident_energy : float
in keV
param_model : Typed(object)
Reference to ParamModel object
"""
# data = Typed(object) # Typed(np.ndarray)
exp_data_label = Str("experiment")
number_pts_to_show = Int(3000) # The number of spectrum point to show
# -------------------------------------------------------------
# Preview plot (raw experimental spectra)
_fig_preview = Typed(Figure)
_ax_preview = Typed(Axes)
_lines_preview = List()
_bahr_preview = Typed(BrokenBarHCollection)
plot_type_preview = Typed(PlotTypes)
energy_range_preview = Typed(EnergyRangePresets)
min_v_preview = Float()
max_v_preview = Float()
min_e_preview = Float()
max_e_preview = Float()
# -----------------------------------------------------------
# Preview of Total Count Maps
_fig_maps = Typed(Figure)
map_type_preview = Typed(MapTypes)
map_axes_units_preview = Typed(MapAxesUnits)
map_scatter_plot = Bool(False)
map_preview_color_scheme = Str("viridis")
map_preview_range_low = Float(-1)
map_preview_range_high = Float(-1)
# ------------------------------------------------------------
_fig = Typed(Figure)
_ax = Typed(Axes)
_canvas = Typed(object)
plot_fit_x_min = Float(0) # The variables are used to store x_min and x_max for the current plot
plot_fit_x_max = Float(0)
element_id = Int(0)
elist = List()
scale_opt = Int(0)
# total_y = Dict()
# total_l = Dict()
# total_m = Dict()
# total_pileup = Dict()
prefit_x = Typed(object)
plot_title = Str()
# fit_x = Typed(np.ndarray)
# fit_y = Typed(np.ndarray)
# residual = Typed(np.ndarray)
plot_type_names = List()
max_v = Float()
incident_energy = Float(12.0)
energy_range_names = List()
energy_range_fitting = Str()
eline_obj = List()
plot_exp_opt = Bool(False)
plot_exp_obj = Typed(Line2D)
show_exp_opt = Bool(False) # Flag: show spectrum preview
# Reference to artist responsible for displaying the selected range of energies on the plot
plot_energy_barh = Typed(BrokenBarHCollection)
t_bar = Typed(object)
plot_exp_list = List()
auto_fit_obj = List()
show_autofit_opt = Bool()
plot_fit_obj = List() # Typed(Line2D)
show_fit_opt = Bool(False)
# fit_all = Typed(object)
plot_style = Dict()
roi_plot_dict = Dict()
roi_dict = Typed(object) # OrderedDict()
log_range = List()
linear_range = List()
plot_escape_line = Int(0)
emission_line_window = Bool(True)
det_materials = Int(0)
escape_e = Float(1.73998)
limit_cut = Int()
# prefix_name_roi = Str()
# element_for_roi = Str()
# element_list_roi = List()
# roi_dict = Typed(object) #OrderedDict()
# img_dict = Dict()
# roi_result = Dict()
# Reference to ParamModel object
param_model = Typed(object)
# Reference to FileIOModel object
io_model = Typed(object)
# Location of the vertical (mouse-selected) marker on the plot.
# Value is in kev. Negative value - no marker is placed.
vertical_marker_kev = Float(-1)
# Reference to the respective Matplotlib artist
line_vertical_marker = Typed(object)
vertical_marker_is_visible = Bool(False)
report_marker_state = Typed(object)
def __init__(self, *, param_model, io_model):
# Reference to ParamModel object
self.param_model = param_model
self.io_model = io_model
# self.data = None
self._fig = plt.figure()
self._ax = self._fig.add_subplot(111)
try:
self._ax.set_axis_bgcolor("lightgrey")
except AttributeError:
self._ax.set_facecolor("lightgrey")
self._ax.set_xlabel("Energy (keV)")
self._ax.set_ylabel("Spectrum (Counts)")
self._ax.grid(which="both")
self._ax.set_yscale("log")
self.plot_type_names = ["LinLog", "Linear"]
self.energy_range_names = ["selected", "full"]
self.energy_range_fitting = "selected"
self._ax.autoscale_view(tight=True)
self._ax.legend(loc=2)
self._color_config()
self._fig.tight_layout(pad=0.5)
self.max_v = 1.0
# When we calculate the max value, data smaller than 500 (about 0.5 keV) can be ignored.
# The last point of the data is also huge and should be cut off.
self.limit_cut = 100
# self._ax.margins(x=0.0, y=0.10)
# --------------------------------------------------------------
# Spectrum preview figure
self._fig_preview = Figure()
self.plot_type_preview = PlotTypes.LINLOG
self.energy_range_preview = EnergyRangePresets.SELECTED_RANGE
# --------------------------------------------------------------
# Preview of Total Count Maps
self._fig_maps = Figure()
self.map_type_preview = MapTypes.LINEAR
self.map_axes_units_preview = MapAxesUnits.PIXELS
def _color_config(self):
self.plot_style = {
"experiment": {"color": "blue", "linestyle": "", "marker": ".", "label": self.exp_data_label},
"background": {"color": "indigo", "marker": "+", "markersize": 1, "label": "background"},
"emission_line": {"color": "black", "linewidth": 2},
"roi_line": {"color": "red", "linewidth": 2},
"k_line": {"color": "green", "label": "k lines"},
"l_line": {"color": "magenta", "label": "l lines"},
"m_line": {"color": "brown", "label": "m lines"},
"compton": {"color": "darkcyan", "linewidth": 1.5, "label": "compton"},
"elastic": {"color": "purple", "label": "elastic"},
"escape": {"color": "darkblue", "label": "escape"},
"pileup": {"color": "darkgoldenrod", "label": "pileup"},
"userpeak": {"color": "orange", "label": "userpeak"},
# 'auto_fit': {'color': 'black', 'label': 'auto fitted', 'linewidth': 2.5},
"fit": {"color": "red", "label": "fit", "linewidth": 2.5},
"residual": {"color": "black", "label": "residual", "linewidth": 2.0},
}
def plot_exp_data_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
changed : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.plot_exp_opt = False # exp data for fitting
self.show_exp_opt = False # all exp data from different channels
self.show_fit_opt = False
# Reset currently selected element_id (mostly to reset GUI elements)
self.element_id = 0
def init_mouse_event(self):
"""Set up callback for mouse button-press event"""
# Reference to the toolbar
self.t_bar = self._fig.canvas.toolbar
# Set callback for Button Press event
self._fig.canvas.mpl_connect("button_press_event", self.canvas_onpress)
def _update_canvas(self):
# It may be sufficient to initialize the event only once, but at this point
# it seems to be the most reliable option. May be changed in the future.
self.init_mouse_event()
self.plot_vertical_marker()
self._ax.legend(loc=2)
try:
self._ax.legend(framealpha=0.2).set_draggable(True)
except AttributeError:
self._ax.legend(framealpha=0.2)
self._fig.tight_layout(pad=0.5)
# self._ax.margins(x=0.0, y=0.10)
# when we click the home button on matplotlib gui,
# relim will remember the previously defined x range
self._ax.relim(visible_only=True)
self._fig.canvas.draw()
def _update_ylimit(self):
# manually define y limit, from experience
self.log_range = [self.max_v * 1e-5, self.max_v * 2]
# self.linear_range = [-0.3*self.max_v, self.max_v*1.2]
self.linear_range = [0, self.max_v * 1.2]
def exp_label_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
changed : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
self.exp_data_label = change["value"]
self.plot_style["experiment"]["label"] = change["value"]
# @observe('exp_data_label')
# def _change_exp_label(self, change):
# if change['type'] == 'create':
# return
# self.plot_style['experiment']['label'] = change['value']
@observe("parameters")
def _update_energy(self, change):
if "coherent_sct_energy" not in self.param_model.param_new:
return
self.incident_energy = self.param_model.param_new["coherent_sct_energy"]["value"]
def set_energy_range_fitting(self, energy_range_name):
if energy_range_name not in self.energy_range_names:
raise ValueError(
f"Unknown energy range name {energy_range_name}. Allowed names: {self.energy_range_names}"
)
self.energy_range_fitting = energy_range_name
self.plot_experiment()
def set_incident_energy(self, change):
"""
The observer function that changes the value of incident energy
and upper bound for fitted energy range. Should not be called directly.
Parameters
----------
change : dict
``change["value"]`` is the new value of incident energy
"""
self.change_incident_energy(change["value"])
def change_incident_energy(self, energy_new):
"""
The function that changes the value of the incident energy
and the upper bound of the fitted energy range.
Parameters
----------
energy_new : float
New value of the incident energy
"""
margin = 0.8 # Value by which the upper bound of the range used for fitting
# exceeds the incident energy. Selected for convenience, but
# is subject to change. This is the place to change it to take effect
# throughout the program.
# Limit the number of decimal points for better visual presentation
energy_new = round(energy_new, ndigits=6)
# Change the value twice to ensure that all observer functions are called
self.incident_energy = energy_new + 1.0 # Arbitrary number different from 'energy_new'
self.incident_energy = energy_new
if "coherent_sct_energy" in self.param_model.param_new:
self.param_model.param_new["coherent_sct_energy"]["value"] = energy_new
# Change the value twice to ensure that all observer functions are called
self.param_model.energy_bound_high_buf = energy_new + 1.8 # Arbitrary number
upper_bound = energy_new + margin
# Limit the number of decimal points for better visual presentation
upper_bound = round(upper_bound, ndigits=5)
self.param_model.energy_bound_high_buf = upper_bound
@observe("scale_opt")
def _new_opt(self, change):
self.log_linear_plot()
self._update_canvas()
def energy_bound_high_update(self, change):
"""Observer function for 'param_model.energy_bound_high_buf'"""
if self.io_model.data is None:
return
self.exp_data_update({"value": self.io_model.data})
self.plot_selected_energy_range_original(e_high=change["value"])
self.plot_vertical_marker(e_high=change["value"])
self._update_canvas()
def energy_bound_low_update(self, change):
"""Observer function for 'param_model.energy_bound_low_buf'"""
if self.io_model.data is None:
return
self.exp_data_update({"value": self.io_model.data})
self.plot_selected_energy_range_original(e_low=change["value"])
self.plot_vertical_marker(e_low=change["value"])
self._update_canvas()
def log_linear_plot(self):
if self.plot_type_names[self.scale_opt] == "LinLog":
self._ax.set_yscale("log")
# self._ax.margins(x=0.0, y=0.5)
# self._ax.autoscale_view(tight=True)
# self._ax.relim(visible_only=True)
self._ax.set_ylim(self.log_range)
else:
self._ax.set_yscale("linear")
# self._ax.margins(x=0.0, y=0.10)
# self._ax.autoscale_view(tight=True)
# self._ax.relim(visible_only=True)
self._ax.set_ylim(self.linear_range)
def exp_data_update(self, change):
"""
Observer function to be connected to the fileio model
in the top-level gui.py startup
Parameters
----------
changed : dict
This is the dictionary that gets passed to a function
with the @observe decorator
"""
# TODO: This function does not change the data. Instead it is expected to
# perform a number of operation when data is changed.
# self.data = change['value']
if self.io_model.data is None:
return
e_range = self.energy_range_fitting
e_range_full, e_range_selected = "full", "selected"
if set([e_range_full, e_range_selected]) < set(self.energy_range_names):
raise ValueError(
f"Some names for energy range {(e_range_full, e_range_selected)} are not supported. "
"Please report the error to the development team."
)
if e_range not in (e_range_full, e_range_selected):
logger.error(
f"Spectrum preview: Unknown option for the energy range: {e_range}\n"
"Please report the error to the development team."
)
# This is not a critical error, so we still can proceed
e_range = e_range_full
if not self.param_model.param_new:
return
# The number of points in the displayed dataset
n_dset_points = len(self.io_model.data)
if e_range == e_range_selected:
n_range_low, n_range_high = self.selected_range_indices(n_indexes=n_dset_points)
else:
n_range_low, n_range_high = 0, n_dset_points
n_low = int(np.clip(n_range_low, a_min=0, a_max=n_dset_points - 1))
n_high = int(np.clip(n_range_high, a_min=1, a_max=n_dset_points))
# Find the maximum value (skip the first and last 'limit_cut' points of the dataset
n1, n2 = max(self.limit_cut, n_low), min(n_dset_points - self.limit_cut, n_high)
if n2 <= n1: # This is just a precaution: it is expected that n_dset_points >> 2 * limit_cut
n1, n2 = n_low, n_high
self.max_v = float(np.max(self.io_model.data[n1:n2]))
try:
self.plot_exp_obj.remove()
logger.debug("Previous experimental data is removed.")
except AttributeError:
logger.debug("No need to remove experimental data.")
data_arr = self.io_model.data
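# Energy axis from the quadratic calibration:
# E(n) = e_offset + e_linear * n + e_quadratic * n**2, with n the channel index.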
x_v = (
self.param_model.param_new["e_offset"]["value"]
+ np.arange(n_low, n_high) * self.param_model.param_new["e_linear"]["value"]
+ np.arange(n_low, n_high) ** 2 * self.param_model.param_new["e_quadratic"]["value"]
)
data_arr = data_arr[n_low:n_high]
(self.plot_exp_obj,) = self._ax.plot(
x_v,
data_arr,
linestyle=self.plot_style["experiment"]["linestyle"],
color=self.plot_style["experiment"]["color"],
marker=self.plot_style["experiment"]["marker"],
label=self.plot_style["experiment"]["label"],
)
# Rescale the plot along x-axis if needed
x_min, x_max = x_v[0], x_v[-1]
if (x_min != self.plot_fit_x_min) or (x_max != self.plot_fit_x_max):
self.plot_fit_x_min = x_min
self.plot_fit_x_max = x_max
self._ax.set_xlim(x_min, x_max)
self._update_ylimit()
self.log_linear_plot()
self._set_eline_select_controls()
self.plot_selected_energy_range_original()
# _show_hide_exp_plot is called to show or hide current plot based
# on the state of _show_exp_opt flag
self._show_hide_exp_plot(self.show_exp_opt or self.plot_exp_opt)
def _show_hide_exp_plot(self, plot_show):
if self.io_model.data is None:
return
try:
if plot_show:
self.plot_exp_obj.set_visible(True)
lab = self.plot_exp_obj.get_label()
self.plot_exp_obj.set_label(lab.strip("_"))
else:
self.plot_exp_obj.set_visible(False)
lab = self.plot_exp_obj.get_label()
self.plot_exp_obj.set_label("_" + lab)
self._update_canvas()
except Exception:
pass
@observe("plot_exp_opt")
def _new_exp_plot_opt(self, change):
if self.io_model.data is None:
return
if change["type"] != "create":
if change["value"]:
self.plot_experiment()
# _show_hide_exp_plot is already called inside 'plot_experiment()',
# but visibility flag was not used correctly. So we need to
# call it again.
self._show_hide_exp_plot(change["value"])
self._set_eline_select_controls()
# @observe('show_exp_opt')
# def _update_exp(self, change):
# if change['type'] != 'create':
# if change['value']:
# if len(self.plot_exp_list):
# for v in self.plot_exp_list:
# v.set_visible(True)
# lab = v.get_label()
# if lab != '_nolegend_':
# v.set_label(lab.strip('_'))
# else:
# if len(self.plot_exp_list):
# for v in self.plot_exp_list:
# v.set_visible(False)
# lab = v.get_label()
# if lab != '_nolegend_':
# v.set_label('_' + lab)
# self._update_canvas()
@observe("show_fit_opt")
def _update_fit(self, change):
if change["type"] != "create":
if change["value"]:
for v in self.plot_fit_obj:
v.set_visible(True)
lab = v.get_label()
if lab != "_nolegend_":
v.set_label(lab.strip("_"))
else:
for v in self.plot_fit_obj:
v.set_visible(False)
lab = v.get_label()
if lab != "_nolegend_":
v.set_label("_" + lab)
self._update_canvas()
def plot_experiment(self):
"""
        Plot raw experimental data for fitting.
"""
# Do nothing if no data is loaded
if self.io_model.data is None:
return
data_arr = np.asarray(self.io_model.data)
self.exp_data_update({"value": data_arr})
def plot_vertical_marker(self, *, e_low=None, e_high=None):
# It doesn't seem necessary to force the marker inside the selected range.
        # It may be used for purposes that require setting it outside the range
# self._vertical_marker_set_inside_range(e_low=e_low, e_high=e_high)
x_v = (self.vertical_marker_kev, self.vertical_marker_kev)
y_v = (-1e30, 1e30) # This will cover the range of possible values of accumulated counts
if self.line_vertical_marker:
self._ax.lines.remove(self.line_vertical_marker)
self.line_vertical_marker = None
if self.vertical_marker_is_visible:
(self.line_vertical_marker,) = self._ax.plot(x_v, y_v, color="blue")
def set_plot_vertical_marker(self, marker_position=None, mouse_clicked=False):
"""
        The function is called when setting the position of the marker interactively.
If the parameter `marker_position` is `None`, then don't set or change the value.
Just make the marker visible.
"""
# Ignore the new value if it is outside the range of selected energies.
# If 'marker_position' is None, then show the marker at its current location.
# Totally ignore clicks if 'marker_position' is outside the range (but still
        # display the marker if 'mouse_clicked' is False).
marker_in_range = True
if marker_position is not None:
e_low = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
e_high = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
if e_low <= marker_position <= e_high or not mouse_clicked:
# If the function was called to display marker (e.g. for existing peak) outside
# the selected range, then show it. If button was clicked, then ignore it.
self.vertical_marker_kev = marker_position
else:
marker_in_range = False
if marker_in_range:
# Make the marker visible
self.vertical_marker_is_visible = True
# Compute peak intensity. The displayed value will change only for user defined peak,
# since it is moved to the position of the marker.
self.compute_manual_peak_intensity()
# Update the location of the marker and the canvas
self.plot_vertical_marker()
self._update_canvas()
if mouse_clicked:
try:
self.report_marker_state(True) # This is an externally set callback function
except Exception:
pass
def hide_plot_vertical_marker(self, mouse_clicked=False):
"""Hide vertical marker"""
self.vertical_marker_is_visible = False
self.plot_vertical_marker()
self._update_canvas()
if mouse_clicked:
try:
self.report_marker_state(False) # This is an externally set callback function
except Exception:
pass
def plot_selected_energy_range_original(self, *, e_low=None, e_high=None):
"""
Plot the range of energies selected for processing. The range may be optionally
        provided as arguments. The range values that are not provided are read from
        the globally accessible dictionary of parameters. The values passed as arguments
are mainly used if the function is called during interactive update of the
range, when the order of update is undetermined and the parameter dictionary
may be updated after the function is called.
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if e_high is None:
e_high = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
n_x = 4096 # Set to the maximum possible number of points
# Generate the values for 'energy' axis
x_v = (
self.param_model.param_new["e_offset"]["value"]
+ np.arange(n_x) * self.param_model.param_new["e_linear"]["value"]
+ np.arange(n_x) ** 2 * self.param_model.param_new["e_quadratic"]["value"]
)
ss = (x_v < e_high) & (x_v > e_low)
y_min, y_max = -1e30, 1e30 # Select the max and min values for plotted rectangles
# Remove the plot if it exists
if self.plot_energy_barh in self._ax.collections:
self._ax.collections.remove(self.plot_energy_barh)
        # Create the new plot (based on new parameters if necessary)
self.plot_energy_barh = BrokenBarHCollection.span_where(
x_v, ymin=y_min, ymax=y_max, where=ss, facecolor="white", edgecolor="yellow", alpha=1
)
self._ax.add_collection(self.plot_energy_barh)
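    # Plot the spectra of all datasets selected for preview on the same axes: each spectrum is
    # truncated to 'number_pts_to_show' points and drawn with its own color.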
def plot_multi_exp_data(self):
while len(self.plot_exp_list):
self.plot_exp_list.pop().remove()
color_n = get_color_name()
self.max_v = 1.0
m = 0
for (k, v) in self.io_model.data_sets.items():
if v.selected_for_preview:
data_arr = np.asarray(v.data)
# Truncate the array (1D spectrum)
data_arr = data_arr[0 : self.number_pts_to_show]
self.max_v = np.max([self.max_v, np.max(data_arr[self.limit_cut : -self.limit_cut])])
x_v = (
self.param_model.param_new["e_offset"]["value"]
+ np.arange(len(data_arr)) * self.param_model.param_new["e_linear"]["value"]
+ np.arange(len(data_arr)) ** 2 * self.param_model.param_new["e_quadratic"]["value"]
)
(plot_exp_obj,) = self._ax.plot(
x_v,
data_arr,
color=color_n[m],
label=v.filename.split(".")[0],
linestyle=self.plot_style["experiment"]["linestyle"],
marker=self.plot_style["experiment"]["marker"],
)
self.plot_exp_list.append(plot_exp_obj)
m += 1
self.plot_selected_energy_range_original()
self._update_ylimit()
self.log_linear_plot()
self._update_canvas()
def plot_emission_line(self):
"""
Plot emission line and escape peaks associated with given lines.
The value of self.max_v is needed in this function in order to plot
the relative height of each emission line.
"""
while len(self.eline_obj):
self.eline_obj.pop().remove()
escape_e = self.escape_e
if len(self.elist):
for i in range(len(self.elist)):
(eline,) = self._ax.plot(
[self.elist[i][0], self.elist[i][0]],
[0, self.elist[i][1] * self.max_v],
color=self.plot_style["emission_line"]["color"],
linewidth=self.plot_style["emission_line"]["linewidth"],
)
self.eline_obj.append(eline)
if self.plot_escape_line and self.elist[i][0] > escape_e:
(eline,) = self._ax.plot(
[self.elist[i][0] - escape_e, self.elist[i][0] - escape_e],
[0, self.elist[i][1] * self.max_v],
color=self.plot_style["escape"]["color"],
linewidth=self.plot_style["emission_line"]["linewidth"],
)
self.eline_obj.append(eline)
def _set_eline_select_controls(self, *, element_id=None, data="use_self_data"):
if element_id is None:
element_id = self.element_id
if data == "use_self_data":
data = self.io_model.data
def is_line_in_selected_list(self, n_id):
"""
Checks if the line with ID ``n_id`` is in the list of
selected element lines.
Used to enable/disable 'Add Line' and 'Remove Line' buttons.
Parameters
----------
n_id : Int
index of the element emission line in the list
(often equal to ``self.element_id``)
Returns True if the element line
is in the list of selected lines. False otherwise.
"""
ename = self.get_element_line_name_by_id(n_id)
if ename is None:
return False
if self.param_model.EC.is_element_in_list(ename):
return True
else:
return False
def is_element_line_id_valid(self, n_id):
"""
Checks if ID (``n_id``) of the element emission line is valid,
i.e. the name of the line may be obtained by using the ID.
Parameters
----------
n_id : Int
index of the element emission line in the list
(often equal to 'self.element_id')
Returns True if the element line is valid
"""
# There may be a more efficient way to check 'n_id',
# but we want to use the same function as we use
        # to retrieve the line name
ename = self.get_element_line_name_by_id(n_id)
if ename is None:
return False
else:
return True
def get_element_line_name_by_id(self, n_id):
"""
Retrieves the name of the element emission line from its ID
(the number in the list). The lines are numbered starting with 1.
If the ID is invalid, the function returns None.
Parameters
----------
n_id : int
index of the element emission line in the list
(often equal to 'self.element_id')
Returns the line name (str). If the name can not be retrieved, then
the function returns None.
"""
if n_id < 1:
# Elements are numbered starting with 1. Element #0 does not exist.
# (Element #0 means that no element is selected)
return None
# This is the fixed list of element emission line names.
# The element with ID==1 is found in total_list[0]
total_list = self.param_model.get_user_peak_list()
try:
ename = total_list[n_id - 1]
except Exception:
ename = None
return ename
def _vertical_marker_set_inside_range(self, *, e_low=None, e_high=None):
"""
        Don't move the marker if it is inside the range. If it is outside the range,
        then set the marker to the center of the range.
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if e_high is None:
e_high = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
# By default, place the marker in the middle of the range if its original position
# is outside the range
if (self.vertical_marker_kev > e_high) or (self.vertical_marker_kev < e_low):
self.vertical_marker_kev = (e_low + e_high) / 2.0
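    # Build the list of (energy, relative intensity) tuples for the currently selected line:
    # a single entry at the marker position for user-defined peaks, otherwise the K, L or M
    # series of the element normalized to the strongest line of that series.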
def _fill_elist(self):
_elist = []
incident_energy = self.incident_energy
k_len = len(K_TRANSITIONS)
l_len = len(L_TRANSITIONS)
m_len = len(M_TRANSITIONS)
ename = self.get_element_line_name_by_id(self.element_id)
if ename is not None:
_elist = []
if ename.lower().startswith("userpeak"):
# Make sure that the marker is in the selected range of energies
self._vertical_marker_set_inside_range()
# The tuple structure: (center_energy, ratio)
_elist.append((self.vertical_marker_kev, 1.0))
elif "_K" in ename:
e = Element(ename[:-2])
if e.cs(incident_energy)["ka1"] != 0:
for i in range(k_len):
_elist.append(
(
e.emission_line.all[i][1],
e.cs(incident_energy).all[i][1] / e.cs(incident_energy).all[0][1],
)
)
elif "_L" in ename:
e = Element(ename[:-2])
if e.cs(incident_energy)["la1"] != 0:
for i in range(k_len, k_len + l_len):
_elist.append(
(
e.emission_line.all[i][1],
e.cs(incident_energy).all[i][1] / e.cs(incident_energy).all[k_len][1],
)
)
else:
e = Element(ename[:-2])
if e.cs(incident_energy)["ma1"] != 0:
for i in range(k_len + l_len, k_len + l_len + m_len):
_elist.append(
(
e.emission_line.all[i][1],
e.cs(incident_energy).all[i][1] / e.cs(incident_energy).all[k_len + l_len][1],
)
)
return _elist
def _get_pileup_lines(self, eline):
"""
        Returns the energy (center) of the pileup peak and the energies of its two components.
Parameters
----------
eline: str
Name of the pileup peak, e.g. V_Ka1-Co_Ka1
Returns
-------
list(float)
Energy in keV of pileup peak and two components
"""
try:
element_line1, element_line2 = eline.split("-")
e1_cen = get_eline_parameters(element_line1, self.incident_energy)["energy"]
e2_cen = get_eline_parameters(element_line2, self.incident_energy)["energy"]
en = [e1_cen + e2_cen, e1_cen, e2_cen]
except Exception:
en = []
return en
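    # Build the 'emission line' list for a pileup peak: the combined peak at full relative
    # intensity and its two components at a reduced (0.2) relative intensity.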
def _fill_elist_pileup(self, eline=None):
if eline is None:
eline = self.param_model.e_name
elist = []
energies = self._get_pileup_lines(eline)
if energies:
elist = list(zip(energies, [1, 0.2, 0.2]))
return elist
def _fill_elist_userpeak(self):
"""
Fill the list of 'emission lines' for user defined peak. There is only ONE
'emission line', with position determined by the location of the marker.
If the marker is not currently visible, then don't put any emission lines in the list.
The list is used during adding user-defined peaks.
"""
elist = []
energy, marker_visible = self.get_suggested_new_manual_peak_energy()
if marker_visible:
elist.append((energy, 1))
return elist
def _reset_eline_plot(self):
while len(self.eline_obj):
self.eline_obj.pop().remove()
self.elist = []
self._fig.canvas.draw()
@observe("element_id")
def set_element(self, change):
self._set_eline_select_controls(element_id=change["value"])
self.compute_manual_peak_intensity(n_id=change["value"])
if change["value"] == 0:
self._reset_eline_plot()
return
self.plot_current_eline()
def plot_current_eline(self, eline=None):
"""
        Plots emission lines for the selected peak based on ``self.element_id`` and the provided ``eline``.
"""
if eline is None:
eline = self.param_model.e_name
incident_energy = self.incident_energy
# Name of the emission line (if emission line is selected)
ename = self.get_element_line_name_by_id(self.element_id)
# Check if pileup peak is selected
is_pileup = self.param_model.get_eline_name_category(eline) == "pileup"
if (ename is not None) or is_pileup:
logger.debug(
"Plot emission line for element: "
"{} with incident energy {}".format(self.element_id, incident_energy)
)
if ename is not None:
self.elist = self._fill_elist()
elif is_pileup:
self.elist = self._fill_elist_pileup(eline)
else:
self.elist = [] # Just in case
self.plot_emission_line()
self._update_canvas()
# Do it the second time, since the 'self.elist' has changed
self.compute_manual_peak_intensity(n_id=self.element_id)
else:
self._reset_eline_plot()
logger.debug(f"Selected emission line with ID #{self.element_id} is not in the list.")
@observe("det_materials")
def _update_det_materials(self, change):
if change["value"] == 0:
self.escape_e = 1.73998
else:
self.escape_e = 9.88640
def change_escape_peak_settings(self, plot_escape_line, det_material):
self.plot_escape_line = plot_escape_line
self.det_materials = det_material
# Now update the displayed emission line
self.plot_emission_line()
self._update_canvas()
def plot_roi_bound(self):
"""
        Plot ROI with low, center and high values.
"""
for k, v in self.roi_plot_dict.items():
for data in v:
data.remove()
self.roi_plot_dict.clear()
if len(self.roi_dict):
# self._ax.hold(True)
for k, v in self.roi_dict.items():
temp_list = []
for linev in np.array([v.left_val, v.line_val, v.right_val]) / 1000.0:
(lineplot,) = self._ax.plot(
[linev, linev],
[0, 1 * self.max_v],
color=self.plot_style["roi_line"]["color"],
linewidth=self.plot_style["roi_line"]["linewidth"],
)
if v.show_plot:
lineplot.set_visible(True)
else:
lineplot.set_visible(False)
temp_list.append(lineplot)
self.roi_plot_dict.update({k: temp_list})
self._update_canvas()
@observe("roi_dict")
def show_roi_bound(self, change):
logger.debug("roi dict changed {}".format(change["value"]))
self.plot_roi_bound()
if len(self.roi_dict):
for k, v in self.roi_dict.items():
if v.show_plot:
for ln in self.roi_plot_dict[k]:
ln.set_visible(True)
else:
for ln in self.roi_plot_dict[k]:
ln.set_visible(False)
self._update_canvas()
def get_suggested_new_manual_peak_energy(self):
"""
Returns energy pointed by the vertical marker in keV and the status of the marker.
Returns
-------
float
Energy of the manual peak center in keV. The energy is determined
by vertical marker on the screen.
bool
True if the vertical marker is visible, otherwise False.
"""
energy = self.vertical_marker_kev
marker_visible = self.vertical_marker_is_visible
return energy, marker_visible
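    # Estimate the intensity of a manually added peak: locate the strongest line in 'elist'
    # that falls inside both the fitted and the raw energy ranges, and return the difference
    # between the raw and fitted spectra at that energy (or a small positive fallback value).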
def _compute_intensity(self, elist):
# Some default value
intensity = 1000.0
if (
self.io_model.data is not None
and self.param_model.param_new is not None
and self.param_model.prefit_x is not None
and self.param_model.total_y is not None
and len(self.io_model.data) > 1
and len(self.param_model.prefit_x) > 1
):
# Range of energies in fitting results
e_fit_min = self.param_model.prefit_x[0]
e_fit_max = self.param_model.prefit_x[-1]
de_fit = (e_fit_max - e_fit_min) / (len(self.param_model.prefit_x) - 1)
e_raw_min = self.param_model.param_new["e_offset"]["value"]
e_raw_max = (
self.param_model.param_new["e_offset"]["value"]
+ (len(self.io_model.data) - 1) * self.param_model.param_new["e_linear"]["value"]
+ (len(self.io_model.data) - 1) ** 2 * self.param_model.param_new["e_quadratic"]["value"]
)
de_raw = (e_raw_max - e_raw_min) / (len(self.io_model.data) - 1)
# Note: the above algorithm for finding 'de_raw' is far from perfect but will
# work for now. As a result 'de_fit' and
            # 'de_raw' == self.param_model.param_new['e_linear']['value'].
            # So the quadratic coefficient is ignored. This is OK, since currently
# quadratic coefficient is always ZERO. When the program is rewritten,
# the complete algorithm should be revised.
# Find the line with maximum energy. It must come first in the list,
# but let's check just to make sure
max_line_energy, max_line_intensity = 0, 0
if elist:
for e, i in elist:
# e - line peak energy
# i - peak intensity relative to maximum peak
if e >= e_fit_min and e <= e_fit_max and e > e_raw_min and e < e_raw_max:
if max_line_intensity < i:
max_line_energy, max_line_intensity = e, i
# Find the index of peak maximum in the 'fitted' data array
n = (max_line_energy - e_fit_min) / de_fit
n = np.clip(n, 0, len(self.param_model.total_y) - 1)
n_fit = int(round(n))
# Find the index of peak maximum in the 'raw' data array
n = (max_line_energy - e_raw_min) / de_raw
n = np.clip(n, 0, len(self.io_model.data) - 1)
n_raw = int(round(n))
# Intensity of the fitted data at the peak
in_fit = self.param_model.total_y[n_fit]
# Intensity of the raw data at the peak
in_raw = self.io_model.data[n_raw]
# The estimated peak intensity is the difference:
intensity = in_raw - in_fit
# The following step is questionable. We assign some reasonably small number.
# The desired value can always be manually entered
if intensity < 0.0:
intensity = abs(in_raw / 100)
return intensity
def compute_manual_peak_intensity(self, n_id=None):
if n_id is None:
n_id = self.element_id
# Check if the emission line is in the list of supported emission lines (e.g. Ca_K)
if not self.is_element_line_id_valid(n_id):
# This is not a supported emission line (n_id==0)
# This means we are probably dealing with user defined peak.
if self.is_line_in_selected_list(n_id):
# Display intensity if the peak is in the list.
name = self.get_element_line_name_by_id(n_id)
intensity = self.param_model.EC.element_dict[name].maxv
else:
elist = self._fill_elist_userpeak()
intensity = self._compute_intensity(elist)
else:
if self.is_line_in_selected_list(n_id):
# Display intensity if the peak is in the list.
name = self.get_element_line_name_by_id(n_id)
intensity = self.param_model.EC.element_dict[name].maxv
else:
# This is a new peak
elist = self._fill_elist()
intensity = self._compute_intensity(elist)
# Round the intensity for nicer printing
self.param_model.add_element_intensity = round(intensity, 2)
def plot_fit(self, fit_x, fit_y, fit_all, residual=None):
"""
Parameters
----------
fit_x : array
energy axis
fit_y : array
fitted spectrum
fit_all : dict
dict of individual line
residual : array
residual between fit and exp
"""
if fit_x is None or fit_y is None:
return
while len(self.plot_fit_obj):
self.plot_fit_obj.pop().remove()
(ln,) = self._ax.plot(
fit_x,
fit_y,
color=self.plot_style["fit"]["color"],
label=self.plot_style["fit"]["label"],
linewidth=self.plot_style["fit"]["linewidth"],
)
self.plot_fit_obj.append(ln)
if residual is not None:
# shiftv = 1.5 # move residual down by some amount
(ln,) = self._ax.plot(
fit_x,
residual - 0.15 * self.max_v, # shiftv*(np.max(np.abs(self.residual))),
label=self.plot_style["residual"]["label"],
color=self.plot_style["residual"]["color"],
)
self.plot_fit_obj.append(ln)
k_num = 0
l_num = 0
m_num = 0
p_num = 0
for k, v in fit_all.items():
if k == "background":
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["background"]["color"],
# marker=self.plot_style['background']['marker'],
# markersize=self.plot_style['background']['markersize'],
label=self.plot_style["background"]["label"],
)
self.plot_fit_obj.append(ln)
elif k == "compton":
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["compton"]["color"],
linewidth=self.plot_style["compton"]["linewidth"],
label=self.plot_style["compton"]["label"],
)
self.plot_fit_obj.append(ln)
elif k == "elastic":
(ln,) = self._ax.plot(
fit_x, v, color=self.plot_style["elastic"]["color"], label=self.plot_style["elastic"]["label"]
)
self.plot_fit_obj.append(ln)
elif k == "escape":
(ln,) = self._ax.plot(
fit_x, v, color=self.plot_style["escape"]["color"], label=self.plot_style["escape"]["label"]
)
self.plot_fit_obj.append(ln)
elif "user" in k.lower():
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["userpeak"]["color"],
label=self.plot_style["userpeak"]["label"],
)
self.plot_fit_obj.append(ln)
elif "-" in k: # Si_K-Si_K
if p_num == 0:
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["pileup"]["color"],
label=self.plot_style["pileup"]["label"],
)
else:
(ln,) = self._ax.plot(fit_x, v, color=self.plot_style["pileup"]["color"], label="_nolegend_")
self.plot_fit_obj.append(ln)
p_num += 1
elif ("_K" in k.upper()) and (len(k) <= 4):
if k_num == 0:
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["k_line"]["color"],
label=self.plot_style["k_line"]["label"],
)
else:
(ln,) = self._ax.plot(fit_x, v, color=self.plot_style["k_line"]["color"], label="_nolegend_")
self.plot_fit_obj.append(ln)
k_num += 1
elif ("_L" in k.upper()) and (len(k) <= 4):
if l_num == 0:
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["l_line"]["color"],
label=self.plot_style["l_line"]["label"],
)
else:
(ln,) = self._ax.plot(fit_x, v, color=self.plot_style["l_line"]["color"], label="_nolegend_")
self.plot_fit_obj.append(ln)
l_num += 1
elif ("_M" in k.upper()) and (len(k) <= 4):
if m_num == 0:
(ln,) = self._ax.plot(
fit_x,
v,
color=self.plot_style["m_line"]["color"],
label=self.plot_style["m_line"]["label"],
)
else:
(ln,) = self._ax.plot(fit_x, v, color=self.plot_style["m_line"]["color"], label="_nolegend_")
self.plot_fit_obj.append(ln)
m_num += 1
else:
pass
# self._update_canvas()
def canvas_onpress(self, event):
"""Callback, mouse button pressed"""
if self.t_bar.mode == "":
if event.inaxes == self._ax:
if event.button == 1:
xd = event.xdata
self.set_plot_vertical_marker(marker_position=xd, mouse_clicked=True)
else:
self.hide_plot_vertical_marker(mouse_clicked=True)
# ===========================================================
# Functions for plotting spectrum preview
def selected_range_indices(self, *, e_low=None, e_high=None, n_indexes=None, margin=2.0):
"""
The function computes the range of indices based on the selected energy range
and parameters for the energy axis.
Parameters
----------
e_low, e_high: float or None
Energy values (in keV) that set the selected range
n_indexes: int
Total number of indexes in the energy array (typically 4096)
margin: float
The displayed energy range is extended by the value of `margin` in both directions.
Returns
-------
n_low, n_high: int
The range of indices of the energy array (n_low..n_high-1) that cover the selected energy range
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if e_high is None:
e_high = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
# Protection for the case if e_high < e_low
e_high = e_high if e_high > e_low else e_low
# Extend the range (by the value of 'margin')
e_low, e_high = e_low - margin, e_high + margin
# The following calculations ignore quadratic term, which is expected to be small
c0 = self.param_model.param_new["e_offset"]["value"]
c1 = self.param_model.param_new["e_linear"]["value"]
        # If more precision is needed, then implement a more complicated algorithm using
# the quadratic term: c2 = self.param_model.param_new['e_quadratic']['value']
n_low = int(np.clip(int((e_low - c0) / c1), a_min=0, a_max=n_indexes - 1))
n_high = int(np.clip(int((e_high - c0) / c1) + 1, a_min=1, a_max=n_indexes))
return n_low, n_high
def _datasets_max_size(self, *, only_displayed=True):
"""
        Return the size (number of energy bins) of the longest available dataset. Datasets
        that contain no data are ignored.
Parameters
----------
only_displayed: bool
Limit search to the datasets that are going to be displayed
"""
max_size = 0
for dset in self.io_model.data_sets.values():
if not only_displayed or dset.selected_for_preview:
# Raw data shape: (n_rows, n_columns, n_energy_bins)
max_size = max(max_size, dset.get_raw_data_shape()[2])
return max_size
def plot_selected_energy_range(self, *, axes, barh_existing, e_low=None, e_high=None, n_points=4096):
"""
Plot the range of energies selected for processing. The range may be optionally
        provided as arguments. The range values that are not provided are read from
        the globally accessible dictionary of parameters. The values passed as arguments
are mainly used if the function is called during interactive update of the
range, when the order of update is undetermined and the parameter dictionary
may be updated after the function is called.
"""
# The range of energy selected for analysis
if e_low is None:
e_low = self.param_model.param_new["non_fitting_values"]["energy_bound_low"]["value"]
if e_high is None:
e_high = self.param_model.param_new["non_fitting_values"]["energy_bound_high"]["value"]
# Model coefficients for the energy axis
c0 = self.param_model.param_new["e_offset"]["value"]
c1 = self.param_model.param_new["e_linear"]["value"]
c2 = self.param_model.param_new["e_quadratic"]["value"]
# Generate the values for 'energy' axis
x_v = c0 + np.arange(n_points) * c1 + np.arange(n_points) ** 2 * c2
ss = (x_v < e_high + c1) & (x_v > e_low - c1)
# Trim both arrays to minimize the number of points
x_v = x_v[ss]
ss = ss[ss]
ss[0] = False
ss[-1] = False
# Negative values will work for semilog plot as well
y_min, y_max = -1e30, 1e30 # Select the max and min values for plotted rectangles
# Remove the plot if it exists
if barh_existing in axes.collections:
axes.collections.remove(barh_existing)
        # Create the new plot (based on new parameters if necessary)
barh_new = BrokenBarHCollection.span_where(
x_v, ymin=y_min, ymax=y_max, where=ss, facecolor="white", edgecolor="yellow", alpha=1
)
axes.add_collection(barh_new)
return barh_new
def prepare_preview_spectrum_plot(self):
if self._ax_preview:
self._ax_preview.clear()
else:
self._ax_preview = self._fig_preview.add_subplot(111)
self._ax_preview.set_facecolor("lightgrey")
self._ax_preview.grid(which="both")
self._fig_preview.set_visible(False)
def _show_preview_spectrum_plot(self):
# Completely redraw the plot each time the function is called
self.prepare_preview_spectrum_plot()
# Remove all lines from the plot
while len(self._lines_preview):
self._lines_preview.pop().remove()
# The list of color names
color_names = get_color_name()
e_range = self.energy_range_preview
e_range_supported = (EnergyRangePresets.SELECTED_RANGE, EnergyRangePresets.FULL_SPECTRUM)
if e_range not in e_range_supported:
logger.error(
f"Spectrum preview: Unknown option for the energy range: {e_range}\n"
"Please report the error to the development team."
)
# This is not a critical error, so we still can proceed
e_range = EnergyRangePresets.FULL_SPECTRUM
p_type = self.plot_type_preview
p_type_supported = (PlotTypes.LINLOG, PlotTypes.LINEAR)
if p_type not in p_type_supported:
logger.error(
f"Spectrum preview: Unknown option for the plot type: {p_type}\n"
"Please report the error to the development team."
)
p_type = PlotTypes.LINEAR
# Maximum number of points in the displayed dataset
n_dset_points = self._datasets_max_size()
if e_range == EnergyRangePresets.SELECTED_RANGE:
n_range_low, n_range_high = self.selected_range_indices(n_indexes=n_dset_points)
else:
n_range_low, n_range_high = 0, n_dset_points
        # All available datasets; only the selected datasets will be plotted
dset_names = list(self.io_model.data_sets.keys())
if p_type == PlotTypes.LINLOG:
top_margin_coef = 2.0
# Minimum for semilog plots may need to be computed, but 1.0 is good
self.min_v_preview = 1.0
self._ax_preview.set_yscale("log")
else:
top_margin_coef = 1.05
self.min_v_preview = 0.0 # Minimum will always be 0 for linear plots
self.max_v_preview = 1.0
self.min_e_preview = 1000.0 # Start with some large number
self.max_e_preview = 0.1 # Start with some small number
for n_line, dset_name in enumerate(dset_names):
dset = self.io_model.data_sets[dset_name]
# Select color (even if the dataset is not displayed). This is done in order
            # to ensure that each dataset is assigned a unique color.
color = color_names[n_line % len(color_names)]
if dset.selected_for_preview:
                data_arr = dset.get_total_spectrum()
                if data_arr is None:  # Just a precaution, it shouldn't happen
                    logger.error("Spectrum preview: attempting to plot an empty dataset.")
                    continue
                data_arr = np.asarray(data_arr)
# The assumption is that some datasets may have different length (which is
# currently not the case). So we have to take it into account when using
# maximum dataset length. This is essentially a safety precaution.
n_low = int(np.clip(n_range_low, a_min=0, a_max=data_arr.size - 1))
n_high = int(np.clip(n_range_high, a_min=1, a_max=data_arr.size))
# From now on we work with the trimmed data array
x_v = (
self.param_model.param_new["e_offset"]["value"]
+ np.arange(n_low, n_high) * self.param_model.param_new["e_linear"]["value"]
+ np.arange(n_low, n_high) ** 2 * self.param_model.param_new["e_quadratic"]["value"]
)
data_arr = data_arr[n_low:n_high]
self.max_v_preview = np.max(
[self.max_v_preview, np.max(data_arr[self.limit_cut : -self.limit_cut])]
)
self.max_e_preview = np.max([self.max_e_preview, x_v[-1]])
self.min_e_preview = np.min([self.min_e_preview, x_v[0]])
(line,) = self._ax_preview.plot(
x_v,
data_arr,
color=color,
label=dset.filename.split(".")[0],
linestyle=self.plot_style["experiment"]["linestyle"],
marker=self.plot_style["experiment"]["marker"],
)
self._lines_preview.append(line)
self._ax_preview.set_xlim(self.min_e_preview, self.max_e_preview)
self._ax_preview.set_ylim(self.min_v_preview, self.max_v_preview * top_margin_coef)
self._ax_preview.legend()
self._ax_preview.set_xlabel("Energy (keV)")
self._ax_preview.set_ylabel("Total Spectrum (Counts)")
self._fig_preview.set_visible(True)
# Reset navigation toolbar (specifically clear ZOOM history, since it becomes invalid
# when the new data is loaded, i.e. zooming out may not show the whole plot)
tb = self._fig_preview.canvas.toolbar
tb.update()
self._bahr_preview = self.plot_selected_energy_range(
axes=self._ax_preview, barh_existing=self._bahr_preview
)
def _hide_preview_spectrum_plot(
self,
):
self._fig_preview.set_visible(False)
def update_preview_spectrum_plot(self, *, hide=False):
"""
Update spectrum preview plot based on available/selected dataset and `hide` flag.
Parameters
----------
hide: bool
`False` - plot data if datasets are available and at least one dataset is selected,
otherwise hide the plot, `True` - hide the plot in any case
"""
# Find out if any data is selected
show_plot = False
if self.io_model.data_sets:
show_plot = any([_.selected_for_preview for _ in self.io_model.data_sets.values()])
logger.debug(f"LinePlotModel.update_preview_spectrum_plot(): show_plot={show_plot} hide={hide}")
if show_plot and not hide:
logger.debug("LinePlotModel.update_preview_spectrum_plot(): plotting existing datasets")
self._show_preview_spectrum_plot()
else:
logger.debug("LinePlotModel.update_preview_spectrum_plot(): hiding plots")
self._hide_preview_spectrum_plot()
self._fig_preview.canvas.draw()
# ===========================================================================================
# Plotting the preview of Total Count Maps
def clear_map_preview_range(self):
self.set_map_preview_range(low=-1, high=-1)
def set_map_preview_range(self, *, low, high):
self.map_preview_range_low = low
self.map_preview_range_high = high
def get_selected_datasets(self):
"""Returns the datasets selected for preview"""
return {k: v for (k, v) in self.io_model.data_sets.items() if v.selected_for_preview}
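    # Compute the global (min, max) of the total-count data over the given keys; the result is
    # used as the default color scale limits for the preview maps.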
def _compute_map_preview_range(self, img_dict, key_list):
range_min, range_max = None, None
for key in key_list:
data = img_dict[key]
v_min, v_max = np.min(data), np.max(data)
if range_min is None or range_max is None:
range_min, range_max = v_min, v_max
else:
range_min, range_max = min(range_min, v_min), max(range_max, v_max)
return range_min, range_max
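    # Render the total count maps of all selected datasets on an ImageGrid: one panel per
    # dataset with an individual colorbar, plotted either as an image vs. pixel indices or
    # x/y positions, or as a scatter plot, using a linear or logarithmic color scale.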
def _show_total_count_map_preview(self):
self._fig_maps.set_visible(True)
self._fig_maps.clf()
selected_dsets = self.get_selected_datasets()
data_for_plotting = {k: v.get_total_count() for (k, v) in selected_dsets.items()}
# Check if positions data is available. Positions data may be unavailable
        # (not recorded in the HDF5 file) if the experiment has not been completed.
        # While the data from the completed part of the experiment may still be used,
        # plots vs. x-y coordinates and scatter plots can not be displayed.
positions_data_available = False
if "positions" in self.io_model.img_dict.keys():
data_for_plotting["positions"] = self.io_model.img_dict["positions"]
positions_data_available = True
# Create local copies of self.pixel_or_pos, self.scatter_show and self.grid_interpolate
pixel_or_pos_local = self.map_axes_units_preview
scatter_show_local = self.map_scatter_plot
# Disable plotting vs x-y coordinates if 'positions' data is not available
if not positions_data_available:
if pixel_or_pos_local:
pixel_or_pos_local = MapAxesUnits.PIXELS # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Plotting vs. x-y coordinates is disabled")
if scatter_show_local:
scatter_show_local = False # Switch to plotting vs. pixel number
logger.error("'Positions' data is not available. Scatter plot is disabled.")
# low_lim = 1e-4 # define the low limit for log image
plot_interp = "Nearest"
grey_use = self.map_preview_color_scheme
ncol = int(np.ceil(np.sqrt(len(selected_dsets))))
try:
nrow = int(np.ceil(len(selected_dsets) / float(ncol)))
except ZeroDivisionError:
ncol = 1
nrow = 1
a_pad_v = 0.8
a_pad_h = 0.5
n_displayed_axes = ncol * nrow # Total number of axes in the grid
grid = ImageGrid(
self._fig_maps,
111,
nrows_ncols=(nrow, ncol),
axes_pad=(a_pad_v, a_pad_h),
cbar_location="right",
cbar_mode="each",
cbar_size="7%",
cbar_pad="2%",
share_all=True,
)
def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):
"""
Compute ranges for x- and y- axes of the plot. Make sure that the ranges for x- and y-axes are
always equal and fit the maximum of the ranges for x and y values:
max(abs(x_max-x_min), abs(y_max-y_min))
The ranges are set so that the data is always centered in the middle of the ranges
Parameters
----------
x_min, x_max, y_min, y_max : float
lower and upper boundaries of the x and y values
Returns
-------
x_axis_min, x_axis_max, y_axis_min, y_axis_max : float
lower and upper boundaries of the x- and y-axes ranges
"""
x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max
x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)
if x_range > y_range:
y_center = (y_max + y_min) / 2
y_axis_max = y_center + x_range / 2
y_axis_min = y_center - x_range / 2
else:
x_center = (x_max + x_min) / 2
x_axis_max = x_center + y_range / 2
x_axis_min = x_center - y_range / 2
return x_axis_min, x_axis_max, y_axis_min, y_axis_max
def _adjust_data_range_using_min_ratio(c_min, c_max, c_axis_range, *, min_ratio=0.01):
"""
Adjust the range for plotted data along one axis (x or y). The adjusted range is
            applied to the 'extent' attribute of imshow(). The adjusted range is always greater
            than 'axis_range * min_ratio'. Such a transformation has no physical meaning
            and is performed for aesthetic reasons: stretching the image presentation of
a scan with only a few lines (1-3) greatly improves visibility of data.
Parameters
----------
c_min, c_max : float
boundaries of the data range (along x or y axis)
c_axis_range : float
range presented along the same axis
Returns
-------
            c_min, c_max : float
adjusted boundaries of the data range
"""
c_range = c_max - c_min
if c_range < c_axis_range * min_ratio:
c_center = (c_max + c_min) / 2
c_new_range = c_axis_range * min_ratio
c_min = c_center - c_new_range / 2
c_max = c_center + c_new_range / 2
return c_min, c_max
# Hide the axes that are unused (they are unsightly)
for i in range(len(selected_dsets), n_displayed_axes):
grid[i].set_visible(False)
grid.cbar_axes[i].set_visible(False)
for i, (k, v) in enumerate(selected_dsets.items()):
data_arr = data_for_plotting[k]
if pixel_or_pos_local == MapAxesUnits.POSITIONS or scatter_show_local:
# xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
# min(self.y_pos), max(self.y_pos)
x_pos_2D = data_for_plotting["positions"]["x_pos"]
y_pos_2D = data_for_plotting["positions"]["y_pos"]
xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(), y_pos_2D.min(), y_pos_2D.max()
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = _compute_equal_axes_ranges(
xd_min, xd_max, yd_min, yd_max
)
xd_min, xd_max = _adjust_data_range_using_min_ratio(xd_min, xd_max, xd_axis_max - xd_axis_min)
yd_min, yd_max = _adjust_data_range_using_min_ratio(yd_min, yd_max, yd_axis_max - yd_axis_min)
# Adjust the direction of each axis depending on the direction in which encoder values changed
# during the experiment. Data is plotted starting from the upper-right corner of the plot
if x_pos_2D[0, 0] > x_pos_2D[0, -1]:
xd_min, xd_max, xd_axis_min, xd_axis_max = xd_max, xd_min, xd_axis_max, xd_axis_min
if y_pos_2D[0, 0] > y_pos_2D[-1, 0]:
yd_min, yd_max, yd_axis_min, yd_axis_max = yd_max, yd_min, yd_axis_max, yd_axis_min
else:
yd, xd = data_arr.shape
xd_min, xd_max, yd_min, yd_max = 0, xd, 0, yd
if (yd <= math.floor(xd / 100)) and (xd >= 200):
yd_min, yd_max = -math.floor(xd / 200), math.ceil(xd / 200)
if (xd <= math.floor(yd / 100)) and (yd >= 200):
xd_min, xd_max = -math.floor(yd / 200), math.ceil(yd / 200)
xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = _compute_equal_axes_ranges(
xd_min, xd_max, yd_min, yd_max
)
# Compute range for data values
low_limit = self.map_preview_range_low
high_limit = self.map_preview_range_high
# If limit is not set, then compute the limit based on the selected datasets.
# It is assumed that at least one dataset is selected.
if low_limit == -1 and high_limit == -1:
low_limit, high_limit = self._compute_map_preview_range(data_for_plotting, selected_dsets.keys())
if low_limit is None or high_limit is None:
                    low_limit, high_limit = 0, 1  # Fallback range used if the limits could not be computed
# Set some minimum range for the colorbar (otherwise it will have white fill)
if math.isclose(low_limit, high_limit, abs_tol=2e-20):
if abs(low_limit) < 1e-20: # The value is zero
dv = 1e-20
else:
dv = math.fabs(low_limit * 0.01)
high_limit += dv
low_limit -= dv
if self.map_type_preview == MapTypes.LINEAR:
if not scatter_show_local:
im = grid[i].imshow(
data_arr,
cmap=grey_use,
interpolation=plot_interp,
extent=(xd_min, xd_max, yd_max, yd_min),
origin="upper",
clim=(low_limit, high_limit),
)
grid[i].set_ylim(yd_axis_max, yd_axis_min)
else:
xx = self.io_model.img_dict["positions"]["x_pos"]
yy = self.io_model.img_dict["positions"]["y_pos"]
# The following condition prevents crash if different file is loaded while
# the scatter plot is open (PyXRF specific issue)
if data_arr.shape == xx.shape and data_arr.shape == yy.shape:
im = grid[i].scatter(
xx,
yy,
c=data_arr,
marker="s",
s=500,
alpha=1.0, # Originally: alpha=0.8
cmap=grey_use,
vmin=low_limit,
vmax=high_limit,
linewidths=1,
linewidth=0,
)
grid[i].set_ylim(yd_axis_max, yd_axis_min)
grid[i].set_xlim(xd_axis_min, xd_axis_max)
grid_title = k
# Display only the channel name (e.g. 'sum', 'det1' etc.)
grid_title = grid_title.split("_")[-1]
grid[i].text(0, 1.01, grid_title, ha="left", va="bottom", transform=grid[i].axes.transAxes)
grid.cbar_axes[i].colorbar(im)
im.colorbar.formatter = im.colorbar.ax.yaxis.get_major_formatter()
# im.colorbar.ax.get_xaxis().set_ticks([])
# im.colorbar.ax.get_xaxis().set_ticks([], minor=True)
grid.cbar_axes[i].ticklabel_format(style="sci", scilimits=(-3, 4), axis="both")
else:
# maxz = np.max(data_arr)
# # Set some reasonable minimum range for the colorbar
# # Zeros or negative numbers will be shown in white
# if maxz <= 1e-30:
# maxz = 1
if not scatter_show_local:
im = grid[i].imshow(
data_arr,
# norm=LogNorm(vmin=low_lim*maxz,
# vmax=maxz, clip=True),
norm=LogNorm(vmin=low_limit, vmax=high_limit, clip=True),
cmap=grey_use,
interpolation=plot_interp,
extent=(xd_min, xd_max, yd_max, yd_min),
origin="upper",
# clim=(low_lim*maxz, maxz))
clim=(low_limit, high_limit),
)
grid[i].set_ylim(yd_axis_max, yd_axis_min)
else:
im = grid[i].scatter(
self.io_model.img_dict["positions"]["x_pos"],
self.io_model.img_dict["positions"]["y_pos"],
# norm=LogNorm(vmin=low_lim*maxz,
# vmax=maxz, clip=True),
norm=LogNorm(vmin=low_limit, vmax=high_limit, clip=True),
c=data_arr,
marker="s",
s=500,
alpha=1.0, # Originally: alpha=0.8
cmap=grey_use,
linewidths=1,
linewidth=0,
)
grid[i].set_ylim(yd_axis_min, yd_axis_max)
grid[i].set_xlim(xd_axis_min, xd_axis_max)
grid_title = k
# Display only the channel name (e.g. 'sum', 'det1' etc.)
grid_title = grid_title.split("_")[-1]
grid[i].text(0, 1.01, grid_title, ha="left", va="bottom", transform=grid[i].axes.transAxes)
grid.cbar_axes[i].colorbar(im)
im.colorbar.formatter = im.colorbar.ax.yaxis.get_major_formatter()
im.colorbar.ax.get_xaxis().set_ticks([])
im.colorbar.ax.get_xaxis().set_ticks([], minor=True)
im.colorbar.ax.yaxis.set_minor_formatter(mticker.LogFormatter())
grid[i].get_xaxis().set_major_locator(mticker.MaxNLocator(nbins="auto"))
grid[i].get_yaxis().set_major_locator(mticker.MaxNLocator(nbins="auto"))
grid[i].get_xaxis().get_major_formatter().set_useOffset(False)
grid[i].get_yaxis().get_major_formatter().set_useOffset(False)
self._fig_maps.canvas.draw_idle()
def _hide_total_count_map_preview(self):
self._fig_maps.set_visible(False)
def update_total_count_map_preview(self, *, hide=False, new_plot=False):
"""
Update total count map preview based on available/selected dataset and `hide` flag.
Parameters
----------
hide: bool
            `False` - plot data if datasets are available and at least one dataset is selected,
            otherwise hide the plot, `True` - hide the plot in any case
new_plot: bool
`True` - plotting new data that was just loaded, reset the plot settings
"""
if new_plot and not hide:
# Clear the displayed data range. The range will be computed based on the available data.
self.clear_map_preview_range()
# Find out if any data is selected
show_plot = False
if self.io_model.data_sets:
show_plot = any([_.selected_for_preview for _ in self.io_model.data_sets.values()])
logger.debug(f"LinePlotModel.update_total_count_map_preview(): show_plot={show_plot} hide={hide}")
if show_plot and not hide:
logger.debug("LinePlotModel.update_total_count_map_preview(): plotting existing datasets")
self._show_total_count_map_preview()
else:
logger.debug("LinePlotModel.update_total_count_map_preview(): hiding plots")
self._hide_total_count_map_preview()
self._fig_maps.canvas.draw()
| {
"repo_name": "NSLS-II/PyXRF",
"path": "pyxrf/model/lineplot.py",
"copies": "1",
"size": "78757",
"license": "bsd-3-clause",
"hash": -7027540810785161000,
"line_mean": 38.4179179179,
"line_max": 114,
"alpha_frac": 0.5402186472,
"autogenerated": false,
"ratio": 3.8225986506819396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48628172978819395,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import math
import os
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import root_pandas
from uncertainties import ufloat
from histograms import histogram
from plotting_utilities import (
add_si_formatter,
COLOURS as colours
)
PREFIX = 'root://eoslhcb.cern.ch//eos/lhcb/user/a/apearce/CharmProduction/2015_MagDown' # noqa
FNAME = 'DVntuple.root'
DATA_PATHS = [
os.path.join(PREFIX, str(idx), FNAME)
for idx in range(0, 93)
# File 79 only has a DsTopipipi tree
if idx != 79
]
D0ToKpi = 'D0ToKpi'
DpToKpipi = 'DpToKpipi'
DsToKKpi = 'DsToKKpi'
DstToD0pi = 'DstToD0pi_D0ToKpi'
MODES = [
D0ToKpi,
DpToKpipi,
DsToKKpi,
DstToD0pi
]
CHILDREN = {
D0ToKpi: 'K^{-}\pi^{+}',
DpToKpipi: 'K^{-}\pi^{+}\pi^{+}',
DsToKKpi: 'K^{-}K^{+}\pi^{+}',
DstToD0pi: 'K^{-}\pi^{+}\pi^{+}_{\mathrm{Soft}}'
}
MASS_RANGES = {
D0ToKpi: (1800, 1930),
DpToKpipi: (1805, 1935),
DsToKKpi: (1900, 2040),
DstToD0pi: (1950, 2080)
}
PDG_MASS = {
D0ToKpi: 1864.84,
DstToD0pi: 2010.26,
DpToKpipi: 1869.61,
DsToKKpi: 1968.30
}
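# Plot the invariant mass distribution for the given decay mode (and, for the D* mode, the
# D*-D0 mass difference), optionally overlaying the signal window and sideband regions,
# and return the number of candidates in the ntuple.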
def mass_distributions(mode, offline_selection, regions=False):
if mode == 'DsToKKpi':
mmode = 'DsTophipi'
else:
mmode = mode
if offline_selection:
paths = ['~/Physics/CharmProduction/output/{0}/2015/MagDown/DVntuple_Real.root'.format(mmode)] # noqa
tree = 'Tuple{0}/DecayTree'.format(mmode)
else:
paths = DATA_PATHS
tree = 'Tuple{0}/DecayTree'.format(mode)
if mode == DstToD0pi:
paths = [p for p in paths if '67' not in p]
mass_name = mode.split('To')[0] + '_M'
columns = [mass_name]
if mode == DstToD0pi:
columns += ['D0_M', 'Dst_delta_M']
df = root_pandas.read_root(paths, key=tree, columns=columns)
# Each mode ntuple has a different name for the parent mass, so normalise
if mode == DstToD0pi:
df.columns = ['M', 'DzM', 'DM']
else:
df.columns = ['M']
m_min, m_max = MASS_RANGES[mode]
# 1 bin per MeV
nbins = int(m_max - m_min)
fig, ax = histogram(df.M, range=(m_min, m_max), bins=nbins)
if regions and mode != DstToD0pi:
nominal_m = PDG_MASS[mode]
signal_window = patches.Rectangle(
(nominal_m - 20, 0), 40, ax.get_ylim()[1],
facecolor=colours.blue, edgecolor='none', alpha=0.25
)
sideband_lo = patches.Rectangle(
(nominal_m - 60, 0), 20, ax.get_ylim()[1],
facecolor=colours.red, edgecolor='none', alpha=0.25
)
sideband_hi = patches.Rectangle(
(nominal_m + 40, 0), 20, ax.get_ylim()[1],
facecolor=colours.red, edgecolor='none', alpha=0.25
)
ax.add_patch(signal_window)
ax.add_patch(sideband_lo)
ax.add_patch(sideband_hi)
ax.set_ylabel(r'Candidates / ($1\,\mathrm{MeV}/c^{2}$)')
ax.set_xlabel(r'$m(' + CHILDREN[mode] + ')$ [$\mathrm{MeV}/c^{2}$]')
add_si_formatter(ax, xaxis=False)
figname = '{0}_mass'.format(mode)
if offline_selection:
figname += '_offline_selection'
if regions:
figname += '_regions'
fig.savefig('output/{0}.pdf'.format(figname))
if mode == DstToD0pi:
nominal_dz = PDG_MASS[D0ToKpi]
dz_window = ((nominal_dz - 20) < df.DzM) & (df.DzM < (nominal_dz + 20))
fig, ax = histogram(df.DM[dz_window], range=(139, 155), bins=320)
ax.set_ylabel(r'Candidates / ($0.05\,\mathrm{MeV}/c^{2}$)')
ax.set_xlabel((
r'$m(' + CHILDREN[mode] + r') - m(' + CHILDREN[D0ToKpi] + r')$ '
r'[$\mathrm{MeV}/c^{2}$]'
))
add_si_formatter(ax, xaxis=False)
if regions:
nominal_dm = PDG_MASS[DstToD0pi] - nominal_dz
signal_window = patches.Rectangle(
(nominal_dm - 3, 0), 6, ax.get_ylim()[1],
facecolor=colours.blue, edgecolor='none', alpha=0.25
)
sideband_hi = patches.Rectangle(
(nominal_dm + 4.5, 0), 4.5, ax.get_ylim()[1],
facecolor=colours.red, edgecolor='none', alpha=0.25
)
ax.add_patch(signal_window)
ax.add_patch(sideband_hi)
figname = '{0}_delta_mass'.format(mode)
if offline_selection:
figname += '_offline_selection'
if regions:
figname += '_regions'
fig.savefig('output/{0}.pdf'.format(figname))
return df.index.size
MODES = [DstToD0pi]
for mode in MODES:
nonline = mass_distributions(mode, offline_selection=False)
noffline = mass_distributions(mode, offline_selection=True)
mass_distributions(mode, offline_selection=True, regions=True)
nonline = ufloat(nonline, math.sqrt(nonline))
noffline = ufloat(noffline, math.sqrt(noffline))
eff = 100*noffline/nonline
print(r'\{0} & {1:.0f} \pm {2:.0f} & {3:.0f} \pm {4:.0f} & {5:.3f} \pm {6:.3f} \\'.format( # noqa
mode,
nonline.nominal_value, nonline.std_dev,
noffline.nominal_value, noffline.std_dev,
eff.nominal_value, eff.std_dev
))
| {
"repo_name": "alexpearce/thesis",
"path": "scripts/production_mass_distributions.py",
"copies": "1",
"size": "5178",
"license": "mit",
"hash": -2863272786539661300,
"line_mean": 31.3625,
"line_max": 110,
"alpha_frac": 0.5809192739,
"autogenerated": false,
"ratio": 2.8034650785056847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8878572222097516,
"avg_score": 0.0011624260616336164,
"num_lines": 160
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from astropy.io import fits
import example_helpers
import drms
# Series name, carrington rotation and data segment
series = 'hmi.synoptic_mr_720s'
cr = 2150
segname = 'synopMr'
# DRMS-Server URL (or shortcut) and data url (if any) for the data segment
drms_url, data_url = 'jsoc', 'http://jsoc.stanford.edu'
#drms_url, data_url = 'kis', ''
# DRMS query string
qstr = '%s[%s]' % (series, cr)
# Create DRMS JSON client, use debug=True to see the query URLs
c = drms.Client(drms_url)
# Send request to the DRMS server
print('Querying keyword data...\n -> %s' % qstr)
k, s = c.query(qstr, key=drms.const.all, seg=segname)
print(' -> %d lines retrieved.' % len(k))
# Use only the first line of the query result
k = k.iloc[0]
fname = data_url + s[segname][0]
# Read the data segment
# Note: HTTP downloads get cached in ~/.astropy/cache/downloads
print('Reading data from %r...' % fname)
a = fits.getdata(fname)
ny, nx = a.shape
# Convert pixel to world coordinates using WCS keywords
xmin = (1 - k.CRPIX1)*k.CDELT1 + k.CRVAL1
xmax = (nx - k.CRPIX1)*k.CDELT1 + k.CRVAL1
ymin = (1 - k.CRPIX2)*k.CDELT2 + k.CRVAL2
ymax = (ny - k.CRPIX2)*k.CDELT2 + k.CRVAL2
# Convert to Carrington longitude
xmin = k.LON_LAST - xmin
xmax = k.LON_LAST - xmax
# Compute the plot extent used with imshow
extent = (xmin - abs(k.CDELT1)/2, xmax + abs(k.CDELT1)/2,
ymin - abs(k.CDELT2)/2, ymax + abs(k.CDELT2)/2)
# Aspect ratio for imshow in respect to the extent computed above
aspect = abs((xmax - xmin)/nx * ny/(ymax - ymin))
# Create plot
fig, ax = plt.subplots(1, 1, figsize=(13.5, 6))
ax.set_title('%s, Time: %s ... %s' % (qstr, k.T_START, k.T_STOP),
fontsize='medium')
ax.imshow(a, vmin=-300, vmax=300, origin='lower', interpolation='nearest',
cmap='gray', extent=extent, aspect=aspect)
ax.invert_xaxis()
ax.set_xlabel('Carrington longitude')
ax.set_ylabel('Sine latitude')
fig.tight_layout()
plt.show()
| {
"repo_name": "kbg/drms",
"path": "examples/plot_synoptic_mr.py",
"copies": "1",
"size": "2025",
"license": "mit",
"hash": 287313204386167360,
"line_mean": 29.223880597,
"line_max": 74,
"alpha_frac": 0.6790123457,
"autogenerated": false,
"ratio": 2.721774193548387,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3900786539248387,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import example_helpers
import drms
import pandas
pandas_version = tuple(map(int, pandas.__version__.split('.')[:2]))
if pandas_version >= (0, 22):
    # Since pandas v0.22, we need to explicitly register matplotlib
# converters to use pandas.Timestamp objects in plots.
pandas.plotting.register_matplotlib_converters()
# Series name and timespan
series = 'hmi.ic_720s'
#tsel = '2014.01.01_TAI/365d@1h'
tsel = '2010.05.01_TAI-2016.04.01_TAI@6h'
# DRMS query string
qstr = '%s[%s]' % (series, tsel)
# Create DRMS JSON client, use debug=True to see the query URLs
c = drms.Client()
# Send request to the DRMS server
print('Querying keyword data...\n -> %s' % qstr)
res = c.query(qstr, key=['T_REC', 'DATAMEAN', 'DATARMS'])
print(' -> %d lines retrieved.' % len(res))
# Convert T_REC strings to datetime and use it as index for the series
res.index = drms.to_datetime(res.pop('T_REC'))
# Note: DATARMS contains the standard deviation, not the RMS!
t = res.index
avg = res.DATAMEAN/1e3
std = res.DATARMS/1e3
# Create plot
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.set_title(qstr, fontsize='medium')
ax.fill_between(
t, avg+std, avg-std, edgecolor='none', facecolor='b', alpha=0.3,
interpolate=True)
ax.plot(t, avg, color='b')
ax.set_xlabel('Time')
ax.set_ylabel('Disk-averaged continuum intensity [kDN/s]')
fig.tight_layout()
plt.show()
| {
"repo_name": "kbg/drms",
"path": "examples/plot_hmi_lightcurve.py",
"copies": "1",
"size": "1464",
"license": "mit",
"hash": 6326022462679325000,
"line_mean": 28.28,
"line_max": 70,
"alpha_frac": 0.6987704918,
"autogenerated": false,
"ratio": 2.853801169590643,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40525716613906426,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import example_helpers
import drms
# Series name, timespan and wavelength
series = 'aia.lev1_euv_12s'
series_lev1 = 'aia.lev1'
wavelen = 335
#tsel = '2015-01-01T00:00:01Z/1h'
#tsel = '2015-01-01T00:00:01Z/1d'
#tsel = '2015-01-01T00:00:01Z/1d@60s'
#tsel = '2015-01-01T00:00:01Z/7d@1h'
#tsel = '2015-01-01T00:00:01Z/30d@6h'
#tsel = '2015-01-01T00:00:01Z/100d@1d'
tsel = '2014-01-01T00:00:01Z/365d@1d'
# DRMS query string
qstr = '%s[%s][%d]' % (series, tsel, wavelen)
# Some keywords we are interested in; you can use c.keys(series) to get a
# list of all available keywords of a series.
keys = ['T_REC', 'T_OBS', 'DATAMIN', 'DATAMAX', 'DATAMEAN', 'DATARMS',
'DATASKEW', 'DATAKURT', 'QUALITY']
# Create DRMS client, uses JSOC baseurl by default, set debug=True to see the
# DRMS query URLs.
c = drms.Client(debug=False)
# Get detailed information about the series. Some keywords from
# aia.lev1_euv_12s are links to keywords in aia.lev1 and unfortunately some
# entries (like note) are missing for linked keywords, so we are using the
# entries from aia.lev1 in this case.
print('Querying series info...')
si = c.info(series)
si_lev1 = c.info(series_lev1)
for k in keys:
linkinfo = si.keywords.loc[k].linkinfo
if linkinfo is not None and linkinfo.startswith('lev1->'):
note_str = si_lev1.keywords.loc[k].note
else:
note_str = si.keywords.loc[k].note
print('%10s : %s' % (k, note_str))
# Get keyword values for the selected timespan and wavelength
print('Querying keyword data...\n -> %s' % qstr)
res = c.query(qstr, key=keys)
print(' -> %d lines retrieved.' % len(res))
# Only use entries with QUALITY==0
res = res[res.QUALITY == 0]
print(' -> %d lines after QUALITY selection.' % len(res))
# Convert T_REC strings to datetime and use it as index for the series
res.index = drms.to_datetime(res.T_REC)
# Create some simple plots
ax = res[['DATAMIN', 'DATAMAX', 'DATAMEAN', 'DATARMS', 'DATASKEW']].plot(
figsize=(8, 10), subplots=True)
ax[0].set_title(qstr, fontsize='medium')
plt.tight_layout()
plt.show()
| {
"repo_name": "kbg/drms",
"path": "examples/plot_aia_ligthcurve.py",
"copies": "1",
"size": "2154",
"license": "mit",
"hash": -5073150351274399000,
"line_mean": 32.65625,
"line_max": 77,
"alpha_frac": 0.6894150418,
"autogenerated": false,
"ratio": 2.6494464944649447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3838861536264945,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
import example_helpers
import drms
# Series name, start time and data segment
series = 'hmi.v_sht_modes'
tstart = '2014.06.20_00:00:00_TAI'
segname = 'm6' # 'm6', 'm18' or 'm36'
# DRMS-Server URL (or shortcut) and data url (if any) for the data segment
drms_url, data_url = 'jsoc', 'http://jsoc.stanford.edu'
#drms_url, data_url = 'kis', ''
# DRMS query string
qstr = '%s[%s]' % (series, tstart)
# Create DRMS JSON client, use debug=True to see the query URLs
c = drms.Client(drms_url)
# Send request to the DRMS server
print('Querying keyword data...\n -> %s' % qstr)
k, s = c.query(
qstr, key=['T_START', 'T_STOP', 'LMIN', 'LMAX', 'NDT'], seg=segname)
print(' -> %d lines retrieved.' % len(k))
# Use only the first line of the query result
k = k.iloc[0]
fname = data_url + s[segname][0]
# Read the data segment
print('Reading data from %r...' % fname)
a = np.genfromtxt(fname)
# For column names, see appendix of Larson & Schou (2015SoPh..290.3221L)
l = a[:, 0].astype(int)
n = a[:, 1].astype(int)
nu = a[:, 2]/1e3
if a.shape[1] in [24, 48, 84]:
# tan(gamma) present
sig_offs = 5
elif a.shape[1] in [26, 50, 86]:
# tan(gamma) not present
sig_offs = 6
snu = a[:, sig_offs + 2]/1e3
# Plot: zoomed in on lower l
fig, ax = plt.subplots(1, 1, figsize=(11, 7))
ax.set_title('Time = %s ... %s, L = %d ... %d, NDT = %d' % (
k.T_START, k.T_STOP, k.LMIN, k.LMAX, k.NDT), fontsize='medium')
for ni in np.unique(n):
idx = (n == ni)
ax.plot(l[idx], nu[idx], 'b.-')
ax.set_xlim(0, 120)
ax.set_ylim(0.8, 4.5)
ax.set_xlabel('Harmonic degree')
ax.set_ylabel('Frequency [mHz]')
fig.tight_layout()
# Plot: higher l, n <= 20, with errors
fig, ax = plt.subplots(1, 1, figsize=(11, 7))
ax.set_title('Time = %s ... %s, L = %d ... %d, NDT = %d' % (
k.T_START, k.T_STOP, k.LMIN, k.LMAX, k.NDT), fontsize='medium')
for ni in np.unique(n):
if ni <= 20:
idx = (n == ni)
ax.plot(l[idx], nu[idx], 'b.', ms=3)
if ni < 10:
ax.plot(l[idx], nu[idx] + 1000*snu[idx], 'g')
ax.plot(l[idx], nu[idx] - 1000*snu[idx], 'g')
else:
ax.plot(l[idx], nu[idx] + 500*snu[idx], 'r')
ax.plot(l[idx], nu[idx] - 500*snu[idx], 'r')
ax.legend(loc='upper right', handles=[
plt.Line2D([0], [0], color='r', label='500 sigma'),
plt.Line2D([0], [0], color='g', label='1000 sigma')])
ax.set_xlim(-5, 305)
ax.set_ylim(0.8, 4.5)
ax.set_xlabel('Harmonic degree')
ax.set_ylabel('Frequency [mHz]')
fig.tight_layout()
plt.show()
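# --- Optional summary (a minimal sketch, not part of the original example) ---
# Assuming `l`, `n` and `nu` are the mode arrays read above, print how many
# modes were fitted for each radial order and their frequency range in mHz.
for ni in np.unique(n):
    idx = (n == ni)
    print('n = %2d : %4d modes, nu = %.3f ... %.3f mHz'
          % (ni, idx.sum(), nu[idx].min(), nu[idx].max()))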
| {
"repo_name": "kbg/drms",
"path": "examples/plot_hmi_modes.py",
"copies": "1",
"size": "2623",
"license": "mit",
"hash": -8908345370386376000,
"line_mean": 29.5,
"line_max": 74,
"alpha_frac": 0.5943576058,
"autogenerated": false,
"ratio": 2.4675446848541864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8557662949568915,
"avg_score": 0.0008478682170542634,
"num_lines": 86
} |
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import example_helpers
import drms
pandas_version = tuple(map(int, pd.__version__.split('.')[:2]))
if pandas_version >= (0, 22):
    # Since pandas v0.22, we need to explicitly register matplotlib
# converters to use pandas.Timestamp objects in plots.
pd.plotting.register_matplotlib_converters()
# Series name, time range and time steps
series = 'hmi.meanpf_720s'
tsel = '2010.05.01_TAI-2016.04.01_TAI@12h'
# DRMS query string
qstr = '%s[%s]' % (series, tsel)
# Create DRMS JSON client, use debug=True to see the query URLs
c = drms.Client()
# Send request to the DRMS server
print('Querying keyword data...\n -> %s' % qstr)
res = c.query(qstr, key=['T_REC', 'CAPN2', 'CAPS2'])
print(' -> %d lines retrieved.' % len(res))
# Convert T_REC strings to datetime and use it as index for the series
res.index = drms.to_datetime(res.pop('T_REC'))
# Determine smallest timestep
dt = np.diff(res.index.to_pydatetime()).min()
# Make sure the time series contains all time steps (fills gaps with NaNs)
# Note: This does not seem to work with old pandas versions (e.g. v0.14.1)
a = res.asfreq(dt)
# Compute 30d moving average and standard deviation using a boxcar window
win_size = int(30*24*3600/dt.total_seconds())
if pandas_version >= (0, 18):
a_avg = a.rolling(win_size, min_periods=1, center=True).mean()
a_std = a.rolling(win_size, min_periods=1, center=True).std()
else:
# this is deprecated since pandas v0.18.0
a_avg = pd.rolling_mean(a, win_size, min_periods=1, center=True)
a_std = pd.rolling_std(a, win_size, min_periods=1, center=True)
# Plot results
t = a.index.to_pydatetime()
n, mn, sn = a.CAPN2, a_avg.CAPN2, a_std.CAPN2
s, ms, ss = a.CAPS2, a_avg.CAPS2, a_std.CAPS2
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.set_title(qstr, fontsize='medium')
ax.plot(t, n, 'b', alpha=0.5, label='North pole')
ax.plot(t, s, 'g', alpha=0.5, label='South pole')
ax.plot(t, mn, 'r', label='Moving average')
ax.plot(t, ms, 'r', label='')
ax.set_xlabel('Time')
ax.set_ylabel('Mean radial field strength [G]')
ax.legend()
fig.tight_layout()
fig, ax = plt.subplots(1, 1, figsize=(15, 7))
ax.set_title(qstr, fontsize='medium')
ax.fill_between(
t, mn-sn, mn+sn, edgecolor='none', facecolor='b', alpha=0.3,
interpolate=True)
ax.fill_between(
t, ms-ss, ms+ss, edgecolor='none', facecolor='g', alpha=0.3,
interpolate=True)
ax.plot(t, mn, 'b', label='North pole')
ax.plot(t, ms, 'g', label='South pole')
ax.set_xlabel('Time')
ax.set_ylabel('Mean radial field strength [G]')
ax.legend()
fig.tight_layout()
plt.show()
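# --- Optional extension (a minimal sketch, not part of the original example) ---
# Assuming `a_avg` and `t` are still defined from above, plot the smoothed
# difference between the two polar field keywords; this panel is illustrative
# and not part of the original example.
fig, ax = plt.subplots(1, 1, figsize=(15, 4))
ax.set_title('%s (smoothed CAPN2 - CAPS2)' % qstr, fontsize='medium')
ax.plot(t, a_avg.CAPN2 - a_avg.CAPS2, 'k')
ax.axhline(0, color='gray', lw=0.5)
ax.set_xlabel('Time')
ax.set_ylabel('Field strength difference [G]')
fig.tight_layout()
plt.show()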
| {
"repo_name": "kbg/drms",
"path": "examples/plot_polarfield.py",
"copies": "1",
"size": "2696",
"license": "mit",
"hash": -5588759549441655000,
"line_mean": 31.8780487805,
"line_max": 74,
"alpha_frac": 0.6821216617,
"autogenerated": false,
"ratio": 2.734279918864097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3916401580564097,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numbers
from datetime import date, datetime
import toolz
from toolz import first
from ..compatibility import basestring
from ..expr import Expr, Symbol, eval_str, Union
from ..dispatch import dispatch
__all__ = ['compute', 'compute_up']
base = (numbers.Real, basestring, date, datetime)
@dispatch(object, object)
def compute_up(a, b, **kwargs):
raise NotImplementedError("Blaze does not know how to compute "
"expression of type `%s` on data of type `%s`"
% (type(a).__name__, type(b).__name__))
@dispatch(base)
def compute_up(a, **kwargs):
return a
@dispatch((list, tuple))
def compute_up(seq, scope={}, **kwargs):
return type(seq)(compute(item, scope, **kwargs) for item in seq)
@dispatch(Expr, object)
def compute(expr, o, **kwargs):
""" Compute against single input
Assumes that only one Symbol exists in expression
>>> t = Symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> # list(compute(deadbeats, {t: data}))
>>> list(compute(deadbeats, data))
['Bob', 'Charlie']
"""
ts = set([x for x in expr._subterms() if isinstance(x, Symbol)])
if len(ts) == 1:
return compute(expr, {first(ts): o}, **kwargs)
else:
raise ValueError("Give compute dictionary input, got %s" % str(o))
@dispatch(object)
def compute_down(expr):
""" Compute the expression on the entire inputs
inputs match up to leaves of the expression
"""
return expr
def top_to_bottom(d, expr, **kwargs):
""" Processes an expression top-down then bottom-up """
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
# See if we have a direct computation path
if (hasattr(expr, '_leaves') and compute_down.resolve(
(type(expr),) + tuple([type(d.get(leaf)) for leaf in expr._leaves()]))):
leaves = [d[leaf] for leaf in expr._leaves()]
try:
return compute_down(expr, *leaves, **kwargs)
except NotImplementedError:
pass
# Otherwise...
# Compute children of this expression
children = ([top_to_bottom(d, child, **kwargs)
for child in expr._inputs]
if hasattr(expr, '_inputs') else [])
# Compute this expression given the children
return compute_up(expr, *children, scope=d, **kwargs)
def bottom_up(d, expr):
"""
Process an expression from the leaves upwards
Parameters
----------
d : dict mapping {Symbol: data}
Maps expressions to data elements, likely at the leaves of the tree
expr : Expr
Expression to compute
Helper function for ``compute``
"""
# Base case: expression is in dict, return associated data
if expr in d:
return d[expr]
# Compute children of this expression
children = ([bottom_up(d, child) for child in expr._inputs]
if hasattr(expr, '_inputs') else [])
# Compute this expression given the children
result = compute_up(expr, *children, scope=d)
return result
@dispatch(Expr, dict)
def pre_compute(expr, d):
""" Transform expr prior to calling ``compute`` """
return expr
@dispatch(Expr, object, dict)
def post_compute(expr, result, d):
""" Effects after the computation is complete """
return result
@dispatch(Expr, object)
def optimize(expr, data):
""" Optimize expression to be computed on data """
return expr
def swap_resources_into_scope(expr, scope):
""" Translate interactive expressions into normal abstract expressions
Interactive Blaze expressions link to data on their leaves. From the
expr/compute perspective, this is a hack. We push the resources onto the
scope and return simple unadorned expressions instead.
Example
-------
>>> from blaze import Data
>>> t = Data([1, 2, 3], dshape='3 * int', name='t')
>>> swap_resources_into_scope(t.head(2), {})
(t.head(2), {t: [1, 2, 3]})
"""
resources = expr._resources()
symbol_dict = dict((t, Symbol(t._name, t.dshape)) for t in resources)
resources = dict((symbol_dict[k], v) for k, v in resources.items())
scope = toolz.merge(resources, scope)
expr = expr._subs(symbol_dict)
return expr, scope
@dispatch(Expr, dict)
def compute(expr, d, **kwargs):
""" Compute expression against data sources
>>> t = Symbol('t', 'var * {name: string, balance: int}')
>>> deadbeats = t[t['balance'] < 0]['name']
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> list(compute(deadbeats, {t: data}))
['Bob', 'Charlie']
"""
expr2, d2 = swap_resources_into_scope(expr, d)
expr3 = pre_compute(expr2, d2)
try:
expr4 = optimize(expr3, *[v for e, v in d2.items() if e in expr3])
except NotImplementedError:
expr4 = expr3
result = top_to_bottom(d2, expr4, **kwargs)
return post_compute(expr4, result, d2)
def columnwise_funcstr(t, variadic=True, full=False):
"""Build a string that can be eval'd to return a ``lambda`` expression.
Parameters
----------
t : Broadcast
An expression whose leaves (at each application of the returned
expression) are all instances of ``ScalarExpression``.
For example ::
t.petal_length / max(t.petal_length)
is **not** a valid ``Broadcast``, since the expression ::
max(t.petal_length)
    has a leaf ``t`` that is not a ``ScalarExpression``. An example of a
valid ``Broadcast`` expression is ::
t.petal_length / 4
Returns
-------
f : str
A string that can be passed to ``eval`` and will return a function that
operates on each row and applies a scalar expression to a subset of the
columns in each row.
Examples
--------
>>> t = Symbol('t', 'var * {x: real, y: real, z: real}')
>>> cw = t['x'] + t['z']
>>> columnwise_funcstr(cw)
'lambda x, z: x + z'
>>> columnwise_funcstr(cw, variadic=False)
'lambda (x, z): x + z'
>>> columnwise_funcstr(cw, variadic=False, full=True)
'lambda (x, y, z): x + z'
"""
if full:
columns = t._child.fields
else:
columns = t.active_columns()
if variadic:
prefix = 'lambda %s: '
else:
prefix = 'lambda (%s): '
return prefix % ', '.join(map(str, columns)) + eval_str(t._expr)
@dispatch(Union, (list, tuple))
def compute_up(t, children, **kwargs):
return compute_up(t, children[0], tuple(children))
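# --- Dispatch sketch (illustrative, not part of the original module) ---
# Backends extend computation by registering compute_up implementations for
# (expression type, data type) pairs. The wrapper class below is hypothetical
# and only demonstrates the registration pattern; real backends register
# handlers for expression nodes (projections, selections, ...) paired with
# their container types.
class _Boxed(object):
    def __init__(self, value):
        self.value = value

@dispatch(Symbol, _Boxed)
def compute_up(expr, data, **kwargs):
    # Unwrap the hypothetical container when a Symbol is computed against it.
    return data.value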
| {
"repo_name": "vitan/blaze",
"path": "blaze/compute/core.py",
"copies": "1",
"size": "6751",
"license": "bsd-3-clause",
"hash": -7933338448692129000,
"line_mean": 27.7276595745,
"line_max": 84,
"alpha_frac": 0.604799289,
"autogenerated": false,
"ratio": 3.717511013215859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48223103022158587,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from addie.plot.constants import BASIC_COLORS
class IndicatorManager(object):
""" Manager for all indicator lines
Indicator's Type =
- 0: horizontal. moving along Y-direction. [x_min, x_max], [y, y];
- 1: vertical. moving along X-direction. [x, x], [y_min, y_max];
- 2: 2-way. moving in any direction. [x_min, x_max], [y, y], [x, x], [y_min, y_max].
"""
def __init__(self):
# Auto color index
self._colorIndex = 0
# Auto line ID
self._autoLineID = 1
self._lineManager = dict()
self._canvasLineKeyDict = dict()
self._indicatorTypeDict = dict() # value: 0 (horizontal), 1 (vertical), 2 (2-way)
def add_2way_indicator(self, x, x_min, x_max, y, y_min, y_max, color):
# Set up indicator ID
this_id = str(self._autoLineID)
self._autoLineID += 1
# Set up vectors
vec_x_horizontal = np.array([x_min, x_max])
vec_y_horizontal = np.array([y, y])
vec_x_vertical = np.array([x, x])
vec_y_vertical = np.array([y_min, y_max])
#
self._lineManager[this_id] = [vec_x_horizontal, vec_y_horizontal, vec_x_vertical, vec_y_vertical, color]
self._indicatorTypeDict[this_id] = 2
return this_id
def add_horizontal_indicator(self, y, x_min, x_max, color):
"""
Add a horizontal indicator moving vertically
"""
# Get ID
this_id = str(self._autoLineID)
self._autoLineID += 1
#
vec_x = np.array([x_min, x_max])
vec_y = np.array([y, y])
#
self._lineManager[this_id] = [vec_x, vec_y, color]
self._indicatorTypeDict[this_id] = 0
return this_id
def add_vertical_indicator(self, x, y_min, y_max, color):
"""
Add a vertical indicator to data structure moving horizontally
:return: indicator ID as an integer
"""
# Get ID
this_id = self._autoLineID
self._autoLineID += 1
# form vec x and vec y
vec_x = np.array([x, x])
vec_y = np.array([y_min, y_max])
#
self._lineManager[this_id] = [vec_x, vec_y, color]
self._indicatorTypeDict[this_id] = 1
return this_id
def delete(self, indicator_id):
"""
Delete indicator
"""
del self._lineManager[indicator_id]
del self._canvasLineKeyDict[indicator_id]
del self._indicatorTypeDict[indicator_id]
def get_canvas_line_index(self, indicator_id):
"""
Get a line's ID (on canvas) from an indicator ID
"""
assert isinstance(indicator_id, int)
if indicator_id not in self._canvasLineKeyDict:
raise RuntimeError('Indicator ID %s cannot be found. Current keys are %s.' % (
indicator_id, str(sorted(self._canvasLineKeyDict.keys()))
))
return self._canvasLineKeyDict[indicator_id]
def get_line_type(self, my_id):
return self._indicatorTypeDict[my_id]
def get_2way_data(self, line_id):
        assert line_id in self._indicatorTypeDict, 'Indicator ID %s does not exist.' % str(line_id)
        assert self._indicatorTypeDict[line_id] == 2, 'Indicator ID %s is not a 2-way indicator.' % str(line_id)
vec_set = [self._lineManager[line_id][0:2], self._lineManager[line_id][2:4]]
return vec_set
def get_data(self, line_id):
"""
Get line's vector x and vector y
:return: 2-tuple of numpy arrays
"""
return self._lineManager[line_id][0], self._lineManager[line_id][1]
def get_indicator_key(self, x, y):
""" Get indicator's key with position
"""
if x is None and y is None:
            raise RuntimeError('X and Y cannot both be None when looking up an indicator key.')
ret_key = None
for line_key in self._lineManager:
if x is not None and y is not None:
# 2 way
raise NotImplementedError('ASAP')
elif x is not None and self._indicatorTypeDict[line_key] == 1:
# vertical indicator moving along X
if abs(self._lineManager[line_key][0][0] - x) < 1.0E-2:
return line_key
elif y is not None and self._indicatorTypeDict[line_key] == 0:
# horizontal indicator moving along Y
if abs(self._lineManager[line_key][1][0] - y) < 1.0E-2:
return line_key
# END-FOR
return ret_key
@staticmethod
def get_line_style(line_id=None):
if line_id is not None:
style = '--'
else:
style = '--'
return style
def get_live_indicator_ids(self):
return sorted(self._lineManager.keys())
@staticmethod
def get_marker():
"""
        Get the marker for a line
"""
return '.'
def get_next_color(self):
"""
Get next color by auto color index
:return: string as color
"""
next_color = BASIC_COLORS[self._colorIndex]
# Advance and possibly reset color scheme
self._colorIndex += 1
if self._colorIndex == len(BASIC_COLORS):
self._colorIndex = 0
return next_color
def set_canvas_line_index(self, my_id, canvas_line_index):
self._canvasLineKeyDict[my_id] = canvas_line_index
def set_position(self, my_id, pos_x, pos_y):
""" Set the indicator to a new position
"""
if self._indicatorTypeDict[my_id] == 0:
# horizontal
self._lineManager[my_id][1][0] = pos_y
self._lineManager[my_id][1][1] = pos_y
elif self._indicatorTypeDict[my_id] == 1:
# vertical
self._lineManager[my_id][0][0] = pos_x
self._lineManager[my_id][0][1] = pos_x
elif self._indicatorTypeDict[my_id] == 2:
# 2-way
self._lineManager[my_id][0] = pos_x
self._lineManager[my_id][1] = pos_y
else:
raise RuntimeError('Unsupported indicator of type %d' % self._indicatorTypeDict[my_id])
self._lineManager[my_id][2] = 'black'
def shift(self, my_id, dx, dy):
if self._indicatorTypeDict[my_id] == 0:
# horizontal
self._lineManager[my_id][1] += dy
elif self._indicatorTypeDict[my_id] == 1:
# vertical
self._lineManager[my_id][0] += dx
elif self._indicatorTypeDict[my_id] == 2:
# 2-way
self._lineManager[my_id][2] += dx
self._lineManager[my_id][1] += dy
else:
raise RuntimeError('Unsupported indicator of type %d' % self._indicatorTypeDict[my_id])
def update_indicators_range(self, x_range, y_range):
"""
Update indicator's range
"""
for i_id in self._lineManager:
# NEXT - Need a new flag for direction of the indicating line, vertical or horizontal
if True:
self._lineManager[i_id][1][0] = y_range[0]
self._lineManager[i_id][1][-1] = y_range[1]
else:
self._lineManager[i_id][0][0] = x_range[0]
self._lineManager[i_id][0][-1] = x_range[1]
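# --- Usage sketch (illustrative, not part of the original module) ---
# A vertical indicator is stored as a pair of numpy vectors plus a color; the
# example below adds one at x=1.5 spanning y in [0, 10] and shifts it by dx=0.25.
if __name__ == '__main__':
    manager = IndicatorManager()
    line_id = manager.add_vertical_indicator(1.5, 0.0, 10.0, manager.get_next_color())
    vec_x, vec_y = manager.get_data(line_id)
    manager.shift(line_id, dx=0.25, dy=0.0)
    print(manager.get_data(line_id))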
| {
"repo_name": "neutrons/FastGR",
"path": "addie/plot/indicatormanager.py",
"copies": "1",
"size": "7293",
"license": "mit",
"hash": 6137622095735937000,
"line_mean": 30.7086956522,
"line_max": 112,
"alpha_frac": 0.5512134924,
"autogenerated": false,
"ratio": 3.624751491053678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4675964983453678,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from collections import OrderedDict
from sklearn.metrics import auc, log_loss, precision_recall_curve, roc_auc_score
def loss(labels, predictions):
return log_loss(labels, predictions)
def positive_accuracy(labels, predictions, threshold=0.5):
return 100 * (predictions[labels] > threshold).mean()
def negative_accuracy(labels, predictions, threshold=0.5):
return 100 * (predictions[~labels] < threshold).mean()
def balanced_accuracy(labels, predictions, threshold=0.5):
return (positive_accuracy(labels, predictions, threshold) +
negative_accuracy(labels, predictions, threshold)) / 2
def auROC(labels, predictions):
return roc_auc_score(labels, predictions)
def auPRC(labels, predictions):
precision, recall = precision_recall_curve(labels, predictions)[:2]
return auc(recall, precision)
def recall_at_precision_threshold(labels, predictions, precision_threshold):
precision, recall = precision_recall_curve(labels, predictions)[:2]
return 100 * recall[np.searchsorted(precision - precision_threshold, 0)]
class ClassificationResult(object):
def __init__(self, labels, predictions, task_names=None):
assert labels.dtype == bool
self.results = [OrderedDict((
('Loss', loss(task_labels, task_predictions)),
('Balanced accuracy', balanced_accuracy(
task_labels, task_predictions)),
('auROC', auROC(task_labels, task_predictions)),
('auPRC', auPRC(task_labels, task_predictions)),
('Recall at 5% FDR', recall_at_precision_threshold(
task_labels, task_predictions, 0.95)),
('Recall at 10% FDR', recall_at_precision_threshold(
task_labels, task_predictions, 0.9)),
('Recall at 20% FDR', recall_at_precision_threshold(
task_labels, task_predictions, 0.8)),
('Num Positives', task_labels.sum()),
('Num Negatives', (1 - task_labels).sum())
)) for task_labels, task_predictions in zip(labels.T, predictions.T)]
self.task_names = task_names
self.multitask = labels.shape[1] > 1
def __str__(self):
return '\n'.join(
'{}Loss: {:.4f}\tBalanced Accuracy: {:.2f}%\t '
'auROC: {:.3f}\t auPRC: {:.3f}\n\t'
'Recall at 5%|10%|20% FDR: {:.1f}%|{:.1f}%|{:.1f}%\t '
'Num Positives: {}\t Num Negatives: {}'.format(
'{}: '.format('Task {}'.format(
self.task_names[task_index]
if self.task_names is not None else task_index))
if self.multitask else '', *results.values())
for task_index, results in enumerate(self.results))
def __getitem__(self, item):
return np.array([task_results[item] for task_results in self.results])
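# --- Usage sketch (illustrative, not part of the original module) ---
# Labels must be a boolean (num_samples x num_tasks) array and predictions a
# matching float array of scores in [0, 1]; the synthetic data below is only
# for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    labels = rng.rand(200, 2) > 0.5
    predictions = np.clip(labels + rng.normal(0, 0.3, labels.shape), 0, 1)
    print(ClassificationResult(labels, predictions, task_names=['task_a', 'task_b']))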
| {
"repo_name": "agitter/dragonn",
"path": "dragonn/metrics.py",
"copies": "2",
"size": "2923",
"license": "mit",
"hash": -5109014405069503000,
"line_mean": 39.0410958904,
"line_max": 80,
"alpha_frac": 0.6243585358,
"autogenerated": false,
"ratio": 3.7911802853437093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016911889058007779,
"num_lines": 73
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from glm import glm, glm_diagnostics, glm_multiple
def mean_underlying_noise(data_4d):
""" takes average of data_4d across the 4th dimension (time)
Parameters:
-----------
data_4d: 4 dimensional np.array
(with 4th dimension the one trying to take mean over)
Returns:
--------
y_mean: average of data_4d across the 4th dimension
"""
data_2d=data_4d.reshape(np.prod(data_4d.shape[:-1]),-1)
y_mean=np.mean(data_2d,axis=0)
return y_mean
def fourier_creation(n,p):
""" predicts the underlying noise using fourier series and glm
Parameters:
-----------
n: desired length to run over (assumes 0:(n-1) by integers)
p: number of fourier series (pairs)
Returns:
--------
X: glm_matrix (first column is all 1s) (dim 2p+1)
Note:
-----
    Builds the Fourier terms starting from the longest period (possibly
    sacrificing orthogonality)
"""
X = np.ones((n,2*p+1))
for i in range(p):
X[:,2*i+1]=np.sin(((i+1)/X.shape[0])*2*np.arange(n))
X[:,2*i+2]=np.cos(((i+1)/X.shape[0])*2*np.arange(n))
return X
def fourier_predict_underlying_noise(y_mean,p):
""" Diagnostics for the fourier creation function
Takes advantage of glm_diagnostics
Parameters:
-----------
y_mean: 1 dimensional np.array
p: number of fourier series (pairs)
Returns:
--------
X: glm_matrix (first column is all 1s)
fitted: the fitted values from glm
residuals: the residuals betwen fitted and y_mean
MRSS: MRSS from glm function (general output from glm_diagnostics)
Note:
-----
    Builds the Fourier terms starting from the longest period (possibly
    sacrificing orthogonality)
"""
n= y_mean.shape[0]
X=fourier_creation(n,p)
beta, junk=glm_multiple(y_mean,X)
MRSS, fitted, residuals = glm_diagnostics(beta, X, y_mean)
return X,MRSS,fitted,residuals
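# --- Usage sketch (illustrative, not part of the original module) ---
# fourier_creation builds a design matrix with an intercept column followed by
# p sine/cosine pairs; the synthetic 4-D data below is only for demonstration.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    fake_4d = rng.rand(4, 4, 4, 100)               # small (x, y, z, time) volume
    y_mean_demo = mean_underlying_noise(fake_4d)   # length-100 time course
    X_demo = fourier_creation(y_mean_demo.shape[0], 3)
    print(X_demo.shape)                            # (100, 7): intercept + 3 pairs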
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/noise_correction.py",
"copies": "1",
"size": "1896",
"license": "bsd-3-clause",
"hash": 6079001658478053000,
"line_mean": 23.3076923077,
"line_max": 85,
"alpha_frac": 0.6867088608,
"autogenerated": false,
"ratio": 2.792341678939617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3979050539739617,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from h5py import File
class SampleEnvironmentHandler(object):
# Specify paths in NeXus files for different sample environments
_dict_samp_env = dict()
_dict_samp_env['cryostat'] = {'samp': {'path_to_time': '/entry/DASlogs/BL1B:SE:SampleTemp/time',
'path_to_value': '/entry/DASlogs/BL1B:SE:SampleTemp/value'},
'envi': {'path_to_time': '/entry/DASlogs/BL1B:SE:Cryo:TempActual/time',
'path_to_value': '/entry/DASlogs/BL1B:SE:Cryo:TempActual/value'}
}
_dict_samp_env['furnace'] = {'samp': {'path_to_time': '/entry/DASlogs/BL1B:SE:SampleTemp/time',
'path_to_value': '/entry/DASlogs/BL1B:SE:SampleTemp/value'},
'envi': {'path_to_time': '/entry/DASlogs/BL1B:SE:ND1:Loop1:SP/time',
'path_to_value': '/entry/DASlogs/BL1B:SE:ND1:Loop1:SP/value'}
}
def __init__(self, samp_env):
self._data = dict()
self._data['samp'] = dict()
self._data['envi'] = dict()
self._data['envi']['lastTime'] = 0.0
self._data['samp']['lastTime'] = 0.0
if samp_env in self._dict_samp_env:
self._data['samp'].update(self._dict_samp_env[samp_env]['samp'])
self._data['envi'].update(self._dict_samp_env[samp_env]['envi'])
else:
raise KeyError('The sample environment '+samp_env+' is not available')
def getDataFromFile(self, filename, data_type):
_data = self._data[data_type]
nf = File(filename, 'r')
_data['time'] = np.array(nf[_data['path_to_time']])
_data['value'] = np.array(nf[_data['path_to_value']])
_data['time'] = np.add(_data['lastTime'], _data['time'])
_data['lastTime'] = _data['time'][-1]
return _data['time'], _data['value']
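# --- Usage sketch (illustrative, not part of the original module) ---
# The NeXus file path below is hypothetical; getDataFromFile returns (time,
# value) arrays for either the 'samp' or 'envi' log and offsets successive
# files so that their time axes are continuous.
if __name__ == '__main__':
    handler = SampleEnvironmentHandler('cryostat')
    samp_time, samp_value = handler.getDataFromFile('NOM_12345.nxs.h5', 'samp')
    envi_time, envi_value = handler.getDataFromFile('NOM_12345.nxs.h5', 'envi')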
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/sample_environment_handler.py",
"copies": "1",
"size": "2073",
"license": "mit",
"hash": -4358021509054640000,
"line_mean": 46.1136363636,
"line_max": 107,
"alpha_frac": 0.5233960444,
"autogenerated": false,
"ratio": 3.415156507413509,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9428395070784308,
"avg_score": 0.002031496205840427,
"num_lines": 44
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from numpy.random import gamma
from scipy.special import gammainc
__all__ = ['SersicSamples']
class SersicSamples(object):
"""
Class for sampling sersic profiles in CatSim
"""
def __init__(self, rng):
"""
Parameters
---------
rng : instance of `numpy.random.RandomState`
"""
self.rng = rng
self.fp4 = np.arange(0., 200., 0.01)
self.xp4 = gammainc(8, self.fp4)
self.fp1 = np.arange(0., 20., 0.001)
self.xp1 = gammainc(2, self.fp1)
@staticmethod
def sampleAngles(a, b, numSamples=1, rng=np.random.RandomState()):
"""
return a sample of the angle with respect to a position angle
in units of degrees. For a single float, and `numSamples=1` the
answer will still be in an array of length 1.
Parameters
----------
a : np.float, or `np.ndarray`
            semi-major axis of ellipse. If array, must have len of `numSamples`
b : np.float, or `np.ndarray`
            semi-minor axis of ellipse. If array, must have len of `numSamples`
numSamples: int, defaults to 1
number of samples desired. must match the length of a and b if a
and b are arrays.
rng : `np.random.RandomState` instance, defaults to no argument
Returns
-------
`np.ndarray` of length `numSamples` in units of degrees.
        .. note:: The two length parameters a and b need to have the same unit.
The binary parameter is used to distribute the result in all four
quadrants from the two that are in the range of arctan.
"""
if isinstance(a, np.float):
assert isinstance(b, np.float)
if numSamples >=1:
a = np.ones(numSamples)*a
b = np.ones(numSamples)*b
if len(a) != len(b) or len(b) != numSamples:
raise ValueError('a, b, numSamples must have same lengths')
u = rng.uniform(0., 2.0*np.pi, size=numSamples)
# Use the binary parameter to handle other quadrants from arctan
binary = np.random.choice([0, 1], size=numSamples, p=[0.5, 0.5])
return np.degrees(np.arctan(b*np.tan(u)/a) + binary*np.pi)
def sampleRadius(self, halfLightRadius, numSamples=1, sersicIndex=1):
"""
return samples of the position, given the halfLightRadius and the
sersicIndex. The answers are for sersic index of 1 and 4 only.
Parameters
----------
halfLightRadius : `np.float` or `np.ndarray` of dtype `np.float`
half light radius of the galaxy/bulge/disk
numSamples : int, defaults to 1
number of samples desired
        sersicIndex : np.float, defaults to 1
sersic index, works for bulges and disks only
Returns
-------
`np.ndarray` of radial distances from the center of the galaxy in the
same units as halfLightRadius
"""
if isinstance(halfLightRadius, np.float) and numSamples >= 1:
halfLightRadius = np.ones(numSamples) * halfLightRadius
elif numSamples != len(halfLightRadius):
raise ValueError('The lengths must match')
u = self.rng.uniform(size=numSamples)
if sersicIndex == 1:
x = np.interp(u, self.xp1, self.fp1)
b1 = 1.678
return halfLightRadius * x / b1
if sersicIndex == 4:
x = np.interp(u, self.xp4, self.fp4)
b4 = 7.669
return halfLightRadius * (x / b4) **4
| {
"repo_name": "rbiswas4/SNsims",
"path": "snsims/samplingGalaxies.py",
"copies": "1",
"size": "3655",
"license": "mit",
"hash": 2026581604255841800,
"line_mean": 38.3010752688,
"line_max": 78,
"alpha_frac": 0.5885088919,
"autogenerated": false,
"ratio": 3.8554852320675104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9882602867958341,
"avg_score": 0.012278251201833732,
"num_lines": 93
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from qtpy.QtWidgets import QMainWindow, QTableWidgetItem
from qtpy import QtCore, QtGui
from addie.utilities import load_ui
from addie.utilities.general import get_list_algo
from addie.processing.mantid.master_table.tree_definition import LIST_COLUMNS_TO_SEARCH_FOR_FULL_HIGHLIGTHING
COLUMNS_WIDTH = [150, 150]
class AlignAndFocusArgsHandling:
def __init__(self, main_window=None, key=None):
if main_window.key_value_pair_ui is None:
o_key_value = AlignAndFocusArgsWindow(main_window=main_window, key=key)
main_window.key_value_pair_ui = o_key_value
if main_window.key_value_pair_ui_position:
main_window.key_value_pair_ui.move(main_window.key_value_pair_ui_position)
o_key_value.show()
else:
main_window.key_value_pair_ui.setFocus()
main_window.key_value_pair_ui.activateWindow()
class AlignAndFocusArgsWindow(QMainWindow):
list_algo_without_blacklist = None
unused_list_algo = None
local_list_key_loaded = []
def __init__(self, main_window=None, key=None):
self.main_window = main_window
self.key = key
QMainWindow.__init__(self, parent=main_window)
self.ui = load_ui('manual_key_value_input.ui', baseinstance=self)
self.init_widgets()
self._check_remove_button()
def init_widgets(self):
self._init_status_of_use_global_checkbox()
self._init_key_value_table()
self._set_column_widths()
self._init_list_algo_combobox()
self.use_global_keys_values_clicked()
def _init_status_of_use_global_checkbox(self):
master_table_list_ui = self.main_window.master_table_list_ui[self.key]
self.ui.use_global_keys_values.setChecked(master_table_list_ui['align_and_focus_args_use_global'])
def _init_list_algo_combobox(self):
list_algo = get_list_algo('AlignAndFocusPowderFromFiles')
list_algo_without_blacklist = self.remove_blacklist_algo(list_algo)
self.list_algo_without_blacklist = list_algo_without_blacklist
self.populate_list_algo()
def is_key_a_global_key(self, key):
global_key_value = self.main_window.global_key_value
return key in global_key_value.keys()
def bring_back_global_key_value_removed(self):
list_key_in_table = self.get_list_key_in_table()
global_key_value = self.main_window.global_key_value
for _key in global_key_value.keys():
if not (_key in list_key_in_table):
value = global_key_value[_key]
self._add_row(row=0, key=_key, value=value)
def get_list_key_in_table(self):
list_key = []
for _row in np.arange(self.ui.key_value_table.rowCount()):
_key = str(self.ui.key_value_table.item(_row, 0).text())
list_key.append(_key)
return list_key
def use_global_keys_values_clicked(self):
use_global_key_value = self.ui.use_global_keys_values.isChecked()
if use_global_key_value:
self.bring_back_global_key_value_removed()
for _row in np.arange(self.ui.key_value_table.rowCount()):
_key = self.get_key_for_this_row(_row)
if self.is_key_a_global_key(_key):
self.set_row_layout(_row, is_editable=False)
self.reset_value(_row)
else:
for _row in np.arange(self.ui.key_value_table.rowCount()):
self.set_row_layout(_row, is_editable=True)
        ## If use_global is checked:
        ##   - disable rows whose key is a global key and reset their value to the
        ##     global value defined in the Settings window
        ##   - the user is not allowed to remove those rows
        ##   - if a global key has been removed by the user, bring it back to the table
        ## If use_global is unchecked:
        ##   - enable every row
        ##   - the user is allowed to remove any row
def get_key_for_this_row(self, _row):
_key = str(self.ui.key_value_table.item(_row, 0).text())
return _key
def reset_value(self, _row):
global_key_value = self.main_window.global_key_value
_key = self.get_key_for_this_row(_row)
value = global_key_value[_key]
self.ui.key_value_table.item(_row, 1).setText(value)
def set_row_layout(self, _row, is_editable=False):
_font = QtGui.QFont()
if is_editable:
_flag = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable
_font.setBold(False)
else:
_flag = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
_font.setBold(True)
self.ui.key_value_table.item(_row, 1).setFlags(_flag)
self.ui.key_value_table.item(_row, 1).setFont(_font)
self.ui.key_value_table.item(_row, 0).setFont(_font)
def populate_list_algo(self):
self.ui.list_key_comboBox.clear()
self.create_clean_list_algo()
self.ui.list_key_comboBox.addItems(self.unused_list_algo)
def remove_blacklist_algo(self, list_algo):
list_algo_without_blacklist = []
for _algo in list_algo:
if not(_algo in self.main_window.align_and_focus_powder_from_files_blacklist):
list_algo_without_blacklist.append(_algo)
return list_algo_without_blacklist
def create_clean_list_algo(self):
list_algo = self.list_algo_without_blacklist
previous_key_value_dict = self._get_previous_key_value_dict()
list_key_already_loaded = previous_key_value_dict.keys()
global_unused_list_algo = self.remove_from_list(original_list=list_algo,
to_remove=list_key_already_loaded)
local_list_key_loaded = self.get_local_list_key_loaded()
global_and_local_unused_list_algo = self.remove_from_list(original_list=global_unused_list_algo,
to_remove=local_list_key_loaded)
self.unused_list_algo = global_and_local_unused_list_algo
def get_local_list_key_loaded(self):
nbr_row = self.ui.key_value_table.rowCount()
list_local_key_loaded = []
for _row in np.arange(nbr_row):
_item = self.get_key_for_this_row(_row)
list_local_key_loaded.append(_item)
return list_local_key_loaded
def remove_from_list(self,
original_list=[],
to_remove=[]):
if to_remove:
clean_list_algo = []
for _algo in original_list:
if not(_algo in to_remove):
clean_list_algo.append(_algo)
return clean_list_algo
else:
return original_list
def _set_column_widths(self):
for _col, _width in enumerate(COLUMNS_WIDTH):
self.ui.key_value_table.setColumnWidth(_col, _width)
def _init_key_value_table(self):
previous_key_value_dict = self._get_previous_key_value_dict()
for _row, _key in enumerate(previous_key_value_dict.keys()):
_value = previous_key_value_dict[_key]
self._add_row(row=_row, key=_key, value=_value)
self.local_list_key_loaded.append(_key)
def _get_previous_key_value_dict(self):
master_table_list_ui = self.main_window.master_table_list_ui[self.key]
return master_table_list_ui['align_and_focus_args_infos']
def _save_key_value_infos(self):
master_table_list_ui = self.main_window.master_table_list_ui[self.key]
key_value_dict = self._get_key_value_dict()
master_table_list_ui['align_and_focus_args_infos'] = key_value_dict
self.main_window.master_table_list_ui[self.key] = master_table_list_ui
def _get_key_value_dict(self):
key_value_dict = {}
for _row in np.arange(self.get_nbr_row()):
_key = self.get_key_for_this_row(_row)
_value = str(self.ui.key_value_table.item(_row, 1).text())
key_value_dict[_key] = _value
return key_value_dict
def add_clicked(self):
self._add_new_row_at_bottom()
self.populate_list_algo()
self._check_remove_button()
self.ui.list_key_comboBox.setFocus()
def _add_new_row_at_bottom(self):
value = str(self.ui.new_value_widget.text())
if value.strip() == '':
return
nbr_row = self.get_nbr_row()
key = self.get_current_selected_key()
self.local_list_key_loaded.append(key)
self._add_row(row=nbr_row, key=key, value=value)
self.ui.new_value_widget.setText("")
def get_current_selected_key(self):
return str(self.ui.list_key_comboBox.currentText())
def _add_row(self, row=-1, key='', value=""):
self.ui.key_value_table.insertRow(row)
self._set_item(key, row, 0)
self._set_item(value, row, 1, is_editable=True)
def _set_item(self, text, row, column, is_editable=False):
key_item = QTableWidgetItem(text)
if not is_editable:
key_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.ui.key_value_table.setItem(row, column, key_item)
def remove_clicked(self):
        # make sure rows holding global values cannot be removed while 'use global keys/values' is checked
selected_row_range = self._get_selected_row_range()
if selected_row_range is None:
return
self._remove_rows(selected_row_range)
self._check_remove_button()
self.populate_list_algo()
def _remove_rows(self, row_range):
first_row_selected = row_range[0]
for _row in row_range:
_key = self.get_key_for_this_row(_row)
if self.is_key_a_global_key(_key) and self.ui.use_global_keys_values.isChecked():
continue
self.ui.key_value_table.removeRow(first_row_selected)
def _get_selected_row_range(self):
selection = self.ui.key_value_table.selectedRanges()
if not selection:
return None
from_row = selection[0].topRow()
to_row = selection[0].bottomRow()
return np.arange(from_row, to_row+1)
def get_nbr_row(self):
return self.ui.key_value_table.rowCount()
def _check_remove_button(self):
enable = self._what_state_remove_button_should_be()
self.ui.remove_selection_button.setEnabled(enable)
def _what_state_remove_button_should_be(self):
nbr_row = self.get_nbr_row()
if nbr_row > 0:
enable = True
else:
enable = False
return enable
def cancel_clicked(self):
self.close()
def _save_use_global_button_status(self):
_status = self.ui.use_global_keys_values.isChecked()
master_table_list_ui = self.main_window.master_table_list_ui[self.key]
master_table_list_ui['align_and_focus_args_use_global'] = _status
self.main_window.master_table_list_ui[self.key] = master_table_list_ui
def ok_clicked(self):
self._save_key_value_infos()
self._save_use_global_button_status()
self.main_window.check_master_table_column_highlighting(column=LIST_COLUMNS_TO_SEARCH_FOR_FULL_HIGHLIGTHING[-1])
self.close()
def closeEvent(self, c):
self.main_window.key_value_pair_ui_position = self.pos()
self.main_window.key_value_pair_ui = None
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/align_and_focus_args.py",
"copies": "1",
"size": "11480",
"license": "mit",
"hash": -2265650386489986000,
"line_mean": 38.3150684932,
"line_max": 120,
"alpha_frac": 0.6159407666,
"autogenerated": false,
"ratio": 3.422778771615981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9534161401060289,
"avg_score": 0.0009116274311382725,
"num_lines": 292
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.stats import gaussian_kde
import numpy.random as rand
from scipy.integrate import quad
class KDE(object):
"""An implementation of a kernel density estimator allowing for adaptive kernels.
If the `adaptive` keyword is set to `False`, then this will essentially be just
a wrapper for the `scipy.stats.gaussian_kde` class. If adaptive, though, it
allows for different kernels and different kernel widths according to the
"K-nearest-neighbors" algorithm as discussed `here <http://en.wikipedia.org/wiki/Variable_kernel_density_estimation#Balloon_estimators>`_. The `fast` option does the NN calculation using
broadcasting arrays rather than a brute-force sort. By default the
fast option will be used for datasets smaller than 5000.
Parameters
----------
dataset : array-like
Data set from which to calculate the KDE.
kernel : {'tricube','ep','gauss'}, optional
Kernel function to use for adaptive estimator.
adaptive : bool, optional
Flag whether or not to use adaptive KDE. If this is false, then this
class will just be a wrapper for `scipy.stats.gaussian_kde`.
k : `None` or int, optional
Number to use for K-nearest-neighbor algorithm. If `None`, then
it will be set to the `N/4`, where `N` is the size of the dataset.
fast : `None` or bool, optional
If `None`, then `fast = N < 5001`, where `N` is the size of the dataset.
`fast=True` will force array calculations, which will use lots of RAM
if the dataset is large.
norm : float, optional
Allows the normalization of the distribution to be something other
than unity
bandwidth : `None` or float, optional
Passed to `scipy.stats.gaussian_kde` if not using adaptive mode.
weights : array-like, optional
Not yet implemented.
draw_direct : bool, optional
If `True`, then resampling will be just a bootstrap resampling
of the input samples. If `False`, then resampling will actually
resample each individual kernel (not recommended for large-ish
datasets).
kwargs
Keyword arguments passed to `scipy.stats.gaussian_kde` if adaptive
mode is not being used.
"""
def __init__(self,dataset,kernel='tricube',adaptive=True,k=None,
fast=None,norm=1.,bandwidth=None,weights=None,
draw_direct=False,**kwargs):
self.dataset = np.atleast_1d(dataset)
self.weights = weights
self.n = np.size(dataset)
self.kernel = kernelfn(kernel)
self.kernelname = kernel
self.bandwidth = bandwidth
self.draw_direct = draw_direct
if k:
self.k = k
else:
            self.k = self.n//4
self.norm=norm
self.adaptive = adaptive
self.fast = fast
if adaptive:
            if self.fast is None:
self.fast = self.n < 5001
if self.fast:
#d1,d2 = np.meshgrid(self.dataset,self.dataset) #use broadcasting instead of meshgrid
diff = np.absolute(self.dataset - self.dataset[:,np.newaxis])
diffsort = np.sort(diff,axis=0)
self.h = diffsort[self.k,:]
##Attempt to handle larger datasets more easily:
else:
sortinds = np.argsort(self.dataset)
x = self.dataset[sortinds]
h = np.zeros(len(x))
for i in np.arange(len(x)):
lo = i - self.k
hi = i + self.k + 1
if lo < 0:
lo = 0
if hi > len(x):
hi = len(x)
diffs = abs(x[lo:hi]-x[i])
h[sortinds[i]] = np.sort(diffs)[self.k]
self.h = h
else:
self.gauss_kde = gaussian_kde(self.dataset,bw_method=bandwidth,**kwargs)
def renorm(self,norm):
"""Change the normalization"""
self.norm = norm
def evaluate(self,points):
if not self.adaptive:
return self.gauss_kde(points)*self.norm
points = np.atleast_1d(points).astype(self.dataset.dtype)
k = self.k
npts = np.size(points)
h = self.h
X,Y = np.meshgrid(self.dataset,points)
H = np.resize(h,(npts,self.n))
U = (X-Y)/H.astype(float)
result = 1./self.n*1./H*self.kernel(U)
return np.sum(result,axis=1)*self.norm
__call__ = evaluate
def integrate_box(self,low,high,forcequad=False,**kwargs):
"""Integrates over a box. Optionally force quad integration, even for non-adaptive.
If adaptive mode is not being used, this will just call the
`scipy.stats.gaussian_kde` method `integrate_box_1d`. Else,
by default, it will call `scipy.integrate.quad`. If the
`forcequad` flag is turned on, then that integration will be
used even if adaptive mode is off.
Parameters
----------
low : float
Lower limit of integration
high : float
Upper limit of integration
forcequad : bool
If `True`, then use the quad integration even if adaptive mode is off.
kwargs
Keyword arguments passed to `scipy.integrate.quad`.
"""
if not self.adaptive and not forcequad:
return self.gauss_kde.integrate_box_1d(low,high)*self.norm
return quad(self.evaluate,low,high,**kwargs)[0]
def resample(self,size=None,direct=None):
if direct is None:
direct = self.draw_direct
        if size is None:
            size = self.n
        size = int(size)
if not self.adaptive:
return np.squeeze(self.gauss_kde.resample(size=size))
if direct:
inds = rand.randint(self.n,size=size)
return self.dataset[inds]
else:
if size is None:
size = self.n
indices = rand.randint(0,self.n,size=size)
means = self.dataset[indices]
h = self.h[indices]
fuzz = kerneldraw(size,self.kernelname)*h
return np.squeeze(means + fuzz)
draw = resample
def epkernel(u):
x = np.atleast_1d(u)
y = 3./4*(1-x*x)
y[((x>1) | (x < -1))] = 0
return y
def gausskernel(u):
return 1/np.sqrt(2*np.pi)*np.exp(-0.5*u*u)
def tricubekernel(u):
x = np.atleast_1d(u)
y = 35./32*(1-x*x)**3
y[((x > 1) | (x < -1))] = 0
return y
def kernelfn(kernel='tricube'):
if kernel=='ep':
return epkernel
elif kernel=='gauss':
return gausskernel
elif kernel=='tricube':
return tricubekernel
def kerneldraw(size=1,kernel='tricube',exact=False):
if kernel=='tricube':
fn = lambda x: 1./2 + 35./32*x - 35./32*x**3 + 21./32*x**5 - 5./32*x**7
u = rand.random(size=size)
if not exact:
            xs = np.linspace(-1,1,10000)
ys = fn(xs)
inds = np.digitize(u,ys)
return xs[inds]
else:
#old way (exact)
rets = np.zeros(size)
for i in np.arange(size):
f = lambda x: u[i]-fn(x)
rets[i] = newton(f,0,restrict=(-1,1))
return rets
def deriv(f,c,dx=0.0001):
"""
deriv(f,c,dx) --> float
Returns f'(x), computed as a symmetric difference quotient.
"""
return (f(c+dx)-f(c-dx))/(2*dx)
def fuzzyequals(a,b,tol=0.0001):
return abs(a-b) < tol
def newton(f,c,tol=0.0001,restrict=None):
"""
newton(f,c) --> float
Returns the x closest to c such that f(x) = 0
"""
#print(c)
if restrict:
lo,hi = restrict
if c < lo or c > hi:
print(c)
            c = rand.random()*(hi-lo)+lo
if fuzzyequals(f(c),0,tol):
return c
else:
try:
return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict)
except:
return None
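# --- Usage sketch (illustrative, not part of the original module) ---
# Build an adaptive KDE from a small bimodal sample, evaluate the density on a
# grid and draw bootstrap-style resamples; the numbers are only for demonstration.
if __name__ == '__main__':
    data = np.concatenate([rand.normal(-2, 0.5, 300), rand.normal(1, 1.0, 300)])
    kde = KDE(data, kernel='tricube', adaptive=True, k=50)
    grid = np.linspace(-4, 4, 200)
    density = kde(grid)
    samples = kde.resample(size=1000, direct=True)
    print(density.max(), samples.mean())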
| {
"repo_name": "timothydmorton/simpledist",
"path": "simpledist/kde.py",
"copies": "1",
"size": "8115",
"license": "mit",
"hash": 2257000055189634300,
"line_mean": 30.9488188976,
"line_max": 191,
"alpha_frac": 0.5672211953,
"autogenerated": false,
"ratio": 3.7327506899724012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9735872418915805,
"avg_score": 0.01281989327131909,
"num_lines": 254
} |
from __future__ import absolute_import, division, print_function
import numpy as np
from sklearn.preprocessing import normalize
def crop_resample(bands, intensities, crops):
intensities = np.atleast_2d(intensities)
crops = sorted(crops)
# check that each chunk is valid and doesn't overlap with any other
prev_ub = float('-inf')
for lb, ub, step in crops:
if ub <= lb:
raise ValueError('Invalid crop region')
if lb < prev_ub:
raise ValueError('Overlapping crop regions')
prev_ub = ub
# do all the band lookups at once
locs = sorted(set(c[0] for c in crops).union(set(c[1] for c in crops)))
idxs = np.searchsorted(bands, locs)
loc_idxs = dict(zip(locs, idxs))
# crop/resample each chunk separately
xs, ys = [], []
for lb, ub, step in crops:
s = slice(loc_idxs[lb], loc_idxs[ub])
x = bands[s]
if step > 0:
lb = lb if np.isfinite(lb) else x[0]
ub = ub if np.isfinite(ub) else x[-1] + step
x_new = np.arange(lb, ub, step)
y_new = np.row_stack([np.interp(x_new, x, y) for y in intensities[:, s]])
xs.append(x_new)
ys.append(y_new)
else:
xs.append(x)
ys.append(intensities[:, s])
# glue all the chunks back together
return np.concatenate(xs), np.hstack(ys)
def cumulative_norm(S):
'''Cumulative intensity normalization method.
"Quality Assessment of Tandem Mass Spectra Based on
Cumulative Intensity Normalization", Na & Paek, J. of Proteome Research
'''
idx = np.arange(S.shape[0])[:,None]
ranks = np.argsort(S, axis=1)
cumsums = np.cumsum(S[idx,ranks], axis=1)
unranks = np.zeros_like(ranks)
unranks[idx,ranks] = np.arange(S.shape[1])
S = cumsums[idx,unranks]
S /= cumsums[:,-1:]
return S
def libs_norm3(shots, wavelengths=None, copy=True):
shots = np.array(shots, copy=copy, ndmin=2)
num_chan = shots.shape[1]
if num_chan == 6143:
a, b = 2047, 4097
elif num_chan == 6144:
a, b = 2048, 4098
elif num_chan == 5485:
a, b = 1884, 3811
elif wavelengths is not None:
a, b = np.searchsorted(wavelengths, (360, 470))
else:
raise ValueError('Invalid # channels for LIBS norm3 method: %d' % num_chan)
normalize(shots[:, :a], norm='l1', copy=False)
normalize(shots[:,a:b], norm='l1', copy=False)
normalize(shots[:, b:], norm='l1', copy=False)
return shots
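# --- Usage sketch (illustrative, not part of the original module) ---
# crop_resample keeps two non-overlapping band windows and resamples the first
# onto a 2-unit grid; the synthetic spectrum below is only for demonstration.
if __name__ == '__main__':
  bands = np.linspace(100, 200, 501)
  intensities = np.sin(bands / 10.0)[None, :]
  new_bands, new_intensities = crop_resample(
      bands, intensities, [(110, 150, 2), (160, 190, 0)])
  print(new_bands.shape, new_intensities.shape)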
| {
"repo_name": "all-umass/superman",
"path": "superman/preprocess/utils.py",
"copies": "1",
"size": "2332",
"license": "mit",
"hash": -1645972582357642500,
"line_mean": 31.3888888889,
"line_max": 79,
"alpha_frac": 0.647084048,
"autogenerated": false,
"ratio": 2.963151207115629,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8994234070848697,
"avg_score": 0.02320023685338631,
"num_lines": 72
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import gsd.hoomd
import sklearn
import scipy.optimize as opt
import os
import pdb
from sklearn.neighbors import BallTree
from sklearn.neighbors import radius_neighbors_graph
from scipy.spatial.distance import cdist
from scipy.special import erf
from scipy.sparse.csgraph import connected_components
#from .due import due, Doi
from .smoluchowski import massAvSize
#from mpi4py import MPI
from cdistances import conOptDistanceCython,alignDistancesCython
__all__ = ['fixPBC']
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
'''
due.cite(Doi("10.1167/13.9.30"),
description="Simple data analysis for clustering application",
tags=["data-analysis","clustering"],
path='clustering')
'''
def fixPBC(peps,box,ats,cutoff):
#return positions fixed across PBCs for calculation of structural metrics like Rh and Rg
#create the list of fixed positions
fixedXYZ = peps.copy()
    potInds = list(range(1, len(peps)//(ats*3)))
#the first ats*3 coordinates are the coordinates of the first atom
fixedXYZ[0:3*ats] = fixCoords(fixedXYZ[0:3*ats].copy(),fixedXYZ[0:3].copy(),box)
correctInds = [0]
while len(correctInds) > 0:
atom = correctInds.pop()
neighs = getNeigh(atom,cutoff,peps,potInds,ats)
for n in neighs:
potInds.remove(n)
correctInds.append(n)
fixedXYZ[3*ats*n:3*ats*(n+1)] = fixCoords(fixedXYZ[3*ats*n:3*ats*(n+1)].copy(),fixedXYZ[3*atom*ats:3*atom*ats+3].copy(),box)
return fixedXYZ
def fixCoords(pos,posinit,box):
#fix all coords based on the initial coordinate and the periodic boundary conditions
    for i in range(len(pos)//3):
dr = pos[3*i:3*i+3] - posinit
dr = dr - box*np.round(dr/box)
pos[3*i:3*i+3] = dr + posinit
return pos
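# --- Usage sketch (illustrative, not part of the original module) ---
# fixCoords unwraps a flat (x0,y0,z0,x1,y1,z1,...) coordinate array across
# periodic boundaries relative to a reference position; the toy box below is
# only for demonstration.
if __name__ == '__main__':
    box = np.array([10.0, 10.0, 10.0])
    coords = np.array([1.0, 1.0, 1.0, 9.5, 1.0, 1.0])  # second atom wrapped
    unwrapped = fixCoords(coords.copy(), coords[0:3].copy(), box)
    print(unwrapped)  # second atom becomes (-0.5, 1.0, 1.0)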
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/morphology.py",
"copies": "1",
"size": "1933",
"license": "mit",
"hash": -6348599829380567000,
"line_mean": 34.1454545455,
"line_max": 127,
"alpha_frac": 0.7366787377,
"autogenerated": false,
"ratio": 3.025039123630673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9054443514903918,
"avg_score": 0.04145486928535084,
"num_lines": 55
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import math
import numbers
import json
import ast
import copy
from scipy.interpolate import interp1d
from lsst.sims.catalogs.decorators import register_class, register_method, compound
from lsst.sims.catUtils.mixins import Variability, ExtraGalacticVariabilityModels
from lsst.sims.catUtils.mixins.VariabilityMixin import _VariabilityPointSources
__all__ = ["TimeDelayVariability", "VariabilityTwinkles"]
class TimeDelayVariability(Variability):
@register_method("applyAgnTimeDelay")
def applyAgnTimeDelay(self, valid_dexes, params, expmjd,
variability_cache=None, redshift=None):
if redshift is None:
redshift_arr = self.column_by_name('redshift')
else:
redshift_arr = redshift
if len(params) == 0:
return np.array([[],[],[],[],[],[]])
if isinstance(expmjd, numbers.Number):
dMags = np.zeros((6, self.num_variable_obj(params)))
expmjd_arr = np.array([expmjd])
else:
dMags = np.zeros((6, self.num_variable_obj(params), len(expmjd)))
expmjd_arr = expmjd
seed_arr = params['seed']
t_delay_arr = params['t0Delay'].astype(float)
tau_arr = params['agn_tau'].astype(float)
sfu_arr = params['agn_sfu'].astype(float)
sfg_arr = params['agn_sfg'].astype(float)
sfr_arr = params['agn_sfr'].astype(float)
sfi_arr = params['agn_sfi'].astype(float)
sfz_arr = params['agn_sfz'].astype(float)
sfy_arr = params['agn_sfy'].astype(float)
start_date = 58580.0
duration_observer_frame = expmjd_arr.max() - start_date
if duration_observer_frame < 0 or expmjd_arr.min() < start_date:
raise RuntimeError("WARNING: Time offset greater than minimum epoch. " +
"Not applying variability. "+
"expmjd: %e should be > start_date: %e " % (expmjd.min(), start_date) +
"in applyAgn variability method")
for i_obj in valid_dexes[0]:
seed = seed_arr[i_obj]
tau = tau_arr[i_obj]
time_dilation = 1.0+redshift_arr[i_obj]
t_delay = t_delay_arr[i_obj]
sfint = {}
sfint['u'] = sfu_arr[i_obj]
sfint['g'] = sfg_arr[i_obj]
sfint['r'] = sfr_arr[i_obj]
sfint['i'] = sfi_arr[i_obj]
sfint['z'] = sfz_arr[i_obj]
sfint['y'] = sfy_arr[i_obj]
rng = np.random.RandomState(seed)
dt = tau/100.
duration_rest_frame = duration_observer_frame/time_dilation
nbins = int(math.ceil(duration_rest_frame/dt))+1
time_dexes = np.round((expmjd_arr-start_date-t_delay)/(time_dilation*dt)).astype(int)
assert time_dexes.min() >= 0
time_dex_map = {}
ct_dex = 0
for i_t_dex, t_dex in enumerate(time_dexes):
if t_dex in time_dex_map:
time_dex_map[t_dex].append(i_t_dex)
else:
time_dex_map[t_dex] = [i_t_dex]
time_dexes = set(time_dexes)
dx2 = 0.0
x1 = 0.0
x2 = 0.0
dt_over_tau = dt/tau
es = rng.normal(0., 1., nbins)*math.sqrt(dt_over_tau)
for i_time in range(nbins):
#The second term differs from Zeljko's equation by sqrt(2.)
#because he assumes stdev = sfint/sqrt(2)
dx1 = dx2
dx2 = -dx1*dt_over_tau + sfint['u']*es[i_time] + dx1
x1 = x2
x2 += dt
if i_time in time_dexes:
if isinstance(expmjd, numbers.Number):
dm_val = ((expmjd-start_date-t_delay)*(dx1-dx2)/time_dilation+dx2*x1-dx1*x2)/(x1-x2)
dMags[0][i_obj] = dm_val
else:
for i_time_out in time_dex_map[i_time]:
local_end = (expmjd_arr[i_time_out]-start_date-t_delay)/time_dilation
dm_val = (local_end*(dx1-dx2)+dx2*x1-dx1*x2)/(x1-x2)
dMags[0][i_obj][i_time_out] = dm_val
for i_filter, filter_name in enumerate(('g', 'r', 'i', 'z', 'y')):
for i_obj in valid_dexes[0]:
dMags[i_filter+1][i_obj] = dMags[0][i_obj]*params['agn_sf%s' % filter_name][i_obj]/params['agn_sfu'][i_obj]
return dMags
class VariabilityTwinkles(_VariabilityPointSources, TimeDelayVariability):
"""
This is a mixin which wraps the methods from the class Variability
into getters for InstanceCatalogs (specifically, InstanceCatalogs
with AGNs). Getters in this method should define columns named like
delta_columnName
where columnName is the name of the baseline (non-varying) magnitude
column to which delta_columnName will be added. The getters in the
photometry mixins will know to find these columns and add them to
columnName, provided that the columns here follow this naming convention.
Thus: merely including VariabilityTwinkles in the inheritance tree of
an InstanceCatalog daughter class will activate variability for any column
for which delta_columnName is defined.
"""
pass
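# --- Stand-alone sketch (illustrative, not part of the original module) ---
# The applyAgnTimeDelay method above integrates a damped random walk with
# timestep dt = tau/100; the minimal helper below reproduces that recursion for
# a single band with hypothetical parameters (it is not used by the classes).
def _drw_track(tau, sf, duration, seed=42):
    rng = np.random.RandomState(seed)
    dt = tau / 100.0
    nbins = int(math.ceil(duration / dt)) + 1
    es = rng.normal(0.0, 1.0, nbins) * math.sqrt(dt / tau)
    dx = np.zeros(nbins)
    for i in range(1, nbins):
        # dx_new = dx_old - dx_old*dt/tau + SF*eps*sqrt(dt/tau)
        dx[i] = dx[i - 1] - dx[i - 1] * dt / tau + sf * es[i]
    return dx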
| {
"repo_name": "LSSTDESC/Twinkles",
"path": "python/desc/twinkles/twinklesVariabilityMixins.py",
"copies": "2",
"size": "5426",
"license": "mit",
"hash": 1118112394027245000,
"line_mean": 39.1925925926,
"line_max": 123,
"alpha_frac": 0.5689273867,
"autogenerated": false,
"ratio": 3.4320050600885517,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5000932446788552,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pdb
from scipy.optimize import curve_fit
from scipy.optimize import minimize
plt.ioff()
font = {'weight' : 'bold',
'size' : 22}
matplotlib.rc('font', **font)
__all__ = ['linearFit','nonlinearFit','linearWithErrors',
'nonlinearWithErrorsFromFile','massAvSize',
'getSizesFromFile']
def linearFit(t,mu2):
"""Perform a linear fit to the mass-averaged cluster size.
Parameters
----------
t: numpy vector (length T)
array containing timesteps
mu2: numpy vector (length T)
containing mass-averaged cluster size mu2(t)
Returns
-------
tc: float
coagulation time
R2: float
R^2 value = 1 - SSres/SStot
Notes
-----
Fits mu2(t) = mu2(0) + K * t
where K = 2/tc
SSres is returned from linalg.lstsq
SStot = sum_i(y_i - <y_i>)^2 where y_i is the ith data point
"""
K = np.linalg.lstsq(np.reshape(t,[len(t),1]),
np.reshape(mu2 - mu2[0],[len(mu2),1]))
tc = 2/K[0][0][0]
ssres = K[1][0]
mu2av = np.mean(mu2)
sstot = ((mu2 - mu2av)*(mu2-mu2av)).sum()
R2 = 1 - ssres/sstot
return (tc,R2)
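# --- Usage sketch (illustrative, not part of the original module) ---
# linearFit recovers the coagulation time from mu2(t) = mu2(0) + (2/tc) * t;
# the synthetic data below uses tc = 50 with a little noise for demonstration.
if __name__ == '__main__':
    t_demo = np.arange(0.0, 100.0, 1.0)
    mu2_demo = 1.0 + (2.0 / 50.0) * t_demo + 0.01 * np.random.randn(len(t_demo))
    tc_fit, r2_fit = linearFit(t_demo, mu2_demo)
    print('tc ~ %.1f (R^2 = %.3f)' % (tc_fit, r2_fit))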
def minSSE(t,mu2s,tol=1e-4):
""" Helper function that finds the tc that minimizes SSE for a fit for a
set of runs
Parameters
----------
t: numpy vector (length T)
array containing timesteps
mu2s: numpy array [T x runs]
array containing mass-averaged cluster size values for a run
tol: float
tolerance for minimization
Returns
-------
tc: float
the optimal tc
etc: float
tolerance of fit (error in tc)
optsse: float
total best SSE for tc
lmbda: float
the optimal lambda
elmbda: float
error in lmbda as the standard deviation of the lmbdas for different
runs
"""
sz = np.shape(mu2s)
def sse(tc):
""" returns the sum-square-error of the best for lambda to Smol"""
sse = 0
def f(t,lmbda):
y = mu2s[0,run]*(1 + 2*t/tc)**(1/(1-lmbda))
return y
for run in range(sz[1]):
#pdb.set_trace()
popt,pcov = curve_fit(f,t,mu2s[:,run],
bounds=([-np.inf,2]),
p0=np.array([-1.]))
bestlambda = popt[0]
ssecurr = (mu2s[:,run] - f(t,bestlambda))\
*(mu2s[:,run] - f(t,bestlambda))
sse += ssecurr.sum()
return sse
#pdb.set_trace()
    runresult = minimize(sse,10,tol=tol,bounds=[(tol,np.inf)])
opttc = runresult.x
optsse = runresult.fun
lmbdas = np.zeros(sz[1])
for run in range(sz[1]):
def newf(t,lmbda):
"""Smoluchowski style fit with set tc"""
y = mu2s[0,run]*(1 + 2*t/opttc)**(1/(1-lmbda))
return y
#pdb.set_trace()
popt,pcov = curve_fit(newf,t,mu2s[:,run],bounds=([-np.inf,2]),
p0=np.array([-1.]))
lmbda = popt[0]
lmbdas[run] = lmbda
return (opttc,tol,optsse,np.mean(lmbdas),np.std(lmbdas))
def nonlinearFit(t,mu2s,plotstats=None,tol=1e-4):
""" Perform a nonlinear fit to the mass-averaged cluster size.
Parameters
----------
t: numpy vector (length T)
array containing timesteps
mu2s: numpy array [T x runs]
array containing the mass-averaged cluster size at each time step
for a number of independent runs
plotstats: array of data for plotting
None or [plotname,xlabel,ylabel,[marker1,...,markerN]]
if not none, save a plot of the data with the avg fit and std errors
tol: float
tolerance for minimization
Returns
-------
tc: float
coagulation time
etc: float
error in the coagulation time
sse: float
minimum SSE from searching for best tc
lmbda: float
Smoluchowski exponent
elmbda: float
error in the Smoluchowski exponent
Notes
-----
Performs a fit to mu2(t) = mu2(0)*(1+2t/tc)^(1/(1-lambda))
Uses scipy.optimize.curve_fit with sigmas in the y data taken from the
uncertainties computed from the different runs, unless there is only
one run
Note that by definition, the Smoluchowski exponent must be <=2
and the coagulation time must be >=0
"""
sz = np.shape(mu2s)
if plotstats is not None:
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(sz[1]):
#pdb.set_trace()
runl, = ax.plot(t,mu2s[:,i],plotstats[3][i]
,fillstyle='none')
ax.plot(t,np.mean(mu2s,axis=1),linewidth=2)
(tc,etc,sse,lmbda,elmbda) = minSSE(t,mu2s,tol)
def f(t,lmbda):
y = np.mean(mu2s[0,:])*(1 + 2*t/tc)**(1/(1-lmbda))
return y
if plotstats is not None:
p1 = f(t,lmbda-elmbda)
p2 = f(t,lmbda+elmbda)
mincurve = np.min(np.array([p1,p2]),axis=0)
maxcurve = np.max(np.array([p1,p2]),axis=0)
ax.plot(t,f(t,lmbda),'--',linewidth=2,color="black")
ax.fill_between(t,mincurve,maxcurve,facecolor='black',alpha=0.3)
plt.xlabel(plotstats[1])
plt.ylabel(plotstats[2])
ax.grid('on')
fig.savefig(plotstats[0],bbox_inches='tight')
plt.close()
return (tc,etc,sse,lmbda,elmbda)
def nonlinearWithErrorsFromFile(fnames,T,dt=1.0,plotstats=None,tstart=0,tend=-1):
""" Perform a nonlinear fit to a number of different independent sizes
and find the spread in the fit
Parameters
----------
fnames: list of strings
all the filenames containing the sizes
T: int
number of timesteps
tstart: int
timestep to start on, defaults to 0
tend: int
timestep to end on, if it's not -1
dt: float
how large a timestep is
plotstats: array of data for plotting
None or [plotname,xlabel,ylabel,[marker1,...,markerN]]
if not none, save a plot of the data with the avg fit and std errors
Returns
-------
tc: float
average coagulation time
    etc: float
        error in the average coagulation time
    sse: float
        minimum SSE found when searching for the best tc
lmbda: float
average Smoluchowski exponent
elmbda: float
error in Smoluchowski exponent
"""
if tend == -1:
tend = T + tstart
mu2s = np.zeros([tend-tstart,len(fnames)])
f = 0
for fname in fnames:
csizes = getSizesFromFile([fname],T)[tstart:tend,:]
mu2 = [massAvSize(csize) for csize in csizes]
mu2s[:,f] = mu2
f+=1
(tc,etc,sse,lmbda,elmbda) = nonlinearFit(dt*np.arange(tend-tstart),
mu2s,plotstats=plotstats)
return (tc,etc,sse,lmbda,elmbda)
def linearWithErrors(fnames,T,dt=1.0,fullreturn=False,plotstats=None,
tstart=0,tend=-1):
""" Perform a linear fit to a number of different independent sizes
and find the spread in the fit
Parameters
----------
fnames: list of strings
all the filenames containing the sizes
T: int
number of timesteps in the original file
tstart: int
timestep to start on
tend: int
timestep to end on, if it's not -1
dt: float
how large a timestep is
fullreturn: bool
whether to return the full set of mu2s
plotstats: array of data for plotting
None or [plotname,xlabel,ylabel,[marker1,...,markerN]]
if not none, save a plot of the data with the avg fit and std errors
Returns
-------
tc: float
average coagulation time
etc: float
        error in the average coagulation time
    mu2s: numpy array [T x (runs+1)]
        mass-averaged sizes for each run plus the pooled data in the last
        column; only returned if fullreturn is True
"""
if tend == -1:
tend = T+tstart
tcs = np.zeros(len(fnames))
i = 0
if plotstats is not None:
fig = plt.figure(1)
if fullreturn:
mu2s = np.zeros([tend-tstart,len(fnames)+1])
for fname in fnames:
csizes = getSizesFromFile([fname],T)[tstart:tend,:]
mu2 = [massAvSize(csize) for csize in csizes]
(tc,R2) = linearFit(dt * np.arange(tend-tstart),mu2)
if plotstats is not None:
ax = fig.add_subplot(111)
#pdb.set_trace()
runl, = ax.plot(dt * np.arange(tend-tstart),mu2,
plotstats[3][i],fillstyle='none')
runl.set_label('$R^2$ = {0}'.format(round(R2,2)))
#ax.legend()
if fullreturn:
mu2s[:,i] = mu2
tcs[i] = tc
i+=1
csizes = getSizesFromFile(fnames,T)[tstart:tend,:]
mu2 = [massAvSize(csize) for csize in csizes]
(tc,R2) = linearFit(dt * np.arange(tend-tstart),mu2)
if fullreturn:
mu2s[:,len(fnames)] = mu2
if plotstats is not None:
ax.plot(dt * np.arange(0,tend-tstart,0.1),
mu2[0] + (2/tc)*dt*np.arange(0,tend-tstart,0.1),
linestyle='--',linewidth=2,color='black')
#plt.tight_layout()
Ks = 2/tcs
sigma = np.std(Ks)
K = 2/tc
ax.fill_between(dt*np.arange(0,tend-tstart,0.1),mu2[0] \
+ (K-sigma)*dt*np.arange(0,tend-tstart,0.1),
mu2[0]+(K+sigma)*dt*np.arange(0,tend-tstart,0.1),
facecolor='black',alpha=0.3)
plt.xlabel(plotstats[1])
plt.ylabel(plotstats[2])
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc='upper center',
bbox_to_anchor=(0.5,-0.2))
ax.grid('on')
fig.savefig(plotstats[0], bbox_extra_artists=(lgd,),
bbox_inches='tight')
plt.close()
if fullreturn:
if plotstats is not None:
f = 0
for fname in fnames:
mu2curr = mu2s[:,f]
plt.figure()
plt.plot(dt*np.arange(0,tend-tstart,0.1),
mu2curr[0]+(2/tcs[f])*dt*np.arange(0,tend-tstart,0.1),
linestyle='--',linewidth=2,color='black')
plt.plot(dt*np.arange(0,tend-tstart),mu2curr,linewidth=2)
plt.plot(dt*np.arange(0,tend-tstart),mu2curr,'o')
plt.xlabel(plotstats[1])
plt.ylabel(plotstats[2])
plt.title('run '+str(f))
plt.savefig(plotstats[0]+str(f))
plt.close()
f+=1
return (tc,np.std(tcs),mu2s)
else:
return (tc,np.std(tcs))
def massAvSize(csizes):
"""
Given a cluster sizes list, returns the mass averaged cluster size
of the snapshot
Parameters
----------
csizes: numpy array as returned by idsToSizes
Returns
-------
mu2: float, the mass-averaged cluster size
"""
umass,counts = np.unique(csizes,return_counts=True)
mu2 = (umass*counts).sum() / (counts).sum()
return mu2
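# Hedged usage sketch (added for illustration, not part of the original module):
# with csizes = [1, 1, 3, 3, 3] (two molecules in clusters of size 1 and three
# molecules in one cluster of size 3) the mass-averaged size is
# (1*2 + 3*3)/5 = 2.2.
def _demo_massAvSize():
    print(massAvSize(np.array([1, 1, 3, 3, 3])))  # 2.2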
def getSizesFromFile(fnames,T):
""" create an array from a file or files of csizes data
Parameters
----------
fnames: list
can be any number of files containing csizes data at a list of
timesteps, but all files must have the same number of rows & columns
T: total number of timesteps
Returns
-------
csizes: numpy array [T x M]
an array where each number represents another molecule in a cluster
of the given size
"""
fname1 = fnames[0]
fid1 = open(fname1)
line1 = fid1.readline()
M = len(line1.split())
fid1.close()
csizes = np.zeros([T,M * len(fnames)])
f = 0
for fname in fnames:
fid = open(fname)
flines = fid.readlines()
fid.close()
for t in range(T):
fline = flines[t]
spline = [float(m) for m in fline.split()]
            try:
                csizes[t,(f*M):(f*M+M)] = spline
            except ValueError:
                print("Warning: could not assign row {} of {}; leaving zeros".format(t, fname))
f += 1
return csizes
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/smoluchowski.py",
"copies": "1",
"size": "12388",
"license": "mit",
"hash": -8126741620238998000,
"line_mean": 31.0932642487,
"line_max": 81,
"alpha_frac": 0.5505327737,
"autogenerated": false,
"ratio": 3.4748948106591864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.94108942364405,
"avg_score": 0.022906669583737205,
"num_lines": 386
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd # new
import sys # instead of os
import scipy.stats
from scipy.stats import gamma
import os
import scipy.stats as stats
# Relative path to subject 1 data
project_path = "../../"
path_to_data = project_path+"data/ds009/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
final_data = "../data/"
smooth_data = final_data + 'smooth/'
hrf_data = final_data + 'hrf/'
#sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
sys.path.append(location_of_functions)
sub_list = os.listdir(path_to_data)[1:]
from glm import glm_multiple, glm_diagnostics
# iv. import image viewing tool
from Image_Visualizing import present_3d
from noise_correction import mean_underlying_noise, fourier_predict_underlying_noise,fourier_creation
from hypothesis import t_stat_mult_regression
# Progress bar
toolbar_width=len(sub_list)
sys.stdout.write("GLM, : ")
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1)) # return to start of line, after '['
for i in sub_list:
img = nib.load(smooth_data+ i +"_bold_smoothed.nii")
data = img.get_data()
n_vols = data.shape[-1]
convolve = np.loadtxt(hrf_data+i+"_hrf.txt")
residual_final = np.zeros((data.shape))
t_final = np.zeros((data.shape[:-1]))
for j in range(data.shape[2]):
data_slice = data[:,:,j,:]
X = np.ones((n_vols,6))
X[:,1] = convolve[:,j]
X[:,2]=np.linspace(-1,1,num=X.shape[0]) #drift
X[:,3:]=fourier_creation(X.shape[0],3)[:,1:]
beta,t,df,p = t_stat_mult_regression(data_slice, X)
t = t[1,:]
MRSS, fitted, residuals = glm_diagnostics(beta, X, data_slice)
t_final[:,:,j] = t.reshape(data_slice.shape[:-1])
residual_final[:,:,j,:] = residuals.reshape(data_slice.shape)
np.save("../data/glm/t_stat/"+i+"_tstat.npy", t_final)
np.save("../data/glm/residual/"+i+"_residual.npy", residual_final)
sys.stdout.write("-")
sys.stdout.flush()
sys.stdout.write("\n")
| {
"repo_name": "reychil/project-alpha-1",
"path": "final/scripts/glm_final.py",
"copies": "1",
"size": "2456",
"license": "bsd-3-clause",
"hash": 1985297413861462800,
"line_mean": 27.9058823529,
"line_max": 101,
"alpha_frac": 0.6034201954,
"autogenerated": false,
"ratio": 3.218872870249017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9060343004984484,
"avg_score": 0.05239001213290662,
"num_lines": 85
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.linalg as npl
def glm(data_4d, conv):
"""
Return a tuple of the estimated coefficients in 4 dimensions and
the design matrix.
Parameters
----------
data_4d: numpy array of 4 dimensions
The image data of one subject
conv: numpy array of 1 dimension
The convolved time course
Note that the fourth dimension of `data_4d` (time or the number
    of volumes) must be the same as the length of `conv`.
Returns
-------
glm_results : tuple
Estimated coefficients in 4 dimensions and the design matrix.
"""
assert(len(conv) == data_4d.shape[-1])
X = np.ones((len(conv), 2))
X[:, 1] = conv
data_2d = np.reshape(data_4d, (-1, data_4d.shape[-1]))
B = npl.pinv(X).dot(data_2d.T)
B_4d = np.reshape(B.T, data_4d.shape[:-1] + (-1,))
return B_4d, X
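# Hedged usage sketch (added for illustration, not part of the original module):
# builds a tiny 2x2x2x10 data set whose voxel time courses are exactly
# 3 + 2*conv, so the coefficients returned by glm should be close to [3, 2]
# in every voxel.
def _demo_glm():
    conv = np.linspace(0., 1., 10)
    data_4d = 3. + 2. * conv * np.ones((2, 2, 2, 10))
    B_4d, X = glm(data_4d, conv)
    print(B_4d[0, 0, 0])  # approximately [3., 2.]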
def glm_diagnostics(B_4d, design, data_4d):
"""
Return a tuple of the MRSS in 3 dimensions, fitted values in 4
dimensions, and residuals in 4 dimensions.
Parameters
----------
B_4d: numpy array of 4 dimensions
The estimated coefficients
design: numpy array
The design matrix used to get the estimated coefficients
data_4d: numpy array of 4 dimensions
The corresponding image data
Returns
-------
diagnostics : tuple
MRSS (3d), fitted values (4d), and residuals (4d).
"""
B_2d = np.reshape(B_4d, (-1, B_4d.shape[-1])).T
data_2d = np.reshape(data_4d, (-1, data_4d.shape[-1]))
fitted = design.dot(B_2d)
residuals = data_2d.T - fitted
df = design.shape[0] - npl.matrix_rank(design)
MRSS = (residuals**2).sum(0)/df
MRSS_3d = np.reshape(MRSS.T, data_4d.shape[:-1])
fitted_4d = np.reshape(fitted.T, data_4d.shape)
residuals_4d = np.reshape(residuals.T, data_4d.shape)
return MRSS_3d, fitted_4d, residuals_4d
# new multiple regression function (takes in slightly different things)
def glm_multiple(data_4d, X):
"""
Return a tuple of the estimated coefficients in 4 dimensions and
the design matrix.
Parameters
----------
data_4d: numpy array of 4 dimensions
The image data of one subject
X: numpy array of 2 dimensions
model matrix of the form ([1, x_1, x_2,...])
Note that the fourth dimension of `data_4d` (time or the number
of volumes) must be the same as the number of rows of `X`.
Returns
-------
glm_results : tuple
Estimated coefficients in 4 dimensions and the design matrix (same as put in).
"""
assert(X.shape[0] == data_4d.shape[-1])
data_2d = np.reshape(data_4d, (-1, data_4d.shape[-1]))
B = npl.pinv(X).dot(data_2d.T)
B_4d = np.reshape(B.T, data_4d.shape[:-1] + (-1,))
return B_4d, X
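# Hedged usage sketch (added for illustration, not part of the original module):
# reuses the noiseless toy data idea from _demo_glm but with an explicit
# two-column design matrix, then checks the diagnostics; MRSS should be close
# to zero everywhere because the model fits the data exactly.
def _demo_glm_multiple():
    conv = np.linspace(0., 1., 10)
    X = np.ones((10, 2))
    X[:, 1] = conv
    data_4d = 3. + 2. * conv * np.ones((2, 2, 2, 10))
    B_4d, X_out = glm_multiple(data_4d, X)
    MRSS_3d, fitted_4d, residuals_4d = glm_diagnostics(B_4d, X_out, data_4d)
    print(MRSS_3d.max())  # approximately 0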
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/glm.py",
"copies": "1",
"size": "2915",
"license": "bsd-3-clause",
"hash": -2097842571841612800,
"line_mean": 29.6842105263,
"line_max": 86,
"alpha_frac": 0.6113207547,
"autogenerated": false,
"ratio": 3.327625570776256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9317192136956949,
"avg_score": 0.02435083770386127,
"num_lines": 95
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.random
from nose.tools import assert_equal
import skimage.draw as skd
from scipy.ndimage.morphology import binary_dilation
import skbeam.core.image as nimage
def test_find_ring_center_acorr_1D():
for x in [110, 150, 190]:
for y in [110, 150, 190]:
yield (_helper_find_rings,
nimage.find_ring_center_acorr_1D,
(x, y), [10, 25, 50])
def _helper_find_rings(proc_method, center, radii_list):
x, y = center
image_size = (256, 265)
numpy.random.seed(42)
noise = np.random.rand(*image_size)
tt = np.zeros(image_size)
for r in radii_list:
rr, cc = skd.circle_perimeter(x, y, r)
tt[rr, cc] = 1
tt = binary_dilation(tt, structure=np.ones((3, 3))).astype(float) * 100
tt = tt + noise
res = proc_method(tt)
assert_equal(res, center)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| {
"repo_name": "licode/scikit-xray",
"path": "skbeam/core/tests/test_image.py",
"copies": "4",
"size": "1049",
"license": "bsd-3-clause",
"hash": -8291035473804707000,
"line_mean": 27.3513513514,
"line_max": 75,
"alpha_frac": 0.6177311725,
"autogenerated": false,
"ratio": 3.085294117647059,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5703025290147059,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import logging
def _read_amira(src_file):
"""
Reads all information contained within standard AmiraMesh data sets.
Separate the header information from the image/volume, data.
Parameters
----------
src_file : str
The path and file name pointing to the AmiraMesh file to be loaded.
Returns
-------
am_header : list of strings
This list contains all of the raw information contained in the
AmiraMesh file header. Contains all of the raw header information
am_data : str
A compiled string containing all of the image array data, that was
stored in the source AmiraMesh data file. Contains the raw image data
"""
am_header = []
am_data = []
with open(os.path.normpath(src_file), 'r') as input_file:
while True:
line = input_file.readline()
am_header.append(line)
if (line == '# Data section follows\n'):
input_file.readline()
break
am_data = input_file.read()
return am_header, am_data
def _amira_data_to_numpy(am_data, header_dict, flip_z=True):
"""
Transform output of `_read_amira` to a numpy array of the dtype listed in
the AmiraMesh header dictionary. The standard format for Avizo Binary
files is IEEE binary. Big or little endian-ness is stipulated in the header
    information, and is assessed and taken into account by this function
    during the conversion process.
Parameters
----------
am_data : str
String object containing all of the image array data, formatted as IEEE
binary. Current dType options include:
float
short
ushort
byte
header_dict : dict
Metadata dictionary containing all relevant attributes pertaining to
the image array. This metadata dictionary is the output from the
function `_create_md_dict`.
flip_z : bool, optional.
Defaults to True
This option is included because the .am data sets evaluated thus far
have opposite z-axis indexing than numpy arrays. This switch currently
defaults to "True" in order to ensure that z-axis indexing remains
consistent with data processed using Avizo.
Setting this switch to "True" will flip the z-axis during processing,
and a value of "False" will keep the array is initially assigned during
the array reshaping step.
Returns
-------
output : ndarray
Numpy ndarray containing the image data converted from the AmiraMesh
file. This data array is ready for further processing using the NSLS-II
function library, or other operations able to operate on numpy arrays.
"""
Zdim = header_dict['array_dimensions']['z_dimension']
Ydim = header_dict['array_dimensions']['y_dimension']
Xdim = header_dict['array_dimensions']['x_dimension']
# Strip out null characters from the string of binary values
# Dictionary of the encoding types for AmiraMesh files
am_format_dict = {'BINARY-LITTLE-ENDIAN': '<',
'BINARY': '>',
'ASCII': 'unknown'}
# Dictionary of the data types encountered so far in AmiraMesh files
am_dtype_dict = {'float': 'f4',
'short': 'h4',
'ushort': 'H4',
'byte': 'b'}
# Had to split out the stripping of new line characters and conversion
# of the original string data based on whether source data is BINARY
# format or ASCII format. These format types require different stripping
# tools and different string conversion tools.
if header_dict['data_format'] == 'BINARY-LITTLE-ENDIAN':
data_strip = am_data.strip('\n')
flt_values = np.fromstring(
data_strip, (am_format_dict[header_dict['data_format']] +
am_dtype_dict[header_dict['data_type']]))
if header_dict['data_format'] == 'ASCII':
data_strip = am_data.translate(None, '\n')
string_list = data_strip.split(" ")
string_list = string_list[0:(len(string_list)-2)]
flt_values = np.array(
string_list).astype(am_dtype_dict[header_dict['data_type']])
# Resize the 1D array to the correct ndarray dimensions
# Note that resize is in-place whereas reshape is not
flt_values.resize(Zdim, Ydim, Xdim)
output = flt_values
if flip_z:
        output = flt_values[::-1, :, :]
return output
def _clean_amira_header(header_list):
"""
Strip the string list of all "empty" characters,including new line
characters ('\n') and empty lines. Splits each header line (which
originally is stored as a single string) into individual words, numbers or
characters, using spaces between words as the separating operator. The
output of this function is used to generate the metadata dictionary for
the image data set.
Parameters
----------
header_list : list of strings
This is the header output from the function _read_amira()
Returns
-------
clean_header : list of strings
This header list has been stripped and sorted and is now ready for
populating the metadata dictionary for the image data set.
"""
clean_header = []
for row in header_list:
split_header = filter(None, [word.translate(None, ',"')
for word in row.strip('\n').split()])
clean_header.append(split_header)
return clean_header
def _create_md_dict(clean_header):
"""
    Populates a dictionary with all information pertinent to the image
data set that was originally stored in the AmiraMesh file.
Parameters
----------
clean_header : list of strings
This is the output from the _sort_amira_header function.
"""
# Avizo specific metadata
md_dict = {'software_src': clean_header[0][1],
'data_format': clean_header[0][2],
'data_format_version': clean_header[0][3]}
if md_dict['data_format'] == '3D':
md_dict['data_format'] = clean_header[0][3]
md_dict['data_format_version'] = clean_header[0][4]
for header_line in clean_header:
hl = header_line
if 'define' in hl:
hl = hl
md_dict['array_dimensions'] = {
'x_dimension': int(hl[hl.index('define') + 2]),
'y_dimension': int(hl[hl.index('define') + 3]),
'z_dimension': int(hl[hl.index('define') + 4])}
elif 'Content' in hl:
md_dict['data_type'] = hl[hl.index('Content') + 2]
elif 'CoordType' in hl:
md_dict['coord_type'] = hl[hl.index('CoordType') + 1]
elif 'BoundingBox' in hl:
hl = hl
md_dict['bounding_box'] = {
'x_min': float(hl[hl.index('BoundingBox') + 1]),
'x_max': float(hl[hl.index('BoundingBox') + 2]),
'y_min': float(hl[hl.index('BoundingBox') + 3]),
'y_max': float(hl[hl.index('BoundingBox') + 4]),
'z_min': float(hl[hl.index('BoundingBox') + 5]),
'z_max': float(hl[hl.index('BoundingBox') + 6])}
# Parameter definition for voxel resolution calculations
bbox = [md_dict['bounding_box']['x_min'],
md_dict['bounding_box']['x_max'],
md_dict['bounding_box']['y_min'],
md_dict['bounding_box']['y_max'],
md_dict['bounding_box']['z_min'],
md_dict['bounding_box']['z_max']]
dims = [md_dict['array_dimensions']['x_dimension'],
md_dict['array_dimensions']['y_dimension'],
md_dict['array_dimensions']['z_dimension']]
# Voxel resolution calculation
resolution_list = []
for index in np.arange(len(dims)):
if dims[index] > 1:
resolution_list.append(
(bbox[(2*index+1)] - bbox[(2*index)]) /
(dims[index] - 1))
else:
resolution_list.append(0)
# isotropy determination (isotropic res, or anisotropic res)
if (resolution_list[1]/resolution_list[0] > 0.99 and
resolution_list[2]/resolution_list[0] > 0.99 and
resolution_list[1]/resolution_list[0] < 1.01 and
resolution_list[2]/resolution_list[0] < 1.01):
md_dict['resolution'] = {'zyx_value': resolution_list[0],
'type': 'isotropic'}
else:
md_dict['resolution'] = {
'zyx_value': (resolution_list[2],
resolution_list[1],
resolution_list[0]),
'type': 'anisotropic'}
elif 'Units' in hl:
try:
units = str(hl[hl.index('Units') + 2])
md_dict['units'] = units
            except (IndexError, ValueError):
logging.debug('Units value undefined in source data set. '
'Reverting to default units value of pixels')
md_dict['units'] = 'pixels'
elif 'Coordinates' in hl:
coords = str(hl[hl.index('Coordinates') + 1])
md_dict['coordinates'] = coords
return md_dict
def load_amiramesh(file_path):
"""
Load and convert an AmiraMesh binary file to a numpy array.
Parameters
----------
file_path : str
The path and file name of the AmiraMesh file to be loaded.
Returns
-------
md_dict : dict
Dictionary containing all pertinent header information associated with
the data set.
np_array : ndarray
An ndarray containing the image data set to be loaded. Values contained
in the resulting volume are set to be of float data type by default.
"""
header, data = _read_amira(file_path)
clean_header = _clean_amira_header(header)
md_dict = _create_md_dict(clean_header)
np_array = _amira_data_to_numpy(data, md_dict)
return md_dict, np_array
| {
"repo_name": "CJ-Wright/scikit-beam",
"path": "skbeam/io/avizo_io.py",
"copies": "7",
"size": "10337",
"license": "bsd-3-clause",
"hash": 2091914963800492000,
"line_mean": 39.537254902,
"line_max": 79,
"alpha_frac": 0.5803424591,
"autogenerated": false,
"ratio": 4.149739060618225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00019607843137254904,
"num_lines": 255
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from lsst.sims.catalogs.db import CatalogDBObject
from lsst.sims.catUtils.utils import ObservationMetaDataGenerator
from lsst.sims.catUtils.exampleCatalogDefinitions import DefaultPhoSimHeaderMap
from lsst.sims.photUtils import BandpassDict, SedList
from lsst.sims.photUtils.SignalToNoise import calcSNR_m5
from lsst.sims.photUtils.PhotometricParameters import PhotometricParameters
from desc.monitor.truthCatalogDefs import TruthCatalogPoint
__all__ = ["StarCacheDBObj", "TrueStars"]
class StarCacheDBObj(CatalogDBObject):
"""
CatalogDBObject for the stars in the simulation "truth" database.
"""
tableid = 'star_cache_table'
host = None
port = None
driver = 'sqlite'
objectTypeId = 4
idColKey = 'simobjid'
raColName = 'ra'
decColName = 'decl'
columns = [('id', 'simobjid', int),
('raJ2000', 'ra*PI()/180.'),
('decJ2000', 'decl*PI()/180.'),
('glon', 'gal_l*PI()/180.'),
('glat', 'gal_b*PI()/180.'),
('properMotionRa', '(mura/(1000.*3600.))*PI()/180.'),
('properMotionDec', '(mudecl/(1000.*3600.))*PI()/180.'),
('parallax', 'parallax*PI()/648000000.'),
('galacticAv', 'CONVERT(float, ebv*3.1)'),
('radialVelocity', 'vrad'),
('variabilityParameters', 'varParamStr', str, 256),
('sedFilename', 'sedfilename', str, 256)]
class TrueStars(object):
"""
Gets the stars out of the simulation "truth" database for a specified set
of visits.
"True" in this case refers to the values that come from our LSST
CATSIM database. This CATSIM database stores the LSST simulated universe
model that we use to provide inputs to LSST simulations. It is important
to note that this means that "true" does not refer to actual stars in
sky, but to the known inputs to our simulations. More information
on the LSST Simulations can be found here: bit.ly/lsst-sims-doc.
Note : RA, DEC values in "truth" catalogs are J2000 coordinates. Flux
values in final output here are in nanomaggies.
Parameters
----------
dbConn : dbInterface instance
This is a connection to a cached database of the simulation inputs.
opsimDB_filename : str
The location of the opsim database used with the simulation.
"""
def __init__(self, dbConn, opsimDB_filename):
self.dbConn = dbConn
self.opsimDB = opsimDB_filename
# Set up OpSim database (from Twinkles/bin/generatePhosimInput.py)
self.obs_gen = ObservationMetaDataGenerator(database=self.opsimDB,
driver='sqlite')
def get_true_stars(self, for_obsHistIds=None, catalog_constraints=None):
"""
Get all the fluxes for stars in all visits specified.
Parameters
----------
for_obsHistIds : int or list of ints or None, default=None
Can specify a subset of visits. If set to None will get it from the
file in the repo located at monitor/data/selectedVisits.csv
catalog_constraints : str or None, default=None
Specify SQL constraints on the sims catalog used as
the "truth" input.
Returns
----------
star_df : pandas dataframe
Stores all the star information for the simulation inputs across
the desired visits
"""
if for_obsHistIds is None:
survey_info = np.genfromtxt('../data/selectedVisits.csv',
names=True, delimiter=',')
for_obsHistIds = survey_info['obsHistID']
else:
for_obsHistIds = np.ravel(for_obsHistIds)
obs_metadata_list = []
visit_on = 0
for obsHistID in for_obsHistIds:
if visit_on % 100 == 0:
print("Generated %i out of %i obs_metadata" %
(visit_on+1, len(for_obsHistIds)))
visit_on += 1
obs_metadata_list.append(self.obs_gen.getObservationMetaData(
obsHistID=obsHistID,
fieldRA=(53, 54),
fieldDec=(-29, -27),
boundLength=0.3)[0])
star_df = pd.DataFrame(columns=['uniqueId', 'ra', 'dec', 'filter',
'true_flux', 'true_flux_error',
'obsHistId'])
bp_dict = BandpassDict.loadTotalBandpassesFromFiles()
bp_indices = {}
for bp in list(enumerate(bp_dict.keys())):
bp_indices[bp[1]] = bp[0]
column_names = None
seds_loaded = False
visit_on = 0
for obs_metadata in obs_metadata_list:
if visit_on % 100 == 0:
print("Generated fluxes for %i out of %i visits" %
(visit_on+1, len(for_obsHistIds)))
visit_on += 1
star_cat = TruthCatalogPoint(self.dbConn,
obs_metadata=obs_metadata,
constraint=catalog_constraints)
if column_names is None:
column_names = [x for x in star_cat.iter_column_names()]
star_cat.phoSimHeaderMap = DefaultPhoSimHeaderMap
chunk_data = []
for line in star_cat.iter_catalog():
chunk_data.append(line)
chunk_data = pd.DataFrame(chunk_data, columns=column_names)
# All SEDs will be the same since we are looking at the same point
# in the sky and mag_norms will be the same for stars.
if seds_loaded is False:
sed_list = SedList(chunk_data['sedFilepath'],
chunk_data['phoSimMagNorm'],
specMap=None,
galacticAvList=chunk_data['galacticAv'])
seds_loaded = True
mag_array = bp_dict.magArrayForSedList(sed_list)
flux_array = bp_dict.fluxArrayForSedList(sed_list)
phot_params = PhotometricParameters()
visit_filter = obs_metadata.OpsimMetaData['filter']
# Get flux and convert to nanomaggies
flux_array_visit = flux_array[visit_filter]/3.631e-06
five_sigma_depth = obs_metadata.OpsimMetaData['fiveSigmaDepth']
snr, gamma = calcSNR_m5(mag_array[visit_filter],
bp_dict[visit_filter],
five_sigma_depth,
phot_params)
flux_error = flux_array_visit/snr
obs_hist_id = obs_metadata.OpsimMetaData['obsHistID']
visit_df = pd.DataFrame(np.array([chunk_data['uniqueId'],
chunk_data['raJ2000'],
chunk_data['decJ2000'],
[visit_filter]*len(chunk_data),
flux_array_visit, flux_error,
[obs_hist_id]*len(chunk_data)]).T,
columns=['uniqueId', 'ra', 'dec', 'filter',
'true_flux', 'true_flux_error',
'obsHistId'])
star_df = star_df.append(visit_df, ignore_index=True)
star_df['uniqueId'] = pd.to_numeric(star_df['uniqueId'])
star_df['ra'] = pd.to_numeric(star_df['ra'])
star_df['dec'] = pd.to_numeric(star_df['dec'])
star_df['true_flux'] = pd.to_numeric(star_df['true_flux'])
t_f_error = 'true_flux_error'
star_df[t_f_error] = pd.to_numeric(star_df[t_f_error])
star_df['obsHistId'] = pd.to_numeric(star_df['obsHistId'])
return star_df
def write_to_db(self, star_df, filename, table_name='stars', **kwargs):
"""
Write self.star_df to a sqlite database.
Parameters
----------
star_df : pandas dataframe
Stores all the star information for the simulation inputs across
the desired visits
filename : str
File name to use for the sqlite database.
table_name : str, default='stars'
Table name within the sqlite database for the star_df info.
**kwargs
Keyword arguments for the pandas `to_sql` function.
"""
disk_engine = create_engine('sqlite:///%s' % filename)
        star_df.to_sql(table_name, disk_engine, **kwargs)
| {
"repo_name": "DarkEnergyScienceCollaboration/Monitor",
"path": "python/desc/monitor/createTruthDB.py",
"copies": "2",
"size": "9341",
"license": "bsd-3-clause",
"hash": 5937648328266738000,
"line_mean": 44.5658536585,
"line_max": 160,
"alpha_frac": 0.5200727973,
"autogenerated": false,
"ratio": 4.302625518194381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00041906348137651846,
"num_lines": 205
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import scipy.optimize as opt
from scipy.special import erf
from .due import due, Doi
__all__ = ["Model", "Fit", "opt_err_func", "transform_data", "cumgauss"]
# Use duecredit (duecredit.org) to provide a citation to relevant work to
# be cited. This does nothing, unless the user has duecredit installed,
# And calls this with duecredit (as in `python -m duecredit script.py`):
due.cite(Doi("10.1167/13.9.30"),
description="Template project for small scientific Python projects",
tags=["reference-implementation"],
path='bha')
def transform_data(data):
"""
Function that takes experimental data and gives us the
dependent/independent variables for analysis
Parameters
----------
data : Pandas DataFrame or string.
If this is a DataFrame, it should have the columns `contrast1` and
`answer` from which the dependent and independent variables will be
extracted. If this is a string, it should be the full path to a csv
file that contains data that can be read into a DataFrame with this
specification.
Returns
-------
x : array
The unique contrast differences.
y : array
        The proportion of '1' answers in each contrast difference
n : array
The number of trials in each x,y condition
"""
if isinstance(data, str):
data = pd.read_csv(data)
contrast1 = data['contrast1']
answers = data['answer']
x = np.unique(contrast1)
y = []
n = []
for c in x:
idx = np.where(contrast1 == c)
n.append(float(len(idx[0])))
answer1 = len(np.where(answers[idx[0]] == 1)[0])
y.append(answer1 / n[-1])
return x, y, n
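# Hedged usage sketch (added for illustration, not part of the original module):
# builds a tiny synthetic DataFrame with two contrast levels and three trials
# each; transform_data returns the unique contrasts, the fraction of
# answer == 1 per level, and the trial counts.
def _demo_transform_data():
    data = pd.DataFrame({'contrast1': [0.1, 0.1, 0.1, 0.2, 0.2, 0.2],
                         'answer': [1, 2, 1, 2, 2, 1]})
    x, y, n = transform_data(data)
    print(x)  # [0.1 0.2]
    print(y)  # approximately [0.667, 0.333]
    print(n)  # [3.0, 3.0]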
def cumgauss(x, mu, sigma):
"""
The cumulative Gaussian at x, for the distribution with mean mu and
standard deviation sigma.
Parameters
----------
x : float or array
The values of x over which to evaluate the cumulative Gaussian function
mu : float
The mean parameter. Determines the x value at which the y value is 0.5
sigma : float
        The standard deviation parameter. Determines the slope of the curve at
        the point of deflection
Returns
-------
g : float or array
        The cumulative gaussian with mean $\\mu$ and standard deviation $\\sigma$
evaluated at all points in `x`.
Notes
-----
Based on:
http://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution_function
The cumulative Gaussian function is defined as:
.. math::
\\Phi(x) = \\frac{1}{2} [1 + erf(\\frac{x}{\\sqrt{2}})]
Where, $erf$, the error function is defined as:
.. math::
        erf(x) = \\frac{1}{\\sqrt{\\pi}} \\int_{-x}^{x} e^{-t^2} dt
"""
return 0.5 * (1 + erf((x - mu) / (np.sqrt(2) * sigma)))
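# Hedged usage sketch (added for illustration, not part of the original module):
# the cumulative Gaussian is 0.5 at x = mu and approaches 1 a few sigma above mu.
def _demo_cumgauss():
    x = np.array([0.0, 0.5, 3.0])
    print(cumgauss(x, mu=0.5, sigma=1.0))  # approximately [0.31, 0.5, 0.99]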
def opt_err_func(params, x, y, func):
"""
Error function for fitting a function using non-linear optimization
Parameters
----------
params : tuple
A tuple with the parameters of `func` according to their order of
input
x : float array
An independent variable.
y : float array
The dependent variable.
func : function
A function with inputs: `(x, *params)`
Returns
-------
float array
The marginals of the fit to x/y given the params
"""
return y - func(x, *params)
class Model(object):
""" Class for fitting cumulative Gaussian functions to data"""
def __init__(self, func=cumgauss):
""" Initialize a model object
Parameters
----------
data : Pandas DataFrame
Data from a subjective contrast judgement experiment
func : callable, optional
A function that relates x and y through a set of parameters.
Default: :func:`cumgauss`
"""
self.func = func
def fit(self, x, y, initial=[0.5, 1]):
"""
Fit a Model to data
Parameters
----------
x : float or array
The independent variable: contrast values presented in the
experiment
y : float or array
The dependent variable
Returns
-------
fit : :class:`Fit` instance
A :class:`Fit` object that contains the parameters of the model.
"""
params, _ = opt.leastsq(opt_err_func, initial,
args=(x, y, self.func))
return Fit(self, params)
class Fit(object):
"""
Class for representing a fit of a model to data
"""
def __init__(self, model, params):
"""
Initialize a :class:`Fit` object
Parameters
----------
model : a :class:`Model` instance
An object representing the model used
params : array or list
The parameters of the model evaluated for the data
"""
self.model = model
self.params = params
def predict(self, x):
"""
Predict values of the dependent variable based on values of the
        independent variable.
Parameters
----------
x : float or array
Values of the independent variable. Can be values presented in
the experiment. For out-of-sample prediction (e.g. in
cross-validation), these can be values
that were not presented in the experiment.
Returns
-------
y : float or array
Predicted values of the dependent variable, corresponding to
values of the independent variable.
"""
return self.model.func(x, *self.params)
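# Hedged usage sketch (added for illustration, not part of the original module):
# fits the default cumulative-Gaussian Model to noiseless samples generated
# with mu = 0.2 and sigma = 0.5; the recovered parameters should be close to
# those values.
def _demo_model_fit():
    x = np.linspace(-1, 1, 50)
    y = cumgauss(x, 0.2, 0.5)
    fit = Model().fit(x, y)
    print(fit.params)        # approximately [0.2, 0.5]
    print(fit.predict(0.2))  # approximately 0.5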
| {
"repo_name": "emaudes/bha",
"path": "bha/bha.py",
"copies": "1",
"size": "5752",
"license": "mit",
"hash": -1817461935733462500,
"line_mean": 26.2606635071,
"line_max": 85,
"alpha_frac": 0.5895340751,
"autogenerated": false,
"ratio": 4.334589299171062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5424123374271062,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
def read_process(filname, sep="\t"):
col_names = ["user", "item", "rate", "st"]
df = pd.read_csv(filname, sep=sep, header=None, names=col_names, engine='python')
df["user"] -= 1
df["item"] -= 1
for col in ("user", "item"):
df[col] = df[col].astype(np.int32)
df["rate"] = df["rate"].astype(np.float32)
return df
class ShuffleIterator(object):
"""
Randomly generate batches
"""
def __init__(self, inputs, batch_size=10):
self.inputs = inputs
self.batch_size = batch_size
self.num_cols = len(self.inputs)
self.len = len(self.inputs[0])
self.inputs = np.transpose(np.vstack([np.array(self.inputs[i]) for i in range(self.num_cols)]))
def __len__(self):
return self.len
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
ids = np.random.randint(0, self.len, (self.batch_size,))
out = self.inputs[ids, :]
return [out[:, i] for i in range(self.num_cols)]
class OneEpochIterator(ShuffleIterator):
"""
Sequentially generate one-epoch batches, typically for test data
"""
def __init__(self, inputs, batch_size=10):
super(OneEpochIterator, self).__init__(inputs, batch_size=batch_size)
if batch_size > 0:
self.idx_group = np.array_split(np.arange(self.len), np.ceil(self.len / batch_size))
else:
self.idx_group = [np.arange(self.len)]
self.group_id = 0
def next(self):
if self.group_id >= len(self.idx_group):
self.group_id = 0
raise StopIteration
out = self.inputs[self.idx_group[self.group_id], :]
self.group_id += 1
return [out[:, i] for i in range(self.num_cols)]
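# Hedged usage sketch (added for illustration, not part of the original module):
# feeds two parallel toy columns through both iterators; ShuffleIterator yields
# random batches of rows, while OneEpochIterator walks through the data exactly
# once in order.
def _demo_iterators():
    users = np.arange(10)
    items = np.arange(10, 20)
    print(ShuffleIterator([users, items], batch_size=4).next())
    for batch in OneEpochIterator([users, items], batch_size=4):
        print(batch)  # three sequential batches covering all ten rows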
| {
"repo_name": "songgc/TF-recomm",
"path": "dataio.py",
"copies": "1",
"size": "1896",
"license": "apache-2.0",
"hash": -1523718131768878800,
"line_mean": 29.5806451613,
"line_max": 103,
"alpha_frac": 0.5849156118,
"autogenerated": false,
"ratio": 3.3978494623655915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.939957535078312,
"avg_score": 0.01663794467649426,
"num_lines": 62
} |
from __future__ import absolute_import, division, print_function
import numpy as np
# import plottool_ibeis.draw_func2 as df2
from plottool_ibeis import fig_presenter
#from plottool_ibeis import custom_figure
#from plottool_ibeis import custom_constants
#from os.path import join
import utool as ut
ut.noinject(__name__, '[plot_helpers]')
#(print, print_, printDBG, rrr, profile) = ut.inject(__name__, '[plot_helpers]', DEBUG=False)
SIFT_OR_VECFIELD = ut.get_argval('--vecfield', type_=bool)
def draw():
fig_presenter.draw()
#def dump_figure(dumpdir, subdir=None, quality=False, overwrite=False, verbose=2,
# reset=True):
# """ Dumps figure to disk based on the figurename """
# if quality is True:
# custom_constants.FIGSIZE = custom_constants.golden_wh2(14)
# #custom_constants.DPI = 120
# custom_constants.DPI = 120
# #custom_constants.FIGSIZE = custom_constants.golden_wh2(12)
# #custom_constants.DPI = 120
# custom_constants.FONTS.figtitle = custom_constants.FONTS.small
# elif quality is False:
# #custom_constants.FIGSIZE = custom_constants.golden_wh2(8)
# #custom_constants.FIGSIZE = custom_constants.golden_wh2(14)
# #custom_constants.DPI = 100
# custom_constants.FIGSIZE = custom_constants.golden_wh2(8)
# custom_constants.DPI = 90
# custom_constants.FONTS.figtitle = custom_constants.FONTS.smaller
# fpath = dumpdir
# if subdir is not None:
# fpath = join(fpath, subdir)
# ut.ensurepath(fpath)
# fpath_clean = custom_figure.save_figure(fpath=fpath, usetitle=True, overwrite=overwrite, verbose=verbose)
# return fpath_clean
def get_square_row_cols(nSubplots, max_cols=None, fix=False, inclusive=True):
r"""
Args:
        nSubplots (int): number of subplots to lay out
        max_cols (int, optional): maximum number of columns allowed
Returns:
tuple: (None, None)
CommandLine:
python -m plottool_ibeis.plot_helpers --test-get_square_row_cols
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.plot_helpers import * # NOQA
>>> # build test data
>>> nSubplots = 9
>>> nSubplots_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
>>> max_cols = None
>>> # execute function
>>> rc_list = [get_square_row_cols(nSubplots, fix=True) for nSubplots in nSubplots_list]
>>> # verify results
>>> result = repr(np.array(rc_list).T)
>>> print(result)
array([[1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3],
[1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4]])
"""
if nSubplots == 0:
return 0, 0
if inclusive:
rounder = np.ceil
else:
rounder = np.floor
if fix:
# This function is very broken, but it might have dependencies
# this is the correct version
nCols = int(rounder(np.sqrt(nSubplots)))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
else:
# This is the clamped num cols version
# probably used in ibeis.viz
if max_cols is None:
max_cols = 5
if nSubplots in [4]:
max_cols = 2
if nSubplots in [5, 6, 7]:
max_cols = 3
if nSubplots in [8]:
max_cols = 4
nCols = int(min(nSubplots, max_cols))
#nCols = int(min(rounder(np.sqrt(nrids)), 5))
nRows = int(rounder(nSubplots / nCols))
return nRows, nCols
def get_plotdat(ax, key, default=None):
""" returns internal property from a matplotlib axis """
_plotdat = get_plotdat_dict(ax)
val = _plotdat.get(key, default)
return val
def set_plotdat(ax, key, val):
""" sets internal property to a matplotlib axis """
_plotdat = get_plotdat_dict(ax)
_plotdat[key] = val
def del_plotdat(ax, key):
""" sets internal property to a matplotlib axis """
_plotdat = get_plotdat_dict(ax)
if key in _plotdat:
del _plotdat[key]
def get_plotdat_dict(ax):
""" sets internal property to a matplotlib axis """
if '_plotdat' not in ax.__dict__:
ax.__dict__['_plotdat'] = {}
plotdat_dict = ax.__dict__['_plotdat']
return plotdat_dict
def get_bbox_centers(bbox_list):
bbox_centers = np.array([np.array([x + (w / 2), y + (h / 2)])
for (x, y, w, h) in bbox_list])
return bbox_centers
def qt4ensure():
qtensure()
# if ut.inIPython():
# import IPython
# #IPython.get_ipython().magic('pylab qt4')
# IPython.get_ipython().magic('pylab qt4 --no-import-all')
def qtensure():
import guitool_ibeis as gt
if ut.inIPython():
import IPython
ipython = IPython.get_ipython()
if ipython is None:
# we must have exited ipython at some point
return
if gt.__PYQT__.GUITOOL_PYQT_VERSION == 5:
"""
sudo apt-get install python3-pyqt5.qtsvg
"""
# import os
# os.environ['QT_API'] = 'pyqt5'
# import matplotlib
# matplotlib.use('Qt5Agg')
# IPython.get_ipython().magic('matplotlib qt5')
#IPython.get_ipython().magic('pylab qt4')
ipython.magic('pylab qt5 --no-import-all')
else:
#IPython.get_ipython().magic('pylab qt4')
ipython.magic('pylab qt4 --no-import-all')
ensureqt = qt4ensure
#==========================#
# --- TESTING FUNCS --- #
#==========================#
def kp_info(kp):
import vtool_ibeis.keypoint as ktool
kpts = np.array([kp])
xy_str = ktool.get_xy_strs(kpts)[0]
shape_str = ktool.get_shape_strs(kpts)[0]
ori_ = ktool.get_oris(kpts)[0]
ori_str = 'ori=%.2f' % ori_
scale = ktool.get_scales(kpts)[0]
return xy_str, shape_str, scale, ori_str
| {
"repo_name": "Erotemic/plottool",
"path": "plottool_ibeis/plot_helpers.py",
"copies": "1",
"size": "5803",
"license": "apache-2.0",
"hash": 8426317602161440000,
"line_mean": 30.8846153846,
"line_max": 110,
"alpha_frac": 0.5803894537,
"autogenerated": false,
"ratio": 3.2619449128724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43423343665723996,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
import re
import sys
import copy
from periodictable import formula
from qtpy.QtWidgets import QMainWindow, QApplication
from addie.utilities import load_ui
from qtpy import QtGui
from addie.processing.mantid.master_table.table_row_handler import TableRowHandler
from addie.processing.mantid.master_table.periodic_table.isotopes_handler import IsotopesHandler
from addie.processing.mantid.master_table.tree_definition import INDEX_OF_COLUMNS_WITH_CHEMICAL_FORMULA
def get_periodictable_formatted_element_and_number_of_atoms(element):
    '''Convert an element from the Mantid format (e.g. (Si28)2) to the format accepted by the
    periodictable library (e.g. Si[28]2), so that quantities such as the molecular mass can be calculated'''
# if we have a single stable element
regular_expression_1 = r'^(?P<stable_element>[A-Z]{1}[a-z]{0,1}$)'
m1 = re.search(regular_expression_1, element)
if m1 is not None:
return [m1.group('stable_element'), 1.]
    # stable element with stoichiometric coefficient
regular_expression_2 = r'^(?P<stable_element>[A-Z]{1}[a-z]{0,1})(?P<stochiometric_coefficient>\d*\.{0,1}\d*)$'
m2 = re.search(regular_expression_2, element)
if m2 is not None:
return ["{}{}".format(m2.group('stable_element'), m2.group('stochiometric_coefficient')),
np.float(m2.group('stochiometric_coefficient'))]
    # isotope with or without stoichiometric coefficient
regular_expression_3 = r'\((?P<isotope_element>[A-Z]{1}[a-z]{0,1})(?P<isotope_number>\d+)\)' \
r'(?P<stochiometric_coefficient>\d*\.{0,1}\d*)'
m3 = re.search(regular_expression_3, element)
if m3 is not None:
if m3.group('stochiometric_coefficient') == "":
number_of_atoms = 1.
else:
number_of_atoms = np.float(
m3.group('stochiometric_coefficient'))
return ["{}[{}]{}".format(m3.group('isotope_element'), m3.group('isotope_number'),
m3.group('stochiometric_coefficient')), number_of_atoms]
raise ValueError
def retrieving_molecular_mass_and_number_of_atoms_worked(chemical_formula):
    '''Parse the chemical formula from the Mantid format to the periodictable library format, in order
    to calculate the molecular mass and the total number of atoms.
    return: (molecular_mass, total_number_of_atoms) if the formula could be parsed
            (None, None) if something went wrong
    '''
list_element = chemical_formula.split(" ")
periodictable_list_element_format = []
total_number_of_atoms = 0.
try:
for _element in list_element:
[formated_element, number_of_atoms] = get_periodictable_formatted_element_and_number_of_atoms(
_element)
periodictable_list_element_format.append(formated_element)
total_number_of_atoms += number_of_atoms
periodictable_format = " ".join(periodictable_list_element_format)
periodictable_formula = formula(periodictable_format)
return periodictable_formula.mass, total_number_of_atoms
except:
return None, None
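# Hedged usage sketch (added for illustration, not part of the original module):
# 'Si O2' (Mantid-style, space separated) should yield a molecular mass near
# 60 g/mol and 3 atoms in total, while an unparsable string falls back to
# (None, None).
def _demo_molecular_mass():
    print(retrieving_molecular_mass_and_number_of_atoms_worked("Si O2"))
    print(retrieving_molecular_mass_and_number_of_atoms_worked("not a formula"))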
class MaterialHandler:
def __init__(self, parent=None, database_window=None, key=None, data_type='sample'):
if parent.material_ui is None:
o_material = PeriodicTable(parent=parent,
database_window=database_window,
key=key,
data_type=data_type)
o_material.show()
parent.material_ui = o_material
if parent.material_ui_position:
parent.material_ui.move(parent.material_ui_position)
else:
parent.material_ui.setFocus()
parent.material_ui.activateWindow()
class PeriodicTable(QMainWindow):
isotope_ui = None
list_ui_color = {'list_ui': None,
'color': None}
list_color = {0: copy.deepcopy(list_ui_color),
1: copy.deepcopy(list_ui_color),
2: copy.deepcopy(list_ui_color),
3: copy.deepcopy(list_ui_color),
4: copy.deepcopy(list_ui_color),
5: copy.deepcopy(list_ui_color),
6: copy.deepcopy(list_ui_color),
7: copy.deepcopy(list_ui_color),
}
column = 0
def __init__(self, parent=None, database_window=None, key=None, data_type='sample'):
self.parent = parent
self.database_window = database_window
self.key = key
self.data_type = data_type
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('periodic_table.ui', baseinstance=self)
self.setWindowTitle("Define Chemical Formula")
self.init_ui_color_dictionary()
self.init_widgets()
self.set_column_index()
def set_column_index(self):
if self.data_type == 'sample':
self.column = INDEX_OF_COLUMNS_WITH_CHEMICAL_FORMULA[0]
else:
self.column = INDEX_OF_COLUMNS_WITH_CHEMICAL_FORMULA[1]
def init_ui_color_dictionary(self):
# color of element buttons
# purple
self.list_color[0]['list_ui'] = [self.ui.h,
self.ui.c,
self.ui.n,
self.ui.o,
self.ui.p,
self.ui.s,
self.ui.se,
]
self.list_color[0]['color'] = "#938ac0"
# cyan
self.list_color[1]['list_ui'] = [self.ui.li,
self.ui.na,
self.ui.k,
self.ui.rb,
self.ui.cs,
self.ui.fr,
]
self.list_color[1]['color'] = "#99d5c2"
# light green
self.list_color[2]['list_ui'] = [self.ui.be,
self.ui.mg,
self.ui.ca,
self.ui.sr,
self.ui.ba,
self.ui.ra,
]
self.list_color[2]['color'] = "#c6e8c1"
# light yellow
self.list_color[3]['list_ui'] = [self.ui.b,
self.ui.si,
self.ui.ge,
self.ui.arsenic,
self.ui.sb,
self.ui.te,
self.ui.po,
]
self.list_color[3]['color'] = "#eef8b9"
# dark yellow
self.list_color[4]['list_ui'] = [self.ui.f,
self.ui.cl,
self.ui.br,
self.ui.i,
self.ui.at,
self.ui.ts,
]
self.list_color[4]['color'] = "#fee9b0"
# blue
self.list_color[5]['list_ui'] = [self.ui.he,
self.ui.ne,
self.ui.ar,
self.ui.kr,
self.ui.xe,
self.ui.rn,
self.ui.og,
]
self.list_color[5]['color'] = "#79afd1"
# light orange
self.list_color[6]['list_ui'] = [self.ui.al,
self.ui.ga,
self.ui.indium,
self.ui.sn,
self.ui.tl,
self.ui.pb,
self.ui.bi,
self.ui.nh,
self.ui.fl,
self.ui.mc,
self.ui.lv,
]
self.list_color[6]['color'] = "#fec796"
# dark orange
self.list_color[7]['list_ui'] = [self.ui.sc,
self.ui.ti,
self.ui.v,
self.ui.cr,
self.ui.mn,
self.ui.fe,
self.ui.co,
self.ui.ni,
self.ui.cu,
self.ui.zn,
self.ui.y,
self.ui.zr,
self.ui.nb,
self.ui.mo,
self.ui.tc,
self.ui.ru,
self.ui.rh,
self.ui.pd,
self.ui.ag,
self.ui.cd,
self.ui.lu,
self.ui.hf,
self.ui.ta,
self.ui.w,
self.ui.re,
self.ui.os,
self.ui.ir,
self.ui.pt,
self.ui.au,
self.ui.hg,
self.ui.lr,
self.ui.rf,
self.ui.db,
self.ui.sg,
self.ui.bh,
self.ui.hs,
self.ui.mt,
self.ui.ds,
self.ui.rg,
self.ui.cn,
]
self.list_color[7]['color'] = "#f79d83"
def init_widgets(self):
self.ui.save_button.setEnabled(False)
# init contain of chemical formula
if self.data_type == 'database':
# retrieve value from import_from_database label
text = str(
self.database_window.list_ui[self.key]['value_label'].text())
else:
# retrieve value from sample or normalization columns in master table
text = str(
self.parent.master_table_list_ui[self.key][self.data_type]['material']['text'].text())
if text == 'N/A':
text = ""
self.ui.chemical_formula.setText(text)
# set color of buttons
for _key in self.list_color.keys():
_list_ui = self.list_color[_key]['list_ui']
_color = self.list_color[_key]['color']
for _ui in _list_ui:
_ui.setStyleSheet("background-color:{}".format(_color))
# clear button icon
self.ui.clear_button.setIcon(
QtGui.QIcon(":/MPL Toolbar/clear_icon.png"))
def reset_text_field(self):
self.ui.chemical_formula.setText("")
def chemical_formula_changed(self, new_formula):
self.check_status_save_button()
def add_new_entry(self, isotope='', number=1, is_natural_element=False):
if isotope == '':
return
previous_chemical_formula = str(self.ui.chemical_formula.text())
if is_natural_element:
if number == 1:
number = ''
new_isotope_string = "{}{}".format(isotope, number)
elif number > 1:
new_isotope_string = "({}){}".format(isotope, number)
else:
new_isotope_string = "({})".format(isotope)
if previous_chemical_formula != '':
new_chemical_formula = previous_chemical_formula + ' ' + new_isotope_string
else:
new_chemical_formula = new_isotope_string
self.ui.chemical_formula.setText(new_chemical_formula)
self.ui.chemical_formula.setFocus()
        # make chemical formula editable (bug in pyqt that sometimes turns off editable)
def click_button(self, element):
IsotopesHandler(parent=self, element=element.title())
def h_button(self):
self.click_button('h')
def li_button(self):
self.click_button('li')
def he_button(self):
self.click_button('he')
def be_button(self):
self.click_button('be')
def b_button(self):
self.click_button('b')
def c_button(self):
self.click_button('c')
def n_button(self):
self.click_button('n')
def o_button(self):
self.click_button('o')
def f_button(self):
self.click_button('f')
def ne_button(self):
self.click_button('ne')
def na_button(self):
self.click_button('na')
def mg_button(self):
self.click_button('mg')
def al_button(self):
self.click_button('al')
def si_button(self):
self.click_button('si')
def p_button(self):
self.click_button('p')
def s_button(self):
self.click_button('s')
def cl_button(self):
self.click_button('cl')
def ar_button(self):
self.click_button('ar')
def k_button(self):
self.click_button('k')
def ca_button(self):
self.click_button('ca')
def sc_button(self):
self.click_button('sc')
def ti_button(self):
self.click_button('ti')
def v_button(self):
self.click_button('v')
def cr_button(self):
self.click_button('cr')
def mn_button(self):
self.click_button('mn')
def fe_button(self):
self.click_button('fe')
def co_button(self):
self.click_button('co')
def ni_button(self):
self.click_button('ni')
def cu_button(self):
self.click_button('cu')
def zn_button(self):
self.click_button('zn')
def ga_button(self):
self.click_button('ga')
def ge_button(self):
self.click_button('ge')
def as_button(self):
self.click_button('as')
def se_button(self):
self.click_button('se')
def br_button(self):
self.click_button('br')
def kr_button(self):
self.click_button('kr')
def rb_button(self):
self.click_button('rb')
def sr_button(self):
self.click_button('sr')
def y_button(self):
self.click_button('y')
def zr_button(self):
self.click_button('zr')
def nb_button(self):
self.click_button('nb')
def mo_button(self):
self.click_button('mo')
def tc_button(self):
self.click_button('tc')
def ru_button(self):
self.click_button('ru')
def rh_button(self):
self.click_button('rh')
def pd_button(self):
self.click_button('pd')
def ag_button(self):
self.click_button('ag')
def cd_button(self):
self.click_button('cd')
def in_button(self):
self.click_button('in')
def sn_button(self):
self.click_button('sn')
def sb_button(self):
self.click_button('sb')
def te_button(self):
self.click_button('te')
def i_button(self):
self.click_button('i')
def xe_button(self):
self.click_button('xe')
def cs_button(self):
self.click_button('cs')
def ba_button(self):
self.click_button('ba')
def lu_button(self):
self.click_button('lu')
def hf_button(self):
self.click_button('hf')
def ta_button(self):
self.click_button('ta')
def w_button(self):
self.click_button('w')
def re_button(self):
self.click_button('re')
def os_button(self):
self.click_button('os')
def ir_button(self):
self.click_button('ir')
def pt_button(self):
self.click_button('pt')
def au_button(self):
self.click_button('au')
def hg_button(self):
self.click_button('hg')
def tl_button(self):
self.click_button('tl')
def pb_button(self):
self.click_button('pb')
def bi_button(self):
self.click_button('bi')
def po_button(self):
self.click_button('po')
def at_button(self):
self.click_button('at')
def rn_button(self):
self.click_button('rn')
def fr_button(self):
self.click_button('fr')
def ra_button(self):
self.click_button('ra')
def lr_button(self):
self.click_button('lr')
def rf_button(self):
self.click_button('rf')
def db_button(self):
self.click_button('db')
def sg_button(self):
self.click_button('sg')
def bh_button(self):
self.click_button('bh')
def hs_button(self):
self.click_button('hs')
def mt_button(self):
self.click_button('mt')
def ds_button(self):
self.click_button('ds')
def rg_button(self):
self.click_button('rg')
def cn_button(self):
self.click_button('cn')
def nh_button(self):
self.click_button('nh')
def fl_button(self):
self.click_button('fl')
def mc_button(self):
self.click_button('mc')
def lv_button(self):
self.click_button('lv')
def ts_button(self):
self.click_button('ts')
def og_button(self):
self.click_button('og')
def ok(self):
chemical_formula = str(self.ui.chemical_formula.text())
molecular_mass, total_number_of_atoms = retrieving_molecular_mass_and_number_of_atoms_worked(chemical_formula)
if molecular_mass and total_number_of_atoms:
self.parent.material_ui = None
if self.data_type == 'database':
ui = self.database_window.list_ui[self.key]['value_label']
ui.setText(chemical_formula)
else: # 'sample' or 'normalization'
text_ui = self.parent.master_table_list_ui[self.key][self.data_type]['material']['text']
text_ui.setText(chemical_formula)
o_table = TableRowHandler(main_window=self.parent)
o_table.transfer_widget_states(
from_key=self.key, data_type=self.data_type)
self.parent.master_table_list_ui[self.key][self.data_type]['mass_density_infos']['molecular_mass'] \
= molecular_mass
self.parent.master_table_list_ui[self.key][self.data_type]['mass_density_infos']['total_number_of_atoms'] \
= total_number_of_atoms
self.parent.check_master_table_column_highlighting(
column=self.column)
self.close()
else:
self.ui.statusbar.setStyleSheet("color: red")
self.ui.statusbar.showMessage("Unable to calculate Molecular Mass! CHECK YOUR FORMULA!",
self.parent.statusbar_display_time)
def check_status_save_button(self):
if str(self.ui.chemical_formula.text()) != "":
self.ui.save_button.setEnabled(True)
else:
self.ui.save_button.setEnabled(False)
def cancel(self):
self.parent.material_ui = None
self.close()
def closeEvent(self, c):
self.parent.material_ui = None
self.parent.material_ui_position = self.pos()
if __name__ == "__main__":
app = QApplication(sys.argv)
o_dialog = PeriodicTable()
o_dialog.show()
app.exec_()
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/periodic_table/material_handler.py",
"copies": "1",
"size": "20381",
"license": "mit",
"hash": 1706748964892499700,
"line_mean": 31.1466876972,
"line_max": 123,
"alpha_frac": 0.4795152348,
"autogenerated": false,
"ratio": 4.043047014481254,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5022562249281254,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
import scipy.constants
# Constants
avogadro = scipy.constants.N_A
cm3_to_angstroms3 = 1e24
avogadro_term = avogadro / 1e24
PRECISION = 5
def is_int(value):
"""Checks if `value` is an integer
:param value: Input value to check if integer
:type value: Any
:return: If value is an integer
:rtype: bool
"""
is_number = True
try:
int(value)
except ValueError:
is_number = False
return is_number
def is_float(value):
"""Checks if `value` is a float
:param value: Input value to check if float
:type value: Any
:return: If value is an float
:rtype: bool
"""
is_number = True
try:
float(value)
except ValueError:
is_number = False
return is_number
def is_number(value):
"""Checks if `value` is a float
:param value: Input value to check if float
:type value: Any
:return: If value is an float
:rtype: bool
"""
return is_float(value)
def oneAndOnlyOneTrue(iterable):
"""Determine if iterable (ie list) has one and only one `True` value
:param iterable:
:type iterable: list
:return: If there is one and only one True
:rtype: bool
"""
try:
iterator = iter(iterable)
has_true = any(iterator)
has_another_true = any(iterator)
return has_true and not has_another_true
except Exception as e:
print(e)
raise
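# Hedged usage sketch (not part of the original module): the single iterator is
# consumed by the first `any`, so the second `any` can only find a *second*
# True. The helper name `_demo_one_and_only_one_true` is hypothetical.
def _demo_one_and_only_one_true():
    assert oneAndOnlyOneTrue([False, True, False])       # exactly one True
    assert not oneAndOnlyOneTrue([True, False, True])    # more than one True
    assert not oneAndOnlyOneTrue([False, False, False])  # no True at all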
def volume_of_cylinder(radius=np.NaN, height=np.NaN):
"""Computes volume of a cylinder
:param radius: Radius of cylinder (in units of length)
:type radius: float
:param height: Height of cylinder (in units of length)
:type height: float
:return: Volume of the cylinder in (in units of :math:`length^{3}`)
:rtype: float
"""
    return float(np.pi) * float(radius)**2 * float(height)
def volume_of_sphere(radius=np.NaN):
"""Computes volume of a sphere
:param radius: Radius of sphere (in units of length)
:type radius: float
:return: Volume of the sphere in (in units of :math:`length^{3}`)
:rtype: float
"""
    return (4. * np.pi * float(radius)**3 / 3.)
def volume_of_hollow_cylinder(
inner_radius=np.NaN,
outer_radius=np.NaN,
height=np.NaN):
"""Computes volume of a hollow cylinder
:param inner_radius: Inner radius of cylinder (in units of length)
:type inner_radius: float
:param outer_radius: Outer radius of cylinder (in units of length)
:type outer_radius: float
:param height: Height of cylinder (in units of length)
:type height: float
:return: Volume of the cylinder in (in units of :math:`length^{3}`)
:rtype: float
"""
inner_cylinder = volume_of_cylinder(radius=inner_radius, height=height)
outer_cylinder = volume_of_cylinder(radius=outer_radius, height=height)
return outer_cylinder - inner_cylinder
def get_volume_from_geometry(dictionary):
"""calculate volume given a geometry dictionary of the given shape in example below
:examples:
>>> dictionary = {'Shape': "Cylinder", 'Radius': 0.25, 'Radius2': 'N/A', 'Height': 1.0 }
>>> volume = get_volume_from_geometry(dictionary)
:param dictionary: Geometry JSON
:type dictionary: dict
:return: Volume of the geometry
:rtype: float
"""
if dictionary['Shape'].lower() == 'cylinder':
radius = dictionary['Radius']
height = dictionary['Height']
volume = volume_of_cylinder(radius=radius, height=height)
elif dictionary['Shape'].lower() == 'sphere':
radius = dictionary['Radius']
volume = volume_of_sphere(radius=radius)
elif dictionary['Shape'].lower() == 'hollow cylinder':
inner_radius = dictionary['Radius']
outer_radius = dictionary['Radius2']
height = dictionary['Height']
volume = volume_of_hollow_cylinder(
inner_radius=inner_radius,
outer_radius=outer_radius,
height=height
)
else:
raise Exception("Passed unkown shape into get_volume_from_geometry")
return volume
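# Hedged usage sketch (not part of the original module) showing the geometry
# dictionaries accepted above; for the hollow cylinder, 'Radius' is the inner
# radius and 'Radius2' the outer one. The helper name is hypothetical.
def _demo_get_volume_from_geometry():
    cylinder = {'Shape': 'Cylinder', 'Radius': 0.25, 'Radius2': 'N/A', 'Height': 1.0}
    sphere = {'Shape': 'Sphere', 'Radius': 0.25, 'Radius2': 'N/A', 'Height': 'N/A'}
    hollow = {'Shape': 'Hollow Cylinder', 'Radius': 0.20, 'Radius2': 0.25, 'Height': 1.0}
    for geometry in (cylinder, sphere, hollow):
        print(geometry['Shape'], get_volume_from_geometry(geometry))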
def mass_density2number_density(mass_density, natoms, molecular_mass):
"""Converts from mass_density (:math:`g/cm^{3}`) to number density (atoms/:math:`\\AA^{3}`)
:param mass_density: mass density in (:math:`g/cm^{3}`)
:type mass_density: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: number density in (atoms/:math:`\\AA^{3}`)
:rtype: float
"""
number_density = mass_density * avogadro_term * natoms / molecular_mass
return number_density
def number_density2mass_density(number_density, natoms, molecular_mass):
"""Converts from number density (atoms/:math:`\\AA^{3}`) to mass_density (:math:`g/cm^{3}`)
:param number_density: number density in (atoms/:math:`\\AA^{3}`)
:type number_density: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: mass density in (:math:`g/cm^{3}`)
:rtype: float
"""
mass_density = number_density * molecular_mass / natoms / avogadro_term
return mass_density
def mass2mass_density(mass, volume):
"""Converts mass (:math:`g`) and volume (:math:`cm^{3}`) to mass_density (:math:`g/cm^{3}`)
:param mass: mass in (:math:`g`)
:type mass: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:return: mass density in (:math:`g/cm^{3}`)
:rtype: float
"""
mass_density = mass / volume
return mass_density
def mass2number_density(mass, volume, natoms, molecular_mass):
"""Converts mass (:math:`g`) and volume (:math:`cm^{3}`) to number density (atoms/:math:`\\AA^{3}`)
:param mass: mass in (:math:`g`)
:type mass: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: number density in (atoms/:math:`\\AA^{3}`)
:rtype: float
"""
mass_density = mass2mass_density(mass, volume)
number_density = mass_density2number_density(
mass_density, natoms, molecular_mass)
return number_density
def mass_density2mass(mass_density, volume):
"""Converts from mass_density (:math:`g/cm^{3}`) to mass (:math:`g`)
:param mass_density: mass density in (:math:`g/cm^{3}`)
:type mass_density: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:return: mass in (:math:`g`)
:rtype: float
"""
mass = mass_density * volume
return mass
def number_density2mass(number_density, volume, natoms, molecular_mass):
"""Converts from number density (atoms/:math:`\\AA^{3}`) to mass (:math:`g`)
:param number_density: number density in (atoms/:math:`\\AA^{3}`)
:type number_density: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: mass in (:math:`g`)
:rtype: float
"""
mass_density = number_density2mass_density(
number_density, natoms, molecular_mass)
mass = mass_density2mass(mass_density, volume)
return mass
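# Hedged round-trip sketch (not part of the original module). The numbers are
# illustrative only: water, H2O, ~18.015 g/mol, 3 atoms per molecule,
# mass density ~1.0 g/cm^3, giving a number density of roughly 0.1 atoms/A^3.
if __name__ == "__main__":
    mass_density = 1.0        # g/cm^3
    natoms = 3                # atoms per H2O molecule
    molecular_mass = 18.015   # g/mol
    number_density = mass_density2number_density(mass_density, natoms, molecular_mass)
    print("number density = {:.5f} atoms/A^3".format(number_density))
    # converting back should recover the original mass density (up to rounding)
    recovered = number_density2mass_density(number_density, natoms, molecular_mass)
    assert abs(recovered - mass_density) < 1e-12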
| {
"repo_name": "neutrons/FastGR",
"path": "addie/utilities/math_tools.py",
"copies": "1",
"size": "7624",
"license": "mit",
"hash": -99103487345452030,
"line_mean": 27.3420074349,
"line_max": 103,
"alpha_frac": 0.6377229801,
"autogenerated": false,
"ratio": 3.5182279649284727,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9654550807702016,
"avg_score": 0.00028002746529148575,
"num_lines": 269
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.signal
import scipy.sparse
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
from .utils import libs_norm3, cumulative_norm
__all__ = [
'BandNormalize', 'BezierSquash', 'CosineSquash', 'CumulativeNormalize',
'HingeSquash', 'L1Normalize', 'L2Normalize', 'LibsNormalize', 'LogSquash',
'MaxNormalize', 'MinZeroNormalize', 'Offset', 'PolynomialSquash',
'PrincipalComponents', 'SavitzkyGolayDerivative', 'SavitzkyGolaySmooth',
'SqrtSquash', 'TanhSquash'
]
class Preprocessor(object):
arg_type = float
def apply(self, spectra, wavelengths):
''' S, w = apply(S, w)'''
    raise NotImplementedError('Subclasses must implement apply().')
@classmethod
def from_string(cls, s):
if not s:
return cls()
args = map(cls.arg_type, s.split(':'))
return cls(*args)
class PrincipalComponents(Preprocessor):
name = 'pca'
def __init__(self, num_pcs):
# Hack: may be float in (0,1] or positive int. We'll assume 1-D in the case
# of 1.0, as that's more common.
if num_pcs >= 1:
assert num_pcs - int(num_pcs) == 0
num_pcs = int(num_pcs)
self.model = PCA(n_components=num_pcs)
def apply(self, spectra, wavelengths):
pcs = self.model.fit_transform(spectra)
return pcs, wavelengths
class PolynomialSquash(Preprocessor):
'''Generalized polynomial squashing function.
Derived from a normal cubic polynomial with f(0) = 0 and f(1) = 1.
We also enforce d/dx => 0 and d2/d2x <= 0, for a concave shape.
This constrains -0.5 < a < 1, and -2a-1 < b < min(-3a, 0).
'''
name = 'poly'
def __init__(self, a, b):
assert -0.5 < a < 1
assert -2*a - 1 < b < min(-3*a, 0)
c = 1 - a - b
self.poly = np.poly1d([a, b, c, 0])
def apply(self, spectra, wavelengths):
x = spectra / np.max(spectra, axis=1, keepdims=True)
p = self.poly(x)
return normalize(p, norm='l2', copy=False), wavelengths
class BezierSquash(Preprocessor):
'''Bezier squashing function.
Derived from a bezier curve with control points at [(0,0), (a,b), (1,1)]
Constraints are 0 < a < 1, 0 < b < 1, b > a (upper left of y = x line).
'''
name = 'bezier'
def __init__(self, a, b):
assert 0 < a < 1
assert 0 < b < 1
assert b > a
twoa = 2*a
twob = 2*b
if twoa == 1:
a += 1e-5
twoa = 2*a
self.args = (a, b, twoa, twob)
def apply(self, spectra, wavelengths):
x = spectra / np.max(spectra, axis=1, keepdims=True)
a, b, twoa, twob = self.args
tmp = np.sqrt(a*a-twoa*x+x)
foo = x * (1 - twob)
top = -twoa*(tmp+foo+b) + twob*tmp + foo + twoa*a
p = top / (1-twoa)**2
return normalize(p, norm='l2', copy=False), wavelengths
class HingeSquash(Preprocessor):
name = 'squash:hinge'
def __init__(self, h):
self.hinge = h
def apply(self, spectra, wavelengths):
return np.minimum(spectra, self.hinge), wavelengths
class CosineSquash(Preprocessor):
name = 'squash:cos'
def apply(self, spectra, wavelengths):
np.maximum(spectra, 1e-10, out=spectra) # Hack: fix NaN issues
s = (1 - np.cos(np.pi * spectra)) / 2.0
return s, wavelengths
def _generic_squash(numpy_func_name):
fn = getattr(np, numpy_func_name)
class _GenericSquash(Preprocessor):
name = 'squash:' + numpy_func_name
def apply(self, spectra, wavelengths):
return fn(spectra), wavelengths
_GenericSquash.__name__ = numpy_func_name.title() + 'Squash'
return _GenericSquash
TanhSquash = _generic_squash('tanh')
SqrtSquash = _generic_squash('sqrt')
LogSquash = _generic_squash('log')
class LibsNormalize(Preprocessor):
name = 'normalize:norm3'
def apply(self, spectra, wavelengths):
s = libs_norm3(spectra, wavelengths=wavelengths, copy=False)
return s, wavelengths
class CumulativeNormalize(Preprocessor):
name = 'normalize:cum'
def apply(self, spectra, wavelengths):
return cumulative_norm(spectra), wavelengths
class MinZeroNormalize(Preprocessor):
name = 'normalize:min'
def apply(self, spectra, wavelengths):
spectra -= spectra.min(axis=1)[:, None]
return spectra, wavelengths
class BandNormalize(Preprocessor):
name = 'normalize:band'
def __init__(self, loc):
self.loc = loc
def apply(self, spectra, wavelengths):
idx = np.searchsorted(wavelengths, self.loc)
a = max(0, idx - 2)
b = min(len(wavelengths), idx + 3)
x = spectra[:, a:b].max(axis=1)
spectra /= x[:,None]
return spectra, wavelengths
def _generic_norm(norm):
assert norm in ('max', 'l1', 'l2')
class _GenericNorm(Preprocessor):
name = 'normalize:' + norm
def apply(self, spectra, wavelengths):
return normalize(spectra, norm=norm, copy=False), wavelengths
_GenericNorm.__name__ = norm.title() + 'Normalize'
return _GenericNorm
MaxNormalize = _generic_norm('max')
L1Normalize = _generic_norm('l1')
L2Normalize = _generic_norm('l2')
class SavitzkyGolayDerivative(Preprocessor):
name = 'deriv'
arg_type = int
def __init__(self, window, order):
self.window = window
self.order = order
def apply(self, spectra, wavelengths):
assert not scipy.sparse.issparse(spectra)
d = scipy.signal.savgol_filter(spectra, self.window, self.order, deriv=1)
return d, wavelengths
class SavitzkyGolaySmooth(SavitzkyGolayDerivative):
name = 'smooth'
def apply(self, spectra, wavelengths):
assert not scipy.sparse.issparse(spectra)
d = scipy.signal.savgol_filter(spectra, self.window, self.order, deriv=0)
return d, wavelengths
class Offset(Preprocessor):
name = 'offset'
def __init__(self, x, y=0):
self.intensity_offset = y
self.wavelength_offset = x
def apply(self, spectra, wavelengths):
if self.intensity_offset != 0:
spectra += self.intensity_offset
if self.wavelength_offset != 0:
wavelengths += self.wavelength_offset
return spectra, wavelengths
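# Hedged pipeline sketch (not part of the original module): each preprocessor
# returns an updated (spectra, wavelengths) pair, so steps can be chained.
# Only self-contained steps are used here; `_demo_pipeline` is a hypothetical
# helper and the synthetic spectra are random.
def _demo_pipeline():
  rng = np.random.RandomState(0)
  wavelengths = np.linspace(200.0, 800.0, 601)
  spectra = rng.rand(5, 601)  # 5 synthetic spectra, one per row
  steps = [HingeSquash(0.9), SavitzkyGolaySmooth(11, 3), L2Normalize()]
  for step in steps:
    spectra, wavelengths = step.apply(spectra, wavelengths)
  return spectra, wavelengths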
| {
"repo_name": "all-umass/superman",
"path": "superman/preprocess/steps.py",
"copies": "1",
"size": "5980",
"license": "mit",
"hash": 2396649526054173700,
"line_mean": 25.4601769912,
"line_max": 79,
"alpha_frac": 0.6598662207,
"autogenerated": false,
"ratio": 3.066666666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4226532887366667,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import sys
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from scipy.signal import correlate2d
from simdna.simulations import loaded_motifs
def get_motif_scores(encoded_sequences,
motif_names,
max_scores=None,
return_positions=False,
GC_fraction=0.4):
"""
Computes pwm log odds.
Parameters
----------
encoded_sequences : 4darray
motif_names : list of strings
max_scores : int, optional
return_positions : boolean, optional
GC_fraction : float, optional
Returns
-------
(num_samples, num_motifs, seq_length) complete score array by default.
If max_scores, (num_samples, num_motifs*max_scores) max score array.
If max_scores and return_positions, (num_samples, 2*num_motifs*max_scores)
array with max scores and their positions.
"""
num_samples, _, _, seq_length = encoded_sequences.shape
scores = np.ones((num_samples, len(motif_names), seq_length))
for j, motif_name in enumerate(motif_names):
pwm = loaded_motifs.getPwm(motif_name).getRows().T
log_pwm = np.log(pwm)
gc_pwm = 0.5 * np.array(
[[1 - GC_fraction, GC_fraction, GC_fraction, 1 - GC_fraction]] * len(
pwm[0])).T
gc_log_pwm = np.log(gc_pwm)
scores[:, j, :] = get_pssm_scores(encoded_sequences,
log_pwm) - get_pssm_scores(
encoded_sequences, gc_log_pwm)
if max_scores is not None:
sorted_scores = np.sort(scores)[:, :, ::-1][:, :, :max_scores]
if return_positions:
sorted_positions = scores.argsort()[:, :, ::-1][:, :, :max_scores]
return np.concatenate(
(sorted_scores.reshape((num_samples, len(motif_names) * max_scores)),
sorted_positions.reshape(
(num_samples, len(motif_names) * max_scores))),
axis=1)
else:
return sorted_scores.reshape((num_samples, len(motif_names) * max_scores))
else:
return scores
def get_pssm_scores(encoded_sequences, pssm):
"""
Convolves pssm and its reverse complement with encoded sequences
and returns the maximum score at each position of each sequence.
Parameters
----------
encoded_sequences: 3darray
(num_examples, 1, 4, seq_length) array
pssm: 2darray
(4, pssm_length) array
Returns
-------
scores: 2darray
(num_examples, seq_length) array
"""
encoded_sequences = encoded_sequences.squeeze(axis=1)
# initialize fwd and reverse scores to -infinity
fwd_scores = np.full_like(encoded_sequences, -np.inf, float)
rc_scores = np.full_like(encoded_sequences, -np.inf, float)
# cross-correlate separately for each base,
# for both the PSSM and its reverse complement
for base_indx in range(encoded_sequences.shape[1]):
base_pssm = pssm[base_indx][None]
base_pssm_rc = base_pssm[:, ::-1]
fwd_scores[:, base_indx, :] = correlate2d(
encoded_sequences[:, base_indx, :], base_pssm, mode='same')
rc_scores[:, base_indx, :] = correlate2d(
encoded_sequences[:, -(base_indx + 1), :], base_pssm_rc, mode='same')
# sum over the bases
fwd_scores = fwd_scores.sum(axis=1)
rc_scores = rc_scores.sum(axis=1)
# take max of fwd and reverse scores at each position
scores = np.maximum(fwd_scores, rc_scores)
return scores
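# Hedged toy sketch (not part of the original module): score one one-hot
# encoded sequence of length 6 against a hand-made 2-column PSSM that favours
# the 2-mer "AC". Shapes follow the docstring above; `_demo_pssm_scores` is a
# hypothetical helper.
def _demo_pssm_scores():
  seq = "ACGTAC"
  base_to_row = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
  encoded = np.zeros((1, 1, 4, len(seq)))
  for pos, base in enumerate(seq):
    encoded[0, 0, base_to_row[base], pos] = 1
  pssm = np.array([[2.0, -1.0],    # A
                   [-1.0, 2.0],    # C
                   [-1.0, -1.0],   # G
                   [-1.0, -1.0]])  # T
  scores = get_pssm_scores(encoded, pssm)  # shape (1, 6)
  return scores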
def one_hot_encode(sequences):
sequence_length = len(sequences[0])
integer_type = np.int8 if sys.version_info[
0] == 2 else np.int32 # depends on Python version
integer_array = LabelEncoder().fit(
np.array(('ACGTN',)).view(integer_type)).transform(
sequences.view(integer_type)).reshape(
len(sequences), sequence_length)
one_hot_encoding = OneHotEncoder(
sparse=False, n_values=5, dtype=integer_type).fit_transform(integer_array)
return one_hot_encoding.reshape(len(sequences), 1, sequence_length,
5).swapaxes(2, 3)[:, :, [0, 1, 2, 4], :]
def reverse_complement(encoded_seqs):
return encoded_seqs[..., ::-1, ::-1]
def get_sequence_strings(encoded_sequences):
"""
Converts encoded sequences into an array with sequence strings
"""
num_samples, _, _, seq_length = np.shape(encoded_sequences)
sequence_characters = np.chararray((num_samples, seq_length))
sequence_characters[:] = 'N'
for i, letter in enumerate(['A', 'C', 'G', 'T']):
letter_indxs = (encoded_sequences[:, :, i, :] == 1).squeeze()
sequence_characters[letter_indxs] = letter
# return 1D view of sequence characters
return sequence_characters.view('S%s' % (seq_length)).ravel()
def encode_fasta_sequences(fname):
"""
One hot encodes sequences in fasta file
"""
name, seq_chars = None, []
sequences = []
with open(fname) as fp:
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name:
sequences.append(''.join(seq_chars).upper())
name, seq_chars = line, []
else:
seq_chars.append(line)
if name is not None:
sequences.append(''.join(seq_chars).upper())
return one_hot_encode(np.array(sequences))
| {
"repo_name": "deepchem/deepchem",
"path": "contrib/dragonn/utils.py",
"copies": "6",
"size": "5203",
"license": "mit",
"hash": -7936609403675035000,
"line_mean": 34.1554054054,
"line_max": 80,
"alpha_frac": 0.6340572746,
"autogenerated": false,
"ratio": 3.480267558528428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.013032608413762336,
"num_lines": 148
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import warnings
# from .due import due, Doi
__all__ = ["tao_impl_angle_beta"]
import astropy
import astropy.units as u
def convert_redshift_to_comoving_distance(redshifts,
cosmo=None,
distance_units=None):
r"""
Returns co-moving distances for a list of redshifts
Parameters:
-----------
redshifts: double, array or scalar
List of redshifts
cosmo : An astropy cosmology object. default is Planck 2015
Sets up the cosmology for calculating the co-moving distances
distance_units : astropy units object, units for the co-moving distance(s)
Returns:
--------
com_dist : double, array or scalar. Same length as the input redshifts
Returned in `distance_units` (if specified); otherwise, returned
in Mpc/h units.
"""
default_units = u.Mpc
if cosmo is None:
from astropy.cosmology import Planck15
cosmo = Planck15
msg = "cosmology is not set. Using Planck 2015 cosmology = {0}"\
.format(cosmo)
warnings.warn(msg)
else:
        from astropy.cosmology import Cosmology
        if not isinstance(cosmo, Cosmology):
            msg = 'The cosmology object parameter must be an instance of '\
                  '`astropy.cosmology.Cosmology`'
raise ValueError(msg)
if distance_units is not None:
if not isinstance(distance_units, u.Unit):
msg = 'distance units parameter = {0} must be an instance of '\
'`astropy.units`.'.format(distance_units)
raise u.UnitsError(msg)
else:
distance_units = default_units
# calculate all the distances
distances = cosmo.comoving_distance(redshifts)
distances.to(default_units)
# calculate 1/h
H0 = cosmo.H(0)
default_hubble_units = (u.km/u.s)/u.Mpc
hundredkm_per_s_per_Mpc = 100.0 * default_hubble_units
little_h = cosmo.H(0).to(default_hubble_units)/hundredkm_per_s_per_Mpc
print("H0 = {0} little h = {1}".format(H0, little_h))
# convert to co-moving 1/h units
distances = distances * little_h
print("distances = {0}".format(distances))
# Now return in the requested units
# (set to Mpc/h by default)
return distances.to(distance_units)
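# Hedged usage sketch (not part of the original module). With cosmo=None the
# function warns and falls back to Planck 2015, and the distances come back in
# Mpc/h. `_demo_comoving_distance` is a hypothetical helper.
def _demo_comoving_distance():
    redshifts = [0.1, 0.5, 1.0]
    distances = convert_redshift_to_comoving_distance(redshifts, cosmo=None)
    print("comoving distances = {0}".format(distances))
    return distances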
# # Use duecredit (duecredit.org) to provide a citation to relevant work to
# # be cited. This does nothing, unless the user has duecredit installed,
# # And calls this with duecredit (as in `python -m duecredit script.py`):
# due.cite(Doi("10.1167/13.9.30"),
# description="Template project for small scientific Python projects",
# tags=["reference-implementation"],
# path='lightcone')
def tao_paper_solution_angle_beta(min_ra=10.0, max_ra=20.0,
min_dec=30.0, max_dec=35.0,
zmin=0.0, zmax=2.0,
boxsize=[500.0, 500.0, 500.0]):
r"""
Returns the angle, :math:`\beta`, to construct an unique lightcone
The routine here is an attempt at creating the algorithm presented
in the TAO code paper (http://adsabs.harvard.edu/abs/2016ApJS..223....9B)
Parameters:
-----------
    min_ra : double, default=10.0 degrees. Must be in range [0.0, 360.0]
Minimum value of Right Ascension for the lightcone
max_ra : double, default=20.0 degrees. Must be in range [0.0, 360.0]
Maximum value of Right Ascension for the lightcone
min_dec : double, default=30.0 degrees, Must be in range [-90.0, 90.0]
Minimum value of Declination for the lightcone
    max_dec : double, default=35.0 degrees, Must be in range [-90.0, 90.0]
Maximum value of Declination for the lightcone
zmin : double, default=0.0.
Minimum redshift cut for the lightcone
zmax : double, default=2.0
Maximum redshift cut for the lightcone
boxsize : double, or array of 3 doubles, units=Mpc/h. default = 500 Mpc/h.
The periodic boxsize in each of the 3 dimensions.
Returns:
--------
beta : double, units=degrees
The angle by which the lightcone needs to start off such that an
unique lightcone solution can be generated.
.. note : The solution here might be different from the one presented in
the TAO paper (http://adsabs.harvard.edu/abs/2016ApJS..223....9B)
"""
def tao_impl_angle_beta(min_ra=10.0, max_ra=20.0,
min_dec=30.0, max_dec=35.0,
zmin=0.0, zmax=2.0,
cosmo=None,
boxsize=500.0 * u.Mpc):
r"""
Returns the angle, :math:`\beta`, to construct an unique lightcone
The routine here is a direct translation from C++ in the TAO code-base
to python.
Parameters:
-----------
    min_ra : double, default=10.0 degrees. Must be in range [0.0, 360.0]
Minimum value of Right Ascension for the lightcone
max_ra : double, default=20.0 degrees. Must be in range [0.0, 360.0]
Maximum value of Right Ascension for the lightcone
min_dec : double, default=30.0 degrees, Must be in range [-90.0, 90.0]
Minimum value of Declination for the lightcone
    max_dec : double, default=35.0 degrees, Must be in range [-90.0, 90.0]
Maximum value of Declination for the lightcone
zmin : double, default=0.0.
Minimum redshift cut for the lightcone
zmax : double, default=2.0
Maximum redshift cut for the lightcone
cosmo : astropy cosmology object. default None
boxsize : double, or array of 3 doubles, units=Mpc/h. default = 500 Mpc/h.
The periodic boxsize in each of the 3 dimensions.
Returns:
--------
beta : double, units=degrees
The angle by which the lightcone needs to start off such that an
unique lightcone solution can be generated.
.. note : The solution here might be different from the one presented in
the TAO paper (http://adsabs.harvard.edu/abs/2016ApJS..223....9B)
"""
max_redshift_allowed = 100.0
# Input validation
if min_ra < 0.0 or max_ra > 360:
msg = 'Right Ascension (RA) must be between [0.0, 360.0]. The input '\
'RA min, max values are = {0}, {1}'.format(min_ra, max_ra)
raise ValueError(msg)
if min_dec < -90.0 or max_dec > 90.0:
msg = 'Declination (DEC) must be between [-90.0, 90.0]. The input '\
'DEC min, max values are = {0}, {1}'.format(min_dec, max_dec)
raise ValueError(msg)
# Now attach degrees units
print("Before switching to radians: min ra = {0}".format(min_ra))
min_ra = (min_ra * u.deg).to(u.rad)
max_ra = (max_ra * u.deg).to(u.rad)
min_dec = (min_dec * u.deg).to(u.rad)
max_dec = (max_dec * u.deg).to(u.rad)
print("After switching to radians: min ra = {0}".format(min_ra))
if zmin < 0.0 or zmax > max_redshift_allowed:
msg = 'Redshift (z) must be between [0.0, {0}]. The input '\
'z min, max values are = {1}, {2}'.format(max_redshift_allowed,
zmin,
zmax)
raise ValueError(msg)
units = None
if isinstance(boxsize, u.Quantity):
units = boxsize.unit
d1, d0 = convert_redshift_to_comoving_distance([zmax, zmin],
cosmo=cosmo,
distance_units=units)
# If boxsize did not have units, convert to
# the units of the co-moving distance
if units is None:
boxsize = boxsize * d1.unit
ra_diff = max_ra - min_ra
if (d1 - d0 * np.cos(ra_diff)) <= boxsize:
# all angles are in radians -> convert to degrees
        return (0.0 * u.rad - min_ra).to(u.deg)
# Use Ridder's method to find the optimal angle for unique cones
# auto res = hpc::ridders(
# [ra, d0, d1, b]( double x )
# {
# double phi = ra + x;
# return b - d1*(cos( x ) - sin( x )/tan( phi ));
# },
# 0.5*M_PI,
# 0.0
# );
# if( res != std::numeric_limits<double>::max() )
# return res - lc.min_ra();
# else
# return boost::none;
def _func(x, ra_diff, d0, d1, boxsize):
x_in_rad = x * u.rad
for name, a in zip(['x', 'ra diff', 'd0', 'd1', 'boxsize'],
[x_in_rad, ra_diff, d0, d1, boxsize]):
print("{0} = {1}".format(name, a))
phi = ra_diff + x_in_rad
res = boxsize - d1 * (np.cos(x) - np.sin(x)/np.tan(phi))
print("res = {0}".format(res))
        # scipy's scalar root finders expect a plain float, so strip the
        # astropy units before handing the residual back
        return res.to(boxsize.unit).value
method = 'ridder'
if method == 'ridder':
from scipy.optimize import ridder as solver
else:
from scipy.optimize import brentq as solver
beta, root_obj = solver(_func, 0.0, 0.5*np.pi,
maxiter=100,
full_output=True,
disp=True,
args=(ra_diff, d0, d1, boxsize))
print("Root object = {0}".format(root_obj))
print("Solved angle = {0}. Converged = {1}"
.format(beta, root_obj.Converged))
| {
"repo_name": "manodeep/lightcone",
"path": "lightcone/lightcone.py",
"copies": "1",
"size": "9662",
"license": "mit",
"hash": -3994173344389349000,
"line_mean": 34.0072463768,
"line_max": 79,
"alpha_frac": 0.5565100393,
"autogenerated": false,
"ratio": 3.614665170220726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9594270591717571,
"avg_score": 0.015380923560630923,
"num_lines": 276
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def adjR2(MRSS,y_1d,df,rank):
"""
Computes a single Adjusted R^2 value for a model (high is good)
Input:
------
MRSS : Mean Squared Error
y_1d : the y vector as a 1d np array ( n x 1)
    df : the degrees of freedom of the model (n-p-1 generally, where p is the number of
features)
rank : the rank of the X feature matrix used to create the MRSS
(assumed to be p+1 generally, where p is the number of features)
Output:
-------
adjR2: the adjusted R^2 value
Comments:
---------
    Adjusted R^2 is a comparison tool that penalizes the number of features
"""
n=y_1d.shape[0]
RSS= MRSS*df
TSS= np.sum((y_1d-np.mean(y_1d))**2)
adjR2 = 1- ((RSS/TSS) * ((n-1)/(n-rank)) )
return adjR2
def AIC(MRSS,y_1d,df,rank):
"""
Computes a single AIC value for a model (low is good)
Input:
------
MRSS : Mean Squared Error
y_1d : the y vector as a 1d np array ( n x 1)
    df : the degrees of freedom of the model (n-p-1 generally, where p is the number of
features)
rank : the rank of the X feature matrix used to create the MRSS
(assumed to be p+1 generally, where p is the number of features)
Output:
-------
AIC: the adjusted AIC value
"""
n=y_1d.shape[0]
RSS= MRSS*df
AIC= n * np.log(RSS/n) + 2*(rank)
return AIC
def BIC(MRSS,y_1d,df,rank):
"""
Computes a single BIC value for a model (low is good)
Input:
------
MRSS : Mean Squared Error
y_1d : the y vector as a 1d np array ( n x 1)
    df : the degrees of freedom of the model (n-p-1 generally, where p is the number of
features)
rank : the rank of the X feature matrix used to create the MRSS
(assumed to be p+1 generally, where p is the number of features)
Output:
-------
BIC: the adjusted BIC value
Comments:
---------
    BIC is a Bayesian approach to model comparison that more strongly
penalizes the number of features than AIC (which was not done, but Ben
wants a bigger penalty than Adjusted R^2 since he hates features)
"""
n=y_1d.shape[0]
RSS= MRSS*df
BIC= n * np.log(RSS/n) + np.log(n)*(rank)
return BIC
##### Second attempt (mult-dimensional)
def AIC_2(MRSS_vec,y_2d,df,rank):
"""
Computes a single AIC value for a model in vector form (low is good)
Input:
------
MRSS_vec : Mean Squared Error Vector (1d np array)
    y_2d : the y matrix as a 2d np array (n x t)
    df : the degrees of freedom of the model (n-p-1 generally, where p is the number of
features)
rank : the rank of the X feature matrix used to create the MRSS
(assumed to be p+1 generally, where p is the number of features)
Output:
-------
AIC: the adjusted AIC value vector
"""
n=y_2d.shape[1]
RSS= MRSS_vec*df
AIC= n * np.log(RSS/n) + 2*(rank)
return AIC
def BIC_2(MRSS_vec,y_2d,df,rank):
"""
Computes a single BIC value for a model in vector form (low is good)
Input:
------
MRSS_vec : Mean Squared Error Vector (1d np array n)
y_2d : the y vector as a 2d np array ( n x t)
    df : the degrees of freedom of the model (n-p-1 generally, where p is the number of
features)
rank : the rank of the X feature matrix used to create the MRSS
(assumed to be p+1 generally, where p is the number of features)
Output:
-------
BIC: the adjusted BIC value vector
Comments:
---------
    BIC is a Bayesian approach to model comparison that more strongly
penalizes the number of features than AIC (which was not done, but Ben
wants a bigger penalty than Adjusted R^2 since he hates features)
"""
n=y_2d.shape[1]
RSS= MRSS_vec*df
BIC= n * np.log(RSS/n) + np.log(n)*(rank)
return BIC
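# Hedged worked sketch (not part of the original module): compare a 2-feature
# fit against a 5-feature fit on fake data with essentially the same residual
# error. Both criteria should then prefer the smaller model (lower AIC/BIC).
if __name__ == "__main__":
    np.random.seed(0)
    n = 100
    y = np.random.randn(n)
    for p, mrss in [(2, 1.00), (5, 0.99)]:
        df = n - p - 1
        rank = p + 1
        print("p = {0}: AIC = {1:.2f}, BIC = {2:.2f}, adjR2 = {3:.3f}".format(
            p, AIC(mrss, y, df, rank), BIC(mrss, y, df, rank),
            adjR2(mrss, y, df, rank)))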
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/model_comparison.py",
"copies": "1",
"size": "3561",
"license": "bsd-3-clause",
"hash": 2765659924895276500,
"line_mean": 22.8993288591,
"line_max": 75,
"alpha_frac": 0.6641392867,
"autogenerated": false,
"ratio": 2.7392307692307694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8530985342236814,
"avg_score": 0.074476942738791,
"num_lines": 149
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def events2neural(task_fname, tr, n_trs):
""" Return predicted neural time course from event file `task_fname`
Parameters
----------
task_fname : str
Filename of event file
tr : float
TR in seconds
n_trs : int
Number of TRs in functional run
Returns
-------
time_course : array shape (n_trs,)
Predicted neural time course, one value per TR
"""
task = np.loadtxt(task_fname)
# Check that the file is plausibly a task file
if task.ndim != 2 or task.shape[1] != 3:
raise ValueError("Is {0} really a task file?", task_fname)
# Convert onset, duration seconds to TRs
task[:, :2] = task[:, :2] / tr
# Neural time course from onset, duration, amplitude for each event
time_course = np.zeros(n_trs)
for onset, duration, amplitude in task:
        time_course[int(onset):int(onset + duration)] = amplitude
return time_course
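# Hedged usage sketch (not part of the original module): write a tiny event
# file (one onset, duration, amplitude triple per row, in seconds) to a
# temporary file and sample the predicted neural time course at the TR.
if __name__ == "__main__":
    import tempfile
    events = np.array([[10.0, 4.0, 1.0],
                       [30.0, 6.0, 2.0]])
    tmp = tempfile.NamedTemporaryFile(suffix=".txt", delete=False)
    tmp.close()
    np.savetxt(tmp.name, events)
    time_course = events2neural(tmp.name, tr=2.0, n_trs=40)
    print(time_course)  # amplitude 1 at TRs 5-6, amplitude 2 at TRs 15-17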
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/stimuli.py",
"copies": "1",
"size": "1024",
"license": "bsd-3-clause",
"hash": 6001039594899577000,
"line_mean": 29.1176470588,
"line_max": 72,
"alpha_frac": 0.623046875,
"autogenerated": false,
"ratio": 3.7925925925925927,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9823902772914721,
"avg_score": 0.01834733893557423,
"num_lines": 34
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def multiline(ax, data, labels, line_kw=None, xlabels=None, ylabels=None):
"""Plot a number of datasets on their own line_artist
Parameters
----------
ax : iterable
List of mpl.Axes objects
data : list
If the data is Nx1, the data will be treated as 'x'. If the data is
Nx2, the data will be treated as (x, y)
labels : list
Names of the data sets. These will appear as the legend in each plot
line_kw : dict
Dictionary of kwargs to be passed to **all** of the plotting functions.
xlabels : iterable or string, optional
The name of the x axes. If an iterable is passed in, it should be the
same length as `data`. If a string is passed in, it is assumed that all
'data' should have the same `x` axis
ylabels : iterable or string, optional
Same as `xlabels`.
Returns
-------
arts : list
Dictionary of matplotlib.lines.Line2D objects. These objects can be
used for further manipulation of the plot
"""
if line_kw is None:
line_kw = {}
arts = []
# handle the xlabels
if xlabels is None:
xlabels = [''] * len(data)
if ylabels is None:
ylabels = [''] * len(data)
    if isinstance(xlabels, str):
        xlabels = [xlabels] * len(data)
    if isinstance(ylabels, str):
        ylabels = [ylabels] * len(data)
def to_xy(d, label):
shape = d.shape
if len(shape) == 1:
return range(len(d)), d
elif len(shape) == 2:
            if shape[0] == 1:
                return range(shape[1]), d[0]
            elif shape[1] == 1:
                return range(shape[0]), d[:, 0]
elif shape[0] == 2:
return d[0], d[1]
elif shape[1] == 2:
return d[:, 0], d[:, 1]
raise ValueError('data set "%s" has a shape I do not '
'understand. Expecting shape (N), (Nx1), '
'(1xN), (Nx2) or (2xN). I got %s' % (label, shape))
for ax, d, label, xlabel, ylabel in zip(ax, data, labels, xlabels, ylabels):
d = np.asarray(d)
x, y = to_xy(d, label)
art, = ax.plot(x, y, label=label, **line_kw)
arts.append(art)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend()
return arts
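# Hedged usage sketch (not part of the original module); assumes matplotlib is
# available. Two datasets go onto two axes: a bare y-vector (x becomes the
# index) and an (N, 2) array of (x, y) pairs.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, axes = plt.subplots(2, 1)
    t = np.linspace(0, 2 * np.pi, 50)
    y_only = np.sin(t)
    xy_pairs = np.column_stack([np.linspace(0, 1, 50), np.cos(t)])
    multiline(axes, [y_only, xy_pairs], ['sine', 'cosine'],
              xlabels='x', ylabels='signal')
    plt.show()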
| {
"repo_name": "sameera2004/xray-vision",
"path": "xray_vision/mpl_plotting/utils.py",
"copies": "3",
"size": "2478",
"license": "bsd-3-clause",
"hash": 7959108008217354000,
"line_mean": 34.4,
"line_max": 80,
"alpha_frac": 0.5371267151,
"autogenerated": false,
"ratio": 3.7602427921092563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5797369507209257,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def present_3d(three_d_image):
""" Coverts a 3d image into a 2nd image with slices in 3rd dimension varying across the element
three_d_image: is a 3 dimensional numpy array
# might later add these in (couldn't do so at this time)
num_images: number of 2d images in 3d array (or number wanted to print out)
image_dim: the dimension of the image
#####
With Results just do:
In[0]: full=present_3d(three_d_image)
In[1]: plt.imshow(full,cmap="gray",interpolation="nearest")
In[2]: plt.colorbar()
"""
num_images=three_d_image.shape[-1]
image_dim=three_d_image.shape[0:2]
# formating grid
length=np.ceil(np.sqrt(num_images))
    grid_size=(int(length),int(np.ceil(num_images/length)))
full=np.zeros((image_dim[0]*grid_size[0],image_dim[1]*grid_size[1]))
counter=0
for row in range(int(grid_size[0])):
for col in range(int(grid_size[1])):
if counter< num_images:
                full[(row*image_dim[0]):((row+1)*image_dim[0]),(col*image_dim[1]):((col+1)*image_dim[1])]=three_d_image[...,row*grid_size[1]+col]
counter=counter+1
return full
def make_mask(data_3d, mask_data, fit=False):
""" Takes a 3d image and a 3d mask array and fits the mask over the 3d image.
The mask turns all of the points of data_3d that are not part of the mask into 0's.
    If `fit=True`, the mask resolution differs from the image resolution, and the
    mask is block-summed down to the image resolution before being applied.
Parameters
----------
data_3d: numpy array of 3 dimensions
The image data of one subject that you wish to fit mask over
mask_data: numpy array of 3 dimension
The mask for the data_3d
fit: boolean
Whether or not the resolution of the mask needs to be altered to fit onto the data
Returns
-------
new_data: numpy array of 3 dimensions
Same data frame as data_3d but with the mask placed on top.
"""
def shrink(data, rows, cols):
        return data.reshape(rows, data.shape[0]//rows, cols, data.shape[1]//cols).sum(axis=1).sum(axis=2)
if fit == False:
if data_3d.shape != mask_data.shape:
raise ValueError('The shape of mask and data are not the same. Trying making "fit=True"')
else:
return data_3d * mask_data
elif fit== True:
new_mask = np.zeros(data_3d.shape)
for j in range(mask_data.shape[-1]):
new_mask[...,j] = shrink(mask_data[...,j], data_3d.shape[0], data_3d.shape[1])
return data_3d * new_mask
def present_3d_options(three_d_image,axis=2):
""" Coverts a 3d image into a 2nd image with slices in 3rd dimension varying across the element
Input:
------
three_d_image: is a 3 dimensional numpy array
axis: The axis you'd like to cut at (0,1, or 2)
Output:
-------
full: a 2d numpy array
#####
With Results just do:
In[0]: full=present_3d(three_d_image)
In[1]: plt.imshow(full,cmap="gray",interpolation="nearest")
In[2]: plt.colorbar()
"""
assert(axis in [0,1,2])
assert(len(three_d_image.shape)==3)
num_images=three_d_image.shape[axis]
if axis==0:
image_dim=list(three_d_image.shape[1:])
image_dim.reverse()
elif axis==1:
image_dim=(three_d_image.shape[2],three_d_image.shape[0])
else:
image_dim=three_d_image.shape[:2]
# formating grid
length=np.ceil(np.sqrt(num_images))
grid_size=[int(x) for x in (length,np.ceil(num_images/length))]
full=np.zeros((image_dim[0]*grid_size[0],image_dim[1]*grid_size[1]))
counter=0
if axis==0:
for row in range(int(grid_size[0])):
for col in range(int(grid_size[1])):
if counter< num_images:
full[(row*image_dim[0]):((row+1)*image_dim[0]),(col*image_dim[1]):((col+1)*image_dim[1])]=np.rot90(three_d_image[row*grid_size[1]+col,...],1)
counter=counter+1
return full
elif axis==1:
for row in range(int(grid_size[0])):
for col in range(int(grid_size[1])):
if counter< num_images:
full[(row*image_dim[0]):((row+1)*image_dim[0]),(col*image_dim[1]):((col+1)*image_dim[1])]=np.rot90(three_d_image[:,row*grid_size[1]+col,:],2).T
counter=counter+1
return full
else: # regular:
for row in range(int(grid_size[0])):
for col in range(int(grid_size[1])):
if counter< num_images:
full[(row*image_dim[0]):((row+1)*image_dim[0]),(col*image_dim[1]):((col+1)*image_dim[1])]=three_d_image[...,row*grid_size[1]+col]
counter=counter+1
return full
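# Hedged usage sketch (not part of the original module): tile a small 4x4x3
# volume into a 2d mosaic, then apply a higher-resolution 8x8x3 mask with
# fit=True, which block-sums the mask down to the data resolution.
if __name__ == "__main__":
    data_3d = np.arange(4 * 4 * 3, dtype=float).reshape((4, 4, 3))
    mosaic = present_3d(data_3d)
    print("mosaic shape:", mosaic.shape)   # (8, 8): a 2x2 grid of 4x4 slices
    mask = np.ones((8, 8, 3))
    masked = make_mask(data_3d, mask, fit=True)
    print("masked shape:", masked.shape)   # (4, 4, 3)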
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/Image_Visualizing.py",
"copies": "1",
"size": "4668",
"license": "bsd-3-clause",
"hash": 3281199777908891600,
"line_mean": 32.8260869565,
"line_max": 155,
"alpha_frac": 0.6214652956,
"autogenerated": false,
"ratio": 3.154054054054054,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4275519349654054,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def time_shift(convolved, neural_prediction, delta):
""" Returns tuple containing original convolved time course
with the correct number of volumes and a back-shifted
convolved time course.
Parameters:
-----------
convolved: 1-d array of the convolved time course.
neural_prediction: 1-d array of the event stimuli
delta: a single numeric value indicating how much to shift.
Returns:
--------
convolved2: convolved time course, but only up to the number
of volumes
shifted: convolved time course, back-shifted by delta.
"""
# Number of volumes.
N = len(neural_prediction)
# Assert that the shifting factor is reasonable.
assert(delta+N <= len(convolved))
# Knock off the extra volumes.
convolved2 = convolved[:N]
# Backshift by delta.
shifted = convolved[delta:(delta+N)]
return convolved2, shifted
def time_shift_cond(condition, delta):
""" Returns the shifted condition file
Parameters:
-----------
condition: a 1d np.array of stimulus times
delta: a single numeric value indicating how much to shift.
Returns:
--------
shift_condition: 1d np.array time shifted conditional data
"""
shift_condition= condition-delta
return shift_condition
def make_shift_matrix(condition,delta_vector):
""" Returns a matrix of shifted conditions as the columns (depending upon delta_vector)
Parameters:
-----------
condition: a 1d np.array of stimulus times (length n)
delta_vector: a 1d np.array of shifts (length m)
Returns:
--------
shift_matrix: a 2d np.array with time shifted columns (n x m)
"""
m = len(delta_vector)
n = condition.shape[0]
X=np.ones((n,m))
shifts=-X*delta_vector
shift_matrix=np.tile(condition,m).reshape(m,n).T+shifts
return shift_matrix
def time_correct(convolve_lambda,shift_matrix,num_TRs):
""" Returns a prediction for the Hemodyamic response for the given time points
Parameters:
-----------
    convolve_lambda: function that takes in 1 parameter (a 1d vector of times to be convolved)
shift_matrix: a 2d np.array with time shifted columns (n x m)
num_TRs: expected dimension of convolve_lambda output
Returns:
--------
hrf_matrix: a 2d np.array with predicted hrf
"""
hrf_matrix=np.zeros((num_TRs,shift_matrix.shape[1]))
for i in range(shift_matrix.shape[1]):
hrf_matrix[:,i]=convolve_lambda(shift_matrix[:,i])
return hrf_matrix
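# Hedged usage sketch (not part of the original module): build the matrix of
# shifted onset columns and push each column through a toy stand-in for the
# HRF convolution (here it just bins onsets into TRs).
if __name__ == "__main__":
    condition = np.array([2.0, 10.0, 18.0])  # stimulus onsets in seconds
    delta_vector = np.arange(3)              # shifts of 0, 1 and 2 seconds
    shift_matrix = make_shift_matrix(condition, delta_vector)
    print(shift_matrix)                      # one shifted copy of `condition` per column
    num_TRs = 15
    def toy_convolve(onsets, tr=2.0, n=num_TRs):
        # stand-in for a real HRF convolution: flag the TR of each onset
        out = np.zeros(n)
        out[(onsets / tr).astype(int)] = 1.0
        return out
    hrf_matrix = time_correct(toy_convolve, shift_matrix, num_TRs)
    print(hrf_matrix.shape)                  # (15, 3)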
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/time_shift.py",
"copies": "1",
"size": "2684",
"license": "bsd-3-clause",
"hash": -1468209829183042800,
"line_mean": 26.1111111111,
"line_max": 97,
"alpha_frac": 0.6479135618,
"autogenerated": false,
"ratio": 3.8563218390804597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.500423540088046,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from qtpy.QtWidgets import QDialog, QTableWidgetItem, QComboBox, QCheckBox, QSpacerItem, QSizePolicy, QHBoxLayout, \
QWidget
from addie.utilities import load_ui
from qtpy import QtCore
class GlobalRuleHandler:
def __init__(self, parent=None):
o_global = GlobalRuleWindow(parent=parent)
o_global.show()
class GlobalRuleWindow(QDialog):
list_of_rule_names = [] # ['0', '1', '2']
def __init__(self, parent=None):
self.parent = parent
QDialog.__init__(self, parent=parent)
self.ui = load_ui('filter_rule_editor.ui', baseinstance=self)
#self.ui = UiDialog()
#self.ui.setupUi(self)
self.init_widgets()
self.load_global_rule_dict()
self.refresh_global_rule()
self.check_widgets()
def get_list_of_rule_names(self):
"""make the list of rule name defined in the previous ui"""
table_widget = self.parent.ui.tableWidget
nbr_row = table_widget.rowCount()
list_of_rule_names = []
for _row in np.arange(nbr_row):
_name = str(table_widget.item(_row, 1).text())
list_of_rule_names.append(_name)
return list_of_rule_names
def init_widgets(self):
list_of_rule_names = self.get_list_of_rule_names()
self.list_of_rule_names = list_of_rule_names
for _col_index, _name in enumerate(list_of_rule_names):
self.ui.tableWidget.insertColumn(_col_index+2)
item_title = QTableWidgetItem(_name)
self.ui.tableWidget.setHorizontalHeaderItem(_col_index+2, item_title)
def load_global_rule_dict(self):
"""Using the global_rule_dict, populate the interface and check the right rules"""
global_rule_dict = self.parent.global_rule_dict
list_of_rule_names = self.list_of_rule_names
nbr_columns = self.ui.tableWidget.columnCount()
for _row, _key in enumerate(global_rule_dict.keys()):
self.add_row(row=_row)
name_of_group = _key
self.ui.tableWidget.item(_row, 0).setText(str(name_of_group))
list_of_rules_for_this_group = global_rule_dict[_key]['list_rules']
for _col_index, _rule in enumerate(list_of_rule_names):
if _rule in list_of_rules_for_this_group:
self.ui.tableWidget.cellWidget(_row, _col_index+2).children()[1].setChecked(True)
inner_rule = global_rule_dict[_key]['inner_rule']
_inner_index = self.ui.tableWidget.cellWidget(_row, nbr_columns-1).findText(inner_rule)
self.ui.tableWidget.cellWidget(_row, nbr_columns-1).blockSignals(True)
self.ui.tableWidget.cellWidget(_row, nbr_columns-1).setCurrentIndex(_inner_index)
self.ui.tableWidget.cellWidget(_row, nbr_columns-1).blockSignals(False)
if _row > 0:
outer_rule = global_rule_dict[_key]['outer_rule']
_outer_index = self.ui.tableWidget.cellWidget(_row, 1).findText(outer_rule)
self.ui.tableWidget.cellWidget(_row, 1).blockSignals(True)
self.ui.tableWidget.cellWidget(_row, 1).setCurrentIndex(_outer_index)
self.ui.tableWidget.cellWidget(_row, 1).blockSignals(False)
def check_widgets(self):
nbr_row = self.ui.tableWidget.rowCount()
enable_remove_widget = True
if nbr_row == 0:
enable_remove_widget = False
self.ui.remove_group_button.setEnabled(enable_remove_widget)
def define_unique_group_name(self, row):
"""this method makes sure that the name of the group defined is unique and does not exist already"""
nbr_row = self.ui.tableWidget.rowCount()
list_group_name = []
for _row in np.arange(nbr_row):
            if self.ui.tableWidget.item(_row, 0):
                _group_name = str(self.ui.tableWidget.item(_row, 0).text())
                list_group_name.append(_group_name)
offset = 0
while True:
if ("{}".format(offset+row)) in list_group_name:
offset += 1
else:
return "{}".format(offset+row)
def add_row(self, row=-1, check_new_row=False):
self.ui.tableWidget.insertRow(row)
list_of_widgets_to_unlock = []
# group name
_column = 0
_group_name = self.define_unique_group_name(row)
_item = QTableWidgetItem(_group_name)
_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
self.ui.tableWidget.setItem(row, _column, _item)
# group to group rule
list_options = ["and", "or"]
_column += 1
if row > 0:
_widget = QComboBox()
_widget.addItems(list_options)
self.ui.tableWidget.setCellWidget(row, _column, _widget)
_widget.blockSignals(True)
list_of_widgets_to_unlock.append(_widget)
_widget.currentIndexChanged.connect(lambda value=list_options[0]:
self.combobox_changed(value))
else:
_item = QTableWidgetItem("N/A")
_item.setFlags(QtCore.Qt.ItemIsEnabled)
self.ui.tableWidget.setItem(row, _column, _item)
# rule columns
_column += 1
for _offset in np.arange(len(self.list_of_rule_names)):
_row_layout = QHBoxLayout()
_widget = QCheckBox()
_widget.blockSignals(True)
if check_new_row and _offset == row:
_widget.setCheckState(QtCore.Qt.Checked)
list_of_widgets_to_unlock.append(_widget)
_widget.stateChanged.connect(lambda value=0:
self.checkbox_changed(value))
_spacer1 = QSpacerItem(40,20, QSizePolicy.Expanding, QSizePolicy.Minimum)
_row_layout.addItem(_spacer1)
_row_layout.addWidget(_widget)
_spacer2 = QSpacerItem(40,20, QSizePolicy.Expanding, QSizePolicy.Minimum)
_row_layout.addItem(_spacer2)
_rule_widget = QWidget()
_rule_widget.setLayout(_row_layout)
self.ui.tableWidget.setCellWidget(row, _column+_offset, _rule_widget)
# inner group rule
_column += len(self.list_of_rule_names)
_widget = QComboBox()
_widget.blockSignals(True)
list_of_widgets_to_unlock.append(_widget)
_widget.setEnabled(False)
_widget.currentIndexChanged.connect(lambda value=list_options[0]:
self.combobox_changed(value))
list_options = ["and", "or"]
_widget.addItems(list_options)
self.ui.tableWidget.setCellWidget(row, _column, _widget)
self.unlock_signals_ui(list_of_widgets_to_unlock)
def unlock_signals_ui(self, list_ui=[]):
if list_ui == []:
return
for _ui in list_ui:
_ui.blockSignals(False)
def check_status_of_inner_rule(self):
"""the inner rule ['and', 'or'] does not need to be enabled when there is only 1 (or zero)
rule checked in the same row"""
nbr_row = self.ui.tableWidget.rowCount()
nbr_total_columns = self.ui.tableWidget.columnCount()
nbr_rules = nbr_total_columns - 3
for _row in np.arange(nbr_row):
enabled_inner_rule_combobox = False
if nbr_rules > 1:
nbr_rules_checked = 0
for _rule_index in np.arange(nbr_rules):
checkbox_ui = self.ui.tableWidget.cellWidget(_row, _rule_index + 2).children()[1]
is_checkbox_checked = checkbox_ui.isChecked()
if is_checkbox_checked:
nbr_rules_checked += 1
if nbr_rules_checked > 1:
enabled_inner_rule_combobox = True
self.ui.tableWidget.cellWidget(_row, nbr_total_columns-1).setEnabled(enabled_inner_rule_combobox)
def checkbox_changed(self, value):
self.check_status_of_inner_rule()
self.refresh_global_rule()
def combobox_changed(self, value):
self.refresh_global_rule()
def _retrieve_group_relation(self, row=-1, group_type='inner'):
nbr_column = self.ui.tableWidget.columnCount()
if group_type == 'inner':
column = nbr_column - 1
else:
if row == 0:
return ""
column = 1
widget = self.ui.tableWidget.cellWidget(row, column)
if widget:
return widget.currentText()
else:
return ""
def _retrieve_rules_checked(self, row=-1):
nbr_rules = len(self.list_of_rule_names)
list_of_rules_checked = []
global_offset_up_to_rule_name = 2
for _index_rule in np.arange(nbr_rules):
_widget = self.ui.tableWidget.cellWidget(row, global_offset_up_to_rule_name+_index_rule).children()[1]
if _widget.checkState() == QtCore.Qt.Checked:
rule_name= str(self.ui.tableWidget.horizontalHeaderItem(global_offset_up_to_rule_name+_index_rule).text())
list_of_rules_checked.append("#{}".format(rule_name))
return list_of_rules_checked
def refresh_global_rule(self):
self.save_global_rule_dict()
global_rule = self.parent.create_global_rule_string()
self.ui.rule_result.setText(global_rule)
# Event Handler
def add_group(self):
self.ui.remove_group_button.setEnabled(True)
nbr_row = self.ui.tableWidget.rowCount()
self.add_row(row=nbr_row, check_new_row=True)
def remove_group(self):
_select = self.ui.tableWidget.selectedRanges()
if not _select:
return
else:
row = _select[0].topRow()
self.ui.tableWidget.removeRow(row)
self.check_widgets()
self.refresh_global_rule()
def save_global_rule_dict(self):
nbr_row = self.ui.tableWidget.rowCount()
total_nbr_columns = self.ui.tableWidget.columnCount()
nbr_rules = total_nbr_columns - 3
list_of_rule_names = self.list_of_rule_names
global_rule_dict = {}
for _row in np.arange(nbr_row):
_row_rule_dict = {}
group_name = str(self.ui.tableWidget.item(_row, 0).text())
if _row == 0:
outer_rule = None
else:
outer_rule = str(self.ui.tableWidget.cellWidget(_row, 1).currentText())
inner_rule = str(self.ui.tableWidget.cellWidget(_row, total_nbr_columns-1).currentText())
list_rules_checked = []
for _rule_index in np.arange(nbr_rules):
_is_checked = self.ui.tableWidget.cellWidget(_row, _rule_index+2).children()[1].isChecked()
if _is_checked:
_name = list_of_rule_names[_rule_index]
list_rules_checked.append(_name)
_row_rule_dict['group_name'] = group_name
_row_rule_dict['list_rules'] = list_rules_checked
_row_rule_dict['inner_rule'] = inner_rule
_row_rule_dict['outer_rule'] = outer_rule
global_rule_dict[_row] = _row_rule_dict
self.parent.global_rule_dict = global_rule_dict
def accept(self):
# copy global rule into import_from_database ui
self.save_global_rule_dict()
global_rule_string = self.parent.create_global_rule_string()
self.parent.ui.global_rule_lineedit.setText(global_rule_string)
self.parent.update_rule_filter()
self.close()
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/global_rule_handler.py",
"copies": "1",
"size": "11730",
"license": "mit",
"hash": -2127664959525857500,
"line_mean": 38.4949494949,
"line_max": 122,
"alpha_frac": 0.5883205456,
"autogenerated": false,
"ratio": 3.681732580037665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4770053125637665,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
from qtpy.QtWidgets import QMainWindow
from addie.utilities import load_ui
from addie.processing.mantid.master_table.table_row_handler import \
TableRowHandler
from addie.utilities import math_tools
from addie.processing.mantid.master_table.tree_definition import \
INDEX_OF_COLUMNS_WITH_MASS_DENSITY
from addie.processing.mantid.master_table.periodic_table.material_handler import \
retrieving_molecular_mass_and_number_of_atoms_worked
class MassDensityHandler:
def __init__(self, parent=None, key=None, data_type='sample'):
if parent.mass_density_ui is None:
o_mass = MassDensityWindow(
parent=parent, key=key, data_type=data_type)
parent.mass_density_ui = o_mass
if parent.mass_density_ui_position:
parent.mass_density_ui.move(parent.mass_density_ui_position)
o_mass.show()
else:
parent.mass_density_ui.setFocus()
parent.mass_density_ui.activateWindow()
class MassDensityWindow(QMainWindow):
chemical_formula_defined = False
geometry_dimensions_defined = False
total_number_of_atoms = np.NaN
total_molecular_mass = np.NaN
column = 0
precision = 5
def __init__(self, parent=None, key=None, data_type='sample'):
self.parent = parent
self.key = key
self.data_type = data_type
QMainWindow.__init__(self, parent=parent)
self.ui = load_ui('mass_density.ui', baseinstance=self)
self.init_widgets()
self.set_column_index()
def _to_precision_string(self, value):
return "{:.{num}f}".format(value, num=self.precision)
def set_column_index(self):
self.column = INDEX_OF_COLUMNS_WITH_MASS_DENSITY[0] if self.data_type == 'sample' else \
INDEX_OF_COLUMNS_WITH_MASS_DENSITY[1]
def init_widgets(self):
self.ui.number_density_units.setText(u"Atoms/\u212B\u00B3")
self.ui.mass_density_label.setText(u"g/cm\u00B3")
self.ui.volume_units.setText(u"cm\u00B3")
self.ui.ok.setEnabled(False)
# error messages
self.ui.mass_density_error_message.setStyleSheet("color: red")
self.ui.number_density_error_message.setStyleSheet("color: red")
self.ui.mass_error_message.setStyleSheet("color: red")
# geometry
geometry = str(
self.parent.master_table_list_ui[self.key][self.data_type]['shape'].currentText())
self.ui.geometry_label.setText(geometry)
self.geometry_dimensions_defined = self._is_geometry_dimensions_defined()
if self.geometry_dimensions_defined:
self._calculate_and_display_geometry_volume()
self.chemical_formula_defined = self._is_chemical_formula_defined()
if self.chemical_formula_defined:
chemical_formula = self._get_chemical_formula()
molecular_mass, total_number_of_atoms = retrieving_molecular_mass_and_number_of_atoms_worked(
chemical_formula)
self.total_molecular_mass = molecular_mass
self.total_number_of_atoms = total_number_of_atoms
mass_density_list_ui = self.parent.master_table_list_ui[self.key][self.data_type]
mass_density_infos = mass_density_list_ui['mass_density_infos']
_mass_density = str(
mass_density_list_ui['mass_density']['text'].text())
self.ui.mass_density_line_edit.setText(_mass_density)
_mass_density_checked = mass_density_infos['mass_density']['selected']
_number_density_checked = mass_density_infos['number_density']['selected']
_number_density = mass_density_infos['number_density']['value']
self.ui.number_density_line_edit.setText(_number_density)
_mass_value = mass_density_infos['mass']['value']
self.ui.mass_line_edit.setText(_mass_value)
if _mass_density_checked:
self.ui.mass_density_radio_button.setChecked(True)
elif _number_density_checked:
self.ui.number_density_radio_button.setChecked(True)
else:
self.ui.mass_geometry_radio_button.setChecked(True)
self.radio_button_changed()
def _get_chemical_formula(self):
return self.parent.master_table_list_ui[self.key][self.data_type]['material']['text'].text(
)
def _is_chemical_formula_defined(self):
chemical_formula = self._get_chemical_formula()
if chemical_formula == "" or chemical_formula == 'N/A':
return False
return True
def _is_geometry_dimensions_defined(self):
geometry_defined = str(self.ui.geometry_label.text())
radius = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius']['value'].text())
radius2 = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius2']['value'].text())
height = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['height']['value'].text())
if geometry_defined.lower() == 'cylinder':
if math_tools.is_number(radius) and math_tools.is_number(height):
return True
elif geometry_defined.lower() == 'sphere':
if math_tools.is_number(radius):
return True
else:
if math_tools.is_number(radius) and math_tools.is_number(
radius2) and math_tools.is_number(height):
return True
return False
def _calculate_and_display_geometry_volume(self):
geometry_defined = str(self.ui.geometry_label.text())
radius = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius']['value'].text())
radius2 = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['radius2']['value'].text())
height = str(self.parent.master_table_list_ui[self.key]
[self.data_type]['geometry']['height']['value'].text())
# construct geometry object
geom = {
'Shape': geometry_defined,
'Radius': radius,
'Radius2': radius2,
'Height': height
}
volume = math_tools.get_volume_from_geometry(geom)
str_volume = "{:.4}".format(volume)
self.ui.volume_label.setText(str_volume)
def mass_density_value_changed(self):
mass_density = np.float(self.ui.mass_density_line_edit.text())
# calculate number density if chemical formula defined
if self.chemical_formula_defined:
natoms = self.total_number_of_atoms
molecular_mass = self.total_molecular_mass
number_density = math_tools.mass_density2number_density(
mass_density, natoms, molecular_mass)
number_density = self._to_precision_string(number_density)
else:
number_density = 'N/A'
# calculate mass if geometry defined
if self.geometry_dimensions_defined:
volume = np.float(self.ui.volume_label.text())
mass = math_tools.mass_density2mass(mass_density, volume)
mass = self._to_precision_string(mass)
else:
mass = 'N/A'
self.ui.number_density_line_edit.setText(number_density)
self.ui.mass_line_edit.setText(mass)
self.update_status_of_save_button()
def number_density_value_changed(self):
number_density = np.float(self.ui.number_density_line_edit.text())
# calculate mass density if chemical formula defined
if self.chemical_formula_defined:
natoms = self.total_number_of_atoms
molecular_mass = self.total_molecular_mass
mass_density = math_tools.number_density2mass_density(
number_density, natoms, molecular_mass)
mass_density = self._to_precision_string(mass_density)
# calculate mass if geometry defined
if self.geometry_dimensions_defined:
volume = np.float(self.ui.volume_label.text())
mass = math_tools.number_density2mass(
number_density, volume, natoms, molecular_mass)
mass = self._to_precision_string(mass)
else:
mass = 'N/A'
else:
mass_density = 'N/A'
mass = 'N/A'
self.ui.mass_density_line_edit.setText(mass_density)
self.ui.mass_line_edit.setText(mass)
self.update_status_of_save_button()
def mass_value_changed(self):
        mass = float(self.ui.mass_line_edit.text())
        # calculate mass density if geometry defined
if self.geometry_dimensions_defined:
            volume = float(self.ui.volume_label.text())
mass_density = math_tools.mass2mass_density(mass, volume)
mass_density = self._to_precision_string(mass_density)
            # calculate number density if chemical formula defined
if self.chemical_formula_defined:
natoms = self.total_number_of_atoms
molecular_mass = self.total_molecular_mass
number_density = math_tools.mass2number_density(
mass, volume, natoms, molecular_mass)
number_density = self._to_precision_string(number_density)
else:
number_density = "N/A"
else:
mass_density = "N/A"
number_density = "N/A"
self.ui.mass_density_line_edit.setText(mass_density)
self.ui.number_density_line_edit.setText(number_density)
self.update_status_of_save_button()
def radio_button_changed(self):
mass_density_line_edit_status = False
number_density_line_edit_status = False
mass_line_edit_status = False
if self.ui.mass_density_radio_button.isChecked():
self.ui.mass_density_error_message.setVisible(False)
self.ui.number_density_error_message.setVisible(
not self.chemical_formula_defined)
self.ui.mass_error_message.setVisible(
not self.geometry_dimensions_defined)
mass_density_line_edit_status = True
elif self.ui.number_density_radio_button.isChecked():
self.ui.mass_density_error_message.setVisible(
not self.chemical_formula_defined)
self.ui.number_density_error_message.setVisible(False)
self.ui.mass_error_message.setVisible(
not self.chemical_formula_defined and not self.geometry_dimensions_defined)
number_density_line_edit_status = True
else:
self.ui.mass_density_error_message.setVisible(
not self.geometry_dimensions_defined)
self.ui.number_density_error_message.setVisible(
not self.chemical_formula_defined and not self.geometry_dimensions_defined)
self.ui.mass_error_message.setVisible(
not self.geometry_dimensions_defined)
mass_line_edit_status = True
self.ui.mass_line_edit.setEnabled(mass_line_edit_status)
self.ui.number_density_line_edit.setEnabled(
number_density_line_edit_status)
self.ui.mass_density_line_edit.setEnabled(
mass_density_line_edit_status)
self.update_status_of_save_button()
def update_status_of_save_button(self):
# check the active radio button and check if value is there to enable
# save button
enabled_save_button = False
if self.ui.mass_density_radio_button.isChecked():
string_value = str(self.ui.mass_density_line_edit.text())
if math_tools.is_number(string_value):
enabled_save_button = True
elif self.ui.number_density_radio_button.isChecked():
string_value = str(self.ui.number_density_line_edit.text())
if math_tools.is_number(string_value):
enabled_save_button = True
else:
string_value = str(self.ui.mass_line_edit.text())
if math_tools.is_number(string_value) and self.chemical_formula_defined and \
self.geometry_dimensions_defined:
enabled_save_button = True
self.ui.ok.setEnabled(enabled_save_button)
def save(self):
# first validate fields in case user forgot to hit enter before leaving
# window
if self.ui.mass_density_radio_button.isChecked():
self.mass_density_value_changed()
elif self.ui.number_density_radio_button.isChecked():
self.number_density_value_changed()
else:
self.mass_value_changed()
mass_density_list_ui = self.parent.master_table_list_ui[self.key][self.data_type]
mass_density_infos = mass_density_list_ui['mass_density_infos']
mass_density_flag = False
number_density_flag = False
mass_flag = False
if self.ui.mass_density_radio_button.isChecked():
mass_density_flag = True
elif self.ui.number_density_radio_button.isChecked():
number_density_flag = True
else:
mass_flag = True
mass_density = str(self.ui.mass_density_line_edit.text())
mass_density_list_ui['mass_density']['text'].setText(mass_density)
mass_density_infos['mass_density']['value'] = mass_density
mass_density_infos['mass_density']['selected'] = mass_density_flag
number_density = str(self.ui.number_density_line_edit.text())
mass_density_infos['number_density']['value'] = number_density
mass_density_infos['number_density']['selected'] = number_density_flag
mass = str(self.ui.mass_line_edit.text())
mass_density_infos['mass']['value'] = mass
mass_density_infos['mass']['selected'] = mass_flag
def accept(self):
self.save()
o_table = TableRowHandler(main_window=self.parent)
o_table.transfer_widget_states(
from_key=self.key, data_type=self.data_type)
self.parent.check_master_table_column_highlighting(column=self.column)
self.close()
def reject(self):
self.close()
def closeEvent(self, c):
self.parent.mass_density_ui = None
self.parent.mass_density_ui_position = self.pos()
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/mass_density_handler.py",
"copies": "1",
"size": "14472",
"license": "mit",
"hash": -6878807287725796000,
"line_mean": 40.1136363636,
"line_max": 105,
"alpha_frac": 0.6216141515,
"autogenerated": false,
"ratio": 3.785508762751766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4907122914251766,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
import pdb
from cfractald import corrDim, getCOMs
__all__ = ['corrcalc','getCOMsPy','getCOMs','getCOMnumpy','methodL','fit2']
def getCOMnumpy(poslist,masslist):
#return com coordinates of a single molecule, written in python using numpy
    poslist3 = poslist.reshape([len(poslist)//3,3])  # integer division: reshape needs an int row count
com = np.dot(masslist,poslist3)/masslist.sum()
return com
def getCOMsPy(poslist,masslist,ats):
#return the center of mass of each molecule in a position list of peptides
N = int(len(poslist)/3/ats) #total number of molecules
comlocs = np.zeros(3*N)
for i in range(N):
Rcom = getCOMnumpy(poslist[3*ats*i:3*ats*i+3*ats],masslist)
comlocs[3*i:3*i+3] = Rcom
return comlocs
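# Shape conventions assumed by the COM helpers above (inferred from the code,
# for illustration only):
#   poslist  : flat array of length 3*ats*N, molecule-major (x, y, z per bead)
#   masslist : array of length ats, one mass per bead (same for every molecule)
#   comlocs  : flat array of length 3*N, one center of mass per molecule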
def getcomsPy( pos, coms, masslist, beads, mols):
pos = np.reshape(pos,[1,3 * beads * mols])[0]
coms = np.reshape(coms,[1,3*mols])[0]
for i in range(mols):
X = 0;
Y = 0;
Z = 0;
M = 0;
for j in range(beads):
#if i == 1:
# pdb.set_trace()
            x = masslist[j] * pos[3*beads*i + 3*j];
            y = masslist[j] * pos[3*beads*i + 3*j+1];
            z = masslist[j] * pos[3*beads*i + 3*j+2];
X+=x;
Y+=y;
Z+=z;
M += masslist[j];
X/=M;
Y/=M;
Z/=M;
coms[3*i] = X;
coms[3*i + 1] = Y;
coms[3*i + 2] = Z;
coms = np.reshape(coms,[mols,3])
return coms
class LineFit(object):
""" A simple container for slope, intercept, and variance of a line fit
Attributes
----------
slope: slope of line fit
intercept: intercept of line fit
slopeVar: variance of the slope
interVar: variance of the intercept
"""
def __init__(self,s,i,sV,iV):
self.slope = s
self.intercept = i
self.slopeVar = sV
self.interVar = iV
def fit2(x,y,w,i):
""" Helper function for L method. Takes independent and dependent data
and performs two linear fits, splitting the data at index i.
Parameters
----------
x: numpy vector
the independent variable
y: numpy vector
the dependent variable
w: numpy vector
weights for the fit
i: int
the index to split on
Returns
-------
rmset: float
total root mean square error
line1: LineFit object
first line fit
line2: LineFit object
second line fit
"""
pf1 = np.polyfit(x[0:i+1],y[0:i+1],1,w=w[0:i+1],full=True)
p1 = pf1[0]
sse1 = pf1[1][0]
v1 = i - 2
mse1 = sse1/v1
rmse1 = np.sqrt(mse1)
pf2 = np.polyfit(x[i:len(x)],y[i:len(x)],1,w=w[i:len(x)],full=True)
p2 = pf2[0]
sse2 = pf2[1][0]
v2 = len(x) - i - 2
mse2 = sse2/v2
rmse2 = np.sqrt(mse2)
(p1,cov1) = np.polyfit(x[0:i+1],y[0:i+1],1,w=w[0:i+1],cov=True)
(p2,cov2) = np.polyfit(x[i:len(x)],y[i:len(x)],1,w=w[i:len(x)],cov=True)
line1 = LineFit(p1[0],p1[1],cov1[0][0],cov1[1][1])
line2 = LineFit(p2[0],p2[1],cov2[0][0],cov2[1][1])
#pdb.set_trace()
rmset = ((i-1.)/(len(x)-1.))*rmse1 + ((len(x)-i)/(len(x)-1.))*rmse2
return (rmset,line1,line2)
def methodL(x,y,w,xstart=None,xend=None):
""" Performs the L method on y vs x data
Parameters
----------
x: numpy vector
the independent variable
y: numpy vector
the dependent variable
w: numpy vector
weights for the fit
xstart: float
the x value at which to start, if not starting from the initial one
if it is None, defaults to starting from the beginning
xend: float
the x value at which to end, if not ending at the final value
if it is None, defaults to ending at the final value
Returns
-------
xjunct: float
the x value at which to break between the two lines
line1: a simple struct containing slope and intercept
line2: a simple struct containing slope and intercept
totalErr: float
total root mean squared error
Notes
-----
The L method simply finds the spot where to break the data such that two
lines fitted to the data return the minimum possible root mean square
error, which is computed as
RMSET = ((j-1)/(length(vals)-1))*RMSE1
+ ((length(vals)-j)/(length(vals)-1))*RMSE2,
where RMSE1 is the root mean square error of line 1, RMSE2 is the root
mean square error of line 2, j is the point of switching over, and vals
is the set of dependent variables
"""
if xstart is None:
xstart = min(x)
if xend is None:
xend = max(x)
istart = np.abs(x-xstart).argmin()
iend = np.abs(x-xend).argmin()
x = x[istart:(iend+1)]
y = y[istart:(iend+1)]
rmset = np.inf
for i in range(3,len(x)-3):
(rmsetj,linei1,linei2) = fit2(x,y,w,i)
#pdb.set_trace()
if rmsetj < rmset:
rmset = rmsetj
xjunct = i
line1 = linei1
line2 = linei2
xjunct = xjunct + istart
return (xjunct,line1,line2,rmset)
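# Illustrative call of the L method (hypothetical inputs, not part of the
# original module): x and y would typically be log(eps) and log(C(eps)) as
# produced by corrcalc below, with w a vector of weights, e.g. np.ones(len(x)).
#   (xjunct, line1, line2, rmset) = methodL(x, y, w)
#   # line1.slope is the slope of the first fitted line, up to index xjunct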
def corrcalc(coms,emax,estep,fname=None):
""" Given the locations of the beads in a snapshot, find and write out
the C(e) function
Parameters
----------
coms: numpy array [M x 3]
M = molno
array contains COM positions for all molecules in a snapshot
emax: double
distance out to which to compute the correlation integral
estep: double
size of steps for correlation integral calculation
fname: file name to write to
if None, do not write file
Returns
-------
corrInt: numpy array [N x 2]
matrix representing the correlation integral versus radius (eps)
"""
(epsnap,cdsnap) = corrDim(coms,emax,estep)
if fname is not None:
f = open(fname,'w')
for i in range(len(cdsnap)):
f.write('{0}\t{1}\n'.format(epsnap[i],cdsnap[i]))
f.close()
return np.array([epsnap,cdsnap])
| {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/fractald.py",
"copies": "1",
"size": "6320",
"license": "mit",
"hash": 3962068024579423700,
"line_mean": 29.0952380952,
"line_max": 79,
"alpha_frac": 0.5579113924,
"autogenerated": false,
"ratio": 3.28653146125845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.434444285365845,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
class DataToImportHandler:
def __init__(self, parent=None):
self.parent = parent
def is_with_filter(self):
if self.parent.ui.toolBox.currentIndex() == 0:
return False
return True
def get_runs_from_row_number(self, list_of_rows_to_load):
"""looking at the tablewidget of all runs, this method will return the equivalent run numbers of the
equivalent list of rows
"""
list_of_runs = []
for _row in list_of_rows_to_load:
_run_for_row = str(self.parent.ui.tableWidget_all_runs.item(_row, 0).text())
list_of_runs.append(_run_for_row)
return list_of_runs
def isolate_runs_from_json(self, json=None, list_of_runs=[]):
clean_json_list = []
for _entry in json:
_run = str(_entry["indexed"]["run_number"])
if _run in list_of_runs:
clean_json_list.append(_entry)
return clean_json_list
def get_json_of_data_to_import(self):
if self.is_with_filter():
# work only with filtered runs
list_of_rows_to_load = list(self.parent.list_of_rows_with_global_rule)
else:
# work with entire stack of runs
nbr_rows = self.parent.ui.tableWidget_all_runs.rowCount()
list_of_rows_to_load = np.arange(nbr_rows)
list_of_runs = self.get_runs_from_row_number(list_of_rows_to_load)
nexus_json_to_import = self.isolate_runs_from_json(json=self.parent.nexus_json_all_infos,
list_of_runs=list_of_runs)
return nexus_json_to_import
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_from_database/data_to_import_handler.py",
"copies": "1",
"size": "1741",
"license": "mit",
"hash": -8826519960374580000,
"line_mean": 35.2708333333,
"line_max": 108,
"alpha_frac": 0.5881677197,
"autogenerated": false,
"ratio": 3.567622950819672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4655790670519672,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import numpy as np
class GuiHandler(object):
def __init__(self, parent=None):
self.parent = parent
def dropdown_get_value(self, widget_id=None):
if not widget_id:
return "N/A"
return widget_id.currentText()
def dropdown_get_index(self, widget_id=None):
if not widget_id:
return -1
return widget_id.currentIndex()
def dropdown_set_index(self, widget_id=None, index=-1):
if not widget_id:
return
widget_id.setCurrentIndex(index)
def radiobutton_get_state(self, widget_id=None):
return widget_id.isChecked()
def radiobutton_set_state(self, widget_id = None, state = True):
widget_id.setChecked(state)
class TableHandler(object):
def __init__(self, table_ui=None):
self.table_ui = table_ui
def get_current_row(self):
return self.table_ui.currentRow()
@staticmethod
def clear_table(table_ui):
nbr_row = table_ui.rowCount()
for _ in np.arange(nbr_row):
table_ui.removeRow(0)
def show_all_rows(self):
self.set_row_visibility(visibility=True, all_rows=True)
def hide_all_rows(self):
self.set_row_visibility(visibility=False, all_rows=True)
def set_row_visibility(self, visibility=True, list_of_rows=[], all_rows=False):
if all_rows:
list_of_rows = np.arange(self.table_ui.rowCount())
for _row in list_of_rows:
self.table_ui.setRowHidden(_row, not visibility)
def show_list_of_rows(self, list_of_rows=[]):
self.hide_all_rows()
self.set_row_visibility(visibility=True, list_of_rows=list_of_rows)
def unlock_signals_ui(list_ui=[]):
if list_ui == []:
return
for _ui in list_ui:
_ui.blockSignals(False)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/utilities/gui_handler.py",
"copies": "1",
"size": "1885",
"license": "mit",
"hash": 4535089010033235500,
"line_mean": 25.1805555556,
"line_max": 83,
"alpha_frac": 0.6169761273,
"autogenerated": false,
"ratio": 3.446069469835466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4563045597135466,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def bh_procedure(p_vals, Q):
"""
Return an array (mask) of the significant, valid tests
out of the p-values. not significant p-values are denoted by ones.
Parameters
----------
p_vals: p-values from the t_stat function (1-dimensional array)
Q: The false discovery rate
Returns
-------
significant_pvals : 1-d array of p-values of tests that are
deemed significant, denoted by 1's and p-values
Note: You will have to reshape the output to the shape of the data set.
"""
# k is Q/m where m = len(p_vals)
k = Q/p_vals.shape[0]
# Multiply an array of rank values by k
upper = k*np.fromiter(range(1, 1 + p_vals.shape[0]), dtype = "int")
p_sorted = np.sort(p_vals, axis = 0)
bool_array = np.zeros(p_sorted.shape[0], dtype = bool)
for i in range(p_sorted.shape[0]):
if p_sorted[i] < upper[i]:
bool_array[i] = True
# Find maximum True index and the element in it from p_sorted
indices = np.where(bool_array)
# Make sure there are indices that returned True!!
    if len(indices[0]) != 0:
max_true_index = np.max(indices)
# max_upper is the highest that a p-value can be to be considered significant.
max_upper = np.ravel(p_sorted)[max_true_index]
# If no indices where p < upper
else:
print("**** Oh no. No p-values smaller than upper bound FDR were found. ****")
return p_vals
# Make all non-siginificant p-values zero
final_p = [x if x <= max_upper else 1 for x in np.ravel(p_vals)]
# shape of returned array is (len,)
return np.array(final_p)
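# Minimal self-contained illustration of the procedure above (the p-values are
# made up for demonstration and are not part of the original module):
if __name__ == "__main__":
    example_p = np.array([0.001, 0.008, 0.039, 0.041, 0.042,
                          0.060, 0.074, 0.205, 0.212, 0.900])
    # With Q = 0.1, only p-values up to the largest one passing the
    # Benjamini-Hochberg cutoff survive; the rest are set to 1.
    print(bh_procedure(example_p, 0.1))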
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/functions/benjamini_hochberg.py",
"copies": "1",
"size": "1604",
"license": "bsd-3-clause",
"hash": -7443446584588915000,
"line_mean": 28.1636363636,
"line_max": 80,
"alpha_frac": 0.6708229426,
"autogenerated": false,
"ratio": 2.9981308411214953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8834532624449563,
"avg_score": 0.06688423185438648,
"num_lines": 55
} |
from __future__ import absolute_import, division, print_function
import numpy as np
def fit_quad_to_peak(x, y):
"""
Fits a quadratic to the data points handed in
to the from y = b[0](x-b[1])**2 + b[2] and R2
(measure of goodness of fit)
Parameters
----------
x : ndarray
locations
y : ndarray
values
Returns
-------
b : tuple
coefficients of form y = b[0](x-b[1])**2 + b[2]
R2 : float
R2 value
"""
lenx = len(x)
# some sanity checks
if lenx < 3:
raise Exception('insufficient points handed in ')
# set up fitting array
X = np.vstack((x ** 2, x, np.ones(lenx))).T
# use linear least squares fitting
beta, _, _, _ = np.linalg.lstsq(X, y)
SSerr = np.sum((np.polyval(beta, x) - y)**2)
SStot = np.sum((y - np.mean(y))**2)
# re-map the returned value to match the form we want
ret_beta = (beta[0],
-beta[1] / (2 * beta[0]),
beta[2] - beta[0] * (beta[1] / (2 * beta[0])) ** 2)
return ret_beta, 1 - SSerr / SStot
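# Quick sanity check (illustrative only, not part of the original module):
# sample a known parabola y = 2*(x - 1)**2 + 3 and recover its parameters.
if __name__ == "__main__":
    x_demo = np.linspace(-5, 5, 51)
    y_demo = 2 * (x_demo - 1) ** 2 + 3
    beta_demo, r2_demo = fit_quad_to_peak(x_demo, y_demo)
    print(beta_demo, r2_demo)  # expect roughly (2.0, 1.0, 3.0) and R2 close to 1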
| {
"repo_name": "licode/scikit-xray",
"path": "skbeam/core/fitting/funcs.py",
"copies": "7",
"size": "1084",
"license": "bsd-3-clause",
"hash": -1682874080664882000,
"line_mean": 23.0888888889,
"line_max": 67,
"alpha_frac": 0.5295202952,
"autogenerated": false,
"ratio": 3.1060171919770774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7135537487177077,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np, random
np.random.seed(1)
random.seed(1)
from dragonn.models import SequenceDNN
from simdna.simulations import simulate_single_motif_detection
from dragonn.utils import one_hot_encode, get_motif_scores, reverse_complement
try:
from sklearn.model_selection import train_test_split # sklearn >= 0.18
except ImportError:
from sklearn.cross_validation import train_test_split # sklearn < 0.18
import sys
# Settings
seq_length = 500
num_sequences = 8000
num_positives = 4000
num_negatives = num_sequences - num_positives
GC_fraction = 0.4
test_fraction = 0.2
validation_fraction = 0.2
do_hyperparameter_search = False
num_hyperparameter_trials = 50
num_epochs = 100
use_deep_CNN = False
use_RNN = False
print('Generating sequences...')
sequences, labels, embeddings = simulate_single_motif_detection(
'SPI1_disc1', seq_length, num_positives, num_negatives, GC_fraction)
print('One-hot encoding sequences...')
encoded_sequences = one_hot_encode(sequences)
print('Getting motif scores...')
motif_scores = get_motif_scores(encoded_sequences, motif_names=['SPI1_disc1'])
print('Partitioning data into training, validation and test sets...')
X_train, X_test, y_train, y_test = train_test_split(encoded_sequences, labels, test_size=test_fraction)
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=validation_fraction)
print('Adding reverse complements...')
X_train = np.concatenate((X_train, reverse_complement(X_train)))
y_train = np.concatenate((y_train, y_train))
print('Shuffling training data...')
random_order = np.arange(len(X_train))
np.random.shuffle(random_order)
X_train = X_train[random_order]
y_train = y_train[random_order]
# Build and train model
if not do_hyperparameter_search:
hyperparameters = {'seq_length': seq_length, 'use_RNN': use_RNN,
'num_filters': (45,), 'pool_width': 25, 'conv_width': (10,),
'L1': 0, 'dropout': 0.2, 'num_epochs': num_epochs}
if use_deep_CNN:
hyperparameters.update({'num_filters': (45, 50, 50), 'conv_width': (10, 8, 5)})
if use_RNN:
hyperparameters.update({'GRU_size': 35, 'TDD_size': 45})
model = SequenceDNN(**hyperparameters)
model.train(X_train, y_train, validation_data=(X_valid, y_valid),
save_best_model_to_prefix='best_model')
else:
print('Starting hyperparameter search...')
from dragonn.hyperparameter_search import HyperparameterSearcher, RandomSearch
fixed_hyperparameters = {'seq_length': seq_length, 'use_RNN': use_RNN, 'num_epochs': num_epochs}
grid = {'num_filters': ((5, 100),), 'pool_width': (5, 40),
'conv_width': ((6, 20),), 'dropout': (0, 0.5)}
if use_deep_CNN:
grid.update({'num_filters': ((5, 100), (5, 100), (5, 100)),
'conv_width': ((6, 20), (6, 20), (6, 20))})
if use_RNN:
grid.update({'GRU_size': (10, 50), 'TDD_size': (20, 60)})
# Backend is RandomSearch; if using Python 2, can also specify MOESearch
# (requires separate installation)
searcher = HyperparameterSearcher(SequenceDNN, fixed_hyperparameters, grid, X_train, y_train,
validation_data=(X_valid, y_valid), backend=RandomSearch)
searcher.search(num_hyperparameter_trials)
print('Best hyperparameters: {}'.format(searcher.best_hyperparameters))
model = searcher.best_model
# Test model
print('Test results: {}'.format(model.test(X_test, y_test)))
# Plot DeepLift and ISM scores for the first 10 test examples, and model architecture
if sys.version_info[0] == 2:
model.plot_deeplift(X_test[:10], output_directory='deeplift_plots')
model.plot_in_silico_mutagenesis(X_test[:10], output_directory='ISM_plots')
model.plot_architecture(output_file='architecture_plot.png')
| {
"repo_name": "agitter/dragonn",
"path": "examples/simple_motif_detection.py",
"copies": "2",
"size": "3903",
"license": "mit",
"hash": -3835364449215288300,
"line_mean": 37.2647058824,
"line_max": 103,
"alpha_frac": 0.685370228,
"autogenerated": false,
"ratio": 3.2579298831385644,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4943300111138565,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import numpy as np, sys
from abc import abstractmethod, ABCMeta
class HyperparameterBackend(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, grid):
"""
Parameters
----------
grid: dict
Keys are hyperparameter names and values are either
a single (min, max) tuple for single value parameters
or a tuple of (min, max) tuples for tuple-valued parameters.
"""
pass
@abstractmethod
def get_next_hyperparameters(self):
pass
@abstractmethod
def record_result(self, hyperparam_dict, score):
"""
Parameters
----------
hyperparam_dict: dict
hyperparameter names as keys and values as values.
score: int or float
The result, or metric value, of using the hyparameters.
"""
pass
class RandomSearch(HyperparameterBackend):
def __init__(self, grid):
self.grid = grid
def get_next_hyperparameters(self):
return [np.random.uniform(start, end) for start, end in self.grid]
def record_result(self, hyperparam_dict, score):
pass # Random search doesn't base its decisions on the results of previous trials
class MOESearch(HyperparameterBackend):
    def __init__(self, grid):
        if sys.version_info[0] == 2:
            # Bind these names at module level so the other methods
            # (get_next_hyperparameters, record_result) can also see them.
            global BadStatusLine, Experiment, gp_next_points, SamplePoint
            from httplib import BadStatusLine
            from moe.easy_interface.experiment import Experiment
            from moe.easy_interface.simple_endpoint import gp_next_points
            from moe.optimal_learning.python.data_containers import SamplePoint
        else:
            raise RuntimeError("MOESearch requires Python2!")
        self.experiment = Experiment(grid)
def get_next_hyperparameters(self):
try:
return gp_next_points(self.experiment)[0]
except BadStatusLine:
raise RuntimeError('MOE server is not running!')
def record_result(self, hyperparam_dict, score):
self.experiment.historical_data.append_sample_points(
[SamplePoint(point=hyperparam_dict.values(), value=score)])
class HyperparameterSearcher(object):
def __init__(self, model_class, fixed_hyperparameters, grid, X_train, y_train, validation_data,
metric='auPRG', maximize=True, backend=RandomSearch):
self.model_class = model_class
self.fixed_hyperparameters = fixed_hyperparameters
self.grid = grid
self.X_train = X_train
self.y_train = y_train
self.validation_data = validation_data
self.metric = metric
self.maximize = maximize
self.best_score = 0
self.best_model = self.best_hyperparameters = None
# Some hyperparameters have multiple elements, and we need backend to treat each of them
# as a separate dimension, so unpack them here.
backend_grid = [bounds for value in grid.values()
for bounds in (value if isinstance(value[0], (list, tuple, np.ndarray))
else (value,))]
self.backend = backend(backend_grid)
def search(self, num_hyperparameter_trials):
for trial in range(num_hyperparameter_trials):
# Select next hyperparameters with MOE, rounding hyperparameters that are integers
# and re-packing multi-element hyperparameters
raw_hyperparameters = self.backend.get_next_hyperparameters()
hyperparameters = {}
i = 0
for name, bounds in self.grid.items():
if isinstance(bounds[0], (list, tuple, np.ndarray)):
# Multi-element hyperparameter
hyperparameters[name] = raw_hyperparameters[i : i + len(bounds)]
if isinstance(bounds[0][0], int):
hyperparameters[name] = np.rint(hyperparameters[name]).astype(int)
i += len(bounds)
else:
hyperparameters[name] = raw_hyperparameters[i]
if isinstance(bounds[0], int):
hyperparameters[name] = int(round(hyperparameters[name]))
i += 1
assert i == len(raw_hyperparameters)
# Try these hyperparameters
model = self.model_class(**{key: value
for dictionary in (hyperparameters, self.fixed_hyperparameters)
for key, value in dictionary.items()})
model.train(self.X_train, self.y_train, validation_data=self.validation_data)
print(self.validation_data)
task_scores = model.score(self.validation_data[0], self.validation_data[1], self.metric)
score = task_scores.mean() # mean across tasks
# Record hyperparameters and validation loss
self.backend.record_result(hyperparameters, score)
# If these hyperparameters were the best so far, store this model
if self.maximize == (score > self.best_score):
self.best_score = score
self.best_model = model
self.best_hyperparameters = hyperparameters
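# Illustrative use of the searcher (hypothetical model class and data; the grid
# follows the format documented in HyperparameterBackend.__init__ above):
#   grid = {'dropout': (0.0, 0.5), 'num_filters': ((5, 100), (5, 100))}
#   searcher = HyperparameterSearcher(SomeModel, {'num_epochs': 10}, grid,
#                                     X_train, y_train,
#                                     validation_data=(X_valid, y_valid))
#   searcher.search(num_hyperparameter_trials=20)
#   best_model, best_params = searcher.best_model, searcher.best_hyperparameters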
| {
"repo_name": "agitter/dragonn",
"path": "dragonn/hyperparameter_search.py",
"copies": "2",
"size": "5278",
"license": "mit",
"hash": 7772536620244083000,
"line_mean": 41.224,
"line_max": 103,
"alpha_frac": 0.6030693444,
"autogenerated": false,
"ratio": 4.546080964685616,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6149150309085616,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import opsimsummary as oss
from sqlalchemy import create_engine
import pandas as pd
import time
import os
import numpy as np
from opsimsummary import (OpSimOutput,
Simlibs)
logfile = 'feature_sim.simlib.log'
opsim_fname = 'feature_rolling_half_mask_10yrs.db'
simlib_fname = 'feature_rolling_half_mask_10yrs.simlib'
script_start = time.time()
log_str = 'Running script with opsimsummary version {}\n'.format(oss.__version__)
log_val = 'Starting Calculation at {}\n'.format(script_start)
log_str += log_val
pkgDir = os.path.split(oss.__file__)[0]
dbname = os.path.join('/Users/rbiswas/data/', 'LSST/OpSimData',
opsim_fname)
log_val = 'The OpSim DataBase used is {}\n'.format(dbname)
log_str += log_val
# read the database into a `pd.DataFrame`
opsout = OpSimOutput.fromOpSimDB(dbname,
opsimversion='lsstv4',
tableNames=('SummaryAllProps', 'Proposal'))
summary = opsout.summary
log_val = 'dataframe read in from database {}\n'.format(time.time())
log_str += log_val
simlibs = Simlibs(summary, opsimversion='lsstv4', usePointingTree=True)
rng = np.random.RandomState(1)
simlibs.randomSimlibs(numFields=50000, fname=simlib_fname)#, rng=rng)
log_val = 'Done'
log_str += log_val
log_val = 'Writing simlib for input to outfile {0} at time {1}\n'.format(simlib_fname, time.time())
print(log_val)
log_str += log_val
with open(logfile, 'w') as f:
f.write(log_str)
| {
"repo_name": "rbiswas4/simlib",
"path": "scripts/make_simlib_feature.py",
"copies": "1",
"size": "1544",
"license": "mit",
"hash": -421685907330211600,
"line_mean": 33.3111111111,
"line_max": 99,
"alpha_frac": 0.6806994819,
"autogenerated": false,
"ratio": 3.13184584178499,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.431254532368499,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import opsimsummary as oss
import opsimsummary.summarize_opsim as so
from sqlalchemy import create_engine
import pandas as pd
import os
def test_writeSimlib():
pkgDir = os.path.split(oss.__file__)[0]
dbname = os.path.join(pkgDir, 'example_data', 'enigma_1189_micro.db')
template_simlib = os.path.join(pkgDir, 'example_data',
'Enigma_1189_micro_main.simlib')
engineFile = 'sqlite:///' + dbname
engine = create_engine(engineFile)
# read the database into a `pd.DataFrame`
Summary = pd.read_sql_table('Summary', engine)
EnigmaMain = Summary.query('propID == [364]')
EnigmaMainSummary = so.SummaryOpsim(EnigmaMain, calculateSNANASimlibs=True,
user='rbiswas', host='time')
simlibfilename = './Enigma_1189_micro_main.simlib'
EnigmaMainSummary.writeSimlib(simlibfilename)
with open(template_simlib) as f:
template_data = f.read()
with open(simlibfilename) as f:
new_data = f.read()
assert new_data == template_data
if new_data == template_data :
os.remove(simlibfilename)
if __name__ == '__main__':
test_writeSimlib()
| {
"repo_name": "rbiswas4/simlib",
"path": "tests/test_simlibWrite.py",
"copies": "1",
"size": "1246",
"license": "mit",
"hash": -4015338789209372000,
"line_mean": 33.6111111111,
"line_max": 79,
"alpha_frac": 0.6492776886,
"autogenerated": false,
"ratio": 3.4804469273743015,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46297246159743016,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import opsimsummary as oss
import opsimsummary.summarize_opsim as so
from sqlalchemy import create_engine
import pandas as pd
import time
import os
script_start = time.time()
log_str = 'Running script with opsimsummary version {}\n'.format(oss.__VERSION__)
log_val = 'Starting Calculation at {}\n'.format(script_start)
log_str += log_val
pkgDir = os.path.split(oss.__file__)[0]
dbname = os.path.join('/Users/rbiswas/data/', 'LSST/OpSimData',
'enigma_1189_sqlite.db')
log_val = 'The OpSim DataBase used is {}\n'.format(dbname)
log_str += log_val
engineFile = 'sqlite:///' + dbname
engine = create_engine(engineFile)
# read the database into a `pd.DataFrame`
Summary = pd.read_sql_table('Summary', engine)
log_val = 'dataframe read in from database {}\n'.format(time.time())
log_str += log_val
def _writeSimlibFor(propIDList, simlibFileName, description='DDF',
log_str= log_str):
df = Summary.query('propID == @propIDList')
df.drop_duplicates(inplace=True)
opSummary = so.SummaryOpsim(df, calculateSNANASimlibs=True,
user='rbiswas', host='time')
log_val = 'The summary has {} entries\n'.format(len(df))
print(log_val)
log_str += log_val
log_val = \
'The summary has {} unique fields\n'.format(len(df.fieldID.unique()))
print(log_val)
log_str += log_val
log_val = 'Writing simlib for {0} input to outfile {1}\n'.format(description,
simlibFileName)
print(log_val)
log_str += log_val
opSummary.writeSimlib(simlibFileName)
log_val = 'Done simlib calculation at {0} and simlib written to {1}\n'.\
format(time.time(), simlibFileName)
print(log_val)
log_str += log_val
WFDSimlib = os.path.join('../opsimsummary/example_data/',
'Enigma_1189_WFD.simlib')
DDFSimlib = os.path.join('../opsimsummary/example_data/',
'Enigma_1189_DDF.simlib')
CombSimlib = os.path.join('../opsimsummary/example_data/',
'Enigma_1189_Combined.simlib')
_writeSimlibFor([366], DDFSimlib, description='DDF')
_writeSimlibFor([364], WFDSimlib, description='WFD')
_writeSimlibFor([364, 366], CombSimlib, description='Combined')
logfile = 'enigma_simlibs.log'
with open(logfile, 'w') as f:
f.write(log_str)
| {
"repo_name": "rbiswas4/simlib",
"path": "scripts/make_simlib_enigma.py",
"copies": "1",
"size": "2405",
"license": "mit",
"hash": -3328873828777014300,
"line_mean": 33.8550724638,
"line_max": 81,
"alpha_frac": 0.6486486486,
"autogenerated": false,
"ratio": 3.202396804260985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43510454528609854,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
from addie.processing.idl.exp_ini_file_loader import ExpIniFileLoader
class PopulateBackgroundWidgets(object):
list_names = []
exp_ini_back_file = 'N/A'
current_folder = None
we_are_done_here = False
def __init__(self, main_window=None):
self.main_window_postprocessing_ui = main_window.postprocessing_ui
self.current_folder = main_window.current_folder
def run(self):
self.retrieve_list_names_from_table()
if self.we_are_done_here:
return
self.reset_background_combobox_index()
self.retrieve_background_file_from_exp_ini_file()
self.populate_widgets()
def refresh_contain(self):
_index_selected = self.main_window_postprocessing_ui.background_comboBox.currentIndex()
self.retrieve_list_names_from_table()
self.main_window_postprocessing_ui.background_comboBox.clear()
for _item in self.list_names:
self.main_window_postprocessing_ui.background_comboBox.addItem(_item)
self.main_window_postprocessing_ui.background_comboBox.setCurrentIndex(_index_selected)
def retrieve_list_names_from_table(self):
_list_names = []
_nbr_row = self.main_window_postprocessing_ui.table.rowCount()
if _nbr_row == 0:
self.we_are_done_here = True
return
for _index_row in range(_nbr_row):
_label = self.main_window_postprocessing_ui.table.item(_index_row, 1).text()
_list_names.append(_label)
self.list_names = _list_names
def retrieve_background_file_from_exp_ini_file(self):
_exp_ini_full_file_name = os.path.join(self.current_folder, 'exp.ini')
_o_exp_ini = ExpIniFileLoader(full_file_name=_exp_ini_full_file_name)
_metadata = _o_exp_ini.metadata
self.exp_ini_back_file = _metadata['MTc']
def reset_background_combobox_index(self):
self.main_window_postprocessing_ui.background_comboBox.setCurrentIndex(0)
def populate_widgets(self):
self.main_window_postprocessing_ui.background_comboBox.clear()
for _item in self.list_names:
self.main_window_postprocessing_ui.background_comboBox.addItem(_item)
background_text = self.main_window_postprocessing_ui.table.item(0, 2).text()
self.main_window_postprocessing_ui.background_line_edit.setText(background_text)
self.main_window_postprocessing_ui.background_no_field.setText(self.exp_ini_back_file)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/populate_background_widgets.py",
"copies": "1",
"size": "2557",
"license": "mit",
"hash": -8628144575012708000,
"line_mean": 40.9180327869,
"line_max": 95,
"alpha_frac": 0.6765741103,
"autogenerated": false,
"ratio": 3.5123626373626373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.968056120414041,
"avg_score": 0.0016751087044455419,
"num_lines": 61
} |
from __future__ import (absolute_import, division, print_function)
import os
from addie.processing.idl.table_handler import TableHandler
from addie.processing.idl.step2_gui_handler import Step2GuiHandler
class CreateNdsumFile(object):
list_selected_row = None
gui_settings = None
current_folder = None
def __init__(self, parent=None):
self.parent = parent
self.ui = parent.postprocessing_ui
self.current_folder = self.parent.current_folder
def run(self):
self._retrieve_list_of_selected_rows()
self._retrieve_gui_settings()
self._create_sto_output_file()
def _retrieve_list_of_selected_rows(self):
o_table_handler = TableHandler(parent=self.parent)
o_table_handler.retrieve_list_of_selected_rows()
self.list_selected_row = o_table_handler.list_selected_row
def _retrieve_gui_settings(self):
_gui_settings = {}
_gui_settings['background_flag'] = self.ui.background_yes.isChecked()
_gui_settings['background_no_field'] = str(self.ui.background_no_field.text())
_gui_settings['background_yes_field'] = str(self.ui.background_line_edit.text())
_gui_settings['muscat_flag'] = self.ui.muscat_yes.isChecked()
_gui_settings['scale_data_flag'] = self.ui.scale_data_yes.isChecked()
_gui_settings['run_rmc_flag'] = self.ui.run_rmc_yes.isChecked()
_gui_settings['plazcek_from'] = str(self.ui.plazcek_fit_range_min.text())
_gui_settings['plazcek_to'] = str(self.ui.plazcek_fit_range_max.text())
_gui_settings['bfil_from'] = str(self.ui.fourier_filter_from.text())
_gui_settings['bfil_to'] = str(self.ui.fourier_filter_to.text())
_gui_settings['platype'] = self.ui.hydrogen_yes.isChecked()
o_gui_handler = Step2GuiHandler(main_window=self.parent)
_gui_settings['qrangeft'] = o_gui_handler.get_q_range()
self.gui_settings = _gui_settings
def _create_sto_output_file(self):
_sto_file_name = str(self.ui.run_ndabs_output_file_name.text()) + '.ndsum'
full_file_name = os.path.join(self.current_folder, _sto_file_name)
_text = []
for _entry in self.list_selected_row:
_text.append(_entry['name'] + ' ' + _entry['runs'] + '\n')
_text.append('endsamples\n')
_gui_settings = self.gui_settings
if _gui_settings['background_flag']:
_background = _gui_settings['background_yes_field']
else:
_background = _gui_settings['background_no_field']
_text.append('Background\t' + _background + '\n')
if _gui_settings['muscat_flag']:
_muscat_flag = 'Yes'
else:
_muscat_flag = 'No'
_text.append('muscat\t' + _muscat_flag + '\n')
_bfil = "bfil \t%s,%s\n" % (_gui_settings['bfil_from'], _gui_settings['bfil_to'])
_text.append(_bfil)
if _gui_settings['scale_data_flag']:
_scale_data = "Yes"
else:
_scale_data = "No"
_text.append("scale_data \t%s\n" % _scale_data)
if _gui_settings['qrangeft']:
qmin, qmax = _gui_settings['qrangeft']
_text.append("qrangeft {},{}\n".format(qmin, qmax))
# if _gui_settings['run_rmc_flag']:
#_run_rmc = "Yes"
# else:
#_run_rmc = "No"
#_text.append("run_rmc \t%s\n" %_run_rmc)
_plazcek = "plarange \t%s,%s\n" % (_gui_settings['plazcek_from'], _gui_settings['plazcek_to'])
_text.append(_plazcek)
if _gui_settings['platype']:
_hydrogen_value = '2'
else:
_hydrogen_value = '0'
_hydrogen = "platype \t %s\n" % (_hydrogen_value)
_text.append(_hydrogen)
print("[LOG] creating file %s" % full_file_name)
f = open(full_file_name, 'w')
for _line in _text:
f.write(_line)
f.close()
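# Example of the .ndsum layout written by _create_sto_output_file above
# (sample names, run numbers and ranges are hypothetical; the keywords and
# their order match the code):
#   sample_A 12345
#   endsamples
#   Background   12300
#   muscat   No
#   bfil     0.5,30.0
#   scale_data   Yes
#   qrangeft 0.2,25.0
#   plarange     10.0,30.0
#   platype      0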
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/create_ndsum_file.py",
"copies": "1",
"size": "3922",
"license": "mit",
"hash": -381130720892370300,
"line_mean": 37.8316831683,
"line_max": 102,
"alpha_frac": 0.5889852116,
"autogenerated": false,
"ratio": 3.1126984126984127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42016836242984124,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
from addie.utilities.file_handler import FileHandler
from addie.autoNOM.step1_widgets_handler import Step1WidgetsHandler
class AutoPopulateWidgets(object):
input_file_name = 'exp.ini'
file_found_message = "Config file %s has been found!" % input_file_name
file_not_found_message = "Config file %s has not been found! " % input_file_name
def __init__(self, main_window=None):
self.main_window = main_window
# self.main_window.autonom_ui = parent.ui
def run(self):
full_file_name = os.path.join(self.main_window.current_folder, self.input_file_name)
if os.path.exists(full_file_name):
self.main_window.autonom_ui.exp_ini_file_status.setText(self.file_found_message)
o_retriever = RetrieveExpIniConfiguration(exp_ini_file_name=full_file_name)
o_retriever.run()
self.populate_widgets(o_retriever.exp_ini_dico)
return
self.main_window.autonom_ui.exp_ini_file_status.setText(self.file_not_found_message)
def populate_widgets(self, widgets_dico):
self.main_window.autonom_ui.diamond.setText(widgets_dico['Dia'])
self.main_window.autonom_ui.diamond_background.setText(widgets_dico['DiaBg'])
self.main_window.autonom_ui.vanadium.setText(widgets_dico['Vana'])
self.main_window.autonom_ui.vanadium_background.setText(widgets_dico['VanaBg'])
self.main_window.autonom_ui.sample_background.setText(widgets_dico['MTc'])
o_gui = Step1WidgetsHandler(parent=self.main_window)
try:
_recali = True if (widgets_dico['recali'].strip() == 'yes') else False
except:
_recali = False
finally:
o_gui.set_recalibration(_recali)
try:
_renorm = True if (widgets_dico['renorm'].strip() == 'yes') else False
except:
_renorm = False
finally:
o_gui.set_renormalization(_renorm)
try:
_auto = True if (widgets_dico['autotemp'].strip() == 'yes') else False
except:
_auto = False
finally:
o_gui.set_autotemplate(_auto)
self.main_window.autonom_ui.first_scan.setText(widgets_dico['scan1'])
self.main_window.autonom_ui.last_scan.setText(widgets_dico['scanl'])
if str(self.main_window.autonom_ui.frequency.currentText()) == '60':
self.main_window.autonom_ui.frequency.setCurrentIndex(0)
else:
self.main_window.autonom_ui.frequency.setCurrentIndex(1)
try:
_comments = widgets_dico['#']
except:
_comments = ''
finally:
self.main_window.autonom_ui.comments.setText(_comments)
class RetrieveExpIniConfiguration(object):
exp_ini_dico = {}
def __init__(self, exp_ini_file_name=None):
self.full_file_name = exp_ini_file_name
def run(self):
o_file = FileHandler(filename=self.full_file_name)
o_file.retrieve_contain()
_file_contrain = o_file.file_contain
self.retrieve_settings(_file_contrain)
def retrieve_settings(self, file_contain):
_exp_ini_dico = {}
file_contain = file_contain.split("\n")
for _line in file_contain:
_parsed_line = _line.split()
if len(_parsed_line) > 1:
_keyword = _parsed_line[0]
if _keyword == '#':
_value = " ".join(_parsed_line[1:])
else:
_value = _parsed_line[1]
_exp_ini_dico[_keyword] = _value
self.exp_ini_dico = _exp_ini_dico
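# Example of the exp.ini layout parsed above (run numbers are hypothetical;
# the keywords are the ones consumed by AutoPopulateWidgets.populate_widgets):
#   Dia 11111
#   DiaBg 11112
#   Vana 11113
#   VanaBg 11114
#   MTc 11115
#   recali yes
#   renorm yes
#   autotemp yes
#   scan1 11120
#   scanl 11130
#   # free form comment line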
| {
"repo_name": "neutrons/FastGR",
"path": "addie/autoNOM/auto_populate_widgets.py",
"copies": "1",
"size": "3693",
"license": "mit",
"hash": 6816707094022827000,
"line_mean": 35.5643564356,
"line_max": 92,
"alpha_frac": 0.608989981,
"autogenerated": false,
"ratio": 3.5204957102001906,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9610850348513,
"avg_score": 0.0037270685374378963,
"num_lines": 101
} |
from __future__ import (absolute_import, division, print_function)
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
__metaclass__ = type
ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200'
ANSIBLE_HASHI_VAULT_TOKEN = None
if os.getenv('VAULT_ADDR') is not None:
ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR']
if os.getenv('VAULT_TOKEN') is not None:
ANSIBLE_HASHI_VAULT_TOKEN = os.environ['VAULT_TOKEN']
class HashiVault:
def __init__(self, logger, **kwargs):
try:
import hvac
except ImportError:
AnsibleError("Please pip install hvac to use this module")
self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR)
self.token = kwargs.get('token', ANSIBLE_HASHI_VAULT_TOKEN)
if self.token is None:
raise AnsibleError("No Vault Token specified")
# split secret arg, which has format 'secret/hello:value'
# into secret='secret/hello' and secret_field='value'
s = kwargs.get('secret')
if s is None:
raise AnsibleError("No secret specified")
s_f = s.split(':')
self.secret = s_f[0]
if len(s_f) >= 2:
self.secret_field = s_f[1]
else:
self.secret_field = 'value'
logger.warning('%s %s' % (self.url, self.token))
self.client = hvac.Client(url=self.url, token=self.token)
if self.client.is_authenticated():
pass
else:
raise AnsibleError("Invalid Hashicorp Vault Token Specified")
def get(self):
data = self.client.read(self.secret)
if data is None:
raise AnsibleError("The secret %s doesn't seem to exist"
% self.secret)
if self.secret_field == '': # secret was specified with trailing ':'
return data['data']
if self.secret_field not in data['data']:
raise AnsibleError("The secret %s does not contain the field '%s'."
% (self.secret, self.secret_field))
return data['data'][self.secret_field]
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
vault_args = terms[0].split(' ')
vault_dict = {}
ret = []
for param in vault_args:
try:
key, value = param.split('=')
except ValueError as e:
raise AnsibleError("hashi_vault plugin needs key=value pairs, but received %s %s"
% (terms, e.message))
vault_dict[key] = value
vault_conn = HashiVault(self._display, **vault_dict)
for term in terms:
key = term.split()[0]
value = vault_conn.get()
ret.append(value)
if 'write_to_file' in vault_dict.keys():
text_file = open(vault_dict['write_to_file'], "w")
text_file.write(value)
text_file.close()
return ret
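# Illustrative playbook usage (hypothetical values): the plugin expects a single
# term of space-separated key=value pairs, as parsed in LookupModule.run above.
#   lookup('hashi_vault',
#          'secret=secret/hello:value token=xxxx url=http://127.0.0.1:8200')
# An optional write_to_file=<path> pair makes the plugin also write the secret
# to that file.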
| {
"repo_name": "StarterSquad/prudentia",
"path": "prudentia/plugins/lookup/hashi_vault.py",
"copies": "1",
"size": "3013",
"license": "mit",
"hash": -5225251975581330000,
"line_mean": 31.0531914894,
"line_max": 97,
"alpha_frac": 0.5692001328,
"autogenerated": false,
"ratio": 3.862820512820513,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4932020645620513,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from appr.commands.command_base import CommandBase
from appr.display import print_package_info
class ShowCmd(CommandBase):
name = 'show'
help_message = "print the package manifest"
default_media_type = None
def __init__(self, options):
super(ShowCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.version = options.version
self.verbose = options.wide
self.media_type = options.media_type
if options.media_type == self.default_media_type:
self.media_type = os.getenv("APPR_DEFAULT_MEDIA_TYPE", self.default_media_type)
self.result = None
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
cls._add_mediatype_option(parser, default=cls.default_media_type, required=False)
parser.add_argument("-w", "--wide", help="Extend display informations",
action="store_true", default=False)
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
self.result = client.show_package(self.package, version=self.version,
media_type=self.media_type)
def _render_dict(self):
return self.result
def _render_console(self):
return "Info: %s\n\n" % self.package + print_package_info(self.result, self.verbose)
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/show.py",
"copies": "2",
"size": "1696",
"license": "apache-2.0",
"hash": -5353150050260842000,
"line_mean": 38.4418604651,
"line_max": 92,
"alpha_frac": 0.6515330189,
"autogenerated": false,
"ratio": 3.889908256880734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5541441275780734,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from appr.commands.command_base import CommandBase
from appr.display import print_packages
class ListPackageCmd(CommandBase):
name = 'list'
help_message = "list packages"
default_media_type = None
def __init__(self, options):
super(ListPackageCmd, self).__init__(options)
self.registry_host = options.registry_host
self.user = options.user
self.organization = options.organization
self.query = options.search
self.media_type = options.media_type
if options.media_type is None:
self.media_type = os.getenv("APPR_DEFAULT_MEDIA_TYPE", None)
self.result = None
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_arg(parser)
cls._add_mediatype_option(parser, default=cls.default_media_type, required=False)
parser.add_argument("-u", "--user", default=None, help="list packages owned by USER")
parser.add_argument("-o", "--organization", default=None,
help="list ORGANIZATION packages")
parser.add_argument("-s", "--search", default=None, help="search query")
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
params = {}
if self.user:
params['username'] = self.user
if self.organization:
params["namespace"] = self.organization
if self.query:
params['query'] = self.query
if self.media_type:
params['media_type'] = self.media_type
self.result = client.list_packages(params)
def _render_dict(self):
return self.result
def _render_console(self):
return print_packages(self.result, registry_host=self.registry_host)
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/list_package.py",
"copies": "2",
"size": "1923",
"license": "apache-2.0",
"hash": -6172326961104096000,
"line_mean": 35.9807692308,
"line_max": 93,
"alpha_frac": 0.6391055642,
"autogenerated": false,
"ratio": 4.023012552301255,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008693499828251602,
"num_lines": 52
} |
from __future__ import absolute_import, division, print_function
import os
from appr.commands.command_base import CommandBase
from appr.pack import ApprPackage
class InspectCmd(CommandBase):
name = 'inspect'
help_message = "Browse package files"
def __init__(self, options):
super(InspectCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.version = options.version
self.version_parts = options.version_parts
self.file = options.file
self.tree = options.tree
self.media_type = options.media_type
if options.media_type is self.default_media_type:
self.media_type = os.getenv("APPR_DEFAULT_MEDIA_TYPE", self.default_media_type)
self.result = None
self.format = options.media_type
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_mediatype_option(parser, required=True)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
parser.add_argument('--tree', help="List files inside the package", action='store_true',
default=True)
parser.add_argument('-f', '--file', help="Display a file", default=None)
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
result = client.pull(self.package, version_parts=self.version_parts,
media_type=self.media_type)
package = ApprPackage(result, b64_encoded=False)
if self.file:
self.result = package.file(self.file)
elif self.tree:
self.result = "\n".join(package.tree())
else:
self.result = package.manifest
def _render_dict(self):
return {"inspect": self.package, "output": self.result}
def _render_console(self):
return self.result
| {
"repo_name": "cn-app-registry/cnr-server",
"path": "appr/commands/inspect.py",
"copies": "2",
"size": "2043",
"license": "apache-2.0",
"hash": 5051356730435240000,
"line_mean": 37.5471698113,
"line_max": 96,
"alpha_frac": 0.6377875673,
"autogenerated": false,
"ratio": 3.944015444015444,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008421817677615765,
"num_lines": 53
} |
from __future__ import absolute_import, division, print_function
import os
from appr.commands.command_base import CommandBase
class DeletePackageCmd(CommandBase):
name = 'delete-package'
help_message = 'delete package from the registry'
def __init__(self, options):
super(DeletePackageCmd, self).__init__(options)
self.package = options.package
self.registry_host = options.registry_host
self.version = options.version
self.media_type = options.media_type
if options.media_type is self.default_media_type:
self.media_type = os.getenv("APPR_DEFAULT_MEDIA_TYPE", self.default_media_type)
self.result = None
self.ssl_verify = options.cacert or not options.insecure
@classmethod
def _add_arguments(cls, parser):
cls._add_registryhost_option(parser)
cls._add_mediatype_option(parser)
cls._add_packagename_option(parser)
cls._add_packageversion_option(parser)
def _call(self):
client = self.RegistryClient(self.registry_host, requests_verify=self.ssl_verify)
self.result = client.delete_package(self.package, version=self.version,
media_type=self.media_type)
def _render_dict(self):
return self.result
def _render_console(self):
return "Deleted package: %s - %s" % (self.result['package'], self.result['release'])
| {
"repo_name": "app-registry/appr",
"path": "appr/commands/delete_package.py",
"copies": "2",
"size": "1432",
"license": "apache-2.0",
"hash": 8746054122297890000,
"line_mean": 36.6842105263,
"line_max": 92,
"alpha_frac": 0.656424581,
"autogenerated": false,
"ratio": 3.934065934065934,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5590490515065935,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
from .compilation import compile_run_strings
from .util import CompilerNotFoundError
def has_fortran():
if not hasattr(has_fortran, 'result'):
try:
(stdout, stderr), info = compile_run_strings(
[('main.f90', (
'program foo\n'
'print *, "hello world"\n'
'end program'
))], clean=True
)
except CompilerNotFoundError:
has_fortran.result = False
if os.environ.get('SYMPY_STRICT_COMPILER_CHECKS', '0') == '1':
raise
else:
if info['exit_status'] != os.EX_OK or 'hello world' not in stdout:
if os.environ.get('SYMPY_STRICT_COMPILER_CHECKS', '0') == '1':
raise ValueError("Failed to compile test program:\n%s\n%s\n" % (stdout, stderr))
has_fortran.result = False
else:
has_fortran.result = True
return has_fortran.result
def has_c():
if not hasattr(has_c, 'result'):
try:
(stdout, stderr), info = compile_run_strings(
[('main.c', (
'#include <stdio.h>\n'
'int main(){\n'
'printf("hello world\\n");\n'
'return 0;\n'
'}'
))], clean=True
)
except CompilerNotFoundError:
has_c.result = False
if os.environ.get('SYMPY_STRICT_COMPILER_CHECKS', '0') == '1':
raise
else:
if info['exit_status'] != os.EX_OK or 'hello world' not in stdout:
if os.environ.get('SYMPY_STRICT_COMPILER_CHECKS', '0') == '1':
raise ValueError("Failed to compile test program:\n%s\n%s\n" % (stdout, stderr))
has_c.result = False
else:
has_c.result = True
return has_c.result
def has_cxx():
if not hasattr(has_cxx, 'result'):
try:
(stdout, stderr), info = compile_run_strings(
[('main.cxx', (
'#include <iostream>\n'
'int main(){\n'
'std::cout << "hello world" << std::endl;\n'
'}'
))], clean=True
)
except CompilerNotFoundError:
has_cxx.result = False
if os.environ.get('SYMPY_STRICT_COMPILER_CHECKS', '0') == '1':
raise
else:
if info['exit_status'] != os.EX_OK or 'hello world' not in stdout:
if os.environ.get('SYMPY_STRICT_COMPILER_CHECKS', '0') == '1':
raise ValueError("Failed to compile test program:\n%s\n%s\n" % (stdout, stderr))
has_cxx.result = False
else:
has_cxx.result = True
return has_cxx.result
| {
"repo_name": "kaushik94/sympy",
"path": "sympy/utilities/_compilation/availability.py",
"copies": "3",
"size": "2951",
"license": "bsd-3-clause",
"hash": 7723583287475822000,
"line_mean": 36.8333333333,
"line_max": 100,
"alpha_frac": 0.4778041342,
"autogenerated": false,
"ratio": 3.977088948787062,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010555711441589501,
"num_lines": 78
} |
from __future__ import absolute_import, division, print_function
import os
from jinja2 import Environment, PackageLoader
import click
import boto3
from botocore.client import ClientError
DEFAULT_AWS_PROFILE = 'default'
DEFAULT_BACKUP_CREDENTIAL_FILE = 'credentials'
# Assumed defaults for the syncscript command options below (placeholder values).
DEFAULT_BACKUP_SCRIPT_FILE = 'backup.sh'
DEFAULT_BACKUP_DIRECTORY = '.'
env = Environment(loader=PackageLoader('awsbackup', 'templates'))
class AwsBackup(object):
def __init__(self, home=None, profile=''):
self.home = os.path.abspath(home or '.')
self.profile = profile
class BucketExistsError(Exception):
pass
#pass_awsbackup = click.make_pass_decorator(AwsBackup)
@click.group()
@click.version_option()
@click.option('--profile', '-p', default=lambda: os.environ.get('AWS_PROFILE', DEFAULT_AWS_PROFILE),
help="tell awsbackup which aws profile to use from your aws credential file, by default it will use '%s'"
% (DEFAULT_AWS_PROFILE,))
@click.pass_context
def main(ctx, profile):
ctx.obj = AwsBackup(profile=profile)
@main.command()
@click.option('--bucket', '-b', prompt=True, default=lambda: os.environ.get('AWS_S3_BUCKET', ''),
help='tell awsbackup what to call the bucket to send backups to')
@click.option('--user', '-u', prompt=True,
help='tell awsbackup what user to create for the server to use to backup with')
@click.option('--file', '-f', type=click.File('w'), prompt=True,
default=lambda: os.environ.get('AWS_BACKUP_CREDENTIAL_FILE', DEFAULT_BACKUP_CREDENTIAL_FILE),
help="Location of file to SAVE user's credentials to")
@click.pass_context
def create(ctx, bucket, user, file):
policy_template = env.get_template('backup_user_policy.json')
backup_policy = policy_template.render(bucket=bucket)
backup_policy_name = user+'_access_policy'
profile = ctx.obj.profile # get the profile from the parent command's options
session = boto3.Session(profile_name=profile)
s3_client = session.client('s3')
try:
bl = s3_client.get_bucket_location(Bucket=bucket)
raise BucketExistsError("Bucket %s already exists!" % (bucket,)) # this bucket has been created already
except ClientError as ce:
if ce.response['Error']['Code'] == 'NoSuchBucket':
pass # the bucket doesn't exist, phew
elif ce.response['Error']['Code'] == 'AllAccessDisabled':
raise BucketExistsError("Bucket %s already exists with a different owner!" % (bucket,)) # someone else has a bucket with this name
else:
raise ce
bucket_rc = s3_client.create_bucket(Bucket=bucket)
iam_client = session.client('iam')
usr = iam_client.create_user(UserName=user)
usr_policy = iam_client.put_user_policy(UserName=user, PolicyName=backup_policy_name, PolicyDocument=backup_policy)
usr_keys = iam_client.create_access_key(UserName=user)
access_key = usr_keys['AccessKey']['AccessKeyId']
access_secret = usr_keys['AccessKey']['SecretAccessKey']
credentials = "[%s]\naws_access_key_id = %s\naws_secret_access_key = %s" % (user, access_key, access_secret)
file.write(credentials)
    # NOTE: cleanup() tears down everything that was just created (bucket,
    # user, policy and access keys); it is only useful while exercising the
    # command end to end during development.
    cleanup(session, bucket, user, backup_policy_name, usr_keys['AccessKey']['AccessKeyId'])
@main.command()
@click.option('--bucket', '-b', prompt=True, default=lambda: os.environ.get('AWS_S3_BUCKET', ''),
help='tell awsbackup what bucket to send backups to')
@click.option('--name', '-n', type=click.File('w'), prompt=True,
default=lambda: os.environ.get('AWS_BACKUP_SCRIPT_FILE', DEFAULT_BACKUP_SCRIPT_FILE),
help="Location of file to SAVE script to")
@click.option('--from', '-f', 'from_dir', prompt=True,
              default=lambda: os.environ.get('AWS_BACKUP_DIRECTORY', DEFAULT_BACKUP_DIRECTORY),
              help="Location of directory to BACKUP")
@click.pass_context
def syncscript(ctx, bucket, name, from_dir):
    # 'from' is a reserved word in Python, so the option is bound to the
    # explicit parameter name 'from_dir'.
pass
def cleanup(session, bucket, user, backup_policy_name, key_id):
client = session.client('s3')
client.delete_bucket(Bucket=bucket)
client = session.client('iam')
client.delete_user_policy(UserName=user, PolicyName=backup_policy_name)
client.delete_access_key(UserName=user,AccessKeyId=key_id)
client.delete_user(UserName=user)
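# Sketch of a typical invocation (the exact console entry point is an
# assumption; since the module is awsbackup/__main__.py, ``python -m awsbackup``
# should work, and the bucket/user names are placeholders):
#
#     python -m awsbackup --profile default create --bucket my-backups \
#         --user backup-bot --file credentials
#
# ``create`` provisions the bucket, an IAM user with an inline access policy
# and a set of access keys, then writes the keys to the chosen credentials file.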
| {
"repo_name": "dantagg/awsbackup",
"path": "awsbackup/__main__.py",
"copies": "1",
"size": "4227",
"license": "mit",
"hash": -7658046417920336000,
"line_mean": 40.8514851485,
"line_max": 143,
"alpha_frac": 0.6832268749,
"autogenerated": false,
"ratio": 3.6283261802575106,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48115530551575103,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
from ply.yacc import yacc
from .lexer import tokens as lexer_tokens
from .elements import (ID, LongString, ShortString, Float, Integer, Boolean,
List, Dict, TestCase)
__all__ = ['create_parser']
tokens = lexer_tokens
start = 'testcases'
def p_testcases(p):
'''testcases : testcases single_testcase
| empty'''
if len(p) == 2:
p[0] = []
else:
p[1].append(p[2])
p[0] = p[1]
def p_single_testcase(p):
'single_testcase : BEGIN ID body END'
# build single_testcase.
p[0] = TestCase(ID(p[2]), *p[3])
def p_body(p):
'body : input_mark value output_mark value'
p[0] = (p[2], p[4])
def p_input_mark(p):
'''input_mark : INPUT COLON
| INPUT EQUAL_SIGN'''
pass
def p_output_mark(p):
'''output_mark : OUTPUT COLON
| OUTPUT EQUAL_SIGN'''
pass
def p_value(p):
'''value : single_value_element
| compound'''
p[0] = p[1]
def p_compound(p):
'''compound : list
| dict'''
p[0] = p[1]
def p_list(p):
'''list : L_BRACKET elements R_BRACKET
| L_BRACKET R_BRACKET'''
value = p[2] if len(p) == 4 else []
p[0] = List(value)
def p_dict(p):
'''dict : L_BRACE pairs R_BRACE
| L_BRACE R_BRACE'''
if len(p) == 3:
p[0] = {}
return
generated_dict = dict()
first_recorded = {}
for key, value in p[2]:
if key in generated_dict:
if first_recorded[key]:
generated_dict[key] = [generated_dict[key], value]
first_recorded[key] = False
else:
generated_dict[key].append(value)
else:
generated_dict[key] = value
first_recorded[key] = True
p[0] = Dict(generated_dict)
def p_elements(p):
'''elements : single_value_element COMMA elements
| single_value_element COMMA
| single_value_element'''
if len(p) == 4:
# non-leaf.
p[3].insert(0, p[1])
p[0] = p[3]
else:
# leaf.
p[0] = [p[1]]
def p_pairs(p):
'''pairs : single_pair COMMA pairs
| single_pair COMMA
| single_pair'''
if len(p) == 4:
# non-leaf.
p[3].insert(0, p[1])
p[0] = p[3]
else:
# leaf.
p[0] = [p[1]]
def p_single_pair(p):
'single_pair : single_key_element COLON single_value_element'
p[0] = (p[1], p[3])
def p_single_key_element(p):
'''single_key_element : boolean
| number
| string'''
p[0] = p[1]
def p_single_value_element(p):
'''single_value_element : single_key_element
| compound'''
p[0] = p[1]
def p_boolean(p):
'boolean : BOOLEAN'
p[0] = Boolean(p[1])
def p_number(p):
'''number : DECIMAL_INTEGER
| FLOAT_NUMBER'''
value = p[1]
if isinstance(value, Integer.value_type):
p[0] = Integer(value)
else:
p[0] = Float(value)
def p_short_string(p):
'string : SHORT_STRING'
p[0] = ShortString(p[1])
def p_long_string(p):
'string : LONG_STRING'
p[0] = LongString(p[1])
def p_empty(p):
'empty :'
pass
def p_error(p):
pass
def create_parser():
return yacc(
debug=0,
optimize=1,
tabmodule='generated_parsetab',
outputdir=os.path.dirname(__file__),
)
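# Minimal usage sketch (assumes a PLY lexer built from the sibling lexer
# module is passed in, since this grammar only imports its token list):
#
#     parser = create_parser()
#     testcases = parser.parse(source_text, lexer=the_lexer)
#
# The grammar reduces the whole input to a list of TestCase objects, one per
# BEGIN <id> ... END block, each holding its input and output values.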
| {
"repo_name": "huntzhan/tcg",
"path": "tcg/ast/parser.py",
"copies": "1",
"size": "3549",
"license": "mit",
"hash": 8544055120927019000,
"line_mean": 19.3965517241,
"line_max": 76,
"alpha_frac": 0.5083122006,
"autogenerated": false,
"ratio": 3.217588395285585,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9225900595885586,
"avg_score": 0,
"num_lines": 174
} |
from __future__ import (absolute_import, division, print_function)
import os
from qtpy import QtGui, QtCore
from addie.utilities.file_handler import FileHandler
class ImportTable(object):
file_contain = []
table_contain = []
contain_parsed = []
full_contain_parsed = []
def __init__(self, parent=None, filename=''):
self.parent = parent
self.filename = filename
def run(self):
self.load_ascii()
self.parse_contain()
self.change_path()
self.populate_gui()
def load_ascii(self):
_filename = self.filename
o_file = FileHandler(filename = _filename)
o_file.retrieve_contain()
self.file_contain = o_file.file_contain
def parse_config_table(self):
self._list_row = eval(self.table_contain)
self.parser()
def parse_contain(self):
_contain = self.file_contain
self._list_row = _contain.split("\n")
self.parser()
def parser(self):
_list_row = self._list_row
_contain_parsed = []
for _row in _list_row:
_row_split = _row.split('|')
_contain_parsed.append(_row_split)
self.contain_parsed = _contain_parsed[2:]
self.full_contain_parsed = _contain_parsed
def change_path(self):
full_contain_parsed = self.full_contain_parsed
try:
_path_string_list = full_contain_parsed[0][0].split(':')
self.parent.current_folder = _path_string_list[1].strip()
os.chdir(self.parent.current_folder)
except:
pass
def populate_gui(self):
_contain_parsed = self.contain_parsed
for _row, _entry in enumerate(_contain_parsed):
if _entry == ['']:
continue
self.parent.ui.table.insertRow(_row)
#select
_layout = QtGui.QHBoxLayout()
_widget = QtGui.QCheckBox()
_widget.setEnabled(True)
_layout.addWidget(_widget)
_layout.addStretch()
_new_widget = QtGui.QWidget()
_new_widget.setLayout(_layout)
#if _entry[0] == "True":
# _widget.setChecked(True)
_widget.stateChanged.connect(lambda state = 0,
row = _row: self.parent.table_select_state_changed(state, row))
self.parent.ui.table.setCellWidget(_row, 0, _new_widget)
#name
_item = QtGui.QTableWidgetItem(_entry[1])
self.parent.ui.table.setItem(_row, 1, _item)
#runs
_item = QtGui.QTableWidgetItem(_entry[2])
self.parent.ui.table.setItem(_row, 2, _item)
#Sample formula
if _entry[3]:
_item = QtGui.QTableWidgetItem(_entry[3])
else:
_item = QtGui.QTableWidgetItem("")
self.parent.ui.table.setItem(_row, 3, _item)
#mass density
if _entry[4]:
_item = QtGui.QTableWidgetItem(_entry[4])
else:
_item = QtGui.QTableWidgetItem("")
self.parent.ui.table.setItem(_row, 4, _item)
#radius
if _entry[5]:
_item = QtGui.QTableWidgetItem(_entry[5])
else:
_item = QtGui.QTableWidgetItem("")
self.parent.ui.table.setItem(_row, 5, _item)
#packing fraction
if _entry[6]:
_item = QtGui.QTableWidgetItem(_entry[6])
else:
_item = QtGui.QTableWidgetItem("")
self.parent.ui.table.setItem(_row, 6, _item)
#sample shape
_widget = QtGui.QComboBox()
_widget.addItem("Cylinder")
_widget.addItem("Sphere")
if _entry[7] == "Sphere":
_widget.setCurrentIndex(1)
self.parent.ui.table.setCellWidget(_row, 7, _widget)
#do abs corr
_layout = QtGui.QHBoxLayout()
_widget = QtGui.QCheckBox()
if _entry[8] == "True":
_widget.setCheckState(QtCore.Qt.Checked)
_widget.setStyleSheet("border: 2px; solid-black")
_widget.setEnabled(True)
_layout.addStretch()
_layout.addWidget(_widget)
_layout.addStretch()
_new_widget = QtGui.QWidget()
_new_widget.setLayout(_layout)
self.parent.ui.table.setCellWidget(_row, 8, _new_widget)
for _row, _entry in enumerate(_contain_parsed):
if _entry == ['']:
continue
#select
_widget = self.parent.ui.table.cellWidget(_row, 0).children()[1]
if _entry[0] == "True":
_widget.setChecked(True)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/import_table.py",
"copies": "1",
"size": "4792",
"license": "mit",
"hash": 2309817004641425000,
"line_mean": 30.7350993377,
"line_max": 104,
"alpha_frac": 0.5287979967,
"autogenerated": false,
"ratio": 4.026890756302521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5055688753002521,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
from qtpy.QtCore import Qt
from addie.processing.idl.step2_gui_handler import Step2GuiHandler
class RunSumScans(object):
script = 'python /SNS/NOM/shared/autoNOM/stable/sumscans.py '
output_file = ''
def __init__(self, parent=None):
self.parent = parent.ui.postprocessing_ui
self.parent_no_ui = parent
self.folder = os.getcwd()
def run(self):
self._background = self.collect_background_runs()
self._runs = self.collect_runs_checked()
self.create_output_file()
self.run_script()
def run_script(self):
_script_to_run = self.add_script_flags()
_script_to_run += ' -f ' + self.full_output_file_name + ' &'
self.parent_no_ui.launch_job_manager(job_name="SumScans",
script_to_run=_script_to_run)
print("[LOG] " + _script_to_run)
def add_script_flags(self):
_script = self.script
        if not self.parent.interactive_mode_checkbox.isChecked():
            _script += " -n True"
        if self.parent.pytest.isChecked():
            _script += " -u True"
qmax_list = str(self.parent.pdf_qmax_line_edit.text()).strip()
if not (qmax_list == ""):
_script += ' -q ' + qmax_list
return _script
def create_output_file(self):
_output_file_name = "sum_" + self.parent.sum_scans_output_file_name.text() + ".inp"
# print("_output_file_name: {}".format(_output_file_name))
_full_output_file_name = os.path.join(self.folder, _output_file_name)
# print("_full_output_file_name: {}".format(_full_output_file_name))
self.full_output_file_name = _full_output_file_name
f = open(_full_output_file_name, 'w')
for _label in self._runs:
f.write("%s %s\n" % (_label, self._runs[_label]))
f.write("endsamples\n")
f.write("Background %s\n" % self._background)
o_gui_handler = Step2GuiHandler(main_window=self.parent_no_ui)
# hydrogen flag
plattype_flag = 0
if o_gui_handler.is_hidrogen_clicked():
plattype_flag = 2
f.write("platype {}\n".format(plattype_flag))
# platrange
[plarange_min, plarange_max] = o_gui_handler.get_plazcek_range()
if plarange_min and plarange_max:
f.write("plarange {},{}\n".format(plarange_min, plarange_max))
# poly degree
poly_degree = str(self.parent.ndeg.value())
f.write("ndeg {}\n".format(poly_degree))
# qrangeft
[q_range_min, q_range_max] = o_gui_handler.get_q_range()
if q_range_min and q_range_max:
f.write("qrangeft {},{}\n".format(q_range_min, q_range_max))
# rmax
rmax = str(self.parent.sum_scans_rmax.text()).strip()
if not (rmax == ""):
f.write("rmax {}\n".format(rmax))
f.close()
print("[LOG] created file %s" % _full_output_file_name)
def collect_runs_checked(self):
table = self.parent.table
_runs = {}
for _row_index in range(table.rowCount()):
_selected_widget = table.cellWidget(_row_index, 0).children()[1]
if (_selected_widget.checkState() == Qt.Checked):
_label = str(table.item(_row_index, 1).text())
_value = str(table.item(_row_index, 2).text())
_runs[_label] = _value
return _runs
def collect_background_runs(self):
if self.parent.background_no.isChecked():
_background = str(self.parent.background_no_field.text())
else:
_background = str(self.parent.background_line_edit.text())
return _background
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/run_sum_scans.py",
"copies": "1",
"size": "3773",
"license": "mit",
"hash": -1131044315973222500,
"line_mean": 34.261682243,
"line_max": 91,
"alpha_frac": 0.5743440233,
"autogenerated": false,
"ratio": 3.396039603960396,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9469260848757861,
"avg_score": 0.0002245557005068543,
"num_lines": 107
} |
from __future__ import (absolute_import, division, print_function)
import os
from qtpy.QtCore import Qt
from qtpy.QtWidgets import (QCheckBox, QComboBox, QHBoxLayout, QMessageBox, QTableWidgetItem, QWidget)
from addie.processing.idl.generate_sumthing import GenerateSumthing
class PopulateMasterTable(object):
auto_sum_ini_file = 'auto_sum.inp'
error_reported = False
def __init__(self, main_window=None):
self.parent = main_window
def run(self):
try:
o_generate = GenerateSumthing(parent=self.parent,
folder=self.parent.current_folder)
o_generate.create_sum_inp_file()
self.read_auto_sum_file()
self.populate_table()
except IOError:
QMessageBox.warning(self.parent, "File does not exist!", "Check your folder! ")
self.error_reported = True
def empty_metadata(self):
_metadata = {'name': "",
'runs': "",
'sample_formula': "",
'mass_density': "",
'radius': "",
'packing_fraction': "",
'sample_shape': "",
'do_abs_correction': ""}
return _metadata
def read_auto_sum_file(self):
_full_auto_sum_file_name = os.path.join(self.parent.current_folder, self.auto_sum_ini_file)
f = open(_full_auto_sum_file_name, 'r')
_data = f.read()
f.close()
_data_table = _data.split("\n")
# remove first line (background)
self._data_from_file = _data_table[1:]
print("[LOG] Reading auto_sum_file (%s)" % _full_auto_sum_file_name)
print("[LOG] _data_table: ", _data_table)
def populate_table(self):
'''
In this new version, the table will append the new entries
'''
#o_table = addie.processing_idl.table_handler.TableHandler(parent = self.parent)
# o_table._clear_table()
# disable sorting
self.parent.postprocessing_ui.table.setSortingEnabled(False)
_index = 0
_columns_runs = self.get_columns_value(column=2)
for _entry in self._data_from_file:
if _entry.strip() == "":
continue
name_value = _entry.split(" ")
[name, value] = name_value
_metadata = self.empty_metadata()
_metadata['name'] = name
_metadata['runs'] = value
if self.runs_already_in_table(runs=value, table_runs=_columns_runs):
_index += 1
continue
self.add_new_row(_metadata, row=_index)
_index += 1
self.parent.postprocessing_ui.table.setSortingEnabled(True)
def get_columns_value(self, column=2):
column_values = []
nbr_row = self.parent.postprocessing_ui.table.rowCount()
for _row in range(nbr_row):
_value = str(self.parent.postprocessing_ui.table.item(_row, column).text())
column_values.append(_value)
return column_values
def runs_already_in_table(self, runs='', table_runs=[]):
if runs in table_runs:
return True
return False
def add_new_row(self, _metadata, row=0):
self.parent.postprocessing_ui.table.insertRow(row)
_layout = QHBoxLayout()
_widget = QCheckBox()
_widget.setEnabled(True)
_layout.addWidget(_widget)
_layout.addStretch()
_new_widget = QWidget()
_new_widget.setLayout(_layout)
_widget.stateChanged.connect(lambda state=0, row=row:
self.parent.table_select_state_changed(state, row))
self.parent.postprocessing_ui.table.setCellWidget(row, 0, _new_widget)
_item = QTableWidgetItem(_metadata['name'])
self.parent.postprocessing_ui.table.setItem(row, 1, _item)
_item = QTableWidgetItem(_metadata['runs'])
self.parent.postprocessing_ui.table.setItem(row, 2, _item)
if not _metadata['sample_formula']:
_item = QTableWidgetItem(_metadata['sample_formula'])
self.parent.postprocessing_ui.table.setItem(row, 3, _item)
if not _metadata['mass_density']:
_item = QTableWidgetItem(_metadata['mass_density'])
self.parent.postprocessing_ui.table.setItem(row, 4, _item)
if not _metadata['radius']:
_item = QTableWidgetItem(_metadata['radius'])
self.parent.postprocessing_ui.table.setItem(row, 5, _item)
if not _metadata['packing_fraction']:
_item = QTableWidgetItem(_metadata['packing_fraction'])
self.parent.postprocessing_ui.table.setItem(row, 6, _item)
_widget = QComboBox()
_widget.addItem("Cylinder")
_widget.addItem("Sphere")
if _metadata['sample_shape'] == 'Sphere':
_widget.setCurrentIndex(1)
self.parent.postprocessing_ui.table.setCellWidget(row, 7, _widget)
_layout = QHBoxLayout()
_widget = QCheckBox()
if _metadata['do_abs_correction'] == 'go':
_widget.setCheckState(Qt.Checked)
_widget.setStyleSheet("border: 2px; solid-black")
_widget.setEnabled(True)
_layout.addStretch()
_layout.addWidget(_widget)
_layout.addStretch()
_new_widget = QWidget()
_new_widget.setLayout(_layout)
self.parent.postprocessing_ui.table.setCellWidget(row, 8, _new_widget)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/populate_master_table.py",
"copies": "1",
"size": "5527",
"license": "mit",
"hash": -1297704840560142800,
"line_mean": 33.9810126582,
"line_max": 102,
"alpha_frac": 0.5778903564,
"autogenerated": false,
"ratio": 3.8867791842475388,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49646695406475383,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
from qtpy.QtWidgets import (QMessageBox)
import glob
from addie.autoNOM.make_exp_ini_file_and_run_autonom import MakeExpIniFileAndRunAutonom
class RunStep1(object):
keep_running_status = True
folder = None
auto_folder_base_name = 'autoNOM'
def __init__(self, parent=None, run_autonom=True):
self.parent = parent.ui
self.parent_no_ui = parent
self.run_autonom = run_autonom
def create_folder(self):
self._current_path = os.getcwd()
if not self.parent.create_folder_button.isChecked():
self.folder = self._current_path
return
if self.parent.manual_output_folder.isChecked():
self.create_manual_folder()
else:
self.create_auto_folder()
def create_exp_ini_file(self):
if self.keep_running_status is False:
return
_make_exp = MakeExpIniFileAndRunAutonom(parent=self.parent_no_ui, folder=self.folder)
_make_exp.create()
if self.run_autonom:
_make_exp.run_autonom()
def create_manual_folder(self):
_folder_name = str(self.parent.manual_output_folder_field.text()).strip()
_current_path = self._current_path
_full_path = os.path.join(_current_path, _folder_name)
self.folder = _full_path
if os.path.exists(_full_path):
message_box = QMessageBox()
message_box.setText("Folder Exists Already!")
message_box.setInformativeText("Do you want to replace it?")
message_box.setStandardButtons(QMessageBox.No | QMessageBox.Yes)
result = message_box.exec_()
if result == QMessageBox.Yes:
self._remove_folder(_full_path)
self._make_folder(_full_path)
else:
self.keep_running_status = False
else:
self._make_folder(_full_path)
def create_auto_folder(self):
list_folder = [_folder for _folder in glob.glob(self.auto_folder_base_name + '*') if os.path.isdir(_folder)]
if list_folder == []:
_folder_name = self.auto_folder_base_name + '_00'
else:
_last_index = self.retrieve_last_incremented_index(list_folder)
_new_index = "%.2d" % (int(_last_index)+1)
_folder_name = self.auto_folder_base_name + '_' + _new_index
_full_path = os.path.join(self._current_path, _folder_name)
self.folder = _full_path
self._make_folder(_full_path)
if self.run_autonom:
self.parent.statusbar.showMessage("Created folder: " + _full_path + " and running autoNOM script !")
else:
self.parent.statusbar.showMessage("Created folder: " + _full_path)
def retrieve_last_incremented_index(self, list_folder):
_list_index = []
for _folder in list_folder:
_folder_split = _folder.split('_')
if len(_folder_split) > 1:
try:
# checking that the variable is an integer
_list_index.append(int(_folder_split[1]))
except:
pass
if _list_index == []:
return -1
_list_index.sort()
return(_list_index[-1])
def _remove_folder(self, folder_name):
os.rmdir(folder_name)
def _make_folder(self, folder_name):
os.mkdir(folder_name)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/autoNOM/run_step1.py",
"copies": "1",
"size": "3476",
"license": "mit",
"hash": -12565472475804572,
"line_mean": 33.76,
"line_max": 116,
"alpha_frac": 0.585155351,
"autogenerated": false,
"ratio": 3.7376344086021507,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4822789759602151,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
import configparser
import numpy as np
class FileHandler(object):
file_contain = []
def __init__(self, filename=None):
self.filename = filename
@staticmethod
def is_file_correct_extension(filename='', ext_requested='csv'):
[_, _ext] = os.path.splitext(filename)
if _ext == ".{}".format(ext_requested):
return True
return False
def csv_parser(self):
        data = np.genfromtxt(self.filename, dtype='str', delimiter=',', comments=None)
        headers = data[0, :]
        obj = dict()
        for col_id, col_name in enumerate(headers):
            obj[col_name] = data[1:, col_id]
return obj
def retrieve_contain(self):
file_contain = []
with open(self.filename, 'r') as f:
file_contain = f.read()
self.file_contain = file_contain
def check_file_extension(self, ext_requested='txt'):
file_parsed = self.filename.split(".")
if len(file_parsed) > 1:
_ext = file_parsed[-1]
if _ext != ext_requested:
self.filename = self.filename + "." + ext_requested
else:
self.filename = self.filename + "." + ext_requested
def create_ascii(self, contain=None, carriage_return=True):
_filename = self.filename
f = open(_filename, 'w')
for _line in contain:
if carriage_return:
f.write(_line + "\n")
else:
f.write(_line)
f.close()
def create_config_parser(self, section_name='Configuration', dictionary=None):
config = configparser.ConfigParser()
cfgfile = open(self.filename, 'w')
config.add_section(section_name)
for key, value in dictionary.items():
config.set(section_name, key, value)
config.write(cfgfile)
cfgfile.close()
def is_file_writable(self):
if os.path.exists(self.filename):
if os.path.isfile(self.filename):
return os.access(self.filename, os.W_OK)
else:
return False
pdir = os.path.dirname(self.filename)
if not pdir:
pdir = '.'
return os.access(pdir, os.W_OK)
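# Minimal usage sketch ('runs.csv' is a placeholder file name):
#
#     o_file = FileHandler(filename='runs.csv')
#     if o_file.is_file_correct_extension(o_file.filename, ext_requested='csv'):
#         table = o_file.csv_parser()  # dict of column name -> numpy array of strings
#
# csv_parser() keeps every cell as a string; callers cast columns as needed.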
| {
"repo_name": "neutrons/FastGR",
"path": "addie/utilities/file_handler.py",
"copies": "1",
"size": "2300",
"license": "mit",
"hash": -7503333513245508000,
"line_mean": 29.6666666667,
"line_max": 83,
"alpha_frac": 0.5643478261,
"autogenerated": false,
"ratio": 3.9383561643835616,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5002703990483561,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import hashlib
from cbopensource.tools.eventduplicator.utils import get_process_id, json_encode
import json
import codecs
from collections import defaultdict
import logging
__author__ = 'jgarman'
log = logging.getLogger(__name__)
def get_process_path(proc_guid):
key = hashlib.md5(str(proc_guid).encode('utf8')).hexdigest()
return os.path.join(key[:2].upper(), '%s.json' % proc_guid)
def get_binary_path(md5sum):
return os.path.join(md5sum[:2].upper(), '%s.json' % md5sum.lower())
class FileInputSource(object):
def __init__(self, pathname):
self.pathname = pathname
self.reader = codecs.getreader("utf-8")
def get_version(self):
return open(os.path.join(self.pathname, 'VERSION'), 'r').read()
def get_process_docs(self, query_filter=None):
# TODO: the query_filter is a code smell... we should push the traversal code into the Source?
if query_filter:
return
for root, dirs, files in os.walk(os.path.join(self.pathname, 'procs')):
for fn in files:
yield json.load(self.reader(open(os.path.join(root, fn), 'rb')))
def get_feed_doc(self, feed_key):
pathname = os.path.join(self.pathname, 'feeds', '%s.json' % feed_key)
try:
return json.load(self.reader(open(pathname, 'rb')))
except Exception as e:
log.warning("Could not open feed document: %s - %s" % (pathname, str(e)))
return None
def get_feed_metadata(self, feed_id):
pathname = os.path.join(self.pathname, 'feeds', '%s.json' % feed_id)
try:
return json.load(self.reader(open(pathname, 'rb')))
except Exception as e:
log.warning("Could not open feed metadata: %s - %s" % (pathname, str(e)))
return None
def get_binary_doc(self, md5sum):
md5sum = md5sum.lower()
pathname = os.path.join(self.pathname, 'binaries', get_binary_path(md5sum))
try:
return json.load(self.reader(open(pathname, 'rb')))
except Exception as e:
log.warning("Could not open binary document: %s - %s" % (pathname, str(e)))
return None
def get_sensor_doc(self, sensor_id):
pathname = os.path.join(self.pathname, 'sensors', '%d.json' % sensor_id)
try:
            return json.load(open(pathname, 'r'))
except Exception as e:
log.warning("Could not open sensor document: %s - %s" % (pathname, str(e)))
return None
def connection_name(self):
return self.pathname
def cleanup(self):
pass
class FileOutputSink(object):
def __init__(self, pathname):
self.pathname = pathname
os.makedirs(pathname, 0o755)
os.makedirs(os.path.join(pathname, 'procs'), 0o755)
os.makedirs(os.path.join(pathname, 'binaries'), 0o755)
os.makedirs(os.path.join(pathname, 'sensors'), 0o755)
os.makedirs(os.path.join(pathname, 'feeds'), 0o755)
# TODO: only create the directories we need
for dirname in ['procs', 'binaries']:
for segment in ['%02X' % x for x in range(0, 256)]:
os.makedirs(os.path.join(pathname, dirname, segment), 0o755)
self.written_docs = defaultdict(int)
self.new_metadata = defaultdict(list)
def output_process_doc(self, doc_content):
proc_guid = get_process_id(doc_content)
pathname = os.path.join(self.pathname, 'procs', get_process_path(proc_guid))
if os.path.exists(pathname):
log.warning('process %s already existed, writing twice' % proc_guid)
self.format_date_fields(doc_content)
open(os.path.join(self.pathname, 'procs', get_process_path(proc_guid)), 'w').write(json_encode(doc_content))
self.written_docs['proc'] += 1
def format_date_fields(self, doc_content):
for date_field in ['last_update', 'start', 'server_added_timestamp', 'last_server_update']:
if (date_field in doc_content) and ('.' not in doc_content[date_field]):
# Change a date string like 2015-11-10T19:54:45Z to 2015-11-10T19:54:45.000Z
doc_content[date_field] = '{}.000Z'.format(doc_content[date_field][:-1])
def output_binary_doc(self, doc_content):
md5sum = doc_content.get('md5').lower()
open(os.path.join(self.pathname, 'binaries', get_binary_path(md5sum)), 'w').write(json_encode(doc_content))
self.written_docs['binary'] += 1
def output_sensor_info(self, doc_content):
open(os.path.join(self.pathname, 'sensors', '%s.json' % doc_content['sensor_info']['id']), 'w').\
write(json_encode(doc_content))
self.new_metadata['sensor'].append(doc_content['sensor_info']['computer_name'])
def output_feed_doc(self, doc_content):
open(os.path.join(self.pathname, 'feeds', '%s:%s.json' % (doc_content['feed_name'], doc_content['id'])), 'w').\
write(json_encode(doc_content))
self.written_docs['feed'] += 1
def output_feed_metadata(self, doc_content):
open(os.path.join(self.pathname, 'feeds', '%s.json' % (doc_content['id'],)), 'w').\
write(json_encode(doc_content))
self.new_metadata['feed'].append(doc_content['name'])
def set_data_version(self, version):
if type(version) != str:
version = version.decode('utf8')
open(os.path.join(self.pathname, 'VERSION'), 'w').write(version)
return True
def cleanup(self):
pass
def connection_name(self):
return self.pathname
def report(self):
report_data = "Documents saved to %s by type:\n" % (self.pathname,)
for key in self.written_docs.keys():
report_data += " %8s: %d\n" % (key, self.written_docs[key])
for key in self.new_metadata.keys():
report_data += "New %ss created in %s:\n" % (key, self.pathname)
for value in self.new_metadata[key]:
report_data += " %s\n" % value
return report_data
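# Rough round-trip sketch (the path is a placeholder): documents written by
# FileOutputSink can be read back with FileInputSource from the same tree.
#
#     sink = FileOutputSink('/tmp/cb-export')  # target directory must not exist yet
#     sink.set_data_version('5.1.0')
#     ...
#     source = FileInputSource('/tmp/cb-export')
#     print(source.get_version())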
| {
"repo_name": "carbonblack/cb-event-duplicator",
"path": "cbopensource/tools/eventduplicator/file_endpoint.py",
"copies": "1",
"size": "6176",
"license": "mit",
"hash": -7864493006316601000,
"line_mean": 38.8451612903,
"line_max": 119,
"alpha_frac": 0.6081606218,
"autogenerated": false,
"ratio": 3.4735658042744655,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45817264260744656,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import hashlib
import json
import random
import string
from base64 import b64decode, b64encode
import jinja2
import yaml
def get_hash(data, hashtype='sha1'):
h = hashlib.new(hashtype)
h.update(data)
return h.hexdigest()
def rand_string(size=32, chars=(string.ascii_letters + string.digits), seed=None):
if seed == "":
seed = None
random.seed(seed)
size = int(size)
return ''.join(random.choice(chars) for _ in range(size))
def rand_alphanum(size=32, seed=None):
return rand_string(size=int(size), seed=seed)
def rand_alpha(size=32, seed=None):
return rand_string(size=int(size), chars=string.ascii_letters, seed=seed)
def randint(size=32, seed=None):
size = int(size)
return rand_string(size=size, chars=string.digits, seed=seed)
def gen_private_ecdsa():
from ecdsa import SigningKey
sk = SigningKey.generate()
return sk.to_pem()
def gen_private_rsa():
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
backend=default_backend())
pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
return pem
def gen_private_dsa():
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import dsa
private_key = dsa.generate_private_key(key_size=1024, backend=default_backend())
pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
return pem
all_privates = {}
def gen_privatekey(keytype='rsa', key='', seed=None):
if seed is None:
seed = rand_alphanum(128)
k = seed + key
generators = {"ecdsa": gen_private_ecdsa, "rsa": gen_private_rsa, "dsa": gen_private_dsa}
if k not in all_privates:
all_privates[k] = {}
if keytype not in ["ecdsa", "dsa", "rsa"]:
raise ValueError("Unknow private key type: %s" % keytype)
if keytype not in all_privates[k]:
all_privates[k][keytype] = generators[keytype]()
return all_privates[k][keytype]
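# Generated keys are memoised in ``all_privates`` per (seed + key, keytype)
# pair, so identical inputs return identical PEM bytes within a process:
#
#     pem1 = gen_privatekey('rsa', key='server', seed='fixed-seed')
#     pem2 = gen_privatekey('rsa', key='server', seed='fixed-seed')
#     assert pem1 == pem2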
def jinja_env():
from appr.template_filters import jinja_filters
jinjaenv = jinja2.Environment()
jinjaenv.filters.update(jinja_filters())
return jinjaenv
def getenv(name, default=None):
return os.getenv(name, default)
def jinja_template(val, env=None):
from appr.utils import convert_utf8
jinjaenv = jinja_env()
template = jinjaenv.from_string(val)
if env is not None:
variables = convert_utf8(json.loads(env))
return template.render(variables)
def readfile(val, encode=False):
with open(val, 'rb') as f:
content = f.read()
if encode:
content = b64encode(content)
return content
def listdir(path):
return os.listdir(path)
def walkdir(path):
files = []
for root, _, filenames in os.walk(path):
for filename in filenames:
files.append(os.path.join(root, filename))
return files
def jsonnet(val, env=None):
from appr.render_jsonnet import RenderJsonnet
from appr.utils import convert_utf8
r = RenderJsonnet()
if env is not None:
variables = convert_utf8(json.loads(env))
return r.render_jsonnet(val, tla_codes=variables)
def json_to_yaml(value):
"""
    Converts a JSON string into its YAML representation using
    ``yaml.safe_dump``.
"""
return yaml.safe_dump(json.loads(value))
def json_dumps(value, **kwargs):
"""
Serializes an object as JSON. Optionally given keyword arguments
are passed to json.dumps(), ensure_ascii however defaults to False.
"""
kwargs.setdefault('ensure_ascii', False)
return json.dumps(value, **kwargs)
def yaml_dumps(value):
"""
    Serializes an object as YAML using ``yaml.dump`` with
    ``default_flow_style=True``.
"""
return yaml.dump(value, default_flow_style=True)
def json_loads(value):
"""
    Deserializes a JSON string into the corresponding Python object.
"""
return json.loads(value)
def yaml_loads(value):
"""
    Deserializes a YAML string into the corresponding Python object.
"""
    return yaml.safe_load(value)
def path_exists(path, isfile=None):
if isfile:
return os.path.isfile(path)
else:
return os.path.exists(path)
def obj_loads(value):
try:
return json.loads(value)
except ValueError:
        return yaml.safe_load(value)
def jinja_filters():
filters = {
'json': json_dumps,
'yaml': yaml_dumps,
'get_hash': get_hash,
'b64decode': b64decode,
'b64encode': b64encode,
'gen_privatekey': gen_privatekey,
'rand_alphanum': rand_alphanum,
'rand_alpha': rand_alpha}
return filters
def jsonnet_callbacks():
filters = {
'getenv': (('value', 'default', ), getenv),
'b64encode': (('value', ), b64encode),
'b64decode': (('value', ), b64decode),
'path_exists': (('path', 'isfile', ), path_exists),
'walkdir': (('path', ), walkdir),
'listdir': (('path', ), listdir),
        'read': (('filepath', 'b64encode', ), readfile),
'hash': (('data', 'hashtype'), get_hash),
'to_yaml': (('value', ), json_to_yaml),
'rand_alphanum': (('size', 'seed'), rand_alphanum),
'rand_alpha': (('size', 'seed'), rand_alpha),
'randint': (('size', 'seed'), randint),
'jinja2': (('template', 'env'), jinja_template),
'jsonnet': (('template', 'env'), jsonnet),
'json_loads': (('jsonstr', ), json_loads),
'yaml_loads': (('jsonstr', ), yaml_loads),
'obj_loads': (('jsonstr', ), obj_loads),
'privatekey': (('keytype', "key", "seed"), gen_privatekey), }
return filters
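# Minimal sketch of exercising the jinja-side helpers defined above:
#
#     env = jinja_env()
#     template = env.from_string("{{ 16 | rand_alphanum(seed='abc') }}")
#     print(template.render())  # 16 alphanumeric characters, reproducible via the seed
#
# jsonnet_callbacks() exposes the same helpers to jsonnet templates as native
# callbacks, mapping each name to (parameter names, python callable).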
| {
"repo_name": "app-registry/appr",
"path": "appr/template_filters.py",
"copies": "2",
"size": "6642",
"license": "apache-2.0",
"hash": -2550097358742126000,
"line_mean": 28.52,
"line_max": 93,
"alpha_frac": 0.6371574827,
"autogenerated": false,
"ratio": 3.712688652878703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349846135578703,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import importlib
import logging
logger = logging.getLogger(__name__)
filetypes = ['py', 'txt', 'dat']
blacklisted = [' his ', ' him ', ' guys ', ' guy ']
class ValuesError(ValueError):
pass
class UnwelcomenessError(ValuesError):
pass
def _everybody_welcome_here(string_to_check, blacklisted=blacklisted):
for line in string_to_check.split('\n'):
for b in blacklisted:
if b in string_to_check:
raise UnwelcomenessError(
"string %s contains '%s' which is blacklisted. Tests will "
"not pass until this language is changed. For tips on "
"writing gender-neutrally, see "
"http://www.lawprose.org/blog/?p=499. Blacklisted words: "
"%s" % (string_to_check, b, blacklisted)
)
def _openess_tester(module):
if hasattr(module, '__all__'):
funcs = module.__all__
else:
funcs = dir(module)
    for f in funcs:
        # ``f`` is an attribute *name*; inspect the docstring of the attribute
        # itself rather than the docstring of the ``str`` type.
        docstring = getattr(getattr(module, f, None), '__doc__', None) or ''
        yield _everybody_welcome_here, docstring
def test_openness():
"""Testing for sexist language
Ensure that our library does not contain sexist (intentional or otherwise)
language. For tips on writing gender-neutrally,
see http://www.lawprose.org/blog/?p=499
Notes
-----
Inspired by
https://modelviewculture.com/pieces/gendered-language-feature-or-bug-in-software-documentation
and
https://modelviewculture.com/pieces/the-open-source-identity-crisis
"""
starting_package = 'skbeam'
modules, files = get_modules_in_library(starting_package)
for m in modules:
yield _openess_tester, importlib.import_module(m)
for afile in files:
# logger.debug('testing file %s', afile)
with open(afile, 'r') as f:
yield _everybody_welcome_here, f.read()
_IGNORE_FILE_EXT = ['.pyc', '.so', '.ipynb', '.jpg', '.txt', '.zip', '.c']
_IGNORE_DIRS = ['__pycache__', '.git', 'cover', 'build', 'dist', 'tests',
'.ipynb_checkpoints', 'SOFC']
def get_modules_in_library(library, ignorefileext=None, ignoredirs=None):
"""
Parameters
----------
library : str
The library to be imported
    ignorefileext : list, optional
        List of file-extension strings (including the leading dot) that
        should be ignored.
        Defaults to the ``_IGNORE_FILE_EXT`` list in this module
    ignoredirs : list, optional
        List of strings that, if present in the file path, will cause all
        sub-directories to be ignored.
        Defaults to the ``_IGNORE_DIRS`` list in this module
    Returns
    -------
    modules : list
        List of module names that can be imported with
        ``importlib.import_module(module)``
    other_files : list
        List of paths to files that were found but are not importable modules
"""
if ignoredirs is None:
ignoredirs = _IGNORE_DIRS
if ignorefileext is None:
ignorefileext = _IGNORE_FILE_EXT
module = importlib.import_module(library)
# if hasattr(module, '__all__'):
# functions = module.__all__
# else:
# functions = dir(module)
# print('functions: %s' % functions)
mods = []
other_files = []
top_level = os.sep.join(module.__file__.split(os.sep)[:-1])
for path, dirs, files in os.walk(top_level):
skip = False
for ignore in ignoredirs:
if ignore in path:
skip = True
break
if skip:
continue
if path.split(os.sep)[-1] in ignoredirs:
continue
for f in files:
file_base, file_ext = os.path.splitext(f)
if file_ext not in ignorefileext:
                # os.path.splitext keeps the leading dot, so compare against '.py'
                if file_ext == '.py':
mod_path = path[len(top_level)-len(library):].split(os.sep)
if not file_base == '__init__':
mod_path.append(file_base)
mod_path = '.'.join(mod_path)
mods.append(mod_path)
else:
other_files.append(os.path.join(path, f))
return mods, other_files
if __name__ == '__main__':
import nose
import sys
nose_args = ['-s'] + sys.argv[1:]
nose.runmodule(argv=nose_args, exit=False)
| {
"repo_name": "licode/scikit-beam",
"path": "skbeam/tests/test_openness.py",
"copies": "7",
"size": "4309",
"license": "bsd-3-clause",
"hash": -3835814145431209000,
"line_mean": 30.2246376812,
"line_max": 97,
"alpha_frac": 0.5785565096,
"autogenerated": false,
"ratio": 3.8473214285714286,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7925877938171428,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import io
import hashlib
import json
import tempfile
import matplotlib.pyplot as plt
HASH_LIBRARY_NAME = 'figure_hashes.json'
# Load the hash library if it exists
try:
with open(os.path.join(os.path.dirname(__file__), HASH_LIBRARY_NAME)) as infile:
hash_library = json.load(infile)
except IOError:
hash_library = {}
file_list = {}
def hash_figure(figure=None):
"""
For a matplotlib.figure.Figure, returns the SHA256 hash as a hexadecimal string.
Parameters
----------
figure : matplotlib.figure.Figure
If None is specified, the current figure is used (as determined by matplotlib.pyplot.gcf())
Returns
-------
out : string
The SHA256 hash in hexadecimal representation
"""
if figure is None:
figure = plt.gcf()
imgdata = tempfile.NamedTemporaryFile(delete=False)
figure.savefig(imgdata, format='png')
imgdata.seek(0)
buf = imgdata.read()
imgdata.close()
hasher = hashlib.sha256()
hasher.update(buf)
file_list[hasher.hexdigest()] = imgdata.name
return hasher.hexdigest()
def verify_figure_hash(name, figure=None):
"""
Verifies whether a figure has the same hash as the named hash in the current hash library.
If the hash library does not contain the specified name, the hash is added to the library.
Parameters
----------
name : string
The identifier for the hash in the hash library
figure : matplotlib.figure.Figure
If None is specified, the current figure is used (as determined by matplotlib.pyplot.gcf())
Returns
-------
out : bool
False if the figure's hash does not match the named hash, otherwise True
"""
if name not in hash_library:
hash_library[name] = hash_figure(figure)
return True
return hash_library[name] == hash_figure(figure)
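# Minimal usage sketch (the figure name is an arbitrary identifier chosen by
# the test author):
#
#     plt.plot([0, 1], [0, 1])
#     assert verify_figure_hash('line_through_origin')
#
# The first call for a given name registers its hash in the in-memory library;
# subsequent calls compare the current figure against the stored value.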
| {
"repo_name": "Alex-Ian-Hamilton/sunpy",
"path": "sunpy/tests/hash.py",
"copies": "1",
"size": "1941",
"license": "bsd-2-clause",
"hash": -1094753007994943900,
"line_mean": 26.338028169,
"line_max": 99,
"alpha_frac": 0.6687274601,
"autogenerated": false,
"ratio": 4.112288135593221,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0018805192907439528,
"num_lines": 71
} |
from __future__ import absolute_import, division, print_function
import os
import math
import hmac
import json
import hashlib
import argparse
from random import shuffle
from pathlib2 import Path
import numpy as np
import tensorflow as tf
from tensorflow.data import Dataset
def info(msg, char="#", width=75):
print("")
print(char * width)
print(char + " %0*s" % ((-1 * width) + 5, msg) + char)
print(char * width)
def check_dir(path):
if not os.path.exists(path):
os.makedirs(path)
return Path(path).resolve(strict=False)
def process_image(path, label, img_size):
img_raw = tf.io.read_file(path)
img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
img_final = tf.image.resize(img_tensor, [img_size, img_size]) / 255
return img_final, label
def load_dataset(base_path, dset, split=None):
# normalize splits
if split is None:
split = [8, 1, 1]
splits = np.array(split) / np.sum(np.array(split))
# find labels - parent folder names
labels = {}
for (_, dirs, _) in os.walk(base_path):
print('found {}'.format(dirs))
labels = {k: v for (v, k) in enumerate(dirs)}
print('using {}'.format(labels))
break
# load all files along with idx label
print('loading dataset from {}'.format(dset))
with open(dset, 'r') as d:
data = [(str(Path(line.strip()).absolute()),
labels[Path(line.strip()).parent.name]) for line in d.readlines()] # noqa: E501
print('dataset size: {}\nsuffling data...'.format(len(data)))
# shuffle data
shuffle(data)
print('splitting data...')
# split data
train_idx = int(len(data) * splits[0])
return data[:train_idx]
# @print_info
def run(
dpath,
img_size=160,
epochs=10,
batch_size=32,
learning_rate=0.0001,
output='model',
dset=None):
img_shape = (img_size, img_size, 3)
info('Loading Data Set')
# load dataset
train = load_dataset(dpath, dset)
# training data
train_data, train_labels = zip(*train)
train_ds = Dataset.zip((Dataset.from_tensor_slices(list(train_data)),
Dataset.from_tensor_slices(list(train_labels)),
Dataset.from_tensor_slices([img_size]*len(train_data))))
print(train_ds)
train_ds = train_ds.map(map_func=process_image,
num_parallel_calls=5)
train_ds = train_ds.apply(tf.data.experimental.ignore_errors())
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.prefetch(buffer_size=5)
train_ds = train_ds.repeat()
# model
info('Creating Model')
base_model = tf.keras.applications.MobileNetV2(input_shape=img_shape,
include_top=False,
weights='imagenet')
base_model.trainable = True
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
loss='binary_crossentropy',
metrics=['accuracy'])
model.summary()
# training
info('Training')
steps_per_epoch = math.ceil(len(train) / batch_size)
model.fit(train_ds, epochs=epochs, steps_per_epoch=steps_per_epoch)
# save model
info('Saving Model')
# check existence of base model folder
output = check_dir(output)
print('Serializing into saved_model format')
tf.saved_model.save(model, str(output))
print('Done!')
# add time prefix folder
file_output = str(Path(output).joinpath('latest.h5'))
print('Serializing h5 model to:\n{}'.format(file_output))
model.save(file_output)
return generate_hash(file_output, 'kf_pipeline')
def generate_hash(dfile, key):
print('Generating hash for {}'.format(dfile))
m = hmac.new(str.encode(key), digestmod=hashlib.sha256)
BUF_SIZE = 65536
with open(str(dfile), 'rb') as myfile:
while True:
data = myfile.read(BUF_SIZE)
if not data:
break
m.update(data)
return m.hexdigest()
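# For example (the path is a placeholder), generate_hash('model/latest.h5',
# 'kf_pipeline') returns a hex HMAC-SHA256 digest; the same helper is used
# below to sign both the dataset listing and the serialized model so later
# pipeline steps can detect changes.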
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='transfer learning for binary image task')
parser.add_argument('-s', '--base_path',
help='directory to base data', default='../../data')
parser.add_argument(
'-d', '--data', help='directory to training and test data', default='train') # noqa: E501
parser.add_argument(
'-e', '--epochs', help='number of epochs', default=10, type=int)
parser.add_argument('-b', '--batch', help='batch size',
default=32, type=int)
parser.add_argument('-i', '--image_size',
help='image size', default=160, type=int)
parser.add_argument('-l', '--lr', help='learning rate',
default=0.0001, type=float)
parser.add_argument('-o', '--outputs',
help='output directory', default='model')
parser.add_argument('-f', '--dataset', help='cleaned data listing')
args = parser.parse_args()
info('Using TensorFlow v.{}'.format(tf.__version__))
data_path = Path(args.base_path).joinpath(args.data).resolve(strict=False)
target_path = Path(args.base_path).resolve(
strict=False).joinpath(args.outputs)
dataset = Path(args.base_path).joinpath(args.dataset)
image_size = args.image_size
params = Path(args.base_path).joinpath('params.json')
args = {
"dpath": str(data_path),
"img_size": image_size,
"epochs": args.epochs,
"batch_size": args.batch,
"learning_rate": args.lr,
"output": str(target_path),
"dset": str(dataset)
}
dataset_signature = generate_hash(dataset, 'kf_pipeline')
# printing out args for posterity
for i in args:
print('{} => {}'.format(i, args[i]))
model_signature = run(**args)
args['dataset_signature'] = dataset_signature.upper()
args['model_signature'] = model_signature.upper()
args['model_type'] = 'tfv2-MobileNetV2'
print('Writing out params...', end='')
with open(str(params), 'w') as f:
json.dump(args, f)
print(' Saved to {}'.format(str(params)))
# python train.py -d train -e 3 -b 32 -l 0.0001 -o model -f train.txt
| {
"repo_name": "kubeflow/examples",
"path": "pipelines/azurepipeline/code/training/train.py",
"copies": "1",
"size": "6509",
"license": "apache-2.0",
"hash": -4263225679812289500,
"line_mean": 29.8483412322,
"line_max": 98,
"alpha_frac": 0.5954831771,
"autogenerated": false,
"ratio": 3.626183844011142,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4721667021111142,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import multiprocessing
import threading
import chainlet
import chainlet.dataflow
import chainlet.chainlink
import chainlet.primitives.link
import chainlet.signals
class NamedChainlet(chainlet.dataflow.NoOp):
"""Chainlet with nice representation"""
def __init__(self, name):
self.name = name
def __repr__(self):
return '%s' % self.name
class Adder(NamedChainlet):
def __init__(self, value=2):
NamedChainlet.__init__(self, name='<%+d>' % value)
self.value = value
def chainlet_send(self, value=None):
return value + self.value
class Buffer(chainlet.primitives.link.ChainLink):
def __init__(self):
self.buffer = []
def chainlet_send(self, value=None):
self.buffer.append(value)
return value
def __repr__(self):
return '<%s>' % self.buffer
class MultiprocessBuffer(Buffer):
def __init__(self):
super(MultiprocessBuffer, self).__init__()
self._queue = multiprocessing.Queue()
self._close_signal = os.urandom(16)
self._pid = os.getpid()
receiver = threading.Thread(target=self._recv)
receiver.daemon = True
receiver.start()
def _recv(self):
_close_signal = self._close_signal
_queue = self._queue
buffer = self.buffer
del self
while True:
value = _queue.get()
if value == _close_signal:
break
buffer.append(value)
_queue.close()
def chainlet_send(self, value=None):
self._queue.put(value)
return value
def __del__(self):
self._queue.put(self._close_signal)
self._queue.close()
@chainlet.genlet(prime=False)
def produce(iterable):
"""Produce values from an iterable for a chain"""
for element in iterable:
yield element
@chainlet.funclet
def abort_swallow(value):
"""Always abort the chain without returning"""
raise chainlet.signals.StopTraversal
class AbortEvery(chainlet.primitives.link.ChainLink):
"""
Abort every n'th traversal of the chain
This returns its input for calls 1, ..., n-1, then raise StopTraversal on n.
"""
def __init__(self, every=2):
super(AbortEvery, self).__init__()
self.every = every
self._count = 0
def chainlet_send(self, value=None):
self._count += 1
if self._count % self.every:
return value
raise chainlet.signals.StopTraversal
class ReturnEvery(chainlet.primitives.link.ChainLink):
"""
Abort-return every n'th traversal of the chain
This abort-returns its input for call 1, then raise StopTraversal on 2, ..., n.
"""
def __init__(self, every=2):
super(ReturnEvery, self).__init__()
self.every = every
self._count = 0
def chainlet_send(self, value=None):
if self._count % self.every:
self._count += 1
raise chainlet.signals.StopTraversal
self._count += 1
return value
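# Minimal sketch of the helpers above used directly, outside of a chain:
#
#     adder = Adder(+10)
#     assert adder.chainlet_send(1) == 11
#
#     buf = Buffer()
#     buf.chainlet_send('x')
#     assert buf.buffer == ['x']
#
# AbortEvery/ReturnEvery raise chainlet.signals.StopTraversal to short-circuit
# traversal and are intended to be combined with produce() in the unit tests.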
| {
"repo_name": "maxfischer2781/chainlet",
"path": "chainlet_unittests/utility.py",
"copies": "1",
"size": "3107",
"license": "mit",
"hash": -381563194153056260,
"line_mean": 24.6776859504,
"line_max": 83,
"alpha_frac": 0.6121660766,
"autogenerated": false,
"ratio": 3.922979797979798,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00020041686708353374,
"num_lines": 121
} |
from __future__ import (absolute_import, division, print_function)
import os
import numpy as np
from collections import OrderedDict
import copy
import simplejson
from qtpy.QtWidgets import QDialog
from addie.utilities import load_ui
from qtpy import QtCore, QtGui
from addie.utilities.file_handler import FileHandler
from addie.utilities.list_runs_parser import ListRunsParser
from addie.utilities.set import Set
from addie.processing.mantid.master_table.table_row_handler import TableRowHandler
from addie.processing.mantid.master_table.utilities import LoadGroupingFile
# init test dictionary (to test loader)
_dictionary_test = OrderedDict()
_density_dict = {"mass_density": {"value": "N/A",
"selected": True},
"number_density": {"value": "N/A",
"selected": False},
"mass": {"value": "N/A",
"selected": False},
}
_default_empty_row = {
"activate": True,
"title": "",
"sample": {
"runs": "",
"background": {
"runs": "",
"background": "",
},
"material": "",
"density": copy.deepcopy(_density_dict),
"packing_fraction": "",
"geometry": {
"shape": "Cylinder",
"radius": "N/A",
"radius2": "N/A",
"height": "N/A",
},
"abs_correction": "",
"multi_scattering_correction": "",
"inelastic_correction": "",
"placzek": {},
},
"normalization": {
"runs": "",
"background": {
"runs": "",
"background": "",
},
"material": "",
"density": copy.deepcopy(_density_dict),
"packing_fraction": "",
"geometry": {
"shape": "Cylinder",
"radius": "N/A",
"radius2": "N/A",
"height": "N/A",
},
"abs_correction": "",
"multi_scattering_correction": "",
"inelastic_correction": "",
"placzek": {},
},
"input_grouping": "",
"output_grouping": "",
"AlignAndFocusArgs": {},
}
# for debugging, faking a 2 row dictionary
# _dictionary_test[0] = copy.deepcopy(_default_empty_row)
# _dictionary_test[0]["activate"] = False
# _dictionary_test[0]["title"] = "this is row 0"
# _dictionary_test[0]["sample"]["run"] = "1,2,3,4,5"
# _dictionary_test[0]["sample"]["background"]["runs"] = "10,20"
# _dictionary_test[0]["sample"]["background"]["background"] = "100:300"
# _dictionary_test[0]["sample"]["material"] = "material 1"
# _dictionary_test[0]["sample"]["packing_fraction"] = "fraction 1"
# _dictionary_test[0]["sample"]["geometry"]["shape"] = "Sphere"
# _dictionary_test[0]["sample"]["geometry"]["radius_cm"] = "5"
# _dictionary_test[0]["sample"]["geometry"]["height_cm"] = "15"
# _dictionary_test[0]["sample"]["abs_correction"] = "Monte Carlo"
# _dictionary_test[0]["sample"]["multi_scattering_correction"] = "None"
# _dictionary_test[0]["sample"]["inelastic_correction"] = "Placzek"
#
# _dictionary_test[1] = copy.deepcopy(_default_empty_row)
class LoaderOptionsInterface(QDialog):
real_parent = None
def __init__(self, parent=None, is_parent_main_ui=True, real_parent=None):
"""
        This class can be called from different levels of the UI. In the case of the import-from-database UI,
        the real_parent parameter is needed so that this UI and the UI above it can be closed, and so that a
        function can be run in the parent UI before closing.
:param parent:
:param is_parent_main_ui:
:param real_parent:
"""
if is_parent_main_ui:
self.parent = parent
else:
self.real_parent = real_parent
self.parent = parent.parent
QDialog.__init__(self, parent=parent)
self.ui = load_ui('list_of_scan_loader_dialog.ui', baseinstance=self)
self.init_widgets()
self.setWindowTitle("Options to load list of runs selected")
self.parent.ascii_loader_option = None
def init_widgets(self):
self.radio_button_changed()
def get_option_selected(self):
if self.ui.option1.isChecked():
return 1
elif self.ui.option2.isChecked():
return 2
elif self.ui.option3.isChecked():
return 3
else:
return 4
def radio_button_changed(self):
option_selected = self.get_option_selected()
image = ":/preview/load_csv_case{}.png".format(option_selected)
self.ui.preview_label.setPixmap(QtGui.QPixmap(image))
class AsciiLoaderOptionsInterface(LoaderOptionsInterface):
def __init__(self, parent=None, filename=''):
self.filename = filename
self.parent = parent
QDialog.__init__(self, parent=parent)
self.ui = load_ui('list_of_scan_loader_dialog.ui', baseinstance=self)
self.init_widgets()
short_filename = os.path.basename(filename)
self.setWindowTitle("Options to load {}".format(short_filename))
self.parent.ascii_loader_option = None
class AsciiLoaderOptions(AsciiLoaderOptionsInterface):
def accept(self):
self.parent.ascii_loader_option = self.get_option_selected()
self.parent.load_ascii(filename=self.filename)
self.close()
class JsonLoader:
filename = ''
def __init__(self, parent=None, filename=''):
self.filename = filename
self.parent = parent
def _retrieve_element_dict(self, element='sample', source_row_entry={}):
_target_row_entry = {}
_source_entry = source_row_entry[element]
_target_row_entry["runs"] = _source_entry['Runs']
_target_row_entry["background"] = {}
_target_row_entry["background"]["runs"] = _source_entry['Background']["Runs"]
_target_row_entry["background"]["background"] = _source_entry["Background"]["Background"]["Runs"]
_target_row_entry["material"] = _source_entry["Material"]
_target_row_entry["mass_density"] = copy.deepcopy(_density_dict)
_target_row_entry["mass_density"]["mass_density"]["value"] = _source_entry["Density"]["MassDensity"]
_target_row_entry["mass_density"]["mass_density"]["selected"] = _source_entry["Density"]["UseMassDensity"]
_target_row_entry["mass_density"]["number_density"]["value"] = _source_entry["Density"]["NumberDensity"]
_target_row_entry["mass_density"]["number_density"]["selected"] = _source_entry["Density"]["UseNumberDensity"]
_target_row_entry["mass_density"]["mass"]["value"] = _source_entry["Density"]["Mass"]
_target_row_entry["mass_density"]["mass"]["selected"] = _source_entry["Density"]["UseMass"]
_target_row_entry["packing_fraction"] = _source_entry["PackingFraction"]
_target_row_entry["geometry"] = {}
_target_row_entry["geometry"]["shape"] = _source_entry["Geometry"]["Shape"]
_target_row_entry["geometry"]["radius"] = _source_entry["Geometry"]["Radius"]
_target_row_entry["geometry"]["radius2"] = _source_entry["Geometry"]["Radius2"]
_target_row_entry["geometry"]["height"] = _source_entry["Geometry"]["Height"]
_target_row_entry["abs_correction"] = _source_entry["AbsorptionCorrection"]["Type"]
_target_row_entry["multi_scattering_correction"] = _source_entry["MultipleScatteringCorrection"]["Type"]
_target_row_entry["placzek"] = copy.deepcopy(self.parent.placzek_default)
if "InelasticCorrection" in _source_entry:
_target_row_entry["inelastic_correction"] = _source_entry["InelasticCorrection"]["Type"]
_target_row_entry["placzek"]["order"]["index_selected"] = _source_entry["InelasticCorrection"]["Order"]
_target_row_entry["placzek"]["is_self"] = _source_entry["InelasticCorrection"]["Self"]
_target_row_entry["placzek"]["is_interference"] = _source_entry["InelasticCorrection"]["Interference"]
_target_row_entry["placzek"]["fit_spectrum_with"]["text"] = \
_source_entry["InelasticCorrection"]["FitSpectrumWith"]
lambda_binning_for_fit = _source_entry["InelasticCorrection"]["LambdaBinningForFit"].split(
",")
if len(lambda_binning_for_fit) == 3:
_target_row_entry["placzek"]["lambda_binning_for_fit"]["min"] = lambda_binning_for_fit[0]
_target_row_entry["placzek"]["lambda_binning_for_fit"]["delta"] = lambda_binning_for_fit[1]
_target_row_entry["placzek"]["lambda_binning_for_fit"]["max"] = lambda_binning_for_fit[2]
else:
default_placzek = self.parent.placzek_default["lambda_binning_for_fit"]
_target_row_entry["placzek"]["lambda_binning_for_fit"]["min"] = default_placzek["min"]
_target_row_entry["placzek"]["lambda_binning_for_fit"]["delta"] = default_placzek["delta"]
_target_row_entry["placzek"]["lambda_binning_for_fit"]["max"] = default_placzek["max"]
else:
_target_row_entry['inelastic_correction'] = None
return _target_row_entry
def load(self):
# load json
with open(self.filename) as f:
data = simplejson.load(f)
# convert into UI dictionary
list_keys = sorted([_key for _key in data.keys()])
table_dictionary = {}
first_entry = True
for _row in list_keys:
_source_row_entry = data[str(_row)]
            _row = int(_row)  # plain int: np.int is deprecated and behaves the same here
_target_row_entry = copy.deepcopy(_default_empty_row)
_target_row_entry["activate"] = _source_row_entry['Activate']
_target_row_entry["title"] = _source_row_entry['Title']
_target_row_entry["sample"] = self._retrieve_element_dict(
element='Sample', source_row_entry=_source_row_entry)
_target_row_entry["runs"] = _source_row_entry['Sample']['Runs']
_target_row_entry["normalization"] = self._retrieve_element_dict(
element='Normalization', source_row_entry=_source_row_entry)
_target_row_entry["align_and_focus_args"] = _source_row_entry.get(
"AlignAndFocusArgs", {})
table_dictionary[_row] = _target_row_entry
# load general settings of first entry only
if first_entry:
o_set = Set(parent=self.parent)
# short name of instrument (ex: NOM)
short_instrument_name = str(_source_row_entry['Instrument'])
o_set.set_instrument(short_name=short_instrument_name)
# name of facility (not used yet)
facility = str(_source_row_entry["Facility"])
self.parent.facility = facility
# cache and output dir
cache_folder = str(_source_row_entry["CacheDir"])
self.parent.cache_folder = cache_folder
output_dir = str(_source_row_entry["OutputDir"])
self.parent.output_folder = output_dir
calibration_file = str(
_source_row_entry["Calibration"]["Filename"])
self.parent.processing_ui.calibration_file.setText(
calibration_file)
intermediate_grouping_file = str(
_source_row_entry["Merging"]["Grouping"]["Initial"])
if not (intermediate_grouping_file == ''):
self.parent.intermediate_grouping['filename'] = intermediate_grouping_file
self.parent.intermediate_grouping['enabled'] = True
o_grouping = LoadGroupingFile(
filename=intermediate_grouping_file)
nbr_groups = o_grouping.get_number_of_groups()
self.parent.intermediate_grouping['nbr_groups'] = nbr_groups
output_grouping_file = str(
_source_row_entry["Merging"]["Grouping"]["Output"])
if not (output_grouping_file == ''):
self.parent.output_grouping['filename'] = output_grouping_file
self.parent.output_grouping['enabled'] = True
o_grouping = LoadGroupingFile(
filename=output_grouping_file)
nbr_groups = o_grouping.get_number_of_groups()
self.parent.output_grouping['nbr_groups'] = nbr_groups
first_entry = False
o_table_ui_loader = FromDictionaryToTableUi(parent=self.parent)
o_table_ui_loader.fill(input_dictionary=table_dictionary)
self.parent.ui.statusbar.setStyleSheet("color: blue")
self.parent.ui.statusbar.showMessage(
"File {} has been imported".format(
self.filename), self.parent.statusbar_display_time)
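# --- Hedged illustration (not part of the original ADDIE module) -------------
# _example_sample_entry sketches the JSON layout that JsonLoader._retrieve_element_dict
# expects for one 'Sample' or 'Normalization' block. The keys mirror the lookups
# performed above; the values themselves are made-up placeholders.
def _example_sample_entry():
    return {"Runs": "1234",
            "Background": {"Runs": "1230",
                           "Background": {"Runs": "1231"}},
            "Material": "Si",
            "Density": {"MassDensity": "2.33", "UseMassDensity": True,
                        "NumberDensity": "0.049", "UseNumberDensity": False,
                        "Mass": "1.0", "UseMass": False},
            "PackingFraction": "0.6",
            "Geometry": {"Shape": "Cylinder", "Radius": "0.3",
                         "Radius2": "N/A", "Height": "1.8"},
            "AbsorptionCorrection": {"Type": "None"},
            "MultipleScatteringCorrection": {"Type": "None"},
            "InelasticCorrection": {"Type": "Placzek", "Order": 0,
                                    "Self": True, "Interference": False,
                                    "FitSpectrumWith": "GaussConvCubicSpline",
                                    "LambdaBinningForFit": "0.16,0.04,2.8"}}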
class AsciiLoader:
filename = ''
file_contain = [] # raw file contain
table_dictionary = {}
def __init__(self, parent=None, filename=''):
self.filename = filename
self.parent = parent
def show_dialog(self):
o_dialog = AsciiLoaderOptions(
parent=self.parent, filename=self.filename)
o_dialog.show()
def load(self):
# options selected by user
options = self.parent.ascii_loader_option
if options is None:
return
filename = self.filename
o_file = FileHandler(filename=filename)
o_table = o_file.csv_parser()
list_runs = o_table['#Scan']
list_titles = o_table['title']
o_format = FormatAsciiList(list1=list_runs,
list2=list_titles)
# option 1
# keep raw title and merge lines with exact same title
if options == 1:
o_format.option1()
# option 2
# remove temperature part of title and merge lines with exact same
# title
elif options == 2:
o_format.option2()
# option 3
# keep raw title, append run number
elif options == 3:
o_format.option3()
# option 4
# take raw title, remove temperature part, add run number
elif options == 4:
o_format.option4()
else:
raise ValueError("Options nos implemented yet!")
list_runs = o_format.new_list1
list_titles = o_format.new_list2
_table_dictionary = {}
runs_titles = zip(list_runs, list_titles)
_index = 0
for [_run, _title] in runs_titles:
_entry = copy.deepcopy(_default_empty_row)
_entry['title'] = str(_title)
_entry['sample']['runs'] = str(_run)
_table_dictionary[_index] = _entry
_index += 1
self.table_dictionary = _table_dictionary
self.parent.ascii_loader_dictionary = _table_dictionary
o_table_ui_loader = FromDictionaryToTableUi(parent=self.parent)
o_table_ui_loader.fill(input_dictionary=_table_dictionary)
self.parent.ui.statusbar.setStyleSheet("color: blue")
self.parent.ui.statusbar.showMessage(
"File {} has been imported".format(
self.filename), self.parent.statusbar_display_time)
class FormatAsciiList:
    '''This class takes two lists as input. According to the option selected, list2 will be
    modified. Once it has been modified, if two elements are equal, the corresponding runs from
    list1 will be combined into a compact version
ex: list1 = ["1","2","3","4"]
list2 = ["sampleA at temperature 10C",
"sampleA at temperature 5C",
"sampleA at temperature 15C",
"sampleA at temperature 15C"]
options1: keep raw title and merge lines with exact same title
list1 = ["1", "2", "3,4"]
list2 = ["sampleA at temperature 10C",
"sampleA at temperature 5C",
"sampleA at temperature 15C"]
options2: remove temperature part of title and merge lines with exact same title
list1 = ["1-4"]
list2 = ["sampleA"]
options3: keep raw title, append run number
list1 = ["1", "2", "3,4"]
list2 = ["sampleA at temperature 10C_1",
"sampleA at temperature 5C_2",
"sampleA at temperature 15C_3,4"]
options4: take raw title, remove temperature part, add run number
list1 = ["1", "2", "3", "4"]
list2 = ["sampleA at temperature 10C_1",
"sampleA at temperature 5C_2",
"sampleA at temperature 15C_3",
"sampleA at temperature 15C_4"]
'''
new_list1 = []
new_list2 = []
def __init__(self, list1=[], list2=[]):
self.list1 = list1
self.list2 = list2
def __combine_identical_elements(self, check_list=[], combine_list=[]):
        '''This method combines the elements of combine_list according to
        matching (identical) entries in check_list
for example:
check_list = ["sampleA", "sampleB", "sampleB"]
combine_list = ["1", "2", "3"]
new_check_list = ["sampleA", "sampleB"]
new_combine_list = ["1", "2,3"]
'''
list2 = list(check_list)
list1 = list(combine_list)
final_list1 = []
final_list2 = []
while (list2):
element_list2 = list2.pop(0)
str_element_to_merge = str(list1.pop(0))
# find all indexes where element_list2 are identical
indices = [i for i, x in enumerate(list2) if x == element_list2]
if not (indices == []):
# remove all element already treated
for _index in indices:
list2[_index] = ''
clean_list2 = []
for _entry in list2:
if not (_entry == ''):
clean_list2.append(_entry)
list2 = clean_list2
list_element_to_merge = [str(list1[i]) for i in indices]
str_element_to_merge += "," + (",".join(list_element_to_merge))
o_combine = ListRunsParser(current_runs=str_element_to_merge)
str_element_to_merge = o_combine.new_runs()
for _index in indices:
list1[_index] = ''
clean_list1 = []
for _entry in list1:
if not (_entry == ''):
clean_list1.append(_entry)
list1 = clean_list1
final_list2.append(element_list2)
final_list1.append(str_element_to_merge)
return [final_list1, final_list2]
def __keep_string_before(self, list=[], splitter_string=""):
'''this function will split each element by the given splitter_string and will
only keep the string before that splitter
ex:
list = ["sampleA at temperature 150C", "sampleB at temperature 160C"]
splitter_string = "at temperature"
:return
["sampleA", "sampleB"]
'''
new_list = []
for _element in list:
split_element = _element.split(splitter_string)
element_to_keep = split_element[0].strip()
new_list.append(element_to_keep)
return new_list
def __convert_list_to_combine_version(self, list=[]):
'''this method is to make sure we are working on the combine version of the list of runs
examples:
list = ["1", "2,3,4,5"]
return:
["1", "2-5"]
'''
new_list = []
for _element in list:
o_parser = ListRunsParser(current_runs=str(_element))
_combine_element = o_parser.new_runs()
new_list.append(_combine_element)
return new_list
def __append_list1_to_list2(self, list1=[], list2=[]):
        '''Append to each element of list2 the list1 value at the same index
examples:
list1 = ["1", "2", "3", "4-6"]
list2 = ["Sample A", "Sample B", "Sample C", "Sample D"]
:returns
["Sample A_1", "Sample B_2", "Sample C_3", "Sample D_4-6"]
'''
new_list2 = [_ele2 + "_" + str(_ele1)
for _ele1, _ele2 in zip(list1, list2)]
# new_list2 = []
# for element1, element2 in zip(list1, list2):
# new_list2.append(list2 + "_" + str(element1))
return new_list2
def option1(self):
# keep raw title and merge lines with exact same title
[self.new_list1, self.new_list2] = self.__combine_identical_elements(
check_list=self.list2, combine_list=self.list1)
def option2(self):
# remove temperature part of title and merge lines with exact same
# title
clean_list2 = self.__keep_string_before(
list=self.list2, splitter_string=" at temperature")
[self.new_list1, self.new_list2] = self.__combine_identical_elements(
check_list=clean_list2, combine_list=self.list1)
def option3(self):
# keep raw title, append run number
combine_list1 = self.__convert_list_to_combine_version(list=self.list1)
list2_with_run_number = self.__append_list1_to_list2(
list1=combine_list1, list2=self.list2)
[self.new_list1, self.new_list2] = self.__combine_identical_elements(
check_list=list2_with_run_number, combine_list=self.list1)
def option4(self):
# take raw title, remove temperature part, add run number
clean_list2 = self.__keep_string_before(
list=self.list2, splitter_string=" at temperature")
combine_list1 = self.__convert_list_to_combine_version(list=self.list1)
list2_with_run_number = self.__append_list1_to_list2(
list1=combine_list1, list2=clean_list2)
[self.new_list1, self.new_list2] = self.__combine_identical_elements(
check_list=list2_with_run_number, combine_list=self.list1)
def apply_option(self, option=1):
if option == 1:
return self.option1()
elif option == 2:
return self.option2()
elif option == 3:
return self.option3()
elif option == 4:
return self.option4()
else:
raise NotImplementedError
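# --- Hedged usage sketch (hypothetical helper, not part of the ADDIE UI) -----
# Reproduces the "option 2" example from the FormatAsciiList docstring above:
# strip the " at temperature ..." suffix and merge rows whose cleaned titles
# match, compacting the run numbers via ListRunsParser (already imported by
# this module).
def _demo_format_ascii_list():
    list1 = ["1", "2", "3", "4"]
    list2 = ["sampleA at temperature 10C",
             "sampleA at temperature 5C",
             "sampleA at temperature 15C",
             "sampleA at temperature 15C"]
    o_format = FormatAsciiList(list1=list1, list2=list2)
    o_format.apply_option(option=2)
    print(o_format.new_list1)  # expected (per the docstring): ["1-4"]
    print(o_format.new_list2)  # expected (per the docstring): ["sampleA"]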
class TableFileLoader:
    '''This class takes a table config file and returns a dictionary the program can use to
    populate the table.
    For now, this loader accepts two different file formats: the old ASCII format and a new
    JSON format. The JSON format is the one used when exporting the table.
'''
def __init__(self, parent=None, filename=''):
if not os.path.exists(filename):
raise IOError("{} does not exist!".format(filename))
self.parent = parent
self.filename = filename
self.init_raw_dict()
def init_raw_dict(self):
_default_empty_row['sample']['placzek'] = self.parent.placzek_default
_default_empty_row['normalization']['placzek'] = self.parent.placzek_default
def display_dialog(self):
try:
# if extension is csv, use ascii loader
if FileHandler.is_file_correct_extension(
filename=self.filename, ext_requested='csv'): # ascii file
o_loader = AsciiLoader(
parent=self.parent, filename=self.filename)
o_loader.show_dialog()
# json file
elif FileHandler.is_file_correct_extension(filename=self.filename, ext_requested='json'):
o_loader = JsonLoader(
parent=self.parent, filename=self.filename)
o_loader.load()
else:
raise IOError(
"File format not supported for {}!".format(
self.filename))
self.parent.check_master_table_column_highlighting()
except ValueError:
self.parent.ui.statusbar.setStyleSheet("color: red")
self.parent.ui.statusbar.showMessage(
"Unable to load configuration file {}!".format(
self.filename), self.parent.statusbar_display_time)
except TypeError:
self.parent.ui.statusbar.setStyleSheet("color: red")
self.parent.ui.statusbar.showMessage(
"Error while trying to load file {}!".format(
self.filename), self.parent.statusbar_display_time)
class FromDictionaryToTableUi:
'''This class will take a dictionary especially designed for the master table to fill all the rows and cells'''
def __init__(self, parent=None):
self.parent = parent
self.table_ui = self.parent.processing_ui.h3_table
def fill(self, input_dictionary={}):
if input_dictionary == {}:
# # use for debugging
# input_dictionary = _dictionary_test
return
o_table = TableRowHandler(main_window=self.parent)
for _row_entry in input_dictionary.keys():
# insert row but also initialize the hidden arguments such as
# placzek settings
o_table.insert_row(
row=_row_entry,
align_and_focus_args=input_dictionary[_row_entry]['align_and_focus_args'],
normalization_placzek_arguments=input_dictionary[_row_entry]['normalization']['placzek'],
sample_placzek_arguments=input_dictionary[_row_entry]['sample']['placzek'])
self.populate_row(row=_row_entry,
entry=input_dictionary[_row_entry],
key=o_table.key)
def __fill_data_type(
self,
data_type="sample",
starting_col=1,
row=0,
entry={},
key=None):
column = starting_col
# run
self.table_ui.item(row, column).setText(entry[data_type]["runs"])
# background - runs
column += 1
self.table_ui.item(
row, column).setText(
entry[data_type]["background"]["runs"])
# background - background
column += 1
self.table_ui.item(
row, column).setText(
entry[data_type]["background"]["background"])
# material
column += 1
self.parent.master_table_list_ui[key][data_type]['material']['text'].setText(
entry[data_type]["material"])
# mass density
column += 1
self.parent.master_table_list_ui[key][data_type]['mass_density']['text'].setText(
entry[data_type]["mass_density"]["mass_density"]["value"])
# packing_fraction
column += 1
self.table_ui.item(
row, column).setText(str(entry[data_type]["packing_fraction"]))
# geometry - shape
column += 1
_requested_shape = entry[data_type]["geometry"]["shape"]
self.__set_combobox(
requested_value=_requested_shape,
row=row,
col=column)
# geometry
column += 1
self.parent.master_table_list_ui[key][data_type]['geometry']['radius']['value'].setText(
str(entry[data_type]['geometry']['radius']))
self.parent.master_table_list_ui[key][data_type]['geometry']['radius2']['value'].setText(
str(entry[data_type]['geometry']['radius2']))
self.parent.master_table_list_ui[key][data_type]['geometry']['height']['value'].setText(
str(entry[data_type]['geometry']['height']))
# abs correction
column += 1
_requested_correction = entry[data_type]["abs_correction"]
self.__set_combobox(
requested_value=_requested_correction,
row=row,
col=column)
# multi scattering correction
column += 1
_requested_scattering = entry[data_type]["multi_scattering_correction"]
self.__set_combobox(
requested_value=_requested_scattering,
row=row,
col=column)
# inelastic correction
column += 1
_requested_inelastic = entry[data_type]["inelastic_correction"]
self.__set_combobox(
requested_value=_requested_inelastic,
row=row,
col=column)
def __set_combobox(self, requested_value="", row=-1, col=-1):
_widget = self.table_ui.cellWidget(row, col).children()[1]
_index = _widget.findText(requested_value)
if _index == -1:
_index = 0
_widget.setCurrentIndex(_index)
def populate_row(self, row=-1, entry=None, key=None):
# activate
_status = QtCore.Qt.Checked if entry["activate"] else QtCore.Qt.Unchecked
_widget = self.table_ui.cellWidget(row, 0).children()[1]
_widget.setCheckState(_status)
# title
self.table_ui.item(row, 1).setText(entry["title"])
# sample
self.__fill_data_type(
data_type='sample',
starting_col=2,
row=row,
entry=entry,
key=key)
# normalization
self.__fill_data_type(
data_type='normalization',
starting_col=13,
row=row,
entry=entry,
key=key)
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/mantid/master_table/master_table_loader.py",
"copies": "1",
"size": "29420",
"license": "mit",
"hash": 346060110386451300,
"line_mean": 37.0595084088,
"line_max": 118,
"alpha_frac": 0.5706662135,
"autogenerated": false,
"ratio": 3.962823275862069,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5033489489362069,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import bha
data_path = os.path.join(bha.__path__[0], 'data')
#from .due import due, Doi
#
#
## Use duecredit (duecredit.org) to provide a citation to relevant work to
## be cited. This does nothing, unless the user has duecredit installed,
## And calls this with duecredit (as in `python -m duecredit script.py`):
#due.cite(Doi("10.1038/srep10532"),
# description="A novel brain partition highlights the modular skeleton shared by structure and function.",
# tags=["reference-implementation"],
# path='bha')
def crossmodularity(A, B, alpha, beta, T):
"""
Given two input (symmetrical) matrices A and B, this function
calculates the crossmodularity index X
Parameters
----------
    A : array
        square N*N matrix (typically a connectivity matrix), N being the number of ROIs
    B : array
        square N*N matrix (typically a connectivity matrix), N being the number of ROIs
    alpha : float
        arbitrary threshold used to binarize A (necessary for the similarity calculation)
    beta : float
        arbitrary threshold used to binarize B (necessary for the similarity calculation)
    T : array
        label vector: each element is an integer identifying the module the ROI belongs to
    Returns
    -------
    X : float
        crossmodularity
    Qa : float
        modularity of A associated with partition T
    Qb : float
        modularity of B associated with partition T
    L : float
        similarity between A and B
"""
# Get the different labels of the modules
labels = np.unique(T)
# For each module compute sorensen index
sorensen = np.zeros(len(labels))
indx_m = np.empty(0)
    for i, m in enumerate(labels):
        # Select the ROIs of each module and binarize the resulting matrices using alpha and beta
        indx_m = np.array(np.where(T == m))
        indx = np.ix_(indx_m[0], indx_m[0])
        bin_A = (A[indx] > alpha).astype(int)
        bin_B = (B[indx] > beta).astype(int)
        sorensen[i] = np.sum(2 * np.multiply(bin_A, bin_B)) / (np.sum(bin_A) + np.sum(bin_B))
# The total similarity is the mean similarity of all the modules
L = np.mean(sorensen)
# Compute the modularity index
Qa = modularity_index(np.absolute(A), T)
Qb = modularity_index(np.absolute(B), T)
# Compute the cross modularity
X = np.power((np.multiply(np.multiply(Qa, Qb), L)), 1/3)
return X, Qa, Qb, L
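# --- Hedged usage sketch (not part of the original bha module) ---------------
# Builds two toy symmetric "connectivity" matrices with an obvious two-module
# structure and runs crossmodularity on them; the thresholds and labels below
# are illustrative values only, not taken from the reference paper.
def _demo_crossmodularity():
    rng = np.random.RandomState(0)
    n = 10
    T = np.array([0] * 5 + [1] * 5)      # two modules of 5 ROIs each
    block = np.zeros((n, n))
    block[:5, :5] = 0.9
    block[5:, 5:] = 0.9
    A = block + 0.05 * rng.rand(n, n)
    A = (A + A.T) / 2.0                   # keep the matrix symmetric
    B = block + 0.05 * rng.rand(n, n)
    B = (B + B.T) / 2.0
    X, Qa, Qb, L = crossmodularity(A, B, alpha=0.5, beta=0.5, T=T)
    print('X=%.3f Qa=%.3f Qb=%.3f L=%.3f' % (X, Qa, Qb, L))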
def modularity_index(A, T):
"""
    A Newman spectral algorithm adapted from the Brain Connectivity Toolbox.
Original code: https://sites.google.com/site/bctnet/measures/list
Parameters
----------
    A : array
        square N*N matrix (typically a connectivity matrix), N being the number of ROIs
    T : array
        label vector: each element is an integer identifying the module the ROI belongs to
Returns
-------
Q : float
modularity index
"""
N = np.amax(np.shape(A)) # number of vertices
K = np.sum(A, axis = 0, keepdims=True ) # degree
m = np.sum(K) # number of edges (each undirected edge is counted twice)
B = A - np.divide(K.T.dot(K), m) # modularity matrix
if T.shape[0] == 1:
T= T.T
s = np.array([T,]*N).T # compute modularity
zero_idx = np.where((s - s.T)==0)
others_idx = np.where((s - s.T)!=0)
s[zero_idx] = 1
s[others_idx] = 0
Q = (s * B) / m
Q = np.sum(Q)
return Q
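# --- Hedged sanity check (illustrative, not from the reference implementation)
# Two disconnected 4-node cliques, partitioned along those cliques, give a
# clearly positive Newman modularity; working the formula by hand for this
# graph gives Q = 0.5.
def _demo_modularity_index():
    A = np.zeros((8, 8))
    A[:4, :4] = 1.0
    A[4:, 4:] = 1.0
    np.fill_diagonal(A, 0.0)              # no self-loops
    T = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    print('Q = %.3f' % modularity_index(A, T))   # -> Q = 0.500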
if __name__ == "__main__":
from bha.utils import fetch_bha_data
from scipy import spatial, cluster
if not os.path.exists(os.path.join(data_path, 'average_networks.npz')):
fetch_bha_data()
data = np.load('bha/data/average_networks.npz')
struct_network = data.f.struct_network
func_network = data.f.func_network
# These parameters are based on the reference paper
num_clusters = 20
alpha = 0.45
beta = 0.0
struct_network = struct_network / np.max(struct_network)
"""
    Functional dendrogram -> structure follows function
"""
Y = spatial.distance.pdist(func_network, metric='cosine')
Z = cluster.hierarchy.linkage(Y, method='weighted')
T = cluster.hierarchy.cut_tree(Z, n_clusters=num_clusters)
Xsf, Qff, Qsf, Lsf = crossmodularity(func_network, struct_network,
alpha, beta, T[:, 0])
"""
    Structural dendrogram -> function follows structure
X=1-struct_network
Y = zeros(1,size(X,1)*(size(X,1)-1)/2)
idxEnd=0
for i=1:size(X,1)-1
Y(idxEnd+1:idxEnd+length(X(i,i+1:end)))=X(i,i+1:end)
idxEnd=idxEnd+length(X(i,i+1:end))
end
Z = linkage(Y,'average')
H,T,permAll = dendrogram(Z,num_clusters,'colorthreshold',1000)
Xfs Qfs Qss Lfs =crossmodularity(func_network,struct_network,alpha,beta,T)
"""
| {
"repo_name": "christiancarballo/bha",
"path": "bha/bha.py",
"copies": "1",
"size": "5070",
"license": "mit",
"hash": -3981557117561869300,
"line_mean": 30.8867924528,
"line_max": 114,
"alpha_frac": 0.6291913215,
"autogenerated": false,
"ratio": 3.391304347826087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45204956693260867,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from lsst.utils import getPackageDir
from .phosim_cpu_pred import CpuPred
__all__ = ['OpSimOrdering']
class OpSimOrdering(object):
"""
Code to split the Twinkles 3 obsHistIDs into sets that will be ordered so
that we would try to do Twinkles_3p1 first, followed by Twinkles_3p2,
followed by Twinkles_3p3
Parameters
----------
opSimDBPath : absolute path to OpSim database
timeMax : float, unit of hours, default to 100.0
a threshold of time, such that any OpSim pointings with predictedPhoSim
times above that threshold will be dropped.
Attributes
----------
distinctGroup : list
unique combination of variables in which the records are grouped. The
variables are 'night' and 'filter'
timeMax : float,
max value of `predictedPhoSimTimes` in hours for a record for it to be
used in the calculation
filteredOpSim : `pd.DataFrame`
dataFrame representing the OpSim data with duplicate records dropped in
favor of the ones with propID ==54 (WFD) and any record that has a
`predictedPhoSimTimes` > `self.timeMax` dropped
    ignorePredictedTimes : Bool
        if True, predicted PhoSim run times are not used to filter the OpSim records
    cpuPred : instance of `sklearn.ensemble.forest.RandomForestRegressor`
        obtained from a pickle file if `self.ignorePredictedTimes` is False
    minimizeBy : string
        column of the OpSim table that is minimized to pick a single record per group
"""
def __init__(self, opSimDBPath,
randomForestPickle=None,
timeMax=100.0,
ignorePredictedTimes=False,
minimizeBy='predictedPhoSimTimes'):
"""
Parameters
----------
opSimDBPath : string, mandatory
absolute path to a sqlite OpSim database
randomForestPickle : string, defaults to None
absolute path to a pickle of an instance of
`sklearn.ensemble.forest.RandomForestRegressor`
        timeMax : float, defaults to 100.0
max value of predicted PhoSim Run times of selected OpSim records.
Records with predicted PhoSIm run times beyond this value are
filtered out of `filteredOpSim`
ignorePredictedTimes : Bool, defaults to False
If True, ignores predicted PhoSim Run times, and therefore does not
require a `randomForestPickle`
minimizeBy : string, defaults to `predictedPhoSimTimes`
the column of `OpSim` which is minimized to select a single record
from all the members of a group having the same values of `self.distinctGroup`.
An example for such a parameter would be `expMJD` which is by definition unique
for every pointing and thus a minimization is easy
"""
twinklesDir = getPackageDir('Twinkles')
self.ignorePredictedTimes = ignorePredictedTimes
self._opsimDF = self.fullOpSimDF(opSimDBPath)
self._opsimDF['year'] = self._opsimDF.night // 365
if randomForestPickle is None:
randomForestPickle = os.path.join(twinklesDir, 'data',
'RF_pickle.p')
if minimizeBy not in self._opsimDF.columns and minimizeBy != 'predictedPhoSimTimes':
            raise NotImplementedError(
                'minimizing by {} not implemented, try `expMJD`'.format(minimizeBy))
self.minimizeBy = minimizeBy
        # We don't need a pickle file if we ignore predicted times and minimize by another column
predictionsNotRequired = self.ignorePredictedTimes and minimizeBy != 'predictedPhoSimTimes'
if not predictionsNotRequired:
if not os.path.exists(randomForestPickle):
raise ValueError('pickle does not exist at {}'.format(randomForestPickle))
self.cpuPred = CpuPred(rf_pickle_file=randomForestPickle,
opsim_df=self._opsimDF,
fieldID=1427)
self._opsimDF['predictedPhoSimTimes'] = self.predictedTimes()
self.timeMax = timeMax
self.distinctGroup = ['night', 'filter']
def predictedTimes(self, obsHistIDs=None):
"""
        predicted time for `PhoSim` image simulation on a SLAC 'fell' CPU
in units of hours
Parameters
----------
obsHistIDs : float or sequence of integers, defaults to None
if None, obsHistIDs defaults to the sequence of obsHistIDs in
`self._opsimDF`
Returns
-------
`numpy.ndarray` of predicted PhoSim simulation times in hours
"""
# default obsHistIDs
if obsHistIDs is None:
obsHistIDs = self._opsimDF.reset_index()['obsHistID'].values
obsHistIds = np.ravel(obsHistIDs)
times = np.ones_like(obsHistIds) * np.nan
for i, obshistid in enumerate(obsHistIds):
times[i] = self.cpuPred(obshistid)
# convert to hours from seconds before return
return times / 3600.0
@property
def uniqueOpSimRecords(self):
"""
- drop duplicates in favor of propID for WFD
"""
pts = self._opsimDF.copy()
# Since the original SQL query ordered by propID, keep=first
# preferentially chooses the propID for WFD
pts.drop_duplicates(subset='obsHistID', inplace=True, keep='first')
return pts
@property
def filteredOpSim(self):
"""
dataframe dropping records from the unique set `self.uniqueOpSimRecords`
where the phoSim Runtime estimates exceeds threshold. If
`self.ignorePredictedTimes` is set to `True`, then this simply returns
`self.uniqueOpSimRecords`
"""
thresh = self.timeMax
if self.ignorePredictedTimes:
return self.uniqueOpSimRecords
else:
return self.uniqueOpSimRecords.query('predictedPhoSimTimes < @thresh')
@property
def opSimCols(self):
"""
columns in `filteredOpSim`
"""
return self.filteredOpSim.columns
@property
def obsHistIDsPredictedToTakeTooLong(self):
"""
obsHistIDs dropped from Twink_3p1, Twink_3p2, Twink_3p3 because the
estimated phoSim run time is too long in the form a dataframe with
column headers `obsHistID` and `predictedPhoSimTimes`.
This returns None, if no obsHistIds are missing due to their
predictedPhoSimRunTime being too long
"""
if self.ignorePredictedTimes:
return None
filteredObsHistID = \
tuple(self.filteredOpSim.reset_index().obsHistID.values.tolist())
missing = self.uniqueOpSimRecords.query('obsHistID not in @filteredObsHistID')
if len(missing) > 0:
return missing[['obsHistID', 'expMJD', 'predictedPhoSimTimes', 'filter', 'propID']]
else:
return None
@property
def Twinkles_WFD(self):
"""
return a dataframe with all the visits for each unique combination with
the lowest propID (all WFD visits or all DDF visits) in each unique
combination
"""
groupDistinct = self.filteredOpSim.groupby(self.distinctGroup)
gdf = groupDistinct[self.opSimCols].agg(dict(propID=min))
idx = gdf.propID.obsHistID.values
df = self.filteredOpSim.set_index('obsHistID').ix[idx].sort_values(by='expMJD')
return df.reset_index()
@property
def Twinkles_3p1(self):
"""
for visits selected in Twinkles_WFD, pick the visit in each unique
combination with the lowest value of the `predictedPhoSimTimes`
"""
groupDistinct = self.Twinkles_WFD.groupby(self.distinctGroup)
# The variable we are minimizing by
discVar = self.minimizeBy
        gdf = groupDistinct[self.opSimCols].agg({discVar: min})
        idx = gdf[discVar].obsHistID.values
df = self.filteredOpSim.set_index('obsHistID').ix[idx]
return df.sort_values(by='expMJD', inplace=False).reset_index()
@property
def Twinkles_3p1b(self):
"""
dataframe containing those WFD visits that are part of `Twinkles_WFD` and not
covered in `self.Twinkles_3p1` (for example on nights when there are
multiple WFD visits in the same filter)
"""
doneObsHist = tuple(self.Twinkles_3p1.obsHistID.values.tolist())
query = 'obsHistID not in @doneObsHist and propID == 54'
df = self.filteredOpSim.query(query).sort_values(by='expMJD',
inplace=False)
return df
@property
def Twinkles_3p2(self):
"""
dr5 Observations that are in `filteredOpSim` and have not been done in
Twinkles_3p1
"""
obs_1 = self.Twinkles_3p1.obsHistID.values.tolist()
obs_1b = self.Twinkles_3p1b.obsHistID.values.tolist()
doneObsHist = tuple(obs_1 + obs_1b)
query = 'year == 4 and obsHistID not in @doneObsHist'
return self.filteredOpSim.query(query).sort_values(by='expMJD',
inplace=False)
@property
def Twinkles_3p3(self):
"""
dataFrame of visit obsHistID for Run 3p3. These are DDF visits
that have `predictedPhoSimRunTime` smaller than `maxtime`, and
were not covered in either the set of unique visits covered in
Run 3.1 or the visits in a particular year covered as part of
3.2
"""
obs_1 = self.Twinkles_3p1.obsHistID.values.tolist()
obs_1b = self.Twinkles_3p1b.obsHistID.values.tolist()
obs_2 = self.Twinkles_3p2.obsHistID.values.tolist()
        obs = tuple(obs_1 + obs_1b + obs_2)
query = 'obsHistID not in @obs'
return self.filteredOpSim.query(query).sort_values(by='expMJD',
inplace=False)
@property
def Twinkles_3p4(self):
"""
        tuple of dataFrames of the WFD and DDF visits left out of Runs 3.1,
        3.2 and 3.3 because of their high predicted PhoSim run times, each
        ordered by expMJD.
"""
leftovers = self.obsHistIDsPredictedToTakeTooLong
wfdvisits = leftovers.query('propID == 54')
wfdvisits.sort_values(by='expMJD', inplace=True)
ddfvisits = leftovers.query('propID == 56')
ddfvisits.sort_values(by='expMJD', inplace=True)
return wfdvisits, ddfvisits
@staticmethod
def fullOpSimDF(opsimdbpath,
query="SELECT * FROM Summary WHERE FieldID == 1427 ORDER BY PROPID"):
engine = create_engine('sqlite:///' + opsimdbpath)
pts = pd.read_sql_query(query, engine)
return pts
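# --- Hedged sketch (hypothetical helper, not part of the Twinkles package) ---
# Illustrates the selection rule behind Twinkles_3p1 without needing an OpSim
# sqlite database or the CPU-time pickle: for every (night, filter) group keep
# the single visit that minimizes the chosen column.  `df` is any DataFrame
# with 'night', 'filter', 'expMJD' and 'obsHistID' columns.
def _demo_pick_one_visit_per_group(df, minimize_by='expMJD'):
    idx = df.groupby(['night', 'filter'])[minimize_by].idxmin()
    return df.loc[idx].sort_values(by='expMJD')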
| {
"repo_name": "LSSTDESC/Twinkles",
"path": "python/desc/twinkles/obsHistIDOrdering.py",
"copies": "2",
"size": "10882",
"license": "mit",
"hash": 642907513084858600,
"line_mean": 40.3764258555,
"line_max": 103,
"alpha_frac": 0.6285609263,
"autogenerated": false,
"ratio": 4.014016967908521,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003054635788344446,
"num_lines": 263
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
# Define DB information
BASE_PATH = 'D:/DB/IQA/LIVE/LIVE IQA DB'
LIST_FILE_NAME = 'LIVE_IQA.txt'
ALL_SCENES = list(range(29))
ALL_DIST_TYPES = list(range(5))
def make_image_list(scenes, dist_types=None, show_info=True):
"""
Make image list from LIVE database
LIVE: 29 reference images x 5 distortions
(jpeg2000: 227 / jpeg: 233 / white_noise: 174 /
gaussian_blur: 174 / fast_fading: 174)
"""
# Get reference / distorted image file lists:
# d_img_list and score_list
d_img_list, r_img_list, r_idx_list, score_list = [], [], [], []
list_file_name = LIST_FILE_NAME
with open(list_file_name, 'r') as listFile:
for line in listFile:
            # scn_idx dis_idx ref_name dist_name DMOS width height
scn_idx, dis_idx, ref, dis, score, width, height = line.split()
scn_idx = int(scn_idx)
dis_idx = int(dis_idx)
if scn_idx in scenes and dis_idx in dist_types:
d_img_list.append(dis)
r_img_list.append(ref)
r_idx_list.append(scn_idx)
score_list.append(float(score))
score_list = np.array(score_list, dtype='float32')
    # DMOS -> reverse subjective scores by default
score_list = 1.0 - score_list
n_images = len(d_img_list)
dist_names = ['jp2k', 'jpeg', 'wn', 'gblur', 'fastfading']
if show_info:
scenes.sort()
print(' - Scenes: %s' % ', '.join([str(i) for i in scenes]))
print(' - Distortion types: %s' % ', '.join(
[dist_names[idx] for idx in dist_types]))
print(' - Number of images: {:,}'.format(n_images))
print(' - DMOS range: [{:.2f}, {:.2f}]'.format(
np.min(score_list), np.max(score_list)), end='')
print(' (Scale reversed)')
return {
'scenes': scenes,
'dist_types': dist_types,
'base_path': BASE_PATH,
'n_images': n_images,
'd_img_list': d_img_list,
'r_img_list': r_img_list,
'r_idx_list': r_idx_list,
'score_list': score_list}
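# --- Hedged usage sketch (not part of the original loader) -------------------
# Fabricates a two-line list file in the "scn_idx dis_idx ref dis score width
# height" layout parsed above and runs make_image_list on it.  File names and
# scores below are made up for illustration.
def _demo_make_image_list():
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    cwd = os.getcwd()
    try:
        os.chdir(tmp_dir)
        with open(LIST_FILE_NAME, 'w') as f:
            f.write('0 0 refimgs/r0.bmp jp2k/img1.bmp 0.25 768 512\n')
            f.write('1 1 refimgs/r1.bmp jpeg/img2.bmp 0.50 768 512\n')
        info = make_image_list(scenes=[0, 1], dist_types=ALL_DIST_TYPES,
                               show_info=False)
        print(info['n_images'], info['score_list'])   # -> 2 [0.75 0.5]
    finally:
        os.chdir(cwd)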
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/data_load/LIVE.py",
"copies": "1",
"size": "2166",
"license": "mit",
"hash": 5657474054917497000,
"line_mean": 34.5081967213,
"line_max": 75,
"alpha_frac": 0.5692520776,
"autogenerated": false,
"ratio": 3.081081081081081,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4150333158681081,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
# Define DB information
BASE_PATH = 'D:/DB/IQA/TID2008'
LIST_FILE_NAME = 'TID2008.txt'
ALL_SCENES = list(range(24))
# ALL_SCENES = list(range(25))
ALL_DIST_TYPES = list(range(17))
def make_image_list(scenes, dist_types=None, show_info=True):
"""
Make image list from TID2008 database
TID2008: 25 reference images x 17 distortions x 4 levels
"""
# Get reference / distorted image file lists:
# d_img_list and score_list
d_img_list, r_img_list, score_list = [], [], []
list_file_name = LIST_FILE_NAME
with open(list_file_name, 'r') as listFile:
for line in listFile:
# ref_idx ref_name dist_name dist_types, DMOS
(scn_idx, dis_idx, ref, dis, score) = line.split()
scn_idx = int(scn_idx)
dis_idx = int(dis_idx)
if scn_idx in scenes and dis_idx in dist_types:
d_img_list.append(dis)
r_img_list.append(ref)
score_list.append(float(score))
score_list = np.array(score_list, dtype='float32')
n_images = len(d_img_list)
if show_info:
print(' - Scenes: %s' % ', '.join([str(i) for i in scenes]))
print(' - Distortion types: %s' % ', '.join(
[str(i) for i in dist_types]))
print(' - Number of images: {:,}'.format(n_images))
print(' - MOS range: [{:.2f}, {:.2f}]'.format(
np.min(score_list), np.max(score_list)))
return {
'scenes': scenes,
'dist_types': dist_types,
'base_path': BASE_PATH,
'n_images': n_images,
'd_img_list': d_img_list,
'r_img_list': r_img_list,
'score_list': score_list}
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/data_load/TID2008.py",
"copies": "1",
"size": "1761",
"license": "mit",
"hash": -9135426908207386000,
"line_mean": 32.8653846154,
"line_max": 68,
"alpha_frac": 0.5718341851,
"autogenerated": false,
"ratio": 3.0894736842105264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41613078693105265,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
# Define DB information
BASE_PATH = 'D:/DB/IQA/TID2013'
LIST_FILE_NAME = 'TID2013.txt'
ALL_SCENES = list(range(24))
# ALL_SCENES = list(range(25))
ALL_DIST_TYPES = list(range(24))
def make_image_list(scenes, dist_types=None, show_info=True):
"""
Make image list from TID2013 database
TID2013: 25 reference images x 24 distortions x 5 levels
"""
# Get reference / distorted image file lists:
# d_img_list and score_list
d_img_list, r_img_list, score_list = [], [], []
list_file_name = LIST_FILE_NAME
with open(list_file_name, 'r') as listFile:
for line in listFile:
# ref_idx ref_name dist_name dist_types, DMOS
(scn_idx, dis_idx, ref, dis, score) = line.split()
scn_idx = int(scn_idx)
dis_idx = int(dis_idx)
if scn_idx in scenes and dis_idx in dist_types:
d_img_list.append(dis)
r_img_list.append(ref)
score_list.append(float(score))
score_list = np.array(score_list, dtype='float32')
n_images = len(d_img_list)
if show_info:
print(' - Scenes: %s' % ', '.join([str(i) for i in scenes]))
print(' - Distortion types: %s' % ', '.join(
[str(i) for i in dist_types]))
print(' - Number of images: {:,}'.format(n_images))
print(' - MOS range: [{:.2f}, {:.2f}]'.format(
np.min(score_list), np.max(score_list)))
return {
'scenes': scenes,
'dist_types': dist_types,
'base_path': BASE_PATH,
'n_images': n_images,
'd_img_list': d_img_list,
'r_img_list': r_img_list,
'score_list': score_list}
| {
"repo_name": "jongyookim/IQA_BIECON_release",
"path": "IQA_BIECON_release/data_load/TID2013.py",
"copies": "1",
"size": "1761",
"license": "mit",
"hash": 5494124675876890000,
"line_mean": 32.8653846154,
"line_max": 68,
"alpha_frac": 0.5718341851,
"autogenerated": false,
"ratio": 3.0894736842105264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9065154023156681,
"avg_score": 0.019230769230769232,
"num_lines": 52
} |
from __future__ import absolute_import, division, print_function
import os
import os.path as op
import sys
import six
def ask_for_export_email():
"""Ask for a registered email address."""
print('You have not set the email variable at the top of this script.')
print('Please set this variable in the script, or enter it below. Note')
print('that you need to register your email at JSOC first. You can do')
print('this at: http://jsoc.stanford.edu/ajax/register_email.html')
try:
email = six.moves.input('\nPlease enter a REGISTERED email address: ')
except EOFError:
email = ''
print()
return email
def get_export_email():
"""
Get export email address from the JSOC_EXPORT_EMAIL environ variable, or
    ask the user to enter their address if JSOC_EXPORT_EMAIL is not set.
"""
email = os.environ.get('JSOC_EXPORT_EMAIL')
return email if email is not None else ask_for_export_email()
def python_path_prepend(reldir):
"""Prepend relative path to the Python import path list."""
absdir = op.abspath(op.join(op.dirname(__file__), reldir))
sys.path.insert(0, absdir)
def is_drms_package_directory(path):
"""Check if the given path is a directory containing the drms package."""
if not op.isdir(path):
return False
init_fpath = op.join(path, '__init__.py')
if not op.isfile(init_fpath):
return False
client_fpath = op.join(path, 'client.py')
if not op.isfile(client_fpath):
return False
try:
code = open(client_fpath).read()
except IOError:
return False
for s in ['class Client', 'def series', 'def query', 'def export']:
if s not in code:
return False
return True
# If the parent directory contains the drms package, then we assume that we
# are in the drms source directory and add the parent directory to the top
# of the Python import path to make sure that this version of the drms package
# is imported instead of any other installed version.
if is_drms_package_directory(op.join(op.dirname(__file__), '..', 'drms')):
python_path_prepend('..')
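# --- Hedged self-check (not part of the upstream example helpers) ------------
# An empty temporary directory is not a drms package directory, and
# get_export_email only prompts when the JSOC_EXPORT_EMAIL variable is unset.
if __name__ == '__main__':
    import tempfile
    print(is_drms_package_directory(tempfile.mkdtemp()))        # -> False
    print('JSOC_EXPORT_EMAIL set:',
          os.environ.get('JSOC_EXPORT_EMAIL') is not None)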
| {
"repo_name": "kbg/drms",
"path": "examples/example_helpers.py",
"copies": "1",
"size": "2150",
"license": "mit",
"hash": 1763578232245692000,
"line_mean": 31.0895522388,
"line_max": 78,
"alpha_frac": 0.6688372093,
"autogenerated": false,
"ratio": 3.8053097345132745,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9974146943813275,
"avg_score": 0,
"num_lines": 67
} |
from __future__ import absolute_import, division, print_function
import os
import os.path
import time
import glob
from fcntl import flock, LOCK_EX, LOCK_UN
from threading import Event
from .text import TextLog
class TimeRotatingLog(TextLog):
"""
A type of log which writes records as individual lines to a series of
files, with one file per hour of time in which events occur. Files are
written atomically (using ``fcntl.flock``) and only appended to.
"""
sleep_delay = 0.5
def __init__(self, path):
self.path = path
self.current_log_name = None
self.killed = Event()
self.f = None
def create_dirs(self):
dirpath = os.path.dirname(self.path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
def log_name_for(self, ts):
ts = int(ts)
start = ts - (ts % 3600)
return '%s.%s' % (self.path, start)
def write(self, *records):
check_log_name = self.log_name_for(time.time())
if check_log_name != self.current_log_name:
self.current_log_name = check_log_name
self.create_dirs()
if self.f:
self.f.close()
self.f = open(self.current_log_name, 'ab')
data = [self.format(r) for r in records]
for r in data:
assert b'\n' not in r, '\\n found in %r' % r
        data.append(b'')  # to get the final \n
data = b'\n'.join(data)
flock(self.f, LOCK_EX)
self.f.write(data)
self.f.flush()
flock(self.f, LOCK_UN)
def live_iter_glob(self, start_file):
"""
Yield an infinite iterator of the available log file names. If there
are no new files to yield, just re-yields the most recent one.
"""
last_consumed = None
while True:
fnames = glob.glob('%s.[0-9]*' % self.path)
fnames.sort()
# Crop fnames to start at ``start_file`` if it is supplied.
if start_file and (start_file in fnames):
fnames = fnames[fnames.index(start_file):]
fresh_files = False
for fn in fnames:
if (not last_consumed) or (fn > last_consumed):
fresh_files = True
last_consumed = fn
yield fn
if not fresh_files:
if fnames:
yield fnames[-1]
elif not self.killed.is_set():
time.sleep(self.sleep_delay)
else:
break
def tail_glob(self, start_file, start_offset):
"""
Return an iterator over all the matching log files, yielding a line at
a time. At the end of all available files, poll the last file for new
lines and look for new files. If a new file is created, abandon the
previous file and follow that one.
"""
fnames = self.live_iter_glob(start_file=start_file)
this_file = next(fnames)
f = open(this_file, 'rb')
if start_offset:
f.seek(start_offset)
while True:
start = f.tell()
line = f.readline()
if not line:
next_file = next(fnames)
if next_file != this_file:
this_file = next_file
f = open(this_file, 'rb')
elif not self.killed.is_set():
time.sleep(self.sleep_delay)
f.seek(start)
else:
break
else:
pointer = '%s:%d' % (this_file, f.tell())
yield line, pointer
def process(self, process_from=None, stay_alive=False, killed_event=None):
if not stay_alive:
self.killed.set()
if killed_event:
self.killed = killed_event
if process_from:
start_file, start_offset = process_from.rsplit(':', 1)
start_offset = int(start_offset)
else:
start_file = start_offset = None
for line, pointer in self.tail_glob(start_file=start_file,
start_offset=start_offset):
yield self.parse(line.strip()), pointer
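# --- Hedged illustration (hypothetical helper, not in the upstream module) ---
# Records are bucketed into one file per hour, so any timestamp inside the same
# hour maps to the same "<path>.<hour_start>" file name; nothing below touches
# the filesystem.
def _demo_log_name_for():
    log = TimeRotatingLog('/tmp/demo/events')
    assert log.log_name_for(7200) == '/tmp/demo/events.7200'
    assert log.log_name_for(7200 + 1799) == '/tmp/demo/events.7200'
    assert log.log_name_for(7200 + 3600) == '/tmp/demo/events.10800'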
| {
"repo_name": "storborg/manhattan",
"path": "manhattan/log/timerotating.py",
"copies": "1",
"size": "4291",
"license": "mit",
"hash": -6828685286597927000,
"line_mean": 32.2635658915,
"line_max": 78,
"alpha_frac": 0.5292472617,
"autogenerated": false,
"ratio": 3.9621421975992615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49913894592992614,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import re
import logging
from watchdog.observers import Observer as FSObserver # Auto-detect best fs event api according to OS
from watchdog.observers.polling import PollingObserver
from watchdog.events import RegexMatchingEventHandler, FileSystemEventHandler
from watchdog.events import EVENT_TYPE_CREATED, EVENT_TYPE_MODIFIED, EVENT_TYPE_DELETED, EVENT_TYPE_MOVED
import fcntl
from .registry import register
from .run import run_task
logger = logging.getLogger(__name__)
class Observer():
polling_observer = PollingObserver()
default_observer = FSObserver()
def start():
Observer.polling_observer.start()
Observer.default_observer.start()
def stop():
Observer.polling_observer.stop()
Observer.default_observer.stop()
def join():
Observer.polling_observer.join()
Observer.default_observer.join()
@register('file', start, stop, join)
def file_trigger(job_name, task_queue, path, events, patterns, **kwargs):
class Handler(FileSystemEventHandler):
def __init__(self, basepath, regexes=[r".*"]):
self._basepath = basepath
self._regexes = [re.compile(r) for r in regexes]
@staticmethod
def run_task(event_path):
logger.debug('running task %s for %s' % (job_name, event_path))
environment = os.environ
environment["FILENAME"] = event_path
task_queue.put((job_name, dict(environment), kwargs))
def should_wait_for_unlock(self, path):
if "unlocked_flock" in events:
try:
logger.debug('trying to open %s (flock)' % path)
with open(path, 'r') as lock_file:
logger.debug('trying to acquire lock')
fcntl.flock(lock_file, fcntl.LOCK_EX)
logger.debug('lock acquired')
except IOError as e:
logger.debug('%s is locked (%d)' % (path, e.errno))
return True
if "unlocked_lockf" in events:
try:
logger.debug('trying to open %s (lockf)' % path)
with open(path, 'rb+') as lock_file:
logger.debug('trying to acquire lock')
fcntl.lockf(lock_file, fcntl.LOCK_EX)
logger.debug('lock acquired')
except OSError as e:
logger.debug('%s is locked (%d)' % (path, e.errno))
return True
return False
def on_created(self, event):
if "create" in events:
logger.debug('%s was created' % event.src_path)
relative_path = os.path.relpath(event.src_path, start=self._basepath)
if any(r.match(relative_path) for r in self._regexes):
if self.should_wait_for_unlock(event.src_path):
return
self.run_task(event.src_path)
def on_deleted(self, event):
if "delete" in events:
logger.debug('%s was deleted' % event.src_path)
relative_path = os.path.relpath(event.src_path, start=self._basepath)
if any(r.match(relative_path) for r in self._regexes):
self.run_task(event.src_path)
def on_modified(self, event):
if "modify" in events:
logger.debug('%s was modified' % event.src_path)
relative_path = os.path.relpath(event.src_path, start=self._basepath)
if any(r.match(relative_path) for r in self._regexes):
if self.should_wait_for_unlock(event.src_path):
return
self.run_task(event.src_path)
def on_moved(self, event):
if "movefrom" in events or "moveto" in events:
logger.debug('%s was moved to %s' % (event.src_path, event.dest_path))
if "movefrom" in events:
relative_path = os.path.relpath(event.src_path, start=self._basepath)
if any(r.match(relative_path) for r in self._regexes):
self.run_task(event.src_path)
if "moveto" in events:
relative_path = os.path.relpath(event.dest_path, start=self._basepath)
if any(r.match(relative_path) for r in self._regexes):
self.run_task(event.dest_path)
operating_system = os.uname()
if 'Linux' in operating_system:
file_system = os.popen('stat -f -c %%T -- %s' % path).read()
        # If the file system is ext2/3/4 use inotify, otherwise fall back to polling
if file_system.startswith('ext'):
_observer = Observer.default_observer
else:
_observer = Observer.polling_observer
else:
_observer = Observer.default_observer
_observer.schedule(Handler(path, regexes=patterns), path, recursive=True)
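# --- Hedged usage sketch (hypothetical helper, not part of legato) -----------
# Wires a trigger that enqueues the 'demo-job' task whenever a .csv file is
# created under `path`, then drives the shared observers via the module-level
# start/stop/join registered above.  This assumes the @register decorator
# returns file_trigger unchanged; the job name and pattern are example values.
def _demo_file_trigger(path, task_queue, do_work):
    file_trigger('demo-job', task_queue, path,
                 events=['create'], patterns=[r'.*\.csv$'])
    start()
    try:
        do_work()   # create files under `path`; matching ones enqueue tasks
    finally:
        stop()
        join()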
| {
"repo_name": "stcorp/legato",
"path": "legato/filesystem.py",
"copies": "1",
"size": "5056",
"license": "bsd-3-clause",
"hash": -2661348680554804000,
"line_mean": 38.811023622,
"line_max": 105,
"alpha_frac": 0.5719936709,
"autogenerated": false,
"ratio": 4.035115722266561,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.510710939316656,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import mock
import pytest
from _pytest.mark import (
MarkGenerator as Mark,
ParameterSet,
transfer_markers,
EMPTY_PARAMETERSET_OPTION,
)
from _pytest.nodes import Node
ignore_markinfo = pytest.mark.filterwarnings(
"ignore:MarkInfo objects:_pytest.deprecated.RemovedInPytest4Warning"
)
class TestMark(object):
def test_markinfo_repr(self):
from _pytest.mark import MarkInfo, Mark
m = MarkInfo.for_mark(Mark("hello", (1, 2), {}))
repr(m)
@pytest.mark.parametrize("attr", ["mark", "param"])
@pytest.mark.parametrize("modulename", ["py.test", "pytest"])
def test_pytest_exists_in_namespace_all(self, attr, modulename):
module = sys.modules[modulename]
assert attr in module.__all__
def test_pytest_mark_notcallable(self):
mark = Mark()
pytest.raises((AttributeError, TypeError), mark)
def test_mark_with_param(self):
def some_function(abc):
pass
class SomeClass(object):
pass
assert pytest.mark.fun(some_function) is some_function
assert pytest.mark.fun.with_args(some_function) is not some_function
assert pytest.mark.fun(SomeClass) is SomeClass
assert pytest.mark.fun.with_args(SomeClass) is not SomeClass
def test_pytest_mark_name_starts_with_underscore(self):
mark = Mark()
pytest.raises(AttributeError, getattr, mark, "_some_name")
def test_pytest_mark_bare(self):
mark = Mark()
def f():
pass
mark.hello(f)
assert f.hello
@ignore_markinfo
def test_pytest_mark_keywords(self):
mark = Mark()
def f():
pass
mark.world(x=3, y=4)(f)
assert f.world
assert f.world.kwargs["x"] == 3
assert f.world.kwargs["y"] == 4
@ignore_markinfo
def test_apply_multiple_and_merge(self):
mark = Mark()
def f():
pass
mark.world
mark.world(x=3)(f)
assert f.world.kwargs["x"] == 3
mark.world(y=4)(f)
assert f.world.kwargs["x"] == 3
assert f.world.kwargs["y"] == 4
mark.world(y=1)(f)
assert f.world.kwargs["y"] == 1
assert len(f.world.args) == 0
@ignore_markinfo
def test_pytest_mark_positional(self):
mark = Mark()
def f():
pass
mark.world("hello")(f)
assert f.world.args[0] == "hello"
mark.world("world")(f)
@ignore_markinfo
def test_pytest_mark_positional_func_and_keyword(self):
mark = Mark()
def f():
raise Exception
m = mark.world(f, omega="hello")
def g():
pass
assert m(g) == g
assert g.world.args[0] is f
assert g.world.kwargs["omega"] == "hello"
@ignore_markinfo
def test_pytest_mark_reuse(self):
mark = Mark()
def f():
pass
w = mark.some
w("hello", reason="123")(f)
assert f.some.args[0] == "hello"
assert f.some.kwargs["reason"] == "123"
def g():
pass
w("world", reason2="456")(g)
assert g.some.args[0] == "world"
assert "reason" not in g.some.kwargs
assert g.some.kwargs["reason2"] == "456"
def test_marked_class_run_twice(testdir, request):
"""Test fails file is run twice that contains marked class.
See issue#683.
"""
py_file = testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('abc', [1, 2, 3])
class Test1(object):
def test_1(self, abc):
assert abc in [1, 2, 3]
"""
)
file_name = os.path.basename(py_file.strpath)
rec = testdir.inline_run(file_name, file_name)
rec.assertoutcome(passed=6)
def test_ini_markers(testdir):
testdir.makeini(
"""
[pytest]
markers =
a1: this is a webtest marker
a2: this is a smoke marker
"""
)
testdir.makepyfile(
"""
def test_markers(pytestconfig):
markers = pytestconfig.getini("markers")
print (markers)
assert len(markers) >= 2
assert markers[0].startswith("a1:")
assert markers[1].startswith("a2:")
"""
)
rec = testdir.inline_run()
rec.assertoutcome(passed=1)
def test_markers_option(testdir):
testdir.makeini(
"""
[pytest]
markers =
a1: this is a webtest marker
a1some: another marker
nodescription
"""
)
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(
["*a1*this is a webtest*", "*a1some*another marker", "*nodescription*"]
)
def test_ini_markers_whitespace(testdir):
testdir.makeini(
"""
[pytest]
markers =
a1 : this is a whitespace marker
"""
)
testdir.makepyfile(
"""
import pytest
@pytest.mark.a1
def test_markers():
assert True
"""
)
rec = testdir.inline_run("--strict", "-m", "a1")
rec.assertoutcome(passed=1)
def test_marker_without_description(testdir):
testdir.makefile(
".cfg",
setup="""
[tool:pytest]
markers=slow
""",
)
testdir.makeconftest(
"""
import pytest
pytest.mark.xfail('FAIL')
"""
)
ftdir = testdir.mkdir("ft1_dummy")
testdir.tmpdir.join("conftest.py").move(ftdir.join("conftest.py"))
rec = testdir.runpytest_subprocess("--strict")
rec.assert_outcomes()
def test_markers_option_with_plugin_in_current_dir(testdir):
testdir.makeconftest('pytest_plugins = "flip_flop"')
testdir.makepyfile(
flip_flop="""\
def pytest_configure(config):
config.addinivalue_line("markers", "flip:flop")
def pytest_generate_tests(metafunc):
try:
mark = metafunc.function.flipper
except AttributeError:
return
metafunc.parametrize("x", (10, 20))"""
)
testdir.makepyfile(
"""\
import pytest
@pytest.mark.flipper
def test_example(x):
assert x"""
)
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(["*flip*flop*"])
def test_mark_on_pseudo_function(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.r(lambda x: 0/0)
def test_hello():
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_strict_prohibits_unregistered_markers(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.unregisteredmark
def test_hello():
pass
"""
)
result = testdir.runpytest("--strict")
assert result.ret != 0
result.stdout.fnmatch_lines(["*unregisteredmark*not*registered*"])
@pytest.mark.parametrize(
"spec",
[
("xyz", ("test_one",)),
("xyz and xyz2", ()),
("xyz2", ("test_two",)),
("xyz or xyz2", ("test_one", "test_two")),
],
)
def test_mark_option(spec, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.xyz
def test_one():
pass
@pytest.mark.xyz2
def test_two():
pass
"""
)
opt, passed_result = spec
rec = testdir.inline_run("-m", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize(
"spec", [("interface", ("test_interface",)), ("not interface", ("test_nointer",))]
)
def test_mark_option_custom(spec, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_collection_modifyitems(items):
for item in items:
if "interface" in item.nodeid:
item.add_marker(pytest.mark.interface)
"""
)
testdir.makepyfile(
"""
def test_interface():
pass
def test_nointer():
pass
"""
)
opt, passed_result = spec
rec = testdir.inline_run("-m", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize(
"spec",
[
("interface", ("test_interface",)),
("not interface", ("test_nointer", "test_pass")),
("pass", ("test_pass",)),
("not pass", ("test_interface", "test_nointer")),
],
)
def test_keyword_option_custom(spec, testdir):
testdir.makepyfile(
"""
def test_interface():
pass
def test_nointer():
pass
def test_pass():
pass
"""
)
opt, passed_result = spec
rec = testdir.inline_run("-k", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize(
"spec",
[
("None", ("test_func[None]",)),
("1.3", ("test_func[1.3]",)),
("2-3", ("test_func[2-3]",)),
],
)
def test_keyword_option_parametrize(spec, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
"""
)
opt, passed_result = spec
rec = testdir.inline_run("-k", opt)
passed, skipped, fail = rec.listoutcomes()
passed = [x.nodeid.split("::")[-1] for x in passed]
assert len(passed) == len(passed_result)
assert list(passed) == list(passed_result)
@pytest.mark.parametrize(
"spec",
[
(
"foo or import",
"ERROR: Python keyword 'import' not accepted in expressions passed to '-k'",
),
("foo or", "ERROR: Wrong expression passed to '-k': foo or"),
],
)
def test_keyword_option_wrong_arguments(spec, testdir, capsys):
testdir.makepyfile(
"""
def test_func(arg):
pass
"""
)
opt, expected_result = spec
testdir.inline_run("-k", opt)
out = capsys.readouterr().err
assert expected_result in out
def test_parametrized_collected_from_command_line(testdir):
"""Parametrized test not collected if test named specified
in command line issue#649.
"""
py_file = testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
def test_func(arg):
pass
"""
)
file_name = os.path.basename(py_file.strpath)
rec = testdir.inline_run(file_name + "::" + "test_func")
rec.assertoutcome(passed=3)
def test_parametrized_collect_with_wrong_args(testdir):
"""Test collect parametrized func with wrong number of args."""
py_file = testdir.makepyfile(
"""
import pytest
@pytest.mark.parametrize('foo, bar', [(1, 2, 3)])
def test_func(foo, bar):
pass
"""
)
result = testdir.runpytest(py_file)
result.stdout.fnmatch_lines(
[
'E ValueError: In "parametrize" the number of values ((1, 2, 3)) '
"must be equal to the number of names (['foo', 'bar'])"
]
)
def test_parametrized_with_kwargs(testdir):
"""Test collect parametrized func with wrong number of args."""
py_file = testdir.makepyfile(
"""
import pytest
@pytest.fixture(params=[1,2])
def a(request):
return request.param
@pytest.mark.parametrize(argnames='b', argvalues=[1, 2])
def test_func(a, b):
pass
"""
)
result = testdir.runpytest(py_file)
assert result.ret == 0
class TestFunctional(object):
def test_mark_per_function(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.hello
def test_hello():
assert hasattr(test_hello, 'hello')
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_mark_per_module(self, testdir):
item = testdir.getitem(
"""
import pytest
pytestmark = pytest.mark.hello
def test_func():
pass
"""
)
keywords = item.keywords
assert "hello" in keywords
def test_marklist_per_class(self, testdir):
item = testdir.getitem(
"""
import pytest
class TestClass(object):
pytestmark = [pytest.mark.hello, pytest.mark.world]
def test_func(self):
assert TestClass.test_func.hello
assert TestClass.test_func.world
"""
)
keywords = item.keywords
assert "hello" in keywords
def test_marklist_per_module(self, testdir):
item = testdir.getitem(
"""
import pytest
pytestmark = [pytest.mark.hello, pytest.mark.world]
class TestClass(object):
def test_func(self):
assert TestClass.test_func.hello
assert TestClass.test_func.world
"""
)
keywords = item.keywords
assert "hello" in keywords
assert "world" in keywords
def test_mark_per_class_decorator(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.hello
class TestClass(object):
def test_func(self):
assert TestClass.test_func.hello
"""
)
keywords = item.keywords
assert "hello" in keywords
def test_mark_per_class_decorator_plus_existing_dec(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.hello
class TestClass(object):
pytestmark = pytest.mark.world
def test_func(self):
assert TestClass.test_func.hello
assert TestClass.test_func.world
"""
)
keywords = item.keywords
assert "hello" in keywords
assert "world" in keywords
@ignore_markinfo
def test_merging_markers(self, testdir):
p = testdir.makepyfile(
"""
import pytest
pytestmark = pytest.mark.hello("pos1", x=1, y=2)
class TestClass(object):
# classlevel overrides module level
pytestmark = pytest.mark.hello(x=3)
@pytest.mark.hello("pos0", z=4)
def test_func(self):
pass
"""
)
items, rec = testdir.inline_genitems(p)
item, = items
keywords = item.keywords
marker = keywords["hello"]
assert marker.args == ("pos0", "pos1")
assert marker.kwargs == {"x": 1, "y": 2, "z": 4}
# test the new __iter__ interface
values = list(marker)
assert len(values) == 3
assert values[0].args == ("pos0",)
assert values[1].args == ()
assert values[2].args == ("pos1",)
def test_merging_markers_deep(self, testdir):
# issue 199 - propagate markers into nested classes
p = testdir.makepyfile(
"""
import pytest
class TestA(object):
pytestmark = pytest.mark.a
def test_b(self):
assert True
class TestC(object):
                # this one didn't get marked
def test_d(self):
assert True
"""
)
items, rec = testdir.inline_genitems(p)
for item in items:
print(item, item.keywords)
assert [x for x in item.iter_markers() if x.name == "a"]
def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.a
class Base(object): pass
@pytest.mark.b
class Test1(Base):
def test_foo(self): pass
class Test2(Base):
def test_bar(self): pass
"""
)
items, rec = testdir.inline_genitems(p)
self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",))
@pytest.mark.issue568
def test_mark_should_not_pass_to_siebling_class(self, testdir):
p = testdir.makepyfile(
"""
import pytest
class TestBase(object):
def test_foo(self):
pass
@pytest.mark.b
class TestSub(TestBase):
pass
class TestOtherSub(TestBase):
pass
"""
)
items, rec = testdir.inline_genitems(p)
base_item, sub_item, sub_item_other = items
print(items, [x.nodeid for x in items])
# legacy api smears
assert hasattr(base_item.obj, "b")
assert hasattr(sub_item_other.obj, "b")
assert hasattr(sub_item.obj, "b")
        # new api segregates
assert not list(base_item.iter_markers(name="b"))
assert not list(sub_item_other.iter_markers(name="b"))
assert list(sub_item.iter_markers(name="b"))
def test_mark_decorator_baseclasses_merged(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.a
class Base(object): pass
@pytest.mark.b
class Base2(Base): pass
@pytest.mark.c
class Test1(Base2):
def test_foo(self): pass
class Test2(Base2):
@pytest.mark.d
def test_bar(self): pass
"""
)
items, rec = testdir.inline_genitems(p)
self.assert_markers(items, test_foo=("a", "b", "c"), test_bar=("a", "b", "d"))
def test_mark_closest(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.c(location="class")
class Test:
@pytest.mark.c(location="function")
def test_has_own():
pass
def test_has_inherited():
pass
"""
)
items, rec = testdir.inline_genitems(p)
has_own, has_inherited = items
assert has_own.get_closest_marker("c").kwargs == {"location": "function"}
assert has_inherited.get_closest_marker("c").kwargs == {"location": "class"}
assert has_own.get_closest_marker("missing") is None
def test_mark_with_wrong_marker(self, testdir):
reprec = testdir.inline_runsource(
"""
import pytest
class pytestmark(object):
pass
def test_func():
pass
"""
)
values = reprec.getfailedcollections()
assert len(values) == 1
assert "TypeError" in str(values[0].longrepr)
def test_mark_dynamically_in_funcarg(self, testdir):
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def arg(request):
request.applymarker(pytest.mark.hello)
def pytest_terminal_summary(terminalreporter):
values = terminalreporter.stats['passed']
terminalreporter._tw.line("keyword: %s" % values[0].keywords)
"""
)
testdir.makepyfile(
"""
def test_func(arg):
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["keyword: *hello*"])
@ignore_markinfo
def test_merging_markers_two_functions(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.hello("pos1", z=4)
@pytest.mark.hello("pos0", z=3)
def test_func():
pass
"""
)
items, rec = testdir.inline_genitems(p)
item, = items
keywords = item.keywords
marker = keywords["hello"]
values = list(marker)
assert len(values) == 2
assert values[0].args == ("pos0",)
assert values[1].args == ("pos1",)
def test_no_marker_match_on_unmarked_names(self, testdir):
p = testdir.makepyfile(
"""
import pytest
@pytest.mark.shouldmatch
def test_marked():
assert 1
def test_unmarked():
assert 1
"""
)
reprec = testdir.inline_run("-m", "test_unmarked", p)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) + len(skipped) + len(failed) == 0
dlist = reprec.getcalls("pytest_deselected")
deselected_tests = dlist[0].items
assert len(deselected_tests) == 2
def test_keywords_at_node_level(self, testdir):
testdir.makepyfile(
"""
import pytest
@pytest.fixture(scope="session", autouse=True)
def some(request):
request.keywords["hello"] = 42
assert "world" not in request.keywords
@pytest.fixture(scope="function", autouse=True)
def funcsetup(request):
assert "world" in request.keywords
assert "hello" in request.keywords
@pytest.mark.world
def test_function():
pass
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@ignore_markinfo
def test_keyword_added_for_session(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_collection_modifyitems(session):
session.add_marker("mark1")
session.add_marker(pytest.mark.mark2)
session.add_marker(pytest.mark.mark3)
pytest.raises(ValueError, lambda:
session.add_marker(10))
"""
)
testdir.makepyfile(
"""
def test_some(request):
assert "mark1" in request.keywords
assert "mark2" in request.keywords
assert "mark3" in request.keywords
assert 10 not in request.keywords
marker = request.node.get_marker("mark1")
assert marker.name == "mark1"
assert marker.args == ()
assert marker.kwargs == {}
"""
)
reprec = testdir.inline_run("-m", "mark1")
reprec.assertoutcome(passed=1)
def assert_markers(self, items, **expected):
"""assert that given items have expected marker names applied to them.
expected should be a dict of (item name -> seq of expected marker names)
.. note:: this could be moved to ``testdir`` if proven to be useful
to other modules.
"""
from _pytest.mark import MarkInfo
items = {x.name: x for x in items}
for name, expected_markers in expected.items():
markers = items[name].keywords._markers
marker_names = {
name for (name, v) in markers.items() if isinstance(v, MarkInfo)
}
assert marker_names == set(expected_markers)
@pytest.mark.issue1540
@pytest.mark.filterwarnings("ignore")
def test_mark_from_parameters(self, testdir):
testdir.makepyfile(
"""
import pytest
pytestmark = pytest.mark.skipif(True, reason='skip all')
# skipifs inside fixture params
params = [pytest.mark.skipif(False, reason='dont skip')('parameter')]
@pytest.fixture(params=params)
def parameter(request):
return request.param
def test_1(parameter):
assert True
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(skipped=1)
class TestKeywordSelection(object):
def test_select_simple(self, testdir):
file_test = testdir.makepyfile(
"""
def test_one():
assert 0
class TestClass(object):
def test_method_one(self):
assert 42 == 43
"""
)
def check(keyword, name):
reprec = testdir.inline_run("-s", "-k", keyword, file_test)
passed, skipped, failed = reprec.listoutcomes()
assert len(failed) == 1
assert failed[0].nodeid.split("::")[-1] == name
assert len(reprec.getcalls("pytest_deselected")) == 1
for keyword in ["test_one", "est_on"]:
check(keyword, "test_one")
check("TestClass and test", "test_method_one")
@pytest.mark.parametrize(
"keyword",
[
"xxx",
"xxx and test_2",
"TestClass",
"xxx and not test_1",
"TestClass and test_2",
"xxx and TestClass and test_2",
],
)
def test_select_extra_keywords(self, testdir, keyword):
p = testdir.makepyfile(
test_select="""
def test_1():
pass
class TestClass(object):
def test_2(self):
pass
"""
)
testdir.makepyfile(
conftest="""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(name):
outcome = yield
if name == "TestClass":
item = outcome.get_result()
item.extra_keyword_matches.add("xxx")
"""
)
reprec = testdir.inline_run(p.dirpath(), "-s", "-k", keyword)
print("keyword", repr(keyword))
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 1
assert passed[0].nodeid.endswith("test_2")
dlist = reprec.getcalls("pytest_deselected")
assert len(dlist) == 1
assert dlist[0].items[0].name == "test_1"
def test_select_starton(self, testdir):
threepass = testdir.makepyfile(
test_threepass="""
def test_one(): assert 1
def test_two(): assert 1
def test_three(): assert 1
"""
)
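        # The trailing ":" in "-k test_two:" uses the legacy "start on this test" syntax,
        # so test_two and everything collected after it run and only test_one is deselected.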
reprec = testdir.inline_run("-k", "test_two:", threepass)
passed, skipped, failed = reprec.listoutcomes()
assert len(passed) == 2
assert not failed
dlist = reprec.getcalls("pytest_deselected")
assert len(dlist) == 1
item = dlist[0].items[0]
assert item.name == "test_one"
def test_keyword_extra(self, testdir):
p = testdir.makepyfile(
"""
def test_one():
assert 0
test_one.mykeyword = True
"""
)
reprec = testdir.inline_run("-k", "mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
@pytest.mark.xfail
def test_keyword_extra_dash(self, testdir):
p = testdir.makepyfile(
"""
def test_one():
assert 0
test_one.mykeyword = True
"""
)
# with argparse the argument to an option cannot
# start with '-'
reprec = testdir.inline_run("-k", "-mykeyword", p)
passed, skipped, failed = reprec.countoutcomes()
assert passed + skipped + failed == 0
def test_no_magic_values(self, testdir):
"""Make sure the tests do not match on magic values,
no double underscored values, like '__dict__',
and no instance values, like '()'.
"""
p = testdir.makepyfile(
"""
def test_one(): assert 1
"""
)
def assert_test_is_not_selected(keyword):
reprec = testdir.inline_run("-k", keyword, p)
passed, skipped, failed = reprec.countoutcomes()
dlist = reprec.getcalls("pytest_deselected")
assert passed + skipped + failed == 0
deselected_tests = dlist[0].items
assert len(deselected_tests) == 1
assert_test_is_not_selected("__")
assert_test_is_not_selected("()")
@pytest.mark.parametrize(
"argval, expected",
[
(
pytest.mark.skip()((1, 2)),
ParameterSet(values=(1, 2), marks=[pytest.mark.skip], id=None),
),
(
pytest.mark.xfail(pytest.mark.skip()((1, 2))),
ParameterSet(
values=(1, 2), marks=[pytest.mark.xfail, pytest.mark.skip], id=None
),
),
],
)
@pytest.mark.filterwarnings("ignore")
def test_parameterset_extractfrom(argval, expected):
extracted = ParameterSet.extract_from(argval)
assert extracted == expected
def test_legacy_transfer():
class FakeModule(object):
pytestmark = []
class FakeClass(object):
pytestmark = pytest.mark.nofun
@pytest.mark.fun
def fake_method(self):
pass
transfer_markers(fake_method, FakeClass, FakeModule)
# legacy marks transfer smeared
assert fake_method.nofun
assert fake_method.fun
    # pristine marks don't transfer
assert fake_method.pytestmark == [pytest.mark.fun.mark]
class TestMarkDecorator(object):
@pytest.mark.parametrize(
"lhs, rhs, expected",
[
(pytest.mark.foo(), pytest.mark.foo(), True),
(pytest.mark.foo(), pytest.mark.bar(), False),
(pytest.mark.foo(), "bar", False),
("foo", pytest.mark.bar(), False),
],
)
def test__eq__(self, lhs, rhs, expected):
assert (lhs == rhs) == expected
@pytest.mark.parametrize("mark", [None, "", "skip", "xfail"])
def test_parameterset_for_parametrize_marks(testdir, mark):
if mark is not None:
testdir.makeini("[pytest]\n{}={}".format(EMPTY_PARAMETERSET_OPTION, mark))
config = testdir.parseconfig()
from _pytest.mark import pytest_configure, get_empty_parameterset_mark
pytest_configure(config)
result_mark = get_empty_parameterset_mark(config, ["a"], all)
if mark in (None, ""):
# normalize to the requested name
mark = "skip"
assert result_mark.name == mark
assert result_mark.kwargs["reason"].startswith("got empty parameter set ")
if mark == "xfail":
assert result_mark.kwargs.get("run") is False
def test_parameterset_for_parametrize_bad_markname(testdir):
with pytest.raises(pytest.UsageError):
test_parameterset_for_parametrize_marks(testdir, "bad")
def test_mark_expressions_no_smear(testdir):
testdir.makepyfile(
"""
import pytest
class BaseTests(object):
def test_something(self):
pass
@pytest.mark.FOO
class TestFooClass(BaseTests):
pass
@pytest.mark.BAR
class TestBarClass(BaseTests):
pass
"""
)
reprec = testdir.inline_run("-m", "FOO")
passed, skipped, failed = reprec.countoutcomes()
dlist = reprec.getcalls("pytest_deselected")
assert passed == 1
assert skipped == failed == 0
deselected_tests = dlist[0].items
assert len(deselected_tests) == 1
# keywords smear - expected behaviour
reprec_keywords = testdir.inline_run("-k", "FOO")
passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes()
assert passed_k == 2
assert skipped_k == failed_k == 0
def test_addmarker_getmarker():
node = Node("Test", config=mock.Mock(), session=mock.Mock(), nodeid="Test")
node.add_marker(pytest.mark.a(1))
node.add_marker("b")
node.get_marker("a").combined
node.get_marker("b").combined
| {
"repo_name": "sadmansk/servo",
"path": "tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_mark.py",
"copies": "30",
"size": "32326",
"license": "mpl-2.0",
"hash": -4967787228656274000,
"line_mean": 27.5061728395,
"line_max": 88,
"alpha_frac": 0.5387613686,
"autogenerated": false,
"ratio": 4.080535218379197,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import pprint
import random
import socket
import docker
import logging
import time
import tempfile
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))
from clipper_admin import (ClipperConnection, DockerContainerManager,
KubernetesContainerManager, CLIPPER_TEMP_DIR,
ClipperException)
from clipper_admin.container_manager import CLIPPER_DOCKER_LABEL
from clipper_admin import __version__ as clipper_version
logger = logging.getLogger(__name__)
headers = {'Content-type': 'application/json'}
if not os.path.exists(CLIPPER_TEMP_DIR):
os.makedirs(CLIPPER_TEMP_DIR)
fake_model_data = tempfile.mkdtemp(dir=CLIPPER_TEMP_DIR)
class BenchmarkException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
# range of ports where available ports can be found
PORT_RANGE = [34256, 50000]
def get_docker_client():
if "DOCKER_API_VERSION" in os.environ:
return docker.from_env(version=os.environ["DOCKER_API_VERSION"])
else:
return docker.from_env()
def find_unbound_port():
"""
Returns an unbound port number on 127.0.0.1.
"""
while True:
port = random.randint(*PORT_RANGE)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(("127.0.0.1", port))
            # Close the socket explicitly so the port is released before returning it
            sock.close()
return port
except socket.error as e:
logger.info("Socket error: {}".format(e))
logger.info(
"randomly generated port %d is bound. Trying again." % port)
def create_docker_connection(cleanup=True, start_clipper=True):
logger.info("Creating DockerContainerManager")
cm = DockerContainerManager(
clipper_query_port=find_unbound_port(),
clipper_management_port=find_unbound_port(),
clipper_rpc_port=find_unbound_port(),
redis_port=find_unbound_port())
cl = ClipperConnection(cm)
if cleanup:
cl.stop_all()
docker_client = get_docker_client()
docker_client.containers.prune(filters={"label": CLIPPER_DOCKER_LABEL})
if start_clipper:
# Try to start Clipper in a retry loop here to address flaky tests
# as described in https://github.com/ucbrise/clipper/issues/352
while True:
try:
logger.info("Starting Clipper")
cl.start_clipper()
time.sleep(1)
break
except docker.errors.APIError as e:
logger.info(
"Problem starting Clipper: {}\nTrying again.".format(e))
cl.stop_all()
cm = DockerContainerManager(
clipper_query_port=find_unbound_port(),
clipper_management_port=find_unbound_port(),
clipper_rpc_port=find_unbound_port(),
redis_port=find_unbound_port())
cl = ClipperConnection(cm)
else:
cl.connect()
return cl
def create_kubernetes_connection(cleanup=True,
start_clipper=True,
connect=True,
with_proxy=False):
logger.info("Creating KubernetesContainerManager")
if with_proxy:
cm = KubernetesContainerManager(kubernetes_proxy_addr="127.0.0.1:8080")
else:
cm = KubernetesContainerManager()
cl = ClipperConnection(cm)
if cleanup:
cl.stop_all()
# Give kubernetes some time to clean up
time.sleep(20)
logger.info("Done cleaning up clipper")
if start_clipper:
logger.info("Starting Clipper")
cl.start_clipper(
query_frontend_image=
"568959175238.dkr.ecr.us-west-1.amazonaws.com/clipper/query_frontend:{}".
format(clipper_version),
mgmt_frontend_image=
"568959175238.dkr.ecr.us-west-1.amazonaws.com/clipper/management_frontend:{}".
format(clipper_version))
time.sleep(1)
if connect:
try:
cl.connect()
        except ClipperException:
            # ClipperException must come before the generic Exception clause,
            # otherwise this handler is unreachable
            pass
        except Exception:
            pass
return cl
def log_clipper_state(cl):
pp = pprint.PrettyPrinter(indent=4)
logger.info("\nAPPLICATIONS:\n{app_str}".format(
app_str=pp.pformat(cl.get_all_apps(verbose=True))))
logger.info("\nMODELS:\n{model_str}".format(
model_str=pp.pformat(cl.get_all_models(verbose=True))))
logger.info("\nCONTAINERS:\n{cont_str}".format(
cont_str=pp.pformat(cl.get_all_model_replicas(verbose=True))))
| {
"repo_name": "dcrankshaw/clipper",
"path": "integration-tests/test_utils.py",
"copies": "1",
"size": "4856",
"license": "apache-2.0",
"hash": 3090184084945704000,
"line_mean": 32.958041958,
"line_max": 90,
"alpha_frac": 0.6091433278,
"autogenerated": false,
"ratio": 3.8448139350752175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49539572628752176,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import requests
import json
import tempfile
import shutil
import numpy as np
import time
import logging
from test_utils import (create_docker_connection, BenchmarkException,
fake_model_data, headers, log_clipper_state)
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))
from clipper_admin import __version__ as clipper_version, CLIPPER_TEMP_DIR
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
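# Integration test: registers a number of applications against a local Docker Clipper
# cluster, deploys several noop model versions per application, and checks that queries
# stop returning default predictions once a model is linked and serving.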
def deploy_model(clipper_conn, name, version, link=False):
app_name = "%s-app" % name
model_name = "%s-model" % name
clipper_conn.build_and_deploy_model(
model_name,
version,
"doubles",
fake_model_data,
"clipper/noop-container:{}".format(clipper_version),
num_replicas=1)
time.sleep(10)
if link:
clipper_conn.link_model_to_app(app_name, model_name)
success = False
num_tries = 0
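    # Poll the query frontend up to 5 times: send a batch of predictions per attempt and
    # count how many fall back to the default response; the deployment is considered
    # healthy once fewer than half of the predictions are defaults.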
while not success and num_tries < 5:
time.sleep(30)
num_preds = 25
num_defaults = 0
addr = clipper_conn.get_query_addr()
for i in range(num_preds):
response = requests.post(
"http://%s/%s/predict" % (addr, app_name),
headers=headers,
data=json.dumps({
'input': list(np.random.random(30))
}))
result = response.json()
if response.status_code == requests.codes.ok and result["default"]:
num_defaults += 1
if num_defaults > 0:
logger.error("Error: %d/%d predictions were default" %
(num_defaults, num_preds))
if num_defaults < num_preds / 2:
success = True
num_tries += 1
if not success:
raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
(app_name, model_name, version))
def create_and_test_app(clipper_conn, name, num_models):
app_name = "%s-app" % name
clipper_conn.register_application(app_name, "doubles", "default_pred",
100000)
time.sleep(1)
addr = clipper_conn.get_query_addr()
response = requests.post(
"http://%s/%s/predict" % (addr, app_name),
headers=headers,
data=json.dumps({
'input': list(np.random.random(30))
}))
response.json()
if response.status_code != requests.codes.ok:
logger.error("Error: %s" % response.text)
raise BenchmarkException("Error creating app %s" % app_name)
link = True
for i in range(num_models):
deploy_model(clipper_conn, name, i, link)
link = False
time.sleep(1)
if __name__ == "__main__":
num_apps = 6
num_models = 8
try:
if len(sys.argv) > 1:
num_apps = int(sys.argv[1])
if len(sys.argv) > 2:
num_models = int(sys.argv[2])
    except (IndexError, ValueError):
        # fall back to the default values for num_apps and num_models if the
        # command-line arguments are missing or not valid integers
        pass
try:
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=True)
time.sleep(10)
try:
logger.info("Running integration test with %d apps and %d models" %
(num_apps, num_models))
for a in range(num_apps):
create_and_test_app(clipper_conn, "testapp%s" % a, num_models)
if not os.path.exists(CLIPPER_TEMP_DIR):
os.makedirs(CLIPPER_TEMP_DIR)
tmp_log_dir = tempfile.mkdtemp(dir=CLIPPER_TEMP_DIR)
logger.info(clipper_conn.get_clipper_logs(logging_dir=tmp_log_dir))
# Remove temp files
shutil.rmtree(tmp_log_dir)
log_clipper_state(clipper_conn)
logger.info("SUCCESS")
except BenchmarkException as e:
log_clipper_state(clipper_conn)
logger.exception("BenchmarkException")
create_docker_connection(cleanup=True, start_clipper=False)
sys.exit(1)
else:
create_docker_connection(cleanup=True, start_clipper=False)
except Exception as e:
logger.exception("Exception")
create_docker_connection(cleanup=True, start_clipper=False)
sys.exit(1)
| {
"repo_name": "dcrankshaw/clipper",
"path": "integration-tests/many_apps_many_models.py",
"copies": "1",
"size": "4569",
"license": "apache-2.0",
"hash": 5173512916932833000,
"line_mean": 32.5955882353,
"line_max": 79,
"alpha_frac": 0.576274896,
"autogenerated": false,
"ratio": 3.6817082997582595,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47579831957582597,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
import textwrap
import pytest
from _pytest.monkeypatch import MonkeyPatch
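# Fixture that records the current working directory and sys.path, yields a fresh
# MonkeyPatch instance, and restores both afterwards; note that it does not call undo().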
@pytest.fixture
def mp():
cwd = os.getcwd()
sys_path = list(sys.path)
yield MonkeyPatch()
sys.path[:] = sys_path
os.chdir(cwd)
def test_setattr():
class A(object):
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
monkeypatch.setattr(A, 'y', 2, raising=False)
assert A.y == 2
monkeypatch.undo()
assert not hasattr(A, 'y')
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, 'x', 2)
assert A.x == 2
monkeypatch.setattr(A, 'x', 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
class TestSetattrWithImportPath(object):
def test_string_expression(self, monkeypatch):
monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
assert os.path.abspath("123") == "hello2"
def test_string_expression_class(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
def test_unicode_string(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
monkeypatch.delattr("_pytest.config.Config")
def test_wrong_target(self, monkeypatch):
pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
def test_unknown_import(self, monkeypatch):
pytest.raises(ImportError,
lambda: monkeypatch.setattr("unkn123.classx", None))
def test_unknown_attr(self, monkeypatch):
pytest.raises(AttributeError,
lambda: monkeypatch.setattr("os.path.qweqwe", None))
def test_unknown_attr_non_raising(self, monkeypatch):
# https://github.com/pytest-dev/pytest/issues/746
monkeypatch.setattr('os.path.qweqwe', 42, raising=False)
assert os.path.qweqwe == 42
def test_delattr(self, monkeypatch):
monkeypatch.delattr("os.path.abspath")
assert not hasattr(os.path, "abspath")
monkeypatch.undo()
assert os.path.abspath
def test_delattr():
class A(object):
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
assert not hasattr(A, 'x')
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
monkeypatch.delattr(A, 'y', raising=False)
monkeypatch.setattr(A, 'x', 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
monkeypatch.setitem(d, 'y', 1700)
monkeypatch.setitem(d, 'y', 1700)
assert d['x'] == 2
assert d['y'] == 1700
monkeypatch.setitem(d, 'x', 3)
assert d['x'] == 3
monkeypatch.undo()
assert d['x'] == 1
assert 'y' not in d
d['x'] = 5
monkeypatch.undo()
assert d['x'] == 5
def test_setitem_deleted_meanwhile():
d = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
del d['x']
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, 'hello')
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, 'x')
assert 'x' not in d
monkeypatch.delitem(d, 'y', raising=False)
pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
assert not d
monkeypatch.setitem(d, 'y', 1700)
assert d['y'] == 1700
d['hello'] = 'world'
monkeypatch.setitem(d, 'x', 1500)
assert d['x'] == 1500
monkeypatch.undo()
assert d == {'hello': 'world', 'x': 1}
def test_setenv():
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2)
import os
assert os.environ['XYZ123'] == "2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_delenv():
name = 'xyz1234'
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2, prepend="-")
assert os.environ['XYZ123'] == "2"
monkeypatch.setenv('XYZ123', 3, prepend="-")
assert os.environ['XYZ123'] == "3-2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource("""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
old = list(sys.path)
mp.syspath_prepend('world')
mp.syspath_prepend('hello')
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp):
mp.syspath_prepend('hello world')
mp.undo()
sys.path.append('more hello world')
mp.undo()
assert sys.path[-1] == 'more hello world'
def test_chdir_with_path_local(mp, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile("""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*1 passed*
""")
def test_importerror(testdir):
p = testdir.mkpydir("package")
p.join("a.py").write(textwrap.dedent("""\
import doesnotexist
x = 1
"""))
testdir.tmpdir.join("test_importerror.py").write(textwrap.dedent("""\
def test_importerror(monkeypatch):
monkeypatch.setattr('package.a.x', 2)
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*import error in package.a: No module named {0}doesnotexist{0}*
""".format("'" if sys.version_info > (3, 0) else ""))
class SampleNew(object):
@staticmethod
def hello():
return True
class SampleNewInherit(SampleNew):
pass
class SampleOld(object):
# oldstyle on python2
@staticmethod
def hello():
return True
class SampleOldInherit(SampleOld):
pass
@pytest.mark.parametrize('Sample', [
SampleNew, SampleNewInherit,
SampleOld, SampleOldInherit,
], ids=['new', 'new-inherit', 'old', 'old-inherit'])
def test_issue156_undo_staticmethod(Sample):
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, 'hello', None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
def test_issue1338_name_resolving():
pytest.importorskip('requests')
monkeypatch = MonkeyPatch()
try:
monkeypatch.delattr('requests.sessions.Session.request')
finally:
monkeypatch.undo()
| {
"repo_name": "flub/pytest",
"path": "testing/test_monkeypatch.py",
"copies": "1",
"size": "8416",
"license": "mit",
"hash": 8503717498844262000,
"line_mean": 24.6585365854,
"line_max": 75,
"alpha_frac": 0.6222671103,
"autogenerated": false,
"ratio": 3.5661016949152544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46883688052152545,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "functions"))
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from Image_Visualizing import present_3d, make_mask
# Relative path to subject 1 data
pathtodata = "../../../data/ds009/sub001/"
condition_location=pathtodata+"model/model001/onsets/task001_run001/"
location_of_images="../../../images/"
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
data = np.load('cluster_mask.npy')
data_new = data[..., 10:13]
X = np.reshape(data_new, (-1, 1))
connectivity = grid_to_graph(n_x=data_new.shape[0], n_y=data_new.shape[1], n_z=data_new.shape[2])
st = time.time()
n_clusters = 7 # number of regions
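# Ward agglomerative clustering constrained by the voxel connectivity graph, so only
# spatially adjacent voxels within the 3-slice sub-volume can be merged into a region.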
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, data_new.shape)
label_mean = np.zeros(n_clusters)
center = list()
#FIND THE AVERAGE T-VALUE PER CLUSTER
for j in range(n_clusters):
mask = label==j
index = np.where(mask)
    center.append((np.mean(index[0]), np.mean(index[1]), np.mean(index[2])))
    label_mean[j] = np.mean(data_new[mask])
#PRINT THE PLOTS
for i in range(data_new.shape[-1]):
plt.figure()
plt.imshow(data_new[...,i], cmap=plt.cm.gray)
for l in range(n_clusters):
        plt.contour(label[..., i] == l, contours=1,
                    colors=[plt.cm.spectral(l / float(n_clusters))], linewidths=0.4)
plt.xticks(())
plt.yticks(())
plt.savefig(location_of_images+"ward"+str(i)+'.png')
| {
"repo_name": "berkeley-stat159/project-alpha",
"path": "code/utils/scripts/cluster.py",
"copies": "1",
"size": "1770",
"license": "bsd-3-clause",
"hash": -1195223848598151400,
"line_mean": 28.0163934426,
"line_max": 102,
"alpha_frac": 0.6785310734,
"autogenerated": false,
"ratio": 3.0412371134020617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42197681868020614,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function)
import os
import time
import simplejson
from datetime import datetime
import re
from addie.autoNOM.step1_gui_handler import Step1GuiHandler
class MakeExpIniFileAndRunAutonom(object):
_dict_mandatory = None
_dict_optional = None
EXP_INI_FILENAME = 'exp.ini'
_star = '*' * 19
title_mandatory = 'required ' + _star
_star = '*' * 18
title_optional = 'optional ' + _star
list_mandatory = ['Dia', 'DiaBg', 'Vana', 'VanaBg', 'MTc']
list_optional = ['recali', 'renorm', 'autotemp', 'scan1', 'scanl', 'Hz', '#']
script_to_run = "python /SNS/NOM/shared/autoNOM/stable/autoNOM.py -l -P /SNS/NOM/shared/autoNOM/stable/"
script_flag = ""
def __init__(self, parent=None, folder=None):
self.parent_no_ui = parent
self.parent = parent.ui
self.folder = folder
def create(self):
self.retrieve_metadata()
self.create_exp_ini_file()
def retrieve_flags(self):
_postprocessing_flag = self.parent.postprocessing_yes.isChecked()
self.script_flag += " -p %s" % _postprocessing_flag
def run_autonom(self):
self.retrieve_flags()
self.run_auto_nom_script()
def retrieve_metadata(self):
_dict_mandatory = {}
_dict_optional = {}
_diamond = str(self.parent.diamond.text())
_diamond_background = str(self.parent.diamond_background.text())
_vanadium = str(self.parent.vanadium.text())
_vanadium_background = str(self.parent.vanadium_background.text())
_sample_background = str(self.parent.sample_background.text())
_first_scan = str(self.parent.first_scan.text())
_last_scan = str(self.parent.last_scan.text())
_frequency = self.parent.frequency.currentText()
_recalibration_flag = self.yes_no(self.parent.recalibration_yes.isChecked())
_renormalization_flag = self.yes_no(self.parent.renormalization_yes.isChecked())
_autotemplate_flag = self.yes_no(self.parent.autotemplate_yes.isChecked())
_comments = self.parent.comments.text()
_dict_mandatory['Dia'] = _diamond
_dict_mandatory['DiaBg'] = _diamond_background
_dict_mandatory['Vana'] = _vanadium
_dict_mandatory['VanaBg'] = _vanadium_background
_dict_mandatory['MTc'] = _sample_background
_dict_optional['recali'] = _recalibration_flag
_dict_optional['renorm'] = _renormalization_flag
_dict_optional['autotemp'] = _autotemplate_flag
_dict_optional['scan1'] = _first_scan
_dict_optional['scanl'] = _last_scan
_dict_optional['Hz'] = _frequency
_dict_optional['#'] = _comments
self._dict_mandatory = _dict_mandatory
self._dict_optional = _dict_optional
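    # Calibration files live in per-cycle directories: cycle 1 covers the first half of the
    # year and cycle 2 the second half, so the cycle preceding cycle 1 is cycle 2 of the
    # previous year.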
    def get_previous_cycle_cal_path(self, cycle, year, calstring="/SNS/NOM/shared/CALIBRATION/%s_%d_1B_CAL/"):
if cycle == 1:
_old_year = int(year) - 1
_calpath = calstring % (_old_year, 2)
else:
_calpath = calstring % (year, 1)
return _calpath
    def check_calfiles_in_calpath(self, calpath, calibrant, samp_env, same_samp_env_dict=None, file_extension='.h5'):
cal_list = [os.path.splitext(filename) for filename in os.listdir(calpath)]
if same_samp_env_dict is None:
same_samp_env_dict = dict()
found_in_cycle = False
cal_file = None
for basename, ext in cal_list:
if file_extension in ext:
_pulled_run = re.search(r'd(\d+)', basename.split('_')[1]).group(1) # parses "d####" str for number
_pulled_samp_env = basename.split('_')[-1]
_full_path = calpath+basename+ext
if _pulled_run == calibrant and _pulled_samp_env == samp_env:
found_in_cycle = True
cal_file = _full_path
if _pulled_run != calibrant and _pulled_samp_env == samp_env:
same_samp_env_dict[int(_pulled_run)] = _full_path
return found_in_cycle, same_samp_env_dict, cal_file
def setup_mantid_calibration(self, script='calibration_creation.py',
input_file='calibration_creation.json',
script_dir='/SNS/NOM/shared/scripts/',
calstring="/SNS/NOM/shared/CALIBRATION/%s_%d_1B_CAL/",
calformat="NOM_d%d_%s_%s.h5"):
# Setup calibration input
_diamond = self._dict_mandatory['Dia']
_vanadium = self._dict_mandatory['Vana']
_today = datetime.now().date().strftime("%Y_%m_%d")
_samp_env = str(self.parent.sample_environment_comboBox.currentText())
_script_to_run = script_dir+script+' '+input_file
# Get cycle based on month and year
_year = datetime.now().date().strftime("%Y")
_month = datetime.now().date().strftime("%m")
if int(_month) <= 6:
_cycle = 1
else:
_cycle = 2
_calpath = calstring % (_year, _cycle)
# Check current cycle directory exists and make if not
if not os.path.isdir(_calpath):
os.mkdir(_calpath)
# Check current cycle for calibration
found_in_current_cycle, same_sample_env_dict, current_cal_file = \
self.check_calfiles_in_calpath(calpath=_calpath, calibrant=_diamond,
samp_env=_samp_env, same_samp_env_dict=None,
                                           file_extension='.h5')
# Check previous cycle for calibration
_calpath = self.get_previous_cycle_cal_path(_cycle, _year)
found_in_previous_cycle, same_sample_env_dict, old_cal_file = \
self.check_calfiles_in_calpath(calpath=_calpath, calibrant=_diamond,
samp_env=_samp_env, same_samp_env_dict=same_sample_env_dict,
                                           file_extension='.h5')
# Get old calibration to use
old_cal = None
if same_sample_env_dict:
old_cal = sorted(same_sample_env_dict)[-1]
        # Finish setting up calibration input
_cal_input = {"sample": _diamond,
"vanadium": _vanadium,
"date": _today,
"sample_environment": _samp_env,
"oldCal": old_cal
}
# Write file if we either did not find a calibration or force rerunning the calibration
_run_cali = False
_recalibration_flag = self.parent.recalibration_yes.isChecked()
if (not found_in_current_cycle and not found_in_previous_cycle) or _recalibration_flag:
_run_cali = True
            _mantid_calibration = calstring % (_year, _cycle)
_mantid_calibration += calformat % (int(_diamond), _today, _samp_env)
with open(input_file, 'w') as handle:
simplejson.dump(_cal_input, handle, indent=2, ignore_nan=True)
elif not found_in_current_cycle and found_in_previous_cycle:
_mantid_calibration = current_cal_file
elif found_in_previous_cycle:
_mantid_calibration = old_cal_file
return _run_cali, _mantid_calibration, _script_to_run
def create_exp_ini_file(self):
_full_file_name = os.path.join(self.folder, self.EXP_INI_FILENAME)
f = open(_full_file_name, 'w')
# mandatory part
_dict_mandatory = self._dict_mandatory
f.write(self.title_mandatory + "\n")
for _name in self.list_mandatory:
f.write(_name + ' ' + _dict_mandatory[_name] + '\n')
# optional part
_dict_optional = self._dict_optional
f.write(self.title_optional + '\n')
for _name in self.list_optional:
_value = _dict_optional[_name]
if _value == '':
continue
else:
f.write(_name + ' ' + _dict_optional[_name] + '\n')
f.close()
print("[LOG] created file %s" % _full_file_name)
def run_auto_nom_script(self):
_script_to_run = self.script_to_run + self.script_flag
os.chdir(self.folder)
o_gui = Step1GuiHandler(parent=self.parent_no_ui)
o_gui.set_main_window_title()
_dict_mandatory = self._dict_mandatory
_pre_script = '/SNS/NOM/shared/autoNOM/stable/readtitles.py -a -s'
for _values in list(_dict_mandatory.values()):
_pre_script += ' ' + _values
'''
print("[LOG] testing Mantid calibration")
_run_cali, _mantid_calibration, _script_to_run = self.setup_mantid_calibration()
if _run_cali:
self.parent_no_ui.launch_job_manager(job_name = 'Mantid calibration',
script_to_run = _script_to_run)
'''
print("[LOG] running pre-script")
print("[LOG] " + _pre_script)
os.system(_pre_script)
while not os.path.isfile("./los.txt"):
time.sleep(1)
print("[LOG] running script:")
print("[LOG] " + _script_to_run)
self.parent_no_ui.launch_job_manager(job_name='autoNOM',
script_to_run=_script_to_run)
# os.system(_script_to_run)
# self.parent.statusbar.showMessage("autoNOM script: DONE !")
def yes_no(self, condition):
if condition:
return "yes"
else:
return "no"
| {
"repo_name": "neutrons/FastGR",
"path": "addie/autoNOM/make_exp_ini_file_and_run_autonom.py",
"copies": "1",
"size": "9557",
"license": "mit",
"hash": -3395861440926234000,
"line_mean": 38.0081632653,
"line_max": 116,
"alpha_frac": 0.5679606571,
"autogenerated": false,
"ratio": 3.5982680722891565,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9661715226008274,
"avg_score": 0.0009027006761764897,
"num_lines": 245
} |
from __future__ import absolute_import, division, print_function
import os
import warnings
import six
import py
import attr
import _pytest
import _pytest._code
from _pytest.compat import getfslineno
from _pytest.outcomes import fail
from _pytest.mark.structures import NodeKeywords, MarkInfo
SEP = "/"
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
def _splitnode(nodeid):
"""Split a nodeid into constituent 'parts'.
Node IDs are strings, and can be things like:
''
'testing/code'
'testing/code/test_excinfo.py'
'testing/code/test_excinfo.py::TestFormattedExcinfo::()'
Return values are lists e.g.
[]
['testing', 'code']
['testing', 'code', 'test_excinfo.py']
['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()']
"""
if nodeid == "":
# If there is no root node at all, return an empty list so the caller's logic can remain sane
return []
parts = nodeid.split(SEP)
# Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()'
parts[-1:] = parts[-1].split("::")
return parts
def ischildnode(baseid, nodeid):
"""Return True if the nodeid is a child node of the baseid.
E.g. 'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'
"""
base_parts = _splitnode(baseid)
node_parts = _splitnode(nodeid)
if len(node_parts) < len(base_parts):
return False
return node_parts[: len(base_parts)] == base_parts
@attr.s
class _CompatProperty(object):
name = attr.ib()
def __get__(self, obj, owner):
if obj is None:
return self
from _pytest.deprecated import COMPAT_PROPERTY
warnings.warn(
COMPAT_PROPERTY.format(name=self.name, owner=owner.__name__), stacklevel=2
)
return getattr(__import__("pytest"), self.name)
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(
self, name, parent=None, config=None, session=None, fspath=None, nodeid=None
):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = fspath or getattr(parent, "fspath", None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: the marker objects belonging to this node
self.own_markers = []
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
if nodeid is not None:
self._nodeid = nodeid
else:
assert parent is not None
self._nodeid = self.parent.nodeid + "::" + self.name
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
Module = _CompatProperty("Module")
Class = _CompatProperty("Class")
Instance = _CompatProperty("Instance")
Function = _CompatProperty("Function")
File = _CompatProperty("File")
Item = _CompatProperty("Item")
def _getcustomclass(self, name):
maybe_compatprop = getattr(type(self), name)
if isinstance(maybe_compatprop, _CompatProperty):
return getattr(__import__("pytest"), name)
else:
from _pytest.deprecated import CUSTOM_CLASS
cls = getattr(self, name)
self.warn(CUSTOM_CLASS.format(name=name, type_name=type(self).__name__))
return cls
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None))
def warn(self, _code_or_warning=None, message=None, code=None):
"""Issue a warning for this item.
Warnings will be displayed after the test session, unless explicitly suppressed.
This can be called in two forms:
**Warning instance**
This was introduced in pytest 3.8 and uses the standard warning mechanism to issue warnings.
.. code-block:: python
node.warn(PytestWarning("some message"))
The warning instance must be a subclass of :class:`pytest.PytestWarning`.
**code/message (deprecated)**
This form was used in pytest prior to 3.8 and is considered deprecated. Using this form will emit another
warning about the deprecation:
.. code-block:: python
node.warn("CI", "some message")
:param Union[Warning,str] _code_or_warning:
warning instance or warning code (legacy). This parameter receives an underscore for backward
compatibility with the legacy code/message form, and will be replaced for something
more usual when the legacy form is removed.
:param Union[str,None] message: message to display when called in the legacy form.
:param str code: code for the warning, in legacy form when using keyword arguments.
:return:
"""
if message is None:
if _code_or_warning is None:
raise ValueError("code_or_warning must be given")
self._std_warn(_code_or_warning)
else:
if _code_or_warning and code:
raise ValueError(
"code_or_warning and code cannot both be passed to this function"
)
code = _code_or_warning or code
self._legacy_warn(code, message)
def _legacy_warn(self, code, message):
"""
.. deprecated:: 3.8
Use :meth:`Node.std_warn <_pytest.nodes.Node.std_warn>` instead.
Generate a warning with the given code and message for this item.
"""
from _pytest.deprecated import NODE_WARN
self._std_warn(NODE_WARN)
assert isinstance(code, str)
fslocation = get_fslocation_from_item(self)
self.ihook.pytest_logwarning.call_historic(
kwargs=dict(
code=code, message=message, nodeid=self.nodeid, fslocation=fslocation
)
)
def _std_warn(self, warning):
"""Issue a warning for this item.
Warnings will be displayed after the test session, unless explicitly suppressed
:param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.
:raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.
"""
from _pytest.warning_types import PytestWarning
if not isinstance(warning, PytestWarning):
raise ValueError(
"warning must be an instance of PytestWarning or subclass, got {!r}".format(
warning
)
)
path, lineno = get_fslocation_from_item(self)
warnings.warn_explicit(
warning,
category=None,
filename=str(path),
lineno=lineno + 1 if lineno is not None else None,
)
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
return self._nodeid
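    # e.g. "testing/code/test_excinfo.py::TestFormattedExcinfo::()" (cf. _splitnode above)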
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker, append=True):
"""dynamically add a marker object to the node.
:type marker: ``str`` or ``pytest.mark.*`` object
:param marker:
``append=True`` whether to append the marker,
if ``False`` insert at position ``0``.
"""
from _pytest.mark import MarkDecorator, MARK_GEN
if isinstance(marker, six.string_types):
marker = getattr(MARK_GEN, marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
if append:
self.own_markers.append(marker.mark)
else:
self.own_markers.insert(0, marker.mark)
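    # e.g. item.add_marker("slow") or item.add_marker(pytest.mark.slow), typically called
    # from a pytest_collection_modifyitems hook.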
def iter_markers(self, name=None):
"""
:param name: if given, filter the results by the name attribute
iterate over all markers of the node
"""
return (x[1] for x in self.iter_markers_with_node(name=name))
def iter_markers_with_node(self, name=None):
"""
:param name: if given, filter the results by the name attribute
iterate over all markers of the node
returns sequence of tuples (node, mark)
"""
for node in reversed(self.listchain()):
for mark in node.own_markers:
if name is None or getattr(mark, "name", None) == name:
yield node, mark
def get_closest_marker(self, name, default=None):
"""return the first marker matching the name, from closest (for example function) to farther level (for example
module level).
        :param default: fallback return value if no marker was found
:param name: name to filter by
"""
return next(self.iter_markers(name=name), default)
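    # e.g. when both a class and one of its test functions carry @pytest.mark.timeout,
    # get_closest_marker("timeout") returns the function-level mark, because markers are
    # iterated from the node itself outwards to its parents.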
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name.
.. deprecated:: 3.6
This function has been deprecated in favor of
:meth:`Node.get_closest_marker <_pytest.nodes.Node.get_closest_marker>` and
:meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`, see :ref:`update marker code`
for more details.
"""
markers = list(self.iter_markers(name=name))
if markers:
return MarkInfo(markers)
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
if excinfo.errisinstance(fail.Exception):
if not excinfo.value.pytrace:
return six.text_type(excinfo.value)
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style = "long"
else:
tb = _pytest._code.Traceback([excinfo.traceback[-1]])
self._prunetraceback(excinfo)
if len(excinfo.traceback) == 0:
excinfo.traceback = tb
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
if self.config.option.verbose > 1:
truncate_locals = False
else:
truncate_locals = True
try:
os.getcwd()
abspath = False
except OSError:
abspath = True
return excinfo.getrepr(
funcargs=True,
abspath=abspath,
showlocals=self.config.option.showlocals,
style=style,
tbfilter=tbfilter,
truncate_locals=truncate_locals,
)
repr_failure = _repr_failure_py
def get_fslocation_from_item(item):
"""Tries to extract the actual location from an item, depending on available attributes:
* "fslocation": a pair (path, lineno)
* "obj": a Python object that the item wraps.
* "fspath": just a path
:rtype: a tuple of (str|LocalPath, int) with filename and line number.
"""
result = getattr(item, "location", None)
if result is not None:
return result[:2]
obj = getattr(item, "obj", None)
if obj is not None:
return getfslineno(obj)
return getattr(item, "fspath", "unknown location"), -1
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _prunetraceback(self, excinfo):
if hasattr(self, "fspath"):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
def _check_initialpaths_for_relpath(session, fspath):
for initial_path in session._initialpaths:
if fspath.common(initial_path) == initial_path:
return fspath.relto(initial_path.dirname)
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, SEP)
self.fspath = fspath
session = session or parent.session
if nodeid is None:
nodeid = self.fspath.relto(session.config.rootdir)
if not nodeid:
nodeid = _check_initialpaths_for_relpath(session, fspath)
if nodeid and os.sep != SEP:
nodeid = nodeid.replace(os.sep, SEP)
super(FSCollector, self).__init__(
name, parent, config, session, nodeid=nodeid, fspath=fspath
)
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None, nodeid=None):
super(Item, self).__init__(name, parent, config, session, nodeid=nodeid)
self._report_sections = []
#: user properties is a list of tuples (name, value) that holds user
#: defined properties for this test.
self.user_properties = []
def add_report_section(self, when, key, content):
"""
Adds a new report section, similar to what's done internally to add stdout and
stderr captured output::
item.add_report_section("call", "stdout", "report section contents")
:param str when:
One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
:param str key:
Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
``"stderr"`` internally.
:param str content:
The full contents as a string.
"""
if content:
self._report_sections.append((when, key, content))
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location
| {
"repo_name": "ddboline/pytest",
"path": "src/_pytest/nodes.py",
"copies": "1",
"size": "17771",
"license": "mit",
"hash": -1199841808011707600,
"line_mean": 32.3414634146,
"line_max": 119,
"alpha_frac": 0.6009791233,
"autogenerated": false,
"ratio": 4.30082284607938,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006785665195094739,
"num_lines": 533
} |
from __future__ import (absolute_import, division, print_function)
import os
from addie.processing.idl.table_handler import TableHandler
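# Writes one "<sample name>.ini" properties file per selected table row, listing the sample
# title, formula, mass density, radius, packing fraction, shape and absorption-correction
# flag, presumably consumed by the downstream IDL reduction step.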
class CreateSampleFiles(object):
list_selected_row = None
file_extension = '.ini'
list_sample_files = None
def __init__(self, parent=None):
self.parent = parent
self.current_dir = self.parent.current_folder
self.list_sample_files = []
def run(self):
self._retrieve_list_of_selected_rows()
self._create_list_of_sample_properties_files()
def _retrieve_list_of_selected_rows(self):
o_table_handler = TableHandler(parent=self.parent)
o_table_handler.retrieve_list_of_selected_rows()
self.list_selected_row = o_table_handler.list_selected_row
def _create_list_of_sample_properties_files(self):
_list_selected_row = self.list_selected_row
nbr_files = len(_list_selected_row)
for _index_file in range(nbr_files):
self._export_ini_file(_list_selected_row[_index_file])
def _export_ini_file(self, row_metadata):
full_name_of_file = os.path.join(self.current_dir, row_metadata['name'] + self.file_extension)
self.list_sample_files.append(full_name_of_file)
_text = []
_text.append(row_metadata['name'] + ' #sample title\n')
if row_metadata['sample_formula']:
_text.append(row_metadata['sample_formula'] + ' #sample formula\n')
if row_metadata['mass_density']:
_text.append(row_metadata['mass_density'] + ' #mass density in g/cc\n')
if row_metadata['radius']:
_text.append(row_metadata['radius'] + ' #radius in cm\n')
if row_metadata['packing_fraction']:
_text.append(row_metadata['packing_fraction'] + ' #packing fraction\n')
_text.append(row_metadata['sample_shape'] + ' #sample shape\n')
_text.append(row_metadata['do_abs_correction'] + ' #do absorption correction in IDL\n')
f = open(full_name_of_file, 'w')
print(">creating file %s" % full_name_of_file)
for _line in _text:
f.write(_line)
f.close()
| {
"repo_name": "neutrons/FastGR",
"path": "addie/processing/idl/create_sample_files.py",
"copies": "1",
"size": "2149",
"license": "mit",
"hash": 2990628454749937700,
"line_mean": 36.0517241379,
"line_max": 102,
"alpha_frac": 0.6258724988,
"autogenerated": false,
"ratio": 3.4773462783171523,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9599431283294715,
"avg_score": 0.0007574987644873818,
"num_lines": 58
} |
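CreateSampleFiles expects its parent to be the ADDIE main window, but the .ini export step can be exercised on its own. A sketch under stated assumptions (the stub parent, folder, and row values are invented for illustration; run() additionally needs the GUI table behind TableHandler):
from addie.processing.idl.create_sample_files import CreateSampleFiles
class _StubParent(object):
    current_folder = '/tmp'   # stands in for the GUI's working directory
o_create = CreateSampleFiles(parent=_StubParent())
row = {'name': 'Si_sample',
       'sample_formula': 'Si',
       'mass_density': '2.33',
       'radius': '0.3',
       'packing_fraction': '0.6',
       'sample_shape': 'Cylinder',
       'do_abs_correction': 'Yes'}
o_create._export_ini_file(row)        # writes /tmp/Si_sample.ini
print(o_create.list_sample_files)     # ['/tmp/Si_sample.ini']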
from __future__ import absolute_import, division, print_function
import os
from .common import Benchmark
import numpy as np
class Records(Benchmark):
def setup(self):
self.l50 = np.arange(1000)
self.fields_number = 10000
self.arrays = [self.l50 for _ in range(self.fields_number)]
self.formats = [self.l50.dtype.str for _ in range(self.fields_number)]
self.formats_str = ','.join(self.formats)
self.dtype_ = np.dtype(
[
('field_{}'.format(i), self.l50.dtype.str)
for i in range(self.fields_number)
]
)
self.buffer = self.l50.tostring() * self.fields_number
def time_fromarrays_w_dtype(self):
np.core.records.fromarrays(self.arrays, dtype=self.dtype_)
def time_fromarrays_wo_dtype(self):
np.core.records.fromarrays(self.arrays)
def time_fromarrays_formats_as_list(self):
np.core.records.fromarrays(self.arrays, formats=self.formats)
def time_fromarrays_formats_as_string(self):
np.core.records.fromarrays(self.arrays, formats=self.formats_str)
def time_fromstring_w_dtype(self):
np.core.records.fromstring(self.buffer, dtype=self.dtype_)
def time_fromstring_formats_as_list(self):
np.core.records.fromstring(self.buffer, formats=self.formats)
def time_fromstring_formats_as_string(self):
np.core.records.fromstring(self.buffer, formats=self.formats_str)
| {
"repo_name": "shoyer/numpy",
"path": "benchmarks/benchmarks/bench_records.py",
"copies": "8",
"size": "1472",
"license": "bsd-3-clause",
"hash": -6493422900236288000,
"line_mean": 33.2325581395,
"line_max": 78,
"alpha_frac": 0.65625,
"autogenerated": false,
"ratio": 3.5047619047619047,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 43
} |
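The record-array constructors being timed above can be tried on a tiny input; a stand-alone sketch (array contents are illustrative):
import numpy as np
labels = np.array(['a', 'b'])
values = np.array([1.5, 2.5])
# build a two-field record array from plain arrays (cf. the fromarrays benchmarks above)
rec = np.core.records.fromarrays([labels, values], names='label,value')
print(rec.label)    # ['a' 'b']
print(rec[0])       # ('a', 1.5)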
from __future__ import (absolute_import, division, print_function)
import os
import addie.processing.idl.table_handler
from addie.processing.idl.mantid_reduction_dialogbox import MantidReductionDialogbox
from addie.processing.idl.mantid_reduction_view import MantidReductionView
class GlobalMantidReduction(object):
parameters = {'max_chunk_size': 8,
'preserve_events': True,
'exp_ini_filename': 'exp.ini',
'push_data_positive': 'AddMinimum',
'remove_prompt_pulse_width': 50,
'bin_in_d_space': True,
'filter_bad_pulses': 25,
'save_as': 'gsas fullprof topas',
'strip_vanadium_peaks': True,
'normalize_by_current': True,
'final_data_units': 'dSpacing',
'runs': [],
'calibration_file': '',
'characterization_file': '',
'background_number': '',
'vanadium_number': '',
'vanadium_background_number': '',
'resamplex': None,
'crop_wavelength_min': None,
                  'crop_wavelength_max': None,
'output_directory': '',
'vanadium_radius': None}
def __init__(self, parent=None):
self.parent = parent
self.collect_parameters()
self.collect_runs()
self.create_output_folder()
def collect_parameters(self):
_parameters = self.parameters
_current_folder = self.parent.current_folder
_exp_ini = os.path.join(_current_folder, _parameters['exp_ini_filename'])
_parameters['exp_ini_filename'] = str(_exp_ini)
_parameters['calibration_file'] = str(self.parent.ui.mantid_calibration_value.text())
_parameters['characterization_file'] = str(self.parent.ui.mantid_characterization_value.text())
_parameters['background_number'] = str(self.collect_background_number())
_parameters['vanadium_number'] = str(self.parent.ui.vanadium.text())
_parameters['vanadium_background_number'] = str(self.parent.ui.vanadium_background.text())
_parameters['resamplex'] = str(self.parent.ui.mantid_number_of_bins.text())
_parameters['crop_wavelength_min'] = str(self.parent.ui.mantid_min_crop_wavelength.text())
_parameters['crop_wavelength_max'] = str(self.parent.ui.mantid_max_crop_wavelength.text())
_parameters['output_directory'] = str(self.parent.ui.mantid_output_directory_value.text())
_parameters['vanadium_radius'] = float(str(self.parent.ui.mantid_vanadium_radius.text()))
if self.parent.debugging:
_parameters['exp_ini_filename'] = '/SNS/NOM/IPTS-17118/shared/autoNOM/exp.ini'
_parameters['calibration_file'] = '/SNS/NOM/IPTS-17210/shared/NOM_calibrate_d77194_2016_09_14.h5'
_parameters['characterization_file'] = '/SNS/NOM/shared/CALIBRATION/2016_2_1B_CAL/NOM_char_2016_08_18-rietveld.txt'
_parameters['background_number'] = '80289'
_parameters['vanadium_number'] = '79335'
_parameters['vanadium_background_number'] = '80289'
_parameters['resamplex'] = -6000
_parameters['crop_wavelength_min'] = .1
_parameters['crop_wavelength_max'] = 2.9
_parameters['vanadium_radius'] = 0.58
self.parameters = _parameters
def collect_runs(self):
o_table_handler = addie.processing.idl.table_handler.TableHandler(parent=self.parent)
o_table_handler.retrieve_list_of_selected_rows()
list_of_selected_row = o_table_handler.list_selected_row
runs = []
for _row in list_of_selected_row:
_runs = 'NOM_' + _row['runs']
runs.append(_runs)
self.parameters['runs'] = runs
def collect_background_number(self):
if self.parent.ui.background_yes.isChecked():
return str(self.parent.ui.background_line_edit.text())
else:
return str(self.parent.ui.background_no_field.text())
def run(self):
# display message
o_mantid_launcher = MantidReductionDialogbox(parent=self.parent, father=self)
o_mantid_launcher.show()
def run_reduction(self):
for index, runs in enumerate(self.parameters['runs']):
_o_mantid = self.parent._mantid_thread_array[index]
_o_mantid.setup(runs=runs, parameters=self.parameters)
_o_mantid.start()
self.parent.launch_job_manager(job_name='Mantid', thread_index=index)
def create_output_folder(self):
output_folder = self.parameters['output_directory']
if not os.path.exists(output_folder):
os.makedirs(output_folder)
def view_jobs(self):
o_view_launcher = MantidReductionView(parent=self.parent, father=self)
o_view_launcher.show()
| {
"repo_name": "neutrons/FastGR",
"path": "addie/mantid_handler/mantid_reduction.py",
"copies": "1",
"size": "4931",
"license": "mit",
"hash": -4481171405131095600,
"line_mean": 44.6574074074,
"line_max": 127,
"alpha_frac": 0.6090042588,
"autogenerated": false,
"ratio": 3.6498889711324947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9750493886406785,
"avg_score": 0.0016798687051418776,
"num_lines": 108
} |
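create_output_folder above guards os.makedirs with an explicit existence check, which is the portable Python 2/3 idiom; on Python 3 alone the same effect can be written in one call (a side note with an illustrative path, not a change to the file above):
import os
# Python 3 equivalent of create_output_folder: no error if the folder already exists
os.makedirs('/tmp/mantid_output', exist_ok=True)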
from __future__ import absolute_import, division, print_function
import os
import matplotlib.pyplot as plt
import ROOT
import root_pandas
from histograms import histogram
from root_converters import roocurve, tgraphasymerrors
from plotting_utilities import (
COLOURS as colours,
set_axis_labels
)
PREFIX = 'root://eoslhcb.cern.ch//eos/lhcb/user/a/apearce/CharmProduction/2015_MagDown_MC/{0}' # noqa
FNAME = 'DVntuple.root'
DATA_PATHS = [
os.path.join(PREFIX, str(idx), FNAME)
for idx in range(1, 3)
]
EVT_TYPES = {
'D0ToKpi': 27163003,
'DpToKpipi': 21263010
}
def background_categories(mode):
"""Plot BKGCAT values."""
tree = 'Tuple{0}/DecayTree'.format(mode)
parent = mode.split('To')[0]
columns = [
'{0}_M'.format(parent),
'{0}_BKGCAT'.format(parent)
]
paths = [p.format(EVT_TYPES[mode]) for p in DATA_PATHS]
df = root_pandas.read_root(paths, key=tree, columns=columns)
df.columns = ['M', 'BKGCAT']
if mode == 'D0ToKpi':
mrange = (1800, 1930)
elif mode == 'DpToKpipi':
mrange = (1805, 1935)
nbins = mrange[1] - mrange[0]
signal = df.M[(df.BKGCAT == 0) | (df.BKGCAT == 10)]
ghost = df.M[(df.BKGCAT == 60)]
other = df.M[~((df.BKGCAT == 0) | (df.BKGCAT == 10) | (df.BKGCAT == 60))]
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
histogram([signal, ghost, other], range=mrange, bins=nbins,
label=['Signal', 'Ghost background', 'Other background'], ax=ax)
# Don't have the y-axis go to zero, and add some padding at the top
ax.set_ylim(bottom=0.1, top=2*ax.get_ylim()[1])
ax.set_yscale('log')
set_axis_labels(ax, mode)
ax.legend(loc='best')
fig.savefig('output/{0}_BKGCAT.pdf'.format(mode))
def fits(mode):
f = ROOT.TFile('~/Physics/CharmProduction/analysis/{0}_2015_MagDown_truth_matching_fit.root'.format(mode)) # noqa
w = f.Get('workspace_{0}'.format(mode))
parent = mode.split('To')[0]
x = w.var('{0}_M'.format(parent))
pdf_tot = w.pdf('pdf_m_tot')
    # NB: pdf_bkg refers to the same total PDF; the background-only curve comes
    # from plotting just its '*bkg*' components further below.
    pdf_bkg = w.pdf('pdf_m_tot')
data = w.data('data_binned')
frame = x.frame()
data.plotOn(frame)
pdf_bkg.plotOn(frame)
pdf_tot.plotOn(frame, ROOT.RooFit.Components('*bkg*'))
plotobjs = [frame.getObject(i) for i in range(int(frame.numItems()))]
tgraph, tcurve_tot, tcurve_bkg = plotobjs
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
roocurve(ax, tcurve_bkg, color=colours.red, linestyle=':',
label='Background')
roocurve(ax, tcurve_tot, color=colours.blue,
label='Total fit')
tgraphasymerrors(ax, tgraph, color=colours.black, label='MC data')
ax.set_xlim((frame.GetXaxis().GetXmin(), frame.GetXaxis().GetXmax()))
ax.set_ylim(top=1.2*ax.get_ylim()[1])
# Swap the legend entry order so the data is first
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='best')
set_axis_labels(ax, mode)
fig.savefig('output/{0}_BKGCAT_fit.pdf'.format(mode))
if __name__ == '__main__':
# background_categories('D0ToKpi')
# background_categories('DpToKpipi')
fits('D0ToKpi')
fits('DpToKpipi')
| {
"repo_name": "alexpearce/thesis",
"path": "scripts/background_categories.py",
"copies": "1",
"size": "3206",
"license": "mit",
"hash": -1967345471581070600,
"line_mean": 30.431372549,
"line_max": 118,
"alpha_frac": 0.6250779788,
"autogenerated": false,
"ratio": 2.8221830985915495,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39472610773915495,
"avg_score": null,
"num_lines": null
} |
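fits() reverses the legend handles and labels so the data entry is listed first; the trick is generic matplotlib and can be tried without ROOT (the data below is made up for illustration):
import matplotlib
matplotlib.use('Agg')                      # headless backend for this sketch
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label='Total fit')
ax.plot([0, 1], [1, 0], label='Background')
ax.scatter([0.5], [0.5], label='MC data')
# swap the order so the last-added entry (the data) appears first in the legend
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1], loc='best')
fig.savefig('/tmp/legend_order.png')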
from __future__ import absolute_import, division, print_function
import os.path as op
from os.path import join as pjoin
import glob
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 3
_version_micro = '' # use '' for first of series, number for 1 and above
_version_extra = 'dev'
# _version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "AFQ-Browser"
# Long description will go up on the pypi page
long_description = """
AFQ-browser is a software library for visualization of results from
automated fiber quantification of human brain tractography.
The software takes as input the results of analysis from the ``AFQ`` software
and produces a browser-based visualization of the data. Command-line tools
allow users to create these visualizations from their data and upload them to
share with others as a website.
For instructions on installation and use, visit the documentation:
https://yeatmanlab.github.io/AFQ-Browser
"""
NAME = "AFQ-Browser"
MAINTAINER = "Ariel Rokem"
MAINTAINER_EMAIL = "arokem@gmail.com"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/yeatmanlab/AFQ-Browser"
DOWNLOAD_URL = ""
LICENSE = "MIT"
AUTHOR = "Ariel Rokem"
AUTHOR_EMAIL = "arokem@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'afqbrowser': [pjoin('site', '*'),
pjoin('site', 'client', '*'),
pjoin('site', 'client', 'data', '*'),
pjoin('site', 'client', 'data', 'tracula_data', '*'),
pjoin('site', 'client', 'data', 'tracula_data', 'stats', '*'),
pjoin('site', 'client', 'css', '*'),
pjoin('site', 'client', 'js', '*'),
pjoin('site', 'client', 'js',
'third-party', '*')]}
REQUIRES = ["numpy", "pandas", "scipy", "PyGithub", "GitPython"]
SCRIPTS = [op.join('bin', op.split(f)[-1]) for f in glob.glob('bin/*')]
| {
"repo_name": "richford/AFQ-Browser",
"path": "afqbrowser/version.py",
"copies": "3",
"size": "2793",
"license": "bsd-3-clause",
"hash": -6113664906693592000,
"line_mean": 36.24,
"line_max": 93,
"alpha_frac": 0.6147511636,
"autogenerated": false,
"ratio": 3.6701708278580814,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5784921991458082,
"avg_score": null,
"num_lines": null
} |
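With the values set above (_version_micro = '' and _version_extra = 'dev'), the assembly logic reduces to the following, yielding '0.3.dev':
_ver = [0, 3]
if '':            # _version_micro is empty, so it is skipped
    _ver.append('')
if 'dev':         # _version_extra is non-empty, so it is appended
    _ver.append('dev')
print('.'.join(map(str, _ver)))   # -> 0.3.dev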
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import numpy.testing as npt
import pdb
import gsd.hoomd
import sys
import clustering as cl
#from context import clustering as cl
#from context import smoluchowski as smol
from cdistances import conOptDistanceCython,alignDistancesCython
#import imp
#cl = imp.load_source('cl','/home/rachael/Analysis_and_run_code/analysis/cluster_analysis/clustering/clustering.py')
data_path = op.join(cl.__path__[0], 'data')
def test_write_out_frame():
fname = 'mols8.gsd'
traj = gsd.hoomd.open(op.join(data_path, fname))
box = traj[0].configuration.box
ats = {'contact':17}
cutoff= 1.1*1.1
molno = 8
cldict = {'contact':cutoff}
syst = cl.SnapSystem(traj,ats,molno,cldict)
syst.get_clusters_serial('contact',box)
syst.writeCIDs('contact',op.join(data_path,'mols8cIDs.dat'))
cIDfile = op.join(data_path,'mols8cIDs.dat')
cIDfile = open(cIDfile)
lines = cIDfile.readlines()
cIDfile.close()
line = lines[35]
cIDsf = [float(c) for c in line.split()]
cIDs = [int(c) for c in cIDsf]
cl.writeFrameForVMD(cIDs,molno,ats['contact'],
op.join(data_path,'testframe35.dat')) | {
"repo_name": "ramansbach/cluster_analysis",
"path": "clustering/tests/test_visualization.py",
"copies": "1",
"size": "1247",
"license": "mit",
"hash": -5758892858615032000,
"line_mean": 33.6666666667,
"line_max": 116,
"alpha_frac": 0.6920609463,
"autogenerated": false,
"ratio": 2.976133651551313,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41681945978513124,
"avg_score": null,
"num_lines": null
} |
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import pandas as pd
import numpy.testing as npt
import bha as sb
data_path = op.join(sb.__path__[0], 'data')
def test_transform_data():
"""
    Test that the raw data is transformed into the arrays used for
    fitting a function.
"""
# We start with actual data. We test here just that reading the data in
# different ways ultimately generates the same arrays.
from matplotlib import mlab
ortho = mlab.csv2rec(op.join(data_path, 'ortho.csv'))
x1, y1, n1 = sb.transform_data(ortho)
x2, y2, n2 = sb.transform_data(op.join(data_path, 'ortho.csv'))
npt.assert_equal(x1, x2)
npt.assert_equal(y1, y2)
# We can also be a bit more critical, by testing with data that we
# generate, and should produce a particular answer:
my_data = pd.DataFrame(
np.array([[0.1, 2], [0.1, 1], [0.2, 2], [0.2, 2], [0.3, 1],
[0.3, 1]]),
columns=['contrast1', 'answer'])
my_x, my_y, my_n = sb.transform_data(my_data)
npt.assert_equal(my_x, np.array([0.1, 0.2, 0.3]))
npt.assert_equal(my_y, np.array([0.5, 0, 1.0]))
npt.assert_equal(my_n, np.array([2, 2, 2]))
def test_cum_gauss():
sigma = 1
mu = 0
x = np.linspace(-1, 1, 12)
y = sb.cumgauss(x, mu, sigma)
# A basic test that the input and output have the same shape:
npt.assert_equal(y.shape, x.shape)
# The function evaluated over items symmetrical about mu should be
# symmetrical relative to 0 and 1:
npt.assert_equal(y[0], 1 - y[-1])
    # Approximately 68% of the Gaussian distribution is in mu +/- sigma, so
    # the value of the cumulative Gaussian at mu - sigma should be
    # approximately equal to (1 - 0.68) / 2. Note the low precision!
npt.assert_almost_equal(y[0], (1 - 0.68) / 2, decimal=2)
def test_opt_err_func():
# We define a truly silly function, that returns its input, regardless of
# the params:
def my_silly_func(x, my_first_silly_param, my_other_silly_param):
return x
# The silly function takes two parameters and ignores them
my_params = [1, 10]
my_x = np.linspace(-1, 1, 12)
my_y = my_x
my_err = sb.opt_err_func(my_params, my_x, my_y, my_silly_func)
# Since x and y are equal, the error is zero:
npt.assert_equal(my_err, np.zeros(my_x.shape[0]))
# Let's consider a slightly less silly function, that implements a linear
# relationship between inputs and outputs:
def not_so_silly_func(x, a, b):
return x * a + b
my_params = [1, 10]
my_x = np.linspace(-1, 1, 12)
    # To test this, we calculate the relationship explicitly:
my_y = my_x * my_params[0] + my_params[1]
my_err = sb.opt_err_func(my_params, my_x, my_y, not_so_silly_func)
# Since x and y are equal, the error is zero:
npt.assert_equal(my_err, np.zeros(my_x.shape[0]))
def test_Model():
""" """
M = sb.Model()
x = np.linspace(0.1, 0.9, 22)
target_mu = 0.5
target_sigma = 1
target_y = sb.cumgauss(x, target_mu, target_sigma)
F = M.fit(x, target_y, initial=[target_mu, target_sigma])
npt.assert_equal(F.predict(x), target_y)
def test_params_regression():
"""
Test for regressions in model parameter values from provided data
"""
model = sb.Model()
ortho_x, ortho_y, ortho_n = sb.transform_data(op.join(data_path,
'ortho.csv'))
para_x, para_y, para_n = sb.transform_data(op.join(data_path,
'para.csv'))
ortho_fit = model.fit(ortho_x, ortho_y)
para_fit = model.fit(para_x, para_y)
npt.assert_almost_equal(ortho_fit.params[0], 0.46438638)
npt.assert_almost_equal(ortho_fit.params[1], 0.13845926)
npt.assert_almost_equal(para_fit.params[0], 0.57456788)
npt.assert_almost_equal(para_fit.params[1], 0.13684096)
| {
"repo_name": "christiancarballo/bha",
"path": "bha/tests/tests_bha.py",
"copies": "1",
"size": "3977",
"license": "mit",
"hash": 3365397824534920700,
"line_mean": 35.1545454545,
"line_max": 77,
"alpha_frac": 0.618556701,
"autogenerated": false,
"ratio": 3.0174506828528074,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9136007383852808,
"avg_score": 0,
"num_lines": 110
} |
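The symmetry and 68% assertions in test_cum_gauss above are properties of the cumulative Gaussian; assuming sb.cumgauss has the standard form (a reading of the tests, not verified against the bha source), a stand-alone check:
import math
def cumgauss(x, mu, sigma):
    # standard cumulative Gaussian, Phi((x - mu) / sigma)
    return 0.5 * (1 + math.erf((x - mu) / (sigma * math.sqrt(2))))
print(cumgauss(-1, 0, 1))                      # ~0.1587, i.e. (1 - 0.68) / 2 to 2 decimals
print(cumgauss(-1, 0, 1) + cumgauss(1, 0, 1))  # symmetry about mu: sums to ~1.0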
from __future__ import absolute_import, division, print_function
import os.path as op
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 0
_version_minor = 3
_version_micro = 7 # use '' for first of series, number for 1 and above
# _version_extra = 'dev'
_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = "bidsify: Converts your (raw) data to the BIDS-format"
NAME = "bidsify"
MAINTAINER = "Lukas Snoek"
MAINTAINER_EMAIL = "lukassnoek@gmail.com"
DESCRIPTION = description
URL = "https://github.com/spinoza-rec/bidsify"
DOWNLOAD_URL = ""
LICENSE = "3-clause BSD"
AUTHOR = "Lukas Snoek"
AUTHOR_EMAIL = "lukassnoek@gmail.com"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'bidsify': [op.join('data', '*')]}
| {
"repo_name": "lukassnoek/BidsConverter",
"path": "bidsify/version.py",
"copies": "1",
"size": "1482",
"license": "bsd-3-clause",
"hash": 877323328785897100,
"line_mean": 31.9333333333,
"line_max": 76,
"alpha_frac": 0.6639676113,
"autogenerated": false,
"ratio": 3.3080357142857144,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9472003325585714,
"avg_score": 0,
"num_lines": 45
} |
from __future__ import absolute_import, division, print_function
import os.path
import subprocess
from typing import List, Optional, cast
_HAS_ARMOR = {".gpg": False, ".asc": True}
_EXTENSIONS = _HAS_ARMOR.keys()
_OVERRIDE_HOMEDIR = None # type: Optional[str] # useful for unit tests
def is_encrypted(path: str) -> bool:
_, ext = os.path.splitext(path)
return ext in _EXTENSIONS
def has_armor(path: str) -> bool:
_, ext = os.path.splitext(path)
if ext not in _EXTENSIONS:
raise ValueError("File extension not recognized as encrypted (%r)." % ext)
return _HAS_ARMOR[ext]
def unencrypted_ext(path: str) -> str:
root, ext = os.path.splitext(path)
if ext in _EXTENSIONS:
_, ext = os.path.splitext(root)
return ext
def _base_args() -> List[str]:
binary = os.environ.get("PW_GPG", "gpg")
args = [binary, "--use-agent", "--quiet", "--batch", "--yes"]
if _OVERRIDE_HOMEDIR is not None:
args += ["--homedir", _OVERRIDE_HOMEDIR]
return args
def decrypt(path: str) -> bytes:
args = ["--decrypt", path]
return cast(bytes, subprocess.check_output(_base_args() + args))
def encrypt(recipient: str, dest_path: str, content: bytes) -> None:
args = ["--encrypt"]
if has_armor(dest_path):
args += ["--armor"]
args += ["--recipient", recipient, "--output", dest_path]
popen = subprocess.Popen(
_base_args() + args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = popen.communicate(content)
assert popen.returncode == 0, stderr
| {
"repo_name": "catch22/pw",
"path": "pw/_gpg.py",
"copies": "1",
"size": "1613",
"license": "mit",
"hash": 6977568754076554000,
"line_mean": 28.3272727273,
"line_max": 82,
"alpha_frac": 0.6255424675,
"autogenerated": false,
"ratio": 3.431914893617021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4557457361117021,
"avg_score": null,
"num_lines": null
} |
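A hypothetical round-trip with the helpers above, assuming gpg is on PATH and a key for 'alice@example.com' exists (the recipient, path, and secret are invented for illustration; PW_GPG can point at a different gpg binary):
from pw import _gpg
dest = '/tmp/secret.txt.asc'              # .asc extension -> ASCII-armored output
assert _gpg.is_encrypted(dest)
assert _gpg.has_armor(dest)
_gpg.encrypt('alice@example.com', dest, b'hunter2\n')
plaintext = _gpg.decrypt(dest)
assert plaintext == b'hunter2\n'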